diff --git "a/3549.jsonl" "b/3549.jsonl" new file mode 100644--- /dev/null +++ "b/3549.jsonl" @@ -0,0 +1,656 @@ +{"seq_id":"113774963","text":"splits = ['cu piru cu abraço abc final', 'abraço rodrigo b final', 'string cu de cu', 'abraço monstro final']\n\npalavras = []\n\nfor split in splits:\n\tif (split.find('cu') != -1):\n\t\tsplits.remove(split)\n\nprint(splits)\n\nfor split in splits:\n\tpalavra = ''\n\tfor n in range (split.find('abraço') + 6,split.find('final')):\n\t\tpalavra += split[n]\n\tpalavras.append(palavra)\n\nprint(palavras)","sub_path":"playground/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"593553781","text":"#!/usr/bin/python3\n\ntry:\n from tqdm import trange\nexcept ImportError:\n trange = range\n\nimport random\nimport subprocess\nimport os\nfrom pathlib import Path\nimport pickle\nimport glob\nfrom itertools import zip_longest\nimport argparse\n\n\nclass fg:\n black = '\\033[30m'\n red = '\\033[31m'\n green = '\\033[32m'\n orange = '\\033[33m'\n blue = '\\033[34m'\n purple = '\\033[35m'\n cyan = '\\033[36m'\n lightgrey = '\\033[37m'\n darkgrey = '\\033[90m'\n lightred = '\\033[91m'\n lightgreen = '\\033[92m'\n yellow = '\\033[93m'\n lightblue = '\\033[94m'\n pink = '\\033[95m'\n lightcyan = '\\033[96m'\n end = '\\033[00m'\n\n\nvalid_output = b'''rm -f lex.yy.c\nrm -f parser.tab.*pp\nrm -f hw3\nflex scanner.lex\nbison -d parser.ypp\ng++ -std=gnu++0x -o hw3 *.c *.cpp\n'''\n\n\ndef chunk_lines(lst, n):\n text = ''\n for i in range(0, len(lst), n):\n text += ' '.join(lst[i:i + n]) + '\\n'\n return text\n\n\ndef test_validate(testname: str, results1: str, results2: str):\n if (results1 == results2):\n print(f'{testname}: {fg.green}pass{fg.end}')\n else:\n print(f'{testname}: {fg.red}fail{fg.end}')\n\n\ndef log_and_exit(input, o1, o2):\n for i, (l1, l2) in enumerate(zip_longest(o1.splitlines(), o2.splitlines())):\n if l1 != l2:\n print(f'Line #{i}')\n print('Got:')\n print(l1)\n print('Expected:')\n print(l2)\n exit(0)\n\n\nparser = argparse.ArgumentParser(description='Run the test suite.')\n\nparser.add_argument('-l', '--log', action='store_true', help=\"Print the first differing file-line and exit\")\nparser.add_argument('-v', '--version', action='store_true', help=\"Print the version and exit\")\n\nargs = parser.parse_args()\n\nif args.version:\n print('HW3 Tests version 0.1.2')\n exit(0)\n\nrandom.seed(12345)\n\nprint('Compiling')\n\nchk1 = subprocess.check_output(['make'])\nif chk1 != valid_output:\n print('Compilation Error!')\n print(str(chk1))\n exit(1)\n\nEXE = './hw3'\n\noutputs = {}\ninputs = {}\n\n\nfor filename in sorted(glob.glob('tests/*.in')) + sorted(glob.glob('unofficial_tests/*.in')):\n filename = filename.split('.')[0]\n pretty = filename.split('/')[1]\n print(f'Running test - {pretty}')\n\n with open(f'{filename}.in') as f:\n sample = f.read()\n\n with open(f'{filename}.out') as f:\n output_check = f.read()\n\n final_line = output_check.splitlines()[-1]\n tst_err = 'error' in final_line\n try:\n output = subprocess.check_output(EXE, input=sample, encoding='utf-8', shell=True)\n except subprocess.CalledProcessError as e:\n # if tst_err and e.returncode == 1:\n # print(f'Program exited with error code ({e.returncode}) -- this is {fg.green}correct{fg.end}!')\n # else:\n # print(f'{fg.red}Program exited with error code ({e.returncode}){fg.end}')\n output = e.output\n\n test_validate('Result', output, output_check)\n if output 
!= output_check:\n if args.log:\n log_and_exit(sample, output, output_check)\n","sub_path":"HW3/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"208578805","text":"# coding: UTF-8\n\n###############################################\n# Importando pacotes & definindo funções #\n###############################################\n\nfrom datetime import *\nfrom pyspark import StorageLevel\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\n\n###############################\n# Definição de variáveis #\n###############################\n\n# Definição do Schema de garnishItems que é um Array dentro de Items\nschema_value_currency = StructType([StructField(\"value\", StringType()), StructField(\"currency\", StringType())])\nschema_garnish = StructType(\n [\n StructField(\"name\", StringType()),\n StructField(\"addition\", schema_value_currency),\n StructField(\"discount\", schema_value_currency),\n StructField(\"quantity\", DoubleType()),\n StructField(\"sequence\", IntegerType()),\n StructField(\"unitPrice\", schema_value_currency),\n StructField(\"categoryId\", StringType()),\n StructField(\"externalId\", StringType()),\n StructField(\"totalValue\", schema_value_currency),\n StructField(\"categoryName\", StringType()),\n StructField(\"integrationId\", StringType())\n ])\n\n# Definição do Schema do Array de Items\nschema_items = StructType(\n [\n StructField(\"name\", StringType()),\n StructField(\"addition\", schema_value_currency),\n StructField(\"discount\", schema_value_currency),\n StructField(\"quantity\", DoubleType()),\n StructField(\"sequence\", IntegerType()),\n StructField(\"unitPrice\", schema_value_currency),\n StructField(\"externalId\", StringType()),\n StructField(\"totalValue\", schema_value_currency),\n StructField(\"customerNote\", StringType()),\n StructField(\"garnishItems\", ArrayType(schema_garnish)),\n StructField(\"integrationId\", StringType()),\n StructField(\"totalAddition\", schema_value_currency),\n StructField(\"totalDiscount\", schema_value_currency)\n ])\n\n# Definação do schema json\nschema_json = StructType(\n [\n StructField(\"cpf\", StringType()),\n StructField(\"customer_id\", StringType()),\n StructField(\"customer_name\", StringType()),\n StructField(\"delivery_address_city\", StringType()),\n StructField(\"delivery_address_country\", StringType()),\n StructField(\"delivery_address_district\", StringType()),\n StructField(\"delivery_address_external_id\", StringType()),\n StructField(\"delivery_address_latitude\", StringType()),\n StructField(\"delivery_address_longitude\", StringType()),\n StructField(\"delivery_address_state\", StringType()),\n StructField(\"delivery_address_zip_code\", StringType()),\n StructField(\"items\", StringType()),\n StructField(\"merchant_id\", StringType()),\n StructField(\"merchant_latitude\", StringType()),\n StructField(\"merchant_longitude\", StringType()),\n StructField(\"merchant_timezone\", StringType()),\n StructField(\"order_created_at\", StringType()),\n StructField(\"order_id\", StringType()),\n StructField(\"order_scheduled\", BooleanType()),\n StructField(\"order_scheduled_date\", StringType()),\n StructField(\"order_total_amount\", DoubleType()),\n StructField(\"origin_platform\", StringType())\n ])\n \n# Referência de processamento\nref = str(datetime.today() - timedelta(days=1))[0:10]\n\n# Caminho de origem da pouso 
order\norigem_pouso = \"hdfs://localhost:9000/ifood-landing-order/dt={}/*.json\".format(ref)\n\n# Caminho de destino da raw order\ndestino_raw = \"hdfs://localhost:9000/ifood-raw-order/\"\n\n# Inicia sessão spark\nspark = SparkSession.builder.appName(\"ifood-raw-order-prod\").getOrCreate()\n\n# Configurações básicas para o spark\nspark.conf.set(\"spark.sql.maxPartitionBytes\", 200 * 1024 * 1024) # Seta a quantidade máxima de bytes em uma partição ao ler os arquivos de entrada (Entre 100MB e 200MB é o ideal)\nspark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"DYNAMIC\") # Necessário para sobrescrever partições \n\n################################\n# Etapas do Processamento #\n################################\n\n# Lê a pouso de origem\npousoDF = spark.read.schema(schema_json).json(origem_pouso)\n\n# Altera o schema da coluna items para ser reconhecida como um array\npousoDF_ = pousoDF \\\n .withColumn(\"items\", from_json(\"items\", ArrayType(schema_items)))\n \n# Cria a tabela raw\nrawDF = pousoDF_ \\\n .withColumn(\"dt_proc\", current_date()) \\\n .withColumn(\"dt\", col(\"order_created_at\").cast(DateType())) \\\n .repartition(100, \"dt\")\n\nrawDF.write.partitionBy(\"dt\").mode(\"overwrite\").option(\"compression\", \"snappy\").format(\"parquet\").save(destino_raw)\n\n# Validação\n# Leitura da base final\nrawDF_ = spark.read.parquet(destino_raw).filter(col(\"dt\") == lit(ref))\n\n# Validação Volumétrica\nprint(\"> Volumetria de saída equivale-se a de entrada ? --> {}\".format(pousoDF.count() == rawDF_.count()))\n\n# Validação do Schema\nschema = pousoDF.schema\nschema_ = rawDF_.drop(\"dt\", \"dt_proc\").schema\ncolunas = pousoDF.columns\ncolunas_ = rawDF_.drop(\"dt\", \"dt_proc\").columns\ncolunas.sort()\ncolunas_.sort()\nprint(\"> O schema de saída equivale-se ao de entrada ? 
---> {}\".format(schema == schema_ or colunas == colunas_))","sub_path":"Solution/ifood-raw-order/ifood-raw-order-prod.py","file_name":"ifood-raw-order-prod.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"358382133","text":"\"\"\"Ceasar\"\"\"\ndef main(msg):\n \"\"\"Ceasar\"\"\"\n result = \"\"\n letters = \"aeiou\"\n dict1 = {}\n dict2 = {}\n dict3 = {}\n for value in range(1, 27):\n for string in msg:\n if \"A\" <= string <= \"Z\":\n check = ord(string) - 65\n check = (check + value)%26\n string = chr(check + 65)\n elif \"a\" <= string <= \"z\":\n check = ord(string) - 97\n check = (check + value)%26\n string = chr(check + 97)\n else:\n result += string\n continue\n result += string\n dict1[value] = result.lower()\n dict3[value] = result\n result = \"\"\n for key in dict1.keys():\n dict2[key] = {}\n for i in letters:\n dict2[key][i] = dict1[key].count(i)\n check = 0\n checksum = 0\n for number, key in enumerate(dict2.keys()):\n if sum(dict2[key].values()) > checksum:\n checksum = sum(dict2[key].values())\n check = number\n print(dict3[check+1])\nmain(input())\n","sub_path":"Caesarv2.py","file_name":"Caesarv2.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617520687","text":"def run ():\r\n import pandas as pd\r\n cost1 = pd.read_csv('Bilding App Cost.csv')\r\n Servise_list = list(cost1.keys())\r\n print('Hello \\nWelcome to Building Management APP\\nPlease select your service by help from the bottom legend and type it.')\r\n def start():\r\n\r\n print('\\n\\n'+'{} to Service\\n\"Remove\" to remove\\n\"Finish\" to save and finish\\n\"Now\" to get report'.format(Servise_list))\r\n Service = input('What is your service?\\n: ').title()\r\n if Service == 'Now':\r\n print(cost1.to_csv)\r\n start()\r\n elif Service == 'Finish':\r\n return cost1.to_csv('Bilding App Cost.csv',index=False)\r\n elif Service == 'Remove':\r\n remove_service = input('What service you want to remove?\\n: ').lower().title()\r\n respance = input('Are you certain to remove \"{}\" with \"{}\" cost? yes or no?\\n: '.format(remove_service, cost[remove_service][0])).title()\r\n if respance == 'Yes':\r\n del cost[remove_service]\r\n del cost1[remove_service]\r\n del Servise_list[Servise_list.index(remove_service)]\r\n start()\r\n else:\r\n Cost = input('what is it cost?\\n: ')\r\n respance = input('Your service is \\\"{}\\\" with \\\"{}\\\" cost.\\n This is True? Yes or No? 
\\n: '.format(Service,Cost)).title()\r\n if respance == 'Yes':\r\n if Service in Servise_list:\r\n cost1[Service][0] += eval(Cost)\r\n else:\r\n Servise_list.append(Service)\r\n cost1[Service] = eval(Cost)\r\n start()\r\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"399440191","text":"from verification_plotting import Metric_Diagrams\nfrom netCDF4 import Dataset\nfrom Config import Config\nfrom glob import glob\nimport pandas as pd\nimport numpy as np \nimport argparse\nimport os\n\ndef main():\n parser = argparse.ArgumentParser(\"calibration processor\")\n parser.add_argument(\"-c\" ,\"--config_file\",type=str, help=\"Configuration file\")\n parser.add_argument(\"-a\", \"--calibrate\", action=\"store_const\",const='calibrate',help=\"Calibrate data using IR\")\n parser.add_argument(\"-l\", \"--hourly\", action=\"store_const\",const=[17,19,21], help=\"If flagged, will use subdaily data\")\n parser.add_argument(\"-r\", \"--reliability_diagram\", action=\"store_const\",const=\"reliability\", help=\"Create reliabiliy diagram\")\n parser.add_argument(\"-e\", \"--ets_diagram\",action=\"store_const\",const=\"ets\", help=\"Create ETS diagram\")\n parser.add_argument(\"-b\", \"--bias_diagram\", action=\"store_const\",const=\"bias\", help=\"Create bias diagram\")\n parser.add_argument(\"-f\", \"--performance_diagram\", action=\"store_const\",const=\"performance\", help=\"Create performance diagram\")\n parser.add_argument(\"-m\", \"--map_figure\", action=\"store_true\", help=\"output maps\")\n parser.add_argument(\"-s\", \"--significance_testing\", action=\"store_true\", help=\"Perform significance testing\")\n parser.add_argument(\"-p\", \"--proc\", type=int, default=1,help=\"Number of processors\")\n\n args = parser.parse_args()\n required = ['model_data','obs_data','start_date',\n 'end_date','variable_names','out_path']\n if args.map_figure:\n required.append('map_filename')\n config = Config(args.config_file , required_attributes=required)\n \n if not hasattr(config, \"bootstrap_iterations\"):\n config.bootstrap_iterations = None\n else:\n config.out_path = config.out_path+'bootstrap_'\n\n if not hasattr(config, \"baseline_data\"):\n config.baseline_data = None\n non_obs_label = list(config.model_data.keys())\n else:\n non_obs_label = list(config.model_data.keys()) + list(config.baseline_data.keys())\n \n if hasattr(config, \"sector\"):\n #config.out_path= config.out_path+config.sector+'_'\n if hasattr(config, \"mask_file\"):\n mask = Dataset(config.mask_file).variables[\"usa_mask\"][:]\n config.rows,config.cols = np.where(mask > 0.5)\n else:\n config.sector = None\n \n if args.calibrate:\n config.out_path=config.out_path+'calibrated_'\n #\n # Finding possible data files already output\n #\n \n data_dictionary = {}\n data_files = glob(config.out_path+'*.pkl')\n if args.map_figure:\n label_to_read = non_obs_label\n elif not data_files:\n label_to_read = non_obs_label\n else: \n if args.reliability_diagram:\n arguments = ['bin','sharpness','bss']\n metric_labels = [l+'_'+a for a in arguments for l in non_obs_label] \n elif args.performance_diagram:\n arguments = ['pod','sr']\n metric_labels = [l+'_'+a for a in arguments for l in non_obs_label] \n elif args.ets_diagram:\n metric_labels = [l+'_ets' for l in non_obs_label] \n elif args.bias_diagram:\n metric_labels = [l+'_bias' for l in non_obs_label] \n #if config.sector:\n # metric_labels = 
[config.sector+'_'+m for m in metric_labels]\n labels_not_output = [m for m in metric_labels if not any(m in d for d in data_files)]\n label_to_read = [n for n in non_obs_label for o in labels_not_output if n in o]\n if not label_to_read: data_dictionary = None\n else: label_to_read = np.unique(label_to_read)\n print(label_to_read)\n \n #\n # Extracting data if files not found\n #\n \n if data_dictionary is not None: \n if args.map_figure:\n data_dictionary,kept_dates = extract_24_hour_data(config,label_to_read,mapping=True)\n else: \n data_dictionary = extract_24_hour_data(config,label_to_read)\n #\n # Calibrating data if flagged\n #\n\n if args.calibrate:\n if not hasattr(config, \"calibration_obs_label\"):\n print('No calibration data labels found')\n raise Exception\n if not hasattr(config, \"train_test_split\"):\n config.train_test_split = 0.7\n data_length = len(list(data_dictionary.values())[0])\n all_indices = np.arange(0, data_length)\n np.random.seed(50)\n train = np.random.choice(all_indices, \n int(len(all_indices)*config.train_test_split),\n replace=False)\n test = all_indices[np.isin(all_indices,train,invert=True)]\n print()\n print('No. of Train Indices:',len(train))\n print('No. of Test Indicies:',len(test))\n print()\n \n for c_label in config.calibration_obs_label: \n print('Calibrating to {0} data'.format(c_label))\n for model_data_label in config.model_data.keys():\n train_data= np.array(data_dictionary[model_data_label])[train]\n training_obs = np.array(data_dictionary[c_label])[train]\n test_data = np.array(data_dictionary[model_data_label])[test]\n \n data_dictionary['{0}_Calibrated'.format(c_label)] =\\\n calibration(training_obs,train_data,test_data)\n non_obs_label.append('{0}_Calibrated'.format(c_label)) \n for key,value in data_dictionary.items():\n if len(value) > len(test):\n data_dictionary[key] = np.array(value)[test]\n #\n # Specific diagram plotting\n #\n \n if any([args.reliability_diagram,args.ets_diagram,\n args.bias_diagram,args.performance_diagram,args.map_figure]):\n for obs_label in config.obs_data.keys(): \n plotting = Metric_Diagrams(\n data_dictionary,obs_label,non_obs_label,\n config.out_path,config.threshold,config.sector,\n config.bootstrap_iterations,args.proc)\n if args.reliability_diagram:\n plotting.reliability_diagram()\n elif args.ets_diagram:\n plotting.ets_diagram()\n elif args.bias_diagram:\n plotting.bias_diagram()\n elif args.performance_diagram:\n plotting.performance_diagram()\n elif args.map_figure:\n plotting.map_figure(config.map_filename,kept_dates)\n return \n \ndef extract_24_hour_data(config,label,mapping=False):\n \"\"\"\n Extract 24 hour data from various netCDF4 files with paths in \n the Config file\n \"\"\"\n print()\n print('Threshold value:', config.threshold)\n print()\n print(\"Extract Full Day Data\") \n print()\n \n dates = pd.date_range(start=config.start_date,\n end=config.end_date).strftime(\"%y%m%d\")\n path_files = {} \n removal_date = []\n \n for m,m_path in config.model_data.items():\n if m in label:\n path_files[m],not_found_date = find_files(config,dates,m_path)\n if not_found_date:\n for date in not_found_date:\n removal_date.append(date)\n \n if mapping is False:\n for o,o_path in config.obs_data.items():\n path_files[o],not_found_date = find_files(config,dates,o_path)\n if not_found_date:\n for date in not_found_date:\n removal_date.append(date)\n \n if config.baseline_data:\n for b_key, b_path in config.baseline_data.items():\n if b_key in label:\n path_files[b_key],not_found_date = 
find_files(config,dates,b_path)\n if not_found_date:\n for date in not_found_date:\n removal_date.append(date)\n else:\n if removal_date:\n kept_dates = dates.drop(removal_date)\n else:\n kept_dates=dates\n path_data = {}\n for k,files in path_files.items():\n for date in removal_date:\n for f in files:\n if date in str(f):\n path_files[k].remove(f)\n if 'PP' in k:\n path_data[k] = [(Dataset(new_file).variables[config.variable_names[k]][:])/100. for new_file in files]\n else: \n path_data[k] = [Dataset(new_file).variables[config.variable_names[k]][:] for new_file in files]\n if config.sector: \n path_data[k] = np.array(path_data[k])[:,config.rows,config.cols]\n \n print(k,np.shape(path_data[k]))\n print()\n if mapping is False:\n return path_data\n else:\n return path_data,kept_dates\n\ndef find_files(config,dates,path):\n path_list = []\n removal_date_list = []\n for date in dates:\n files = glob(path.format(date,config.threshold),recursive=True)\n if files:\n path_list.append(files[0])\n else:\n removal_date_list.append(date)\n return path_list,removal_date_list\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Calibration/new_calibration.py","file_name":"new_calibration.py","file_ext":"py","file_size_in_byte":9115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"605695477","text":"\"\"\"Feedbus client\"\"\"\n\nfrom __future__ import annotations\nimport asyncio\nfrom asyncio import Queue\nimport logging\nfrom typing import Optional, List, Callable, Awaitable\nfrom uuid import UUID\n\nfrom .client import Client\nfrom .io import DataReader, DataWriter, DataPacket\nfrom .authentication import Authenticator\n\nLOGGER = logging.getLogger(__name__)\n\nAuthorizationHandler = Callable[\n [UUID, str, str, str, str],\n Awaitable[None]\n]\nDataHandler = Callable[\n [str, str, str, str, Optional[List[DataPacket]], str],\n Awaitable[None]\n]\nNotificationHandler = Callable[\n [UUID, str, str, str, str, bool],\n Awaitable[None]\n]\nClosedHandler = Callable[\n [bool],\n Awaitable[None]\n]\n\n\nclass CallbackClient(Client):\n \"\"\"Feedbus callback client\"\"\"\n\n def __init__(\n self,\n reader: DataReader,\n writer: DataWriter,\n authenticator: Optional[Authenticator],\n monitor_heartbeat: bool\n ) -> None:\n super().__init__(reader, writer, authenticator, monitor_heartbeat)\n self._authorization_handlers: List[AuthorizationHandler] = list()\n self._data_handlers: List[DataHandler] = list()\n self._notification_handlers: List[NotificationHandler] = list()\n self._closed_handlers: List[ClosedHandler] = list()\n self._read_queue: Queue = asyncio.Queue()\n self._write_queue: Queue = asyncio.Queue()\n self._token = asyncio.Event()\n\n @property\n def authorization_handlers(self) -> List[AuthorizationHandler]:\n \"\"\"The handlers called when an authorization request is\n received.\n\n Returns:\n List[AuthorizationHandler]: The list of handlers\n \"\"\"\n return self._authorization_handlers\n\n @property\n def data_handlers(self) -> List[DataHandler]:\n \"\"\"The list of handlers called when data is received.\n\n Returns:\n List[DataHandler]: The list of handlers\n \"\"\"\n return self._data_handlers\n\n @property\n def notification_handlers(self) -> List[NotificationHandler]:\n \"\"\"The list of handlers called when a notification is received\n\n Returns:\n List[NotificationHandler]: The list of handlers\n \"\"\"\n return self._notification_handlers\n\n @property\n def closed_handlers(self) -> List[ClosedHandler]:\n \"\"\"The list of handlers 
called when a connection is closed\n\n Returns:\n List[ClosedHandler]: The list of handlers\n \"\"\"\n return self._closed_handlers\n\n async def on_authorization(\n self,\n client_id: UUID,\n host: str,\n user: str,\n feed: str,\n topic: str\n ) -> None:\n for handler in self._authorization_handlers:\n await handler(\n client_id,\n host,\n user,\n feed,\n topic\n )\n\n async def on_data(\n self,\n user: str,\n host: str,\n feed: str,\n topic: str,\n data_packets: Optional[List[DataPacket]],\n content_type: str\n ) -> None:\n for handler in self._data_handlers:\n await handler(\n user,\n host,\n feed,\n topic,\n data_packets,\n content_type\n )\n\n async def on_forwarded_subscription_request(\n self,\n client_id: UUID,\n user: str,\n host: str,\n feed: str,\n topic: str,\n is_add: bool\n ) -> None:\n for handler in self._notification_handlers:\n await handler(\n client_id,\n user,\n host,\n feed,\n topic,\n is_add\n )\n\n async def on_closed(self, is_faulted: bool) -> None:\n for handler in self._closed_handlers:\n await handler(is_faulted)\n","sub_path":"jetblack_messagebus/callback_client.py","file_name":"callback_client.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"297031465","text":"from typing import Union, List\n\nimport numpy as np\nimport pandas as pd\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nimport keras\nfrom keras import Model\nfrom keras.layers import Input, LSTM, Dense, Dropout, TimeDistributed, Lambda, Reshape\n\nfrom_word_to_vector = dict()\n\nwith open(\"fasttext_train_and_public.txt\", \"r\", encoding=\"utf-8\") as file:\n for line in file:\n words = line.split()\n from_word_to_vector[words[0]] = np.array([float(x) for x in words[1:]])\n\ndataset = pd.read_csv(\"data/train.tsv\", names=[\"context_id\", \"context_2\", \"context_1\", \"context_0\",\n \"reply_id\", \"reply\", \"label\", \"confidence\"], header=None, sep=\"\\t\",\n quoting=3)\n\n\ndef convert_sentence_to_list_of_vectors(sentence: Union[str, float]):\n if type(sentence) == str:\n return [from_word_to_vector[word] for word in sentence.split()]\n else:\n return []\n\n\nlabel_to_number = {\n \"bad\": -1,\n \"neutral\": 0,\n \"good\": 1\n}\n\n\ndef convert_label_to_number(label: str):\n return label_to_number[label]\n\n\ndataset[[\"context_2\", \"context_1\", \"context_0\", \"reply\"]] = \\\n dataset[[\"context_2\", \"context_1\", \"context_0\", \"reply\"]].applymap(convert_sentence_to_list_of_vectors)\ndataset[[\"label\"]] = dataset[[\"label\"]].applymap(convert_label_to_number)\n\n\ndef convert_sequence_of_vectors_to_padded_2D_array(sequence: List[np.ndarray], max_len: int):\n padding_size = max_len - len(sequence)\n padding = [np.zeros((300,)) for _ in range(padding_size)]\n return np.array(padding + sequence)\n\n\ndef get_matrices(batch):\n max_sentence_len = np.max([len(x) for x in batch.context_2] +\n [len(x) for x in batch.context_1] +\n [len(x) for x in batch.context_0] +\n [len(x) for x in batch.reply])\n\n X = np.zeros((batch.shape[0], 4, max_sentence_len, 300), dtype=np.float32)\n # (train_data_batch.shape[0], 4, 128)\n y = batch.label.values\n weights = batch.confidence.values\n\n for i in range(batch.shape[0]):\n X[i, 0, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].context_2, max_sentence_len)\n X[i, 1, :, :] = 
convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].context_1, max_sentence_len)\n X[i, 2, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].context_0, max_sentence_len)\n X[i, 3, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].reply, max_sentence_len)\n\n return X, y, weights\n\n\ndef get_test_matrices(batch):\n max_sentence_len = np.max([len(x) for x in batch.context_2] +\n [len(x) for x in batch.context_1] +\n [len(x) for x in batch.context_0] +\n [len(x) for x in batch.reply])\n\n X = np.zeros((batch.shape[0], 4, max_sentence_len, 300), dtype=np.float32)\n\n for i in range(batch.shape[0]):\n X[i, 0, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].context_2, max_sentence_len)\n X[i, 1, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].context_1, max_sentence_len)\n X[i, 2, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].context_0, max_sentence_len)\n X[i, 3, :, :] = convert_sequence_of_vectors_to_padded_2D_array(batch.iloc[i].reply, max_sentence_len)\n\n return X\n\n\ntrain_part, validation_part = train_test_split(dataset, test_size=0.1, stratify=dataset.label, random_state=0)\n\nbatch_size = 64\n\n\ndef generate_train_batch():\n while True:\n for i in range(train_part.shape[0] // batch_size):\n train_data_batch = train_part.iloc[i * batch_size: (i + 1) * batch_size]\n yield get_matrices(train_data_batch)\n\n\ndef generate_validation_batch():\n while True:\n for i in range(validation_part.shape[0] // batch_size):\n validation_data_batch = validation_part.iloc[i * batch_size: (i + 1) * batch_size]\n yield get_matrices(validation_data_batch)\n\n\ninp = Input(shape=(4, None, 300))\n\nget_context_2 = Lambda(lambda batch: batch[:, 0, :, :])(inp)\nget_context_1 = Lambda(lambda batch: batch[:, 1, :, :])(inp)\nget_context_0 = Lambda(lambda batch: batch[:, 2, :, :])(inp)\nget_reply = Lambda(lambda batch: batch[:, 3, :, :])(inp)\n\nshared_lstm = LSTM(100)\n\nencoded_context_2 = shared_lstm(get_context_2)\nencoded_context_1 = shared_lstm(get_context_1)\nencoded_context_0 = shared_lstm(get_context_0)\nencoded_reply = shared_lstm(get_reply)\n\nstacked = keras.layers.concatenate([encoded_context_2, encoded_context_1, encoded_context_0, encoded_reply])\n\ndrop0 = Dropout(0.4)(stacked)\n\ndense1 = Dense(200, activation=\"relu\")(drop0)\ndrop1 = Dropout(0.4)(dense1)\n\noutput_layer = Dense(1)(drop1)\n\nmodel = Model(input=inp, output=output_layer)\nmodel.compile(loss=\"mean_squared_error\", optimizer=Adam(clipnorm=1.), metrics=[\"mean_squared_error\"])\n\nmodel.fit_generator(generate_train_batch(),\n steps_per_epoch=train_part.shape[0] // batch_size,\n epochs=3,\n verbose=True,\n validation_data=generate_validation_batch(),\n validation_steps=validation_part.shape[0] // batch_size,\n callbacks=[\n ModelCheckpoint(\"base_reg_weights.{epoch:02d}-{val_loss:.2f}.hdf5\",\n monitor=\"val_loss\",\n save_best_only=True)\n ])\n\n#####\n\npublic_test = pd.read_csv(\"data/public.tsv\", names=[\"context_id\", \"context_2\", \"context_1\", \"context_0\",\n \"reply_id\", \"reply\"], header=None, sep=\"\\t\",\n quoting=3)\n\npublic_test[[\"context_2\", \"context_1\", \"context_0\", \"reply\"]] = \\\n public_test[[\"context_2\", \"context_1\", \"context_0\", \"reply\"]].applymap(convert_sentence_to_list_of_vectors)\n\nX_test = get_test_matrices(public_test)\n\ny_predicted = model.predict(X_test)\n\n\npublic_test[\"score\"] = y_predicted\n\npublic_test = public_test[[\"context_id\", \"reply_id\", 
\"score\"]]\n\n\npublic_test = public_test.groupby(\"context_id\", sort=True) \\\n .apply(lambda g: g.sort_values([\"score\"], ascending=False))[[\"reply_id\", \"score\"]] \\\n .reset_index()\n\n\n\n\npublic_test.to_csv(\"submission.txt\", sep=\"\\t\", columns=[\"context_id\", \"reply_id\"], header=False, index=False)\n","sub_path":"baseline_regression.py","file_name":"baseline_regression.py","file_ext":"py","file_size_in_byte":6553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"301382008","text":"#!/usr/local/bin/python env\n# coding:utf-8\n\n\nimport requests\nimport json\n\n\nclass TestSearchCommnityNote:\n def test_CheckNumWithAuth(self):\n url = \"https://functest.junhuahomes.com/imapi/user/checkNum\"\n para = {\n \"Imei\": \"be7faff4f79baaf9ad62db1cd26053eccd184674\",\n \"apiVersion\": \"1.3.0\",\n \"channel\": \"\",\n \"currentCommunityId\": \"\",\n \"currentVer\": \"3.0.0\",\n \"dscd\": \"\",\n \"login\": \"\",\n \"mobile\": \"13718591270\",\n \"phoneName\": \"iPhone\",\n \"platform\": \"ios\",\n \"platformVersion\": \"10.3.2\",\n \"system\": \"iOS\",\n \"validatecode\": \"1234\",\n \"xgToken\": \"191e35f7e0731cc5080\"\n }\n request = requests.post(url, data=para, verify=False)\n res = json.loads(request.text)\n token = request.headers['login']\n # self.assertIsNotNone(res['userPhone'], \"用户手机号不能为空\")\n return token\n\n\n # 获取社区公告\n def test_getcommnitynote(self):\n url = \"https://functest.junhuahomes.com/gtw/provider/searchCommnityNote\"\n para = {\n \"Imei\": \"be7faff4f79baaf9ad62db1cd26053eccd184674\",\n \"apiVersion\": \"1.3.0\",\n \"bizType\": \"COMMUNITY_BULLETIN\",\n \"channel\": \"\",\n \"communityId\": \"22206\",\n \"currentVer\": \"3.0.0\",\n \"houseId\": \"22575\",\n \"login\": self.test_CheckNumWithAuth(),\n \"numPerPage\": \"15\",\n \"pageNum\": \"1\",\n \"phoneName\": \"iPhone\",\n \"platform\": \"ios\",\n \"platformVersion\": \"10.3.2\",\n \"system\": \"iOS\"\n }\n req = requests.post(url, data=para, verify=False)\n res = json.loads(req.text)\n print(res)\n assert 'messageId'\n","sub_path":"API/Homepage/test_SearchCommnityNote.py","file_name":"test_SearchCommnityNote.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"504654636","text":"configs = {}\n\nconfigs['flag'] = 'online'\n#configs['flag'] = 'offline'\nconfigs['taobao_user'] = 'non_filter'\n#configs['taobao_user'] = 'filter'\n\n#####data info\nconfigs['data_path'] = \"../data/\"\nconfigs['train'] = configs['data_path'] + \"ijcai2016_koubei_train\"\nconfigs['test'] = configs['data_path'] + \"ijcai2016_koubei_test\"\nconfigs['merchant'] = configs['data_path'] + \"ijcai2016_merchant_info\"\nconfigs['taobao'] = configs['data_path'] + \"ijcai2016_taobao\"\nconfigs['taobao_filter_user'] = configs['data_path'] + \"ijcai2016_taobao_filter_user\"\nconfigs['submit'] = configs['data_path'] + \"submission.csv\"\n\nconfigs['offline_train'] = configs['data_path'] + 'off_train'\nconfigs['offline_test'] = configs['data_path'] + 'off_test'\nconfigs['offline_answer'] = configs['data_path'] + 'off_answer'\nconfigs['offline_taobao'] = configs['data_path'] + 'off_taobao'\nconfigs['offline_taobao_filter_user'] = configs['data_path'] + 'off_taobao_filter_user'\nconfigs['offline_submit'] = configs['data_path'] + \"off_submit\"\n\n#####user info\nconfigs['users_path'] = '../data/users/'\nconfigs['taobao_and_train'] = configs['users_path'] + 
'taobao_and_train'\nconfigs['taobao_and_train_all'] = configs['users_path'] + 'taobao_and_train_all'\nconfigs['in_taobao'] = configs['users_path'] + 'in_taobao'\nconfigs['in_train'] = configs['users_path'] + 'in_train'\n#neither not in taobao and train\nconfigs['new_user'] = configs['users_path'] + 'new_user'\n\n\n\nconfigs['offline_taobao_and_train'] = configs['users_path'] + 'offline_taobao_and_train'\nconfigs['offline_in_taobao'] = configs['users_path'] + 'offline_in_taobao'\nconfigs['offline_in_train'] = configs['users_path'] + 'offline_in_train'\nconfigs['offline_new_user'] = configs['users_path'] + 'offline_new_user'\n\n#####matrix info\nconfigs['matrix_path'] = '../data/matrix/'\nconfigs['taobao_ui_3'] = configs['matrix_path'] + 'taobao_ui_3'\nconfigs['taobao_uc_3'] = configs['matrix_path'] + 'taobao_uc_3'\nconfigs['taobao_ui_matrix'] = configs['matrix_path'] + 'taobao_ui_matrix'\nconfigs['taobao_ui_matrix2'] = configs['matrix_path'] + 'taobao_ui_matrix2'\nconfigs['uu_sim'] = configs['matrix_path'] + 'uusim.txt'\n\n\n","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"411519230","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# 使用PyMySQL 驱动模块,实现对这个表的增加,删除,修改,查询\nimport pymysql\nimport datetime\n\nmydb=pymysql.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"qazwsx123\",\n database=\"test\"\n)\n\ndef menu():\n print(\"请输入序号:\")\n print(\"1.进行留言\")\n print(\"2.删除留言\")\n print(\"3.更新留言\")\n print(\"4.查询所有留言\")\n print(\"0.退出留言板\")\n\n\ndef choic(x):\n cursor = mydb.cursor()\n if x == 1:\n Content = input(\"请输入要留言的内容:\")\n Name = input(\"请输入留言人:\")\n Time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n is_delete = 0\n sql = \"INSERT INTO message(ID,content,name,time,is_delete) \" \\\n \"VALUES (0,'%s','%s','%s',%s)\"% \\\n (Content,Name,Time,is_delete)\n try:\n cursor.execute(sql)\n mydb.commit()\n except:\n mydb.rollback()\n print(\"添加成功!\")\n\n\n if x == 2:\n id = int(input(\"请输入留言号:\"))\n sql = \"UPDATE message SET is_delete = 1 WHERE ID = %d\" %(id)\n try:\n cursor.execute(sql)\n mydb.commit()\n except:\n mydb.rollback()\n print(\"删除成功!\")\n\n\n if x == 3:\n id = int(input(\"请输入留言号:\"))\n Content = input(\"请输入要留言的内容:\")\n Name = input(\"请输入留言人:\")\n Time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n sql1 = \"UPDATE message SET content = '%s' WHERE ID = %d;\" %(Content,id)\n sql2 = \"UPDATE message SET name = '%s' WHERE ID = %d;\" %(Name,id)\n sql3 = \"UPDATE message SET time = '%s' WHERE ID = %d;\" %(Time,id)\n try:\n cursor.execute(sql1)\n mydb.commit()\n cursor.execute(sql2)\n mydb.commit()\n cursor.execute(sql3)\n mydb.commit()\n except:\n mydb.rollback()\n print(\"更新成功!\")\n\n if x == 4:\n sql = \"SELECT * FROM message\"\n try:\n # 执行SQL语句\n cursor.execute(sql)\n # 获取所有记录列表\n results = cursor.fetchall()\n for row in results:\n ID = row[0]\n Content = row[1]\n Name = row[2]\n Time= row[3]\n is_delete = row[4]\n # 打印结果\n print(\"ID=%s,Content=%s,Name=%s,Time=%s,is_delete=%d\" % \\\n (ID, Content, Name, Time, is_delete))\n except:\n mydb.rollback()\n print(\"查询成功!\")\n\n\nif __name__ == '__main__':\n menu()\n menu_choic = int(input(\"请输入0-4任意整数:\"))\n while(menu_choic):\n choic(menu_choic)\n menu_choic = int(input(\"请输入0-4任意整数:\"))\n else:\n 
print(\"退出留言板!\")","sub_path":"Homework/homework10/MessageBoard1.py","file_name":"MessageBoard1.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"7309724","text":"from collections import deque\nimport sys\n\nn = int(sys.stdin.readline())\nq = deque()\n\nif(n % 2 != 0):\n q.append(n)\n\nfor i in range(2, n+1, 2):\n num = (n + 3) - i\n q.append(i)\n\nwhile(len(q) != 1):\n q.popleft()\n q.append(q.popleft())\n\nprint(q.popleft())\n","sub_path":"by date/2021.02.23/2164-2.py","file_name":"2164-2.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"335011386","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport numpy as np\nfrom mixed_precision import maybe_half\nfrom costs import loss_MSE\nfrom PIL import Image # Nawid-Used to save the image\n\n\ndef test_model(model, test_loader, device, stats, output_dir, max_evals=200000): # Nawid - Added output directory to here to allow the saved reconstructions to be saved in a certain place\n '''\n Evaluate accuracy on test set\n '''\n # warm up batchnorm stats based on current model\n _warmup_batchnorm(model, test_loader, device, batches=50, train_loader=False) # Nawid - I think this obtains the normalisation values early on\n\n def get_correct_count(lgt_vals, lab_vals):\n # count how many predictions match the target labels\n max_lgt = torch.max(lgt_vals.cpu().data, 1)[1]\n num_correct = (max_lgt == lab_vals).sum().item()\n return num_correct\n\n # evaluate model on test_loader\n model.eval()\n correct_glb_mlp = 0.\n correct_glb_lin = 0.\n total = 0.\n for _, (images, labels) in enumerate(test_loader): # Nawid - Iterates through test loader images and labels\n if total > max_evals: # Nawid - End if greater than max_evals\n break\n images = images.to(device)\n labels = labels.cpu()\n with torch.no_grad():\n res_dict = model(x1=images, x2=images, class_only=True) # Nawid - Input images into the model\n lgt_glb_mlp, lgt_glb_lin = res_dict['class'] # Nawid - I believe this is outputs from the mlp and a linear classifier\n # check classification accuracy\n correct_glb_mlp += get_correct_count(lgt_glb_mlp, labels)\n correct_glb_lin += get_correct_count(lgt_glb_lin, labels)\n total += labels.size(0)\n acc_glb_mlp = correct_glb_mlp / total # Nawid - Calculates the accuracy using the mlp\n acc_glb_lin = correct_glb_lin / total # Nawid - Calculates the accuracy using the linear\n model.train()\n # record stats in the provided stat tracker\n stats.update('test_accuracy_mlp_classifier', acc_glb_mlp, n=1)\n stats.update('test_accuracy_linear_classifier', acc_glb_lin, n=1)\n\ndef test_decoder_model(model, test_loader, device, stats,output_dir, max_evals=200000):\n # warm up batchnorm stats based on current model\n _warmup_batchnorm(model, test_loader, device, batches = 50, train_loader = False)\n\n # evaluate model on test_loader\n model.eval()\n total_test_loss = 0\n total = 0\n for _, (images, labels) in enumerate(test_loader):\n if total > max_evals:\n break\n images = images.to(device)\n labels = labels.cpu()\n with torch.no_grad():\n res_dict = model(x1= images, x2= images, decoder_only =True)\n image_reconstructions = res_dict['decoder_output']\n test_loss = loss_MSE(images, image_reconstructions)\n\n total_test_loss += test_loss\n total += labels.size(0)\n average_test_loss = total_test_loss/total\n\n model.train()\n # record 
stats in the provided stat tracker\n stats.update('test_loss_decoder', average_test_loss, n = 1)\n save_reconstructions(images, image_reconstructions,output_dir, training = False)\n\n\ndef save_reconstructions(original_images, reconstructions,output_directory,training = True):\n original_images = torch.clamp(original_images, 0, 1) # Nawid - Clamp values between 0 and 1\n original_images = original_images.permute(0, 2, 3, 1).detach().cpu().numpy()\n\n reconstructions = torch.clamp(reconstructions, 0, 1)\n reconstructions = reconstructions.permute(0, 2, 3, 1).detach().cpu().numpy()\n original_images_first10 = original_images[0:10]\n\n reconstructions_first10 = reconstructions[0:10]\n columns = np.concatenate([reconstructions_first10, original_images_first10], axis=-2)\n columns = np.concatenate(columns, axis=0)\n\n print('output_directory',output_directory)\n\n if training:\n Image.fromarray((columns * 255).astype('uint8')).save(output_directory +'/training_reconstructions_rgb.png') # Nawid - Changes the valeus back to 255 and integers and save the reconstructions\n else:\n Image.fromarray((columns * 255).astype('uint8')).save(output_directory +'/testing_reconstructions_rgb.png') # Nawid - Changes the valeus back to 255 and integers and save the reconstructions\n\n\ndef _warmup_batchnorm(model, data_loader, device, batches=100, train_loader=False): #Nawid - Two different behaviours if it is in training mode or testing mode I believe\n '''\n Run some batches through all parts of the model to warmup the running\n stats for batchnorm layers.\n '''\n model.train()\n for i, (images, _) in enumerate(data_loader):\n if i == batches:\n break\n if train_loader:\n images = images[0]\n images = images.to(device)\n _ = model(x1=images, x2=images, class_only=True)\n\n\ndef flatten(x):\n return x.reshape(x.size(0), -1)\n\n\ndef random_locs_2d(x, k_hot=1):\n '''\n Sample a k-hot mask over spatial locations for each set of conv features\n in x, where x.shape is like (n_batch, n_feat, n_x, n_y).\n '''\n # assume x is (n_batch, n_feat, n_x, n_y)\n x_size = x.size()\n n_batch = x_size[0]\n n_locs = x_size[2] * x_size[3] # Nawid - Number of positions is the height * width\n idx_topk = torch.topk(torch.rand((n_batch, n_locs)), k=k_hot, dim=1)[1] # Nawid- In this case, k is equal to 1, top k gives the k largest elements and indices of an index tensor along a given dimension. The dimension in this case 1 which is the dimension of the different n locations. since [1] is chosen it obtains the indices whilst [0] would give the actual elements\n khot_mask = torch.zeros((n_batch, n_locs)).scatter_(1, idx_topk, 1.) # Nawid - This puts (along dimension 1 which is dimension related to the location), it places a value of 1 at the indices specified by idx_topk\n rand_locs = khot_mask.reshape((n_batch, 1, x_size[2], x_size[3]))\n rand_locs = maybe_half(rand_locs) # Nawid - Change the prevision\n return rand_locs\n\n\ndef init_pytorch_defaults(m, version='041'): # Nawid- Obtain the default values for a particular pytorch version\n '''\n Apply default inits from pytorch version 0.4.1 or 1.0.0.\n\n pytorch 1.0 default inits are wonky :-(\n '''\n if version == '041':\n # print('init.pt041: {0:s}'.format(str(m.weight.data.size())))\n if isinstance(m, nn.Linear):\n stdv = 1. / math.sqrt(m.weight.size(1))\n m.weight.data.uniform_(-stdv, stdv)\n if m.bias is not None:\n m.bias.data.uniform_(-stdv, stdv)\n elif isinstance(m, nn.Conv2d):\n n = m.in_channels\n for k in m.kernel_size:\n n *= k\n stdv = 1. 
/ math.sqrt(n)\n m.weight.data.uniform_(-stdv, stdv)\n if m.bias is not None:\n m.bias.data.uniform_(-stdv, stdv)\n elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):\n if m.affine:\n m.weight.data.uniform_()\n m.bias.data.zero_()\n else:\n assert False\n elif version == '100':\n # print('init.pt100: {0:s}'.format(str(m.weight.data.size())))\n if isinstance(m, nn.Linear):\n init.kaiming_uniform_(m.weight, a=math.sqrt(5))\n if m.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(m.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(m.bias, -bound, bound)\n elif isinstance(m, nn.Conv2d):\n n = m.in_channels\n init.kaiming_uniform_(m.weight, a=math.sqrt(5))\n if m.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(m.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(m.bias, -bound, bound)\n elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):\n if m.affine:\n m.weight.data.uniform_()\n m.bias.data.zero_()\n else:\n assert False\n elif version == 'custom':\n # print('init.custom: {0:s}'.format(str(m.weight.data.size())))\n if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n else:\n assert False\n else:\n assert False\n\n\ndef weight_init(m): # Nawid- Initialises the weights of a model\n '''\n Usage:\n model = Model()\n model.apply(weight_init)\n '''\n if isinstance(m, nn.Linear):\n init_pytorch_defaults(m, version='041')\n elif isinstance(m, nn.Conv2d):\n init_pytorch_defaults(m, version='041')\n elif isinstance(m, nn.BatchNorm1d):\n init_pytorch_defaults(m, version='041')\n elif isinstance(m, nn.BatchNorm2d):\n init_pytorch_defaults(m, version='041')\n elif isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self, input_tensor):\n return input_tensor.view(input_tensor.size(0), -1)\n","sub_path":"Updated_Self_supervised_training/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"353435445","text":"# dumb variation of the encoder decoder\n# this 
will be using teacher forcing\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport numpy as np\nimport copy\nimport glob\nimport os\nimport pickle\nfrom random import shuffle\nfrom Losses_and_Metrics import losses_and_metrics\nfrom datetime import datetime\ndef check_dataset_length(dataset_path):\n if os.path.isdir(dataset_path):\n file_list = glob.glob(dataset_path + '/*.tfrecord')\n return len(file_list)\n\ndef aggregate_dataset_info(dataset_path_list):\n print('extracting dataset information')\n\n #prep help variables\n aggregated_dataset_info = {}\n accumulated_samples = {'train': 0, 'validation': 0, 'test': 0}\n new_samples = {'train': 0, 'validation': 0, 'test': 0}\n\n for dataset_path in dataset_path_list:\n dataset_info = pickle.load(open(dataset_path + '/dataset_info.pickle', 'rb'))\n for key in dataset_info.keys():\n if 'baseline' in key:\n # aggregate weighted averages by dataset sample size for baseline metrics, lets do this properly\n if key not in aggregated_dataset_info.keys():\n aggregated_dataset_info[key] = dataset_info[key]\n\n # get the name of the dataset\n if 'train' in key:\n set_type = 'train'\n elif 'val' in key:\n set_type ='validation'\n elif 'test' in key:\n set_type ='test'\n else:\n print('huh ... stumbled upon baseline info not belonging to either test, train or val: ', key)\n accumulated_samples[set_type] = check_dataset_length(dataset_path + '/' + set_type)\n else:\n # get the name of the dataset\n if 'train' in key:\n set_type = 'train'\n elif 'val' in key:\n set_type ='validation'\n elif 'test' in key:\n set_type ='test'\n else:\n print('huh ... stumbled upon baseline info not belonging to either test, train or val: ', key)\n new_samples[set_type] = check_dataset_length(dataset_path + '/' + set_type)\n\n for metric in dataset_info[key].keys():\n aggregated_dataset_info[key][metric] = (accumulated_samples[set_type] * aggregated_dataset_info[key][metric]\n + new_samples[set_type]*dataset_info[key][metric]) / (accumulated_samples[set_type] + new_samples[set_type])\n\n\n accumulated_samples[set_type] = accumulated_samples[set_type] + new_samples[set_type]\n\n\n else:\n # other metrics, we should make sure those are consistent between datasets!\n if key not in aggregated_dataset_info.keys():\n aggregated_dataset_info[key] = dataset_info[key]\n else:\n if aggregated_dataset_info[key] != dataset_info[key]:\n print('UhOh ... 
stumbled upon discrepancy between datasets')\n print(key, 'aggregated holds ', aggregated_dataset_info[key], 'and new dataset', dataset_path, ' holds ', dataset_info[key])\n\n print(aggregated_dataset_info)\n return aggregated_dataset_info\n\nclass Model_Container():\n def __init__(self,\n dataset_path_list,\n\n model_kwargs, #see __build_model\n train_kwargs, #currently only batch size\n experiment_name='Default_Name',\n sw_len_days=3, #deprecated\n try_distribution_across_GPUs=True,\n ):\n self.dataset_path_list = dataset_path_list\n self.experiment_name = experiment_name\n\n self.forecast_mode = model_kwargs['forecast_mode']\n self.dataset_info = aggregate_dataset_info(dataset_path_list)\n\n self.fc_steps = self.dataset_info['fc_steps']\n self.fc_tiles = self.dataset_info['fc_tiles']\n self.nwp_dims = self.dataset_info['support_shape'][-1]\n # self.nwp_dims = 16\n self.teacher_shape = [self.fc_steps, self.fc_tiles]\n self.target_shape = [self.fc_steps, self.fc_tiles]\n self.pdf_history_shape = self.dataset_info['pdf_history_shape']\n self.raw_history_shape = self.dataset_info['raw_history_shape']\n\n model_kwargs['support_shape'] = self.dataset_info['support_shape']\n model_kwargs['history_shape'] = self.dataset_info['pdf_history_shape']\n model_kwargs['out_shape'] = self.target_shape\n if model_kwargs['model_type'] == 'Encoder-Decoder' or model_kwargs['model_type'] == 'E-D' or 'MiMo' in model_kwargs['model_type'] or 'E-D' in model_kwargs['model_type'] or model_kwargs['model_type'] == 'Transformer':\n\n model_kwargs['input_shape'] = self.dataset_info['support_shape']\n model_kwargs['input_shape'] = (model_kwargs['input_shape'][0], model_kwargs['input_shape'][-1] + 1) #since we have to add the PV dimension\n\n elif 'MiMo' in model_kwargs['model_type']:\n model_kwargs['input_shape'] = self.dataset_info['support_shape']\n model_kwargs['input_shape'][-1] = model_kwargs['input_shape'][-1] + 1 #since we have to add th\n\n self.model_kwargs = model_kwargs\n self.train_kwargs = train_kwargs\n self.folder_name = 'models/' + self.experiment_name\n\n def train(self):\n tf.keras.backend.clear_session() # make sure we are working clean\n\n self.metrics = {}\n\n self.__build_model(**self.model_kwargs)\n # for fine-tuning mode, find the pre-trained model\n if self.train_kwargs['mode'] == 'fine-tune':\n pretrain_folder = self.folder_name + '-pre-trained' # to extract the weights\n self.experiment_name = self.experiment_name + '-fine-tuned' + self.dataset_path_list[0] # to save tboard logs\n self.folder_name = self.folder_name + '-fine-tuned' + self.dataset_path_list[0] # to save the fine-tuned model\n if not os.path.isdir('./' + pretrain_folder): # if there are no weights in the folder\n print('There is an error with finding model checkpoint. 
Folder ', pretrain_folder, 'does not exist.')\n else:\n print('...Loading model weights from checkpoint...', pretrain_folder)\n self.model.load_weights(pretrain_folder + \"/model_ckpt\")\n # save the model in pre-trained folder\n elif self.train_kwargs['mode'] == 'pre-train':\n self.experiment_name = self.experiment_name + '-pre-trained' # to save tboard logs\n self.folder_name = self.folder_name + '-pre-trained' # to save pre-trained model\n # train\n train_history, test_results = self.__train_model()\n # save\n print('Saving model to ...', self.folder_name)\n self.model.save_weights(self.folder_name + \"/model_ckpt\")\n\n del self.model\n\n tf.keras.backend.clear_session()\n results_dict = self.__manage_metrics(train_history, test_results)\n del self.train_kwargs\n\n return results_dict\n\n\n def test(self):\n tf.keras.backend.clear_session() # make sure we are working clean\n\n self.metrics = {}\n # test pre-trained model\n pretrain_folder = self.folder_name # + '-pre-trained' # to extract the weights\n\n if not os.path.isdir('./'+pretrain_folder): # if there are no weights in the folder \n print('There is an error with finding model checkpoint. Folder', pretrain_folder, 'or', self.folder_name, 'does not exist.')\n\n self.__build_model(**self.model_kwargs)\n\n print('...Loading model weights from checkpoint...', pretrain_folder)\n self.model.load_weights(pretrain_folder + \"/model_ckpt\").expect_partial()\n\n self.experiment_name = self.experiment_name + 'test' # to save tboard logs\n # test\n test_results = self.__test_model()\n del self.model\n tf.keras.backend.clear_session()\n\n results_dict = self.__manage_test_metrics(test_results)\n del self.train_kwargs\n\n return results_dict\n\n def __build_model(self,\n out_shape,\n support_shape, \n history_shape,\n input_shape=None,\n forecast_mode='pdf',\n model_type='MiMo-sth',\n encoder_units=[[96],[96],[96]],\n decoder_units=[[64],[64],[64]],\n L1=0.0, L2=0.0,\n use_dropout=False, dropout_rate=0.0,\n use_hw=False, use_norm=False, use_residual=False, use_dense=False,#general architecture stuff\n use_attention=False, attention_heads=3,\n downsample=False, mode='project', #MiMo stuff\n encoder_blocks=3,\n decoder_blocks=3,\n positional_embedding=False,\n force_relevant_context=True,\n encoder_max_length_sequence=None,\n encoder_receptive_window=None,\n encoder_self_attention=False,\n encoder_transformer_blocks=1,\n decoder_max_length_sequence=None,\n decoder_receptive_window=None,\n decoder_self_attention=False,\n decoder_attention=False,\n decoder_transformer_blocks=1,\n full_targets=True,\n use_gru=False,\n ):\n # get projection layer\n if self.forecast_mode == 'pdf':\n projection_block = tf.keras.layers.Conv1D(filters=self.target_shape[-1],\n kernel_size=1,\n strides=1,\n padding='causal',\n activation=tf.keras.layers.Softmax(axis=-1),\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=L1, l2=L2),\n kernel_initializer='glorot_uniform')\n elif self.forecast_mode =='ev':\n projection_block = tf.keras.layers.Conv1D(filters=1,\n kernel_size=1,\n strides=1,\n padding='causal',\n activation=None,\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=L1, l2=L2),\n kernel_initializer='glorot_uniform')\n else:\n print('wrong forecast mode flag, must be either pdf or ev')\n\n # initialize the model\n if model_type == 'MiMo-LSTM':\n encoder_specs = {'units': encoder_units,\n 'num_encoder_blocks': encoder_transformer_blocks,\n 'use_dropout': use_dropout,\n 'dropout_rate': dropout_rate,\n 'use_norm': use_norm,\n 'use_hw': use_hw,\n 'use_residual': 
use_residual,\n 'L1': L1, 'L2': L2,\n 'return_state': False,\n 'gru':use_gru,\n }\n\n from Building_Blocks import MIMOModel\n self.model = MIMOModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n model_type=model_type,\n projection_block=projection_block,)\n\n elif model_type == 'Encoder-Decoder' or model_type == 'E-D' or model_type == 'E-D-luong':\n print('building E-D')\n encoder_specs = {'units': encoder_units,\n 'num_encoder_blocks': encoder_transformer_blocks,\n 'use_dropout': use_dropout,\n 'dropout_rate': dropout_rate,\n 'use_norm': use_norm,\n 'use_hw': use_hw,\n 'use_residual': use_residual,\n 'L1': L1, 'L2': L2,\n 'gru':use_gru}\n decoder_specs = {'units': decoder_units,\n 'num_decoder_blocks': decoder_transformer_blocks,\n 'use_dropout': use_dropout,\n 'dropout_rate': dropout_rate,\n 'use_norm': use_norm,\n 'use_hw': use_hw,\n 'use_residual': use_residual,\n 'L1': L1, 'L2': L2,\n 'gru':use_gru,\n 'use_attention': decoder_attention,\n 'attention_heads': attention_heads,\n 'projection_layer': projection_block}\n\n from Building_Blocks import S2SModel\n self.model = S2SModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n decoder_specs=decoder_specs,\n model_type=model_type)\n \n elif model_type == 'Transformer':\n print('Transformer')\n decoder_specs = {'num_initial_features': decoder_units,\n 'max_length_sequence_history': decoder_max_length_sequence,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'attention_heads': attention_heads,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'use_dense': use_dense,\n 'force_relevant_context': force_relevant_context,\n 'use_self_attention': decoder_self_attention,\n 'use_attention': decoder_attention,\n 'transformer_blocks': decoder_transformer_blocks,\n 'positional_embedding': positional_embedding,\n 'projection_layer': projection_block}\n encoder_specs = {'num_initial_features': encoder_units,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'attention_heads': attention_heads,\n 'transformer_blocks': encoder_transformer_blocks,\n 'positional_embedding': positional_embedding,}\n\n from Building_Blocks import S2SModel\n self.model = S2SModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n decoder_specs=decoder_specs,\n model_type=model_type)\n\n elif model_type == 'LSTM-Generator':\n from Building_Blocks import ForecasterModel\n print('building E-D')\n common_specs = {'use_dropout': use_dropout,\n 'dropout_rate': dropout_rate,\n 'use_norm': use_norm, 'use_hw': use_hw, 'use_residual': use_residual,\n 'L1': L1, 'L2': L2,\n }\n encoder_specs = copy.deepcopy(common_specs)\n encoder_specs['return_state'] = False\n encoder_specs['units'] = encoder_units\n decoder_specs = copy.deepcopy(common_specs)\n decoder_specs['units'] = decoder_units\n decoder_specs['use_attention'] = decoder_attention\n decoder_specs['attention_heads'] = attention_heads\n decoder_specs['projection_layer'] = projection_block\n\n self.model = ForecasterModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n decoder_specs=decoder_specs,\n model_type='LSTM-Generator')\n\n elif model_type == 'FFNN-Generator':\n from Building_Blocks import FFNN_encoder, FFNN_decoder\n # decoder: width=256, depth=3, attention_heads=3, norm=True, attention_squeeze=0.5, L1=0.0, L2=0.0, projection_layer=None)\n # encoder: width=256, depth=3, attention_heads=3, norm=True, attention_squeeze=0.5, L1=0.0, L2=0.0\n decoder_specs = 
{'num_initial_features': decoder_units,\n 'max_length_sequence_history': decoder_max_length_sequence,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'attention_heads': attention_heads,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'use_dense': use_dense,\n 'force_relevant_context': force_relevant_context,\n 'use_self_attention': decoder_self_attention,\n 'use_attention': decoder_attention,\n 'transformer_blocks': decoder_transformer_blocks,\n 'positional_embedding': positional_embedding,\n 'projection_layer': projection_block,\n 'full_targets': full_targets}\n encoder_specs = {'num_initial_features': encoder_units,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'use_dense': use_dense,\n 'force_relevant_context': force_relevant_context,\n 'attention_heads': attention_heads,\n 'use_self_attention': encoder_self_attention,\n 'transformer_blocks': encoder_transformer_blocks,\n 'positional_embedding': positional_embedding,}\n from Building_Blocks import ForecasterModel\n self.model = ForecasterModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n decoder_specs=decoder_specs,\n model_type=model_type,\n history_and_features=decoder_attention)\n\n elif model_type == 'FFNN-LSTM-Generator':\n from Building_Blocks import FFNN_encoder, FFNN_decoder\n # decoder: width=256, depth=3, attention_heads=3, norm=True, attention_squeeze=0.5, L1=0.0, L2=0.0, projection_layer=None)\n # encoder: width=256, depth=3, attention_heads=3, norm=True, attention_squeeze=0.5, L1=0.0, L2=0.0\n decoder_specs = {'num_initial_features': decoder_units,\n 'max_length_sequence_history': decoder_max_length_sequence,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'attention_heads': attention_heads,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'use_dense': use_dense,\n 'force_relevant_context': force_relevant_context,\n 'use_self_attention': decoder_self_attention,\n 'use_attention': decoder_attention,\n 'transformer_blocks': decoder_transformer_blocks,\n 'positional_embedding': positional_embedding,\n 'projection_layer': projection_block,\n 'full_targets': full_targets}\n encoder_specs = {'num_initial_features': encoder_units,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'use_dense': use_dense,\n 'force_relevant_context': force_relevant_context,\n 'attention_heads': attention_heads,\n 'use_self_attention': encoder_self_attention,\n 'transformer_blocks': encoder_transformer_blocks,\n 'positional_embedding': positional_embedding,}\n from Building_Blocks import ForecasterModel\n self.model = ForecasterModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n decoder_specs=decoder_specs,\n model_type=model_type,\n history_and_features=decoder_attention)\n\n elif model_type == 'Transformer-Generator':\n from Building_Blocks import FFNN_encoder, FFNN_decoder\n # decoder: width=256, depth=3, attention_heads=3, norm=True, attention_squeeze=0.5, L1=0.0, L2=0.0, projection_layer=None)\n # encoder: width=256, depth=3, attention_heads=3, norm=True, attention_squeeze=0.5, L1=0.0, L2=0.0\n decoder_specs = {'num_initial_features': decoder_units,\n 'max_length_sequence_history': decoder_max_length_sequence,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'attention_heads': attention_heads,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n 'use_dense': use_dense,\n 
'force_relevant_context': force_relevant_context,\n 'use_self_attention': decoder_self_attention,\n 'use_attention': decoder_attention,\n 'transformer_blocks': decoder_transformer_blocks,\n 'positional_embedding': positional_embedding,\n 'projection_layer': projection_block,\n 'full_targets': full_targets}\n encoder_specs = {'num_initial_features': encoder_units,\n 'max_length_sequence_supplement': encoder_max_length_sequence,\n 'use_residual': use_residual,\n 'use_norm': use_norm,\n # 'force_relevant_context': force_relevant_context,\n 'attention_heads': attention_heads,\n 'transformer_blocks': encoder_transformer_blocks,\n 'positional_embedding': positional_embedding,}\n from Building_Blocks import ForecasterModel\n self.model = ForecasterModel(output_shape=self.target_shape,\n encoder_specs=encoder_specs,\n decoder_specs=decoder_specs,\n model_type=model_type,\n history_and_features=decoder_attention)\n\n else:\n print('trying to build', model_type, 'but failed')\n\n def get_losses_and_metrics(self):\n # assign the losses depending on scenario\n if self.forecast_mode != 'pdf' and self.forecast_mode != 'ev':\n print('forecast mode was not specified as either pdf or ev, no idea how compilation got this far but expect some issues!!')\n\n losses = losses_and_metrics(last_output_dim_size=self.target_shape[-1],\n normalizer_value=1.0,\n target_as_expected_value=False if self.forecast_mode=='pdf' else True,\n forecast_as_expected_value=False if self.forecast_mode=='pdf' else True)\n loss = losses.KLD if self.forecast_mode == 'pdf' else losses.MSE\n\n metrics = [losses.nME,\n losses.nRMSE]\n if self.forecast_mode == 'pdf':\n metrics.append(losses.CRPS)\n metrics.append(losses.EMC)\n\n return loss, metrics\n\n def __get_callbacks(self,\n tboard=True,\n ):\n callbacks = []\n if tboard:\n logdir = os.path.join(self.experiment_name)\n print('copy paste for tboard:', logdir)\n callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=logdir,\n write_graph=False,\n # update_freq='epoch',\n ))\n\n if self.train_kwargs['mode'] == 'pre-train':\n print('setting autostop criteria to lowest training loss')\n callbacks.append(tf.keras.callbacks.EarlyStopping(monitor='loss',\n min_delta=1e-4,\n patience=20,\n mode='min',\n restore_best_weights=True))\n elif self.train_kwargs['mode'] == 'fine-tune' or self.train_kwargs['mode'] == 'normal':\n print('setting autostop criteria to lowest validation nRMSE')\n callbacks.append(tf.keras.callbacks.EarlyStopping(monitor='val_nRMSE',\n patience=10,\n mode='min',\n restore_best_weights=True))\n return callbacks\n\n def __get_optimizer(self, train_steps):\n # For fine-tuning we want a smaller learning rate\n if self.train_kwargs['mode'] == 'fine-tune': # assuming we will be able to use the Transformer schedule for fine-tuning\n print('setting optimizer parameters to fine tuning')\n schedule_parameter = int(self.model_kwargs['decoder_units'])\n warmup_steps = train_steps * 4\n elif self.train_kwargs['mode'] == 'normal':\n print('setting optimizer parameters to normal training')\n schedule_parameter = int(self.model_kwargs['decoder_units'])\n warmup_steps = train_steps * 4\n elif self.train_kwargs['mode'] == 'pre-train':\n print('setting optimizer parameters to pre-training')\n schedule_parameter = int(self.model_kwargs['decoder_units'])\n warmup_steps = train_steps * 6\n\n 
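# CustomSchedule (defined near the bottom of this file) implements the\n # \"Attention Is All You Need\" warmup rule,\n # lr(step) = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5),\n # with d_model taken here from decoder_units.\n optimizer = tf.keras.optimizers.Adam(CustomSchedule(schedule_parameter,\n warmup_steps=warmup_steps),\n beta_1=0.9,\n beta_2=0.98,\n epsilon=1e-9)\n\n return optimizer\n\n def __train_model(self):\n # get 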
the dataset and fit the model\n epochs = 1\n dataset = dataset_generator(dataset_path_list=self.dataset_path_list,\n train_batch_size=self.train_kwargs['batch_size'],\n support_shape=self.model_kwargs['support_shape'],\n history_shape=self.model_kwargs['history_shape'],\n raw_history_shape=self.raw_history_shape,\n val_target_shape=self.target_shape,\n dataset_info=self.dataset_info,\n full_targets=self.model_kwargs['full_targets'],\n )\n\n if 'Generator' in self.model_kwargs['model_type']:\n train_set = dataset.pdf_generator_training_dataset\n val_set = dataset.pdf_generator_val_dataset\n test_set = dataset.pdf_generator_test_dataset\n elif 'E-D' in self.model_kwargs['model_type'] or self.model_kwargs['model_type'] == 'Transformer' or 'MiMo' in self.model_kwargs['model_type']:\n train_set = dataset.pdf_s2s_training_dataset\n val_set = dataset.pdf_s2s_val_dataset\n test_set = dataset.pdf_s2s_test_dataset\n\n # train_steps = dataset.get_train_steps_per_epoch()\n # val_steps = dataset.get_val_steps_per_epoch()\n # test_steps = dataset.get_test_steps_per_epoch()\n train_steps = 100\n val_steps = 10\n test_steps = 10\n\n loss, metrics = self.get_losses_and_metrics()\n # optimizer = self.__get_optimizer(train_steps)\n optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.75, nesterov=True) # from previous setup\n\n # compile\n self.model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics) \n # train\n print('starting to train model')\n train_history = self.model.fit(train_set(),\n steps_per_epoch=train_steps,\n epochs=epochs,\n verbose=2,\n validation_data=val_set(),\n validation_steps=val_steps,\n callbacks=self.__get_callbacks(tboard=True))\n # test\n test_results = self.model.evaluate(test_set(),\n steps=test_steps,\n verbose=2)\n self.model.summary()\n del dataset\n\n return train_history.history, test_results\n\n def __test_model(self):\n # evaluate without training\n dataset = dataset_generator(dataset_path_list=self.dataset_path_list,\n train_batch_size=self.train_kwargs['batch_size'],\n support_shape=self.model_kwargs['support_shape'],\n history_shape=self.model_kwargs['history_shape'],\n raw_history_shape=self.raw_history_shape,\n val_target_shape=self.target_shape,\n dataset_info=self.dataset_info,\n full_targets=self.model_kwargs['full_targets'],\n )\n\n if 'Generator' in self.model_kwargs['model_type']:\n test_set = dataset.pdf_generator_test_dataset\n elif 'E-D' in self.model_kwargs['model_type'] or self.model_kwargs['model_type'] == 'Transformer' or 'MiMo' in self.model_kwargs['model_type']:\n test_set = dataset.pdf_s2s_test_dataset\n\n test_steps = dataset.get_test_steps_per_epoch()\n\n # Transformer LR schedule\n # optimizer = tf.keras.optimizers.Adam(1e-9, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.75, nesterov=True) # from previous setup\n loss, metrics = self.get_losses_and_metrics()\n\n self.model.compile(optimizer=optimizer, loss=loss, metrics=metrics) # compile, print summary\n\n test_results = self.model.evaluate(test_set(),\n steps=test_steps,\n verbose=2)\n self.model.summary()\n del dataset\n\n return test_results\n\n\n def __manage_metrics(self, train_history, test_results):\n # process metrics, calculate skill scores\n results_dict = {}\n results_dict['test_loss'] = test_results[0]\n results_dict['test_nME'] = test_results[1]\n results_dict['test_nRMSE'] = test_results[2]\n if self.forecast_mode == 'pdf':\n results_dict['test_CRPS'] = test_results[3]\n\n for key in 
train_history.keys():\n results_dict[key] = train_history[key]\n\n saved_epoch = np.argmin(results_dict['val_nRMSE'])\n results_dict['save_epoch'] = saved_epoch\n results_dict['val_nRMSE_skill'] = 1 - (results_dict['val_nRMSE'][saved_epoch] / self.dataset_info['val_baseline']['nRMSE'])\n results_dict['test_nRMSE_skill'] = 1 - (results_dict['test_nRMSE'] / self.dataset_info['test_baseline']['nRMSE'])\n\n if self.forecast_mode == 'pdf':\n results_dict['val_CRPS_skill'] = 1 - (results_dict['val_CRPS'][saved_epoch] / self.dataset_info['val_baseline']['CRPS'])\n results_dict['test_CRPS_skill'] = 1 - (results_dict['test_CRPS'] / self.dataset_info['test_baseline']['CRPS'])\n\n print('val_skill nRMSE', results_dict['val_nRMSE_skill'])\n print('test_skill nRMSE', results_dict['test_nRMSE_skill'])\n\n if self.forecast_mode == 'pdf':\n print('val_skill CRPS', results_dict['val_CRPS_skill'])\n print('test_skill CRPS', results_dict['test_CRPS_skill'])\n\n return results_dict\n\n def __manage_test_metrics(self, test_results):\n # process metrics, calculate skill scores\n results_dict = {}\n results_dict['test_loss'] = test_results[0]\n results_dict['test_nME'] = test_results[1]\n results_dict['test_nRMSE'] = test_results[2]\n if self.forecast_mode == 'pdf':\n results_dict['test_CRPS'] = test_results[3]\n\n results_dict['test_nRMSE_skill'] = 1 - (results_dict['test_nRMSE'] / self.dataset_info['test_baseline']['nRMSE'])\n\n if self.forecast_mode == 'pdf':\n results_dict['test_CRPS_skill'] = 1 - (results_dict['test_CRPS'] / self.dataset_info['test_baseline']['CRPS'])\n\n print('test_skill nRMSE', results_dict['test_nRMSE_skill'])\n\n if self.forecast_mode == 'pdf':\n print('test_skill CRPS', results_dict['test_CRPS_skill'])\n\n return results_dict\n\ndef __get_max_min_targets(train_targets, test_targets):\n import numpy as np\n max_value_train = np.amax(train_targets, axis=0)\n max_value_train = np.amax(max_value_train, axis=0)\n max_value_test = np.amax(test_targets, axis=0)\n max_value_test = np.amax(max_value_test, axis=0)\n max_value = np.maximum(max_value_train, max_value_test)\n\n min_value_train = np.amin(train_targets, axis=0)\n min_value_train = np.amin(min_value_train, axis=0)\n min_value_test = np.amin(test_targets, axis=0)\n min_value_test = np.amin(min_value_test, axis=0)\n min_value = np.minimum(min_value_test, min_value_train)\n return max_value, min_value\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, d_model, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n\n self.d_model = d_model\n self.d_model = tf.cast(self.d_model, tf.float32)\n\n self.warmup_steps = warmup_steps\n\n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps ** -1.5)\n\n return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)\n\nclass dataset_generator():\n '''\n Load and process TFRecord dataset samples.\n '''\n def __init__(self,\n dataset_path_list=None,\n train_batch_size=None,\n support_shape=None,\n history_shape=None,\n raw_history_shape=None,\n train_target_shape=None,\n val_target_shape=None,\n dataset_info=None,\n full_targets=True):\n\n # We keep the training size as large as possible, this means the val size needs to be smaller due to memory thingies!\n self.train_batch_size = train_batch_size\n self.val_batch_size = int(train_batch_size / 6)*2\n\n self.support_shape = support_shape\n self.history_shape = history_shape\n self.raw_history_shape = raw_history_shape if isinstance(raw_history_shape, int) 
else raw_history_shape[0]\n self.train_target_shape = train_target_shape if train_target_shape is not None else self.history_shape\n self.val_target_shape = val_target_shape\n self.dataset_info = dataset_info\n self.full_targets = full_targets\n\n self.flattened_support_shape = tf.reduce_prod(self.support_shape).numpy()\n print(self.flattened_support_shape)\n self.flattened_historical_shape = tf.reduce_prod(self.history_shape).numpy()\n self.flattened_val_targets_shape = tf.reduce_prod(self.val_target_shape).numpy()\n\n\n self.train_sets = [dataset_path + '/train' for dataset_path in dataset_path_list]\n self.val_sets = [dataset_path + '/validation' for dataset_path in dataset_path_list]\n self.test_sets = [dataset_path + '/test' for dataset_path in dataset_path_list]\n\n def __get_all_tfrecords_in_folder(self, dataset_list):\n accumulated_dataset_files = []\n for dataset in dataset_list:\n if os.path.isdir(dataset):\n dataset_files = glob.glob(dataset + '/*.tfrecord')\n accumulated_dataset_files.extend(dataset_files)\n else:\n print('did not find data folder', dataset, '- expect issues!!')\n return accumulated_dataset_files\n\n\n\n def get_train_steps_per_epoch(self):\n train_list = self.__get_all_tfrecords_in_folder(self.train_sets)\n return int(len(train_list) / self.train_batch_size)\n\n def get_val_steps_per_epoch(self):\n val_list = self.__get_all_tfrecords_in_folder(self.val_sets)\n return int(np.ceil(len(val_list) / self.val_batch_size))\n\n def get_test_steps_per_epoch(self):\n test_list = self.__get_all_tfrecords_in_folder(self.test_sets)\n return int(np.ceil(len(test_list) / self.val_batch_size))\n\n\n def pdf_generator_training_dataset(self):\n return self.__dataset_from_folder_and_sample(file_list=self.__get_all_tfrecords_in_folder(self.train_sets),\n batch_size=self.train_batch_size,\n process_sample=self.get_pdf_generator_train_sample)\n\n def pdf_generator_val_dataset(self):\n return self.__dataset_from_folder_and_sample(file_list=self.__get_all_tfrecords_in_folder(self.val_sets),\n batch_size=self.val_batch_size,\n process_sample=self.get_pdf_generator_inference_sample)\n\n def pdf_generator_test_dataset(self):\n return self.__dataset_from_folder_and_sample(file_list=self.__get_all_tfrecords_in_folder(self.test_sets),\n batch_size=self.val_batch_size,\n process_sample=self.get_pdf_generator_inference_sample)\n\n def pdf_s2s_training_dataset(self):\n return self.__dataset_from_folder_and_sample(file_list=self.__get_all_tfrecords_in_folder(self.train_sets),\n batch_size=self.train_batch_size,\n process_sample=self.get_s2s_train_sample)\n\n def pdf_s2s_val_dataset(self):\n return self.__dataset_from_folder_and_sample(file_list=self.__get_all_tfrecords_in_folder(self.val_sets),\n batch_size=self.val_batch_size,\n process_sample=self.get_s2s_train_sample)\n\n def pdf_s2s_test_dataset(self):\n return self.__dataset_from_folder_and_sample(file_list=self.__get_all_tfrecords_in_folder(self.test_sets),\n batch_size=self.val_batch_size,\n process_sample=self.get_s2s_train_sample)\n\n\n def get_pdf_generator_inference_sample(self, example):\n # process one data sample for generator inference\n features = {'support': tf.io.FixedLenFeature(self.flattened_support_shape, tf.float32),\n 'pdf_history': tf.io.FixedLenFeature(self.flattened_historical_shape, tf.float32),\n }\n\n 
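# Samples are stored flattened in the TFRecords, so we parse with the\n # flattened lengths computed in __init__ and then reshape back to the\n # original (timesteps, features) layout.\n raw_unprocessed_sample = tf.io.parse_single_example(example, features)\n support_data = tf.reshape(tensor=raw_unprocessed_sample['support'], shape=self.support_shape)\n full_pdf_history = 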
tf.reshape(tensor=raw_unprocessed_sample['pdf_history'], shape=self.history_shape)\n target = full_pdf_history[-self.val_target_shape[0]:,:]\n history_input = full_pdf_history[:-self.val_target_shape[0],:]\n\n return {'support_input': support_data,\n 'history_input': history_input}, target\n\n def get_pdf_generator_train_sample(self, example):\n # process one data sample for generator training\n features = {\n 'support': tf.io.FixedLenFeature(self.flattened_support_shape, tf.float32),\n 'pdf_history': tf.io.FixedLenFeature(self.flattened_historical_shape, tf.float32),}\n\n raw_unprocessed_sample = tf.io.parse_single_example(example, features)\n full_pdf_history = tf.reshape(tensor=raw_unprocessed_sample['pdf_history'], shape=self.history_shape)\n support_data = tf.reshape(tensor=raw_unprocessed_sample['support'], shape=self.support_shape)\n\n if self.full_targets:\n target = full_pdf_history[1:, :] # for predicting full targets vs. last 24 steps\n else:\n target_shape = self.dataset_info['fc_steps']\n target = full_pdf_history[-target_shape:,:]\n\n history_input = full_pdf_history[:-1,:]\n\n # ATM sampling last 2days plus forecast day from NWP and last 2 days from history\n return {'support_input': support_data,\n 'history_input': history_input}, target\n\n def get_s2s_train_sample(self, example):\n # process one data sample for s2s model \n features = {'support': tf.io.FixedLenFeature(self.support_shape, tf.float32),\n 'raw_history': tf.io.FixedLenFeature([self.raw_history_shape,1], tf.float32),\n 'pdf_history': tf.io.FixedLenFeature(self.history_shape, tf.float32),\n }\n\n raw_unprocessed_sample = tf.io.parse_single_example(example, features)\n nwp_inputs = raw_unprocessed_sample['support']\n full_pdf_history = raw_unprocessed_sample['pdf_history']\n history = raw_unprocessed_sample['raw_history']\n\n target = full_pdf_history[-self.val_target_shape[0]:,:]\n teacher = full_pdf_history[-(self.val_target_shape[0]+1):-1,:]\n\n # downsample to concat with nwp\n history_downs_factor = int(history.shape[0]/nwp_inputs.shape[0])\n\n history = history[::history_downs_factor,:]\n\n # shifted cutting off (including forecast)\n nwp_inputs = nwp_inputs[self.val_target_shape[0]*4:,:]\n history = history[:-self.val_target_shape[0]*4,:]\n\n # concat into one input\n inputs = tf.concat([nwp_inputs,history], axis=-1)\n\n # non shifted cut off\n # inputs = inputs[:-self.val_target_shape[0]*4,:] # 24*4 for 15 minute samples\n\n print('SHAPES', inputs.shape, target.shape)\n\n # ATM sampling last 2days plus forecast day from NWP and last 2 days from history\n return {'nwp_pv_input': inputs,\n 'teacher':teacher}, target\n\n def __calculate_expected_value(self, signal, last_output_dim_size):\n indices = tf.range(last_output_dim_size) # (last_output_dim_size)\n weighted_signal = tf.multiply(signal, indices) # (batches, timesteps, last_output_dim_size)\n expected_value = tf.reduce_sum(weighted_signal, axis=-1, keepdims=True)\n return expected_value / last_output_dim_size\n\n def __dataset_from_folder_and_sample(self, process_sample, file_list, batch_size):\n option_no_order = tf.data.Options()\n option_no_order.experimental_deterministic = False\n\n # dataset = tf.data.Dataset.list_files(file_list)\n dataset = tf.data.TFRecordDataset(file_list, num_parallel_reads=5)\n dataset = dataset.with_options(option_no_order)\n dataset = dataset.map(process_sample, num_parallel_calls=5)\n dataset = dataset.repeat()\n\n dataset = dataset.shuffle(20 * batch_size, reshuffle_each_iteration=True)\n dataset = 
dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(3)\n return dataset","sub_path":"Train_Eval.py","file_name":"Train_Eval.py","file_ext":"py","file_size_in_byte":44918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"360366500","text":"#import matplotlib\n#matplotlib.use(\"Agg\")\n\nfrom pyimagesearch.preprocessing import ImageToArrayPreprocessor\nfrom pyimagesearch.callbacks import EpochCheckpoint\nfrom pyimagesearch.callbacks import TrainingMonitor\nfrom pyimagesearch.io import HDF5DatasetGenerator\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD, Adam\nfrom keras.models import load_model\nfrom create_model import CRNN\nimport config\nimport argparse\nimport keras.backend as K\nimport os\n\n\nap = argparse.ArgumentParser()\n\nap.add_argument(\"-c\", \"--checkpoints\",\n\thelp=\"path to output checkpoints\")\nap.add_argument(\"-m\", \"--model\",\n\thelp=\"path to model\")\nap.add_argument(\"-s\", \"--start_epoch\", type=int, default=0,\n\thelp=\"epoch to restart training at\")\nargs = vars(ap.parse_args())\n\ntrainAug = ImageDataGenerator(rotation_range=10, zoom_range=0.1,\n\tfill_mode='nearest')\niap = ImageToArrayPreprocessor()\n\ntrainGen = HDF5DatasetGenerator(config.TRAIN_HDF5, config.BATCH_SIZE,\n\taug=trainAug, preprocessors=[iap], binarize=False, classes=config.NUM_CLASSES,max_label_length=config.MAX_LENGTH)\nvalGen = HDF5DatasetGenerator(config.VAL_HDF5, config.BATCH_SIZE,\n\tpreprocessors=[iap], binarize=False, classes=config.NUM_CLASSES,max_label_length=config.MAX_LENGTH)\n\n\nadam = Adam(lr=0.001)\nif args['model'] is None:\n\tprint(\"[info] compiling model..\")\n\tmodel = CRNN.build(width=config.HEIGHT, height=config.WIDTH, depth=1,\n\t\tclasses=config.NUM_CLASSES)\n\t#print(model.summary())\n\tmodel.compile(loss={'ctc': lambda y_true, y_pred:y_pred}, optimizer=adam)\nelse:\n\tprint(\"[info] loading {}..\".format(args[\"model\"]))\n\tmodel = CRNN.build(width=config.HEIGHT, height=config.WIDTH, depth=1,\n\t\tclasses=config.NUM_CLASSES)\n\tmodel.load_weights(args[\"model\"])\n\n\tmodel.compile(loss={'ctc': lambda y_true, y_pred:y_pred}, optimizer=adam)\n\nprint(model.summary())\n\ncallbacks = [\n\tEpochCheckpoint(args[\"checkpoints\"], every=5,\n\t\tstartAt=args[\"start_epoch\"])]\n\nmodel.fit(\n\ttrainGen.generator(),\n\tsteps_per_epoch=trainGen.numImages//config.BATCH_SIZE,\n\tvalidation_data=valGen.generator(),\n\tvalidation_steps=valGen.numImages//config.BATCH_SIZE,\n\tepochs=30,\n\tmax_queue_size=config.BATCH_SIZE*2,\n\tcallbacks=callbacks,\n\tverbose=1)\ntrainGen.close()\nvalGen.close()\n","sub_path":"train_crnn.py","file_name":"train_crnn.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"532760196","text":"import requests\nimport json\nfrom datetime import datetime\nfrom telegram.ext import Updater, CommandHandler\nfrom telegram import ParseMode\nimport logging\nimport socketserver\nimport re\nimport os\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\ntoken = os.environ['token']\n\nupdater = Updater(token)\ndispatcher = updater.dispatcher\n\ndef latest(bot, update):\n try:\n device = (update.message.text).split(\" \")[1]\n except IndexError:\n device = None\n\n if not (device is None):\n bot.send_message(chat_id=update.message.chat_id, parse_mode=ParseMode.MARKDOWN, 
text=checkUpdates(\"Latest update\", device))\n else:\n bot.send_message(chat_id=update.message.chat_id, text=\"Device can't be empty.\\nUsage: /latest \")\n\ndef changelog(bot, update):\n cl_text = requests.get(\"https://raw.githubusercontent.com/PotatoProject/vendor_potato/baked-release/CHANGELOG.md\").text.replace(\"# Changelog\\n\", \"\")\n cl_array = cl_text.split(\"\\n\\n### \")\n\n bot.send_message(chat_id=update.message.chat_id, text=\"Latest release changelog:\\n\" + cl_array[1])\n\ndef checkUpdates(update_string, device):\n payload = {'device': device, 'type': 'weekly'}\n\n try:\n r = requests.get(\"https://api.potatoproject.co/checkUpdate\", params=payload)\n posp_standard_json = r.json()['response'][-1]\n except ConnectionError:\n r = requests.get(\"http://api.strangebits.co.in/checkUpdate\", params=payload, verify=False)\n posp_standard_json = r.json()['response'][-1]\n except IndexError:\n r = None\n\n if not (r is None):\n return (update_string + \" for device \" + device + \":\" +\n \"\\n *Download URL:* \" + \"[Get the update here!](\" + posp_standard_json['url'] + \")\" + \n \"\\n *Version:* \" + posp_standard_json['version'] + \n \"\\n *Date of upload:* \" + datetime.utcfromtimestamp(posp_standard_json['datetime']).strftime('%Y-%m-%d') +\n \"\\n *Size in megabytes:* \" + str(posp_standard_json['size'])[0:3] + \"MB\" +\n \"\\n *Release type:* \" + posp_standard_json['romtype'])\n else:\n return \"No updates found for device \" + device\n\nlatest_handler = CommandHandler('latest', latest)\ndispatcher.add_handler(latest_handler)\n\nchangelog_handler = CommandHandler('changelog', changelog)\ndispatcher.add_handler(changelog_handler)\n\nupdater.start_polling()","sub_path":"posp-releasebot.py","file_name":"posp-releasebot.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"192910860","text":"'''\n 10814 나이순 정렬\n 알고리즘 :\n 1. 
이전 문제들과 동일\n'''\nN = int(input())\nmembers = []\n\nfor i in range(N):\n age, name = input().split()\n members.append([int(age), name, i]) # members에 들어온 순서를 i로 저장하기\n\nmembers.sort(key = lambda x:(x[0], x[2])) # 나이로 먼저 정렬을 하고, 나이가 같으면 들어온 순서인 x[2]로 정렬\n\nfor member in members:\n print(member[0], member[1])","sub_path":"10814.py","file_name":"10814.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"608323694","text":"vvv=input()\noo=list(set(vvv))\nppp=1\nviji=0\nchange=False\nwhile True:\n sk=l1[viji]\n for y in range(0,len(vvv)-ppp):\n if sk in cat[y:y+ppp]:\n change=True\n else:\n change=False\n viji=viji+1\n if viji>=len(oo):\n viji=0\n ppp=ppp+1\n break\n\n if change==True:\n break\nprint(ppp)\n","sub_path":"35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"428175491","text":"def insertion_sort(li):\n for i in range(1,len(li)):\n temp = li[i] #拿temp存好摸的牌的值\n j = i-1 #j是手里牌(有序区)一个从后往前的指针\n while j>=0 and li[j]>temp: #当指针不为负数(为负数时指针无效了),且指针指向的牌大于抓的牌时\n li[j+1] = li[j] #指针指向的牌向后移动一位\n j -= 1 #同时指针向前移动一位\n else:\n li[j+1] = temp #while循环条件不满足时,将牌插在比较的牌(指针)的后面。\n print(li)\n return li\n\nli = [1,6,1,8,6,4,3,7,9,3,1,4,3]\nprint(li)\ninsertion_sort(li)\n","sub_path":"Algorithm_Basic/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"212449507","text":"from performance import Performance\ndef get_percent_time(data_list,percent):\n data_list = sorted(data_list)\n if len(data_list)*(1-percent)<= 1:\n r_length = 1\n else:\n r_length = len(data_list)*(1-percent)\n r_length = int(round(r_length))\n data_list = data_list[:len(data_list)-r_length]\n return data_list[-1]\nthread_count = 1000\nspend_time_list = []\nmax_time = 0\nmin_time = 3600\nless_than_3s_total = 0\nmore_than_3s_total = 0\nsuccess_total = 0\nfail_total = 0\nexcept_total = 0\ntotal = 0\nurl = \"http://223.201.0.198:8888/v1/chain/get_info\"\nheader = {}\ni = 0\ntime_total = 0\nwhile i < thread_count: \n pf = Performance(url=url,header=header)\n status,spend_time = pf.test_performance()\n spend_time_list.append(spend_time)\n total = total + 1\n if status == \"success\":\n success_total +=1\n elif status == \"fail\":\n fail_total += 1\n elif status == \"except\":\n except_total += 1\n if spend_time > max_time:\n max_time = spend_time\n if spend_time < min_time:\n min_time = spend_time\n if spend_time > 3:\n more_than_3s_total += 1\n else:\n less_than_3s_total += 1\n time_total += spend_time\n pf.start()\n i += 1\n \navg_time = time_total/thread_count\nspend_time_list = sorted(spend_time_list)\nprint(\"平均响应时间:%s\"% avg_time)\nprint(\"最大响应时间:%s\"% max_time)\nprint(\"最小响应时间:%s\"% min_time)\nprint(\"90%%响应时间:%s\"%(get_percent_time(spend_time_list,0.9)))\nprint(\"99%%响应时间:%s\"% (get_percent_time(spend_time_list,0.99)))\nprint(\"80%%响应时间:%s\"% (get_percent_time(spend_time_list,0.8)))\nprint(\"总请求数:%s\"% total)\nprint(\"请求成功数:%s\"% success_total)\nprint(\"请求失败数:%s\"% fail_total)\nprint(\"异常请求数:%s\"% except_total)\nprint(\"大于3秒请求数:%s\"% more_than_3s_total)\nprint(\"小于3秒请求数:%s\"% less_than_3s_total)\n","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
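# Editorial sketch: get_percent_time trims the slowest (1 - percent) fraction of\n# the sorted latency list and reports the worst remaining sample, i.e. an\n# approximate percentile. A quick (hypothetical) self-check:\n# assert get_percent_time(list(range(1, 11)), 0.9) == 9\n","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 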
+{"seq_id":"364526357","text":"#!/usr/local/bin/python3\r\n# encoding: utf-8\r\n\r\n'''\r\nCreated on 20/12/2014\r\n\r\n@author: Manuel Guillermo Gonzale Malave\r\n@version: 1.0\r\n@contact: http://baulprogramas.blogspot.com/\r\n@note: Software creado para uso privado, especialmente como parte de los \r\n proyectos iniciales del grupo V-GameXUSB (USB-Baruta Venezuela)\r\n'''\r\nimport pygame\r\n\r\nfrom src.interface import Interface\r\n\r\ntry:\r\n from src.gallery import Gallery\r\nexcept ImportError:\r\n Gallery = None\r\n\r\n\r\nclass GameManager(object):\r\n '''\r\n Administrador del juego\r\n '''\r\n\r\n\r\n def __init__(self,mode,title,pathico = None):\r\n '''\r\n Establece el modo de video, crea la galeria,etc...\r\n @param mode: Modo de video a establecer.\r\n '''\r\n \r\n self.__clock = pygame.time.Clock () # Para controlar el tiempo\r\n \r\n self.setVideoMode(mode)\r\n if pathico is not None:\r\n try:\r\n pygame.display.set_icon(pygame.image.load(pathico))\r\n except:\r\n pass\r\n pygame.mixer.set_num_channels(3)\r\n pygame.display.set_caption(title)\r\n pygame.mouse.set_visible(0)\r\n self.__full = False\r\n \r\n # Creamos nuestro contenedor de recursos..\r\n self.__gallery = None\r\n\r\n if Gallery is not None:\r\n self.__gallery = Gallery()\r\n self.__gallery.loadImages()\r\n self.__gallery.loadFonts()\r\n self.__gallery.loadSfx()\r\n\r\n \r\n self.__interface = None\r\n self.__stack = [] # Pila de interfaces.\r\n \r\n def getResource(self,key):\r\n '''\r\n Regresa un recurso desde la galeria\r\n @param key:llave del recurso en el diccionario\r\n @return: Recurso desde el diccionario en la galeria del GameManager en parent \r\n '''\r\n \r\n if self.__gallery is not None:\r\n return self.__gallery.getResource(key)\r\n \r\n return None\r\n \r\n def setVideoMode(self,modo):\r\n '''\r\n Establece el modo de pantalla del juego\r\n @param modo: (width,height) tama#o de la pantalla\r\n '''\r\n \r\n if not pygame.display.get_init():\r\n pygame.display.init()\r\n \r\n self.screen = pygame.display.set_mode(modo)\r\n self.__modo = modo\r\n \r\n def togleFullScreen(self):\r\n '''\r\n Lanza pantalla completa\r\n '''\r\n self.__full = not self.__full\r\n flag = 0\r\n if self.__full:\r\n flag |= pygame.FULLSCREEN\r\n \r\n self.screen = pygame.display.set_mode(self.__modo,flag)\r\n \r\n def changeInterface(self,inter):\r\n '''\r\n Cambia la interfaz actual\r\n @param inter: Nueva interfaz\r\n '''\r\n if not isinstance(inter, Interface):\r\n return\r\n \r\n if self.__interface is not None:\r\n \r\n if not self.__interface.isOver():\r\n self.__interface.pause()\r\n self.__push(self.__interface)\r\n else:\r\n self.__interface.terminate()\r\n \r\n self.__interface = inter\r\n \r\n self.__interface.start()\r\n self.__interface.entireDraw()\r\n \r\n \r\n\r\n def run(self):\r\n '''\r\n Bucle principal del juego\r\n '''\r\n if self.__interface is None:\r\n self.changeInterface(Interface(self)) #Le establecemos una por default\r\n \r\n while True: #Game loop\r\n \r\n #handle events\r\n if self.__interface is None:\r\n break\r\n \r\n for event in pygame.event.get():\r\n self.__interface.handleEvent(event)\r\n \r\n self.__interface.update()\r\n \r\n if self.__interface.isOver(): \r\n self.__interface.terminate()\r\n self.__interface = self.__pop()\r\n else:\r\n self.__interface.draw(self.screen)\r\n\r\n self.__clock.tick (100)\r\n\r\n def __push(self,inter):\r\n '''\r\n Empila una interfaz\r\n @param inter:Interfaz a empilar\r\n '''\r\n self.__stack.append(inter)\r\n\r\n def __pop(self):\r\n '''\r\n 
desempila una interfaz\r\n @return :Interfaz desempilada\r\n '''\r\n try:\r\n return self.__stack.pop()\r\n except IndexError:\r\n return None\r\n","sub_path":"Block-Block/src/gamemanager.py","file_name":"gamemanager.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"204662307","text":"\"\"\"One population of 100 hosts and 100 vectors in which the fitness of the genotype is given by the maximum hamming\ndistance to the wild type genotype or the resistant genotype (which have the greatest fitness values)\n-The number of loci is 5, with 5 possible alleles\n-The fitness function is given by the maximum hamming distance to either the wild type or the resistant genotypes,\nwhich have the greatest fitness (1)\n-Default setup\"\"\"\nfrom opqua.model import Model\nimport textdistance as td\nimport numpy as np\n\ndef fitnessGenotype(genome):\n WT = 'A' * len(genome)\n Resistant = 'Z' * len(genome)\n if genome == WT or genome==Resistant:\n return 1\n distanceWT = td.hamming(Resistant, genome) /len(genome)\n distanceRes = td.hamming(WT, genome) / len(genome)\n distance = max(distanceWT, distanceRes)\n fitness = np.exp(np.log(1e-10) * distance)\n return fitness\n\nmy_model = Model() # Make a new model object.\nmy_model.newSetup('my_setup',preset='vector-borne', num_loci=5,possible_alleles=\"AMNPZ\",\n fitnessHost=fitnessGenotype, recombine_in_vector=1)\n # Create a new set of parameters called \"my_setup\" to be used to simulate\n # a population in the model. Use the default parameter set for a\n # vector-borne model.\nmy_model.newPopulation(\n 'my_population', 'my_setup', num_hosts=100, num_vectors=100)\n # Create a new population of 100 hosts and 100 vectors called\n # \"my_population\". 
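\n# Editorial note: with the hamming distance normalised to [0, 1], fitnessGenotype\n# traces a fitness valley, fitness = (1e-10)**distance; e.g. distance 0.5 yields\n# 1e-05, while the two peak genotypes return 1.\nmy_model = Model() # Make a new model object.\nmy_model.newSetup('my_setup',preset='vector-borne', num_loci=5,possible_alleles=\"AMNPZ\",\n fitnessHost=fitnessGenotype, recombine_in_vector=1)\n # Create a new set of parameters called \"my_setup\" to be used to simulate\n # a population in the model. Use the default parameter set for a\n # vector-borne model.\nmy_model.newPopulation(\n 'my_population', 'my_setup', num_hosts=100, num_vectors=100)\n # Create a new population of 100 hosts and 100 vectors called\n # \"my_population\". 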
The population uses parameters stored in \"my_setup\".\nmy_model.addPathogensToHosts( 'my_population',{('M'*5):10,('P'*5):10,('N'*5):10} )\n # Add pathogens with a genome of \"MMMMM\",\"PPPPP\", \"NNNNN\", to 10 random hosts for each genotype in\n # population \"my_population\".\nmy_model.run(0,200) # Run the simulation for 200 time units.\ndata = my_model.saveToDataFrame('FitnessValley.csv')\n # Save the model results to a table.\ngraph_2= my_model.compositionPlot('FitnessValleyComp.png', data, num_top_sequences=10,\n track_specific_sequences=['ZZZZZ','AAAAA'])\n#Plot the different genotypes across time, tracking the wild type\n# and the resistant ones (AAAAA and ZZZZZ respectively)","sub_path":"examples/Malaria/FitnessValley.py","file_name":"FitnessValley.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"565715115","text":"\nfrom setuptools import setup, find_packages\nfrom pathlib import Path\nimport os\n\nos.chdir(str(Path(__file__).parent))\nwith open('requirements.txt') as f:\n REQUIREMENTS = f.readlines()\n\nsetup(\n name='alijazayeri-geocoding-device',\n version='0.0.1',\n description='Service to retrieve lat/lng of an address.',\n packages=find_packages(),\n install_requires=REQUIREMENTS,\n author='Ali Jazayeri',\n author_email='ali.jazayeri@ualberta.ca'\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"148191763","text":"from flask import Flask, request, redirect, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\nproject_dir = os.path.dirname(os.path.abspath(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///{}\".format(os.path.join(project_dir, \"flicklist.db\"))\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\nclass Blog(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(500))\n body = db.Column(db.String(500))\n\n def __init__(self, title, body):\n self.title = title\n self.body = body\n@app.route('/')\ndef get_something():\n return redirect('/addnew')\n\n@app.route('/addnew', methods = ['GET', 'POST'])\ndef new_post():\n \n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n if title == '':\n return render_template('add-new.html', error1=\"Please add a title\")\n if body == '':\n return render_template('add-new.html', error2=\"Please add an entry\")\n new_entry = Blog(title, body)\n db.session.add(new_entry)\n db.session.commit()\n return redirect('/blog')\n \n \n return render_template('add-new.html')\n\n@app.route('/blog-entry')\ndef display_entry():\n id_value = request.args.get('id')\n blog = Blog.query.filter_by(id=id_value).first() \n return render_template('blog-entry.html', title=blog.title, body=blog.body)\n\n@app.route('/blog')\ndef blog():\n entries = Blog.query.all()\n return render_template('full-blog.html', title=\"My Blog\", entries=entries)\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"371903925","text":"import os\r\nimport zipfile\r\nimport shutil\r\n\r\nversions = ['2017-2018', '2019']\r\nprojectFolderPrefix = './'\r\nbuildPathPrefix = 
'bin/Release/'\r\n\r\npackageFolderPrefix = './'\r\nMaya2BabylonPackagePrefix = 'Maya2Babylon-'\r\nMaya2BabylonVersion = '1.3.0'\r\n\r\nwith zipfile.ZipFile(packageFolderPrefix + Maya2BabylonPackagePrefix + Maya2BabylonVersion + '.zip', 'w' ) as outputZip:\r\n\r\n for version in versions:\r\n # get file paths\r\n buildPath = projectFolderPrefix + buildPathPrefix + version + '/'\r\n buildDlls = [ dll for dll in os.listdir(buildPath) if dll.endswith('.dll')]\r\n\r\n packagePath = version + '/'\r\n # copy bins to publish location\r\n for dll in buildDlls:\r\n packageDll = dll\r\n if 'Maya2Babylon' in dll:\r\n packageDll = 'Maya2Babylon.nll.dll'\r\n outputZip.write(buildPath + dll, packagePath + packageDll)","sub_path":"Maya/Maya2Babylon_Package.py","file_name":"Maya2Babylon_Package.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"476279538","text":"import numpy as np\r\nimport math\r\nnp.random.seed(444)\r\nclass RRT():\r\n def __init__(self, map, start_point, goal_point):\r\n self.step = 5\r\n self.map = map\r\n self.width = np.shape(map)[1]\r\n self.height = np.shape(map)[0]\r\n self.start = start_point\r\n self.goal = goal_point\r\n self.parent = {}\r\n self.parent[self.start] = None\r\n\r\n def random_point(self):\r\n x = int(self.width * np.random.random_sample())\r\n y = int(self.height * np.random.random_sample())\r\n return np.array([x, y])\r\n\r\n def find_closest(self, pos):\r\n value_min = math.sqrt(self.width ** 2 + self.height ** 2)\r\n for key in self.parent.keys():\r\n value = math.sqrt((pos[0] - key[0]) ** 2 + (pos[1] - key[1]) ** 2)\r\n if value < value_min:\r\n closest = key\r\n value_min = value\r\n return closest\r\n\r\n def new_pt(self, pt, closest):\r\n eukl = math.sqrt((pt[0] - closest[0]) ** 2 + (pt[1] - closest[1]) ** 2)\r\n if closest[0] != pt[0]:\r\n a = (pt[1] - closest[1]) / (pt[0] - closest[0])\r\n b = closest[1] - a * closest[0]\r\n cos = abs(pt[0] - closest[0])/eukl\r\n if closest[0] > pt[0]:\r\n x = closest[0] - cos * self.step\r\n else:\r\n x = closest[0] + cos * self.step\r\n y = a * x + b\r\n else:\r\n x = closest[0]\r\n if closest[1] < pt[1]:\r\n y = closest[1] + self.step\r\n else:\r\n y = closest[1] - self.step\r\n pt[0] = int(x)\r\n pt[1] = int(y)\r\n return pt\r\n\r\n def check_if_valid(self, a, b):\r\n in_free_space = True\r\n div_x = 1\r\n div_y = 1\r\n div = 20\r\n if (a[0] != b[0]):\r\n a_v = (a[1]-b[1])/(a[0]-b[0])\r\n b_v = a[1] - a_v * a[0]\r\n step_x = np.abs((b[0] - a[0]) / div)\r\n x = min(a[0], b[0])\r\n for i in range(0, div):\r\n y = a_v * x + b_v\r\n x_int = int(x * div_x)\r\n y_int = int(y * div_y)\r\n if y_int < self.height:\r\n if self.map[y_int, x_int] == 0:\r\n in_free_space = False\r\n break\r\n x = x + step_x\r\n else:\r\n step_y = np.abs((b[1] - a[1]) / div)\r\n y = min(a[1], b[1])\r\n for i in range(0, div):\r\n if y < self.height:\r\n if self.map[int(y*div_y), int(a[0] * div_x)] == 0:\r\n in_free_space = False\r\n break\r\n y = y + step_y\r\n return in_free_space\r\n\r\n def search(self):\r\n path = []\r\n new_path = []\r\n endReached = False\r\n while not endReached:\r\n if len(self.parent) > 100:\r\n self.parent.clear()\r\n path.append(self.start)\r\n path.append(self.goal)\r\n break\r\n newPoint = self.random_point()\r\n closestNeigh = self.find_closest(newPoint)\r\n newPointSegment = self.new_pt(newPoint, closestNeigh)\r\n if self.check_if_valid(newPointSegment, closestNeigh):\r\n 
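# (editorial note) new_pt has already pulled the random sample to within one\r\n # fixed-length step of its nearest tree node; only collision-free\r\n # segments are linked into the tree below.\r\n 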
self.parent.update({tuple(newPointSegment): tuple(closestNeigh)})\r\n else:\r\n continue\r\n if self.check_if_valid(newPointSegment, self.goal):\r\n self.parent.update({self.goal: tuple(newPointSegment)})\r\n considered_node = self.goal\r\n path.append(considered_node)\r\n startReached = False\r\n while not startReached:\r\n print('Considered node: ' + str(considered_node))\r\n print('Goal: ' + str(self.goal))\r\n print(self.parent)\r\n considered_node = tuple(self.parent[considered_node])\r\n path.append(considered_node)\r\n if considered_node[0] == self.start[0] and considered_node[1] == self.start[1]:\r\n startReached = True\r\n endReached = True\r\n for node in path:\r\n node = list(node)\r\n node[0] = int(node[0])\r\n node[1] = int(node[1])\r\n node = tuple(node)\r\n new_path.append(node)\r\n print(\"Path found\")\r\n print(\"New path: \" + str(new_path))\r\n return new_path, self.parent\r\n","sub_path":"rrt.py","file_name":"rrt.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"5885956","text":"\"\"\"\n\nHomework #1\n\nExceptions: KeyboardInterrupt\n\n* Rewrite the ask_user() function from the while2 task so that it\n catches KeyboardInterrupt, tells the user \"Bye!\"\n and stops by means of the break statement\n \n\"\"\"\n\ndef ask_user():\n \"\"\"\n Replace pass with your code\n \"\"\"\n q_and_a = {'How are you?': 'Just fine!', 'What are you doing?': 'Working', 'How old are you?': '100500',\n 'Where do you come from?': 'Neverland', 'Where are you going?': 'Nowhere'}\n while True:\n try:\n q = input('Your question: ')\n a = q_and_a.get(q, 0)\n if not a == 0:\n print(a)\n else:\n print('I don\\'t know the answer')\n except KeyboardInterrupt:\n print('Bye-Bye')\n break\n\n \nif __name__ == \"__main__\":\n ask_user()\n","sub_path":"exception1.py","file_name":"exception1.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"299427805","text":"bunnies = []\ncounter = 3005290\nindex = 1\n\nfor i in range(0, 3005290):\n bunnies.append(1)\n\n\ndef find_nearest(index, bunnies):\n # print(index, bunnies[index])\n if index == len(bunnies):\n index = 0\n if bunnies[index + 1] == 1:\n return index + 1\n else:\n idx = index\n while bunnies[idx] == 0:\n if idx == len(bunnies):\n idx = 0\n idx += 1\n return idx\n\nwhile counter > 1:\n index = find_nearest(index, bunnies)\n bunnies[index] = 0\n counter -= 1\n #if index < len(bunnies):\n #index += 1\n #else:\n #index = 0\n\nprint(index)\n","sub_path":"day19/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38102181","text":"#!/usr/bin/env python\n\n\nfile=\"RESULT_BED_WGS_UNITED.txt\"\nfp=open(file,\"r\")\nfout=open(\"Ensembl\"+file,\"w\")\n\nfor line in fp:\n\tline=line.strip()\n\t(chr,start,end,barcode,gene,ref,allele1,allele2,sType,Ensemble,tissue)=line.split(\"\\t\")\n
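\t# The Ensemble field appears to pack the gene id and a region annotation\n\t# separated by a single space; split it into the two parts.\n\t(sGene_ID,what)=(Ensemble.split(\" \")[0],Ensemble.split(\" 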
\")[1])\n\tregion=what.split(\":\")[0]\t\n\tfout.write(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\\t{10}\\n\".format(chr,start,end,barcode,sGene_ID,ref,allele1,allele2,sType,region,tissue))\n\n\nfp.close()\nfout.close()\n\t\n\t\n","sub_path":"convert_result.py","file_name":"convert_result.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"150794693","text":"import os\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport pickle\n\nface_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_default.xml')\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nimage_dir = os.path.join(BASE_DIR, \"images\")\n\ncurrent_id = 0\nlabel_id = {}\n\ny_labels =[]\nx_train = []\n\nfor roots, dirs, files in os.walk(image_dir):\n for file in files:\n if file.endswith(\"jpg\") or file.endswith(\"png\"):\n path = os.path.join(roots, file)\n label = os.path.basename(os.path.dirname(path)).replace(\" \", \"-\").upper()\n print(label, path)\n if not label in label_id:\n label_id[label] = current_id\n current_id += 1\n id_ = label_id[label]\n print(label_id)\n #y_labels.append(label)\n #x_train.append(path)\n pil_image = Image.open(path).convert(\"L\") #grayscale\n\n image_array = np.array(pil_image, \"uint8\")\n #print(image_array)\n faces = face_cascade.detectMultiScale(image_array, scaleFactor=2, minNeighbors=5)\n for (x,y,w,h) in faces:\n roi = image_array[y:y+h,x:x+w]\n x_train.append(roi)\n y_labels.append(id_)\n\n#print(x_train)\n#print(y_labels)\n\nwith open(\"labels.pickle\", 'wb') as f:\n pickle.dump(label_id, f)\n\nrecognizer.train(x_train, np.array(y_labels))\nrecognizer.save(\"trainer.yml\")\n","sub_path":"face_train.py","file_name":"face_train.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"151009142","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Core ATM module.\n\nThis module contains the ATM class, which is the one responsible for\nexecuting and orchestrating the main ATM functionalities.\n\"\"\"\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport logging\nimport os\nimport random\nimport time\nfrom builtins import object\nfrom datetime import datetime, timedelta\nfrom operator import attrgetter\n\nfrom atm.constants import TIME_FMT, PartitionStatus\nfrom atm.database import Database\nfrom atm.encoder import MetaData\nfrom atm.method import Method\nfrom atm.utilities import download_data, get_public_ip\nfrom atm.worker import ClassifierError, Worker\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ATM(object):\n\n LOOP_WAIT = 1\n\n def __init__(self, sql_conf, aws_conf, log_conf):\n self.db = Database(**sql_conf.to_dict())\n self.aws_conf = aws_conf\n self.log_conf = log_conf\n\n def work(self, datarun_ids=None, save_files=False, choose_randomly=True,\n cloud_mode=False, total_time=None, wait=True):\n \"\"\"\n Check the ModelHub database for unfinished dataruns, and spawn workers to\n work on them as they are added. This process will continue to run until it\n exceeds total_time or is broken with ctrl-C.\n\n datarun_ids (optional): list of IDs of dataruns to compute on. If None,\n this will work on all unfinished dataruns in the database.\n choose_randomly: if True, work on all highest-priority dataruns in random\n order. 
If False, work on them in sequential order (by ID)\n cloud_mode: if True, save processed datasets to AWS. If this option is set,\n aws_config must be supplied.\n total_time (optional): if set to an integer, this worker will only work for\n total_time seconds. Otherwise, it will continue working until all\n dataruns are complete (or indefinitely).\n wait: if True, once all dataruns in the database are complete, keep spinning\n and wait for new runs to be added. If False, exit once all dataruns are\n complete.\n \"\"\"\n start_time = datetime.now()\n public_ip = get_public_ip()\n\n # main loop\n while True:\n # get all pending and running dataruns, or all pending/running dataruns\n # from the list we were given\n dataruns = self.db.get_dataruns(include_ids=datarun_ids, ignore_complete=True)\n if not dataruns:\n if wait:\n LOGGER.debug('No dataruns found. Sleeping %d seconds and trying again.',\n ATM.LOOP_WAIT)\n time.sleep(ATM.LOOP_WAIT)\n continue\n\n else:\n LOGGER.info('No dataruns found. Exiting.')\n break\n\n max_priority = max([datarun.priority for datarun in dataruns])\n priority_runs = [r for r in dataruns if r.priority == max_priority]\n\n # either choose a run randomly, or take the run with the lowest ID\n if choose_randomly:\n run = random.choice(priority_runs)\n else:\n run = sorted(dataruns, key=attrgetter('id'))[0]\n\n # say we've started working on this datarun, if we haven't already\n self.db.mark_datarun_running(run.id)\n\n LOGGER.info('Computing on datarun %d' % run.id)\n # actual work happens here\n worker = Worker(self.db, run, save_files=save_files,\n cloud_mode=cloud_mode, aws_config=self.aws_conf,\n log_config=self.log_conf, public_ip=public_ip)\n try:\n worker.run_classifier()\n\n except ClassifierError:\n # the exception has already been handled; just wait a sec so we\n # don't go out of control reporting errors\n LOGGER.error('Something went wrong. Sleeping %d seconds.', ATM.LOOP_WAIT)\n time.sleep(ATM.LOOP_WAIT)\n\n elapsed_time = (datetime.now() - start_time).total_seconds()\n if total_time is not None and elapsed_time >= total_time:\n LOGGER.info('Total run time for worker exceeded; exiting.')\n break\n\n def create_dataset(self, dataset_conf):\n \"\"\"\n Create a dataset and add it to the ModelHub database.\n \"\"\"\n # download data to the local filesystem to extract metadata\n train_local, test_local = download_data(dataset_conf.train_path,\n dataset_conf.test_path,\n self.aws_conf)\n\n # create the name of the dataset from the path to the data\n name = os.path.basename(train_local)\n name = name.replace(\"_train.csv\", \"\").replace(\".csv\", \"\")\n\n # process the data into the form ATM needs and save it to disk\n meta = MetaData(dataset_conf.class_column, train_local, test_local)\n\n # enter dataset into database\n dataset = self.db.create_dataset(name=name,\n description=dataset_conf.data_description,\n train_path=dataset_conf.train_path,\n test_path=dataset_conf.test_path,\n class_column=dataset_conf.class_column,\n n_examples=meta.n_examples,\n k_classes=meta.k_classes,\n d_features=meta.d_features,\n majority=meta.majority,\n size_kb=meta.size)\n return dataset\n\n def create_datarun(self, dataset, run_conf):\n \"\"\"\n Given a config, creates a set of dataruns for the config and enters them into\n the database. 
Returns the ID of the created datarun.\n\n dataset: Dataset SQLAlchemy ORM object\n \"\"\"\n # describe the datarun by its tuner and selector\n run_description = '__'.join([run_conf.tuner, run_conf.selector])\n\n # set the deadline, if applicable\n deadline = run_conf.deadline\n if deadline:\n deadline = datetime.strptime(deadline, TIME_FMT)\n # this overrides the otherwise configured budget_type\n # TODO: why not walltime and classifiers budget simultaneously?\n run_conf.budget_type = 'walltime'\n elif run_conf.budget_type == 'walltime':\n deadline = datetime.now() + timedelta(minutes=run_conf.budget)\n\n target = run_conf.score_target + '_judgment_metric'\n datarun = self.db.create_datarun(dataset_id=dataset.id,\n description=run_description,\n tuner=run_conf.tuner,\n selector=run_conf.selector,\n gridding=run_conf.gridding,\n priority=run_conf.priority,\n budget_type=run_conf.budget_type,\n budget=run_conf.budget,\n deadline=deadline,\n metric=run_conf.metric,\n score_target=target,\n k_window=run_conf.k_window,\n r_minimum=run_conf.r_minimum)\n return datarun\n\n def create_dataruns(self, run_conf):\n \"\"\"\n Generate a datarun, including a dataset if necessary.\n\n Returns: ID of the generated datarun\n \"\"\"\n dataset = self.db.get_dataset(run_conf.dataset_id)\n if not dataset:\n raise ValueError('Invalid Dataset ID: {}'.format(run_conf.dataset_id))\n\n method_parts = {}\n for m in run_conf.methods:\n # enumerate all combinations of categorical variables for this method\n method = Method(m)\n method_parts[m] = method.get_hyperpartitions()\n LOGGER.info('method %s has %d hyperpartitions' %\n (m, len(method_parts[m])))\n\n # create hyperpartitions and datarun(s)\n dataruns = []\n if not run_conf.run_per_partition:\n LOGGER.debug('saving datarun...')\n datarun = self.create_datarun(dataset, run_conf)\n dataruns.append(datarun)\n\n LOGGER.debug('saving hyperpartions...')\n for method, parts in list(method_parts.items()):\n for part in parts:\n # if necessary, create a new datarun for each hyperpartition.\n # This setting is useful for debugging.\n if run_conf.run_per_partition:\n datarun = self.create_datarun(dataset, run_conf)\n dataruns.append(datarun)\n\n # create a new hyperpartition in the database\n self.db.create_hyperpartition(datarun_id=datarun.id,\n method=method,\n tunables=part.tunables,\n constants=part.constants,\n categoricals=part.categoricals,\n status=PartitionStatus.INCOMPLETE)\n\n LOGGER.info('Dataruns created. Summary:')\n LOGGER.info('\\tDataset ID: %d', dataset.id)\n LOGGER.info('\\tTraining data: %s', dataset.train_path)\n LOGGER.info('\\tTest data: %s', (dataset.test_path or 'None'))\n\n datarun = dataruns[0]\n if run_conf.run_per_partition:\n LOGGER.info('\\tDatarun IDs: %s', ', '.join(str(datarun.id) for datarun in dataruns))\n\n else:\n LOGGER.info('\\tDatarun ID: %d', datarun.id)\n\n LOGGER.info('\\tHyperpartition selection strategy: %s', datarun.selector)\n LOGGER.info('\\tParameter tuning strategy: %s', datarun.tuner)\n LOGGER.info('\\tBudget: %d (%s)', datarun.budget, datarun.budget_type)\n\n return dataruns\n\n def enter_data(self, dataset_conf, run_conf):\n \"\"\"\n Generate a datarun, including a dataset if necessary.\n\n Returns: ID of the generated datarun\n \"\"\"\n # if the user has provided a dataset id, use that. 
Otherwise, create a new\n # dataset based on the arguments we were passed.\n if run_conf.dataset_id is None:\n dataset = self.create_dataset(dataset_conf)\n run_conf.dataset_id = dataset.id\n\n dataruns = self.create_dataruns(run_conf)\n return dataruns[0] if not run_conf.run_per_partition else dataruns\n","sub_path":"atm/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543094729","text":"#!/usr/bin/env python3.6\nimport sys\nimport csv\nimport time\nimport datetime\nimport collections\nimport mysql.connector\nfrom mysql.connector import errorcode\nfrom mysql.connector import Error\nfrom time import gmtime, strftime\n\ndef main():\n db = mysql.connector.Connect( host='127.0.0.1', port=3307, database='scannet', user='scannet' , password='*****') \n query = ''' \n select distinct p.id_cmts from scannet.cmts_por_poller p \n left join (select id_cmts from scannet.actualizaciones_poller where fecha_insercion > date_add(now(), interval -4 hour) \n group by id_cmts \n having count(*)> 1000) c on(p.id_cmts=c.id_cmts) \n where c.id_cmts is null\n\n '''\n cursor = db.cursor()\n cursor.execute(query)\n records = cursor.fetchall()\n print( \"Total number rows - {0}\".format(cursor.rowcount) )\n q = collections.deque(records)\n print( \"Total queued - {0}\".format(len(q)))\n \n #for iid_cmts in records:\n while q:\n print(\"Length Q: {0}\".format(len(q)))\n try: \n time.sleep(1)\n id = q.pop()\n print( \"Items left in queue: {0}, processing id:{1}\".format(len(q), id) )\n cursor.callproc('scannet.validacion_parque_manual', (id) ) \n #except Exception as e:\n #print ('error', e)\n except mysql.connector.Error as err:\n print(err)\n if err.errno == errorcode.ER_LOCK_DEADLOCK:\n q.extendleft(id) \n print (\"Add: {0}\".format(id))\n\n\n cursor.close()\n db.close()\n print( \"MySQL connection is closed\" )\n print( \"Total items left in queue - {0}\".format(len(q)) )\n\nif __name__ == '__main__': \n main()\n","sub_path":"deque-test.py","file_name":"deque-test.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"478550566","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom models import *\nfrom django.template import loader\nfrom django.urls import reverse\nfrom django.shortcuts import render, get_object_or_404,get_list_or_404\n\n\n# Create your views here.\n\ndef index(request):\n latest_question_list = Question.objects.all()\n template = loader.get_template('polls/index.html')\n context = {\n 'questions': latest_question_list\n }\n return HttpResponse(template.render(context, request))\n\n\n# # raising 404 error with HttpResponse\n# def detail(request, question_id):\n# template = loader.get_template('polls/detail.html')\n# try:\n# question = Question.objects.get(pk = question_id)\n# except Question.DoesNotExist:\n# raise Http404(\"Question Does not exist\");\n#\n# return HttpResponse(template.render({'question':question},request))\n\n\n# raising 404 exception with render & get_object_or_404()\ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n # getting choices for specific question by filtering by F.K column name\n # if list is empty it will trigger default 404 Choice exception\n 
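# Editorial note: a question with no choices therefore 404s here rather than\n # rendering an empty detail page.\n choices = get_list_or_404(Choice , question = question_id )\n return render(request, 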
'polls/detail.html', context={\"question\": question , 'choices':choices})\n\n# action to validate vote then save\ndef vote(request, question_id):\n # if request.method() == 'POST':\n question = get_object_or_404(Question , pk = question_id)\n select_choice_id = request.POST['choice']\n try:\n selected_choice = question.choice_set.get(pk = select_choice_id)\n\n # redirect to details page if wrong choice\n except Choice.DoesNotExist:\n return render(request,'polls/detail.html',context = {'question':question})\n # if choice is existed\n else :\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('polls:results',args=(question_id,)))\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk = question_id)\n return render(request, 'polls/results.html', context={'question':question})\n\n","sub_path":"Blog/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"555232249","text":"class Solution(object):\n def minimumTotal(self, triangle):\n length = len(triangle)\n columns = len(triangle[length - 1])\n matrix = [[0 for col in range(columns)] for row in range(length)]\n row_index = 0\n for row in range(length):\n elements = triangle[row]\n col_index = 0\n for val in elements:\n matrix[row_index][col_index] = val\n col_index += 1\n row_index += 1\n for row in range(length - 2, -1, -1):\n for col in range(row + 1):\n matrix[row][col] += min(matrix[row + 1][col + 1], matrix[row + 1][col])\n return matrix[0][0]\n","sub_path":"120/120.triangle.235096602.Accepted.leetcode.py","file_name":"120.triangle.235096602.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"332623339","text":"#auto script for hash hammer to run threw formatted txt file\nimport os, sys\n\n\nsf = raw_input('File: ')\nelements = []\nfile = open(sf)\n\n\nfor data in file:\n\ttry:\n\t\telements = data.split('\\t\\t')\n\t\thash = elements[1]\n\t\thash = hash.replace(\"\\n\",\"\")\n\t\thash = hash.strip()\n\t\t#print elements\n\t\tsys_str = \"python2 HashHammer.py -t md5 -h \" + hash\n\t\tos.system(sys_str)\n\texcept:\n\t\tcontinue\n\n","sub_path":"transfer/Pentest/hhauto.py","file_name":"hhauto.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"97211282","text":"# Copyright 2018 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport _thread\n\nfrom tests.conftest import out_path, tag_test\nfrom tests.http_srv import create_function\nfrom mlrun import get_or_create_ctx, new_function, RunObject, new_task\n\n\ndef myfunction(context, event):\n ctx = get_or_create_ctx(\"nuclio-test\", event=event)\n p1 = ctx.get_param(\"p1\", 1)\n p2 = ctx.get_param(\"p2\", \"a-string\")\n\n context.logger.info(\n f\"Run: {ctx.name} 
uid={ctx.uid}:{ctx.iteration} Params: p1={p1}, p2={p2}\"\n )\n\n time.sleep(1)\n\n # log scalar values (KFP metrics)\n ctx.log_result(\"accuracy\", p1 * 2)\n ctx.log_result(\"latency\", p1 * 3)\n\n # log various types of artifacts (and set UI viewers)\n ctx.log_artifact(\"test.txt\", body=b\"abc is 123\")\n ctx.log_artifact(\"test.html\", body=b\" Some HTML \", viewer=\"web-app\")\n\n context.logger.info(\"run complete!\")\n return ctx.to_json()\n\n\nbase_spec = new_task(params={\"p1\": 8}, out_path=out_path)\n\n\ndef verify_state(result: RunObject):\n state = result.status.state\n assert state == \"completed\", f\"wrong state ({state}) {result.status.error}\"\n\n\ndef test_simple_function():\n # Thread(target=create_function, args=(myfunction, 4444)).start()\n _thread.start_new_thread(create_function, (myfunction, 4444))\n time.sleep(2)\n\n spec = tag_test(base_spec, \"simple_function\")\n result = new_function(command=\"http://localhost:4444\").run(spec)\n print(result)\n verify_state(result)\n\n\ndef test_hyper_function():\n # Thread(target=create_function, args=(myfunction, 4444))\n _thread.start_new_thread(create_function, (myfunction, 4444))\n time.sleep(2)\n\n spec = tag_test(base_spec, \"hyper_function\")\n spec.spec.hyperparams = {\"p1\": [1, 2, 3]}\n result = new_function(command=\"http://localhost:4444\").run(spec)\n print(result)\n verify_state(result)\n","sub_path":"tests/test_remote.py","file_name":"test_remote.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"405935200","text":"from urllib.parse import urljoin\n\nfrom flexget.utils.soup import get_soup\n\nfrom .site_base import SiteBase\n\n\nclass MeanTorrent(SiteBase):\n\n def sign_in(self, entry, config):\n self.sign_in_by_get(entry, config)\n\n def get_message(self, entry, config):\n self.get_meantorrent_message(entry, config)\n\n def get_details(self, entry, config):\n self.get_details_base(entry, config, self.build_selector())\n\n def build_selector(self):\n selector = {\n 'from_page': None,\n 'details_link': None,\n 'details_content': {\n 'details_table': 'html',\n },\n 'details': {\n 'downloaded': {\n 'regex': '(downloaded).*?([\\\\d]+)',\n 'group': 2,\n 'handle': self.handle_suffix\n },\n 'uploaded': {\n 'regex': '(uploaded).*?([\\\\d]+)',\n 'group': 2,\n 'handle': self.handle_suffix\n },\n 'share_ratio': None,\n 'points': {\n 'regex': '(score).*?([\\\\d.,]+)',\n 'group': 2,\n },\n 'seeding': {\n 'regex': '(seeded).*?(\\\\d+)',\n 'group': 2,\n },\n 'leeching': {\n 'regex': '(leeched).*?(\\\\d+)',\n 'group': 2,\n },\n 'hr': None,\n }\n }\n return selector\n\n def get_meantorrent_message(self, entry, config, messages_url='/messages.php'):\n pass\n\n def handle_suffix(self, value):\n return str(value) + 'B'\n","sub_path":"ptsites/meantorrent.py","file_name":"meantorrent.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"325081365","text":"import redis\nimport json\n\n\nclass Sm_Api(object):\n '''\n Sys Messenger API\n '''\n\n SMA_VERSION = '1.0'\n\n def __init__(self, sender, redisHost, redisPort=6379, redisDb=1):\n super(Sm_Api, self).__init__()\n\n self.redisCon = redis.StrictRedis(host=redisHost, port=redisPort, db=redisDb)\n\n self.sender = sender\n\n def send_msg(self, msg):\n finalMsg = {}\n finalMsg['sender'] = self.sender\n finalMsg['message'] = msg\n\n self.redisCon.publish('sys-message', 
json.dumps(finalMsg))","sub_path":"DsManager/Src/sm/sm_api.py","file_name":"sm_api.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"210729157","text":"# class Intge:\n#\n# def __init__(self,a,b):\n# self.a = a\n# self.b = b\n#\n# def print_sub(self):\n# return print(\"Tong 2 so nguyen : \", self.a , \"+\" ,self.b , \" = \" , self.a + self.b ,)\n#\n#\n#\n# s = Intge(4,3)\n# s.print_sub()\n#\n# # a = int(input(\"Please enter numer : \"))\n# # print(a//8,a%8) if a >= 8 else print(a)\n# while True:\n# a = float(input(\"Please enter number :\"))\n# print(round(a, 3))\n\n# class Output:\n#\n# def __init__(self, name, old):\n# self.name = name\n# self.old = old\n#\n# def __str__(self):\n# return f'{self.name} + {self.old}'\n#\n# def show(self):\n# return f'{self.name} + {self.old}'\nimport math\nclass Input:\n\n    def __init__(self, a, b, c):\n        self.a = a\n        self.b = b\n        self.c = c\n\n    def check_rect(self):\n        if self.a + self.b > self.c and self.a + self.c > self.b and self.c + self.b > self.a:\n            print(\"a,b,c is square of RECT\")\n        else:\n            print(\"a,b,c is None\")\n\n    def quadratic(self):\n        if self.a == 0:\n            if self.b == 0:\n                if self.c == 0:\n                    print(\"Quadratic VSN\")\n                else:\n                    print(\"VN\")\n            else:\n                print(\"1N\")\n        else:\n            delta = self.b ** 2 - 4 * self.a * self.c\n            if delta > 0:\n                x1 = float((-self.b + math.sqrt(delta)) / (2 * self.a))\n                x2 = float((-self.b - math.sqrt(delta)) / (2 * self.a))\n                print(\"Phuong trinh co 2 nghiem : \\nx1 = {} \\nx2 = {}\".format(x1, x2))\n\n\n            elif delta == 0:\n                x = -self.b / (2 * self.a)\n                print(\"Phuong trinh co 1 nghiem duy nhat x = {}\".format(x))\n            else:\n                print(\"PHUONG TRINH VN\")\nclass Print(Input):\n\n    def __init__(self, a, b, c, number):\n        super().__init__(a, b, c)\n        self.number = number\n\n    def print_str(self):\n        print(\"10 of first_number : \", self.number.split(\"N\",1))\n\n# rect1 = Input(12,1,1)\n# rect1.check_rect()\nfx = Input(1, 2, 3)\nfx2 = Input(1, 2, 1)\nfx.quadratic()\nfx2.quadratic()\n\n\n\nfor i in range(10):\n    print(i,end=\"\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"quadratic.py","file_name":"quadratic.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"7643763","text":"import vision.datasets.mtsd_default_classes as dflt\nfrom sys import argv\nimport pathlib\nimport json\nimport os\nfrom tqdm import tqdm\n\nif len(argv) != 4:\n print(\"Usage: prune_mtsd_dataset.py \")\n exit(1)\n\ndataset_path = pathlib.Path(argv[1])\norig_split = pathlib.Path(argv[2])\noutput = pathlib.Path(argv[3])\n\nids = list(map(str.strip, open(orig_split, 'r').readlines()))\nif output.exists() and not input(\"File exists: '%s'.\\nContinue? 
[y/N]\" % output).lower().startswith('y'):\n exit(0)\nwith open(output, 'w+') as ofile:\n for id in tqdm(ids):\n anno_file = dataset_path / \"annotations\" / f\"{id}.json\"\n if not os.path.isfile(anno_file):\n continue\n anno = json.load(open(anno_file, 'r'))\n labels = (obj[\"label\"] for obj in anno[\"objects\"])\n classes = list(filter(lambda x: x is not None, map(dflt.convert_label, labels)))\n if classes:\n ofile.write(id + '\\n')\n","sub_path":"prune_mtsd_dataset.py","file_name":"prune_mtsd_dataset.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"403560971","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 24 13:01:41 2018\n\n@author: kengo\n\"\"\"\n\nimport math\ndef main():\n myMessage =input('enter the encrypted message boss:>')\n myKey = int(input('enter your key boss:'))\n plaintext = decryptMessage(myKey, myMessage)\n# Print with a | (called \"pipe\" character) after it in case\n# there are spaces at the end of the decrypted message.\n print(plaintext)\n \ndef decryptMessage(key, message):\n print('decrypting your encrypted message......')\n# The transposition decrypt function will simulate the \"columns\" and\n# \"rows\" of the grid that the plaintext is written on by using a list\n# of strings. First, we need to calculate a few values.\n# The number of \"columns\" in our transposition grid:\n numOfColumns = math.ceil(len(message) / key)\n# The number of \"rows\" in our grid will need:\n numOfRows = key\n# The number of \"shaded boxes\" in the last \"column\" of the grid:\n numOfShadedBoxes = (numOfColumns * numOfRows) - len(message)\n# Each string in plaintext represents a column in the grid.\n \n plaintext = [''] * numOfColumns\n\n\n# The col and row variables point to where in the grid the next\n\n# character in the encrypted message will go.\n\n col = 0\n\n row = 0\n\n\n for symbol in message:\n\n plaintext[col] += symbol\n col += 1 # point to next column\n\n# If there are no more columns OR we're at a shaded box, go back to\n\n# the first column and the next row.\n if (col == numOfColumns) or (col == numOfColumns - 1 and row >=numOfRows - numOfShadedBoxes):\n\n col = 0\n row += 1\n print('here is the plaintext sir;') \n return ''.join(plaintext)\n # If transpositionDecrypt.py is run (instead of imported as a module) call\n # the main() function.\nmain()\n","sub_path":"DecryptingTranspositionCipher.py","file_name":"DecryptingTranspositionCipher.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"373005716","text":"class Solution:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n\n banned_words = set(banned)\n ans = \"\"\n max_count = 0\n word_count = defaultdict(int)\n word_buffer = []\n\n # iterate each character from the paragraph\n for p, char in enumerate(paragraph):\n\n # word_buffer = a word from the paragraph\n if char.isalnum():\n word_buffer.append(char.lower())\n if p != len(paragraph) - 1:\n continue\n\n # begin processing one word\n if len(word_buffer) > 0:\n # get the string\n word = \"\".join(word_buffer)\n # if the word is not part of the banned_words\n if word not in banned_words:\n # increase the number\n word_count[word] += 1\n # if the total cound exceeds the max_count\n if word_count[word] > max_count:\n # update the return answer\n max_count = word_count[word]\n ans = word\n # reset the buffer for the 
next word\n word_buffer = []\n\n return ans","sub_path":"easy/Most_Common_Word/sources/onfly.py","file_name":"onfly.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"222819747","text":"import requests, json\nfrom retrying import retry\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Mobile Safari/537.36'}\n@retry(stop_max_attempt_number=3)\ndef _parse_url(url,method,data,proxies):\n print('**' *20)\n if method == 'POST':\n response = requests.post(url, headers=headers,timeout=3,data=data,proxies=proxies)\n else :\n response = requests.get(url, headers=headers,timeout=3,proxies=proxies)\n assert response.status_code == 200\n return response.content.decode()\n\n\ndef parse_url(url, method='GET', data=None, proxies={'http': '122.243.9.5:9000'}):\n try:\n html_str = _parse_url(url,method,data,proxies)\n except:\n html_str = None\n return html_str\n\n# if __name__ == \"__main__\":\n# url = 'http://www.baidu.com'\n# print(parse_url(url))\n","sub_path":"advanced/12数据提取/parse_url.py","file_name":"parse_url.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"203295290","text":"import tensorflow as tf\n\n# The following variables are the hyperparameters.\nlearning_rate = 0.001\nepochs = 20\nbatch_size = 100\nclassification_threshold = 0.35\nlabel_name = \"median_house_value_is_high\"\n\n# Modify the following definition of METRICS to generate\n# not only accuracy and precision, but also recall:\nMETRICS = [\n tf.keras.metrics.BinaryAccuracy(name='accuracy',\n threshold=classification_threshold),\n tf.keras.metrics.Precision(thresholds=classification_threshold,\n name='precision'\n ),\n tf.keras.metrics.Recall(thresholds=classification_threshold,\n name='recall'\n )\n]\n\nlist_of_metrics_to_plot = ['accuracy', 'precision', 'recall']\n\n# The new graphs suggest that precision and recall are\n# somewhat in conflict. 
That is, improvements to one of\n# those metrics may hurt the other metric.","sub_path":"binary_classification/param_binary_class.py","file_name":"param_binary_class.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"64286572","text":"# -*- coding: utf-8 -*-\r\nimport pytest\r\n\r\nfrom matchpy.expressions.expressions import Wildcard, CommutativeOperation\r\nfrom matchpy.matching.one_to_one import match as match_one_to_one\r\nfrom matchpy.matching.many_to_one import ManyToOneMatcher\r\nfrom matchpy.matching.syntactic import DiscriminationNet\r\nfrom matchpy.expressions.functions import preorder_iter\r\n\r\n\r\ndef pytest_generate_tests(metafunc):\r\n if 'match' in metafunc.fixturenames:\r\n metafunc.parametrize('match', ['one-to-one', 'many-to-one'], indirect=True)\r\n if 'match_syntactic' in metafunc.fixturenames:\r\n metafunc.parametrize('match_syntactic', ['one-to-one', 'many-to-one', 'syntactic'], indirect=True)\r\n\r\n\r\ndef match_many_to_one(expression, pattern):\r\n try:\r\n commutative = next(\r\n p for p in preorder_iter(pattern.expression) if isinstance(p, CommutativeOperation)\r\n )\r\n next(wc for wc in preorder_iter(commutative) if isinstance(wc, Wildcard) and wc.min_count > 1)\r\n except StopIteration:\r\n pass\r\n else:\r\n pytest.xfail('Matcher does not support fixed wildcards with length != 1 in commutative operations')\r\n matcher = ManyToOneMatcher(pattern)\r\n for _, substitution in matcher.match(expression):\r\n yield substitution\r\n\r\n\r\ndef syntactic_matcher(expression, pattern):\r\n matcher = DiscriminationNet()\r\n matcher.add(pattern)\r\n for _, substitution in matcher.match(expression):\r\n yield substitution\r\n\r\n\r\n@pytest.fixture\r\ndef match(request):\r\n if request.param == 'one-to-one':\r\n return match_one_to_one\r\n elif request.param == 'many-to-one':\r\n return match_many_to_one\r\n else:\r\n raise ValueError(\"Invalid internal test config\")\r\n\r\n\r\n@pytest.fixture\r\ndef match_syntactic(request):\r\n if request.param == 'one-to-one':\r\n return match_one_to_one\r\n elif request.param == 'many-to-one':\r\n return match_many_to_one\r\n elif request.param == 'syntactic':\r\n return syntactic_matcher\r\n else:\r\n raise ValueError(\"Invalid internal test config\")\r\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"535816849","text":"import fcntl\nimport multiprocessing\nimport os\nimport socket\nimport struct\nimport subprocess\nimport sys\n\n\ndef get_sys_mem_total_kB():\n memory_total = None\n if 'linux' in sys.platform:\n memory_line = None\n for line in open(\"/proc/meminfo\"):\n if 'MemTotal:' in line:\n memory_line = line\n break\n\n if memory_line:\n memory_line = memory_line.replace('MemTotal:', '').strip()\n memory_line = memory_line.replace('kB', '')\n memory_line = memory_line.strip()\n memory_total = int(memory_line)\n\n return memory_total\n\n\ndef get_sys_mem_total_MB():\n memory_total_kb = get_sys_mem_total_kB()\n if memory_total_kb:\n memory_total_mb = memory_total_kb / 1024\n return memory_total_mb\n\n\ndef get_disk_size_GB(file_sys='/'):\n disk_size = None\n if 'linux' in sys.platform:\n file_system = os.statvfs(file_sys)\n disk_size = (file_system.f_blocks * file_system.f_frsize) / (1024 ** 3)\n\n return disk_size\n\n\ndef get_disk_usage():\n\n def get_size_in_GB(disk_size):\n 
if 'G' in disk_size:\n return float(disk_size.replace('G', ''))\n if 'M' in disk_size:\n return float(disk_size.replace('M', '')) / 1024\n if 'K' in disk_size:\n return float(disk_size.replace('K', '')) / (1024 ** 2)\n return None\n\n disk_usage = dict()\n\n if 'linux' in sys.platform:\n df_command = subprocess.Popen([\"df\", \"-h\"], stdout=subprocess.PIPE)\n df_output = df_command.communicate()[0]\n\n for file_system in df_output.split(\"\\n\")[1:]:\n if 'none'not in file_system:\n try:\n name, size, used, avail, use, mount = file_system.split()\n disk_usage[name] = {\n 'total': get_size_in_GB(size),\n 'used': get_size_in_GB(used)}\n except ValueError:\n pass\n return disk_usage\n\n\ndef get_interface_ip(ifname):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(\n fcntl.ioctl(s.fileno(), 0x8915,\n struct.pack('256s', ifname[:15]))[20:24])\n\n\ndef get_lan_ip():\n ip = socket.gethostbyname(socket.gethostname())\n if ip.startswith(\"127.\") and os.name != \"nt\":\n interfaces = [\n \"eth0\",\n \"eth1\",\n \"eth2\",\n \"wlan0\",\n \"wlan1\",\n \"wifi0\",\n \"ath0\",\n \"ath1\",\n \"ppp0\",\n ]\n for ifname in interfaces:\n try:\n ip = get_interface_ip(ifname)\n break\n except IOError:\n pass\n return ip\n\n\ndef get_cpu_core_count():\n return multiprocessing.cpu_count()\n\n\ndef get_load_average():\n load_average = os.getloadavg()\n return {\n '1': load_average[0],\n '5': load_average[1],\n '15': load_average[2]\n }\n","sub_path":"meniscus/api/utils/sys_assist.py","file_name":"sys_assist.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"71781114","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QLineEdit, QGridLayout, QPushButton, \\\r\n QMessageBox, QDesktopWidget, QTabWidget, QTextEdit, QVBoxLayout, QGroupBox, QMainWindow, \\\r\n QHBoxLayout, QTableWidget, QRadioButton, QTableWidgetItem, QAbstractItemView, QDialog, QComboBox, QSpinBox\r\nfrom PyQt5.QtGui import QIcon, QFont\r\nfrom PyQt5.QtCore import Qt\r\nimport excel_file as ef\r\nimport pandas as pd\r\nimport openpyxl\r\nimport datetime\r\n\r\n\r\nclass exe_func(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n login = Login()\r\n login.exec_()\r\n self.login_Ycode = login.Ycode\r\n self.login_Name = login.NAME\r\n\r\n # login_info가 비어있는 경우 main을 실행하지 않고 닫기\r\n if not login.login_info.empty:\r\n self.mainUI()\r\n else:\r\n # 시스템 종료\r\n sys.exit()\r\n\r\n def mainUI(self):\r\n self.setWindowTitle('Ybio')\r\n self.setWindowIcon(QIcon('ybio.png'))\r\n self.left = 120\r\n self.top = 200\r\n self.width = 1600\r\n self.height = 700\r\n\r\n # 상태바에 표시\r\n Message = ' CODE: ' + self.login_Ycode +' ' + ' NAME : ' +self.login_Name\r\n self.statusBar().showMessage(Message)\r\n self.setGeometry(self.left, self.top, self.width, self.height)\r\n\r\n self.tab_widgets = tab_widget()\r\n self.setCentralWidget(self.tab_widgets)\r\n\r\n self.show()\r\n\r\nclass Login(QDialog):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.Ycode = None\r\n self.NAME = None\r\n self.login_info = pd.DataFrame()\r\n\r\n self.loginUI()\r\n\r\n # 로그인 UI\r\n def loginUI(self):\r\n # Ycode와 NAME을 입력할 Label과 Line을 생성\r\n self.label1 = QLabel(\"코 드: \")\r\n self.label2 = QLabel(\"이 름: \")\r\n self.lineEdit1 = QLineEdit()\r\n self.lineEdit2 = QLineEdit()\r\n self.pushButton = QPushButton(\"Login\")\r\n\r\n # 위치 입력\r\n layout = QGridLayout()\r\n layout.addWidget(self.label1, 0, 0)\r\n 
layout.addWidget(self.lineEdit1, 0, 1)\r\n layout.addWidget(self.label2, 1, 0)\r\n layout.addWidget(self.lineEdit2, 1, 1)\r\n layout.addWidget(self.pushButton, 1, 2)\r\n\r\n self.setLayout(layout)\r\n\r\n self.pushButton.clicked.connect(self.pushButtonCliked)\r\n\r\n self.setWindowTitle('Ybio')\r\n self.resize(300, 100)\r\n self.setWindowIcon(QIcon('ybio.png'))\r\n # self.center()\r\n self.show()\r\n\r\n # 로그인 버튼 입력시 발생\r\n def pushButtonCliked(self):\r\n\r\n # 로그인 정보가 있는 EXCEL을 불러오기 위치 찾기\r\n excel_LDB = ef.excel_pd()\r\n login_DB = excel_LDB.load_LDB()\r\n\r\n self.login_db = pd.read_excel(login_DB)\r\n\r\n # Line에 입력된 값을 텍스트로 불러옴\r\n self.Ycode = self.lineEdit1.text()\r\n self.NAME = self.lineEdit2.text()\r\n\r\n # Ycode와 이름이 일치할 경우 DF 생성, 아닐경우 빈 DF 생성으로 로그인 성공과 실패를 결정\r\n self.login_info = self.login_db.loc[(self.login_db['Ycode'] == self.Ycode) & (self.login_db['Name'] == self.NAME)]\r\n\r\n # 위치에 없으면 다시 입력하는 칸이 나오고, 있을 경우 close 하고 다음 실행\r\n if not self.login_info.empty:\r\n self.close()\r\n\r\n else:\r\n reply = QMessageBox(self)\r\n reply.question(self, 'Error', '사번 및 이름을 확인하세요.', QMessageBox.Yes)\r\n\r\n def center(self):\r\n qr = self.frameGeometry()\r\n cp = QDesktopWidget().availableGeometry().center()\r\n qr.moveCenter(cp)\r\n self.move(qr.topLeft())\r\n\r\nclass tab_widget(QWidget):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.Q_spin1_text = 1\r\n self.date = datetime.date.today().isoformat()\r\n self.setFont(QFont(\"Times New Roman \", 10, QFont.Bold))\r\n self.exeUI()\r\n\r\n def exeUI(self):\r\n\r\n # ---------------------------------------------------------------- 위젯\r\n left_groupbox = QGroupBox(\"C-CODE SEARCH\")\r\n left_groupbox.setMaximumSize(400, 800)\r\n right_groupbox = QGroupBox(\"Quotation\")\r\n right_groupbox_tbl = QGroupBox(\"입력\")\r\n\r\n # ---------------------------------------------------------------- C-CODE SEARCH\r\n # left_groupbox\r\n lal1 = QLabel(\"검색: \")\r\n self.c_lin1 = QLineEdit()\r\n self.c_pbtn1 = QPushButton(\"확인\")\r\n self.c_pbtn2 = QPushButton(\"입력\")\r\n self.c_tbl1 = QTableWidget()\r\n\r\n # Table 짝수번째 색 변화\r\n self.c_tbl1.setAlternatingRowColors(True)\r\n # Table 셀을 선택할 때 전체 행을 선택하도록 설정\r\n self.c_tbl1.setSelectionBehavior(QAbstractItemView.SelectRows)\r\n # Table을 수정하지 못하도록 설정\r\n self.c_tbl1.setEditTriggers(QAbstractItemView.NoEditTriggers)\r\n # Table에서 더블클릭을 하면 실행되는 기능\r\n self.c_tbl1.doubleClicked.connect(self.move_toQ)\r\n # Table 크기 조절\r\n\r\n # 버튼을 누르면 시작\r\n self.c_pbtn1.clicked.connect(self.tab1_ccode_btn)\r\n self.c_pbtn2.clicked.connect(self.tab1_ccode_btn2)\r\n self.c_lin1.returnPressed.connect(self.tab1_ccode_btn)\r\n\r\n leftInnerLayout_top = QHBoxLayout()\r\n leftInnerLayout_top.addWidget(lal1)\r\n leftInnerLayout_top.addWidget(self.c_lin1)\r\n leftInnerLayout_top.addWidget(self.c_pbtn1)\r\n leftInnerLayout_top.addWidget(self.c_pbtn2)\r\n\r\n leftInnerLayout_btm = QVBoxLayout()\r\n leftInnerLayout_btm.addLayout(leftInnerLayout_top)\r\n leftInnerLayout_btm.addWidget(self.c_tbl1)\r\n\r\n left_groupbox.setLayout(leftInnerLayout_btm)\r\n\r\n # ---------------------------------------------------------------- Quotation\r\n # right_groupbox\r\n lal1 = QLabel('CODE:')\r\n lal2 = QLabel('COMPANY:')\r\n lal3 = QLabel('NAME:')\r\n lal4 = QLabel('Quotation Number:')\r\n lal5 = QLabel('CATEGORY:')\r\n lal6 = QLabel('Number Of Type:')\r\n lal_date = QLabel(self.date)\r\n lal1.setFixedSize(100, 20)\r\n lal2.setFixedSize(150, 20)\r\n lal3.setFixedSize(100, 20)\r\n lal4.setFixedSize(150, 20)\r\n lal5.setFixedSize(100, 
20)\r\n lal6.setFixedSize(150, 20)\r\n\r\n self.Q_lal1 = QLabel('')\r\n self.Q_lal2 = QLabel('')\r\n self.Q_lal3 = QLabel('')\r\n self.Q_lal4 = QLabel('')\r\n self.Q_com1 = QComboBox()\r\n self.Q_spin1 = QSpinBox()\r\n self.Q_pbtn1 = QPushButton(\"확인\")\r\n self.Q_lal1.setFixedSize(100, 20)\r\n self.Q_lal2.setFixedSize(150, 20)\r\n self.Q_lal3.setFixedSize(100, 20)\r\n self.Q_lal4.setFixedSize(150, 20)\r\n self.Q_com1.setFixedSize(100, 20)\r\n self.Q_spin1.setFixedSize(150, 20)\r\n self.Q_pbtn1.setFixedSize(60, 30)\r\n\r\n # Quotation Groupbox\r\n Q_table_groupbox = QVBoxLayout()\r\n self.Q_tbl = QTableWidget()\r\n self.Q_tbl.setAlternatingRowColors(True)\r\n Q_table_groupbox.addWidget(self.Q_tbl)\r\n right_groupbox_tbl.setLayout(Q_table_groupbox)\r\n\r\n # lal 설정\r\n self.Q_lal1.setStyleSheet(\r\n \"color: black;\"\r\n # \"border-style: solid;\"\r\n # \"border-width: 2px;\"\r\n \"background-color: #BFB1B1;\"\r\n # \"border-radius: 3px;\"\r\n # 포트설정 !!!\r\n \"font: bold large 'Malgun Gothic'\"\r\n )\r\n self.Q_lal2.setStyleSheet(\r\n \"color: black;\"\r\n # \"border-style: solid;\"\r\n # \"border-width: 2px;\"\r\n \"background-color: #BFB1B1;\"\r\n # \"border-radius: 3px;\"\r\n # 포트설정 !!!\r\n \"font: bold large 'Malgun Gothic'\"\r\n )\r\n self.Q_lal3.setStyleSheet(\r\n \"color: black;\"\r\n # \"border-style: solid;\"\r\n # \"border-width: 2px;\"\r\n \"background-color: #BFB1B1;\"\r\n # \"border-radius: 3px;\"\r\n # 포트설정 !!!\r\n \"font: bold large 'Malgun Gothic'\"\r\n )\r\n self.Q_lal4.setStyleSheet(\r\n \"color: black;\"\r\n # \"border-style: solid;\"\r\n # \"border-width: 2px;\"\r\n \"background-color: #BFB1B1;\"\r\n # \"border-radius: 3px;\"\r\n # 포트설정 !!!\r\n \"font: bold large 'Malgun Gothic'\"\r\n )\r\n lal_date.setStyleSheet(\r\n \"color: black;\"\r\n \"background-color: #E0E0E0;\"\r\n \"font: bold large 'Malgun Gothic';\"\r\n \"font-size: 30px\"\r\n )\r\n\r\n # COMBOBOX ITEM\r\n self.Q_com1.addItem('PQ')\r\n self.Q_com1.addItem('NA')\r\n self.Q_com1.currentIndexChanged.connect(self.combo_act)\r\n # SPINBOX\r\n self.Q_spin1.setMinimum(1)\r\n self.Q_spin1.setMaximum(10)\r\n self.Q_spin1.valueChanged.connect(self.spin_act)\r\n # Q_pbtn1\r\n self.Q_pbtn1.clicked.connect(self.Q_tbl_show)\r\n\r\n rightInnerLayout_1 = QHBoxLayout()\r\n # rightInnerLayout_1.addStretch(0)\r\n rightInnerLayout_1.addWidget(lal1)\r\n rightInnerLayout_1.addWidget(self.Q_lal1)\r\n rightInnerLayout_1.addWidget(lal2)\r\n rightInnerLayout_1.addWidget(self.Q_lal2)\r\n # rightInnerLayout_1.addStretch(1)\r\n\r\n rightInnerLayout_2 = QHBoxLayout()\r\n rightInnerLayout_2.addWidget(lal3)\r\n rightInnerLayout_2.addWidget(self.Q_lal3)\r\n rightInnerLayout_2.addWidget(lal4)\r\n rightInnerLayout_2.addWidget(self.Q_lal4)\r\n\r\n rightInnerLayout_3 = QHBoxLayout()\r\n rightInnerLayout_3.addWidget(lal5)\r\n rightInnerLayout_3.addWidget(self.Q_com1)\r\n rightInnerLayout_3.addWidget(lal6)\r\n rightInnerLayout_3.addWidget(self.Q_spin1)\r\n\r\n rightInnerLayout_4 = QHBoxLayout()\r\n rightInnerLayout_4.addStretch(10)\r\n rightInnerLayout_4.addWidget(self.Q_pbtn1)\r\n rightInnerLayout_4.addStretch(1)\r\n\r\n rightInnerLayout_V = QVBoxLayout()\r\n rightInnerLayout_V.addLayout(rightInnerLayout_1)\r\n rightInnerLayout_V.addLayout(rightInnerLayout_2)\r\n rightInnerLayout_V.addLayout(rightInnerLayout_3)\r\n rightInnerLayout_V.addLayout(rightInnerLayout_4)\r\n\r\n right_groupbox.setLayout(rightInnerLayout_V)\r\n\r\n tablayout1 = QHBoxLayout()\r\n tablayout1.addWidget(right_groupbox)\r\n tablayout1.addWidget(lal_date)\r\n\r\n tablayout2 = 
QVBoxLayout()\r\n tablayout2.addLayout(tablayout1)\r\n tablayout2.addWidget(right_groupbox_tbl)\r\n\r\n tablayout3 = QHBoxLayout()\r\n tablayout3.addWidget(left_groupbox)\r\n tablayout3.addLayout(tablayout2)\r\n\r\n self.setLayout(tablayout3)\r\n\r\n # c_code 의 확인 button\r\n def tab1_ccode_btn(self):\r\n df_fun = ef.excel_pd()\r\n df = df_fun.search_ccode()\r\n\r\n ccode_text = self.c_lin1.text()\r\n df_search = df['COMPANY'].str.contains(ccode_text)\r\n\r\n DF = df[df_search]\r\n\r\n if DF.empty:\r\n reply = QMessageBox(self)\r\n reply.question(self, 'Error', '검색되지 않습니다.', QMessageBox.Yes)\r\n else:\r\n self.numROW = len(DF)\r\n self.numCOL = len(DF.columns)\r\n # ROW와 COLUMN 수 지정\r\n self.c_tbl1.setRowCount(self.numROW)\r\n self.c_tbl1.setColumnCount(self.numCOL)\r\n # COLUMN 지정\r\n self.c_tbl1.setHorizontalHeaderLabels(DF.columns.tolist())\r\n # 요소 넣기\r\n self.v_list = DF.values.tolist()\r\n for m, n in zip(self.v_list, range(self.numROW)):\r\n for a, b in zip(range(self.numCOL), m):\r\n self.c_tbl1.setItem(n, a, QTableWidgetItem(b))\r\n\r\n # c_code 의 입력 button\r\n def tab1_ccode_btn2(self):\r\n tab1_btn2_play = InputDialog()\r\n tab1_btn2_play.exec_()\r\n\r\n # 검색기능\r\n def move_toQ(self):\r\n # 더블 클릭된 행을 불러와서 각각 이름을 선언한다.\r\n row = self.c_tbl1.currentRow()\r\n self.tb_code = self.c_tbl1.item(row, 0).text()\r\n self.tb_comp = self.c_tbl1.item(row, 1).text()\r\n self.tb_name = self.c_tbl1.item(row, 2).text()\r\n\r\n # N NUMBER 추출\r\n mkfunc = ef.excel_pd()\r\n self.Nnumber = mkfunc.MK_DB()\r\n self.Nrow = len(self.Nnumber)\r\n last = self.Nnumber[self.Nnumber.columns[0]][-1:].values.tolist()\r\n # 마지막 N 번호 다음 번호를 붙일 땐 +1 해서 사용\r\n self.last = int(last[0][6:])\r\n self.N = str(self.last + 1)\r\n\r\n Message1 = self.tb_code\r\n Message2 = self.tb_comp\r\n Message3 = self.tb_name\r\n Message4 = self.N\r\n\r\n self.Q_lal1.setText(Message1)\r\n self.Q_lal2.setText(Message2)\r\n self.Q_lal3.setText(Message3)\r\n self.Q_lal4.setText(Message4)\r\n\r\n # Quotation COMBOBOX\r\n def combo_act(self):\r\n self.Q_com1_text = self.Q_com1.currentText()\r\n\r\n # Quotation SPINBOX\r\n def spin_act(self):\r\n # INT로 활용하기 위해 value로 값을 받음\r\n self.Q_spin1_text = self.Q_spin1.value()\r\n\r\n def Q_tbl_show(self):\r\n self.Q_tbl.setRowCount(self.Q_spin1_text)\r\n self.Q_tbl.setColumnCount(6)\r\n column_list = ['CLASS', 'NAME', 'SUB', '규격', '수량', '단가']\r\n self.Q_tbl.setHorizontalHeaderLabels(column_list)\r\n self.Q_tbl.setColumnWidth(0, 250)\r\n self.Q_tbl.setColumnWidth(1, 400)\r\n self.Q_tbl.setColumnWidth(2, 200)\r\n self.Q_tbl.setColumnWidth(3, 80)\r\n self.Q_tbl.setColumnWidth(4, 40)\r\n self.Q_tbl.setColumnWidth(5, 140)\r\n # self.Q_tbl.resizeColumnsToContents() 컬럼사이즈에 맞게\r\n\r\n self.combo_class_list = ['선택', 'RECOMBINANT PROTEIN', 'ANTIBODY', 'HYBRIDOMA', 'CULTURE MEDIA', 'BIACORE', 'SCREENING',\r\n 'HUMANIZATION', 'AFFINITY MATURATION', 'IMMUNE LIBRARY', 'RCB', 'ELISA', 'FACS', 'ETC', 'SEC-HPLC']\r\n self.combo_size_list = ['VOL', 'ML', 'EA']\r\n self.combo_sub = {'선택':['NA'],\r\n 'RECOMBINANT PROTEIN':['1ST-PURI', '2ND-PURI', 'ENDOTOXIN', 'SEC-HPLC', '배양액'],\r\n 'ANTIBODY':['1ST-PURI', '2ND-PURI', 'CHIMERIC(hIGg1/Hkappa'],\r\n 'HYBRIDOMA':['IDENTIFICATION', '1ST-PURI'],\r\n 'CULTURE MEDIA':['NA'],\r\n 'BIACORE':['AFFINITY', 'MAPPING'],\r\n 'SCREENING':['BASIC BINDER', 'CUSTOM', 'FULL BINDER', 'BLOCKER', 'RAPID BINDER'],\r\n 'HUMANIZATION':['CDR GRAFTING', 'GUIDED SELECTION', 'TOTAL SOLUTION'],\r\n 'AFFINITY MATURATION':['LC SHUFFLING', 'HOT SPOT MUTATION', 'CORE PACKING', 'TOTAL SOLUTION'],\r\n 
'IMMUNE LIBRARY':['IMMUNE LIBRARY ONLY', 'SCREENING'],\r\n 'RCB':['FULL SERVICE', 'POOL GENERATION'],\r\n 'ELISA':['AFFINITY', 'MAPPING'],\r\n 'FACS':['NA'],\r\n 'ETC':['BUFFER', 'ENDOTOXIN', 'SEC-HPLC'],\r\n 'SEC-HPLC':['PURITY']\r\n }\r\n\r\n for i in range(self.Q_spin1_text):\r\n\r\n globals()['combo_size{}'.format(i)] = QComboBox()\r\n globals()['combo_class{}'.format(i)] = QComboBox()\r\n self.spin = QSpinBox()\r\n self.spin.setMinimum(1)\r\n self.spin.setMaximum(1000)\r\n\r\n globals()['combo_class{}'.format(i)].addItems(self.combo_class_list)\r\n globals()['combo_size{}'.format(i)].addItems(self.combo_size_list)\r\n\r\n self.Q_tbl.setCellWidget(i, 0, globals()['combo_class{}'.format(i)])\r\n # self.Q_tbl.setCellWidget(i, 2, globals()['combo_sub{}'.format(i)])\r\n self.Q_tbl.setCellWidget(i, 3, globals()['combo_size{}'.format(i)])\r\n self.Q_tbl.setCellWidget(i, 4, self.spin)\r\n\r\n\r\n # 각 콤보에서 필요한 기능 추가 -> 콤보가 변경될 때마다 작동하는 함수 필요\r\n # 콤보변화에 따른 하위 콤보의 변화\r\n if self.Q_spin1_text == 1:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n elif self.Q_spin1_text == 2:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n elif self.Q_spin1_text == 3:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n elif self.Q_spin1_text == 4:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n elif self.Q_spin1_text == 5:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n combo_class4.currentTextChanged.connect(self.combo_change4)\r\n elif self.Q_spin1_text == 6:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n combo_class4.currentTextChanged.connect(self.combo_change4)\r\n combo_class5.currentTextChanged.connect(self.combo_change5)\r\n elif self.Q_spin1_text == 7:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n combo_class4.currentTextChanged.connect(self.combo_change4)\r\n combo_class5.currentTextChanged.connect(self.combo_change5)\r\n combo_class6.currentTextChanged.connect(self.combo_change6)\r\n elif self.Q_spin1_text == 8:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n combo_class4.currentTextChanged.connect(self.combo_change4)\r\n combo_class5.currentTextChanged.connect(self.combo_change5)\r\n combo_class6.currentTextChanged.connect(self.combo_change6)\r\n combo_class7.currentTextChanged.connect(self.combo_change7)\r\n elif 
self.Q_spin1_text == 9:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n combo_class4.currentTextChanged.connect(self.combo_change4)\r\n combo_class5.currentTextChanged.connect(self.combo_change5)\r\n combo_class6.currentTextChanged.connect(self.combo_change6)\r\n combo_class7.currentTextChanged.connect(self.combo_change7)\r\n combo_class8.currentTextChanged.connect(self.combo_change8)\r\n elif self.Q_spin1_text == 10:\r\n combo_class0.currentTextChanged.connect(self.combo_change0)\r\n combo_class1.currentTextChanged.connect(self.combo_change1)\r\n combo_class2.currentTextChanged.connect(self.combo_change2)\r\n combo_class3.currentTextChanged.connect(self.combo_change3)\r\n combo_class4.currentTextChanged.connect(self.combo_change4)\r\n combo_class5.currentTextChanged.connect(self.combo_change5)\r\n combo_class6.currentTextChanged.connect(self.combo_change6)\r\n combo_class7.currentTextChanged.connect(self.combo_change7)\r\n combo_class8.currentTextChanged.connect(self.combo_change8)\r\n combo_class9.currentTextChanged.connect(self.combo_change9)\r\n\r\n def combo_change0(self):\r\n CD0 = QComboBox()\r\n list0 = self.combo_sub[combo_class0.currentText()]\r\n CD0.addItems(list0)\r\n self.Q_tbl.setCellWidget(0, 2, CD0)\r\n def combo_change1(self):\r\n CD1 = QComboBox()\r\n list1 = self.combo_sub[combo_class1.currentText()]\r\n CD1.addItems(list1)\r\n self.Q_tbl.setCellWidget(1, 2, CD1)\r\n def combo_change2(self):\r\n CD2 = QComboBox()\r\n list2 = self.combo_sub[combo_class2.currentText()]\r\n CD2.addItems(list2)\r\n self.Q_tbl.setCellWidget(2, 2, CD2)\r\n def combo_change3(self):\r\n CD3 = QComboBox()\r\n list3 = self.combo_sub[combo_class3.currentText()]\r\n CD3.addItems(list3)\r\n self.Q_tbl.setCellWidget(3, 2, CD3)\r\n def combo_change4(self):\r\n CD4 = QComboBox()\r\n list4 = self.combo_sub[combo_class4.currentText()]\r\n CD4.addItems(list4)\r\n self.Q_tbl.setCellWidget(4, 2, CD4)\r\n def combo_change5(self):\r\n CD5 = QComboBox()\r\n list5 = self.combo_sub[combo_class5.currentText()]\r\n CD5.addItems(list5)\r\n self.Q_tbl.setCellWidget(5, 2, CD5)\r\n def combo_change6(self):\r\n CD6 = QComboBox()\r\n list6 = self.combo_sub[combo_class6.currentText()]\r\n CD6.addItems(list6)\r\n self.Q_tbl.setCellWidget(6, 2, CD6)\r\n def combo_change7(self):\r\n CD7 = QComboBox()\r\n list7 = self.combo_sub[combo_class7.currentText()]\r\n CD7.addItems(list7)\r\n self.Q_tbl.setCellWidget(7, 2, CD7)\r\n def combo_change8(self):\r\n CD8 = QComboBox()\r\n list8 = self.combo_sub[combo_class8.currentText()]\r\n CD8.addItems(list8)\r\n self.Q_tbl.setCellWidget(0, 2, CD8)\r\n def combo_change9(self):\r\n CD9 = QComboBox()\r\n list9 = self.combo_sub[combo_class9.currentText()]\r\n CD9.addItems(list9)\r\n self.Q_tbl.setCellWidget(0, 2, CD9)\r\n\r\n# c_code popup창\r\nclass InputDialog(QDialog):\r\n def __init__(self):\r\n super().__init__()\r\n self.setupUI()\r\n\r\n self.COMPANY = None\r\n self.NAME = None\r\n\r\n def setupUI(self):\r\n self.label1 = QLabel(\"COMPANY\")\r\n self.label2 = QLabel(\"NAME\")\r\n self.line1 = QLineEdit()\r\n self.line2 = QLineEdit()\r\n self.btn = QPushButton('저장')\r\n\r\n # 위치 입력\r\n layout = QGridLayout()\r\n layout.addWidget(self.label1, 0, 0)\r\n layout.addWidget(self.line1, 0, 1)\r\n layout.addWidget(self.label2, 1, 0)\r\n layout.addWidget(self.line2, 1, 1)\r\n 
layout.addWidget(self.btn, 1, 2)\r\n\r\n self.setLayout(layout)\r\n\r\n # Enter를 치면 아래의 명령이 실행되도록록\r\n self.btn.clicked.connect(self.pushButtonCliked)\r\n self.line2.returnPressed.connect(self.pushButtonCliked)\r\n\r\n self.setWindowTitle('Ybio')\r\n self.resize(300, 100)\r\n self.setWindowIcon(QIcon('ybio.png'))\r\n # CENTER 기능이 없음\r\n self.show()\r\n\r\n def pushButtonCliked(self):\r\n adr_a = ef.excel_pd()\r\n df = adr_a.search_ccode()\r\n # 여기서 x는 마지막행을 말한다.\r\n p_x = len(df)\r\n y = str(p_x + 1)\r\n x = str(p_x + 2)\r\n wb = openpyxl.load_workbook(adr_a.DB_Cfile)\r\n w_sheet = wb['DB']\r\n w_sheet['A' + x] = self.line1.text()\r\n w_sheet['B' + x] = self.line2.text()\r\n w_sheet['C' + x] = 'C' + y.zfill(4)\r\n wb.save(adr_a.DB_Cfile)\r\n self.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n first = exe_func()\r\n sys.exit(app.exec_())","sub_path":"tab_0.1.py","file_name":"tab_0.1.py","file_ext":"py","file_size_in_byte":24253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"4669365","text":"from .models.py import blockDB\nfrom hashlib import sha256\nimport json\nimport json\n\nclass block:\n\tdef __init__(self, index, data, timestamp, amount, sender, receiver ,previousHash = ''):\n\t\tself.index = index\n\t\tself.data = data\n\t\tself.previousHash = previousHash\n\t\tself.amount = amount\n\t\tself.sender = sender\n\t\tself.receiver = receiver\n\t\tself.timestamp = timestamp\n\t\tself.hash = ''\n\t\tself.nonce = 0\n\n\tdef calculateHash(self):\n\t\t# returning hash for a set of data\n\t\treturn sha256((str(self.index) + str(self.data) + str(self.amount) + str(self.sender) + str(self.receiver) + str(self.timestamp) + str(self.nonce) + str(self.previousHash)).encode())\n\n\tdef mineBlock(self, difficulty):\n\t\twhile not (sha256(self.hash)).hexdigest()[0:difficulty] == \"0\"*difficulty:\n\t\t\tself.nonce += 1 \n\n\nclass blockchain:\n\tdef __init__(self):\n\t\tself.chain = [self.createGenesis()]\n\t\tself.difficulty = 2\n\n\tdef createGenesis(self):\n\t\treturn block(0 , \"Created a genesis Block\", time.ctime(), 10 , 'Govenment', 'Govenment', \"0\")\n\n\tdef getLatestBlock(self):\n\t\treturn self.chain[-1]\n\n\tdef addBlock(self, newBlock):\n\t\tnewBlock.previousHash = self.getLatestBlock().hash\n\t\tnewBlock.mineBlock(self.difficulty)\n\t\tself.chain.append(newBlock)\n\n\tdef isChainValid(self):\n\t\tfor i in range(1, len(self.chain) - 1):\n\t\t\tpreviousNode = self.chain[i-1]\n\t\t\tcurrentNode = self.chain[i]\n\n\t\tif not currentNode.hash == currentNode.calculateHash():\n\t\t\treturn False\n\n\t\tif not previousNode.hash == currentNode.previousHash:\n\t\t\treturn False\n\n\t\treturn True\n\n\ncrypto = blockchain()\n\n# mining and printing JSON for block 1\ncrypto.addBlock(block(1, \"First User added\", time.ctime(), 10, \"GOVERMENT\", \"first_user\" ) )\nlastNode = crypto.getLatestBlock()\n\nprint(\"-------> Block 1 Mined.......\")\n\ndata = {\n\t'index'\t: lastNode.index,\n\t'data'\t: lastNode.data,\n\t'previousHash' : lastNode.previousHash.hexdigest(),\n\t'currentBlock' : lastNode.hash.hexdigest(),\n\t\n\t'sender'\t:\tlastNode.sender,\n\t'receiver'\t:\tlastNode.receiver,\n\t'time'\t\t: \tlastNode.timestamp,\n}\n\nprint(json.dumps(data))\n\n\n# mining and printing JSON for block 1\ncrypto.addBlock(block(2, \"Second User added\", time.ctime(), 5, \"GOVERMENT\", \"second_user\") )\nlastNode = crypto.getLatestBlock()\n\nprint(\"-------> Block 1 Mined.......\")\n\ndata = {\n\t'index'\t: 
lastNode.index,\n\t'data'\t: lastNode.data,\n\t'previousHash' : lastNode.previousHash.hexdigest(),\n\t'currentBlock' : lastNode.hash.hexdigest(),\n\t\n\t'sender'\t:\tlastNode.sender,\n\t'receiver'\t:\tlastNode.receiver,\n\t'time'\t\t: \tlastNode.timestamp,\n}\n\nprint(json.dumps(data))","sub_path":"crypto/.~c9_invoke_udNgLm.py","file_name":".~c9_invoke_udNgLm.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"591131287","text":"import sqlite3\r\nfrom sqlite3.dbapi2 import Error\r\n\r\ndef conectar():\r\n dbname= 'ecommerce.db'\r\n conn= sqlite3.connect(dbname)\r\n return conn\r\n\r\ndef listaUsuarios():\r\n conn= conectar()\r\n cursor= conn.execute(\"select * from Usuarios;\")\r\n resultusu= list(cursor.fetchall())\r\n conn.close()\r\n return resultusu\r\n\r\ndef get_user_db(userid):\r\n conn= conectar()\r\n cursor= conn.execute(\"select * from Usuarios where usuario='\"+userid+\"';\")\r\n resultSet= cursor.fetchone()\r\n conn.close()\r\n return resultSet\r\n\r\ndef addUsuario(nombre, usuario, correo, iduser, pas, tipo):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"insert into Usuarios (nombre, usuario, correo, identificacion, contrasena, tipo) values (?,?,?,?,?,?);\", (nombre, usuario, correo, iduser, pas, tipo))\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef editUser(nombre, usuario, correo, iduser, pas, tipo):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"UPDATE Usuarios SET nombre= '\" + nombre + \"', usuario= '\" + usuario + \"', correo= '\" + correo + \"', identificacion= '\" + iduser + \"', contrasena= '\" + pas + \"', tipo= '\" + tipo + \"' WHERE usuario = '\" + usuario + \"';\")\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef deleteUser(usuario):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"DELETE from Usuarios WHERE usuario= '\" + usuario + \"';\")\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef listaProductos():\r\n conn= conectar()\r\n cursor= conn.execute(\"select * from Producto;\")\r\n resultprodu= list(cursor.fetchall())\r\n conn.close()\r\n return resultprodu\r\n\r\ndef get_produ_db(producod):\r\n conn= conectar()\r\n cursor= conn.execute(\"select * from Producto where codigo='\"+producod+\"';\")\r\n resultpro= cursor.fetchone()\r\n conn.close()\r\n return resultpro\r\n\r\ndef get_producto_nombre_db(nombre):\r\n conn= conectar()\r\n cursor = conn.execute(\"select id from Producto where nombre='\"+nombre+\"';\")\r\n resultpro= cursor.fetchone()\r\n conn.close()\r\n return resultpro\r\n\r\ndef addProduct(codigo, nombre, precio, existencia, cordes, londes, imagen):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"insert into Producto (codigo, nombre, precio, existencia, shortdes, longdes, img) values (?,?,?,?,?,?,?);\", (codigo, nombre, precio, existencia, cordes, londes, imagen))\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef editProduct(codigo, nombre, precio, existencia, cordes, londes):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"UPDATE Producto SET codigo= '\" + codigo + \"', nombre= '\" + nombre + \"', precio= '\" + precio + \"', existencia= '\" + existencia + \"', shortdes= '\" + cordes + \"', longdes= '\" + londes + \"' WHERE codigo = '\" + codigo + \"';\")\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error 
as error:\r\n return False\r\n\r\ndef deleteProduct(codigo):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"DELETE from Producto WHERE codigo= '\" + codigo + \"';\")\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef addComent(idpro, iduser, mensaje):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"insert into Comentarios (idproducto, idusuario, mensaje) values (?,?,?);\", (idpro, iduser, mensaje))\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef addLista(iduser, idpro):\r\n try:\r\n conn= conectar()\r\n conn.execute(\"insert into ListaDeseos (idusuario, idproducto) values (?,?);\", (iduser, idpro))\r\n conn.commit()\r\n conn.close()\r\n return True\r\n except Error as error:\r\n return False\r\n\r\ndef listaComentarios():\r\n conn= conectar()\r\n cursor= conn.execute(\"select * from Comentarios;\")\r\n resultdeseos= list(cursor.fetchall())\r\n conn.close()\r\n return resultdeseos\r\n \r\ndef listaComentariosPorProducto(idProd):\r\n conn= conectar()\r\n try:\r\n cursor= conn.execute('SELECT nombre, mensaje FROM Usuarios INNER JOIN Comentarios on Usuarios.id = Comentarios.idusuario and Comentarios.idproducto = ?', (idProd,))\r\n resultdeseos= list(cursor.fetchall())\r\n conn.close()\r\n return resultdeseos\r\n except Error as e:\r\n print(f\"error in listaComentariosPorProducto: {e}\")\r\n return False\r\n\r\ndef getListaDeseos(idusu):\r\n conn= conectar()\r\n try:\r\n cursor= conn.execute('SELECT * FROM Producto INNER JOIN ListaDeseos on Producto.id = ListaDeseos.idproducto and ListaDeseos.idusuario = ?', (idusu, ))\r\n resultdeseos= list(cursor.fetchall())\r\n conn.close()\r\n return resultdeseos\r\n except Error as e:\r\n print(f\"error in getListaDeseos: {e}\")\r\n return False","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"337644234","text":"\n\"\"\"Unit tests for server prepojenia.\n\nRun all unit tests from the command line as:\n python test.py\nTo run an individual unit test only, run (for example):\n python test.py TestHandlers.test_subgraph\n\"\"\"\nimport json\nimport os\nimport sys\nimport unittest\nimport webapp2\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/db')))\nfrom db import DatabaseConnection\n\nimport server\n\n\ndef _request_json(url, test_handler):\n \"\"\"Verifies that the given URL returns a valid JSON.\"\"\"\n request = webapp2.Request.blank(url)\n response = request.get_response(server.app)\n test_handler.assertEqual(response.status_int, 200)\n test_handler.assertEqual(response.content_type, 'application/json')\n j = json.loads(response.text)\n return j\n\n\nclass TestHandlers(unittest.TestCase):\n\n def test_connection(self):\n url = '/connection?eid1=39541,78864,94764,229752&eid2=136671,229753'\n content = _request_json(url, self)\n print('Connection:\\n%s' % (content))\n\n def test_a_shortest_path(self):\n url = '/a_shortest_path?eid1=3264887&eid2=706143,1184394,1662599,1703776,2349437,3135421'\n content = _request_json(url, self)\n print('AShortestPath:\\n%s' % (content))\n\n def test_a_shortest_path_of_unit_length(self):\n \"\"\"Tests finding a shortest path between endpoints of an edge.\"\"\"\n\n # Find a relation in the database:\n db = DatabaseConnection(path_config='db_config.yaml')\n schema = db.get_latest_schema('prod_')\n db.execute('SET search_path to ' + 
schema + ';')\n rel = db.query('SELECT eid, eid_relation FROM related LIMIT 1')[0]\n source = int(rel[\"eid\"])\n target = int(rel[\"eid_relation\"])\n\n # Check that the shortest path of length 1 is found:\n url = '/a_shortest_path?eid1=%d&eid2=%d' % (source, target)\n content = _request_json(url, self)\n print('AShortestPath:\\n%s' % (content))\n self.assertListEqual(content, [source, target])\n\n def test_subgraph(self):\n url = '/subgraph?eid1=3264887&eid2=706143,1184394,1662599,1703776,2349437,3135421'\n content = _request_json(url, self)\n print('Subgraph:\\n%s' % (content))\n self.assertTrue(content)\n\n\ndef main():\n max_relations_to_load = 123456789\n\n server.initialise_app(max_relations_to_load)\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"prepojenia/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"80756969","text":"import time\nfrom datetime import datetime\nimport os\nimport sys\n\nsys.path.insert(0, '/mnt/e/Dev/Polyglot/PolyglotDB')\nimport re\nimport yaml\nimport csv\nimport platform\nimport polyglotdb.io as pgio\n\nfrom polyglotdb import CorpusContext\nfrom polyglotdb.io.enrichment import enrich_speakers_from_csv, enrich_lexicon_from_csv\nfrom polyglotdb.acoustics.formants.refined import analyze_formant_points_refinement\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# =============== CONFIGURATION ===============\n\nduration_threshold = 0.05\n##### JM #####\n# nIterations = 1\nnIterations = 20\n##############\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsibilant_script_path = os.path.join(base_dir, 'Common', 'sibilant_jane_optimized.praat')\n\n# =============================================\nnow = datetime.now()\ndate = '{}-{}-{}'.format(now.year, now.month, now.day)\n\n\ndef load_token():\n token_path = os.path.join(base_dir, 'auth_token')\n if not os.path.exists(token_path):\n return None\n with open(token_path, 'r') as f:\n token = f.read().strip()\n return token\n\n\ndef save_performance_benchmark(config, task, time_taken):\n benchmark_folder = os.path.join(base_dir, 'benchmarks')\n os.makedirs(benchmark_folder, exist_ok=True)\n benchmark_file = os.path.join(benchmark_folder, 'benchmarks.csv')\n if not os.path.exists(benchmark_file):\n with open(benchmark_file, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(['Computer', 'Corpus', 'Date', 'Corpus_size', 'Task', 'Time'])\n with open(benchmark_file, 'a', encoding='utf8') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow([platform.node(), config.corpus_name, date, get_size_of_corpus(config), task, time_taken])\n\n\ndef load_config(corpus_name):\n path = os.path.join(base_dir, corpus_name, '{}.yaml'.format(corpus_name))\n if not os.path.exists(path):\n print('The config file for the specified corpus does not exist ({}).'.format(path))\n sys.exit(1)\n expected_keys = ['corpus_directory', 'input_format', 'dialect_code', 'unisyn_spade_directory',\n 'speaker_enrichment_file',\n 'speakers', 'vowel_inventory', 'stressed_vowels', 'sibilant_segments'\n ]\n with open(path, 'r', encoding='utf8') as f:\n conf = yaml.load(f)\n missing_keys = []\n for k in expected_keys:\n if k not in conf:\n missing_keys.append(k)\n\n ##### JM #####\n if not 'vowel_prototypes_path' in conf:\n conf['vowel_prototypes_path'] = ''\n print('no vowel prototypes path given, so using no 
prototypes')\n elif not os.path.exists(conf['vowel_prototypes_path']):\n conf['vowel_prototypes_path'] = ''\n print('vowel prototypes path not valid, so using no prototypes')\n ##############\n\n if missing_keys:\n print('The following keys were missing from {}: {}'.format(path, ', '.join(missing_keys)))\n sys.exit(1)\n return conf\n\n\ndef call_back(*args):\n args = [x for x in args if isinstance(x, str)]\n if args:\n print(' '.join(args))\n\n\ndef reset(config):\n with CorpusContext(config) as c:\n print('Resetting the corpus.')\n c.reset()\n\n\ndef loading(config, corpus_dir, textgrid_format):\n with CorpusContext(config) as c:\n exists = c.exists()\n if exists:\n print('Corpus already loaded, skipping import.')\n return\n if not os.path.exists(corpus_dir):\n print('The path {} does not exist.'.format(corpus_dir))\n sys.exit(1)\n with CorpusContext(config) as c:\n print('loading')\n\n if textgrid_format == \"buckeye\":\n parser = pgio.inspect_buckeye(corpus_dir)\n elif textgrid_format == \"csv\":\n parser = pgio.inspect_buckeye(corpus_dir)\n elif textgrid_format.lower() == \"fave\":\n parser = pgio.inspect_fave(corpus_dir)\n elif textgrid_format == \"ilg\":\n parser = pgio.inspect_ilg(corpus_dir)\n elif textgrid_format == \"labbcat\":\n parser = pgio.inspect_labbcat(corpus_dir)\n elif textgrid_format == \"partitur\":\n parser = pgio.inspect_partitur(corpus_dir)\n elif textgrid_format == \"timit\":\n parser = pgio.inspect_timit(corpus_dir)\n else:\n parser = pgio.inspect_mfa(corpus_dir)\n parser.call_back = call_back\n beg = time.time()\n c.load(parser, corpus_dir)\n end = time.time()\n time_taken = end - beg\n print('Loading took: {}'.format(time_taken))\n save_performance_benchmark(config, 'import', time_taken)\n\n\ndef basic_enrichment(config, syllabics, pauses):\n with CorpusContext(config) as g:\n if not 'utterance' in g.annotation_types:\n print('encoding utterances')\n begin = time.time()\n g.encode_pauses(pauses)\n # g.encode_pauses('^[<{].*$', call_back = call_back)\n g.encode_utterances(min_pause_length=0.15) # , call_back = call_back)\n # g.encode_utterances(min_pause_length = 0.5, call_back = call_back)\n time_taken = time.time() - begin\n print('Utterance enrichment took: {}'.format(time_taken))\n save_performance_benchmark(config, 'utterance_encoding', time_taken)\n\n if syllabics and 'syllable' not in g.annotation_types:\n print('encoding syllables')\n begin = time.time()\n g.encode_syllabic_segments(syllabics)\n g.encode_syllables('maxonset')\n time_taken = time.time() - begin\n print('Syllable enrichment took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'syllable_encoding', time_taken)\n\n print('enriching utterances')\n if syllabics and not g.hierarchy.has_token_property('utterance', 'speech_rate'):\n begin = time.time()\n g.encode_rate('utterance', 'syllable', 'speech_rate')\n time_taken = time.time() - begin\n print('Speech rate encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'speech_rate_encoding', time_taken)\n\n if not g.hierarchy.has_token_property('utterance', 'num_words'):\n begin = time.time()\n g.encode_count('utterance', 'word', 'num_words')\n time_taken = time.time() - begin\n print('Word count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_words_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('utterance', 'num_syllables'):\n begin = time.time()\n g.encode_count('utterance', 'syllable', 'num_syllables')\n time_taken = 
time.time() - begin\n print('Syllable count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_syllables_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('syllable', 'position_in_word'):\n print('enriching syllables')\n begin = time.time()\n g.encode_position('word', 'syllable', 'position_in_word')\n time_taken = time.time() - begin\n print('Syllable position encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'position_in_word_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('syllable', 'num_phones'):\n begin = time.time()\n g.encode_count('syllable', 'phone', 'num_phones')\n time_taken = time.time() - begin\n print('Phone count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_phones_encoding', time_taken)\n\n # print('enriching words')\n # if not g.hierarchy.has_token_property('word', 'position_in_utterance'):\n # begin = time.time()\n # g.encode_position('utterance', 'word', 'position_in_utterance')\n # print('Utterance position encoding took: {}'.format(time.time() - begin))\n\n if syllabics and not g.hierarchy.has_token_property('word', 'num_syllables'):\n begin = time.time()\n g.encode_count('word', 'syllable', 'num_syllables')\n time_taken = time.time() - begin\n print('Syllable count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_syllables_encoding', time_taken)\n\n print('enriching syllables')\n if syllabics and g.hierarchy.has_type_property('word', 'stresspattern') and not g.hierarchy.has_token_property(\n 'syllable',\n 'stress'):\n begin = time.time()\n g.encode_stress_from_word_property('stresspattern')\n time_taken = time.time() - begin\n print(\"encoded stress\")\n save_performance_benchmark(config, 'stress_encoding_from_pattern', time_taken)\n elif syllabics and re.search(r\"\\d\", syllabics[0]) and not g.hierarchy.has_type_property('syllable',\n 'stress'): # If stress is included in the vowels\n begin = time.time()\n g.encode_stress_to_syllables(\"[0-9]\", clean_phone_label=False)\n time_taken = time.time() - begin\n print(\"encoded stress\")\n save_performance_benchmark(config, 'stress_encoding', time_taken)\n\n\ndef lexicon_enrichment(config, unisyn_spade_directory, dialect_code):\n enrichment_dir = os.path.join(unisyn_spade_directory, 'enrichment_files')\n if not os.path.exists(enrichment_dir):\n print('Could not find enrichment_files directory from {}, skipping lexical enrichment.'.format(\n unisyn_spade_directory))\n return\n with CorpusContext(config) as g:\n\n for lf in os.listdir(enrichment_dir):\n path = os.path.join(enrichment_dir, lf)\n if lf == 'rule_applications.csv':\n if g.hierarchy.has_type_property('word', 'UnisynPrimStressedVowel1'.lower()):\n print('Dialect independent enrichment already loaded, skipping.')\n continue\n elif lf.startswith(dialect_code):\n if g.hierarchy.has_type_property('word', 'UnisynPrimStressedVowel2_{}'.format(\n dialect_code).lower()):\n print('Dialect specific enrichment already loaded, skipping.')\n continue\n else:\n continue\n begin = time.time()\n enrich_lexicon_from_csv(g, path)\n time_taken = time.time() - begin\n print('Lexicon enrichment took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'lexicon_enrichment', time_taken)\n\n\ndef speaker_enrichment(config, speaker_file):\n if not os.path.exists(speaker_file):\n print('Could not find {}, skipping speaker 
enrichment.'.format(speaker_file))\n return\n with CorpusContext(config) as g:\n if not g.hierarchy.has_speaker_property('gender'):\n begin = time.time()\n enrich_speakers_from_csv(g, speaker_file)\n time_taken = time.time() - begin\n print('Speaker enrichment took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'speaker_enrichment', time_taken)\n else:\n print('Speaker enrichment already done, skipping.')\n\n\ndef sibilant_acoustic_analysis(config, sibilant_segments):\n # Encode sibilant class and analyze sibilants using the praat script\n with CorpusContext(config) as c:\n if c.hierarchy.has_token_property('phone', 'cog'):\n print('Sibilant acoustics already analyzed, skipping.')\n return\n print('Beginning sibilant analysis')\n beg = time.time()\n c.encode_class(sibilant_segments, 'sibilant')\n time_taken = time.time() - beg\n save_performance_benchmark(config, 'sibilant_encoding', time_taken)\n print('sibilants encoded')\n\n # analyze all sibilants using the script found at script_path\n beg = time.time()\n c.analyze_script('sibilant', sibilant_script_path, duration_threshold=0.01)\n end = time.time()\n time_taken = time.time() - beg\n print('Sibilant analysis took: {}'.format(end - beg))\n save_performance_benchmark(config, 'sibilant_acoustic_analysis', time_taken)\n\n\ndef formant_acoustic_analysis(config, vowels, vowel_prototypes_path, drop_formant=False):\n with CorpusContext(config) as c:\n # if c.hierarchy.has_token_property('phone', 'F1'): # JM TEMPORARY\n # print('Formant acoustics already analyzed, skipping.')\n # return\n print('Beginning formant analysis')\n beg = time.time()\n c.encode_class(vowels, 'vowel')\n time_taken = time.time() - beg\n save_performance_benchmark(config, 'vowel_encoding', time_taken)\n print('vowels encoded')\n beg = time.time()\n metadata = analyze_formant_points_refinement(c, 'vowel', duration_threshold=duration_threshold,\n num_iterations=nIterations,\n vowel_prototypes_path=vowel_prototypes_path,\n drop_formant=drop_formant\n )\n end = time.time()\n time_taken = time.time() - beg\n print('Analyzing formants took: {}'.format(end - beg))\n save_performance_benchmark(config, 'formant_acoustic_analysis', time_taken)\n\n\ndef formant_export(config, corpus_name, dialect_code, speakers, vowels): # Gets information into a csv\n\n csv_path = os.path.join(base_dir, corpus_name, '{}_formants.csv'.format(corpus_name))\n # Unisyn columns\n other_vowel_codes = ['unisynPrimStressedVowel2_{}'.format(dialect_code),\n 'UnisynPrimStressedVowel3_{}'.format(dialect_code),\n 'UnisynPrimStressedVowel3_XSAMPA',\n 'AnyRuleApplied_{}'.format(dialect_code)]\n\n with CorpusContext(config) as c:\n print('Beginning formant export')\n beg = time.time()\n q = c.query_graph(c.phone)\n if speakers:\n q = q.filter(c.phone.speaker.name.in_(speakers))\n q = q.filter(c.phone.label.in_(vowels))\n\n q = q.columns(c.phone.speaker.name.column_name('speaker'), c.phone.discourse.name.column_name('discourse'),\n c.phone.id.column_name('phone_id'), c.phone.label.column_name('phone_label'),\n c.phone.begin.column_name('begin'), c.phone.end.column_name('end'),\n c.phone.syllable.stress.column_name('syllable_stress'),\n c.phone.syllable.word.stresspattern.column_name('word_stress_pattern'),\n c.phone.syllable.position_in_word.column_name('syllable_position_in_word'),\n c.phone.duration.column_name('duration'),\n c.phone.following.label.column_name('following_phone'),\n c.phone.previous.label.column_name('previous_phone'), c.phone.word.label.column_name('word'),\n 
c.phone.F1.column_name('F1'), c.phone.F2.column_name('F2'), c.phone.F3.column_name('F3'),\n c.phone.B1.column_name('B1'), c.phone.B2.column_name('B2'), c.phone.B3.column_name('B3'),\n c.phone.A1.column_name('A1'), c.phone.A2.column_name('A2'), c.phone.A3.column_name('A3'), c.phone.Ax.column_name('Ax'), c.phone.num_formants.column_name('num_formants'), c.phone.drop_formant.column_name('drop_formant'))\n if c.hierarchy.has_type_property('word', 'UnisynPrimStressedVowel1'.lower()):\n q = q.columns(c.phone.word.unisynprimstressedvowel1.column_name('UnisynPrimStressedVowel1'))\n for v in other_vowel_codes:\n if c.hierarchy.has_type_property('word', v.lower()):\n q = q.columns(getattr(c.phone.word, v.lower()).column_name(v))\n for sp, _ in c.hierarchy.speaker_properties:\n if sp == 'name':\n continue\n q = q.columns(getattr(c.phone.speaker, sp).column_name(sp))\n q.to_csv(csv_path)\n end = time.time()\n time_taken = time.time() - beg\n print('Query took: {}'.format(end - beg))\n print(\"Results for query written to \" + csv_path)\n save_performance_benchmark(config, 'formant_export', time_taken)\n\n\ndef sibilant_export(config, corpus_name, dialect_code, speakers):\n csv_path = os.path.join(base_dir, corpus_name, '{}_sibilants.csv'.format(corpus_name))\n with CorpusContext(config) as c:\n # export to CSV all the measures taken by the script, along with a variety of data about each phone\n print(\"Beginning sibilant export\")\n beg = time.time()\n q = c.query_graph(c.phone).filter(c.phone.subset == 'sibilant')\n # q = q.filter(c.phone.begin == c.phone.syllable.word.begin)\n if speakers:\n q = q.filter(c.phone.speaker.name.in_(speakers))\n # qr = c.query_graph(c.phone).filter(c.phone.subset == 'sibilant')\n # this exports data for all sibilants\n qr = q.columns(c.phone.speaker.name.column_name('speaker'),\n c.phone.discourse.name.column_name('discourse'),\n c.phone.id.column_name('phone_id'), c.phone.label.column_name('phone_label'),\n c.phone.begin.column_name('begin'), c.phone.end.column_name('end'),\n c.phone.duration.column_name('duration'),\n # c.phone.syllable.position_in_word.column_name('syllable_position_in_word'),\n c.phone.following.label.column_name('following_phone'),\n c.phone.previous.label.column_name('previous_phone'),\n c.phone.syllable.word.label.column_name('word'),\n c.phone.syllable.stress.column_name('syllable_stress'),\n c.phone.syllable.phone.filter_by_subset('onset').label.column_name('onset'),\n c.phone.syllable.phone.filter_by_subset('nucleus').label.column_name('nucleus'),\n c.phone.syllable.phone.filter_by_subset('coda').label.column_name('coda'),\n c.phone.cog.column_name('cog'), c.phone.peak.column_name('peak'),\n c.phone.slope.column_name('slope'), c.phone.spread.column_name('spread'))\n for sp, _ in c.hierarchy.speaker_properties:\n if sp == 'name':\n continue\n # accumulate speaker columns on qr, the query that is actually exported below (not the discarded q)\n qr = qr.columns(getattr(c.phone.speaker, sp).column_name(sp))\n qr.to_csv(csv_path)\n end = time.time()\n time_taken = time.time() - beg\n print('Query took: {}'.format(end - beg))\n print(\"Results for query written to \" + csv_path)\n save_performance_benchmark(config, 'sibilant_export', time_taken)\n\ndef polysyllabic_export(config, corpus_name, dialect_code, speakers):\n csv_path = os.path.join(base_dir, corpus_name, '{}_polysyllabic.csv'.format(corpus_name))\n with CorpusContext(config) as c:\n\n print(\"Beginning polysyllabic export\")\n beg = time.time()\n q = c.query_graph(c.syllable)\n q = q.filter(c.syllable.word.end == c.syllable.word.utterance.end)\n q = q.filter(c.syllable.begin == 
c.syllable.word.begin)\n if speakers:\n q = q.filter(c.syllable.speaker.name.in_(speakers))\n\n qr = q.columns(c.syllable.speaker.name.column_name('speaker'),\n c.syllable.label.column_name('syllable_label'),\n c.syllable.duration.column_name('syllable_duration'),\n c.syllable.word.label.column_name('word'),\n c.syllable.word.stresspattern.column_name('stress_pattern'),\n c.syllable.word.num_syllables.column_name('num_syllables'))\n for sp, _ in c.hierarchy.speaker_properties:\n if sp == 'name':\n continue\n qr = qr.columns(getattr(c.syllable.speaker, sp).column_name(sp))\n\n qr.to_csv(csv_path)\n end = time.time()\n time_taken = time.time() - beg\n print('Query took: {}'.format(end - beg))\n print(\"Results for query written to \" + csv_path)\n save_performance_benchmark(config, 'polysyllabic_export', time_taken)\n\ndef get_size_of_corpus(config):\n from polyglotdb.query.base.func import Sum\n with CorpusContext(config) as c:\n c.config.query_behavior = 'other'\n if 'utterance' not in c.annotation_types:\n q = c.query_graph(c.word).columns(Sum(c.word.duration).column_name('result'))\n else:\n q = c.query_graph(c.utterance).columns(Sum(c.utterance.duration).column_name('result'))\n results = q.all()\n return results[0]['result']\n\n\ndef basic_queries(config):\n from polyglotdb.query.base.func import Sum\n with CorpusContext(config) as c:\n print(c.hierarchy)\n print('beginning basic queries')\n beg = time.time()\n q = c.query_lexicon(c.lexicon_phone).columns(c.lexicon_phone.label.column_name('label'))\n results = q.all()\n print('The phone inventory is:', ', '.join(sorted(x['label'] for x in results)))\n for r in results:\n total_count = c.query_graph(c.phone).filter(c.phone.label == r['label']).count()\n duration_threshold_count = c.query_graph(c.phone).filter(c.phone.label == r['label']).filter(\n c.phone.duration >= duration_threshold).count()\n qr = c.query_graph(c.phone).filter(c.phone.label == r['label']).limit(1)\n qr = qr.columns(c.phone.word.label.column_name('word'),\n c.phone.word.transcription.column_name('transcription'))\n res = qr.all()\n if len(res) == 0:\n print('An example for {} was not found.'.format(r['label']))\n else:\n res = res[0]\n print('An example for {} (of {}, {} above {}) is the word \"{}\" with the transcription [{}]'.format(\n r['label'], total_count, duration_threshold_count, duration_threshold, res['word'],\n res['transcription']))\n\n q = c.query_speakers().columns(c.speaker.name.column_name('name'))\n results = q.all()\n print('The speakers in the corpus are:', ', '.join(sorted(x['name'] for x in results)))\n c.config.query_behavior = 'other'\n q = c.query_graph(c.utterance).columns(Sum(c.utterance.duration).column_name('result'))\n results = q.all()\n q = c.query_graph(c.word).columns(Sum(c.word.duration).column_name('result'))\n word_results = q.all()\n print('The total length of speech in the corpus is: {} seconds (utterances) {} seconds (words)'.format(\n results[0]['result'], word_results[0]['result']))\n time_taken = time.time() - beg\n save_performance_benchmark(config, 'basic_query', time_taken)\n","sub_path":"Common/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":23184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"588775559","text":"code_attributes = [\n 'co_argcount',\n 'co_posonlyargcount',\n 'co_kwonlyargcount',\n 'co_nlocals',\n 'co_stacksize',\n 'co_flags',\n 'co_code',\n 'co_consts',\n 'co_names',\n 'co_varnames',\n 'co_filename',\n 'co_name',\n 
'co_firstlineno',\n 'co_lnotab',\n 'co_freevars',\n 'co_cellvars'\n]\n\nrude_types = {\n \"int\": int,\n \"float\": float,\n \"str\": str,\n \"bool\": bool,\n}\n\nobject_types = {\n \"list\": list,\n \"set\": set,\n \"frozenset\": frozenset,\n \"dict\": dict,\n \"tuple\": tuple,\n}","sub_path":"4-term(Python)/second/serializers/Types.py","file_name":"Types.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"8680688","text":"import numpy\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile(1):\n ret,frame = cap.read()\n\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n\n cv2.imshow('framewindow',gray)\n if(cv2.waitKey(1)&0xFF == ord('q')):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"591729640","text":"import os\nfrom sys import stdin, stdout, stderr\n\nimport numpy\n\nfrom bcwd import *\nfrom svm_smo import *\nimport kernels\n\nDATA_DIR = \"data\"\nTMP_DIR = \"tmp\"\n\nDATA_URL = \"http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data\"\n\nDATA_LOCAL_NAME = \"wdbc.data\"\nDATA_LOCAL_PATH = os.path.join(TMP_DIR, DATA_LOCAL_NAME)\n\nDATA_SIZE = 569 # according to the set description\n\nTRAIN_SET_NAME = \"train_set\"\nTRAIN_SET_PATH = os.path.join(DATA_DIR, TRAIN_SET_NAME)\n\nTEST_SET_NAME = \"test_set\"\nTEST_SET_PATH = os.path.join(DATA_DIR, TEST_SET_NAME)\n\nVALID_SET_NAME = \"valid.set\"\nVALID_SET_PATH = os.path.join(DATA_DIR, VALID_SET_NAME)\n\nTEST_SET_FRACTION = 0.10\nVALID_SET_FRACTION = 0.10\n\nif not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\nif not os.path.exists(TMP_DIR):\n os.makedirs(TMP_DIR)\n\nget_data(DATA_URL, DATA_LOCAL_PATH)\nstderr.write(\"Data set fetched to the file {}\\n\".format(DATA_LOCAL_PATH))\nstderr.write(\"Comment the line 39 of main.py to prevent downloading during the further runs\\n\")\n\ndata = load_data(DATA_LOCAL_PATH)\nassert len(data) == DATA_SIZE # there should be 569 instances according to the set description\n\n\ndef run_all(data, phi, verbose = False):\n test_size = int(DATA_SIZE * TEST_SET_FRACTION)\n valid_size = int(DATA_SIZE * VALID_SET_FRACTION)\n train_size = DATA_SIZE - test_size - valid_size\n train_set, test_set, valid_set = split_data(data, train_size, test_size, valid_size)\n\n if verbose:\n stderr.write(\"Train set size: {}\\n\".format(train_size))\n stderr.write(\"Test set size: {}\\n\".format(test_size))\n stderr.write(\"Valid set size: {}\\n\".format(valid_size))\n\n\n write_data(train_set, TRAIN_SET_PATH)\n if verbose:\n stderr.write(\"Train set was dumped to the file {}\\n\".format(TRAIN_SET_PATH))\n write_data(test_set, TEST_SET_PATH)\n if verbose:\n stderr.write(\"Test set was dumped to the file {}\\n\".format(TEST_SET_PATH))\n write_data(valid_set, VALID_SET_PATH)\n if verbose:\n stderr.write(\"Valid set was dumped to the file {}\\n\".format(VALID_SET_PATH))\n\n c = 1.0\n cv_ans = []\n for _ in range(15):\n #if verbose:\n stderr.write(\"Trying C = {} \".format(c))\n classifier = train_svm(train_set, c, phi)\n valid_ans = test_svm(valid_set, classifier)\n results = calculate_results(valid_set, valid_ans)\n err_rate = error_rate(results)\n cv_ans.append((c, err_rate))\n stderr.write(\"validation set error rate {}\\n\".format(err_rate))\n c *= 2\n\n if verbose:\n stderr.write(str(cv_ans) + \"\\n\")\n bestC = 
min(cv_ans, key = lambda p: p[1])[0]\n classifier = train_svm(train_set, bestC, phi)\n test_ans = test_svm(test_set, classifier)\n results = calculate_results(test_set, test_ans)\n\n err_rate = error_rate(results)\n prec = precision(results)\n rec = recall(results)\n f1 = f1score(results)\n if verbose:\n print(\"C = {}, test set error rate = {}\".format(bestC, err_rate))\n\n return (classifier, err_rate, prec, rec, f1)\n\nrandom.seed(6346) # fixed seed makes the program deterministic; comment out for random splits\n\nks = [\n (kernels.identity, \"Identity kernel\"),\n (lambda x, y : kernels.poly2(x, y, 0.0), \"Homogeneous polynomial kernel\"),\n (lambda x, y : kernels.gaussian(x, y, -0.00001), \"Gaussian kernel, gamma = -0.00001\")\n]\n\nfor phi, desc in ks:\n cnt = 2\n serr = 0.0\n sprec = 0.0\n srec = 0.0\n sf1 = 0.0\n print(\"--------------------------------\")\n print(\"Running {} iterations using {}\".format(cnt, desc))\n\n for i in range(cnt):\n stderr.write(\"Running... {}/{}\\n\".format(i, cnt))\n _, err_rate, prec, rec, f1 = run_all(data, phi, verbose = True)\n #stderr.write(\"Error rate: {}%\\n\".format(err_rate * 100))\n serr += err_rate\n sprec += prec\n srec += rec\n sf1 += f1\n\n print(\"Average error rate ({} runs) is {}%\".format(cnt, serr / cnt * 100))\n print(\"Average precision ({} runs) is {}%\".format(cnt, sprec / cnt * 100))\n print(\"Average recall ({} runs) is {}%\".format(cnt, srec / cnt * 100))\n print(\"Average f1 score ({} runs) is {}\".format(cnt, sf1 / cnt))\n print(\"--------------------------------\")\n","sub_path":"dmitry.gerasimov/lab-svm-smo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"81550669","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if root is None:\n return []\n traversal = []\n def traverse(cur_level: List[Node]):\n if len(cur_level) == 0: return\n traversal.append(list(map(lambda x: x.val, cur_level)))\n next_level = [cn for n in cur_level for cn in n.children]\n traverse(next_level)\n traverse([root])\n return traversal\n","sub_path":"Practice-2021/August/Python3/nary_tree_level_order_traversal.py","file_name":"nary_tree_level_order_traversal.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"527982735","text":"import argparse\nimport os\nimport copy as cp\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use(\"seaborn-colorblind\")\nfrom algorithms.online_homomorphism_g_dict_conf import OnlineHomomorphismGDict\nimport model_utils\nimport vis_utils\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n\ndef gather_experience(env, num):\n\n transitions = []\n\n for _ in range(num // 5):\n\n env.reset()\n\n for _ in range(5):\n\n state = cp.deepcopy(env.state)\n action = np.random.uniform(0, 2)\n reward, next_state, done = env.step(action)\n\n transitions.append((state, action, reward, next_state, done))\n\n if done:\n break\n\n return transitions\n\n\ndef sample_actions(state):\n\n num = 10\n start = 0\n end = 2\n actions = list(np.linspace(start, end, num=num))\n return actions\n\n\ndef main(args):\n\n if args.environment == 1:\n from envs.continuous_1 import ContinuousEnv1 as Env\n elif args.environment == 2:\n from envs.continuous_2 import ContinuousEnv2 
as Env\n else:\n from envs.continuous_3 import ContinuousEnv3 as Env\n\n env = Env()\n g = model_utils.BalancedMLP([1], [8, 16, 32], 0.0001, 128, 0.0001, verbose=True)\n\n def visualize_b(state_action_partition):\n vis_utils.plot_background(env, show=False)\n\n xx, yy = np.meshgrid(np.arange(0, env.STATE_ACTION_MAP.shape[1], 0.01),\n np.arange(0, env.STATE_ACTION_MAP.shape[0], 0.01))\n data = np.c_[xx.ravel(), yy.ravel()]\n Z = g.batch_predict(data[:, 0], data[:, 1])\n Z = np.array(Z).reshape(xx.shape)\n plt.contourf(xx, yy, Z, alpha=0.4)\n\n vis_utils.plot_state_action_partition(state_action_partition, show=True)\n\n def visualize_ignored(states):\n\n vis_utils.plot_background(env, show=False)\n\n plt.hist(states, bins=10, range=[0, len(env.STATE_MAP)], normed=True)\n plt.show()\n\n experience = gather_experience(env, args.num_experience)\n homo = OnlineHomomorphismGDict(experience, g, sample_actions, args.b_threshold, args.conf_threshold,\n OnlineHomomorphismGDict.RESOLVE_ADD_CLOSEST, 20, percentile=args.percentile,\n visualize_b=visualize_b, visualize_conf=vis_utils.show_confidences,\n visualize_ignored=visualize_ignored)\n homo.partition_iteration()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"environment\", type=int, help=\"1, 2 or 3; increasing in difficulty\")\n\n parser.add_argument(\"--num-experience\", type=int, default=400, help=\"number of experience to collect\")\n parser.add_argument(\"--b-threshold\", type=int, default=50, help=\"hard threshold on the state-action block size\")\n parser.add_argument(\"--conf-threshold\", type=float, default=0.0, help=\"confidence threshold\")\n parser.add_argument(\"--percentile\", type=int, default=None, help=\"what percentile of confidences to threshold; \"\n \"if None, threshold the minimum confidence\")\n\n parsed = parser.parse_args()\n main(parsed)","sub_path":"scripts/homo_g_dict_conf/balanced_mlp/continuous.py","file_name":"continuous.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"585939260","text":"import json\n\nfrom django.conf import settings\n\nfrom slumber import API\nfrom slumber.exceptions import HttpClientError\n\nfrom .errors import ERROR_STRINGS\n\n\nclient = None\n\n\nclass SolitudeAPI(object):\n \"\"\"A solitude API client.\n\n :param url: URL of the solitude endpoint.\n \"\"\"\n\n def __init__(self, url):\n self.slumber = API(url)\n\n def _buyer_from_response(self, res):\n buyer = {}\n if res.get('errors'):\n return res\n elif res.get('objects'):\n buyer['id'] = res['objects'][0]['resource_pk']\n buyer['pin'] = res['objects'][0]['pin']\n buyer['uuid'] = res['objects'][0]['uuid']\n elif res.get('resource_pk'):\n buyer['id'] = res['resource_pk']\n buyer['pin'] = res['pin']\n buyer['uuid'] = res['uuid']\n return buyer\n\n def parse_res(self, res):\n if res == '':\n return {}\n if isinstance(res, (str, unicode)):\n return json.loads(res)\n return res\n\n def safe_run(self, command, *args, **kwargs):\n try:\n res = command(*args, **kwargs)\n except HttpClientError as e:\n res = self.parse_res(e.response.content)\n for key, value in res.iteritems():\n res[key] = [ERROR_STRINGS[v] for v in value]\n return {'errors': res}\n return self.parse_res(res)\n\n def create_buyer(self, uuid, pin=None):\n \"\"\"Creates a buyer with an optional PIN in solitude.\n\n :param uuid: String to identify the buyer by.\n :param pin: Optional PIN that will be hashed.\n :rtype: dictionary\n 
\"\"\"\n\n res = self.safe_run(self.slumber.generic.buyer.post, {'uuid': uuid,\n 'pin': pin})\n return self._buyer_from_response(res)\n\n def change_pin(self, buyer_id, pin):\n \"\"\"Changes a buyer's PIN in solitude.\n\n :param buyer_id integer: ID of the buyer you'd like to change the PIN\n for.\n :param pin: PIN to replace the buyer's pin with.\n :rtype: dictionary\n \"\"\"\n res = self.safe_run(self.slumber.generic.buyer(id=buyer_id).patch,\n {'pin': pin})\n # Empty string is a good thing from tastypie for a PATCH.\n if 'errors' in res:\n return res\n return {}\n\n def get_buyer(self, uuid):\n \"\"\"Retrieves a buyer by the their uuid.\n\n :param uuid: String to identify the buyer by.\n :rtype: dictionary\n \"\"\"\n\n res = self.safe_run(self.slumber.generic.buyer.get, uuid=uuid)\n return self._buyer_from_response(res)\n\n def verify_pin(self, uuid, pin):\n \"\"\"Checks the buyer's PIN against what is stored in solitude.\n\n :param uuid: String to identify the buyer by.\n :param pin: PIN to check\n :rtype: boolean\n \"\"\"\n\n res = self.safe_run(self.slumber.buyer.check_pin.post, {'uuid': uuid,\n 'pin': pin})\n return res['valid']\n\n\nif not client:\n if getattr(settings, 'SOLITUDE_URL', False):\n client = SolitudeAPI(settings.SOLITUDE_URL)\n else:\n client = SolitudeAPI('http://example.com')\n","sub_path":"lib/solitude/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"528241367","text":"\"\"\"6. 아래와 같이 별이 찍히게 출력하시오.\r\n숫자를 입력하세요 : 5\r\n ★\r\n ★★\r\n ★★★\r\n ★★★★\r\n★★★★★\r\n ★★★★\r\n ★★★\r\n ★★\r\n ★\r\n\r\n예시\r\n<입력>\r\n숫자를 입력하세요 : 5\r\n\r\n<출력>\r\n ★\r\n ★★\r\n ★★★\r\n ★★★★\r\n★★★★★ \r\n ★★★★\r\n ★★★\r\n ★★\r\n ★\r\n\r\n\r\n\"\"\"\r\n\r\nnumber = int(input(\"숫자를 입력하세요 : \"))\r\n\r\nreverse = False\r\nstars = 1\r\n\r\nwhile 0 < stars:\r\n for i in range(number - stars):\r\n print(' ', end='')\r\n for i in range(stars):\r\n print('★', end='')\r\n print()\r\n\r\n if stars == number:\r\n reverse = True\r\n\r\n if not reverse:\r\n stars += 1\r\n else:\r\n stars -= 1\r\n","sub_path":"quiz/pre_python_06.py","file_name":"pre_python_06.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"459235477","text":"import numpy as np\nimport scipy.io as sio\nfrom sklearn import svm\n\n\n# Ref - http://cs229.stanford.edu/materials/smo.pdf\n\n\ndef checkAccuracy(prediction, yTest):\n 'Get percentage of correct predictions'\n correctCtr = 0\n for i in range(0, yTest.__len__()):\n if prediction[i] == yTest[i]:\n correctCtr += 1\n\n correctCtr = correctCtr / yTest.__len__()\n\n return correctCtr * 100\n\n\ndef getJRandom(i, m):\n 'Get j not equal to i'\n j = i\n while j == i:\n j = int(np.random.uniform(0, m))\n return j\n\n\ndef getLandH(Y, alphas, i, j, C):\n yi = Y[0, i]\n yj = Y[0, j]\n ai = alphas[i, 0]\n aj = alphas[j, 0]\n\n L = 0\n H = 0\n\n if yi != yj:\n L = max(0, aj - ai)\n H = min(C, C + aj - ai)\n else:\n L = max(0, aj + ai - C)\n H = min(C, ai + aj)\n\n return L, H\n\n\ndef getPrediction(X_tst, X, Y, alphas, b):\n predList = []\n m = np.shape(X_tst)[0]\n for i in range(0, m):\n\n pred = np.dot(np.multiply(alphas, Y.transpose()).transpose(), np.dot(X, X_tst[i, :].transpose())) # fx\n pred += b\n\n #print('np.shape(alphas): ', np.shape(alphas))\n #print('np.shape(pred): ', np.shape(pred))\n #print('np.shape(Y.transpose()): ', np.shape(Y.transpose()))\n\n if pred < 0:\n 
predList.append(-1) # -1\n else:\n predList.append(1)\n\n return predList\n\n\ndef getTargetVariables(oldY):\n 'Convert Y from 0,1 to -1,1'\n Y = []\n for i in range(0, oldY.__len__()):\n if oldY[i] == 0:\n Y.append(-1)\n else:\n Y.append(1)\n return Y\n\n\ndef smo(X_trn, Y_trn, C, maxPasses, tolerance):\n # Init\n X = np.mat(X_trn)\n Y = np.mat(Y_trn)\n m, n = np.shape(X)\n alphas = np.mat(np.zeros((m, 1)))\n b = 0\n passes = 0\n\n while passes < maxPasses:\n\n numChangedAlphas = 0\n\n for i in range(0, m):\n\n fx = np.dot(np.multiply(alphas, Y.transpose()).transpose(), np.dot(X, X[i, :].transpose())) + b\n\n ei = fx - Y[0, i]\n # print('Error: ', ei)\n\n yiEi = Y[0, i] * ei\n\n # KKT check: the second branch tests alpha > 0 (not > C), per the referenced SMO pseudocode\n if ((yiEi < -tolerance) and (alphas[i, 0] < C)) or ((yiEi > tolerance) and (alphas[i, 0] > 0)):\n\n j = getJRandom(i, m)\n\n fxj = np.dot(np.multiply(alphas, Y.transpose()).transpose(), np.dot(X, X[j, :].transpose())) + b\n ej = fxj - Y[0, j]\n\n # save old alphas\n alpha_i_old = alphas[i, 0]\n alpha_j_old = alphas[j, 0]\n\n # get L and H\n L, H = getLandH(Y, alphas, i, j, C)\n\n if L == H:\n # print('--- L=H')\n continue\n\n # eta (renamed from n, which shadowed the feature count from np.shape above)\n eta = 2.0 * np.dot(X[i, :], X[j, :].transpose()) - np.dot(X[i, :], X[i, :].transpose()) - np.dot(X[j, :], X[j, :].transpose())\n\n if eta >= 0:\n # print('--- eta >= 0')\n continue\n\n alphas[j, 0] = alphas[j, 0] - ((Y[0, j] * (ei - ej)) / eta)\n\n # clip aj\n if alphas[j, 0] > H:\n alphas[j, 0] = H\n elif alphas[j, 0] < L:\n alphas[j, 0] = L\n\n if abs(alphas[j, 0] - alpha_j_old) < 0.00001:\n # print('--- < 0.00001')\n continue\n\n # set ai from aj\n alphas[i, 0] = alphas[i, 0] + Y[0, i] * Y[0, j] * (alpha_j_old - alphas[j, 0])\n\n # offset\n b1 = b - ei - (Y[0, i] * (alphas[i, 0] - alpha_i_old) * np.dot(X[i, :], X[i, :].transpose())) - (\n Y[0, j] * (alphas[j, 0] - alpha_j_old) * np.dot(X[i, :], X[j, :].transpose()))\n b2 = b - ej - (Y[0, i] * (alphas[i, 0] - alpha_i_old) * np.dot(X[i, :], X[j, :].transpose())) - (\n Y[0, j] * (alphas[j, 0] - alpha_j_old) * np.dot(X[j, :], X[j, :].transpose()))\n\n if ((0 < alphas[i, 0]) and (alphas[i, 0] < C)):\n b = b1\n elif ((0 < alphas[j, 0]) and (alphas[j, 0] < C)):\n b = b2\n else:\n b = (b1 + b2) / 2\n\n numChangedAlphas += 1\n\n if numChangedAlphas == 0:\n passes += 1\n else:\n passes = 0\n\n return alphas, b\n\n\ndef mySvm(X, Y, cList, X_tst, Y_tst, r=None):\n # r defaults to None so two-class callers (customSvm, main) can omit it\n maxPasses = 1000\n tolerance = 0.01\n\n # print C - Accuracy for each\n\n results = []\n\n for c in cList:\n alphas, b = smo(X, Y, c, maxPasses, tolerance)\n\n pred = getPrediction(np.mat(X_tst), np.mat(X), np.mat(Y), alphas, b) # 0,1\n\n accuracy = checkAccuracy(pred, Y_tst)\n\n t = (alphas, b, accuracy, r)\n\n results.append(t)\n\n #for t in results:\n # print('SVM Accuracy: ', t[2], '%')\n # print('\\n')\n\n results = sorted(results, key= lambda obj: obj[2], reverse=True)\n\n return results[0]\n\n\ndef mySvmNew(X, Y, c, X_tst, Y_tst, r):\n maxPasses = 1000\n tolerance = 0.1\n\n alphas, b = smo(X, Y, c, maxPasses, tolerance)\n\n pred = getPrediction(np.mat(X_tst), np.mat(X), np.mat(Y), alphas, b) # 0,1\n\n accuracy = checkAccuracy(pred, Y_tst)\n\n t = (alphas, b, accuracy, r)\n\n return t, accuracy\n\n\ndef skLearnSvm(X, Y, X_tst, Y_tst):\n clf = svm.SVC(gamma='scale')\n clf.fit(X, Y)\n\n Y_test = getTargetVariables(Y_tst)\n print('sklearn accuracy: ', checkAccuracy(clf.predict(X_tst), Y_test), '\\n')\n\n return\n\ndef getTargetVariablesTest(oldY):\n 'Convert Y from -1,0,1 to -1,1'\n Y = []\n for i in range(0, oldY.__len__()):\n if (oldY[i] == 0) or oldY[i] == -1:\n Y.append(-1)\n else:\n 
Y.append(1)\n return Y\n\n\ndef getTargetVarMultiClass(oldY, res):\n\n Y = []\n\n for i in range(0, oldY.__len__()):\n if oldY[i] == res:\n Y.append(1)\n else:\n Y.append(-1)\n return Y\n\n\ndef getPredictionMultiClass(xTest, X, Y, wList):\n\n predProbabilites = []\n\n m = np.shape(xTest)[0]\n\n for w in wList:\n predictionW = []\n newY = np.mat(getTargetVarMultiClass(Y, w[3]))\n for i in range(0, m):\n #pred = np.dot(np.multiply(w[0], Y.transpose()).transpose(), np.dot(X, xTest[i, :].transpose())) # fx\n pred = np.dot(np.multiply(w[0], newY.transpose()).transpose(), np.dot(X, xTest[i, :].transpose())) # fx\n pred += w[1]\n predictionW.append(pred)\n\n #print('np.shape(w[0]): ', np.shape(w[0]))\n #print('np.shape(pred): ', np.shape(pred))\n #print('np.shape(Y.transpose()): ', np.shape(Y.transpose()))\n #print('np.shape(predictionW): ', np.shape(predictionW))\n predProbabilites.append(predictionW)\n\n prediction = []\n\n #print('type(predProbabilites): ', type(predProbabilites))\n #print('type(predProbabilites): ', type(predProbabilites[0]))\n\n #print('\\npredProbabilites[0]: ', predProbabilites[0])\n #print('predProbabilites[1]: ', predProbabilites[1])\n #print('predProbabilites[2]: ', predProbabilites[2])\n\n for i in range(predProbabilites[0].__len__()):\n\n if (predProbabilites[0][i] > predProbabilites[1][i]) and (predProbabilites[0][i] > predProbabilites[2][i]):\n prediction.append(1) # win\n elif (predProbabilites[1][i] > predProbabilites[0][i]) and (predProbabilites[1][i] > predProbabilites[2][i]):\n prediction.append(0) # draw\n else:\n prediction.append(-1) # lose\n\n return prediction\n\n\ndef checkAccuracyMultiClass(xTest, yTest, X, Y, wList):\n\n #Y = Y.transpose()\n #yTest = yTest.transpose()\n prediction = getPredictionMultiClass(xTest, X, Y, wList)\n\n print('prediction:\\n', prediction)\n print(yTest.T)\n\n correctCtr = 0\n for i in range(0, xTest.__len__()):\n if prediction[i] == yTest[i]:\n correctCtr += 1\n\n correctCtr = correctCtr / prediction.__len__()\n\n return correctCtr\n\n\ndef customSvmMultiClass(X_trn, Y_trn, X_tst, Y_tst):\n\n cList = [0.1, 0.5]\n wList = []\n resultTuple = (1, 0, -1) # 1- win, 0 - draw, -1 - loss\n\n for r in resultTuple:\n X = X_trn\n Y = getTargetVarMultiClass(Y_trn, r)\n yTst = getTargetVarMultiClass(Y_tst, r)\n w = mySvm(X, Y, cList, X_tst, yTst, r)\n wList.append(w)\n\n return wList\n\ndef customSvmMultiClassNew(X_trn, Y_trn, X_tst, Y_tst):\n\n #cList = [0.1, 0.5]\n #cList = [1, 2, 3]\n cList = [0.1, 1, 5, 10, 15, 25, 50]\n #wList = []\n resultTuple = (1, 0, -1) # 1- win, 0 - draw, -1 - loss\n findings = []\n #c = 0.1\n for c in cList:\n wList = []\n for r in resultTuple:\n X = X_trn\n Y = getTargetVarMultiClass(Y_trn, r)\n yTst = getTargetVarMultiClass(Y_tst, r)\n #w = mySvmNew(X, Y, cList, X_tst, yTst, r)\n w, _ = mySvmNew(X, Y, c, X_tst, yTst, r)\n wList.append(w)\n\n # get acc for multiclass\n acc = checkAccuracyMultiClass(X_tst, Y_tst, X_trn, Y_trn, wList)\n t = (wList, c, acc)\n\n findings.append(t)\n\n for i in findings:\n print(i[1],'-',i[2])\n\n findings = sorted(findings, key=lambda obj: obj[2], reverse=True)\n\n return findings[0][0], findings\n\n\ndef customSvm(X_trn, Y_trn, X_tst, Y_tst):\n\n #cList = [0.1, 0.2, 0.3, 1, 3, 5, 10]\n cList = [0.1, 0.5]\n\n X = X_trn\n Y = getTargetVariablesTest(Y_trn)\n\n print('\\nSVM for given data:\\n')\n\n skLearnSvm(X, Y, X_tst, Y_tst)\n\n mySvm(X, Y, cList, X_tst, Y_tst)\n\n #skLearnSvm(X, Y, X_tst, Y_tst)\n\n\n return\n\n\ndef main():\n # Reg param\n #cList = [0.1, 0.2, 0.3, 0.4, 0.5, 
1, 2, 3, 5, 10]\n cList = [0.1]\n\n dataSet = sio.loadmat('./dataset1_svm.mat', squeeze_me=True)\n # Y_tst, X_trn Y_trn X_tst\n\n X_trn = dataSet['X_trn']\n Y_trn = dataSet['Y_trn']\n X_tst = dataSet['X_tst']\n Y_tst = dataSet['Y_tst']\n\n X = X_trn\n Y = getTargetVariables(Y_trn)\n\n print('SVM for dataset1:\\n')\n mySvm(X, Y, cList, X_tst, Y_tst)\n\n skLearnSvm(X, Y, X_tst, Y_tst)\n\n #############\n\n dataSet = sio.loadmat('./dataset2_svm.mat', squeeze_me=True)\n # Y_tst, X_trn Y_trn X_tst\n\n X_trn = dataSet['X_trn']\n Y_trn = dataSet['Y_trn']\n X_tst = dataSet['X_tst']\n Y_tst = dataSet['Y_tst']\n\n X = X_trn\n Y = getTargetVariables(Y_trn)\n\n print('SVM for dataset2:\\n')\n mySvm(X, Y, cList, X_tst, Y_tst)\n\n skLearnSvm(X, Y, X_tst, Y_tst)\n\n return\n\n\n#main()\n\n\n","sub_path":"svmSmo.py","file_name":"svmSmo.py","file_ext":"py","file_size_in_byte":10625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"117100500","text":"import sqlite3\nfrom sqlite3 import Error\n\n\nclass DataBase:\n\n def __init__(self):\n self.connection()\n\n def connection(self):\n connection = None\n try:\n connection = sqlite3.connect(\"DataBase.db\")\n except Error as ex:\n print(ex)\n return connection\n\n def dql(self, query):\n connection = self.connection()\n cursor = connection.cursor()\n cursor.execute(query)\n select = cursor.fetchall()\n connection.close()\n return select\n\n def dml(self, query):\n try:\n connection = self.connection()\n cursor = connection.cursor()\n cursor.execute(query)\n connection.commit()\n connection.close()\n except Error as ex:\n return ex\n","sub_path":"DB.py","file_name":"DB.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"122134704","text":"# (C) Copyright 2018-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\nimport os\n\nimport pkg_resources\n\n# Main package name, used for installing development sources.\nPACKAGE_NAME = \"traits_futures\"\n\n# Prefix used for generated EDM environments.\nPREFIX = PACKAGE_NAME.lower().replace(\"_\", \"-\")\n\n# Platforms\nMACOS = \"osx-x86_64\"\nLINUX = \"rh7-x86_64\"\nWINDOWS = \"win-x86_64\"\n\n# Python versions\nPYTHON36 = \"py36\"\nPYTHON_VERSIONS = [PYTHON36]\n\n# Toolkits\nNULL = \"null\" # no GUI toolkit; an asyncio event loop is used for tests\nPYQT = \"pyqt\" # Qt 4, PyQt\nPYQT5 = \"pyqt5\" # Qt 5, PyQt\nPYSIDE2 = \"pyside2\" # Qt 5, Qt for Python\nWXPYTHON = \"wxpython\" # wxPython 4\nTOOLKITS = [NULL, PYQT, PYQT5, PYSIDE2, WXPYTHON]\n\n# Default Python version and toolkit.\nDEFAULT_PYTHON = PYTHON36\nDEFAULT_TOOLKIT = PYSIDE2\n\n# Location of repository root. Assumes that the ci script is being\n# run from the root of the repository.\nROOT_DIR = os.path.abspath(\".\")\nEXAMPLES_DIR = os.path.join(ROOT_DIR, \"docs\", \"source\", \"guide\", \"examples\")\nPACKAGE_DIR = os.path.join(ROOT_DIR, PACKAGE_NAME)\nCOVERAGE_DIR = os.path.join(ROOT_DIR, \"coverage\")\n\n# Directories containing isort configurations. 
We need to run isort\n# separately from each such directory.\nISORT_ROOTS = [\n ROOT_DIR,\n EXAMPLES_DIR,\n]\n\n# Locations of data directories for the ci package.\nDATA = pkg_resources.resource_filename(\"ci\", \"data\")\n\n# Locations of documentation directories.\nDOCS_DIR = os.path.join(ROOT_DIR, \"docs\")\nDOCS_SOURCE_DIR = os.path.join(DOCS_DIR, \"source\")\nDOCS_API_SOURCE_DIR = os.path.join(DOCS_SOURCE_DIR, \"api\")\nDOCS_BUILD_DIR = os.path.join(DOCS_DIR, \"build\")\n\n# Templates for environment names.\nENVIRONMENT_TEMPLATE = \"{prefix}-{python_version}-{toolkit}\"\n\n# EDM configuration file.\nEDM_CONFIGURATION = os.path.join(DATA, \"edm.yml\")\n\n# Mapping from example names to example script paths.\nEXAMPLES = {\n \"squares\": os.path.join(EXAMPLES_DIR, \"slow_squares.py\"),\n \"pi\": os.path.join(EXAMPLES_DIR, \"pi_iterations.py\"),\n \"primes\": os.path.join(EXAMPLES_DIR, \"prime_counting.py\"),\n}\n\n# Python runtime versions.\nRUNTIME_VERSION = {\n PYTHON36: \"3.6\",\n}\n\n# Platforms and Python versions that we support.\n# Triples (edm-platform-string, Python major.minor version, GUI toolkit)\nPLATFORMS = [\n (MACOS, PYTHON36, NULL),\n (LINUX, PYTHON36, NULL),\n (WINDOWS, PYTHON36, NULL),\n (MACOS, PYTHON36, PYQT),\n (LINUX, PYTHON36, PYQT),\n (WINDOWS, PYTHON36, PYQT),\n (MACOS, PYTHON36, PYQT5),\n (LINUX, PYTHON36, PYQT5),\n (WINDOWS, PYTHON36, PYQT5),\n (MACOS, PYTHON36, PYSIDE2),\n (LINUX, PYTHON36, PYSIDE2),\n (WINDOWS, PYTHON36, PYSIDE2),\n (MACOS, PYTHON36, WXPYTHON),\n (LINUX, PYTHON36, WXPYTHON),\n (WINDOWS, PYTHON36, WXPYTHON),\n]\n\n# Dependencies needed for all platforms, toolkits and Python versions.\nCORE_DEPS = [\n \"pyface\",\n \"setuptools\",\n \"traits\",\n]\n\n# Python-version-specific core dependencies. Dictionary mapping Python version\n# to list of requirements.\nVERSION_CORE_DEPS = {}\n\n# Additional packages needed for running tests and style checks.\nADDITIONAL_CI_DEPS = [\n \"flake8\",\n \"flake8_ets\",\n \"isort\",\n \"pip\",\n]\n\n# Toolkit-specific ci dependencies. 
Dictionary mapping toolkit to\n# list of requirements.\nTOOLKIT_CI_DEPS = {\n PYQT: [\"pyqt\", \"traitsui\"],\n PYQT5: [\"pyqt5\", \"traitsui\"],\n PYSIDE2: [\"pyside2\", \"traitsui\"],\n # wxPython is not yet available through EDM, and needs special\n # handling in the main script.\n WXPYTHON: [\"traitsui\"],\n}\n\n# Additional packages needed for local development, examples.\nADDITIONAL_DEVELOP_DEPS = [\n \"chaco\",\n \"coverage\",\n \"enable\",\n \"enthought_sphinx_theme\",\n \"numpy\",\n \"sphinx\",\n \"traitsui\",\n]\n\n\ndef core_dependencies(python_version, toolkit):\n \"\"\"\n Compute core dependencies for the Python version and toolkit.\n \"\"\"\n # Make a copy to avoid accidentally mutating the CORE_DEPS global.\n dependencies = list(CORE_DEPS)\n dependencies.extend(VERSION_CORE_DEPS.get(python_version, []))\n return dependencies\n\n\ndef ci_dependencies(python_version, toolkit):\n \"\"\"\n Return dependencies for CI\n \"\"\"\n dependencies = core_dependencies(python_version, toolkit)\n dependencies.extend(ADDITIONAL_CI_DEPS)\n dependencies.extend(TOOLKIT_CI_DEPS.get(toolkit, []))\n return dependencies\n\n\ndef develop_dependencies(python_version, toolkit):\n \"\"\"\n Return dependencies for development.\n \"\"\"\n dependencies = ci_dependencies(python_version, toolkit)\n dependencies.extend(ADDITIONAL_DEVELOP_DEPS)\n return dependencies\n","sub_path":"ci/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"545895137","text":"import numpy as np\n\nlist1 = [1 , 2 , 3]\nlist2 = list1\nlist2 [1] = \"XX\"\nalphList = ['a', 'b', 'c']\nalphList1 = alphList\nalphList[2] = \"D\"\n\ndi = {\"eins\": 1, \"zwei\": 2, \"drei\": 3}\ndi2 = di\ndi2[\"drei\"] = 4\n\nnpa = np.array([6, 7, 8])\nnpb = npa\nnpb[1] = 9","sub_path":"Exercise02/Python/Task01/kai.py","file_name":"kai.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160114607","text":"import requests\nimport os\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django.dispatch import receiver\nfrom django.shortcuts import render\nimport subprocess\nimport sys\nfrom subprocess import run,PIPE\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import HttpResponse\nimport pickle\nmodel = pickle.load(open('model.pkl', 'rb'))\n#Measure pitch of all wav files in directory\nimport glob\nimport numpy as np\nimport pandas as pd\nimport parselmouth\nfrom parselmouth.praat import call\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set() # Use seaborn's default style to make attractive graphs\nimport statistics \nfrom django import forms\nimport glob\nimport decimal\n\n\n\ndef deleteMedia(request):\n # Get a list of all the file paths that ends with .txt from in specified directory\n fileList = glob.glob('media/*.mp3')\n \n # Iterate over the list of filepaths & remove each file.\n for filePath in fileList:\n try:\n os.remove(filePath)\n except:\n print(\"Error while deleting file : \", filePath)\n return render(request,'upload1.html')\n\n\ndef button(request):\n return render(request,'upload1.html')\ndef vow(request):\n return render(request, 'vowel.html')\ndef el(request):\n aud=request.FILES['audio']\n aud1=request.FILES['audio1']\n aud2=request.FILES['audio2']\n fs=FileSystemStorage()\n filename=fs.save(aud.name,aud)\n 
filename1=fs.save(aud1.name,aud1)\n filename2=fs.save(aud2.name,aud2)\n fileurl=fs.open(filename)\n fileurl1=fs.open(filename1)\n fileurl2=fs.open(filename2)\n templateurl=fs.url(filename)\n print(\"dodo\")\n aud = subprocess.Popen([sys.executable,'py/vowel.py',str(fileurl),str(fileurl1),str(fileurl2),str(filename),str(filename1),str(filename2)],shell=False,stdout=PIPE)\n aud.communicate()\n print(\"dodo\")\n return render(request, 'vowel.html',{'raw_url':templateurl,'edit_url':aud.stdout})\ndef gen(request):\n return render(request,'audio.html')\ndef ttll(request):\n audi=request.FILES['files']\n print(\"audio is \",audi)\n fs=FileSystemStorage()\n filename=fs.save(audi.name,audi)\n fileurl=fs.open(filename)\n templateurl=fs.url(filename)\n print(\"file raw url\",filename)\n print(\"file full url\", fileurl)\n print(\"template url\",templateurl)\n print(\"dodo\")\n print(\"dodo\")\n # This is the function to measure voice pitch\n def measurePitch(voiceID, f0min, f0max, unit):\n sound = parselmouth.Sound(voiceID) # read the sound\n pitch = call(sound, \"To Pitch\", 0.0, f0min, f0max) #create a praat pitch object\n meanfreq = call(pitch, \"Get mean\", 0, 0, unit) # get mean pitch\n sd = call(pitch, \"Get standard deviation\", 0 ,0, unit) # get standard deviation\n return meanfreq, sd\n\n # Go through all the wave files in the folder and measure pitch\n sound = parselmouth.Sound(str(fileurl))\n (meanfreq, sd) = measurePitch(sound, 75, 600, \"Hertz\")\n \n meanfreq = meanfreq/1000\n sd = sd/1000\n \n pitch = sound.to_pitch()\n pitch_values = pitch.selected_array['frequency']\n \n minfun = (np.argmin(pitch_values))/1000\n maxfun = (np.argmax(pitch_values))/1000\n meanfun = (statistics.mean(pitch_values))/1000 \n\n df = pd.DataFrame(np.column_stack([meanfreq, sd, meanfun, minfun, maxfun]),\n columns=[\"meanfreq\", \"sd\", \"meanfun\", \"minfun\", \"maxfun\"]) #add these lists to pandas in the right order\n \n df.set_index(\"meanfreq\", inplace=True)\n # Write out the updated dataframe\n df.to_csv('media/test.csv')\n input_model = pd.read_csv('media/test.csv')\n prediction = model.predict(input_model)\n print(prediction)\n return render(request,'audio.html', {'prediction' : 'PREDICTED GENDER IS {}'.format(prediction)})\n\n\ndef upload(request):\n return render(request,'upload.html', {'start_page': 'yes'})\n\ndef SaveAudio(request):\n aud=request.FILES['audio']\n print(\"audio is \",aud)\n fs=FileSystemStorage()\n filename=fs.save(aud.name,aud)\n fileurl=fs.open(filename)\n templateurl=fs.url(filename)\n print(\"file raw url\",filename)\n print(\"file full url\", fileurl)\n print(\"template url\",templateurl)\n print(\"dodo\")\n\n #to get the info of the subject\n subjectInfoSave = 'media/subjectInfo.csv'\n sub_info = pd.read_csv(subjectInfoSave)\n # add the audio path infomation of the subject\n sub_info['audio_path']=[fileurl]\n #sub_info.to_csv('media/subjectInfo.csv',index=False)\n\n\n\n #Plots and waveinfo\n sound = parselmouth.Sound(str(fileurl))\n StartTime=sound.xmin\n StartTime = round(StartTime,3)\n EndTime=sound.xmax\n EndTime = round(EndTime,3)\n\n sub_info['audio_ST']=[str(StartTime)]\n sub_info['audio_ET']=[str(EndTime)]\n sub_info.to_csv('media/subjectInfo.csv',index=False) \n\n plt.figure()\n plt.plot(sound.xs(), sound.values.T[:,0])\n plt.xlim([sound.xmin, sound.xmax])\n plt.xlabel(\"time [s]\")\n plt.ylabel(\"amplitude\")\n plt.savefig('media/sound.png')#, or plt.savefig(\"sound.pdf\")\n\n def draw_spectrogram(spectrogram, dynamic_range=70):\n X, Y = spectrogram.x_grid(), 
spectrogram.y_grid()\n sg_db = 10 * np.log10(spectrogram.values)\n plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() - dynamic_range, cmap='afmhot')\n plt.ylim([spectrogram.ymin, spectrogram.ymax])\n plt.xlabel(\"time [s]\")\n plt.ylabel(\"frequency [Hz]\")\n\n #pre_emphasized_sound = sound.copy()\n #pre_emphasized_sound.pre_emphasize()\n #spectrogram = pre_emphasized_sound.to_spectrogram(window_length=0.03, maximum_frequency=5000)\n \n spectrogram = sound.to_spectrogram(window_length=0.02)\n plt.figure()\n draw_spectrogram(spectrogram)\n plt.xlim([sound.xmin, sound.xmax])\n plt.savefig('media/spectrogram.png')\n\n return render(request,'upload.html', {'wave_trim':'yes','max_dur':sound.xmax}) \n\ndef wavForm(request):\n\n #to get the info of the subject\n subjectInfoSave = 'media/subjectInfo.csv'\n sub_info = pd.read_csv(subjectInfoSave)\n fileurl = str(sub_info.audio_path[0])\n\n sound = parselmouth.Sound(str(fileurl))\n \n StartTime = request.GET.get('StartTime', sound.xmin)\n StartTime = float(StartTime)\n StartTime = round(StartTime,3)\n EndTime = request.GET.get('EndTime', sound.xmax)\n EndTime = float(EndTime)\n EndTime = round(EndTime,3)\n \n # clamp the requested trim window to the bounds of the recording\n if StartTime<sound.xmin:\n StartTime=round(sound.xmin,3)\n if EndTime>sound.xmax:\n EndTime=round(sound.xmax,3)\n #if EndTime 0 else m\n\n\t\t\t# Create an m x n matrix where all elements are 0.\n\t\t\tself.matrix = [[0]*n for i in range(m)]\n\n\tdef get(self, i, j):\n\t\tif i < 0:\n\t\t\traise IndexError(\"Line index out of bounds ({0}).\"\n\t\t\t\t.format(i))\n\n\t\tif j < 0:\n\t\t\traise IndexError(\"Column index out of bounds ({0}).\"\n\t\t\t\t.format(j))\n\n\t\treturn self.matrix[i][j]\n\n\tdef set(self, i, j, a):\n\t\tself.matrix[i][j] = a\n\n\tdef clone(self):\n\t\tM = Matrix(self.m, self.n)\n\n\t\tfor i in range(self.m):\n\t\t\tfor j in range(self.n):\n\t\t\t\tM.set(i, j, self.get(i, j))\n\n\t\treturn M\n\n\tdef sum(self, other):\n\t\t\"\"\" The sum of two matrices. \"\"\"\n\n\t\tC = self.clone()\n\n\t\tfor i in range(C.m):\n\t\t\tfor j in range(C.n):\n\t\t\t\tC.set(i, j, self.get(i,j) + other.get(i, j))\n\n\t\treturn C\n\n\tdef difference(self, other):\n\t\t\"\"\" The difference of two matrices (this - other). \"\"\"\n\t\treturn self.sum(other.scalar_product(-1))\n\n\tdef real_product(self, other):\n\t\t\"\"\" Multiplication of two matrices. \"\"\"\n\n\t\tassert self.n == other.m, \\\n\t\t\t\"Number of columns in A must be equal to number of \\\n\t\t\t\tlines in B\"\n\n\t\tC = Matrix(self.m, other.n)\n\n\t\tfor i in range(C.m):\n\t\t\tfor k in range(C.n):\n\t\t\t\tfor j in range(self.n):\n\t\t\t\t\tC.set(i, k, \n\t\t\t\t\t\tC.get(i,k) + self.get(i,j) * other.get(j,k))\n\n\t\treturn C\n\n\tdef scalar_product(self, scalar):\n\t\t\"\"\" Multiplication of a scalar with a matrix. \"\"\"\n\t\tC = self.clone()\n\n\t\tfor i in range(C.m):\n\t\t\tfor j in range(C.n):\n\t\t\t\tC.set(i, j, self.get(i,j) * scalar)\n\n\t\treturn C\n\n\tdef boolean_product(self, other):\n\t\t\"\"\" Boolean multiplication of two matrices. 
\"\"\"\n\n\t\tassert self.n == other.m, \\\n\t\t\t\"Number of columns in A must be equal to number of \\\n\t\t\t\tlines in B\"\n\n\t\tC = Matrix(self.m, other.n)\n\n\t\tfor i in range(C.m):\n\t\t\tfor k in range(C.n):\n\t\t\t\tfor j in range(self.n):\n\t\t\t\t\tC.set(i, k, int(C.get(i, k)>0 or \n\t\t\t\t\t\t(self.get(i, j)>0 and other.get(j, k)>0)))\n\n\t\treturn C\n\n\tdef __str__(self):\n\t\tstring = \"\"\n\t\tfor i in range(self.m):\n\t\t\tfor j in range(self.n):\n\t\t\t\tstring = string + \"{0:2} \".format(self.get(i, j))\n\t\t\tstring = string + \"\\n\"\n\n\t\treturn string\n\n\tdef __repr__(self):\n\t return self.__str__()\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"187063365","text":"from __future__ import print_function, division\nimport enaml\nfrom enaml.qt.qt_application import QtApplication\nimport os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nfrom skxray.fitting.api import model_list as valid_models\nfrom replay.pipeline.pipeline import (DataMuggler)\nfrom replay.model.scalar_model import ScalarCollection\nfrom replay.model.fitting_model import MultiFitController\nfrom metadataStore.api import analysis\n\ndm = None\nview = None\n\nsleep_time = 0.25\n\nprev_hdr_id = None\nprev_max_seqno = -1\n\ndef init_ui(data_muggler):\n \"\"\" Do the enaml import and set up of the UI\n\n Parameters\n ----------\n data_muggler : replay.pipeline.DataMuggler\n \"\"\"\n global view\n with enaml.imports():\n from replay.gui.pipeline_hitting_mds import PipelineView\n\n c_c_combo_fitter = MultiFitController(valid_models=valid_models)\n scalar_collection = ScalarCollection()\n scalar_collection.data_muggler = data_muggler\n scalar_collection.multi_fit_controller = c_c_combo_fitter\n view = PipelineView()\n # provide the pipeline view with its attributes\n view.grab_latest = grab_latest\n view.scalar_collection=scalar_collection\n view.multi_fit_controller = c_c_combo_fitter\n return view\n\n\ndef grab_latest():\n # global dm\n # # grab the most recent run header\n # original_hdr_id = ''\n # while not original_hdr_id:\n # # this is going to raise an exception if mds is empty, just not sure\n # # what it will raise\n # header, ev_desc, events, beamline_configs = analysis.find_latest()\n # original_hdr_id = header['_id']\n # time.sleep(sleep_time)\n #\n # # this is the song that never ends\n # while True:\n global prev_hdr_id\n global dm\n global prev_max_seqno\n # grab the latest data\n header, ev_desc, events, beamline_configs = analysis.find_last()\n current_hdr_id = header['_id']\n # print('line 76: view.make_new_dm: {}'.format(view.make_new_dm))\n # print('line 76: prev_hdr_id, current_hdr_id: {}, {}'.format(\n # prev_hdr_id, current_hdr_id))\n if prev_hdr_id != current_hdr_id:\n if view.make_new_dm:\n prev_hdr_id = current_hdr_id\n # create a new data muggler\n keys = []\n for e in ev_desc:\n keys.extend(e['data_keys'])\n keys = ev_desc[0]['data_keys']\n col_info = [(key, 'ffill', 0) for key in keys]\n dm = DataMuggler(col_info)\n view.scalar_collection.data_muggler = dm\n prev_hdr_id = current_hdr_id\n prev_max_seqno = -1\n else:\n view.currently_watching = False\n return\n\n # make sure that I only keep searching while the header id remains\n # the same\n # if not original_hdr_id == current_hdr_id:\n # break\n # dm.clear()\n cur_max_seq_no = prev_max_seqno\n for e in events:\n # print('event\\n-----')\n # pprint(e)\n seq_no = 
e['seq_no']\n if seq_no < prev_max_seqno:\n continue\n elif seq_no > cur_max_seq_no:\n cur_max_seq_no = seq_no\n try:\n key = e['time']\n except KeyError:\n try:\n key = e['data']['time']\n except KeyError:\n key = e['seq_no']\n \n dm.append_data(key, e['data'])\n prev_max_seqno = cur_max_seq_no\n\n view.make_new_dm = False\n # stash the header id\n\n\ndef main():\n app = QtApplication()\n # init the UI\n view = init_ui(dm)\n view.scalar_collection.data_muggler = None\n # init the header and event pvs\n\n # add the proper callbacks to the pvs\n view.show()\n app.start()\n\nif __name__ == '__main__':\n main()","sub_path":"replay/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"171429671","text":"\"\"\"\n2-input XOR example -- this is most likely the simplest possible example.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\n\nimport neat\n\n# 2-input XOR inputs and expected outputs.\nimport Main\n\nxor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]\nxor_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]\n\nthis = sys.modules[__name__]\nthis.gen: int = -1\n\n\ndef eval_genomes(genomes, config):\n i: int = 0\n best_genome = None\n best_net = None\n for genome_id, genome in genomes:\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n if i == 0:\n this.gen += 1\n best_genome = genome\n best_net = net\n # genome.fitness = Main.get_score(net=net, fastmode=False, gen=this.gen)\n genome.fitness = Main.get_score(net=net, fastmode=True, gen=this.gen)\n else:\n genome.fitness = Main.get_score(net=net, fastmode=True, gen=this.gen)\n\n if genome.fitness > best_genome.fitness:\n best_genome = genome\n best_net = net\n\n print(\"fitness: \" + str(genome.fitness))\n i += 1\n\n print(\"best fitness in gen \" + str(this.gen) + \": \" + str(best_genome.fitness))\n # net = neat.nn.FeedForwardNetwork.create(best, config)\n Main.get_score(net=best_net, fastmode=False, gen=this.gen)\n\n\n# Load configuration.\nconfig = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n 'neat_config1')\n\n# Create the population, which is the top-level object for a NEAT run.\np = neat.Population(config)\n\n# Add a stdout reporter to show progress in the terminal.\np.add_reporter(neat.StdOutReporter(False))\n\n# Run until a solution is found.\nwinner = p.run(eval_genomes, 1000)\n\n# Display the winning genome.\nprint(\"===================================================\")\nprint('\\nBest genome:\\n{!s}'.format(winner))\n# Show output of the most fit genome against training data.\nprint('\\nOutput:')\nwinner_net = neat.nn.FeedForwardNetwork.create(winner, config)\nfitness = Main.get_score(net=winner_net, fastmode=False)\nprint(\"final fitness: \" + str(fitness))\n# for xi, xo in zip(xor_inputs, xor_outputs):\n# output = winner_net.activate(xi)\n# print(\" input {!r}, expected output {!r}, got {!r}\".format(xi, xo, output))\n","sub_path":"src/neat1.py","file_name":"neat1.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"230335388","text":"import itertools\r\nimport string\r\nfrom math import log2\r\nimport collections\r\n\r\ncommonRussian = ('о', 'е', 'и', 'а', 'н', 'т')\r\nrussianBigrams = ('ст', 'но', 'ен', 'то', 'на')\r\nalphabet = 'абвгдежзийклмнопрстуфхцчшщьыэюя'\r\nalphabet_length = len(alphabet)\r\n\r\nletter_to_number = 
{k: v for v, k in enumerate(alphabet[:])}\r\nnumber_to_letter = {v: k for v, k in enumerate(alphabet[:])}\r\n\r\n\r\ndef gcdex(a, b):\r\n if b == 0:\r\n return a, 1, 0\r\n else:\r\n d, x, y = gcdex(b, a % b)\r\n return d, y, x - y * (a // b)\r\n\r\n\r\ndef countFun(a, b, n):\r\n result = []\r\n gcdOfAB = gcdex(a, n)[0]\r\n if gcdOfAB == 1:\r\n result.append((gcdex(a, n)[1] * b) % n)\r\n return result\r\n elif gcdOfAB > 1:\r\n if b % gcdOfAB == 0:\r\n x0 = countFun(a / gcdOfAB, b / gcdOfAB, n / gcdOfAB)[0]\r\n result.append(int(x0))\r\n for i in range(0, gcdOfAB - 1):\r\n x0 = x0 + n / gcdOfAB\r\n result.append(int(x0))\r\n return result\r\n else:\r\n result.append(0)\r\n return result\r\n\r\n\r\ndef bigram(ready_for_use_text):\r\n bigram_freq = {}\r\n length = len(ready_for_use_text[:])\r\n for i in range(0, length, 2):\r\n bigram = ready_for_use_text[i:i + 2]\r\n if bigram not in bigram_freq:\r\n bigram_freq[bigram] = 1 / length\r\n continue\r\n bigram_freq[bigram] += 1 / length\r\n return bigram_freq\r\n\r\n\r\ndef monogram(ready_for_use_text):\r\n sym_freq = {}\r\n length = len(ready_for_use_text[:])\r\n for i in ready_for_use_text:\r\n if i not in sym_freq:\r\n sym_freq[i] = 1 / length\r\n continue\r\n sym_freq[i] += 1 / length\r\n return sym_freq\r\n\r\n\r\ndef takeFive(text):\r\n result = []\r\n bigrams = sorted(bigram(text).items(), key=lambda x: x[1])\r\n for i in range(0, 5):\r\n result.append(bigrams.pop())\r\n for i in range(0, len(result)):\r\n result[i] = result[i][0]\r\n return result\r\n\r\n\r\ndef countBigramScore(bigrams):\r\n result = []\r\n for bigram in bigrams:\r\n result.append(letter_to_number[bigram[0]] * alphabet_length + letter_to_number[bigram[1]])\r\n return result\r\n\r\n\r\ndef countSingleBigramScore(bigram):\r\n result = letter_to_number[bigram[0]] * alphabet_length + letter_to_number[bigram[1]]\r\n return result\r\n\r\n\r\ndef findKeys(ab1, cd1, ab2, cd2):\r\n a = countFun(ab1 - cd1, ab2 - cd2, alphabet_length * alphabet_length)\r\n result = []\r\n if len(a) == 1:\r\n b = (ab1 - ab2 * a[0]) % (alphabet_length * alphabet_length)\r\n result.append((a[0], b))\r\n return result\r\n else:\r\n for value in a:\r\n b = (ab1 - ab2 * value) % (alphabet_length * alphabet_length)\r\n result.append((value, b))\r\n return result\r\n\r\n\r\ndef clear_text(text):\r\n punct = string.punctuation + '–' + '0' + '1' + '2' + '3' + '4' + '5' + '6' + '7' + '8' + '9' + '«' + '»' + '…' + '‘' + '№'\r\n punct = punct + string.ascii_lowercase\r\n for sym in punct[:]:\r\n text = text.replace(sym, '')\r\n text = text.replace('\\n', '')\r\n text = text.replace(' ', ' ')\r\n return text.lower()\r\n\r\n\r\ndef compare(popular, decrypted):\r\n popularNumber = countBigramScore(popular)\r\n decryptedNumber = countBigramScore(decrypted)\r\n popComb = combinations(popularNumber)\r\n decComb = combinations(decryptedNumber)\r\n result = []\r\n for i in range(0, len(decComb)):\r\n for y in range(0, len(popComb)):\r\n keyValues = findKeys(decComb[i][0], decComb[i][1], popComb[y][0], popComb[y][1])\r\n if len(keyValues) == 1:\r\n result.append(keyValues[0])\r\n else:\r\n for value in keyValues:\r\n result.append(value)\r\n result = list(dict.fromkeys(result))\r\n print(result)\r\n return result\r\n\r\n\r\ndef textDecryption(text, keys):\r\n bigrams = countBigramScore(bigram(text))\r\n finalKey = 0\r\n finalIndex = 0\r\n for key in keys:\r\n if key[0] == 0:\r\n continue\r\n decrypted = decrypt(bigrams, key)\r\n freq = countFrequency(decrypted)\r\n index = count_index(decrypted)\r\n if listInList(freq, 
commonRussian) >= 5:\r\n print(\"Trying key :\", key)\r\n print(\"With index = \", index)\r\n if (index >= finalIndex)&(index< 0.9):\r\n finalIndex = index\r\n finalKey = key\r\n\r\n else:\r\n continue\r\n decrypted = decrypt(bigrams, finalKey)\r\n freq = countFrequency(decrypted)\r\n print(\"\\nText decrypted with key : \", finalKey)\r\n print(\"The most common letters : \", freq)\r\n print(decrypted)\r\n print(\"index = \", finalIndex)\r\n print(\"\\n---------------------------------\")\r\n\r\n\r\ndef count_frequency(text):\r\n frequency = {}\r\n text = text.replace(' ', '').upper()\r\n counts = collections.Counter(text)\r\n for i in counts:\r\n frequency[i] = counts[i]\r\n return frequency\r\n\r\n\r\ndef count_index(text):\r\n text = text.replace(' ', '')\r\n length = len(text)\r\n if length == 1:\r\n result = 1 / ((length) * length)\r\n else:\r\n result = 1 / ((length - 1) * length)\r\n\r\n freq = count_frequency(text)\r\n sum = 0\r\n for letter in freq:\r\n sum += freq[letter] * (freq[letter] - 1)\r\n return result * sum\r\n\r\n\r\ndef listInList(list1, list2):\r\n result = 0\r\n for elem in list1:\r\n if elem in list2:\r\n result += 1\r\n return result\r\n\r\n\r\ndef countFrequency(text):\r\n result = []\r\n monograms = sorted(monogram(text).items(), key=lambda x: x[1])\r\n for i in range(0, 6):\r\n result.append(monograms.pop())\r\n for i in range(0, len(result)):\r\n result[i] = result[i][0]\r\n return result\r\n\r\n\r\ndef decountScore(bigram):\r\n a = bigram[0] % alphabet_length\r\n b = (bigram[0] - a) / alphabet_length\r\n ab = number_to_letter[b] + number_to_letter[a]\r\n return ab\r\n\r\n\r\ndef decrypt(bigrams, key):\r\n result = \"\"\r\n for bigram in bigrams:\r\n ab = decountScore(countFun(key[0], bigram - key[1], alphabet_length * alphabet_length))\r\n result = result + ab\r\n return result\r\n\r\n\r\ndef combinations(list):\r\n result = []\r\n copylist = list\r\n for item in list:\r\n for item2 in copylist:\r\n result.append((item2,item))\r\n return result\r\n\r\n\r\nf = open(\"var17_.txt\", encoding=\"utf8\")\r\ntext = f.read()\r\ntext = clear_text(text)\r\nfiveBigrams = takeFive(text)\r\nkeys = compare(russianBigrams, fiveBigrams)\r\ntextDecryption(text, keys)\r\nprint(\"finish\")\r\n","sub_path":"cp_3/CP3_FB-83_Trotska_Rakovych/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"509889775","text":"# import modules\n# we use webbrowser module to open url or html page from our machine\n\nimport webbrowser\n\n\nclass Movie():\n # documentation of class Movie\n \"\"\"\n this class provide a way to store movies information\n \"\"\"\n # constructor method invoked when instatance created __init__\n\n def __init__(self, title, storyline, poster_image_url,\n trailer_youtube_url):\n self.title = title\n self.storyline = storyline\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = trailer_youtube_url\n\n # this method show_trailer display the trailer of video on browser\n\n def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)\n","sub_path":"Movie Trailer webpage/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"91784137","text":"# coding=utf-8\n\nfrom django.test import TestCase\n\nfrom django_earthdistance.expressions import DistanceExpression\nfrom django_earthdistance.functions import 
CubeDistance, LlToEarth\n\nfrom .models import TestModel\n\n\nclass EarthdistanceTest(TestCase):\n def setUp(self):\n self.obj_1 = TestModel.objects.create(lat=51.490857, lon=-0.15071)\n self.obj_2 = TestModel.objects.create(lat=39.74829, lon=-1.23854)\n self.obj_3 = TestModel.objects.create(lat=39.46487, lon=-1.23854)\n\n def test_in_distance(self):\n objects = TestModel.objects.where(\n DistanceExpression(['lat', 'lon']).in_distance(\n 1500, (float(51.490857), float(-0.15071))))\n self.assertEqual(objects.count(), 1)\n self.assertEqual(objects[0].pk, self.obj_1.pk)\n\n objects = TestModel.objects.where(\n DistanceExpression(['lat', 'lon']).in_distance(\n 100000, (float(39.49087), float(-1.15071))))\n self.assertEqual(objects.count(), 2)\n self.assertTrue(\n all(o.pk in [self.obj_2.pk, self.obj_3.pk] for o in objects))\n\n def test_annotate(self):\n objects = TestModel.objects.where(\n DistanceExpression(['lat', 'lon']).in_distance(\n 10000, (float(39.49087), float(-1.15071)))).annotate_functions(\n distance=CubeDistance(\n LlToEarth([float(39.49087), float(-1.15071)]),\n LlToEarth(['lat', 'lon'])))\n self.assertEqual(objects.count(), 1)\n self.assertEqual(int(objects[0].distance), 8082)\n\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"513849414","text":"import pygame\nfrom os import path\n\nfrom config import img_dir, BLACK, FPS, GAME, QUIT, WIDTH, HEIGHT,SELECAO_MINIGAME\n\ndef fim1_screen(screen):\n clock = pygame.time.Clock()\n\n background = pygame.image.load(path.join(img_dir, 'vitoria_1.jpg')).convert()\n background = pygame.transform.scale(background, (WIDTH, HEIGHT))\n background_rect = background.get_rect()\n\n running = True\n while running:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n state = QUIT\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n state = SELECAO_MINIGAME\n running = False\n if event.key == pygame.K_r:\n state = GAME\n running = False\n \n screen.fill(BLACK)\n screen.blit(background, background_rect)\n pygame.display.flip()\n\n return state\n\n","sub_path":"tela_vitoria.py","file_name":"tela_vitoria.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"106415582","text":"import os\nimport json\nimport matplotlib.pyplot as plt\nimport config\n\n\nplt.style.use(\"ggplot\")\n\n\nJSON_DUMP_DIR = \"D:/yt-year-in-review/dataset/details/\"\n\n\ncategories = {\n\t2: \"Autos & Vehicles\",\n\t1: \"Film & Animation\",\n\t10: \"Music\",\n\t15: \"Pets & Animals\",\n\t17: \"Sports\",\n\t18: \"Short Movies\",\n\t19: \"Travel & Events\",\n\t20: \"Gaming\",\n\t21: \"Videoblogging\",\n\t22: \"People & Blogs\",\n\t23: \"Comedy\",\n\t24: \"Entertainment\",\n\t25: \"News & Politics\",\n\t26: \"Howto & Style\",\n\t27: \"Education\",\n\t28: \"Science & Technology\",\n\t29: \"Nonprofits & Activism\",\n\t30: \"Movies\",\n\t31: \"Anime/Animation\",\n\t32: \"Action/Adventure\",\n\t33: \"Classics\",\n\t34: \"Comedy\",\n\t35: \"Documentary\",\n\t36: \"Drama\",\n\t37: \"Family\",\n\t38: \"Foreign\",\n\t39: \"Horror\",\n\t40: \"Sci-Fi/Fantasy\",\n\t41: \"Thriller\",\n\t42: \"Shorts\",\n\t43: \"Shows\",\n\t44: \"Trailers\"\n}\nfrequencies = {}\n\n\ndef count_frequency(filepath):\n\t# Forgot to actually save files as json while downloading so have to specify encoding\n\twith 
open(filepath, 'r', encoding=\"utf-8\") as json_file:\n\t\trecords = json.load(json_file)[\"items\"]\n\t\t\n\t\tfor record in records:\n\t\t\tcategory_id = int(record[\"snippet\"][\"categoryId\"]) \n\t\t\tif category_id in frequencies:\n\t\t\t\tfrequencies[category_id] += 1\n\t\t\telse:\n\t\t\t\tfrequencies[category_id] = 1\n\n\ndef category_frequencies():\n\n\tglobal frequencies\n\n\tfor filename in os.listdir(config.DETAILS_JSON_DIR):\n\t\tprint(f\"Processing file {filename}\")\n\n\t\tcount_frequency(os.path.join(config.DETAILS_JSON_DIR, filename))\n\t\t\n\tfrequencies = {categories[k]: v for k, v in sorted(frequencies.items(), key=lambda item: item[1], reverse=True)}\n\tprint(frequencies)\n\n\tplt.barh(range(len(frequencies)), list(frequencies.values()), color=(255/255, 52/255, 100/255))\n\tplt.yticks(range(len(frequencies)), list(frequencies.keys()), color=(0.32, 0.32, 0.32))\n\tplt.gca().invert_yaxis()\n\tplt.suptitle(\"Categories Ranked\", fontsize=36, color=(0.16, 0.16, 0.16))\n\tplt.xlabel(\"Nonconsecutive Views of One Video\", fontsize=16, color=(0.24, 0.24, 0.24))\n\tplt.show()\n","sub_path":"src/category_frequencies.py","file_name":"category_frequencies.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"426183976","text":"# 找出数组中重复的数字。\r\n\r\n# 在一个长度为 n 的数组 nums 里的所有数字都在 0~n-1 的范围内。数组中某些数字是重复的,但不知道有几个数字重复了,也不知道每个数字重复了几次。请找出数组中任意一个重复的数字。\r\n\r\n# 示例 1:\r\n\r\n# 输入:\r\n# [2, 3, 1, 0, 2, 5, 3]\r\n# 输出:2 或 3\r\n#\r\n\r\n# 限制:\r\n\r\n# 2 <= n <= 100000\r\n\r\n# 通过次数17,575提交次数25,962\r\n\r\n# 来源:力扣(LeetCode)\r\n# 链接:https://leetcode-cn.com/problems/shu-zu-zhong-zhong-fu-de-shu-zi-lcof\r\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\r\n\r\n\r\nclass Solution:\r\n def findRepeatNumber(self, nums: List[int]) -> int:\r\n # 因为数字有范围, 所以可以把它放在正确的位置, 如果该位置已经有元素则说明有重复\r\n for i in range(len(nums)):\r\n if nums[i] == i:\r\n continue\r\n cur = nums[i]\r\n nums[i] = -1\r\n while cur != -1 and cur != nums[cur]:\r\n nex = nums[cur]\r\n nums[cur] = cur\r\n cur = nex\r\n if cur == nums[cur]:\r\n return cur\r\n","sub_path":"Easy/面试题03. 数组中重复的数字.py","file_name":"面试题03. 数组中重复的数字.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"96580972","text":"import pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\nimport os\nimport smtplib\n\nprint (\"Starting Anna\")\nMASTER = \"Mario\"\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\n\n#Ta funkcja czyta zadany tekst\ndef speak(text):\n engine.say(text)\n engine.runAndWait()\n#Ustalanie powitania w zależności od pory dnia\ndef Greeting():\n godzina = int(datetime.datetime.now().hour)\n\n if godzina>=0 and godzina < 12:\n speak(\"Good morning \"+ MASTER)\n elif godzina>=12 and godzina <21:\n speak(\"Good afternoon \"+ MASTER)\n else: \n speak(\"Good evening\"+ MASTER)\n\ndef przyjmijRozkaz():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"How may I assist? 
\")\n        speak(\"How may I assist?\")\n        audio = r.listen(source)\n    try:\n        print(\"Recognizing...\")\n        speak(\"Recognizing...\")\n        query = r.recognize_google(audio, language='en')\n        print(f\"user said: {query}\\n\")\n    except Exception as e:\n        print(\"Repeat\")\n        return przyjmijRozkaz()  # retry recognition and return its result (otherwise 'query' is unbound below)\n    return query\n# Startup\n#speak(\"Starting Anna...\")\n#Greeting()\n\n\ndef main():\n    \n    query = przyjmijRozkaz()\n\n\n\n    if 'wikipedia' in query.lower():\n        wikipedia.set_lang(\"en\")\n        speak('Searching in wikipedia...')\n        query = query.replace(\"wikipedia\", \"\")\n        results = wikipedia.summary(query, sentences=2)\n        print(results)\n        speak(results)\n        main()\n    elif 'open youtube' in query.lower():\n        webbrowser.get('windows-default').open('http://www.youtube.com')\n        main()\n    elif 'open google' in query.lower():\n        webbrowser.get('windows-default').open('http://www.google.com')\n        main()\n    elif 'play music' in query.lower():\n        speak(\"Searching for music\")\n        try:\n            f = open(\"songs.txt\", \"r\")\n            if f.mode == \"r\":\n                songs_dir = f.read()\n                speak(\"Songs found in\")\n                print(songs_dir)\n                speak(\"Playing music\")\n                songs = os.listdir(songs_dir)\n                os.startfile(os.path.join(songs_dir, songs[0]))\n                f.close()\n                main()\n            else:\n                speak(\"Something wrong with the file\")\n                speak(\"setting file to read mode\")\n                f = open(\"songs.txt\", \"r\")\n                main()\n        except IOError:\n            speak(\"File was not found. Do you want to show directory with your Music?\")\n            r = sr.Recognizer()\n            with sr.Microphone() as source:\n                audio = r.listen(source)\n            try:\n                print(\"Recognizing...\")\n                speak(\"Recognizing...\")\n                query = r.recognize_google(audio, language='en')\n                print(f\"user said: {query}\\n\")\n            except Exception as e:\n                print(\"Repeat\")\n                speak(\"repeat please\")\n            if 'yes' in query.lower():\n                speak(\"Where is music?\")\n                songs_dir = input(\"Where is music? 
\")\n                f = open(\"songs.txt\", \"w\")\n                f.write(songs_dir)\n                f.close()\n                songs = os.listdir(songs_dir)\n                os.startfile(os.path.join(songs_dir, songs[0]))\n                main()\n            if 'no' in query.lower():\n                main()\n        else:\n            main()\n    elif 'goodbye' in query.lower():\n        speak(\"Goodbye \" + MASTER)\n    elif 'time' in query.lower():\n        time = datetime.datetime.now().strftime(\"%H:%M\")\n        print(\"It is now \" + time)\n        speak(\"It is now \" + time)\n        main()\n    else:\n        speak(\"Goodbye \" + MASTER)\n    \nmain()\n    \n    ","sub_path":"main copy.py","file_name":"main copy.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"14185126","text":"# simple linear regression implemented as a class\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ntorch.manual_seed(1)\n\n# data\nx_train = torch.FloatTensor([[1],[2],[3]])\ny_train = torch.FloatTensor([[2],[4],[6]])\n\nclass LinearRegressionModel(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.linear = nn.Linear(1, 1)\n\n    def forward(self, x):\n        return self.linear(x)\n\nmodel = LinearRegressionModel()\n\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01) # optimize W and b\n\nnb_epochs = 2000\nfor epoch in range(nb_epochs + 1):\n\n    prediction = model(x_train)\n\n    cost = F.mse_loss(prediction, y_train) # mse\n\n    # improve H(x) using the cost\n    optimizer.zero_grad()\n    cost.backward()\n    optimizer.step()\n\n    if epoch % 100 == 0:\n        print('Epoch {:4d}/{} Cost: {:.6f}'.format(\n            epoch, nb_epochs, cost.item()\n        ))\n","sub_path":"torch/torch12_class2.py","file_name":"torch12_class2.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"12736230","text":"import random\nimport urllib.request\n\n# printing message\ndef beef():\n    print(\"Hello Daniel!\")\n\nbeef()\nn = 0\n\n# bitcoin calc\ndef bitcoinCalc(btc):\n    amount = btc * 3\n    print(amount)\n\nwhile n in range(100):\n    print(\"How many btc do you want to calculate\")\n    amount = input()\n    bitcoinCalc(int(amount))\n    n += 1  # advance the counter so the loop can terminate\n\n# Download web image\ndef download_web_image(url):\n    name = random.randrange(1,1000)\n    full_name = str(name) + \".jpg\"\n    urllib.request.urlretrieve(url, full_name)\n\n# random function\ndef rand():\n    amount = random.randrange(1, 1000)\n    print(amount)","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"574055634","text":"import mock\nfrom nose.tools import *\n\nfrom framework.auth.core import Auth\nfrom framework.exceptions import PermissionsError\n\nfrom website import settings\nfrom website.addons.base import AddonConfig\nfrom website.addons.base import AddonOAuthNodeSettingsBase\nfrom website.addons.base import AddonOAuthUserSettingsBase\nfrom website.oauth.models import ExternalProvider\n\nfrom tests.base import OsfTestCase\nfrom tests.factories import AuthUserFactory\nfrom tests.factories import ExternalAccountFactory\nfrom tests.factories import MockOAuth2Provider\nfrom tests.factories import ProjectFactory\n\nclass MockConfig(AddonConfig):\n\n    def __init__(self):\n        super(MockConfig, self).__init__(\n            short_name='mockaddon',\n            full_name='Mock Addon',\n            owners=[],\n            categories=[]\n        )\n\nclass MockNodeSettings(AddonOAuthNodeSettingsBase):\n    oauth_provider = MockOAuth2Provider\n    config = MockConfig()\n    folder_id = 'foo'\n    folder_name = 'Foo'\n    folder_path = '/Foo'\n\n    def deauthorize(*args, **kwargs):\n        
pass\n\n\nclass MockUserSettings(AddonOAuthUserSettingsBase):\n config = MockConfig\n oauth_provider = MockOAuth2Provider\n\n\nclass TestNodeSettings(OsfTestCase):\n\n ADDONS_UNDER_TEST = {\n MockOAuth2Provider.short_name: {\n 'user_settings': MockUserSettings,\n 'node_settings': MockNodeSettings,\n }\n }\n\n @classmethod\n def setUpClass(cls):\n super(TestNodeSettings, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(TestNodeSettings, cls).tearDownClass()\n\n def setUp(self):\n super(TestNodeSettings, self).setUp()\n self.project = ProjectFactory()\n self.user = self.project.creator\n self.node_settings = self.project.get_or_add_addon(\n MockNodeSettings.oauth_provider.short_name,\n auth=Auth(user=self.user)\n )\n self.user_settings = self.user.get_or_add_addon(\n MockUserSettings.oauth_provider.short_name\n )\n self.external_account = ExternalAccountFactory()\n self.user.external_accounts.append(self.external_account)\n self.user.save()\n\n def test_has_auth_false(self):\n assert_false(self.node_settings.has_auth)\n\n def test_has_auth_no_grant(self):\n self.node_settings.external_account = self.external_account\n self.node_settings.user_settings = self.user_settings\n\n assert_false(self.node_settings.has_auth)\n\n def test_has_auth(self):\n self.node_settings.set_auth(\n external_account=self.external_account,\n user=self.user\n )\n\n assert_true(self.node_settings.has_auth)\n\n def test_set_auth(self):\n self.node_settings.set_auth(\n external_account=self.external_account,\n user=self.user\n )\n self.user_settings.reload()\n\n assert_equal(\n self.node_settings.external_account,\n self.external_account\n )\n assert_equal(\n self.node_settings.user_settings._id,\n self.user_settings._id\n )\n assert_in(\n self.project._id,\n self.user_settings.oauth_grants.keys()\n )\n\n @mock.patch('tests.test_addons_oauth.MockNodeSettings.deauthorize')\n @mock.patch('framework.auth.core._get_current_user')\n def test_revoke_auth(self, mock_decorator, mock_deauth):\n mock_decorator.return_value = self.user\n self.node_settings.set_auth(\n external_account=self.external_account,\n user=self.user\n )\n self.user_settings.reload()\n assert_equal(\n self.user_settings.oauth_grants,\n {self.project._id: {self.external_account._id: {}}}\n )\n\n self.user_settings.revoke_oauth_access(self.external_account, auth=Auth(self.user))\n self.user_settings.reload()\n\n assert_true(mock_deauth.called)\n assert_equal(\n self.user_settings.oauth_grants,\n {self.project._id: {}}\n )\n\n def test_clear_auth(self):\n self.node_settings.external_account = self.external_account\n self.node_settings.user_settings = self.user_settings\n\n self.node_settings.clear_auth()\n\n assert_is_none(self.node_settings.external_account)\n assert_is_none(self.node_settings.user_settings)\n\n\nclass TestUserSettings(OsfTestCase):\n\n ADDONS_UNDER_TEST = {\n MockOAuth2Provider.short_name: {\n 'user_settings': MockUserSettings,\n 'node_settings': MockNodeSettings,\n }\n }\n\n @classmethod\n def setUpClass(cls):\n super(TestUserSettings, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(TestUserSettings, cls).tearDownClass()\n\n def setUp(self):\n super(TestUserSettings, self).setUp()\n self.user = AuthUserFactory()\n\n self.user_settings = self.user.get_or_add_addon(\n MockUserSettings.oauth_provider.short_name\n )\n\n self.external_account = ExternalAccountFactory()\n self.user.external_accounts.append(self.external_account)\n self.user.save()\n\n self.project = ProjectFactory(creator=self.user)\n\n 
def tearDown(self):\n super(TestUserSettings, self).tearDown()\n\n def test_connected_accounts_empty(self):\n self.user.external_accounts = []\n\n assert_equal(\n self.user_settings.external_accounts,\n []\n )\n\n def test_connected_accounts(self):\n assert_equal(\n self.user_settings.external_accounts,\n [self.external_account]\n )\n\n def test_verify_false_no_grants(self):\n assert_false(\n self.user_settings.verify_oauth_access(\n external_account=self.external_account,\n node=self.project\n )\n )\n\n def test_verify_false_with_grants(self):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=ProjectFactory()\n )\n\n assert_false(\n self.user_settings.verify_oauth_access(\n external_account=self.external_account,\n node=self.project\n )\n )\n\n def test_verify_false_metadata(self):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=self.project,\n metadata={'foo': 'bar'}\n )\n\n assert_false(\n self.user_settings.verify_oauth_access(\n external_account=self.external_account,\n node=self.project,\n metadata={'baz': 'qiz'}\n )\n )\n\n def test_verify_true(self):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=self.project\n )\n\n assert_true(\n self.user_settings.verify_oauth_access(\n external_account=self.external_account,\n node=self.project\n )\n )\n\n def test_verify_true_with_metadata(self):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=self.project,\n metadata={'foo': 'bar'}\n )\n\n assert_true(\n self.user_settings.verify_oauth_access(\n external_account=self.external_account,\n node=self.project,\n metadata={'foo': 'bar'}\n )\n )\n\n def test_grant(self):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=self.project\n )\n\n assert_equal(\n self.user_settings.oauth_grants,\n {\n self.project._id: {\n self.external_account._id: {}\n }\n }\n )\n\n def test_grant_not_owned(self):\n self.user.external_accounts = []\n\n with assert_raises(PermissionsError):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=self.project\n )\n\n assert_equal(\n self.user_settings.oauth_grants,\n {}\n )\n\n def test_grant_metadata(self):\n self.user_settings.grant_oauth_access(\n external_account=self.external_account,\n node=self.project,\n metadata={'foo': 'bar'}\n )\n\n assert_equal(\n self.user_settings.oauth_grants,\n {\n self.project._id: {\n self.external_account._id: {'foo': 'bar'}\n }\n }\n )\n\n @mock.patch('tests.test_addons_oauth.MockUserSettings.revoke_remote_oauth_access')\n @mock.patch('framework.auth.core._get_current_user')\n def test_revoke_remote_access_called(self, mock_decorator, mock_revoke):\n mock_decorator.return_value = self.user\n self.user_settings.delete()\n assert_equal(mock_revoke.call_count, 1)\n\n @mock.patch('tests.test_addons_oauth.MockUserSettings.revoke_remote_oauth_access')\n @mock.patch('framework.auth.core._get_current_user')\n def test_revoke_remote_access_not_called(self, mock_decorator, mock_revoke):\n mock_decorator.return_value = self.user\n user2 = AuthUserFactory()\n user2.external_accounts.append(self.external_account)\n user2.save()\n self.user_settings.delete()\n assert_equal(mock_revoke.call_count, 0)\n\n def test_on_delete(self):\n node_settings = self.project.get_or_add_addon(\n MockUserSettings.oauth_provider.short_name,\n auth=Auth(user=self.user)\n )\n\n node_settings.set_auth(\n 
external_account=self.external_account,\n            user=self.user\n        )\n\n        self.user.delete_addon(\n            MockUserSettings.oauth_provider.short_name\n        )\n\n        node_settings.reload()\n\n        assert_is_none(node_settings.external_account)\n        assert_is_none(node_settings.user_settings)\n\n","sub_path":"tests/test_addons_oauth.py","file_name":"test_addons_oauth.py","file_ext":"py","file_size_in_byte":10058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"446310148","text":"import argparse\nimport coronadb\nimport psycopg2\nimport csv\n\n\n_EXPECTED_HEADERS = [None, \"REGION\", \"DIVISION\", \"STATE\", \"COUNTY\", \"STNAME\", \"CTYNAME\", None, None, None, None, None, None, None, None, None, None, \"POPESTIMATE2018\"]\n\n\ndef load(source):\n    with psycopg2.connect(dbname=coronadb.database, port=coronadb.port, user=coronadb.user, host=coronadb.host, password=coronadb.password) as db:\n        clear_census_data(db)\n        load_census_data(db, source)\n\n\ndef clear_census_data(db):\n    with db.cursor() as cursor:\n        cursor.execute(\"DELETE FROM covid19.census\")\n\n    db.commit()\n    print(\"Cleaned database\")\n\n\ndef load_census_data(db, source):\n    with open(source, 'r', errors='ignore') as file:\n        reader = csv.reader(file)\n        header = next(reader, None)\n\n        # First, confirm the file matches our expected layout so we don't load bad data\n        for i, expected in enumerate(_EXPECTED_HEADERS):\n            if expected is not None and header[i] != expected:\n                print(\"Invalid Header at Position \" + str(i) + \": Expected \\\"\" + expected + \"\\\", found \\\"\" + header[i] + \"\\\"\")\n                return False\n\n        count = 1\n        with db.cursor() as cursor:\n            for row in reader:\n                print(str(count), end='\\r')\n\n                region = row[1]\n                division = row[2]\n                state_num = row[3]\n                county_num = row[4]\n                state = row[5]\n                county = row[6]\n                population_2018 = row[17]\n                fips = str(state_num).zfill(2) + str(county_num).zfill(3)\n\n                cursor.execute(\"\"\"\n                    INSERT INTO covid19.census (region, division, state_num, county_num, fips, state, county, pop_2018)\n                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\"\",\n                    (region, division, state_num, county_num, fips, state, county, population_2018))\n                count += 1\n\n        print()\n        db.commit()\n\n\nparser = argparse.ArgumentParser(description='Script to load Census data into the database')\nparser.add_argument(\"--source\", required=True, type=str, help=\"File to load\")\nargs = parser.parse_args()\nload(args.source)\n","sub_path":"scripts/load-census.py","file_name":"load-census.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"146313657","text":"import os\nfrom flask import Response, current_app\nimport datetime\nimport pytz\n\n\ndef output_html(data, code, headers=None):\n    resp = Response(data, mimetype='text/html', headers=headers)\n    resp.status_code = code\n    return resp\n\n\nproject_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nmypath = os.path.join(project_root, 'static/image')\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and filename.count(\".\") == 1 and \\\n filename.rsplit('.', 1)[-1].lower() in ALLOWED_EXTENSIONS\n\ndef save_file(image,name):\n import os\n from werkzeug import secure_filename\n filename = secure_filename(image.filename)\n image_name = name + '-' + \"image.\" + filename.rsplit('.', 1)[-1].lower()\n pname = os.path.join(mypath, image_name)\n sname = os.path.join('/static/image', image_name)\n image.save(pname)\n return sname\n\n\ndef time2cst(date_str):\n d1 = datetime.datetime.strptime(date_str, \"%Y-%m-%d %H:%M\")#convert string date time in specific date time format\n local_tz = pytz.timezone('Asia/Kolkata')\n date_temp = local_tz.localize(d1, is_dst=None) #navie time to aware time\n now_cst = date_temp.astimezone(pytz.timezone('CST6CDT'))#convert time to CST time\n\n\n return now_cst\n\ndef check_expire_time(given_timezone, ex_time):\n tz_NY = pytz.timezone(given_timezone)#take user specified time zone\n datetime_NY = datetime.datetime.now(tz_NY)#take current time of given time zone\n now_cst = datetime_NY.astimezone(pytz.timezone('CST6CDT'))#convert time to CST time\n\n local_tz = pytz.timezone('CST6CDT')\n ex_time = local_tz.localize(ex_time, is_dst=None)\n\n if now_cst > ex_time: #compare to aware time\n return True #if item expired\n else:\n return False #if item not expired\n","sub_path":"inventory/comman/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"151426322","text":"from django.contrib import admin\nfrom .models import Event\n\n# Register your models here.\n\n\nclass EventAdmin(admin.ModelAdmin):\n list_display = (\n 'name',\n 'day',\n 'start_time',\n 'location',\n 'image',\n )\n\n ordering = ('name',)\n\n\nadmin.site.register(Event, EventAdmin)\n","sub_path":"events/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"589268921","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2021/8/4 20:27\r\n# @Author : Kevin_liu\r\n# @Email : 87281094@qq.com\r\n# @File : learn.py\r\n# education = [\r\n# '没有教育历史(包括幼儿园)',\r\n# '小学文化',\r\n# '初中文化',\r\n# '高中文化',\r\n# '大学文化(研究生)',\r\n# '研究生',\r\n# '博士生',\r\n# '教授'\r\n# ]\r\n# # 打印文化程度列表\r\n# def culture():\r\n# print('-'.center(40, '-'))\r\n# for key, name in enumerate(education):\r\n# print('\\t\\t\\t', key, name)\r\n# print('-'.center(40, '-'))\r\nfrom DBUtils import select, update\r\nfrom addUser import add_citizen\r\n\r\ntextbook = [\r\n '小学课本',\r\n '初中课本',\r\n '高中课本',\r\n '大学课本',\r\n '研究生课本',\r\n '博士生课本',\r\n '教授课本'\r\n]\r\n\r\n\r\n# 打印课本列表\r\ndef book():\r\n print('-'.center(40, '-'))\r\n for key, name in enumerate(textbook):\r\n print('\\t\\t\\t', key, name)\r\n print('-'.center(40, '-'))\r\n\r\n\r\ndef library(record, choose):\r\n if record[13] < choose:\r\n return 0\r\n elif record[13] == choose:\r\n return 1\r\n elif record[13] > choose:\r\n return 2\r\n\r\n\r\n# 普通公民操作逻辑\r\ndef ordinaryCitizen():\r\n print('\\033[31;5m-------------------------\\033[31;0m')\r\n print('\\033[31;5m-\\t\\t1.注册信息\\t\\t-\\033[31;0m')\r\n print('\\033[31;5m-\\t\\t2.学习\\t\\t\\t-\\033[31;0m')\r\n print('\\033[31;5m-------------------------\\033[31;0m')\r\n operation = input('\\033[33;5m请选择您要进行的操作:\\n\\033[33;0m')\r\n if operation.isdigit():\r\n operation = int(operation)\r\n if operation == 1:\r\n add_citizen()\r\n elif operation == 2:\r\n cardId = 
input('\\033[33;5m请输入您的身份证号码:\\033[33;0m\\n')\r\n sql = 'SELECT * FROM citizen WHERE cardID = %s'\r\n data = [cardId]\r\n model = 'one'\r\n record = select(sql, data, model, [])\r\n if record is None:\r\n print('\\033[36;5m抱歉,该用户不存在!\\033[36;0m')\r\n else:\r\n book()\r\n choose = input('\\033[33;5m请选择您想要学习的课本编号:\\033[33;0m\\n')\r\n if choose.isdigit():\r\n choose = int(choose)\r\n if choose > 7:\r\n print('\\033[36;5m抱歉,您选择的课本不存在!\\033[36;0m')\r\n else:\r\n datum = library(record, choose)\r\n if datum == 0:\r\n print('\\033[36;5m抱歉,您文化程度不够,请先学习相对低水平的课本。\\033[36;0m')\r\n elif datum == 1:\r\n sql = 'UPDATE citizen SET education = %s WHERE cardID =%s '\r\n data = [choose + 1, cardId]\r\n update(sql, data)\r\n sql = 'SELECT NAME FROM edulevel WHERE id = %s;'\r\n data = [choose]\r\n model = 'one'\r\n record1 = select(sql, data, model, [])\r\n print('\\033[34;5m您开始学习', textbook[choose], '!\\033[34;0m')\r\n print('\\033[34;5m您当前的文化水平为', record1[0], '!\\033[34;0m')\r\n elif datum == 2:\r\n print('\\033[36;5m程度已经达到,没必要在学习本课程!\\033[36;0m')\r\n else:\r\n print('\\033[36;5m抱歉,编号输入错误!\\033[36;0m')\r\n else:\r\n print('\\033[36;5m您选择的操作不存在!\\033[36;0m')\r\n else:\r\n print('\\033[36;5m请输入数字!\\033[36;0m')\r\n","sub_path":"PSB/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"170142986","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport subprocess\nimport sys\nimport xml.etree.ElementTree\n\nparser=argparse.ArgumentParser('create sbatch scripts')\nparser.add_argument('-x','--cgqueryxml',required=True)\nargs=vars(parser.parse_args())\n\ncgqueryxml=args['cgqueryxml']\n\ndef main(cgqueryxml):\n outputxml=cgqueryxml\n tree=xml.etree.ElementTree.parse(outputxml)\n root=tree.getroot()\n results=root.getchildren()\n print('len(results)=%s' % len(results))\n print('000001')\n for result in results:\n if result.find('refassem_short_name') is not None:\n print('refassem_short_name=%s' % result.find('refassem_short_name').text)\n print('000002')\n root=tree.getroot()\n results=root.getchildren()\n for result in results:\n analysis_xml=result.find('analysis_xml')\n if result.find('analysis_xml') is not None:\n ANALYSIS_SET=analysis_xml.find('ANALYSIS_SET')\n ANALYSIS=ANALYSIS_SET.find('ANALYSIS')\n ANALYSIS_TYPE=ANALYSIS.find('ANALYSIS_TYPE')\n REFERENCE_ALIGNMENT=ANALYSIS_TYPE.find('REFERENCE_ALIGNMENT')\n ASSEMBLY=REFERENCE_ALIGNMENT.find('ASSEMBLY')\n for children in ASSEMBLY.getchildren():\n for item in children.items():\n print(item)\n print('000003')\n root=tree.getroot()\n results=root.getchildren() \n for result in results:\n analysis_xml=result.find('analysis_xml')\n if result.find('analysis_xml') is not None:\n ANALYSIS_SET=analysis_xml.find('ANALYSIS_SET')\n ANALYSIS=ANALYSIS_SET.find('ANALYSIS')\n ANALYSIS_ATTRIBUTES = ANALYSIS.find('ANALYSIS_ATTRIBUTES')\n if ANALYSIS_ATTRIBUTES is not None:\n for ANALYSIS_ATTRIBUTE in ANALYSIS_ATTRIBUTES.getchildren():\n if ANALYSIS_ATTRIBUTE.find('TAG').text=='assembly':\n print(ANALYSIS_ATTRIBUTE.find('VALUE').text)\n \n print('000004')\n root=tree.getroot()\n results=root.getchildren()\n for result in results:\n run_xml=result.find('run_xml')\n if result.find('run_xml') is not None:\n RUN_SET=run_xml.find('RUN_SET')\n RUN=RUN_SET.find('RUN')\n RUN_ATTRIBUTES=RUN.find('RUN_ATTRIBUTES')\n if RUN_ATTRIBUTES is not None:\n for RUN_ATTRIBUTE in RUN_ATTRIBUTES.getchildren():\n if RUN_ATTRIBUTE.find('TAG').text=='assembly':\n 
print(RUN_ATTRIBUTE.find('VALUE').text)\n\n\n\nif __name__=='__main__':\n main(cgqueryxml)\n","sub_path":"other/get_genomes_from_cgquery_xml.py","file_name":"get_genomes_from_cgquery_xml.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"462801809","text":"from collective.transmogrifier.interfaces import ISection\nfrom collective.transmogrifier.interfaces import ISectionBlueprint\nfrom datetime import datetime\nfrom opengever.base import advancedjson\nfrom opengever.base.pathfinder import PathFinder\nfrom opengever.bundle.report import ASCIISummaryBuilder\nfrom opengever.bundle.report import DataCollector\nfrom opengever.bundle.report import XLSXMainReportBuilder\nfrom opengever.bundle.report import XLSXValidationReportBuilder\nfrom opengever.bundle.sections.bundlesource import BUNDLE_KEY\nfrom zope.annotation import IAnnotations\nfrom zope.interface import classProvides\nfrom zope.interface import implements\nimport logging\nimport os\nimport tempfile\nimport transaction\n\n\nlog = logging.getLogger('opengever.bundle.report')\nlog.setLevel(logging.INFO)\n\nSKIP_REPORT_KEY = 'skip_report'\n\n\nclass ReportSection(object):\n \"\"\"Create import reports for the current OGGBundle.\n \"\"\"\n\n classProvides(ISectionBlueprint)\n implements(ISection)\n\n def __init__(self, transmogrifier, name, options, previous):\n self.previous = previous\n self.context = transmogrifier.context\n annotations = IAnnotations(transmogrifier)\n self.bundle = annotations[BUNDLE_KEY]\n self.skip_report = annotations.get(SKIP_REPORT_KEY, False)\n self.report_dir = None\n\n def __iter__(self):\n for item in self.previous:\n yield item\n\n transaction.commit()\n self.bundle.stats['timings']['migration_finished'] = datetime.now()\n\n if not self.skip_report:\n log.info(\"Creating import reports...\")\n self.report_dir = self.create_report_dir()\n\n self.store_as_json(self.bundle.errors, 'errors.json')\n self.store_as_json(self.bundle.stats, 'stats.json')\n\n report_data = DataCollector(self.bundle)()\n self.bundle.report_data = report_data\n\n self.build_ascii_summary(self.bundle)\n self.build_xlsx_main_report(self.bundle)\n self.build_xlsx_validation_report(self.bundle)\n\n def create_report_dir(self):\n \"\"\"Create a directory to store all import report files.\n\n In a real invocation, this will be created inside the instance's\n var/ directory (no git-pollution, variable data where it belongs).\n\n During tests, a temporary directory will be created.\n \"\"\"\n ts = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n dirname = 'import-report-%s' % ts\n try:\n report_dir = os.path.join(PathFinder().var, dirname)\n try:\n os.makedirs(report_dir)\n except OSError:\n # Already exists\n pass\n except RuntimeError:\n # During tests\n report_dir = tempfile.mkdtemp(prefix=dirname)\n return report_dir\n\n def store_as_json(self, data, filename):\n \"\"\"Store raw migration stats or errors as JSON files in report dir.\n \"\"\"\n json_path = os.path.join(self.report_dir, filename)\n with open(json_path, 'w') as json_file:\n advancedjson.dump(data, json_file, sort_keys=True,\n indent=4, separators=(',', ': '))\n log.info('Stored %s' % json_path)\n\n def build_ascii_summary(self, bundle):\n summary = ASCIISummaryBuilder(bundle).build()\n log.info('\\n\\n%s\\n' % summary)\n\n def build_xlsx_main_report(self, bundle):\n report_path = os.path.join(self.report_dir, 'main-report.xlsx')\n\n builder = XLSXMainReportBuilder(bundle)\n 
builder.build_and_save(report_path)\n\n def build_xlsx_validation_report(self, bundle):\n report_path = os.path.join(self.report_dir, 'validation-report.xlsx')\n\n builder = XLSXValidationReportBuilder(bundle)\n builder.build_and_save(report_path)\n","sub_path":"opengever/bundle/sections/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"237502817","text":"import os\nfrom django.test import TestCase\nfrom corehq.apps.app_manager.tests.util import TestFileMixin\nfrom corehq.apps.app_manager.xform import XForm, XFormError\n\nclass XFormParsingTest(TestCase, TestFileMixin):\n file_path = ('data',)\n\n def setUp(self):\n self.xforms = {}\n for filename in (\"label_form\", \"itext_form\"):\n self.xforms[filename] = XForm(self.get_xml(filename))\n self.xforms[filename].validate()\n\n def test_properties(self):\n for _,xform in self.xforms.items():\n xform.data_node\n xform.model_node\n xform.instance_node\n xform.case_node\n try:\n xform.itext_node\n except XFormError as e:\n self.assertEqual(str(e), \"Can't find \")\n\n def test_localize(self):\n try:\n self.assertEqual(self.xforms[\"label_form\"].localize(id=\"pork\", lang=\"kosher\"), None)\n self.fail()\n except XFormError as e:\n self.assertEqual(str(e), \"Can't find \")\n self.assertEqual(self.xforms[\"itext_form\"].localize(id=\"pork\", lang=\"kosher\"), None)\n self.assertEqual(self.xforms[\"itext_form\"].localize(id=\"question1\", lang=\"pt\"), \"P1\")\n\n def test_normalize_itext(self):\n original = self.xforms['itext_form']\n original.normalize_itext()\n self.assertXmlEqual(original.render(), self.get_xml('itext_form_normalized'))\n","sub_path":"corehq/apps/app_manager/tests/test_xform_parsing.py","file_name":"test_xform_parsing.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"621976753","text":"#ny-simple.py\nfrom secrets import *\nimport requests\n\n# gets stories from a particular section of NY times\ndef get_stories(section):\n baseurl = 'https://api.nytimes.com/svc/topstories/v2/'\n extendedurl = baseurl + section + '.json'\n params={'api-key': nyt_key}\n return requests.get(extendedurl, params).json()\n\ndef get_headlines(nyt_results_dict):\n results = nyt_results_dict['results']\n headlines = []\n for r in results:\n headlines.append(r['title'])\n return headlines\n\nstory_list_json = get_stories('science')\nheadlines = get_headlines(story_list_json)\nfor h in headlines:\n print(h)\n","sub_path":"nyt.py","file_name":"nyt.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"508746689","text":"'''\nSAMPLE INPUT\nMohd Kafeel Khan\nSAMPLE OUTPUT\nM. K. Khan\n'''\ns = input()\nlens = len(s)\nspc = s.count(\" \")\nhad_space = True\nouts = \"\"\nfor i in s:\n if spc == 0:\n outs += i\n elif i == \" \":\n outs += i\n spc -= 1\n had_space = True\n else:\n if had_space:\n outs += i\n if spc >= 0:\n outs += \".\"\n had_space = False\nprint(outs)\n\n\n\n","sub_path":"HackerEarth/short_name.py","file_name":"short_name.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"522102103","text":"import rubik\n\ndef shortest_path(start, end):\n \"\"\"\n Using 2-way BFS, finds the shortest path from start_position to\n end_position. 
Returns a list of moves. \n\n You can use the rubik.quarter_twists move set.\n Each move can be applied using rubik.perm_apply\n \n \"\"\"\n processed_f=set()\n processed_b=set()\n parent_f={start:None}\n parent_b={end:None}\n frontier_f=[start]\n frontier_b=[end]\n isforward=True\n for i in range(16):\n frontier=frontier_f if isforward else frontier_b\n parent=parent_f if isforward else parent_b\n processed=processed_f if isforward else processed_b\n next_=[]\n for u in frontier:\n process(u,parent,next_)\n processed.add(u)\n break_=(u in processed_b) if isforward else (u in processed_f)\n if break_:\n x=u\n break\n if break_: break\n if isforward: frontier_f=next_\n else: frontier_b=next_\n isforward= not isforward\n else:\n return None\n path=[]\n u=x\n while u != start:\n path.insert(0,parent_f[u][0])\n u=parent_f[u][1]\n u=x\n while u!=end:\n path.append(rubik.perm_inverse(parent_b[u][0]))\n u=parent_b[u][1]\n return path\n \n\ndef process(u,parent,next_):\n for move in rubik.quarter_twists:\n v=rubik.perm_apply(move,u)\n if v not in parent:\n## parent[v]=(rubik.perm_inverse(move),move,u) #u\n parent[v]=(move,u) #u\n next_.append(v)\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"487040576","text":"def intersection(arrays):\n \"\"\"\n Params:\n -----\n arrays: of arrays to find intersections\n\n Returns:\n -----\n results: of items that are found in all lists\n\n \"\"\"\n\n counts = dict()\n\n # iterate over all items in the lists and add\n # up the count of each item. This is under the \n # assumption that a number will not be seen more\n # than once in the same list (checked tests)\n for arr in arrays:\n for num in arr:\n if num not in counts:\n counts[num] = 0\n counts[num] += 1\n \n results = list()\n\n # look to see if the value is the same as the\n # number of lists.. if it is, then it should be \n # seen in all of them\n for k, v in counts.items():\n if v == len(arrays):\n results.append(k)\n\n return results\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"379771134","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 19 15:05:16 2016\n\n@author: mmolinare\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import read_excel\nfrom os.path import join\n\n## Local imports\nimport context\nfrom perceptron import Adaline\n\n\n\nif __name__ == '__main__':\n \n ## Read training set\n fn = join('data', 'coin-data.xlsx')\n df = read_excel(fn, sheetname='coin-data') \n X = df.as_matrix(['size','mass'])\n y = df['classification'].values\n \n \n ## Run adaline supervised learning alg. 
\n ada = Adaline(X, y, std=True)\n ada.train(stochastic=1, nu=.001)\n\n \n ## Plot results...\n ## ...Cost function\n plt.figure()\n plt.title(\"Adaline gradient descent\")\n plt.xlabel(\"Iterations\"), plt.ylabel(\"Sum-squared-error\")\n \n plt.scatter(range(1,len(ada.J)+1), \n ada.J,\n c='forestgreen',\n edgecolor='none',\n s=60)\n \n plt.plot(range(1,len(ada.J)+1), \n ada.J, \n c='forestgreen',\n lw=1.5)\n \n plt.xlim(0,len(ada.J))\n plt.grid('on')\n\n\n ## ...Weights\n plt.figure()\n plt.title(\"Perceptron learning algorithm (%i iterations)\" %ada.t) \n plt.xlabel(\"Size\"), plt.ylabel(\"Mass\")\n \n ax = plt.axes()\n kw = {'c':'none', 'lw':2, 's':50}\n \n ax.scatter(ada.X[y==1,0],\n ada.X[y==1,1],\n edgecolor='darkblue',\n marker='o',\n **kw)\n \n ax.scatter(ada.X[y==-1,0],\n ada.X[y==-1,1],\n edgecolor='maroon',\n marker='o',\n **kw)\n\n xlim, ylim = plt.xlim(), plt.ylim()\n x1 = np.linspace(*xlim)\n ax.plot(x1, \n -ada.w[2]/ada.w[1]-x1*ada.w[0]/ada.w[1],\n c='black',\n ls='--',\n lw=1.5)\n \n ax.grid('on')\n plt.xlim(xlim), plt.ylim(ylim)\n ","sub_path":"examples/coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"407900589","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nclass Lenet(object):\n def __init__(self, num_classes, data_format=\"NHWC\", dtype=tf.float32):\n self.num_classes = num_classes\n self.data_format = data_format\n self.dtype = dtype\n\n def __call__(self, inputs, training):\n with tf.variable_scope(\"lenet_model\"):\n if self.data_format == \"NCHW\":\n # Convert inputs from NHWC (channels_last) to NCHW\n # Performance gains on GPU\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n network = None\n with tf.variable_scope(\"c1\"):\n kernel = tf.get_variable(\"weights\", [5, 5, 1, 6],\n initializer=tf.random_normal_initializer())\n biases = tf.get_variable(\"biases\", [6],\n initializer=tf.zeros_initializer())\n conv = tf.nn.convolution(\n input=inputs,\n filter=kernel,\n padding=\"VALID\",\n strides=[1, 1],\n name=\"conv\",\n data_format=self.data_format\n )\n network = tf.nn.sigmoid(\n tf.nn.bias_add(conv, biases), name=\"activations\")\n\n with tf.variable_scope(\"s2\"):\n network = tf.nn.avg_pool(\n value=network,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n name=\"subsample\",\n padding=\"VALID\"\n )\n\n with tf.variable_scope(\"c3\"):\n kernel = tf.get_variable(\"weights\", [5, 5, 6, 16],\n initializer=tf.random_normal_initializer())\n biases = tf.get_variable(\"biases\", [16],\n initializer=tf.zeros_initializer())\n conv = tf.nn.convolution(\n input=network,\n filter=kernel,\n padding=\"VALID\",\n strides=[1, 1],\n name=\"conv\",\n data_format=self.data_format\n )\n network = tf.nn.sigmoid(\n tf.nn.bias_add(conv, biases), name=\"activations\")\n\n with tf.variable_scope(\"s4\"):\n network = tf.nn.avg_pool(\n value=network,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n name=\"subsample\",\n padding=\"VALID\"\n )\n\n with tf.variable_scope(\"c5\"):\n kernel = tf.get_variable(\"weights\", [5, 5, 16, 120],\n initializer=tf.random_normal_initializer())\n biases = tf.get_variable(\"biases\", [120],\n initializer=tf.zeros_initializer())\n conv = tf.nn.convolution(\n input=network,\n filter=kernel,\n padding=\"VALID\",\n strides=[1, 1],\n name=\"conv\",\n data_format=self.data_format\n )\n network = tf.nn.sigmoid(\n tf.nn.bias_add(conv, 
biases), name=\"activations\")\n\n with tf.variable_scope(\"f6\"):\n network = tf.squeeze(network)\n weights = tf.get_variable(\"weights\", [120, 84],\n initializer=tf.random_normal_initializer())\n biases = tf.get_variable(\"biases\", [84],\n initializer=tf.zeros_initializer())\n fc = tf.matmul(network, weights)\n network = tf.nn.tanh(\n tf.nn.bias_add(fc, biases), name=\"activations\")\n \n with tf.variable_scope(\"logits\"):\n weights = tf.get_variable(\"weights\", [84, self.num_classes],\n initializer=tf.random_normal_initializer())\n biases = tf.get_variable(\"biases\", [self.num_classes],\n initializer=tf.zeros_initializer())\n network = tf.nn.bias_add(tf.matmul(network, weights), biases)\n\n return network\n","sub_path":"models/lenet/lenet_model.py","file_name":"lenet_model.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614041906","text":"from tpot import TPOTClassifier\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.utils import resample\r\nimport torch\r\nfrom tpot.config import classifier_config_nn\r\nfrom tpot.builtins import nn as nn\r\n\r\nfrom sklearn.impute import SimpleImputer\r\n\r\nfrom keras.layers import Dense\r\nfrom keras.models import Sequential\r\nfrom keras.optimizers import SGD\r\nfrom keras.utils import np_utils\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\n\r\n\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom tpot.export_utils import set_param_recursive\r\n\r\n\r\n\r\nimport argparse\r\n\r\nimport tensorflow as tf\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\ndef scale_data(df):\r\n X = np.zeros((len(df.index), len(df.columns)))\r\n print(len(df.index), len(df.columns))\r\n X[:, :len(df.columns) - 2] = df.iloc[:, :len(df.columns) - 2]\r\n sc = StandardScaler()\r\n X = sc.fit_transform(X)\r\n X[:, len(df.columns) - 1] = tpot_data.iloc[:, len(df.columns) - 1]\r\n dfX = pd.DataFrame(X, columns=tpot_data.columns.values)\r\n return dfX\r\n\r\ntpot_data = pd.read_csv('C:/Users/nrust/Downloads/ECG_single_007_partial _bkp273.csv', sep=',', usecols=[i for i in range(1,9)],\r\n dtype=np.float64, engine='python')\r\n\r\ntpot_data['target'] = pd.read_csv('C:/Users/nrust/Downloads/ECG_single_007_partial _bkp273.csv', sep=',', usecols=['target'],\r\n dtype=np.float64, engine='python')\r\ntpot_data = tpot_data.replace([np.inf, -np.inf, np.nan], 0).dropna(axis=1)\r\n\r\n\r\ndfX = scale_data(tpot_data)\r\n\r\nprint(dfX[dfX['target'] == 1])\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(dfX.drop(columns=['target']), dfX['target'], test_size=0.2,\r\n random_state=10, stratify=dfX['target'])\r\n\r\ndfX2 = pd.concat([X_train, y_train], axis=1, sort=False)\r\nvalidate = pd.concat([X_val, y_val], axis=1, sort=False)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(dfX2.drop(columns=['target']), dfX2['target'], test_size=0.33,\r\n stratify=dfX2['target'])\r\n\r\ntrain = pd.concat([X_train, y_train], axis=1, sort=False)\r\ntest = pd.concat([X_test, y_test], axis=1, sort=False)\r\n\r\n\r\n#train, validate, test = np.split(dfX.sample(frac=1, random_state=42), [int(.75 * 
len(tpot_data)), int(.85 * len(tpot_data))])\r\nprint(train.shape[0], validate.shape[0], test.shape[0])\r\n\r\nprint(train[train['target'] == 1])\r\n\r\n\r\n\r\n# start treat undersampled data\r\n# concatenate our training data back together\r\n\r\nprint('Before \\n', train['target'].value_counts())\r\n\r\n# separate minority and majority classes\r\nnot_fraud = train[train['target'] == 0]\r\nfraud = train[train['target'] == 1]\r\n#fraud2 = tpot_data[tpot_data['target'] == 2]\r\n\r\n# upsample minority\r\nfraud_upsampled = resample(fraud,\r\n replace=True, # sample with replacement\r\n n_samples=len(not_fraud), # match number in majority class\r\n random_state=42) # reproducible results\r\n\r\n\r\n# combine majority and upsampled minority\r\nupsampled = pd.concat([not_fraud, fraud_upsampled])\r\n\r\n# check new class counts\r\nprint('After \\n',upsampled['target'].value_counts())\r\n\r\n\r\n# trying logistic regression again with the balanced dataset\r\ny_train = np.array(upsampled['target'])\r\nprint('train data \\n', train.iloc[1])\r\nX_train = np.array(upsampled.iloc[:, :train.shape[1]-2])\r\n\r\nprint('train data\\n', X_train[1])\r\n\r\n#print(train['target'].value_counts())\r\n# end of undersampled\r\n\r\nX_test = np.array(test.iloc[:, :train.shape[1]-2])\r\ny_test = np.array(test['target'])\r\n\r\nX_validate = np.array(validate.iloc[:, :train.shape[1]-2])\r\ny_validate = np.array(validate['target'])\r\n\r\nimputer = SimpleImputer(strategy=\"median\")\r\nimputer.fit(X_train)\r\nX_train = imputer.transform(X_train)\r\nX_test = imputer.transform(X_test)\r\nX_validate = imputer.transform(X_validate)\r\n\r\n# Average CV score on the training set was: 0.7010039899271261\r\ntpot = make_pipeline(\r\n PCA(iterated_power=10, svd_solver=\"randomized\"),\r\n RandomForestClassifier(bootstrap=False, criterion=\"entropy\", max_features=0.55, min_samples_leaf=1, min_samples_split=19, n_estimators=100)\r\n)\r\n# Fix random state for all the steps in exported pipeline\r\nset_param_recursive(tpot.steps, 'random_state', 42)\r\n\r\n#tpot = TPOTClassifier(generations=5, population_size=20, cv=5,\r\n #random_state=42, verbosity=2)\r\ntpot.fit(X_train, y_train)\r\nprint(tpot.score(X_test, y_test))\r\n\r\n#tpot.export('tpot_beats_pipeline.py')\r\n\r\ntarget_names = ['normal', 'hypo'] #, 'hyper'\r\n\r\n\r\n#inv_predictions = onehot_encoder.inverse_transform([np.argmax(predictions[:, 1])])\r\n#inv_predictions = label_encoder.inverse_transform([np.argmax(predictions[0, :])])\r\n\r\nresults = tpot.predict(X_test)\r\n\r\nreport_dnn = classification_report(y_test, results, target_names=target_names)\r\nCM = confusion_matrix(y_test, results)\r\nprint(report_dnn)\r\nprint(CM)\r\nprint('####### Validation #######')\r\nvalidation = tpot.predict(X_validate)\r\nreport_dnn_val = classification_report(y_validate, validation, target_names=target_names)\r\nCM_val = confusion_matrix(y_validate, validation)\r\nprint(report_dnn_val)\r\nprint(CM_val)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nGradientBoostingClassifier(input_matrix, learning_rate=1.0, max_depth=7, max_features=0.25, min_samples_leaf=14, min_samples_split=20, n_estimators=100, subsample=0.55)\r\nDecisionTreeClassifier(MaxAbsScaler(XGBClassifier(input_matrix, learning_rate=0.01, max_depth=3, min_child_weight=2, n_estimators=100, n_jobs=1, subsample=0.9500000000000001, verbosity=0)), criterion=entropy, max_depth=6, min_samples_leaf=6, min_samples_split=9)\r\nXGBClassifier(FastICA(input_matrix, tol=0.75), learning_rate=0.5, max_depth=8, min_child_weight=7, n_estimators=100, 
n_jobs=1, subsample=0.6500000000000001, verbosity=0)\r\nGradientBoostingClassifier(PolynomialFeatures(input_matrix, degree=2, include_bias=False, interaction_only=False), learning_rate=0.5, max_depth=5, max_features=0.05, min_samples_leaf=1, min_samples_split=14, n_estimators=100, subsample=0.4)\r\nGradientBoostingClassifier(LogisticRegression(PCA(input_matrix, iterated_power=6, svd_solver=randomized), C=5.0, dual=False, penalty=l2), learning_rate=0.1, max_depth=4, max_features=0.25, min_samples_leaf=10, min_samples_split=3, n_estimators=100, subsample=0.7000000000000001)\r\nExtraTreesClassifier(input_matrix, bootstrap=False, criterion=entropy, max_features=0.25, min_samples_leaf=4, min_samples_split=18, n_estimators=100)\r\nMLPClassifier(StandardScaler(input_matrix), alpha=0.0001, learning_rate_init=0.001)\r\n\r\n\r\n\"\"\"\r\n","sub_path":"TPOTtrainBeats.py","file_name":"TPOTtrainBeats.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"539166537","text":"import os\nimport shutil\nimport requests\n \nfrom xbmcswift2 import xbmc, xbmcvfs\n\nfrom meta import plugin\nfrom meta.gui import dialogs\nfrom meta.utils.text import to_utf8\nfrom meta.library.tools import scan_library, add_source\nfrom meta.navigation.base import get_icon_path, get_background_path\nfrom lastfm import lastfm\n\nfrom language import get_string as _\nfrom settings import SETTING_MUSIC_LIBRARY_FOLDER, SETTING_MUSIC_PLAYLIST_FOLDER\n\nimport re\n\ndef update_library():\n # setup library folder\n library_folder = plugin.get_setting(SETTING_MUSIC_LIBRARY_FOLDER, unicode)\n if not xbmcvfs.exists(library_folder):\n return\n scan_library(type=\"music\")\n #scan_library(type=\"video\")\n\ndef add_music_to_library(library_folder, artist_name, album_name, track_name):\n # replace non valid path characters with _\n safe_artist_name = to_utf8(re.sub('[^\\w\\-_\\. ]', '_', artist_name))\n safe_album_name = to_utf8(re.sub('[^\\w\\-_\\. ]', '_', album_name))\n safe_track_name = to_utf8(re.sub('[^\\w\\-_\\. ]', '_', track_name))\n\n changed = False\n artist_info = lastfm.get_artist_info(artist_name)\n album_info = lastfm.get_album_info(artist_name, album_name)\n # create nfo file\n artist_folder = os.path.join(library_folder, safe_artist_name)\n album_folder = os.path.join(artist_folder, safe_album_name)\n if not xbmcvfs.exists(artist_folder):\n xbmcvfs.mkdir(artist_folder)\n if not xbmcvfs.exists(album_folder):\n xbmcvfs.mkdir(album_folder)\n nfo_artist_path = os.path.join(artist_folder, \"artist.nfo\")\n nfo_album_path = os.path.join(album_folder, \"album.nfo\")\n track_info = lastfm.get_track_info(artist_name, track_name)\n track_number = \"\"\n if \"album\" in track_info:\n track_number = track_info[\"album\"][\"@attr\"][\"position\"]\n if track_number != \"\" and track_number != None: full_track_name = track_number + \". 
\" + safe_track_name\n else: full_track_name = safe_track_name\n else: full_track_name = safe_track_name\n nfo_track_path = os.path.join(album_folder, full_track_name + \".nfo\")\n if not xbmcvfs.exists(nfo_artist_path):\n changed = True\n image = artist_info[\"image\"][-1][\"#text\"]\n nfo_file = xbmcvfs.File(nfo_artist_path, 'w')\n content = \"\\n\" \\\n \" {0}\\n\" \\\n \" {1}\\n\" \\\n \"\".format(artist_name, image)\n nfo_file.write(content)\n nfo_file.close()\n\n if not xbmcvfs.exists(nfo_album_path):\n changed = True\n image = album_info[\"image\"][-1][\"#text\"]\n nfo_file = xbmcvfs.File(nfo_album_path, 'w')\n content = \"\\n\" \\\n \" {0}\\n\" \\\n \" {1}\\n\" \\\n \" {2}\\n\" \\\n \"\".format(album_name, artist_name, image)\n nfo_file.write(content)\n nfo_file.close()\n\n if not xbmcvfs.exists(nfo_track_path):\n changed = True\n track_info = lastfm.get_track_info(artist_name, track_name)\n track_number = \"\"\n if \"album\" in track_info:\n track_number = track_info[\"album\"][\"@attr\"][\"position\"]\n nfo_file = xbmcvfs.File(nfo_track_path, 'w')\n content = \"\\n\" \\\n \" {0}\\n\" \\\n \" {1}\\n\" \\\n \" {2}\\n\" \\\n \" {3}\\n\" \\\n \"\".format(to_utf8(track_name),\n artist_name,\n album_name,\n track_number)\n nfo_file.write(content)\n nfo_file.close()\n\n # create strm file\n strm_filepath = os.path.join(album_folder, full_track_name + \".strm\")\n if not xbmcvfs.exists(strm_filepath):\n changed = True\n track_info = lastfm.get_track_info(artist_name, track_name)\n track_number = \"\"\n if \"album\" in track_info:\n track_number = track_info[\"album\"][\"@attr\"][\"position\"]\n strm_filepath = os.path.join(album_folder, track_number + \". \" + safe_track_name + \".strm\")\n strm_file = xbmcvfs.File(strm_filepath, 'w')\n content = plugin.url_for(\"music_play\", artist_name=artist_name, track_name=track_name,\n album_name=album_name, mode='library')\n strm_file.write(content)\n strm_file.close()\n # create thumbnails\n thumb_album_path = os.path.join(artist_folder, \"folder.jpg\")\n if not xbmcvfs.exists(thumb_album_path):\n changed = True\n r = requests.get(artist_info[\"image\"][-1][\"#text\"], stream=True)\n if r.status_code == 200:\n try:\n with open(thumb_album_path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n except:\n pass\n thumb_album_path = os.path.join(album_folder, \"folder.jpg\")\n if not xbmcvfs.exists(thumb_album_path):\n changed = True\n try:\n r = requests.get(album_info[\"image\"][-1][\"#text\"], stream=True)\n if r.status_code == 200:\n with open(thumb_album_path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n except:\n pass\n return changed\n\ndef setup_library(library_folder):\n if library_folder[-1] != \"/\":\n library_folder += \"/\"\n metalliq_playlist_folder = \"special://profile/playlists/mixed/MetalliQ/\"\n if not xbmcvfs.exists(metalliq_playlist_folder): xbmcvfs.mkdir(metalliq_playlist_folder)\n playlist_folder = plugin.get_setting(SETTING_MUSIC_PLAYLIST_FOLDER, unicode)\n if plugin.get_setting(SETTING_MUSIC_PLAYLIST_FOLDER, unicode)[-1] != \"/\": playlist_folder += \"/\"\n # create folders\n if not xbmcvfs.exists(playlist_folder): xbmcvfs.mkdir(playlist_folder)\n if not xbmcvfs.exists(library_folder):\n # create folder\n xbmcvfs.mkdir(library_folder)\n msg = _(\"Would you like to automatically set [COLOR ff0084ff]M[/COLOR]etalli[COLOR ff0084ff]Q[/COLOR] as a music source?\")\n if dialogs.yesno(\"{0} {1}\".format(_(\"Library\"), \"setup\"), msg):\n source_thumbnail = 
get_icon_path(\"tv\")\n            source_name = \"[COLOR ff0084ff]M[/COLOR]etalli[COLOR ff0084ff]Q[/COLOR] \" + _(\"Music\")\n            source_content = \"('{0}','musicvideos','metadata.musicvideos.imvdb','',2147483647,0,'',0,0,NULL,NULL)\".format(library_folder)\n            add_source(source_name, library_folder, source_content, source_thumbnail)\n    # return translated path\n    return xbmc.translatePath(library_folder)\n\ndef auto_music_setup(library_folder):\n    if library_folder[-1] != \"/\":\n        library_folder += \"/\"\n    metalliq_playlist_folder = \"special://profile/playlists/mixed/MetalliQ/\"\n    if not xbmcvfs.exists(metalliq_playlist_folder): xbmcvfs.mkdir(metalliq_playlist_folder)\n    playlist_folder = plugin.get_setting(SETTING_MUSIC_PLAYLIST_FOLDER, unicode)\n    if plugin.get_setting(SETTING_MUSIC_PLAYLIST_FOLDER, unicode)[-1] != \"/\": playlist_folder += \"/\"\n    if not xbmcvfs.exists(playlist_folder): xbmcvfs.mkdir(playlist_folder)\n    if not xbmcvfs.exists(library_folder):\n        try:\n            xbmcvfs.mkdir(library_folder)\n            source_thumbnail = get_icon_path(\"music\")\n            source_name = \"[COLOR ff0084ff]M[/COLOR]etalli[COLOR ff0084ff]Q[/COLOR] \" + _(\"Music\")\n            source_content = \"('{0}','musicvideos','metadata.musicvideos.imvdb','',2147483647,0,'',0,0,NULL,NULL)\".format(library_folder)\n            add_source(source_name, library_folder, source_content, source_thumbnail)\n            return True\n        except:\n            return False","sub_path":"plugin.video.metalliq/resources/lib/meta/library/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"412301256","text":"import os\nfrom datetime import datetime\nimport socket\nfrom flask import Blueprint\n\ntimeApi = Blueprint('timeApi', __name__, url_prefix=\"/time\")\n\n@timeApi.route(\"/\")\ndef index():\n    hostname = socket.gethostname()\n    html = \"Hello world!\" \\\n           \"This is a test view to show the current time\" \\\n           \"host: {hostname}\" \\\n           \"time:{time}\"\n    time = str(datetime.now())\n    return html.format(hostname=hostname, time=time)\n","sub_path":"web/timeView.py","file_name":"timeView.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"382419699","text":"import boto3\ncloudwatch=boto3.client('cloudwatch')\nRDS_IOPS_Alerts='arn:aws:sns:us-east-1:856465202724:RDS_IOPS_Alerts'\ndatabasename='database-1'\n'''cloudwatch.put_anomaly_detector(\n    Namespace='AWS/RDS',\n    MetricName='ReadIOPS',\n    Dimensions=[\n        {\n            'Name': 'DBInstanceIdentifier',\n            'Value': databasename\n        },\n    ],\n    Stat='Average',\n\n)'''\ncloudwatch.put_metric_alarm(\n    AlarmName='%s IOPS Utilization' % databasename,\n    ActionsEnabled=True,\n    AlarmActions=[\n        RDS_IOPS_Alerts,\n    ],\n    OKActions=[\n        RDS_IOPS_Alerts,\n    ],\n    InsufficientDataActions=[\n        RDS_IOPS_Alerts,\n    ],\n    TreatMissingData='missing',\n    EvaluationPeriods=5,\n    DatapointsToAlarm=2,\n    ThresholdMetricId=\"t1\",\n    ComparisonOperator=\"GreaterThanUpperThreshold\",\n    AlarmDescription=\"ReadIOPS Utilization is abnormal\",\n    Metrics=[\n        {\n            'Id': 'm1',\n            'MetricStat': {\n                'Metric': {\n                    'Namespace': 'AWS/RDS',\n                    'MetricName': 'ReadIOPS',\n                    'Dimensions': [\n                        {\n                            'Name': 'DBInstanceIdentifier',\n                            'Value': databasename\n                        },\n                    ],\n                },\n                'Period': 60,\n                'Stat': 'Average',\n                'Unit': 'Count/Second',\n\n            },\n            'ReturnData': True,\n        },\n        {\n            \"Id\": \"t1\",\n            \"Expression\": \"ANOMALY_DETECTION_BAND(m1, 3)\"\n        },\n\n\n    ],\n    Tags=[\n        {\n            'Key': 'Name',\n            'Value': databasename\n        },\n    ]\n\n\n    )\n","sub_path":"Boto3/anormaly_theshold_type_alarm.py","file_name":"anormaly_theshold_type_alarm.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"652779703","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 10 21:23:35 2017\r\n\r\n@author: Krissy\r\n\"\"\"\r\n\r\n\"\"\" used for text purposes. 
\"\"\"\r\n\r\n# commands = ['blubb','play','pause','resume','stop']\r\n\r\ncommands = {}\r\ncommands[\"blubb\"] = \"exists\"\r\ncommands[\"play\"] = \"plays a nice song.\"\r\ncommands[\"pause\"] = \"stops the currently playing nice song.\"\r\ncommands[\"resume\"] = \"resumes the nice song that has been\" + \\\r\n                     \" stopped some time before.\"\r\ncommands[\"stop\"] = \"throws me out of the voice channel and makes me cry\" + \\\r\n                   \" silently.\"\r\ncommands[\"commands\"] = \"displays the commands and a description :)\"\r\ncommands[\"test\"] = \"exists, tho not relevant for you\"\r\ncommands[\"tset\"] = \"tset\"\r\ncommands[\"song_settings\"] = \"allows you to delete or add songs\"\r\ncommands[\"songs\"] = \"displays all songs\"\r\ncommands[\"start_physics\"] = \"computes stuff for you\"\r\ncommands[\"clear\"] = \"frees the channel from commands and answers from bot.\"\r\n\r\n ## = tset = ##\r\n \r\ntset_emoji = \"<:tset:368887633184817153>\" \r\ntset = \"tset \" + tset_emoji + \\\r\n       \"\\n\\n>> holy pet of cookie crumbs <<\" + \\\r\n       \"\\n\\n +5 fluffy\" + \\\r\n       \"\\n +100 cute\" + \\\r\n       \"\\n +10 hunger for carrots\" + \\\r\n       \"\\n -1 speed\" + \\\r\n       \"\\n\\n'Bites, when asked to bake a carrot cake.'\"\r\n \r\n\r\n ## = physics mode = ##\r\n\r\nproblems = {} \r\nproblems[\"B\"] = \"calculates the binding energy of a nucleus\"\r\n \r\n ","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"367694319","text":"def find_recur(n):\n    \"\"\"Uses good old division to find the recurring decimals.\"\"\"\n    decimals = []\n    dividends = [10]\n    power = len(str(n))\n\n    while True:\n        next_dec = dividends[-1] // n\n        next_div = (dividends[-1] % n) * 10\n\n        if next_div == 0: return [] # there is no recurrence\n\n        decimals.append(next_dec)\n\n        if next_div in dividends: # started repeating, found recurrence\n            place = dividends.index(next_div)\n            return decimals[place:]\n\n        dividends.append(next_div)\n\ndef solve():\n    # track the longest cycle length and the n that produced it separately\n    best_n, longest = 1, 0\n    for n in range(1, 1000):\n        cycle = len(find_recur(n))\n        if cycle > longest: longest, best_n = cycle, n\n\n    return best_n\n\nif __name__ == \"__main__\":\n    print(solve())\n","sub_path":"pb026.py","file_name":"pb026.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"647354053","text":"class Solution(object):\n\n    def reverseStr(self, s, k):\n        \"\"\"\n        :type s: str\n        :type k: int\n        :rtype: str\n        \"\"\"\n        s = list(s)\n        for i in xrange(0, len(s), 2 * k):\n            s[i:i + k] = reversed(s[i:i + k])\n        return \"\".join(s)\n\n\n# https://discuss.leetcode.com/topic/82596/python-straightforward-with-explanation\n# For every block of 2k characters starting with position i,\n# we want to replace S[i:i+k] with its reverse.\n# In Python, slices are handled safely with respect to indices that are out of bounds.\n","sub_path":"problems/541.Reverse_String_II/awice.py","file_name":"awice.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"247235581","text":"from INTERACTION2.SBC.OnePumpStation import OnePumpStation\nfrom INTERACTION2.AppSBC.UI.UI import Ui_SBC\nimport INTERACTION2.Util.QTools as QTools\n\n\nclass GuiOnePumpStation(object):\n    def __init__(self, ui: Ui_SBC, one_pump_station: OnePumpStation):\n        self._ui = ui\n        self._one_pump_station = one_pump_station\n        # UI: LEDS\n        self._leds = dict()\n 
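# NOTE: the 'WHEEL' key is assigned twice in this mapping (first to self._ui.led_wheel here under 'UI: LEDS', then to self._ui.led_ch1_wheel under 'UI: PUMP' below), so the earlier binding is silently overwritten and the liquid-level display and the pump display end up driving the same widget.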
self._leds['PUMP STATION'] = self._ui.led_pump_station\n self._leds['WATER'] = self._ui.led_water\n self._leds['ALKALI'] = self._ui.led_alkali\n self._leds['ACID'] = self._ui.led_acid\n self._leds['WHEEL'] = self._ui.led_wheel\n self._leds['WAX'] = self._ui.led_wax\n # UI: PUMP\n self._leds['WATER_HIGH'] = self._ui.led_high_water\n self._leds['WHEEL'] = self._ui.led_ch1_wheel\n self._leds['CHEM_ALKALI'] = self._ui.led_ch_alkali\n self._leds['CHEM_ACID'] = self._ui.led_ch_acid\n self._leds['WATER_WAX'] = self._ui.led_ch1_wax\n # UI: SIGNAL\n self._one_pump_station.sign_tcp_online.connect(self._tcp_online)\n self._one_pump_station.sign_drain_raw_data.connect(self._drain_raw_data)\n self._one_pump_station.sign_liquid_raw_data.connect(self._liquid_raw_data)\n self._one_pump_station.sign_pump_emit.connect(self._pump_emit)\n self._one_pump_station.sign_pump_countdown.connect(self._pump_countdown)\n # UI: INIT\n self._ui.ip_local.setText(f'LOCAL_IP:{self._one_pump_station.get_my_ip()}')\n # TEST BTN\n self._ui.btn_all_stop.stateChanged.connect(lambda x: self._one_pump_station.stop_all(is_open=False, t_wait=0))\n self._ui.btn_high_water.stateChanged.connect(lambda x: self._one_pump_station.set_water_high_emit(\n is_open=x, t_wait=0))\n self._ui.btn_wheel.stateChanged.connect(lambda x: self._one_pump_station.set_wheel_emit(\n is_open=x, t_wait=0))\n self._ui.btn_alkali.stateChanged.connect(lambda x: self._one_pump_station.set_chem_alkali_emit(\n is_open=x, t_wait=0))\n self._ui.btn_acid.stateChanged.connect(lambda x: self._one_pump_station.set_chem_acid_emit(\n is_open=x, t_wait=0))\n self._ui.btn_water_wax.stateChanged.connect(lambda x: self._one_pump_station.set_water_wax_emit(\n is_open=x, t_wait=0))\n self._ui.btn_water_inflow.stateChanged.connect(lambda x: self._one_pump_station.set_water_inflow_emit(\n is_open=x, t_wait=0))\n self._ui.btn_drain.stateChanged.connect(lambda x: self._one_pump_station.set_drain_emit(\n is_open=x, t_wait=0))\n\n def _drain_raw_data(self, pump_id, drain_raw_data):\n self._ui.ui_drain_data1.setText(f'{drain_raw_data[0]}')\n self._ui.ui_drain_data2.setText(f'{drain_raw_data[1]}')\n\n def _liquid_raw_data(self, pump_id, liquid_raw_data):\n self._ui.ui_water_data.setText(f\"{liquid_raw_data['WATER']}\")\n self._ui.ui_alkali_data.setText(f\"{liquid_raw_data['ALKALI']}\")\n self._ui.ui_acid_data.setText(f\"{liquid_raw_data['ACID']}\")\n self._ui.ui_wheel_data.setText(f\"{liquid_raw_data['WHEEL']}\")\n self._ui.ui_wax_data.setText(f\"{liquid_raw_data['WAX']}\")\n for liquid_type in liquid_raw_data.keys():\n liquid_height = 100 if liquid_type == 'WATER' else 60\n container_height = 900 if liquid_type == 'WATER' else 250\n color = 'RED' if liquid_raw_data[liquid_type] <= liquid_height else 'GREEN'\n text = 'WARNING' if liquid_raw_data[liquid_type] <= liquid_height else ' '\n QTools.set_led_style(ui_btn=self._leds[liquid_type], color=color)\n self._leds[liquid_type].setText(text)\n percent = liquid_raw_data[liquid_type] / container_height\n progress = QTools.set_style_sheet(percent=percent)\n self._leds[liquid_type].setStyleSheet(progress)\n\n def _pump_countdown(self, pump_id, instruct, countdown):\n self._ui.ui_log_pump_countdown.setText(f'{pump_id} {instruct}')\n self._ui.pump_countdown_box.setValue(int(countdown))\n\n def _pump_emit(self, bot_id, instruct, is_open):\n self._ui.ui_log_pump.setText(f'{bot_id} {instruct} {is_open}')\n if instruct in ['WATER_HIGH', 'CHEM_ALKALI', 'CHEM_ACID', 'WHEEL', 'WATER_WAX']:\n QTools.set_led_style(ui_btn=self._leds[instruct], 
color='GREEN' if is_open else 'RED')\n elif instruct == 'ALL STOP':\n for instruct in ['WATER_HIGH', 'CHEM_ALKALI', 'CHEM_ACID', 'WHEEL', 'WATER_WAX']:\n QTools.set_led_style(ui_btn=self._leds[instruct + str(bot_id)], color='RED')\n\n def _tcp_online(self, is_online, ip, port):\n QTools.set_led_style(ui_btn=self._ui.led_pump_station, color='GREEN' if is_online else 'RED')\n self._ui.led_pump_station.setText('ON' if is_online else 'OFF')\n self._ui.ip_nuc.setText(f'NUC_IP {ip}')\n","sub_path":"INTERACTIONS_V1/INTERACTION2/AppSBC/_GuiOnePumpStation.py","file_name":"_GuiOnePumpStation.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"124915413","text":"hardwareTotal = 0.0\nsoftwareTotal = 0.0\n\n# Get sum of all values\nfor i in range(0,ITEMS):\n hardwareTotal = hardwareTotal + computerParts[i] \n softwareTotal = softwareTotal + software[i]\n\n# Calculate the average\nhwAverage = hardwareTotal / ITEMS #Divide by number of ITEMS\nswAverage = softwareTotal / ITEMS","sub_path":"paper2_tasks/2015-2.1/MrF/exampleLoop.py","file_name":"exampleLoop.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"152566983","text":"import requests\nimport re\nimport random\nimport time\nimport pickle\n\n\ndef download_songlist(artist):\n url = \"https://www.azlyrics.com/\" + artist[0] + \"/\" + artist + \".html\"\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n seite = requests.get(url, headers=headers)\n expression = \"lyrics/(\" + artist + \"/\\w+.html)\"\n songliste = list(set(re.findall(expression, seite.text)))\n return songliste\n\n\ndef download_urls(songliste):\n song_url = []\n for i in songliste:\n url = \"https://www.azlyrics.com/lyrics/\" + i\n song_url.append(url)\n return song_url\n\n\ndef download_song(url):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n seite = requests.get(url, headers=headers)\n text = re.findall(\"Sorry about that. 
-->(.+)\", seite.text, re.DOTALL)[0]\n text = re.sub(\"\\.{1,50}\\<\\/i\\>|\\|\\n|\\r|\\<\\/div\\>|\\(|\\)|-|\\.|\\;|\\:|\\!|\\,|'|\\?|\\"\", \" \", text)\n text = text.lower()\n return text\n\n\ndef download_songtexts(song_url, n):\n songs = []\n if n is None:\n for i in song_url:\n songs.append(download_song(i))\n time.sleep(3)\n else:\n for i in random.sample(song_url, n):\n songs.append(download_song(i))\n time.sleep(3)\n return songs\n\n\ndef download_main(artist, n=None):\n songlist = download_songlist(artist)\n urls = download_urls(songlist)\n lyrics = download_songtexts(urls, n)\n with open(artist + str(n) + '.pkl', 'wb') as f:\n pickle.dump(lyrics, f)\n","sub_path":"Konny/download_lyrics.py","file_name":"download_lyrics.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"548849874","text":"import unittest\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\n\nfrom adminapp import manager\nfrom mainapp.catalog import Catalog, Category, Item\n\n\nclass TestManager(unittest.TestCase):\n def setUp(self):\n self.category_id = 0\n self.category_name = 'Одежда'\n self.item_id = 0\n self.item_style = '1BLCNVYM'\n self.item_name = 'Куртка'\n self.item_size = 'M'\n self.tst_catalog = Catalog()\n self.tst_category = manager._create_category('test')\n\n def test_create_category(self):\n self.assertIsInstance(self.tst_category, Category)\n self.assertIsInstance(self.tst_catalog.category_list, list)\n\n def test_update_category(self):\n self.assertEqual(type(manager._update_category(self.category_id,\n 'Обувь',\n 2.78)), dict)\n\n def test_remove_category_item(self):\n self.test_create_item()\n self.assertFalse(manager._remove_category_item(self.item_id))\n\n def test_remove_category(self):\n self.assertEqual(manager._remove_category(self.category_id),\n (Category, False))\n\n def test_create_item(self):\n self.assertIsInstance(manager._create_item(self.category_id,\n self.item_style,\n self.item_name,\n self.item_size), Item)\n\n def test_update_item(self):\n self.test_create_category()\n self.assertIsInstance(manager._update_item(self.item_id,\n '1REDXXL',\n 'Толстовка',\n 'XXL'), dict)\n\n def test_remove_item(self):\n self.test_create_item()\n self.assertFalse(manager._remove_item(self.item_id))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"adminapp/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"303161634","text":"\"\"\"\nSummary:\n SVM methods using Scikit\n\n\"\"\"\n\nimport os\nfrom time import time\nfrom pickle import dump\nfrom pickle import load\n\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n# from sklearn.metrics import classification_report\n# from sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport utils\n\nFACE_DIM = (50, 50)\n\n\ndef error_rate(pred, actual):\n \"\"\"\n Calculate name prediction error rate\n\n Parameters\n ----------\n pred: ndarray (1, number_of_images_in_face_profiles)\n The predicated names of the test dataset\n\n actual: ndarray (1, number_of_images_in_face_profiles)\n The actual names of the test dataset\n\n Returns\n -------\n rate: float\n The calculated error rate\n\n \"\"\"\n if pred.shape 
!= actual.shape:\n return None\n rate = np.count_nonzero(pred - actual) / float(pred.shape[0])\n return rate\n\n\ndef build_svc(face_profile_data, face_profile_name_index, face_profile_names):\n \"\"\"\n Build the SVM classification modle using the face_profile_data matrix (numOfFace X numOfPixel)\n and face_profile_name_index array, face_dim is a tuple of the dimension of each image(h,w)\n Returns the SVM classification modle\n\n Parameters\n ----------\n face_profile_data : ndarray (number_of_images_in_face_profiles, width * height of the image)\n The pca that contains the top eigenvectors extracted\n using approximated Singular Value Decomposition of the data\n\n face_profile_name_index : ndarray\n The name corresponding to the face profile is encoded in its index\n\n Returns\n -------\n clf : theano object\n The trained SVM classification model\n\n pca : theano ojbect\n The PCA that contains the top 128 eigenvectors extracted\n using approximated Singular Value Decomposition of the data\n\n \"\"\"\n x = face_profile_data\n y = face_profile_name_index\n\n n_samples = y.shape[0]\n n_features = x.shape[1]\n n_classes = len(face_profile_names)\n\n print(\"\\n%d samples from %d people are loaded\\n\" % (n_samples, n_classes))\n print(\"Samples:\", n_samples)\n print(\"Features:\", n_features)\n print(\"Classes:\", n_classes)\n\n # Split into a training set and a test set using a stratified k fold\n # split into a training and testing set\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)\n # x_train, x_test, y_train, y_test = train_test_split(\n # x, y, test_size=0.25, random_state=42)\n\n # Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled\n # dataset): unsupervised feature extraction / dimensionality reduction\n n_components = 128 # maximum number of components to keep\n\n print(\"\\nExtracting the top %d eigenfaces from %d faces\" %\n (n_components, x_train.shape[0]))\n\n t_0 = time()\n pca = PCA(\n n_components=n_components, svd_solver='randomized',\n whiten=True).fit(x_train)\n print(\"done in %.3fs\" % (time() - t_0))\n\n # eigenfaces = pca.components_.reshape((n_components, FACE_DIM[0],\n # FACE_DIM[1]))\n\n # This portion of the code is used if the data is scarce, it uses the number\n # of imputs as the number of features\n # pca = PCA(n_components=None, whiten=True).fit(x_train)\n # eigenfaces = pca.components_.reshape((pca.components_.shape[0], FACE_DIM[0], FACE_DIM[1]))\n\n print(\"Projecting the input data on the eigenfaces orthonormal basis\")\n t_0 = time()\n x_train_pca = pca.transform(x_train)\n x_test_pca = pca.transform(x_test)\n print(\"done in %.3fs\" % (time() - t_0))\n\n # Train a SVM classification model\n\n print(\"Fitting the classifier to the training set\")\n t_0 = time()\n param_grid = {\n 'C': [1e3, 5e3, 1e4, 5e4, 1e5],\n 'gamma': [1e-4, 5e-4, 1e-3, 5e-3, 0.01, 0.1],\n }\n clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)\n\n # clf = GridSearchCV(\n # SVC(kernel='rbf',\n # class_weight='balanced',\n # cache_size=200,\n # coef0=0.0,\n # decision_function_shape='ovr',\n # degree=3,\n # max_iter=-1,\n # probability=False,\n # random_state=None,\n # shrinking=True,\n # tol=0.001,\n # verbose=False), param_grid)\n clf = clf.fit(x_train_pca, y_train)\n\n print(\"done in %.3fs\" % (time() - t_0))\n print(\"Best estimator found by grid search:\")\n # print(clf.best_estimator_)\n\n ###############################################################################\n # Quantitative evaluation of the model 
quality on the test set\n\n print(\"\\nPredicting people's names on the test set\")\n t_0 = time()\n y_pred = clf.predict(x_test_pca)\n print(\"\\nPrediction took %.8fs per sample on average\" %\n ((time() - t_0) / float(y_pred.shape[0])))\n\n # print(\n # classification_report(y_test, y_pred, target_names=face_profile_names))\n # print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))\n\n rate = error_rate(y_pred, y_test)\n print(\"\\nTest Error Rate:\\t %.4f%%\" % (rate * 100))\n print(\"Test Recognition Rate:\\t%.4f%%\" % ((1.0 - rate) * 100))\n\n def plot_gallery(images, titles, n_row: int = 3, n_col: int = 4):\n \"\"\"Helper function to plot a gallery of portraits\"\"\"\n plt.figure(figsize=(1.5 * n_col, 2 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape(FACE_DIM), cmap='gray')\n plt.title(titles[i], size=8)\n plt.xticks(())\n plt.yticks(())\n\n def title(y_pred, y_test, target_names, i):\n \"\"\"Helper function to plot the result of the prediction\"\"\"\n pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]\n true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]\n return 'pred: %s\\ntest: %s' % (pred_name, true_name)\n\n prediction_titles = [\n title(y_pred, y_test, face_profile_names, i)\n for i in range(y_pred.shape[0])\n ]\n\n plot_gallery(x_test, prediction_titles)\n\n plt.show()\n\n return clf, pca, face_profile_names\n\n\ndef fetch_data():\n \"\"\"\n Saves and returns image training data\n\n \"\"\"\n # Load training data from face_profiles/\n face_profile_data, face_profile_name_index, face_profile_names = utils.load_training_data(\n )\n\n # Build the classifier\n face_profile = build_svc(face_profile_data, face_profile_name_index,\n face_profile_names)\n\n data_dir = os.path.join(os.path.dirname(__file__), \"../temp\")\n data_path = os.path.join(data_dir, \"SVM.pkl\")\n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n # Save the classifier\n with open(data_path, 'wb') as file:\n dump(face_profile, file)\n\n print(\"\\nTraining data is successfully saved\\n\")\n return face_profile\n\n\ndef predict(face):\n \"\"\"\n Predict the name of the supplied image from the list of face profile names\n\n Parameters\n ----------\n\n img: ndarray\n The input image for prediction\n\n Returns\n -------\n name : string\n The predicated name\n\n \"\"\"\n # Building SVC from database\n\n data_path = os.path.join(os.path.dirname(__file__), \"../temp\", \"SVM.pkl\")\n\n if os.path.exists(data_path):\n with open(data_path, 'rb') as file:\n clf, pca, face_profile_names = load(file)\n else:\n clf, pca, face_profile_names = fetch_data()\n\n img = cv2.resize(face, FACE_DIM, interpolation=cv2.INTER_AREA)\n img = cv2.convertScaleAbs(img)\n img = img.ravel()\n # Apply dimensionality reduction on img, img is projected on the first principal components\n # previous extracted from the Yale Extended dataset B.\n principle_components = pca.transform(np.array(img).reshape(1, -1))\n pred = clf.predict(principle_components)\n name = face_profile_names[np.int(pred)]\n return name\n","sub_path":"scripts/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":8163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"231197336","text":"import numpy as np\nimport torch\nfrom scipy.spatial.distance import cdist\nfrom sklearn.cluster import DBSCAN\nfrom torch_cluster import knn\n\nfrom .globals import 
*\n\n\ndef compute_rescaled_charge(input_data, deghost_mask, last_index = 6, collection_only=False):\n \"\"\"\n Computes rescaled charge after deghosting\n\n Note\n ----\n This function should work on Numpy arrays or Torch tensors.\n\n Parameters\n ----------\n input_data: np.ndarray or torch.Tensor\n Shape (N, 4+num_features) where 4 corresponds to batch_id,x,y,z\n deghost_mask: np.ndarray or torch.Tensor\n Shape (N,), N_deghost is the predicted deghosted voxel count\n last_index: int, default 6\n Indexes where hit-related features start @ 4 + deghost_input_features\n collection_only : bool, default False\n Only use the collection plane to estimate the rescaled charge\n\n Returns\n -------\n np.ndarray or torch.Tensor\n Shape (N_deghost,) contains rescaled charge array for input data.\n Includes deghosted mask already.\n \"\"\"\n if torch.is_tensor(input_data):\n unique = torch.unique\n empty = lambda n: torch.empty(n, dtype=torch.long, device=hit_charges.device)\n sum = lambda x: torch.sum(x, dim=1)\n else:\n unique = np.unique\n empty = np.empty\n sum = lambda x: np.sum(x, axis=1)\n\n batches = unique(input_data[:, BATCH_COL])\n hit_charges = input_data[deghost_mask, last_index :last_index+3]\n hit_ids = input_data[deghost_mask, last_index+3:last_index+6]\n multiplicity = empty(hit_charges.shape, )\n for b in batches:\n batch_mask = input_data[deghost_mask, BATCH_COL] == b\n _, inverse, counts = unique(hit_ids[batch_mask], return_inverse=True, return_counts=True)\n multiplicity[batch_mask] = counts[inverse].reshape(-1,3)\n if not collection_only:\n pmask = hit_ids > -1\n charges = sum((hit_charges*pmask)/multiplicity)/sum(pmask) # Take average estimate\n else:\n charges = hit_charges[:,-1]/multiplicity[:,-1] # Only use the collection plate measurement\n\n return charges\n\n\ndef adapt_labels_knn(result, label_seg, label_clustering,\n num_classes=5,\n batch_column=0,\n coords_column_range=(1, 4),\n true_mask=None,\n use_numpy=False):\n \"\"\"\n Returns new cluster labels that have the same size as the input w/ ghost points.\n Points predicted as nonghost but that are true ghosts get the cluster label of\n the closest cluster.\n Points that are true ghosts and predicted as ghosts get \"empty\" (-1) values.\n\n Note\n ----\n Uses GPU version from `torch_cluster.knn` to speed up\n the label adaptation computation.\n\n Parameters\n ----------\n result: dict\n label_seg: list of torch.Tensor\n label_clustering: list of torch.Tensor\n num_classes: int, default 5\n Semantic classes count.\n batch_column: int, default 0\n coords_column_range: tuple, default (1, 4)\n true_mask: torch.Tensor, default None\n True nonghost mask. If None, will use the intersection\n of predicted nonghost and true nonghost. This option is\n useful to do \"cheat ghost predictions\" (i.e. 
mimic deghosting\n predictions using true ghost mask, to run later stages\n of the chain independently of the deghosting stage).\n\n Returns\n -------\n np.ndarray\n shape: (input w/ ghost points, label_clusters_features)\n\n See Also\n --------\n adapt_labels, adapt_labels_numpy\n \"\"\"\n complete_label_clustering = []\n c1, c2 = coords_column_range\n\n if use_numpy:\n unique = np.unique\n ones = np.ones\n argmax = lambda x: np.argmax(x, axis=1)\n where = np.where\n concatenate0 = lambda x: np.concatenate(x, axis=1)\n concatenate1 = lambda x: np.concatenate(x, axis=0)\n compute_neighbor = lambda X_true, X_pred: cdist(X_pred[:, c1:c2], X_true[:, c1:c2]).argmin(axis=1)\n compute_distances = lambda X_true, X_pred: np.amax(np.abs(X_true[:, c1:c2] - X_pred[:, c1:c2]), axis=1)\n make_float = lambda x : x\n make_long = lambda x: x.astype(np.int64)\n to_device = lambda x, y: x\n get_shape = lambda x, y: (x.shape[0], y.shape[1])\n else:\n unique = lambda x: x.int().unique()\n ones = torch.ones\n argmax = lambda x: torch.argmax(x, dim=1)\n where = torch.where\n concatenate0 = lambda x: torch.cat(x, dim=1).float()\n concatenate1 = lambda x: torch.cat(x, dim=0)\n compute_neighbor = lambda X_true, X_pred: knn(X_true[:, c1:c2].float(), X_pred[:, c1:c2].float(), 1)[1]\n compute_distances = lambda X_true, X_pred: torch.amax(torch.abs(X_true[:, c1:c2] - X_pred[:, c1:c2]), dim=1)\n make_float = lambda x: x.float()\n make_long = lambda x: x.long()\n to_device = lambda x, y: x.to(y.device)\n get_shape = lambda x, y: (x.size(0), y.size(1))\n\n if true_mask is not None:\n assert true_mask.shape[0] == label_seg[0].shape[0]\n c3 = max(c2, batch_column+1)\n\n for i in range(len(label_seg)):\n coords = label_seg[i][:, :c3]\n label_c = []\n full_nonghost_mask = argmax(result['ghost'][i]) == 0 if true_mask is None else true_mask\n full_semantic_pred = to_device(make_long(result['segmentation'][i].shape[1]*ones(len(coords))), coords)\n full_semantic_pred[full_nonghost_mask] = argmax(result['segmentation'][i])\n for batch_id in unique(coords[:, batch_column]):\n batch_mask = coords[:, batch_column] == batch_id\n batch_coords = coords[batch_mask]\n batch_clustering = label_clustering[i][label_clustering[i][:, batch_column] == batch_id]\n if len(batch_clustering) == 0:\n continue\n\n nonghost_mask = full_nonghost_mask[batch_mask]\n\n # Prepare new labels\n new_label_clustering = -1. 
* ones(get_shape(batch_coords, batch_clustering))\n if (not use_numpy) and torch.cuda.is_available():\n new_label_clustering = new_label_clustering.cuda()\n new_label_clustering[:, :c3] = batch_coords\n\n # Segmentation is always pre-deghosted\n semantic_pred = full_semantic_pred[batch_mask]\n\n # Check if the semantic labels and predictions are compatible.\n # Intra EM activity cross-contamination is tolerate.\n true_pred = label_seg[i][batch_mask, -1]\n incompatible_semantic = (semantic_pred == TRACK_SHP) ^ (true_pred == TRACK_SHP)\n\n # Include true nonghost voxels by default when they have the right semantic prediction\n new_label_clustering[(true_pred < num_classes)] = make_float(batch_clustering)\n new_label_clustering[(true_pred < num_classes) & incompatible_semantic, c3:] = -1.\n for semantic in unique(semantic_pred):\n semantic_mask = semantic_pred == semantic\n\n if true_mask is not None:\n continue\n # Select voxels predicted as nonghost, but true ghosts (or true nonghost, but incompatible semantic prediction)\n # mask = nonghost_mask & (label_seg[i][:, -1][batch_mask] == num_classes) & semantic_mask\n if semantic == TRACK_SHP:\n incompatible_semantic = true_pred != semantic\n else:\n incompatible_semantic = (true_pred == GHOST_SHP) | (true_pred == TRACK_SHP)\n mask = nonghost_mask & incompatible_semantic & semantic_mask\n mask = where(mask)[0]\n #print(semantic, np.intersect1d(mask.cpu().numpy(), indices))\n # Now we need a special treatment for these, if there are any.\n if batch_coords[mask].shape[0] == 0:\n continue\n tagged_voxels_count = 1 # to get the loop started\n if semantic == TRACK_SHP:\n compatible_semantic = batch_clustering[:, -1] == semantic\n else:\n compatible_semantic = (batch_clustering[:, -1] != GHOST_SHP) & (batch_clustering[:, -1] != TRACK_SHP)\n X_true = batch_clustering[compatible_semantic]\n if X_true.shape[0] == 0:\n continue\n X_pred = batch_coords[mask]\n while tagged_voxels_count > 0 and X_pred.shape[0] > 0:\n # print(batch_id, \"while\", X_true.shape, X_pred.shape, tagged_voxels_count)\n #neighbors = knn(X_true[:, c1:c2].float(), X_pred[:, c1:c2].float(), 1)\n #_, d = neighbors[0], neighbors[1]\n d = compute_neighbor(X_true, X_pred)\n\n # compute Chebyshev distance between predicted and true\n # distances = torch.amax(torch.abs(X_true[neighbors[1], c1:c2] - X_pred[neighbors[0], c1:c2]), dim=1)\n distances = compute_distances(X_true[d], X_pred)\n\n select_mask = distances <= 1\n\n tagged_voxels_count = select_mask.sum()\n if tagged_voxels_count > 0:\n # We assign label of closest true nonghost voxel to those within Chebyshev distance 1\n additional_label_clustering = concatenate0([X_pred[select_mask],\n X_true[d[select_mask], c3:]])\n\n new_label_clustering[mask[select_mask]] = additional_label_clustering\n mask = mask[~select_mask]\n # Update for next iteration\n X_true = additional_label_clustering\n X_pred = X_pred[~select_mask]\n\n\n # Now we save - need only to keep predicted nonghost voxels.\n label_c.append(new_label_clustering[nonghost_mask])\n #print(new_label_clustering[nonghost_mask][indices])\n #print(new_label_clustering[indices])\n label_c = concatenate1(label_c)\n #print(\"ignored 0 \", (label_c[:, -1] == -1).sum())\n # Also run DBSCAN on track true clusters\n # We want to avoid true track clusters broken in two having the same cluster id label\n # Note: we assume we are adapting either cluster or kinematics labels,\n # for which cluster id and group id columns are 5 and 6 respectively.\n cluster_id_col = 5\n track_label = 1\n dbscan 
= DBSCAN(eps=1.1, min_samples=1, metric='chebyshev')\n        track_mask = label_c[:, -1] == track_label\n        for batch_id in unique(coords[:, batch_column]):\n            batch_mask = label_c[:, batch_column] == batch_id\n            # We want to select cluster ids for track-like particles\n            batch_clustering = label_clustering[i][(label_clustering[i][:, batch_column] == batch_id) & (label_clustering[i][:, -1] == track_label)]\n            if len(batch_clustering) == 0:\n                continue\n            # Reset counter for each batch entry\n            # we need to avoid labeling a cluster with an already existing cluster id (maybe not track)\n            cluster_count = unique(label_clustering[i][(label_clustering[i][:, batch_column] == batch_id)][:, cluster_id_col]).max()+1\n            for c in unique(batch_clustering[:, cluster_id_col]):\n                if c < 0:\n                    continue\n                cluster_mask = label_c[batch_mask, cluster_id_col] == c\n                if cluster_mask.sum() == 0:\n                    continue\n                coordinates = label_c[batch_mask][cluster_mask, c1:c2]\n                if not isinstance(label_c, np.ndarray):\n                    coordinates = coordinates.detach().cpu().numpy()\n                l = dbscan.fit(coordinates).labels_\n                # Unclustered voxels can keep the label -1\n                if not isinstance(label_c, np.ndarray):\n                    l = torch.tensor(l, device = label_c.device).float()\n                l[l > -1] = l[l > -1] + cluster_count\n                label_c[where(batch_mask)[0][cluster_mask], cluster_id_col] = l\n                cluster_count = int(l.max() + 1)\n\n        complete_label_clustering.append(label_c)\n\n    return complete_label_clustering\n\n\ndef adapt_labels(*args, **kwargs):\n    \"\"\"\n    Kept for backward compatibility, to deprecate soon.\n\n    See Also\n    --------\n    adapt_labels_knn, adapt_labels_numpy\n    \"\"\"\n    return adapt_labels_knn(*args, **kwargs)\n\n\ndef adapt_labels_numpy(*args, **kwargs):\n    \"\"\"\n    Numpy version of `adapt_labels`.\n\n    See Also\n    --------\n    adapt_labels, adapt_labels_knn\n    \"\"\"\n    return adapt_labels_knn(*args, **kwargs, use_numpy=True)\n\n\ndef deghost_labels_and_predictions(data_blob, result):\n    '''\n    Given dictionaries data_blob and result, apply deghosting to\n    uresnet predictions and labels for use in later reconstruction stages.\n\n    Warning\n    -------\n    Modifies in place the input data and result dictionaries.\n\n    Note\n    ----\n    Used in analysis tools (decorator).\n\n    Parameters\n    ----------\n    data_blob: dict\n    result: dict\n    '''\n\n    result['ghost_mask'] = [\n        result['ghost'][i].argmax(axis=1) == 0 \\\n        for i in range(len(result['ghost']))]\n\n    if 'segment_label' in data_blob:\n        data_blob['true_ghost_mask'] = [\n            data_blob['segment_label'][i][:, -1] < 5 \\\n            for i in range(len(data_blob['segment_label']))]\n\n    data_blob['input_data_pre_deghost'] = data_blob['input_data']\n\n    if 'segment_label' in data_blob:\n        data_blob['input_data_true_nonghost'] = [data_blob['input_data'][i][mask] \\\n            for i, mask in enumerate(data_blob['true_ghost_mask'])]\n\n    data_blob['input_data'] = [data_blob['input_data'][i][mask] \\\n        for i, mask in enumerate(result['ghost_mask'])]\n\n    if 'cluster_label' in data_blob \\\n        and data_blob['cluster_label'] is not None:\n        # Save the clust_data before deghosting\n        data_blob['cluster_label_true_nonghost'] = data_blob['cluster_label']\n        data_blob['cluster_label'] = adapt_labels_numpy(\n            result,\n            data_blob['segment_label'],\n            data_blob['cluster_label'])\n\n    if 'seg_prediction' in result \\\n        and result['seg_prediction'] is not None:\n        result['seg_prediction'] = [\n            result['seg_prediction'][i][result['ghost_mask'][i]] \\\n            for i in range(len(result['seg_prediction']))]\n\n    if 'segmentation' in result \\\n        and result['segmentation'] is not None:\n        result['segmentation_true_nonghost'] = 
result['segmentation']\n result['segmentation'] = [\n result['segmentation'][i][result['ghost_mask'][i]] \\\n for i in range(len(result['segmentation']))]\n\n if 'kinematics_label' in data_blob \\\n and data_blob['kinematics_label'] is not None:\n data_blob['kinematics_label_true_nonghost'] = data_blob['kinematics_label']\n data_blob['kinematics_label'] = adapt_labels_numpy(\n result,\n data_blob['segment_label'],\n data_blob['kinematics_label'])\n\n # This needs to come last - in adapt_labels seg_label is the original one\n if 'segment_label' in data_blob \\\n and data_blob['segment_label'] is not None:\n data_blob['segment_label_true_nonghost'] = data_blob['segment_label']\n data_blob['segment_label'] = [\n data_blob['segment_label'][i][result['ghost_mask'][i]] \\\n for i in range(len(data_blob['segment_label']))]\n","sub_path":"mlreco/utils/deghosting.py","file_name":"deghosting.py","file_ext":"py","file_size_in_byte":15749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"107393911","text":"def is_prime(num):\r\n if num > 1:\r\n for i in range(2, num // 2 + 1):\r\n if (num % i) == 0:\r\n return False\r\n else:\r\n return True\r\n else:\r\n return False\r\n\r\nn = int(input(\"How many terms to be Displayed? \"))\r\n\r\nn1, n2 = 1, 1 \r\ncnt = 0\r\n\r\nif n == 1:\r\n print(n1)\r\nelse:\r\n while cnt < n:\r\n if not is_prime(n1) and n1 % 5 != 0:\r\n print(n1, end=' ')\r\n else:\r\n print(0, end=' ')\r\n sum = n1 + n2\r\n n1 = n2\r\n n2 = sum\r\n cnt += 1 ","sub_path":"Python/2021/1stOct_BhagyaRana.py","file_name":"1stOct_BhagyaRana.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"262442477","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portfolio', '0008_auto_20170330_1015'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Database',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ('db_type', models.CharField(default=b'Other', max_length=20, choices=[(b'NoSql', b'NoSql'), (b'RDBMS', b'RDBMS'), (b'Other', b'Other')])),\n ],\n ),\n migrations.CreateModel(\n name='Library',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='Versioning',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ],\n options={\n 'verbose_name_plural': 'Versioning',\n },\n ),\n migrations.AddField(\n model_name='project',\n name='client_name',\n field=models.CharField(max_length=60, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='project',\n name='dev_link',\n field=models.CharField(max_length=100, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='project',\n name='localization',\n field=models.BooleanField(default=False, help_text=b' ::- need to add.'),\n ),\n migrations.AddField(\n model_name='project',\n name='logo',\n field=models.ImageField(upload_to=b'media', blank=True),\n ),\n migrations.AddField(\n model_name='project',\n name='production_link',\n field=models.CharField(max_length=100, null=True, blank=True),\n ),\n 
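# NOTE: Django ignores null=True on a ManyToManyField (system check fields.W340), so the null=True passed to the third_party_library field added below has no effect.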
migrations.AlterField(\n model_name='category',\n name='status',\n field=models.BooleanField(default=False, max_length=20),\n ),\n migrations.AddField(\n model_name='project',\n name='database_used',\n field=models.ManyToManyField(to='portfolio.Database'),\n ),\n migrations.AddField(\n model_name='project',\n name='third_party_library',\n field=models.ManyToManyField(to='portfolio.Library', null=True, blank=True),\n ),\n migrations.AddField(\n model_name='project',\n name='version_controll',\n field=models.ForeignKey(to='portfolio.Versioning', null=True),\n ),\n ]\n","sub_path":"portfolio/migrations/0009_auto_20170330_1251.py","file_name":"0009_auto_20170330_1251.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"613826952","text":"#Task 1 - Wavelets and spectrogram\r\n#Complex Morlet Wavelet Example-----------------------------------------------\r\n\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\n\r\nx = np.arange(-5,5, 0.001)\r\nsigma = 2\r\nf = 1\r\ns = np.sqrt(np.pi*sigma)*np.exp(2*np.pi*1j*f*x)*np.exp(-x**2/sigma)\r\n\r\n\r\nplt.figure()\r\n\r\nplt.plot(x, np.real(s), label=\"real\")\r\nplt.plot(x, np.imag(s), label=\"imag\")\r\nplt.plot(x, np.abs(s), label=\"abs\")\r\nplt.legend()\r\nplt.xlabel('Time [ms]')\r\nplt.ylabel('Amplitude [µ V]')\r\nplt.show()\r\n#Inspect wavelets from Ws-----------------------------------------------------\r\nimport numpy as np\r\nfrom wavelet import wavelet_coeffs\r\nfrom wavelet import wavelet_power\r\nimport matplotlib.pylab as plt\r\n\r\ndata=np.load(r'D:\\Desktop\\Studium\\7. Semester\\Brain-Machine Interfaces\\Praktikum\\bmi2020_tasks\\MainContent\\avrgPerChannel.npy')\r\n\r\nprint(data.shape)\r\n\r\nplt.figure()\r\nfor i in range(10):\r\n for j in range(2):\r\n plt.plot(data[i,j,:])\r\n#plt.plot(data[0,0,:])\r\n#plt.plot(data[0,1,:])\r\n\r\n\r\nfreqs = np.arange(1, 256, 50)\r\nsrate = 256\r\nnco = 2\r\n\r\ncoeffs, Ws = wavelet_coeffs(data,srate,freqs,nco)\r\n#print(coeffs, Ws)\r\n#print(len(coeffs))\r\n#print(coeffs[0])\r\n\r\ncoeffs = np.array(coeffs)\r\npower = wavelet_power(coeffs) \r\nprint(power.shape)\r\n\r\n#plt.plot(power[:,0,0,0])\r\n#plt.plot(power[:,0,1,0])\r\n#plt.plot(power[:,0,2,0])\r\n#plt.plot(power[:,0,3,0])\r\n#plt.plot(power[:,0,4,0])\r\n#plt.show()\r\n\r\nplt.xlabel('Samples')\r\nplt.ylabel('Amplitude [µ V]')\r\nplt.figure()\r\nfig,ax = plt.subplots()\r\ncax = ax.imshow(power[:,:,1,0],cmap='jet',aspect='auto',interpolation='none',\\\r\nextent=[0,power.shape[1],power.shape[0],0],origin='upper')\r\n\r\nnp.save(r'D:\\Desktop\\Studium\\7. 
Semester\\Brain-Machine Interfaces\\Praktikum\\bmi2020_tasks\\MainContent\\results_task1.npy', power)\r\nplt.xlabel('Channel')\r\nplt.ylabel('Samples')\r\nplt.show()\r\n#'Plot of one frequency with wavelet for 10 samples'\r\n\r\n#Task 2.1 - Bandpass filtering with FFT\r\n#Calculation of cut-off frequency---------------------------------------------\r\nimport matplotlib.pylab as plt\r\nimport numpy as np\r\n\r\ndata=np.load(r'avrgPerChannel.npy')\r\ns = data.shape\r\nprint(s)\r\ndata_filt = np.zeros(s)\r\n\r\n#Transformation into frequency space------------------------------------------\r\n#rescaling 256 samples to 205 scale\r\nrescale = np.arange(0, 256, 256/205)\r\n#filter limit setting (removing coefficients)---------------------------------\r\nfltr1 = rescale>1\r\nfltr2 = rescale<10\r\nfltr = fltr1 & fltr2\r\nfor i in range(s[0]):\r\n    for j in range(s[1]):\r\n        coeffs=np.fft.fft(data[i,j,:])\r\n        coeffs_filt = fltr*coeffs\r\n        data_filt[i,j,:]=np.real(np.fft.ifft(coeffs_filt))\r\n        \r\n# 1 value = 1/800ms (x axis) after first transformation\r\n#plt.figure()\r\n#plt.plot(coeffs)\r\n\r\n#print(fltr)\r\n#plt.figure()\r\nplt.plot(rescale,fltr*coeffs)\r\n\r\n#Inverse Transformation into time-amplitude plane-----------------------------\r\nplt.figure()\r\nplt.plot(data_filt[5,1,:])\r\n\r\n#origin data plot for comparison\r\nplt.plot(data[5,1,:])\r\n\r\n#Task 2.2 - Bandpass filtering with FFT\r\n#Calculation of cut-off frequency---------------------------------------------\r\nimport matplotlib.pylab as plt\r\nimport numpy as np\r\n\r\ndata=np.load(r'results_task1.npy')\r\ns = data.shape\r\nprint(s)\r\ndata_filt2 = np.zeros(s)\r\n\r\n#Transformation into frequency space------------------------------------------\r\n#rescaling 256 samples to 205 scale\r\nrescale2 = np.arange(0, 256, 256/205)\r\n#filter limit setting (removing coefficients)---------------------------------\r\nfltr1 = rescale2>1\r\nfltr2 = rescale2<10\r\nfltr3 = fltr1 & fltr2\r\nfor i in range(s[1]):\r\n    for j in range(s[2]):\r\n        for k in range(s[3]):\r\n            coeffs2=np.fft.fft(data[:,i,j,k])\r\n            coeffs_filt = fltr3*coeffs2\r\n            data_filt2[:,i,j,k]=np.real(np.fft.ifft(coeffs_filt))\r\n            \r\n# 1 value = 1/800ms (x axis) after first transformation\r\n#plt.figure()\r\n#plt.plot(coeffs2)\r\n\r\n#print(fltr)\r\nplt.figure()\r\nplt.plot(rescale2,fltr3*coeffs2)\r\n\r\n#Inverse Transformation into time-amplitude plane-----------------------------\r\nplt.figure()\r\nplt.plot(data_filt2[:,5,1,0])\r\n\r\n#origin data plot for comparison\r\nplt.plot(data[:,5,1,0])","sub_path":"Philip_PartII.py","file_name":"Philip_PartII.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"127823789","text":"# Copyright (c) 2018-2019 Simons Observatory.\n# Full license can be found in the top level \"LICENSE\" file.\n\"\"\"TOAST interface tools.\n\nThis module contains code for interfacing with TOAST data representations.\n\n\"\"\"\nimport os\nimport sys\nimport re\n\nimport itertools\nimport operator\n\nimport numpy as np\n\nimport toast\nfrom toast.mpi import MPI\nfrom toast.tod.interval import intervals_to_chunklist\nimport toast.qarray as qa\n\n# Import so3g first so that it can control the import and monkey-patching\n# of spt3g. 
Then our import of spt3g_core will use whatever has been imported\n# by so3g.\nimport so3g\nfrom spt3g import core as core3g\nfrom toast.tod import spt3g_utils as s3utils\n\n\ndef tod_to_frames(\n tod,\n start_frame,\n n_frames,\n frame_offsets,\n frame_sizes,\n cache_signal=None,\n cache_flags=None,\n cache_common_flags=None,\n copy_common=None,\n copy_detector=None,\n mask_flag_common=255,\n mask_flag=255,\n units=None):\n \"\"\"Gather all data from the distributed TOD cache for a set of frames.\n\n Args:\n tod (toast.TOD): instance of a TOD class.\n start_frame (int): the first frame index.\n n_frames (int): the number of frames.\n frame_offsets (array_like): list of the first samples of all frames.\n frame_sizes (list): list of the number of samples in each frame.\n cache_signal (str): if None, read signal from TOD. Otherwise use this\n cache prefix for the detector signal timestreams.\n cache_flags (str): if None read det flags from TOD. Otherwise use\n this cache prefix for the detector flag timestreams.\n cache_common_flags (str): if None, read common flags from TOD.\n Otherwise use this cache prefix.\n copy_common (tuple): (cache name, G3 type, frame name) of each extra\n common field to copy from cache.\n copy_detector (tuple): (cache name prefix, G3 type, G3 map type,\n frame name) of each distributed detector field (excluding the\n \"signal\") to copy from cache.\n mask_flag_common (int): Bitmask to apply to common flags.\n mask_flag (int): Bitmask to apply to per-detector flags.\n units: G3 units of the detector data.\n\n Returns:\n (list): List of frames on rank zero. Other processes have a list of\n None values.\n\n \"\"\"\n # Detector names\n detnames = tod.detectors\n\n # Local sample range\n local_first = tod.local_samples[0]\n nlocal = tod.local_samples[1]\n\n # The process grid\n detranks, sampranks = tod.grid_size\n rankdet, ranksamp = tod.grid_ranks\n\n def get_local_cache(prow, fld, cacheoff, ncache):\n \"\"\"Read a local slice of a cache field.\n \"\"\"\n mtype = None\n pdata = None\n if rankdet == prow:\n ref = tod.cache.reference(fld)\n nnz = 1\n if (len(ref.shape) > 1) and (ref.shape[1] > 0):\n nnz = ref.shape[1]\n if ref.dtype == np.dtype(np.float64):\n mtype = MPI.DOUBLE\n elif ref.dtype == np.dtype(np.int64):\n mtype = MPI.INT64_T\n elif ref.dtype == np.dtype(np.int32):\n mtype = MPI.INT32_T\n elif ref.dtype == np.dtype(np.uint8):\n mtype = MPI.UINT8_T\n else:\n msg = \"Cannot use cache field {} of type {}\"\\\n .format(fld, ref.dtype)\n raise RuntimeError(msg)\n if cacheoff is not None:\n pdata = ref.flatten()[nnz*cacheoff:nnz*(cacheoff+ncache)]\n else:\n pdata = np.zeros(0, dtype=ref.dtype)\n return (pdata, nnz, mtype)\n\n def gather_field(prow, pdata, nnz, mpitype, cacheoff, ncache, tag):\n \"\"\"Gather a single timestream buffer to the root process.\n \"\"\"\n gdata = None\n # We are going to allreduce this later, so that every process\n # knows the dimensions of the field.\n gproc = 0\n allnnz = 0\n\n # Size of the local buffer\n pz = len(pdata)\n\n if rankdet == prow:\n psizes = tod.grid_comm_row.gather(pz, root=0)\n disp = None\n totsize = None\n if ranksamp == 0:\n # We are the process collecting the gathered data.\n allnnz = nnz\n gproc = tod.mpicomm.rank\n # Compute the displacements into the receive buffer.\n disp = [0]\n for ps in psizes[:-1]:\n last = disp[-1]\n disp.append(last + ps)\n totsize = np.sum(psizes)\n # allocate receive buffer\n gdata = np.zeros(totsize, dtype=pdata.dtype)\n\n tod.grid_comm_row.Gatherv(pdata, [gdata, psizes, disp, 
mpitype],\n root=0)\n del disp\n del psizes\n\n # Now send this data to the root process of the whole communicator.\n # Only one process (the first one in process row \"prow\") has data\n # to send.\n\n # All processes find out which one did the gather\n gproc = tod.mpicomm.allreduce(gproc, MPI.SUM)\n # All processes find out the field dimensions\n allnnz = tod.mpicomm.allreduce(allnnz, MPI.SUM)\n\n mtag = 10 * tag\n\n rdata = None\n if gproc == 0:\n if gdata is not None:\n if allnnz == 1:\n rdata = gdata\n else:\n rdata = gdata.reshape((-1, allnnz))\n else:\n # Data not yet on rank 0\n if tod.mpicomm.rank == 0:\n # Receive data from the first process in this row\n rtype = tod.mpicomm.recv(source=gproc, tag=(mtag+1))\n rsize = tod.mpicomm.recv(source=gproc, tag=(mtag+2))\n rdata = np.zeros(rsize, dtype=np.dtype(rtype))\n tod.mpicomm.Recv(rdata, source=gproc, tag=mtag)\n # Reshape if needed\n if allnnz > 1:\n rdata = rdata.reshape((-1, allnnz))\n elif (tod.mpicomm.rank == gproc):\n # Send our data\n tod.mpicomm.send(gdata.dtype.char, dest=0, tag=(mtag+1))\n tod.mpicomm.send(len(gdata), dest=0, tag=(mtag+2))\n tod.mpicomm.Send(gdata, 0, tag=mtag)\n return rdata\n\n # For efficiency, we are going to gather the data for all frames at once.\n # Then we will split those up when doing the write.\n\n # Frame offsets relative to the memory buffers we are gathering\n fdataoff = [0]\n for f in frame_sizes[:-1]:\n last = fdataoff[-1]\n fdataoff.append(last+f)\n\n # The list of frames- only on the root process.\n fdata = None\n if tod.mpicomm.rank == 0:\n fdata = [core3g.G3Frame(core3g.G3FrameType.Scan)\n for f in range(n_frames)]\n else:\n fdata = [None for f in range(n_frames)]\n\n def flags_to_intervals(flgs):\n \"\"\"Convert a flag vector to an interval list.\n \"\"\"\n groups = [\n [i for i, value in it] for key, it in\n itertools.groupby(enumerate(flgs), key=operator.itemgetter(1))\n if key != 0]\n chunks = list()\n for grp in groups:\n chunks.append([grp[0], grp[-1]])\n return chunks\n\n def split_field(data, g3t, framefield, mapfield=None, g3units=units):\n \"\"\"Split a gathered data buffer into frames.\n \"\"\"\n if tod.mpicomm.rank == 0:\n if g3t == core3g.G3VectorTime:\n # Special case for time values stored as int64_t, but\n # wrapped in a class.\n for f in range(n_frames):\n dataoff = fdataoff[f]\n ndata = frame_sizes[f]\n g3times = list()\n for t in range(ndata):\n g3times.append(core3g.G3Time(data[dataoff + t]))\n if mapfield is None:\n fdata[f][framefield] = core3g.G3VectorTime(g3times)\n else:\n fdata[f][framefield][mapfield] = \\\n core3g.G3VectorTime(g3times)\n del g3times\n elif g3t == so3g.IntervalsInt:\n # This means that the data is actually flags\n # and we should convert it into a list of intervals.\n fint = flags_to_intervals(data)\n for f in range(n_frames):\n dataoff = fdataoff[f]\n ndata = frame_sizes[f]\n datalast = dataoff + ndata\n chunks = list()\n idomain = (0, ndata-1)\n for intr in fint:\n # Interval sample ranges are defined relative to the\n # frame itself.\n cfirst = None\n clast = None\n if (intr[0] < datalast) and (intr[1] >= dataoff):\n # there is some overlap...\n if intr[0] < dataoff:\n cfirst = 0\n else:\n cfirst = intr[0] - dataoff\n if intr[1] >= datalast:\n clast = ndata - 1\n else:\n clast = intr[1] - dataoff\n chunks.append([cfirst, clast])\n if mapfield is None:\n if len(chunks) == 0:\n fdata[f][framefield] = \\\n so3g.IntervalsInt()\n else:\n fdata[f][framefield] = \\\n so3g.IntervalsInt.from_array(\n np.array(chunks, dtype=np.int64))\n 
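# NOTE: chunks holds inclusive [first, last] sample pairs relative to this frame; the domain assigned on the next line records the frame's valid sample range, idomain = (0, ndata - 1).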
fdata[f][framefield].domain = idomain\n else:\n if len(chunks) == 0:\n fdata[f][framefield][mapfield] = \\\n so3g.IntervalsInt()\n else:\n fdata[f][framefield][mapfield] = \\\n so3g.IntervalsInt.from_array(\n np.array(chunks, dtype=np.int64))\n fdata[f][framefield][mapfield].domain = idomain\n del fint\n elif g3t == core3g.G3Timestream:\n for f in range(n_frames):\n dataoff = fdataoff[f]\n ndata = frame_sizes[f]\n if mapfield is None:\n if g3units is None:\n fdata[f][framefield] = \\\n g3t(data[dataoff:dataoff+ndata])\n else:\n fdata[f][framefield] = \\\n g3t(data[dataoff:dataoff+ndata], g3units)\n else:\n if g3units is None:\n fdata[f][framefield][mapfield] = \\\n g3t(data[dataoff:dataoff+ndata])\n else:\n fdata[f][framefield][mapfield] = \\\n g3t(data[dataoff:dataoff+ndata], g3units)\n else:\n # The bindings of G3Vector seem to only work with\n # lists. This is probably horribly inefficient.\n for f in range(n_frames):\n dataoff = fdataoff[f]\n ndata = frame_sizes[f]\n if len(data.shape) == 1:\n fdata[f][framefield] = \\\n g3t(data[dataoff:dataoff+ndata].tolist())\n else:\n # We have a 2D quantity\n fdata[f][framefield] = \\\n g3t(data[dataoff:dataoff+ndata, :].flatten()\n .tolist())\n return\n\n # Compute the overlap of all frames with the local process. We want to\n # to find the full sample range that this process overlaps the total set\n # of frames.\n\n cacheoff = None\n ncache = 0\n\n for f in range(n_frames):\n # Compute overlap of the frame with the local samples.\n fcacheoff, froff, nfr = s3utils.local_frame_indices(\n local_first, nlocal, frame_offsets[f], frame_sizes[f])\n if fcacheoff is not None:\n if cacheoff is None:\n cacheoff = fcacheoff\n ncache = nfr\n else:\n ncache += nfr\n\n # Now gather the full sample data one field at a time. The root process\n # splits up the results into frames.\n\n # First collect boresight data. 
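# Standalone illustration of the flags_to_intervals() logic above:
# itertools.groupby over (index, value) pairs collapses each run of nonzero
# flags into a [first, last] sample interval. Pure stdlib, runs as-is.
import itertools
import operator

def flags_to_intervals(flgs):
    groups = [
        [i for i, value in it] for key, it in
        itertools.groupby(enumerate(flgs), key=operator.itemgetter(1))
        if key != 0]
    return [[grp[0], grp[-1]] for grp in groups]

print(flags_to_intervals([0, 1, 1, 0, 0, 1, 0, 1, 1, 1]))
# -> [[1, 2], [5, 5], [7, 9]]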
In addition to quaternions for the Az/El\n # pointing, we convert this back into angles that follow the specs\n # for telescope pointing.\n\n bore = None\n if rankdet == 0:\n bore = tod.read_boresight(local_start=cacheoff, n=ncache)\n bore = gather_field(0, bore.flatten(), 4, MPI.DOUBLE, cacheoff, ncache, 0)\n split_field(bore.reshape(-1, 4), core3g.G3VectorDouble, \"qboresight_radec\")\n\n bore = None\n if rankdet == 0:\n bore = tod.read_boresight_azel(local_start=cacheoff, n=ncache)\n bore = gather_field(0, bore.flatten(), 4, MPI.DOUBLE, cacheoff, ncache, 1)\n split_field(bore.reshape(-1, 4), core3g.G3VectorDouble, \"qboresight_azel\")\n\n if tod.mpicomm.rank == 0:\n for f in range(n_frames):\n fdata[f][\"boresight\"] = core3g.G3TimestreamMap()\n\n ang_theta, ang_phi, ang_psi = qa.to_angles(bore)\n ang_az = ang_phi\n ang_el = (np.pi / 2.0) - ang_theta\n ang_roll = ang_psi\n split_field(ang_az, core3g.G3Timestream, \"boresight\", \"az\", None)\n split_field(ang_el, core3g.G3Timestream, \"boresight\", \"el\", None)\n split_field(ang_roll, core3g.G3Timestream, \"boresight\", \"roll\", None)\n\n # Now the position and velocity information\n\n pos = None\n if rankdet == 0:\n pos = tod.read_position(local_start=cacheoff, n=ncache)\n pos = gather_field(0, pos.flatten(), 3, MPI.DOUBLE, cacheoff, ncache, 2)\n split_field(pos.reshape(-1, 3), core3g.G3VectorDouble, \"site_position\")\n\n vel = None\n if rankdet == 0:\n vel = tod.read_velocity(local_start=cacheoff, n=ncache)\n vel = gather_field(0, vel.flatten(), 3, MPI.DOUBLE, cacheoff, ncache, 3)\n split_field(vel.reshape(-1, 3), core3g.G3VectorDouble, \"site_velocity\")\n\n # Now handle the common flags- either from a cache object or from the\n # TOD methods\n\n cflags = None\n nnz = 1\n mtype = MPI.UINT8_T\n if cache_common_flags is None:\n if rankdet == 0:\n cflags = tod.read_common_flags(local_start=cacheoff, n=ncache)\n cflags &= mask_flag_common\n else:\n cflags, nnz, mtype = get_local_cache(0, cache_common_flags, cacheoff,\n ncache)\n cflags &= mask_flag_common\n cflags = gather_field(0, cflags, nnz, mtype, cacheoff, ncache, 4)\n split_field(cflags, so3g.IntervalsInt, \"flags_common\")\n\n # Any extra common fields\n\n tod.mpicomm.barrier()\n\n if copy_common is not None:\n for cindx, (cname, g3typ, fname) in enumerate(copy_common):\n cdata, nnz, mtype = get_local_cache(0, cname, cacheoff, ncache)\n cdata = gather_field(0, cdata, nnz, mtype, cacheoff, ncache, cindx)\n split_field(cdata, g3typ, fname)\n\n # Now read all per-detector quantities.\n\n # For each detector field, processes which have the detector\n # in their local_dets should be in the same process row.\n\n if tod.mpicomm.rank == 0:\n for f in range(n_frames):\n fdata[f][\"signal\"] = core3g.G3TimestreamMap()\n fdata[f][\"flags\"] = so3g.MapIntervalsInt()\n if copy_detector is not None:\n for cname, g3typ, g3maptyp, fnm in copy_detector:\n fdata[f][fnm] = g3maptyp()\n\n for dindx, dname in enumerate(detnames):\n drow = -1\n if dname in tod.local_dets:\n drow = rankdet\n # As a sanity check, verify that every process which\n # has this detector is in the same process row.\n rowcheck = tod.mpicomm.gather(drow, root=0)\n prow = 0\n if tod.mpicomm.rank == 0:\n rc = np.array([x for x in rowcheck if (x >= 0)],\n dtype=np.int32)\n prow = np.max(rc)\n if np.min(rc) != prow:\n msg = \"Processes with detector {} are not in the \"\\\n \"same row of the process grid\\n\".format(dname)\n sys.stderr.write(msg)\n tod.mpicomm.abort()\n\n # Every process finds out which process row is 
participating.\n prow = tod.mpicomm.bcast(prow, root=0)\n\n # \"signal\"\n\n detdata = None\n nnz = 1\n mtype = MPI.DOUBLE\n if cache_signal is None:\n if rankdet == prow:\n detdata = tod.read(detector=dname, local_start=cacheoff,\n n=ncache)\n else:\n cache_det = \"{}_{}\".format(cache_signal, dname)\n detdata, nnz, mtype = get_local_cache(prow, cache_det, cacheoff,\n ncache)\n detdata = gather_field(prow, detdata, nnz, mtype, cacheoff,\n ncache, dindx)\n split_field(detdata, core3g.G3Timestream, \"signal\", mapfield=dname)\n\n # \"flags\"\n\n detdata = None\n nnz = 1\n mtype = MPI.UINT8_T\n if cache_flags is None:\n if rankdet == prow:\n detdata = tod.read_flags(detector=dname, local_start=cacheoff,\n n=ncache)\n detdata &= mask_flag\n else:\n cache_det = \"{}_{}\".format(cache_flags, dname)\n detdata, nnz, mtype = get_local_cache(prow, cache_det, cacheoff,\n ncache)\n detdata &= mask_flag\n detdata = gather_field(prow, detdata, nnz, mtype, cacheoff,\n ncache, dindx)\n split_field(detdata, so3g.IntervalsInt, \"flags\", mapfield=dname)\n\n # Now copy any additional fields.\n\n if copy_detector is not None:\n for cname, g3typ, g3maptyp, fnm in copy_detector:\n cache_det = \"{}_{}\".format(cname, dname)\n detdata, nnz, mtype = get_local_cache(prow, cache_det,\n cacheoff, ncache)\n detdata = gather_field(prow, detdata, nnz, mtype, cacheoff,\n ncache, dindx)\n split_field(detdata, g3typ, fnm, mapfield=dname)\n\n return fdata\n\n\nclass ToastExport(toast.Operator):\n \"\"\"Operator which writes data to a directory tree of frame files.\n\n The top level directory will contain one subdirectory per observation.\n Each observation directory will contain frame files of the approximately\n the specified size. A single frame file will contain multiple frames.\n The size of each frame is determined by either the TOD distribution\n chunks or the separate time intervals for the observation.\n\n Args:\n outdir (str): the top-level output directory.\n prefix (str): the file name prefix for each frame file.\n use_todchunks (bool): if True, use the chunks of the original TOD for\n data distribution.\n use_intervals (bool): if True, use the intervals in the observation\n dictionary for data distribution.\n cache_name (str): The name of the cache object (_) in\n the existing TOD to use for the detector timestream. If None, use\n the read* methods from the existing TOD.\n cache_common (str): The name of the cache object in the existing TOD\n to use for common flags. If None, use the read* methods from the\n existing TOD.\n cache_flag_name (str): The name of the cache object\n (_) in the existing TOD to use for the flag\n timestream. 
If None, use the read* methods from the existing TOD.\n mask_flag_common (int): Bitmask to apply to common flags.\n mask_flag (int): Bitmask to apply to per-detector flags.\n filesize (int): The approximate file size of each frame file in\n bytes.\n units (G3TimestreamUnits): The units of the detector data.\n\n \"\"\"\n def __init__(self, outdir, prefix=\"so\", use_todchunks=False,\n use_intervals=False, cache_name=None, cache_common=None,\n cache_flag_name=None, mask_flag_common=255, mask_flag=255,\n filesize=500000000, units=None):\n self._outdir = outdir\n self._prefix = prefix\n self._cache_common = cache_common\n self._cache_name = cache_name\n self._cache_flag_name = cache_flag_name\n self._mask_flag = mask_flag\n self._mask_flag_common = mask_flag_common\n if use_todchunks and use_intervals:\n raise RuntimeError(\"cannot use both TOD chunks and Intervals\")\n self._usechunks = use_todchunks\n self._useintervals = use_intervals\n self._target_framefile = filesize\n self._units = units\n # We call the parent class constructor\n super().__init__()\n\n def _write_obs(self, writer, props, detindx):\n \"\"\"Write an observation frame.\n\n Given a dictionary of scalars, write these to an observation frame.\n\n Args:\n writer (G3Writer): The writer instance.\n props (dict): Dictionary of properties.\n detindx (dict): Dictionary of UIDs for each detector.\n\n Returns:\n None\n\n \"\"\"\n f = core3g.G3Frame(core3g.G3FrameType.Observation)\n for k, v in props.items():\n f[k] = s3utils.to_g3_type(v)\n indx = core3g.G3MapInt()\n for k, v in detindx.items():\n indx[k] = int(v)\n f[\"detector_uid\"] = indx\n writer(f)\n return\n\n def _write_precal(self, writer, dets, noise):\n \"\"\"Write the calibration frame at the start of an observation.\n\n This frame nominally contains \"preliminary\" values for the detectors.\n For simulations, this contains the true detector offsets and noise\n properties.\n\n\n \"\"\"\n qname = \"detector_offset\"\n f = core3g.G3Frame(core3g.G3FrameType.Calibration)\n # Add a vector map for quaternions\n f[qname] = core3g.G3MapVectorDouble()\n for k, v in dets.items():\n f[qname][k] = core3g.G3VectorDouble(v)\n if noise is not None:\n kfreq = \"noise_stream_freq\"\n kpsd = \"noise_stream_psd\"\n kindx = \"noise_stream_index\"\n dstr = \"noise_detector_streams\"\n dwt = \"noise_detector_weights\"\n f[kfreq] = core3g.G3MapVectorDouble()\n f[kpsd] = core3g.G3MapVectorDouble()\n f[kindx] = core3g.G3MapInt()\n f[dstr] = core3g.G3MapVectorInt()\n f[dwt] = core3g.G3MapVectorDouble()\n nse_dets = list(noise.detectors)\n nse_keys = list(noise.keys)\n st = dict()\n wts = dict()\n for d in nse_dets:\n st[d] = list()\n wts[d] = list()\n for k in nse_keys:\n f[kfreq][k] = core3g.G3VectorDouble(noise.freq(k).tolist())\n f[kpsd][k] = core3g.G3VectorDouble(noise.psd(k).tolist())\n f[kindx][k] = int(noise.index(k))\n for d in nse_dets:\n wt = noise.weight(d, k)\n if wt > 0:\n st[d].append(noise.index(k))\n wts[d].append(wt)\n for d in nse_dets:\n f[dstr][d] = core3g.G3VectorInt(st[d])\n f[dwt][d] = core3g.G3VectorDouble(wts[d])\n writer(f)\n return\n\n def _bytes_per_sample(self, ndet, nflavor):\n # For each sample we have:\n # - 1 x 8 bytes for timestamp\n # - 4 x 8 bytes for boresight RA/DEC quats\n # - 4 x 8 bytes for boresight Az/El quats\n # - 2 x 8 bytes for boresight Az/El angles\n # - 3 x 8 bytes for telescope position\n # - 3 x 8 bytes for telescope velocity\n # - 1 x 8 bytes x number of dets x number of flavors\n persample = 8 + 1 + 32 + 48 + 24 + 24 + 8 * ndet * nflavor\n 
return persample\n\n def exec(self, data):\n \"\"\"Export data to a directory tree of so3g frames.\n\n For errors that prevent the export, this function will directly call\n MPI Abort() rather than raise exceptions. This could be changed in\n the future if additional logic is implemented to ensure that all\n processes raise an exception when one process encounters an error.\n\n Args:\n data (toast.Data): The distributed data.\n\n \"\"\"\n # the two-level toast communicator\n comm = data.comm\n # the global communicator\n cworld = comm.comm_world\n # the communicator within the group\n cgroup = comm.comm_group\n # the communicator with all processes with\n # the same rank within their group\n crank = comm.comm_rank\n\n # One process checks the path\n if cworld.rank == 0:\n if not os.path.isdir(self._outdir):\n os.makedirs(self._outdir)\n cworld.barrier()\n\n for obs in data.obs:\n # Observation information. Anything here that is a simple data\n # type will get written to the observation frame.\n props = dict()\n for k, v in obs.items():\n if isinstance(v, (int, str, bool, float)):\n props[k] = v\n\n # Every observation must have a name...\n obsname = obs[\"name\"]\n\n # The TOD\n tod = obs[\"tod\"]\n nsamp = tod.total_samples\n detquat = tod.detoffset()\n detindx = tod.detindx\n ndets = len(detquat)\n detnames = tod.detectors\n\n # Get any other metadata from the TOD\n props.update(tod.meta())\n\n # First process in the group makes the output directory\n obsdir = os.path.join(self._outdir, obsname)\n if cgroup.rank == 0:\n if not os.path.isdir(obsdir):\n os.makedirs(obsdir)\n cgroup.barrier()\n\n detranks, sampranks = tod.grid_size\n\n # Determine frame sizes based on the data distribution\n framesizes = None\n if self._usechunks:\n framesizes = tod.total_chunks\n elif self._useintervals:\n if \"intervals\" not in obs:\n raise RuntimeError(\n \"Observation does not contain intervals, cannot \\\n distribute using them\")\n framesizes = intervals_to_chunklist(obs[\"intervals\"], nsamp)\n if framesizes is None:\n framesizes = [nsamp]\n\n # Examine all the cache objects and find the set of prefixes\n flavors = set()\n flavor_type = dict()\n flavor_maptype = dict()\n pat = re.compile(r\"^(.*?)_(.*)\")\n for nm in list(tod.cache.keys()):\n mat = pat.match(nm)\n if mat is not None:\n pref = mat.group(1)\n md = mat.group(2)\n if md in detnames:\n # This cache field has the form _\n if pref not in flavor_type:\n ref = tod.cache.reference(nm)\n if ref.dtype == np.dtype(np.float64):\n flavors.add(pref)\n flavor_type[pref] = core3g.G3Timestream\n flavor_maptype[pref] = core3g.G3TimestreamMap\n elif ref.dtype == np.dtype(np.int32):\n flavors.add(pref)\n flavor_type[pref] = core3g.G3VectorInt\n flavor_maptype[pref] = core3g.G3MapVectorInt\n elif ref.dtype == np.dtype(np.uint8):\n flavors.add(pref)\n flavor_type[pref] = so3g.IntervalsInt\n flavor_maptype[pref] = so3g.MapIntervalsInt\n else:\n msg = \"Cache prefix {} has unsupported \\\n data type. Skipping export\"\n raise RuntimeError(msg)\n flavors.discard(self._cache_name)\n flavors.discard(self._cache_flag_name)\n copy_flavors = [\n (x, flavor_type[x], flavor_maptype[x], \"signal_{}\".format(x))\n for x in flavors]\n\n print(\"found cache flavors \", flavors, flush=True)\n\n # Given the dimensions of this observation, compute the frame\n # file sizes and all relevant offsets.\n\n frame_sample_offs = None\n file_sample_offs = None\n file_frame_offs = None\n if cgroup.rank == 0:\n # Compute the frame file breaks. 
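# Worked check of the fixed per-sample byte count in _bytes_per_sample()
# above: 8 (timestamp) + 1 (presumably one byte of common flags -- the
# comment block does not list it, so this is an assumption) + 32 (RA/DEC
# quats) + 48 (Az/El quats at 4 x 8 plus the 2 x 8 of angles listed in the
# comment; note the code actually writes three angle streams az/el/roll, so
# the estimate is approximate) + 24 (position) + 24 (velocity) = 137 bytes,
# plus 8 bytes per detector and flavor.
fixed = 8 + 1 + 32 + 48 + 24 + 24
assert fixed == 137
ndet, nflavor = 2, 3
print(fixed + 8 * ndet * nflavor)  # 185 bytes per sample for this example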
We ignore the observation\n # and calibration frames since they are small.\n sampbytes = self._bytes_per_sample(len(detquat), len(flavors))\n\n file_sample_offs, file_frame_offs, frame_sample_offs = \\\n s3utils.compute_file_frames(\n sampbytes, framesizes,\n file_size=self._target_framefile)\n\n file_sample_offs = cgroup.bcast(file_sample_offs, root=0)\n file_frame_offs = cgroup.bcast(file_frame_offs, root=0)\n frame_sample_offs = cgroup.bcast(frame_sample_offs, root=0)\n\n ex_files = [os.path.join(obsdir,\n \"{}_{:08d}.g3\".format(self._prefix, x))\n for x in file_sample_offs]\n\n # Loop over each frame file. Write the header frames and then\n # gather the data from all processes before writing the scan\n # frames.\n\n for ifile, (ffile, foff) in enumerate(zip(ex_files,\n file_frame_offs)):\n nframes = None\n print(\" ifile = {}, ffile = {}, foff = {}\".format(ifile, ffile, foff), flush=True)\n if ifile == len(ex_files) - 1:\n # we are at the last file\n nframes = len(framesizes) - foff\n else:\n # get number of frames in this file\n nframes = file_frame_offs[ifile+1] - foff\n\n writer = None\n if cgroup.rank == 0:\n writer = core3g.G3Writer(ffile)\n self._write_obs(writer, props, detindx)\n if \"noise\" in obs:\n self._write_precal(writer, detquat, obs[\"noise\"])\n else:\n self._write_precal(writer, detquat, None)\n\n # Collect data for all frames in the file in one go.\n\n frm_offsets = [frame_sample_offs[foff+f]\n for f in range(nframes)]\n frm_sizes = [framesizes[foff+f] for f in range(nframes)]\n\n if cgroup.rank == 0:\n print(\" {} file {}\".format(obsdir, ifile), flush=True)\n print(\" start frame = {}, nframes = {}\".format(foff, nframes), flush=True)\n print(\" frame offs = \", frm_offsets, flush=True)\n print(\" frame sizes = \", frm_sizes, flush=True)\n\n fdata = tod_to_frames(\n tod, foff, nframes, frm_offsets, frm_sizes,\n cache_signal=self._cache_name,\n cache_flags=self._cache_flag_name,\n cache_common_flags=self._cache_common,\n copy_common=None,\n copy_detector=copy_flavors,\n units=self._units)\n\n if cgroup.rank == 0:\n for fdt in fdata:\n writer(fdt)\n del writer\n del fdata\n\n return\n","sub_path":"sotodlib/data/toast.py","file_name":"toast.py","file_ext":"py","file_size_in_byte":32581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"105684532","text":"\"\"\"Ngenix test task.\n(C) 2021 by Vitaly Bogomolov mail@vitaly-bogomolov.ru\n\"\"\"\n\nfrom threading import Thread\nfrom random import randint\nfrom zipfile import ZipFile\nfrom xml.sax import ContentHandler, parseString\nfrom csv import writer\n\ntry:\n from queue import Queue\nexcept ImportError:\n from Queue import Queue # Python2\n\nZIP_THREADS = 10\n\nZIP_FILES = 50\nXML_IN_ZIP = 100\n\nXML_LEVEL_MIN = 1\nXML_LEVEL_MAX = 100\nXML_OBJECTS_MIN = 1\nXML_OBJECTS_MAX = 10\n\nXML_OBJECT_TEMPLATE = \"\"\nXML_TEMPLATE = \"\"\"\n\n\n\n{}\n\n\n\"\"\"\n\n\ndef make_xml(zip_id, xml_num):\n \"\"\"Create xml content according rules.\"\"\"\n xml_id = \"Z{}X{}\".format(zip_id, xml_num)\n\n return XML_TEMPLATE.format(\n xml_id,\n randint(XML_LEVEL_MIN, XML_LEVEL_MAX),\n '\\n'.join([\n XML_OBJECT_TEMPLATE.format(\"{}OBJ{}\".format(xml_id, i))\n for i in range(randint(XML_OBJECTS_MIN, XML_OBJECTS_MAX))\n ])\n )\n\n\nclass NgenixXml(ContentHandler):\n \"\"\"Ngenix xml sax parser.\"\"\"\n\n def __init__(self, csv1, csv2):\n \"\"\"Args csv1 and csv2 are the queues for csv rows.\"\"\"\n ContentHandler.__init__(self)\n self.var_id = None\n self.csv1 = csv1\n self.csv2 = 
csv2\n\n def startElement(self, name, attrs):\n \"\"\"Put parsed values to csv queues.\"\"\"\n if name == 'var':\n attr_name = attrs.getValue('name')\n if attr_name == 'id':\n self.var_id = attrs.getValue('value')\n elif attr_name == 'level':\n self.csv1.put((self.var_id, attrs.getValue('value')))\n elif name == 'object':\n self.csv2.put((self.var_id, attrs.getValue('name')))\n\n\ndef handle_zip(mode, zip_names, csv1, csv2):\n \"\"\"Get zip file names and handle according mode.\n mode must be 'w' or 'r'.\n \"\"\"\n while True:\n zip_id = zip_names.get()\n if zip_id is None:\n break\n\n with ZipFile(\"{}.zip\".format(zip_id), mode) as zfile:\n if mode == 'w':\n for i in range(XML_IN_ZIP):\n zfile.writestr(\"{}.xml\".format(i), make_xml(zip_id, i))\n else:\n parser = NgenixXml(csv1, csv2)\n for i in range(XML_IN_ZIP):\n parser.var_id = None\n parseString(zfile.read(\"{}.xml\".format(i)), parser)\n\n zip_names.task_done()\n\n zip_names.task_done()\n\n\ndef make_csv(file_name, queue_csv):\n \"\"\"Get data from queue and put to csv file.\"\"\"\n fhandle = open(file_name, 'w')\n output = writer(fhandle)\n while True:\n row = queue_csv.get()\n if row is None:\n break\n output.writerow(row)\n queue_csv.task_done()\n\n queue_csv.task_done()\n fhandle.close()\n\n\ndef init_zip_queue(zip_names):\n \"\"\"Fill zip file names and terminator marks for working threads.\"\"\"\n for i in range(ZIP_FILES):\n zip_names.put(i)\n for _ in range(ZIP_THREADS):\n zip_names.put(None)\n\n\ndef main():\n \"\"\"Write zips, read zips/xml, write csv.\"\"\"\n zip_names = Queue()\n\n print(\"Step 1: Create zip files.\")\n init_zip_queue(zip_names)\n\n for _ in range(ZIP_THREADS):\n Thread(target=handle_zip, args=('w', zip_names, None, None)).start()\n zip_names.join()\n\n print(\"Step 2: Create csv files.\")\n init_zip_queue(zip_names)\n csv1 = Queue()\n csv2 = Queue()\n\n for _ in range(ZIP_THREADS):\n Thread(target=handle_zip, args=('r', zip_names, csv1, csv2)).start()\n\n Thread(target=make_csv, args=('1.csv', csv1)).start()\n Thread(target=make_csv, args=('2.csv', csv2)).start()\n\n zip_names.join() # wait for all zips handled\n\n csv1.put(None)\n csv2.put(None)\n csv1.join()\n csv2.join()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"threading/source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"625026805","text":"\nimport os\nimport glob\nimport shutil\n\nfrom IPython import embed\n\nimport numpy as np\n\nimport pytest\n\nfrom pypeit.tests.tstutils import dev_suite_required\nfrom pypeit.pypeitsetup import PypeItSetup\nfrom pypeit.par.util import parse_pypeit_file\n\n@dev_suite_required\ndef test_deimos():\n # Raw DEIMOS directory\n raw_dir = os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'keck_deimos')\n\n # Get the list of setup directories\n setups = glob.glob(os.path.join(raw_dir, '*'))\n\n # Set the output path and *remove if* if it already exists\n output_path = os.path.join(os.getcwd(), 'output')\n if os.path.isdir(output_path):\n shutil.rmtree(output_path)\n\n # Iterate through the setups\n for setup in setups:\n \n # Find the relevant pypeit file constructed by hand.\n by_hand_pypeit = os.path.join(os.getenv('PYPEIT_DEV'), 'pypeit_files',\n 'keck_deimos_{0}.pypeit'.format(\n os.path.split(setup)[1].lower()))\n\n if not os.path.isfile(by_hand_pypeit):\n # It doesn't exist, so assume there is no by-hand pypeit\n # file to compare to\n continue\n\n # Run pypeit_setup\n 
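# Minimal sketch of the sentinel-terminated worker pattern used by
# handle_zip()/make_csv() above: one None is queued per worker to tell it to
# stop, and task_done() is called for the sentinel too so that join()
# returns. Pure stdlib; the payloads are illustrative.
from queue import Queue
from threading import Thread

N_WORKERS = 3

def worker(q):
    while True:
        item = q.get()
        if item is None:
            q.task_done()
            break
        print('handled', item)
        q.task_done()

q = Queue()
for i in range(10):
    q.put(i)
for _ in range(N_WORKERS):
    q.put(None)
for _ in range(N_WORKERS):
    Thread(target=worker, args=(q,)).start()
q.join()  # returns once every item and every sentinel is task_done()'d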
ps = PypeItSetup.from_file_root(setup, 'keck_deimos', output_path=output_path)\n ps.run(setup_only=True)\n # Write the automatically generated pypeit data\n pypeit_files = ps.fitstbl.write_pypeit(output_path, cfg_lines=ps.user_cfg)\n\n # Read the frame types from both the by-hand and automated\n # pypeit files\n _, _, by_hand_frametypes, _, _ = parse_pypeit_file(by_hand_pypeit, file_check=False)\n _, _, auto_frametypes, _, _ = parse_pypeit_file(pypeit_files[0], file_check=False)\n\n # For each file in the by-hand list, check that the frame types\n # in the automatically generated pypeit file are identical\n for f in by_hand_frametypes.keys():\n type_list = np.sort(by_hand_frametypes[f].split(','))\n if 'science' in type_list or 'standard' in type_list:\n # Only ensuring that calibrations are correctly typed\n continue\n assert f in auto_frametypes.keys(), \\\n 'Frame {0} not automatically parsed for setup {1}.'.format(f, setup)\n assert np.array_equal(type_list, np.sort(auto_frametypes[f].split(','))), \\\n 'Frame types differ for file {0} in setup {1}\\n'.format(f, setup) \\\n + ' By-hand types: {0}'.format(by_hand_frametypes[f]) \\\n + ' Automated types: {0}'.format(auto_frametypes[f])\n\n # Clean up after every setup\n shutil.rmtree(output_path)\n\n","sub_path":"pypeit/tests/test_frametype.py","file_name":"test_frametype.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"602331244","text":"from django.urls import path\n\nfrom market.views import ProductList, DiscountView, RemoveDiscountView, ProductReviewView, \\\n ProductReviewDetailsView, CharacteristicsView, PropertiesView, GroupPropertiesView, ProductAvailabilityView\n\nfrom rest_framework import routers\n\nrouter = routers.SimpleRouter()\nrouter.register('products', ProductList, basename='products')\nrouter.register('characteristics', CharacteristicsView, basename='characteristics')\nrouter.register('properties', PropertiesView, basename='properties')\nrouter.register('group-properties', GroupPropertiesView, basename='group-properties')\nrouter.register('product-availability', ProductAvailabilityView, basename='product-availability')\n\n\nurlpatterns = [\n path('products/set-discount', DiscountView.as_view({'post': 'post'})),\n path('products/remove-discount', RemoveDiscountView.as_view({'post': 'post'})),\n path('products//reviews', ProductReviewView.as_view({'get': 'list', 'post': 'create'})),\n path('products//reviews/', ProductReviewDetailsView.as_view({'delete': 'destroy'})),\n\n]\nurlpatterns += router.urls","sub_path":"market_place/market/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"186826982","text":"'''\nTic Tac Toe -- Raspberry Pi GPIO -- v0.0\nAmanda on Electrothoughts -- March 2016\n--arcade game\n--four-way keypad\n--3x3 LED grid\n--sound FX\n--decent computer AI\n--uses software PWM\nFunctions defined here:\nstartLEDs\nup\nright\nselect\ncheckForWin\ncomputerMove\nplaySong\n'''\n\nimport RPi.GPIO as GPIO, time, random\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n#LED grid vars; \"col0\" is column 1\ncol0 = [5, 17, 25]\ncol1 = [6, 27, 8]\ncol2 = [13, 22, 7]\n\n#generate grid\nmatriz = [col0, col1, col2]\n\nupb = 21\nrightb = 26\nselectb = 16\nspeaker = 18\n\n#setup GPIO\nGPIO.setup(col0, GPIO.OUT, initial=0)\nGPIO.setup(col1, GPIO.OUT, initial=0)\nGPIO.setup(col2, GPIO.OUT, 
initial=0)\nGPIO.setup(upb, GPIO.IN)\nGPIO.setup(rightb, GPIO.IN)\nGPIO.setup(selectb, GPIO.IN)\nGPIO.setup(speaker, GPIO.OUT)\nspeaker = GPIO.PWM(speaker, 50)\nspeaker.start(0)\n\n#setup PWM objects in grid\nfor row in matriz:\n for i in row:\n matriz[matriz.index(row)][row.index(i)] = GPIO.PWM(i, 50)\n\n#lists to store moves made\nplayed =[]\ncomputer_moves = []\nplayer_moves = []\n\n#for setting the freq and duty cycle of player's and computer's 'pieces'\nplayer_freq = 3\ncomputer_freq = 50\ncomputer_duty_cycle = 10\n\n#game vars\nplaying_game = True\nwinner = None\nplayers_turn = True\n\n#coordinates for cursor\n#LEDs referenced by coordinate plane, where (0,0) is lower left corner\neixo_x = 0\neixo_y = 0\n\n#winning sequences, should be eight total\nwinning_sequences = [[matriz[0][0], matriz[1][0], matriz[2][0]],\n [matriz[0][1], matriz[1][1], matriz[2][1]],\n [matriz[0][2], matriz[1][2], matriz[2][2]],\n [matriz[0][0], matriz[0][1], matriz[0][2]],\n [matriz[1][0], matriz[1][1], matriz[1][2]],\n [matriz[2][0], matriz[2][1], matriz[2][2]],\n [matriz[0][0], matriz[1][1], matriz[2][2]],\n [matriz[2][0], matriz[1][1], matriz[0][2]]]\n\n#events for buttons\nGPIO.add_event_detect(upb, GPIO.FALLING, bouncetime = 200)\nGPIO.add_event_detect(rightb, GPIO.FALLING, bouncetime=200)\nGPIO.add_event_detect(selectb, GPIO.FALLING, bouncetime=200)\n\n#faulty func\ndef startLEDs(duty_cycle, freq):\n for row in matriz:\n for i in row:\n i.start(duty_cycle)\n i.ChangeFrequency(freq)\n\n#DELETE THIS FUNC\ndef flash(freq):\n for row in matriz:\n for i in row:\n i.stop()\n i.start(50)\n i.ChangeFrequency(freq)\n\n time.sleep(3)\n\n for row in matriz:\n for i in row:\n i.ChangeDutyCycle(0)\n\ndef select():\n\n global eixo_x\n global eixo_y\n global players_turn\n\n if matriz[eixo_x][eixo_y] not in played:\n matriz[eixo_x][eixo_y].ChangeDutyCycle(50)\n matriz[eixo_x][eixo_y].ChangeFrequency(player_freq)\n played.append(matriz[eixo_x][eixo_y])\n player_moves.append(matriz[eixo_x][eixo_y])\n players_turn = False\n speaker.ChangeFrequency(800)\n speaker.ChangeDutyCycle(50)\n time.sleep(0.25)\n speaker.ChangeDutyCycle(0)\n return players_turn\n\n else:\n players_turn = True\n speaker.ChangeFrequency(450)\n speaker.ChangeDutyCycle(50)\n time.sleep(0.25)\n speaker.ChangeDutyCycle(0)\n return players_turn\n\ndef up():\n\n global eixo_y\n global eixo_x\n\n if eixo_y < 2:\n eixo_y += 1\n matriz[eixo_x][eixo_y].ChangeDutyCycle(100)\n matriz[eixo_x][eixo_y].ChangeFrequency(50)\n if matriz[eixo_x][eixo_y-1] in played:\n if matriz[eixo_x][eixo_y-1] in player_moves:\n matriz[eixo_x][eixo_y - 1].ChangeDutyCycle(50)\n matriz[eixo_x][eixo_y - 1].ChangeFrequency(player_freq)\n if matriz[eixo_x][eixo_y-1] in computer_moves:\n matriz[eixo_x][eixo_y - 1].ChangeDutyCycle(computer_duty_cycle)\n matriz[eixo_x][eixo_y - 1].ChangeFrequency(computer_freq)\n else:\n matriz[eixo_x][eixo_y - 1].ChangeDutyCycle(0)\n speaker.ChangeFrequency(650)\n speaker.ChangeDutyCycle(50)\n time.sleep(0.15)\n speaker.ChangeDutyCycle(0)\n\ndef right():\n\n global eixo_y\n global eixo_x\n\n if eixo_x < 2:\n eixo_x += 1\n matriz[eixo_x][eixo_y].ChangeDutyCycle(100)\n matriz[eixo_x][eixo_y].ChangeFrequency(50)\n if matriz[eixo_x-1][eixo_y] in played:\n if matriz[eixo_x-1][eixo_y] in player_moves:\n matriz[eixo_x - 1][eixo_y].ChangeDutyCycle(50)\n matriz[eixo_x - 1][eixo_y].ChangeFrequency(player_freq)\n if matriz[eixo_x-1][eixo_y] in computer_moves:\n matriz[eixo_x - 1][eixo_y].ChangeDutyCycle(computer_duty_cycle)\n matriz[eixo_x - 
1][eixo_y].ChangeFrequency(computer_freq)\n else:\n matriz[eixo_x - 1][eixo_y].ChangeDutyCycle(0)\n speaker.ChangeFrequency(650)\n speaker.ChangeDutyCycle(50)\n time.sleep(0.15)\n speaker.ChangeDutyCycle(0)\n\ndef checkForWin():\n\n global winning_sequences\n global computer_moves\n global player_moves\n\n check = 0\n\n for sequence in winning_sequences:\n for point in sequence:\n if point in computer_moves:\n check += 1\n else:\n break\n if check == 3:\n return 'computer'\n check = 0\n\n for sequence in winning_sequences:\n for point in sequence:\n if point in player_moves:\n check += 1\n else:\n break\n if check == 3:\n return 'player'\n check = 0\n\n if (len(player_moves) + len(computer_moves)) >= 9:\n return 'tie'\n\n return None\n\n\n######---Start Buzzer Code---######\n\n#frequencies of musical notes suitable to be played on buzzer\nc = [32, 65, 131, 262]\ndb = [34, 69, 139, 277]\nd = [36, 73, 147, 294]\neb = [37, 78, 156, 311]\ne = [41, 82, 165, 330]\nf = [43, 87, 175, 349]\ngb = [46, 92, 185, 370]\ng = [49, 98, 196, 392]\nab = [52, 104, 208, 415]\na = [55, 110, 220, 440]\nbb = [58, 117, 223, 466]\nb = [61, 123, 246, 492]\n\n#this func plays a melody given a list of notes and beats\ndef playSong(songnotes, songbeats, tempo):\n speaker.ChangeDutyCycle(50)\n for i in range(0, len(songnotes)):\n speaker.ChangeFrequency(songnotes[i])\n time.sleep(songbeats[i]*tempo)\n speaker.ChangeDutyCycle(0)\n\n#data list vars for melodies\nloser_song_notes = [a[1], g[1], a[1], e[1], a[0]]\nloser_song_beats = [1, 1, 2, 4, 4]\nvictory_song_notes = [g[2], g[1], g[2], d[3], b[2], d[3], g[3]]\nvictory_song_beats = [0.8, 0.2, 1, 2, 1, 1, 4]\ntied_song_notes = [g[2], g[1], c[1], g[1], g[0], c[0]]\ntied_song_beats = [1, 1, 1, 1, 1, 1, 2]\n\n######---End Buzzer Code---######\n\ntry:\n #intro flash\n startLEDs(0, 1)\n startLEDs(50, 2)\n time.sleep(2)\n startLEDs(0, 2)\n\n while playing_game:\n\n while players_turn:\n if GPIO.event_detected(upb):\n up()\n if GPIO.event_detected(rightb):\n right()\n if GPIO.event_detected(selectb):\n select()\n\n if checkForWin() == 'player':\n winner = 'player'\n playing_game = False\n break\n\n if checkForWin() == 'tie':\n winner = 'tie'\n playing_game = False\n break\n\n time.sleep(1.25)\n #computerMove()\n\n #see if anyone won\n if checkForWin() == 'computer':\n\n for move in computer_moves:\n move.ChangeFrequency(computer_freq)\n move.stop()\n move.start(computer_duty_cycle)\n winner = 'computer'\n playing_game = False\n break\n\n if checkForWin() == 'tie':\n for move in computer_moves:\n move.ChangeFrequency(computer_freq)\n move.stop()\n move.start(computer_duty_cycle)\n winner = 'tie'\n playing_game = False\n break\n\n #display current moves\n for move in computer_moves:\n move.ChangeFrequency(computer_freq)\n move.stop()\n move.start(computer_duty_cycle)\n\n for move in player_moves:\n move.ChangeFrequency(player_freq)\n move.stop()\n move.start(50)\n\n #main game loop exited\n\n time.sleep(0.5)\n\n #display winning/losing animation and song\n\n if winner == 'player':\n startLEDs(0, 2)\n startLEDs(50, 2)\n playSong(victory_song_notes, victory_song_beats, 0.25)\n time.sleep(2)\n startLEDs(0, 1)\n\n if winner == 'computer':\n startLEDs(0, 2)\n startLEDs(50, 2)\n playSong(loser_song_notes, loser_song_beats, 0.25)\n time.sleep(3)\n startLEDs(0, 1)\n\n if winner == 'tie':\n startLEDs(0, 2)\n startLEDs(50, 2)\n playSong(tied_song_notes, tied_song_beats, 0.25)\n time.sleep(3)\n startLEDs(0, 1)\n\nexcept KeyboardInterrupt:\n 
GPIO.cleanup()","sub_path":"tictactoe2.py","file_name":"tictactoe2.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"484369337","text":"import gensim\nimport random\nimport numpy as np\nimport pickle as pkl\n\ntype=[\"union\",\"intersection\",\"uberon\",\"mp\",\"go\"]\n\nfor tp in type:\n print(tp)\n with open(\"../data/\"+tp+\"_disease_gene.pkl\",\"rb\") as f:\n disease_gene=pkl.load(f)\n with open(\"../data/\"+tp+\"_gene_set.pkl\",\"rb\") as f:\n gene_set=pkl.load(f)\n print(len(gene_set))\n\n model=gensim.models.Word2Vec.load(\"../opamodel/\"+tp+\".model\")\n\n disease_set=[disease for disease in disease_gene.keys()]\n random.shuffle(disease_set)\n train_disease=disease_set[:int(len(disease_set)*0.8)]\n\n\n positive_data=[]\n negative_data=[]\n gene_list=[gene for gene in gene_set]\n\n eval_dict=dict()\n\n for disease in disease_gene.keys():\n disease_vec=model[disease]\n if disease in train_disease:\n genes=disease_gene[disease]\n\n # generate the positive data\n for gene in genes:\n gene_vec=model[gene]\n gene_disease_vec=np.append(gene_vec,disease_vec,0)\n positive_data.append(gene_disease_vec)\n\n # generate the negative data\n\n random_index_list=[]\n while(len(random_index_list)<50):\n random_index=random.choice(range(len(gene_list)))\n if (random_index_list not in random_index_list):\n if(gene_list[random_index] not in genes):\n random_index_list.append(random_index)\n\n for index in random_index_list:\n gene=gene_list[index]\n gene_vec=model[gene]\n gene_disease_vec=np.append(gene_vec,disease_vec,0)\n negative_data.append(gene_disease_vec)\n\n\n else:\n eval_dict[disease]=disease_gene[disease]\n\n\n with open(\"../data/\"+tp+\"_eval_data.pkl\",\"wb\") as f:\n pkl.dump(eval_dict,f)\n with open(\"../data/\"+tp+\"_positive_data.pkl\",\"wb\") as f:\n pkl.dump(positive_data,f)\n with open(\"../data/\"+tp+\"_negative_data.pkl\",\"wb\") as f:\n pkl.dump(negative_data,f)\n","sub_path":"phenoinfer/preprocessing/nn_data_process.py","file_name":"nn_data_process.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"194672689","text":"\nimport sympy\nimport unyt as u\n\nimport gmso\nfrom gmso.lib.potential_templates import RyckaertBellemansTorsionPotential\nfrom gmso.lib.potential_templates import OPLSTorsionPotential\nfrom gmso.exceptions import GMSOError \n\ndef convert_opls_to_ryckaert(opls_connection_type):\n \"\"\"Convert an OPLS dihedral to Ryckaert-Bellemans dihedral\n\n Equations taken/modified from:\n http://manual.gromacs.org/documentation/2019/\n reference-manual/functions/bonded-interactions.html\n\n NOTE: the conventions defining the dihedral angle are different\n for OPLS and RB torsions. 
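# Note on the negative-sampling loop in nn_data_process.py above: the guard
# `if (random_index_list not in random_index_list)` compares the list with
# its own integer elements, so it is always True and duplicate indices can
# slip through. Presumably the intended test was on the candidate index
# itself; a corrected standalone sketch:
import random

def sample_negative_indices(gene_list, positive_genes, n=50):
    chosen = []
    while len(chosen) < n:
        idx = random.randrange(len(gene_list))
        if idx not in chosen and gene_list[idx] not in positive_genes:
            chosen.append(idx)
    return chosen

print(sample_negative_indices(list('abcdefghij'), {'a', 'b'}, n=5))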
OPLS torsions are defined with\n phi_cis = 0 while RB torsions are defined as phi_trans = 0.\n \"\"\"\n\n valid_connection_type = False\n if ( opls_connection_type.independent_variables ==\n OPLSTorsionPotential().independent_variables ):\n if sympy.simplify(opls_connection_type.expression -\n OPLSTorsionPotential().expression) == 0:\n valid_connection_type = True\n if not valid_connection_type:\n raise GMSOError('Cannot use convert_opls_to_ryckaert '\n 'function to convert a ConnectionType that is not an '\n 'OPLSTorsionPotential')\n\n f0 = opls_connection_type.parameters['k0']\n f1 = opls_connection_type.parameters['k1']\n f2 = opls_connection_type.parameters['k2']\n f3 = opls_connection_type.parameters['k3']\n f4 = opls_connection_type.parameters['k4']\n\n converted_params = {\n 'c0' : (f2 + 0.5 * (f0 + f1 + f3)),\n 'c1' : (0.5 * (-f1 + 3. * f3)),\n 'c2' : (-f2 + 4. * f4),\n 'c3' : (-2. * f3),\n 'c4' : (-4. * f4),\n 'c5' : 0. * u.Unit('kJ/mol')\n }\n\n name = RyckaertBellemansTorsionPotential().name\n expression = RyckaertBellemansTorsionPotential().expression\n variables = RyckaertBellemansTorsionPotential().independent_variables\n\n ryckaert_connection_type = gmso.DihedralType(\n name=name,\n expression=expression,\n independent_variables=variables,\n parameters=converted_params)\n\n return ryckaert_connection_type\n\ndef convert_ryckaert_to_opls(ryckaert_connection_type):\n \"\"\"Convert Ryckaert-Bellemans dihedral to OPLS\n\n NOTE: the conventions defining the dihedral angle are different\n for OPLS and RB torsions. OPLS torsions are defined with\n phi_cis = 0 while RB torsions are defined as phi_trans = 0.\n \"\"\"\n\n valid_connection_type = False\n if ( ryckaert_connection_type.independent_variables ==\n RyckaertBellemansTorsionPotential().independent_variables ):\n if sympy.simplify(ryckaert_connection_type.expression -\n RyckaertBellemansTorsionPotential().expression) == 0:\n valid_connection_type = True\n if not valid_connection_type:\n raise GMSOError('Cannot use convert_ryckaert_to_opls '\n 'function to convert a ConnectionType that is not an '\n 'RyckaertBellemansTorsionPotential')\n\n\n c0 = ryckaert_connection_type.parameters['c0']\n c1 = ryckaert_connection_type.parameters['c1']\n c2 = ryckaert_connection_type.parameters['c2']\n c3 = ryckaert_connection_type.parameters['c3']\n c4 = ryckaert_connection_type.parameters['c4']\n c5 = ryckaert_connection_type.parameters['c5']\n\n if c5 != 0.0:\n raise GMSOError('Cannot convert Ryckaert-Bellemans dihedral '\n 'to OPLS dihedral if c5 is not equal to zero.')\n\n converted_params = {\n 'k0' : 2. * (c0 + c1 + c2 + c3 + c4),\n 'k1' : (-2. * c1 - (3./2.) * c3),\n 'k2' : (-c2 - c4),\n 'k3' : ((-1./2.) * c3),\n 'k4' : ((-1./4.) 
* c4)\n }\n\n name = OPLSTorsionPotential().name\n expression = OPLSTorsionPotential().expression\n variables = OPLSTorsionPotential().independent_variables\n\n opls_connection_type = gmso.DihedralType(\n name=name,\n expression=expression,\n independent_variables=variables,\n parameters=converted_params)\n\n return opls_connection_type\n\n","sub_path":"gmso/utils/conversions.py","file_name":"conversions.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568281091","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport json\n\nimport socket\nimport hashlib\n\nimport os\nimport sys\nimport time\n\nBASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_PATH)\n\nfrom conf import protocol\nfrom client_log import logger\n\nRECV_SIZE = 1024\nIP = 'localhost'\nPORT = 9999\n\n\nclass FTPClient(object):\n def __init__(self):\n self.client = socket.socket()\n self.username = None\n self.USER_ID = None\n self.home = os.path.join(BASE_PATH, 'home')\n self.show = list()\n\n def connect(self, ip, port):\n self.client.connect((ip, port))\n\n def authenticate(self, username, password):\n m = hashlib.md5(password.encode())\n m.update(username.encode())\n\n msg_dict = {\n 'username': username,\n 'action': 'authenticate',\n 'USER_ID': m.hexdigest()\n }\n self.client.send(json.dumps(msg_dict).encode())\n\n recv_dict = self.client.recv(RECV_SIZE)\n data_dict = json.loads(recv_dict.decode())\n if data_dict.get('status_code') == protocol.SUCCESS_CODE:\n self.username = username\n self.USER_ID = m.hexdigest()\n self.home = os.path.join(self.home, username)\n return True\n else:\n return False\n\n def push(self, *args):\n cmd_list = args[0].split()\n if len(cmd_list) != 2:\n logger.warning('need 2 parameters, but give %s parameters' %\n len(cmd_list))\n return\n\n filename = cmd_list[1]\n if not os.path.isfile(filename):\n logger.warning('file: %s is not exist' % filename)\n return\n\n filesize = os.stat(filename).st_size\n msg_dict = {\n 'username': self.username,\n 'USER_ID': self.USER_ID,\n 'action': 'push',\n 'filename': filename,\n 'filesize': filesize,\n }\n self.client.send(json.dumps(msg_dict).encode())\n\n res_data = self.client.recv(RECV_SIZE)\n res_dict = json.loads(res_data.decode())\n if res_dict['status_code'] == protocol.DISK_NOT_ENOUGH:\n logger.warning('disk space is not enough')\n return\n\n m = hashlib.md5()\n f = open(filename, 'rb')\n for line in f:\n self.client.send(line)\n m.update(line)\n else:\n f.close()\n\n msg_dict['FILE_ID'] = m.hexdigest()\n self.client.send(json.dumps(msg_dict).encode())\n data = self.client.recv(RECV_SIZE)\n data_dict = json.loads(data.decode())\n if data_dict['status_code'] == protocol.SUCCESS_CODE:\n logger.info('upload file: %s success' % filename)\n else:\n logger.info('upload file: %s fail' % filename)\n\n def pull(self, *args):\n cmd_list = args[0].split()\n if len(cmd_list) != 2:\n logger.warning('need 2 parameters, but give %s parameters' %\n len(cmd_list))\n return\n\n filename = cmd_list[1]\n msg_dict = {\n 'username': self.username,\n 'action': 'pull',\n 'filename': filename,\n }\n self.client.send(json.dumps(msg_dict).encode())\n self.data = self.client.recv(RECV_SIZE)\n res_dict = json.loads(self.data.decode())\n if res_dict['status_code'] == protocol.FILE_NOT_EXIST:\n logger.warning('file %s is not exist' % filename)\n return\n else:\n self.client.send(b'ok')\n logger.info('file %s is receiving ...' 
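# Quick numeric check that the OPLS -> Ryckaert-Bellemans parameter map
# above is inverted exactly by the RB -> OPLS map (plain floats stand in for
# the unyt quantities; the k values are arbitrary test inputs).
f = {'k0': 1.0, 'k1': 2.0, 'k2': -0.5, 'k3': 0.25, 'k4': 3.0}
c0 = f['k2'] + 0.5 * (f['k0'] + f['k1'] + f['k3'])
c1 = 0.5 * (-f['k1'] + 3. * f['k3'])
c2 = -f['k2'] + 4. * f['k4']
c3 = -2. * f['k3']
c4 = -4. * f['k4']
back = {
    'k0': 2. * (c0 + c1 + c2 + c3 + c4),
    'k1': -2. * c1 - (3. / 2.) * c3,
    'k2': -c2 - c4,
    'k3': (-1. / 2.) * c3,
    'k4': (-1. / 4.) * c4,
}
assert all(abs(back[k] - f[k]) < 1e-12 for k in f), back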
% filename)\n\n receive_size = 0\n m = hashlib.md5()\n filesize = res_dict['filesize']\n self.show = [x for x in range(10, 0, -1)]\n f = open(filename, 'wb')\n while receive_size < filesize:\n if filesize - receive_size > RECV_SIZE:\n SIZE = RECV_SIZE\n else:\n SIZE = filesize - receive_size\n data = self.client.recv(SIZE)\n f.write(data)\n m.update(data)\n receive_size += len(data)\n self.show_bar(receive_size, filesize)\n else:\n f.close()\n logger.info('file %s has received done' % filename)\n data = self.client.recv(RECV_SIZE)\n res_dict = json.loads(data.decode())\n if res_dict['FILE_ID'] == m.hexdigest():\n logger.info('file %s has downloaded successfully' % filename)\n else:\n logger.info('file %s has downloaded fail' % filename)\n if os.path.isfile(filename):\n os.remove(filename)\n\n def ls(self, *args):\n msg_dict = {\n 'username': self.username,\n 'action': 'ls',\n 'path': self.home,\n }\n self.client.send(json.dumps(msg_dict).encode())\n self.data = self.client.recv(RECV_SIZE)\n res_dict = json.loads(self.data.decode())\n content_list = res_dict['content']\n print('content list'.center(20, '-'))\n for content in content_list:\n print(content)\n print('-' * 20)\n\n def cd(self, *args):\n cmd_list = args[0].split()\n if len(cmd_list) != 2:\n logger.warning('need 2 parameters, but give %s parameters' %\n len(cmd_list))\n return\n\n path_list = self.home.split(os.sep)\n index = path_list.index(self.username)\n if cmd_list[1] == '..':\n if index + 1 < len(path_list):\n path_list.pop()\n self.home = os.sep.join(path_list)\n else:\n logger.warning('You must change directory in home directory')\n else:\n msg_dict = {\n 'username': self.username,\n 'action': 'cd',\n 'dirname': cmd_list[1],\n 'path': self.home,\n }\n self.client.send(json.dumps(msg_dict).encode())\n data = self.client.recv(RECV_SIZE)\n res_dict = json.loads(data.decode())\n if res_dict['status_code'] == protocol.SUCCESS_CODE:\n path_list.append(cmd_list[1])\n self.home = os.sep.join(path_list)\n elif res_dict['status_code'] == protocol.FILE_DIR_NOT_EXIST:\n logger.warning('No such file or directory')\n else:\n logger.warning('Not a directory')\n\n def pwd(self, *args):\n path_list = self.home.split(os.sep)\n index = path_list.index('home')\n if os.path.isfile(path_list[-1]):\n path = '/'.join(path_list[index:-1])\n else:\n path = '/'.join(path_list[index:])\n print('/%s' % path)\n\n def interaction(self):\n while True:\n username = input(\"Enter username: \").strip()\n password = input(\"Enter password: \").strip()\n if not username or not password:\n print('\\t\\033[31;0m用户名和密码,皆不能为空\\033[0m')\n continue\n\n res = self.authenticate(username, password)\n if not res:\n print('\\t\\033[31;0m用户名或密码错误\\033[0m')\n continue\n else:\n print('\\t\\033[32;0mwelcome %s\\033[0m' % username)\n\n while True:\n path_list = self.home.split(os.sep)\n if os.path.isfile(path_list[-1]):\n position = path_list[-2]\n else:\n position = path_list[-1]\n cmd = input('[%s@%s]$ ' % (self.username, position)).strip()\n if len(cmd) == 0:\n continue\n\n cmd_str = cmd.split()[0]\n if hasattr(self, cmd_str):\n func = getattr(self, cmd_str)\n func(cmd)\n else:\n print('\\t\\033[32;0m请输入help,进行查询\\033[0m')\n\n time.sleep(0.1)\n\n def help(self, *args):\n msg = \"\"\"\n ls\n pwd\n cd ..\n cd dirname\n push filename\n pull filename\n \"\"\"\n print(\"请输入以下命令:\")\n print(msg)\n\n def quit(self, *args):\n msg_dict = {\n 'action': 'quit',\n 'username': self.username\n }\n self.client.send(json.dumps(msg_dict).encode())\n self.client.close()\n sys.exit()\n\n 
def show_bar(self, recv_size, total):\n if int((recv_size / total) * 10) in self.show:\n logger.info('recv ..... {0:.2%}'.format(recv_size / total))\n self.show.pop()\n\n\ndef main():\n client_obj = FTPClient()\n client_obj.connect(IP, PORT)\n client_obj.interaction()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"network-program/FTP/ftp_client/socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"365344048","text":"from transitions.extensions import GraphMachine\n\nimport xml.etree.ElementTree as ET\nimport urllib.request\nimport apiai\nimport json\n\nWEATHER_API_KEY = 'WEATHER_API_KEY'\nweather_response = urllib.request.urlopen('http://opendata.cwb.gov.tw/opendataapi?dataid=F-C0032-001&authorizationkey=' + WEATHER_API_KEY)\nair_respons = urllib.request.urlopen('http://opendata.epa.gov.tw/ws/Data/REWXQA/?%24orderby=SiteName&%24skip=0&%24top=1000&format=xml')\nuv_response = urllib.request.urlopen('http://opendata.epa.gov.tw/ws/Data/UV/?format=xml')\n\nweather_tree = ET.parse(weather_response).getroot()\nair_tree = ET.parse(air_respons).getroot()\nuv_tree = ET.parse(uv_response).getroot()\n\nclass TocMachine(GraphMachine):\n def __init__(self, **machine_configs):\n self.machine = GraphMachine(\n model = self,\n **machine_configs\n )\n\n def default_query(self, update):\n text = update.message.text\n return 1\n\n def weather_query(self, update):\n ptxt = intent_parser(update.message.text)\n text = ptxt['result']['fulfillment']['speech']\n print(ptxt)\n return text == '天氣'\n\n def weather_city_query(self, update):\n text = update.message.text\n return 1\n\n def air_query(self, update):\n ptxt = intent_parser(update.message.text)\n text = ptxt['result']['fulfillment']['speech']\n print(ptxt)\n return text == '空氣'\n\n def air_city_query(self, update):\n text = update.message.text\n return 1\n\n def uv_query(self, update):\n ptxt = intent_parser(update.message.text)\n text = ptxt['result']['fulfillment']['speech']\n print(ptxt)\n return text == '紫外線'\n\n def uv_city_query(self, update):\n text = update.message.text\n return 1\n\n def uvi_query(self, update):\n ptxt = intent_parser(update.message.text)\n text = ptxt['result']['fulfillment']['speech']\n return text == 'uvi'\n\n def on_enter_state1(self, update):\n update.message.reply_text(\"你好~ 可以問我目前的 天氣狀況、空氣狀況、或紫外線情形喔!\\n你想知道些什麼呢~?\\n\\n(e.g. 今天天氣如何?)\")\n self.go_back(update)\n\n def on_exit_state1(self, update):\n print('Leaving state1')\n\n def on_enter_state2(self, update):\n update.message.reply_text(\"你想知道目前哪個縣市的天氣狀況呢?\\n(e.g. 
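# The FTP client above streams files through hashlib.md5 while sending or
# receiving them; a standalone version of that integrity pattern, hashing a
# file in fixed-size binary chunks (the path is illustrative).
import hashlib

def file_md5(path, chunk_size=1024):
    m = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            m.update(chunk)
    return m.hexdigest()

# file_md5('some_file.bin')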
臺南市)\")\n #self.go_back(update)\n\n def on_exit_state2(self, update):\n print('Leaving state2')\n\n def on_enter_state3(self, update):\n ptxt = intent_parser(update.message.text)\n #text = ptxt['result']['parameters']['Taiwan-city']\n print(ptxt)\n if ptxt['result']['fulfillment']['speech'] != '你在說啥':\n for location in weather_tree.findall('.//{urn:cwb:gov:tw:cwbcommon:0.1}location'):\n if ptxt['result']['parameters']['Taiwan-city'] in location[0].text:\n update.message.reply_text('%s目前的天氣為%s。\\n' \\\n '溫度為 %s 至 %s ℃,降雨機率為 %s %%。' \\\n % (location[0].text, location[1][1][2][0].text,\n location[3][1][2][0].text, location[2][1][2][0].text,\n location[5][1][2][0].text))\n break\n else:\n for location in weather_tree.findall('.//{urn:cwb:gov:tw:cwbcommon:0.1}location'):\n if '臺南市' in location[0].text:\n update.message.reply_text('對不起,無法辨識您想查詢的是哪個城市,\\n為您查詢臺南市的空氣資訊:\\n%s目前的天氣為%s。\\n' \\\n '溫度為 %s 至 %s ℃,降雨機率為 %s %%。' \\\n % (location[0].text, location[1][1][2][0].text,\n location[3][1][2][0].text, location[2][1][2][0].text,\n location[5][1][2][0].text))\n break\n self.go_back(update)\n\n def on_exit_state3(self, update):\n print('Leaving state3')\n\n def on_enter_state4(self, update):\n update.message.reply_text(\"你想知道目前哪個縣市的空氣狀況呢?\\n(e.g. 臺南市)\")\n\n def on_exit_state4(self, update):\n print('Leaving state4')\n\n def on_enter_state5(self, update):\n ptxt = intent_parser(update.message.text)\n if ptxt['result']['fulfillment']['speech'] != '你在說啥':\n for data in air_tree.findall('./Data'):\n if ptxt['result']['parameters']['Taiwan-city'] in data[1].text:\n update.message.reply_text('%s目前空氣品質%s!\\n一氧化碳濃度(CO):%s\\n' \\\n '臭氧濃度(O3):%s\\n二氧化氮濃度(NO2):%s\\n懸浮微粒濃度(PM10):%s\\n' \\\n '細懸浮微粒濃度(PM2.5):%s\\n' \\\n % (data[1].text, data[4].text, data[6].text, data[7].text,\n data[10].text, data[8].text, data[9].text))\n break\n else:\n for data in air_tree.findall('./Data'):\n if '臺南市' in data[1].text:\n update.message.reply_text('對不起,無法辨識您想查詢的是哪個城市,\\n為您查詢臺南市的空氣資訊:\\n%s目前空氣品質%s!\\n一氧化碳濃度(CO):%s\\n' \\\n '臭氧濃度(O3):%s\\n二氧化氮濃度(NO2):%s\\n懸浮微粒濃度(PM10):%s\\n' \\\n '��懸浮微粒濃度(PM2.5):%s\\n' \\\n % (data[1].text, data[4].text, data[6].text, data[7].text,\n data[10].text, data[8].text, data[9].text))\n break\n self.go_back(update)\n\n def on_exit_state5(self, update):\n print('Leaving state5')\n\n def on_enter_state6(self, update):\n update.message.reply_text(\"你想知道目前哪個縣市的紫外線狀況呢?\\n(e.g. 臺南市)\")\n\n def on_exit_state6(self, update):\n print('Leaving state6')\n\n def on_enter_state7(self, update):\n ptxt = intent_parser(update.message.text)\n if ptxt['result']['fulfillment']['speech'] != '你在說啥':\n for data in uv_tree.findall('./Data'):\n if ptxt['result']['parameters']['Taiwan-city'] in data[3].text:\n if int(data[1].text) <= 2:\n update.message.reply_text('%s目前紫外線氣象指標(UVI)為 %s。\\n目前 UVI 屬於弱等級,基本上不須要保護措施!\\n可以安心外出,但請留意瞬間紫外線。\\n\\np.s. 如果不清楚紫外線氣象指標(UVI)是什麼的話,也可以問我喔~!' \\\n % (data[3].text, data[1].text))\n elif int(data[1].text) <= 7:\n update.message.reply_text('%s目前紫外線氣象指標(UVI)為 %s。\\n目前 UVI 屬於中、強等級!需要保護措施!\\n外出時,請盡量待在陰涼處,並使用長袖衣物、帽子、陽傘、防曬乳、太陽眼鏡作為保護!\\n\\np.s. 如果不清楚紫外線氣象指標(UVI)是什麼的話,也可以問我喔~!' \\\n % (data[3].text, data[1].text))\n else:\n update.message.reply_text('%s目前紫外線氣象指標(UVI)為 %s。\\n目前 UVI 屬於極強、危險等級!必須要保護措施!!\\n上午 10 點至下午 2 點最好不要外出!盡量待在室內,並使用帽子、陽傘、防曬乳、太陽眼鏡作為保護!\\n\\np.s. 如果不清楚紫外線氣象指標(UVI)是什麼的話,也可以問我喔~!' 
\\\n % (data[3].text, data[1].text))\n break\n else:\n for data in uv_tree.findall('./Data'):\n if '臺南市' in data[3].text:\n if int(data[1].text) <= 2.0:\n update.message.reply_text('對不起,無法辨識您想查詢的是哪個城市,\\n為您查詢臺南市的空氣資訊:\\n\\n%s目前紫外線氣象指標(UVI)為 %s。\\n目前 UVI 屬於弱等級,基本上不須要保護措施!\\n可以安心外出,但請留意瞬間紫外線。\\n\\np.s. 如果不清楚紫外線氣象指標(UVI)是什麼的話,也可以問我喔~!' \\\n % (data[3].text, data[1].text))\n elif int(data[1].text) <= 7.0:\n update.message.reply_text('對不起,無法辨識您想查詢的是哪個城市,\\n為您查詢臺南市的空氣資訊:\\n\\n%s目前紫外線氣象指標(UVI)為 %s。\\n目前 UVI 屬於中、強等級!需要保護措施!\\n外出時,請盡量待在陰涼處,並使用長袖衣物、帽子、陽傘、防曬乳、太陽眼鏡作為保護!\\n\\np.s. 如果不清楚紫外線氣象指標(UVI)是什麼的話,也可以問我喔~!' \\\n % (data[3].text, data[1].text))\n else:\n update.message.reply_text('對不起,無法辨識您想查詢的是哪個城市,\\n為您查詢臺南市的空氣資訊:\\n\\n%s目前紫外線氣象指標(UVI)為 %s。\\n目前 UVI 屬於極強、危險等級!必須要保護措施!!\\n上午 10 點至下午 2 點最好不要外出!盡量待在室內,並使用帽子、陽傘、防曬乳、太陽眼鏡作為保護!\\n\\np.s. 如果不清楚紫外線氣象指標(UVI)是什麼的話,也可以問我喔~!' \\\n % (data[3].text, data[1].text))\n break\n self.go_back(update)\n\n def on_exit_state7(self, update):\n print('Leaving state7')\n\n def on_enter_state8(self, update):\n update.message.reply_text(\"UVI (Ultraviolet INDEX)由世界衛生組織(WHO)、聯合國環境組織、世界氣象組織…等所開發所制定的標準,氣象上用來表示紫外線照射的安全範圍,避免過度暴露於紫外線而產生病變。1994年世界氣象組織向世界各國推廣紫外線指標,UVI被公認為無形的紫外傷害的最佳指標。UVI數值越高,表示潛在的危險越高,因採取避可能對皮膚和眼睛的傷害。\")\n update.message.reply_photo(\"https://drive.google.com/open?id=0B5U04sdfA2_1Y1VxT1c3d1BzQk0\")\n self.go_back(update)\n\n def on_exit_state8(self, update):\n print('Leaving state8')\n\ndef intent_parser(input):\n client = apiai.ApiAI('3963ac09560347289b24325bad76401a')\n\n request = client.text_request()\n request.query = input\n\n response = request.getresponse()\n return json.loads(response.read().decode())","sub_path":"fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":10623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"25062529","text":"import xaby\nfrom xaby.utils import to_tuple\n\nimport jax.numpy as np\nfrom jax import jit, random, vmap\nfrom jax.lax import conv_with_general_padding\n\n\ndef init_weights(k, shape):\n key = xaby.random.key()\n sqrt_k = np.sqrt(k)\n return random.uniform(key, shape=shape, minval=-sqrt_k, maxval=sqrt_k)\n\n\nclass Linear(xaby.Op):\n def __init__(self, in_size, out_size):\n super().__init__()\n self.params = {}\n self.params[\"weights\"] = init_weights(1 / in_size, shape=(out_size, in_size))\n self.params[\"bias\"] = init_weights(1 / in_size, shape=(out_size,))\n\n self._build_op()\n\n def forward(self):\n @jit\n def func(x, params):\n w, b = params[\"weights\"], params[\"bias\"]\n return np.matmul(x, w.T) + b\n\n return func\n\n def __repr__(self):\n return f\"Linear{self.params['weights'].shape}\"\n\n\nclass Conv2d(xaby.Op):\n def __init__(self, in_features, out_features, kernel_size=3, strides=1, padding=0):\n super().__init__()\n\n kernel = to_tuple(kernel_size, (2,))\n kernel_shape = (out_features, in_features, kernel[0], kernel[1])\n k = 1 / (in_features * np.prod(np.array(kernel)))\n self.params[\"weights\"] = init_weights(k, kernel_shape)\n self.params[\"bias\"] = init_weights(k, shape=(1, out_features, 1, 1))\n\n self.in_features = in_features\n self.out_features = out_features\n self.kernel = kernel\n self.strides = to_tuple(strides, (2,))\n self.padding = to_tuple(padding, 2)\n\n self._build_op()\n\n def forward(self):\n def func(x, params):\n conv = conv_with_general_padding(\n x, params[\"weights\"], self.strides, self.padding, None, None\n )\n return conv + params[\"bias\"]\n\n return jit(func)\n\n def __repr__(self):\n return (\n 
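# Standalone illustration of the namespaced findall() calls used in fsm.py
# above: ElementTree needs the Clark-notation {uri}tag form (or a prefix
# map) to match elements in the CWB feed. The inline XML stands in for the
# live opendata response; the element names are illustrative.
import xml.etree.ElementTree as ET

NS = 'urn:cwb:gov:tw:cwbcommon:0.1'
doc = '''<root xmlns="%s">
  <location><locationName>A</locationName></location>
  <location><locationName>B</locationName></location>
</root>''' % NS
root = ET.fromstring(doc)
for loc in root.findall('.//{%s}location' % NS):
    print(loc[0].text)  # prints A then B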
f\"Conv2d({self.in_features}, {self.out_features}, strides={self.strides})\"\n )\n","sub_path":"xaby/nn/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74930163","text":"import numpy as np\n\nfrom random import shuffle\n\nrand = np.random\n\n#........................................................................\n\nclass Data_synth:\n \n max_seq_len = 10\n\n strs = [str(chr(i)) for i in range(ord('A'), ord('Z')+1)]\n nums = [str(chr(i)) for i in range(ord('0'), ord('9')+1)]\n\n n_train_samples = 3000\n n_test_samples = 200\n\n @classmethod\n def synth(_, synther):\n X = []\n Y = []\n for i in range(_.n_train_samples + _.n_test_samples):\n x, y = synther.synth()\n X.append(x)\n Y.append(y)\n return X, Y\n\n#........................................................................\n\nclass Synther_base:\n\n def b_e_wrap(_, a): # begin/end wrap\n return ['<'] + a + ['>']\n\n def str(_, i):\n return Data_synth.strs[i]\n \n def num(_, i):\n return Data_synth.nums[i]\n\n def get_random_str_seq(_, m):\n return [_.str(rand.randint(len(Data_synth.strs))) for i in range(m)]\n\n#........................................................................\n\nclass Synther_1a(Synther_base): \n\n def __init__(_):\n _.name = '1a'\n _.description = 'echo last item in sequence, as binary'\n \n def synth(_):\n m = max(1, rand.randint(Data_synth.max_seq_len-2))\n s = _.get_random_str_seq(m)\n l = float(ord(s[-1]) % 2)\n return (_.b_e_wrap(s), l)\n \n#........................................................................\n\nclass Synther_1b(Synther_base):\n def __init__(_):\n _.name = '1b'\n _.description = 'echo second to last item, as binary'\n def synth(_):\n m = max(2, rand.randint(Data_synth.max_seq_len-2))\n s = _.get_random_str_seq(m)\n l = float(ord(s[-2]) % 2)\n return (_.b_e_wrap(s), l)\n \n#........................................................................\n\nclass Synther_1c(Synther_base):\n def __init__(_):\n _.name = '1c'\n _.description = 'echo third to last item, as binary'\n def synth(_):\n m = max(3, rand.randint(Data_synth.max_seq_len-2))\n s = _.get_random_str_seq(m)\n l = float(ord(s[-3]) % 2)\n return (_.b_e_wrap(s), l)\n \n#........................................................................\n\nclass Synther_1d(Synther_base):\n def __init__(_):\n _.name = '1d'\n _.description = 'echo the first item in the sequence, as binary'\n def synth(_):\n m = max(1, rand.randint(Data_synth.max_seq_len-2))\n s = _.get_random_str_seq(m)\n l = float(ord(s[0]) % 2)\n return (_.b_e_wrap(s), l)\n \n#........................................................................\n\nclass Synther_2a(Synther_base):\n def __init__(_):\n _.name = '2a'\n _.description = 'single signal in sequence of noise'\n def synth(_):\n m = max(1, rand.randint(Data_synth.max_seq_len-2-1))\n s = _.get_random_str_seq(m)\n num = _.num(rand.randint(len(Data_synth.nums))) # TODO helper\n i_num = rand.randint(len(s))\n s = s[:i_num] + [num] + s[i_num:]\n l = float(ord(num) % 2)\n return (_.b_e_wrap(s), l)\n\n#........................................................................\n\nclass Synther_2b(Synther_base):\n def __init__(_):\n _.name = '2b'\n _.description = '2-skip-gram signal in sequence of noise'\n def synth(_):\n m = Data_synth.max_seq_len-2-2 # max(1, rand.randint(Data_synth.max_seq_len-2-2))\n s = _.get_random_str_seq(m)\n \n num1 = _.num(rand.randint(len(Data_synth.nums)))\n 
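# The init_weights() helper above draws from U(-sqrt(k), sqrt(k)); with
# k = 1/fan_in this works out to the same bound as PyTorch's default Linear
# initialization. A plain-numpy equivalent for reference (the names here
# are illustrative, not part of the xaby API):
import numpy as np

def uniform_fan_in(fan_in, shape, seed=0):
    rng = np.random.default_rng(seed)
    bound = np.sqrt(1.0 / fan_in)
    return rng.uniform(-bound, bound, size=shape)

w = uniform_fan_in(64, (32, 64))
print(w.min(), w.max())  # both within (-0.125, 0.125)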
i_num1 = rand.randint(len(s))\n        s = s[:i_num1] + [num1] + s[i_num1:]\n\n        num2 = _.num(rand.randint(len(Data_synth.nums)))\n        i_num2 = rand.randint(len(s))\n        s = s[:i_num2] + [num2] + s[i_num2:]\n        if i_num2 <= i_num1:\n            i_num1 += 1\n        \n        if i_num2 < i_num1:\n            num1, num2 = num2, num1\n        l = 1. if num1 <= num2 else 0.\n        \n        return (_.b_e_wrap(s), l)\n    \n#........................................................................\n\nclass Synther_2c(Synther_base):\n    def __init__(_):\n        _.name = '2c'\n        _.description = '3-skip-gram signal in sequence of noise'\n\n        _.triples = []\n        for i in range(10):\n            for j in range(10):\n                for k in range(10):\n                    _.triples.append((i, j, k))\n        shuffle(_.triples)\n        _.triples = _.triples[:100]\n\n    def synth(_):\n        \n        m = Data_synth.max_seq_len-2-3 # max(1, rand.randint(Data_synth.max_seq_len-2-3))\n        s = _.get_random_str_seq(m)\n\n        triple_values = _.triples[rand.randint(len(_.triples))]\n        triple_indices = sorted([rand.randint(len(s)) for i in range(len(triple_values))])\n        \n        for k in range(len(triple_values) -1, -1, -1):\n            v = triple_values[k]\n            i = triple_indices[k]\n            s = s[:i] + [str(v)] + s[i:]\n        \n        l = hash(''.join(str(triple_values))) % 2\n        \n        return (_.b_e_wrap(s), l)\n\n#........................................................................\n\nclass Synther_2d(Synther_base):\n    def __init__(_):\n        _.name = '2d'\n        _.description = '4-skip-gram signal in sequence of noise'\n\n        _.quads = []\n        for i in range(10):\n            for j in range(10):\n                for k in range(10):\n                    for m in range(10):\n                        _.quads.append((i, j, k, m))\n        shuffle(_.quads)\n        _.quads = _.quads[:100]\n\n    def synth(_):\n        \n        m = Data_synth.max_seq_len-2-4 # max(1, rand.randint(Data_synth.max_seq_len-2-4))\n        s = _.get_random_str_seq(m)\n\n        quad_values = _.quads[rand.randint(len(_.quads))]\n        quad_indices = sorted([rand.randint(len(s)) for i in range(len(quad_values))])\n        \n        for k in range(len(quad_values) -1, -1, -1):\n            v = quad_values[k]\n            i = quad_indices[k]\n            s = s[:i] + [str(v)] + s[i:]\n        \n        l = hash(''.join(str(quad_values))) % 2\n        \n        return (_.b_e_wrap(s), l)\n    \n#........................................................................\n\nclass Synther_collection:\n    synthers = [Synther_1a(), \n                Synther_1b(), \n                Synther_1c(), \n                Synther_1d(),\n                Synther_2a(), \n                Synther_2b(),\n                Synther_2c(),\n                Synther_2d()]\n\n","sub_path":"Python/umtanum-a/data_synth.py","file_name":"data_synth.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"282969784","text":"# ----Hou Jue 2020.08.21----\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nimport seaborn as sns\nfrom sklearn import tree\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2020.8.21\n\n@author: MARs\n\"\"\"\n\nimport os\n#\n\nprint('------------------read data--------------------')\nimport folium\nimport webbrowser\n\nimport csv\n\n# import datetime\n'''\nVehicle terminal ID (unique vehicle identifier)\nControl word (A: normal)\nBusiness status (0: normal, 1: alarm)\nPassenger status (0: occupied, 1: vacant)\nRoof-light status (0: in service, 1: waiting, 2: dispatched, 3: paused, 4: help requested, 5: out of service)\nBusiness status (0: surface road, 1: expressway)\nBusiness status (0: no braking, 1: braking)\nMeaningless field\nData received time\nTerminal GPS time\nLongitude\nLatitude\nSpeed\nHeading\nSatellite count\nMeaningless field\n'''\n\n'''private car'''\n# data_PHEV_private=pd.read_excel('...\\\\....xlsx',sheet_name='...')\n# data_EV_private=pd.read_excel('...\\\\....xlsx',sheet_name='...')\n\n'''ride_hailing car'''\n# data_PHEV_ride_hailing=pd.read_excel('...\\\\....xlsx',sheet_name='...')\n\n'''taxi'''\n# 
data_EV_taxi=pd.read_excel('...\\\\....xlsx',sheet_name='...')\n\n'''test'''\ndata_PHEV_private=pd.read_excel('F:\\\\PycharmProjects\\\\ACM\\\\(Data)Vehicle trajectory\\\\data\\\\data\\\\PHEV_2.xlsx',sheet_name='Raw Data')\n\n\n\n''' time\n    vehiclestatus\n    sumvoltage\n    summileage\n    sumcurrent\n    speed\n    soc\n    runmodel (1:EV 2:PHEV 3:diesel)\n    insulationresisitance\n    gearnum\n    dcdcstatus\n    chargestatus\n    \n    longitude\n    lon\n    locationstate\n    latitude\n    lat\n'''\n# data_taxi=pd.read_csv('F:\\\\PycharmProjects\\\\ACM\\\\(Data)Vehicle trajectory\\\\data\\\\taxi.csv')\n# data=pd.read_csv(f,names=['数据采集时间',' ','累积行驶里程','定位状态','东经.西经',\\\n#                           '北纬.南纬','经度','维度','方向',\\\n#                           '速度','电机控制器温度','驱动电机转速','驱动电机温度','电机母线电流','加速踏板行程','制动踏板状态',\\\n#                           '动力系统就绪','电池剩余电量(SOC)','电池剩余能量','高压电池电流','电池总电压','单体最高温', \\\n#                           '单体最低温度','单体最高电压','单体最低电压','绝缘电阻值','电池包最高温度','电池包最高温度_1',\\\n#                           '电池包最低温度','电池包最低温度_1','电池均衡激活',\\\n#                           '紧急下电请求','启动时间','液体燃料消耗量','上下线状态','熄火时间','车辆当前状态'])\n# data=pd.read_csv(f,names=['ID','status_control','status_work','status_passenger','status_light',\\\n#                           'status_lane','status_break','null','time_receive',\\\n#                           'time_GPS','lat','lon','speed','direction','satellite','null'],sep='|')\n#\n'''Summary statistics for each column of the data'''\nprint(data_PHEV_private.describe())\n'''Unique values of a given column'''\n# print(\"data['定位状态'].unique()\",data_PHEV_private['定位状态'].unique())\n# print(\"data.unique()\",data.unique())\nprint(data_PHEV_private.head())\n'''Kernel density estimate'''\ndata_PHEV_private['speed'].plot(kind='kde')\nprint(\"max(data['speed']\",max(data_PHEV_private['speed']))\nprint(\"min(data['speed']\",min(data_PHEV_private['speed']))\n\n\n'''Pairwise Pearson correlation matrix of the features'''\nprint(data_PHEV_private.corr())\n\nsns.heatmap(data_PHEV_private.corr(),annot=True)\n'''Box plot'''\ndata_PHEV_private.plot(kind='box')\n# plt.show()\n# data_PHEV_private['数据采集时间']=pd.to_datetime(data_PHEV_private['数据采集时间'])\n\n'''pandas hist'''\ndata_PHEV_private.hist(bins=20)\n# plt.show()\n\n'''------------'''\ncoords=np.array([data_PHEV_private['latitude'],data_PHEV_private['longitude']])\ncoords=coords.T\n\n# earth's radius in km\nkms_per_radian = 6371.0088\n# define epsilon as 0.01 kilometers, converted to radians for use by haversine\n''':parameters 0.01 10'''\nepsilon = 0.01 / kms_per_radian\n\n# eps is the max distance that points can be from each other to be considered in a cluster\n# min_samples is the minimum cluster size (everything else is classified as noise)\ndb = DBSCAN(eps=epsilon, min_samples=10, algorithm='ball_tree', metric='haversine').fit(np.radians(coords))\ncluster_labels = db.labels_\n# get the number of clusters (ignore noisy samples which are given the label -1)\nnum_clusters = len(set(cluster_labels) - set([-1]))\n\n# print( 'Clustered ' + str(len(df_min)) + ' points to ' + str(num_clusters) + ' clusters')\nprint( 'Clustered ' + str(len(coords)) + ' points to ' + str(num_clusters) + ' clusters')\n# turn the clusters in to a pandas series\nclusters = pd.Series([coords[cluster_labels == n] for n in range(num_clusters)])\nprint(clusters)\n\n\nprint('------------------generate cluster figure-------------------')\nfrom shapely.geometry import MultiPoint\nfrom geopy.distance import great_circle\ndef get_centermost_point(cluster):\n    centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)\n    centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)\n    return tuple(centermost_point)\n\n# get the centroid point for each cluster\ncentermost_points = clusters.map(get_centermost_point)\nlats, lons = zip(*centermost_points)\nrep_points = 
pd.DataFrame({'lon':lons, 'lat':lats})\nfig, ax = plt.subplots(figsize=[10, 6])\n# NOTE: the four hard-coded indices below assume DBSCAN found at least four clusters\nrs_scatter = ax.scatter(rep_points['lon'][0], rep_points['lat'][0], c='#dd8a81', edgecolor='None', alpha=0.7, s=450)\nax.scatter(rep_points['lon'][1], rep_points['lat'][1], c='#dd8a81', edgecolor='None', alpha=0.7, s=250)\nax.scatter(rep_points['lon'][2], rep_points['lat'][2], c='#dd8a81', edgecolor='None', alpha=0.7, s=250)\nax.scatter(rep_points['lon'][3], rep_points['lat'][3], c='#dd8a81', edgecolor='None', alpha=0.7, s=150)\n\n'''longitude and latitude are swapped here'''\n# for i in range(5):\n#     df_scatter = ax.scatter(data_lon_day6, data_lat_day6, c='k', alpha=0.9, s=3)\n\ndf_scatter = ax.scatter(data_PHEV_private['longitude'], data_PHEV_private['latitude'],c='k', alpha=0.9, s=3)\nax.set_title('(PHEV Day02) 2016/12/2-2016/12/3 GPS trace and cluster points')\nax.set_xlabel('Longitude')\nax.set_ylabel('Latitude')\nax.legend([df_scatter, rs_scatter], ['GPS points', 'Cluster centers'], loc='upper right')\n\nlabels = ['cluster{0}'.format(i) for i in range(1, num_clusters+1)]\nfor label, x, y in zip(labels, rep_points['lon'], rep_points['lat']):\n    plt.annotate(\n        label,\n        xy = (x, y), xytext = (-25, -30),\n        textcoords = 'offset points', ha = 'right', va = 'bottom',\n        bbox = dict(boxstyle = 'round,pad=1', fc = 'white', alpha = 0.5),\n        arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\n\nplt.show()\nprint('------------------plt1 shows-------------------')\n'''To infer home and work locations, here I used a very simple heuristic: time. Below \nI plot the time distribution of GPS data points in each of the four clusters. We can see that from 9am to 6pm, \nthe user stays in the cluster 1 area, while during midnight to 8am, the user tends to stay in cluster 2 and cluster 3. \nTherefore I deduce that user 001's work location is in cluster 1 and home location is in cluster 2. Cluster 3 might be her secondary residence location.\nOf course, we can apply more sophisticated heuristics to infer home and work locations. 
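One option (not implemented here) would be to bucket each cluster's GPS timestamps by hour of day and compare the resulting histograms. 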
For example, we can also check users’ \nlocations during weekdays and weekends to give us additional clues.'''\n\n\n\n''' Get the hours for each cluster 获取每个群集的小时数'''\n\n\n\n\n\n\n\n\n\n\n'''数据缺失判断--------------------->begin'''\n\n\n# print('数据缺失判断:data_lat',data_lat.isnull().sum())\n# print('数据缺失判断:data_lon',data_lon.isnull().sum())\n\n\n\n'''数据缺失判断--------------------->end'''\n\n\n\n'''查看是否有缺失值'''\n\n\n'''I 数据清洗'''\n'''1.重复记录'''\n\n\n'''2.不完整记录'''\n\n\n'''3.非当日记录'''\n\n\n'''4.漂移记录'''\n\n\n'''II 统计分析'''\n\n\n'''III 特征提取'''\n\n\n'''Python「第一节」-制作自己的pip安装包\nhttps://blog.csdn.net/ligaopan/article/details/103187590'''\n\n\n\n\n\n","sub_path":"readFile/readFile_3.py","file_name":"readFile_3.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"147356306","text":"## $Id: goodPrimaryVertexFilter_cfi.py,v 1.2 2012/05/24 14:50:55 mschrode Exp $\n##\n## Standard requirement for a good primary vertex as recommended in\n## https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCollisionsDataAnalysis#Analysis_of_the_processed_data\n\nimport FWCore.ParameterSet.Config as cms\n\ngoodPrimaryVertexFilter = cms.EDFilter(\n \"VertexSelector\",\n src = cms.InputTag(\"offlinePrimaryVertices\"),\n cut = cms.string(\"!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2\"),\n filter = cms.bool(True)\n )\n\ngoodPrimaryVertices = cms.EDFilter(\n \"VertexSelector\",\n src = cms.InputTag(\"offlinePrimaryVertices\"),\n cut = cms.string(\"!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2\"),\n filter = cms.bool(False)\n )\n","sub_path":"CalibTreeMaker/python/goodPrimaryVertexFilter_cfi.py","file_name":"goodPrimaryVertexFilter_cfi.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"566923845","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nsize = 50\r\n\r\ndef getAveragedData(path):\r\n\r\n return (acc, valAcc)\r\n\r\nlabels = []\r\ndata = [[0 for i in range(6)] for k in range(size)]\r\n\r\nschedules = {\r\n 'LQ': ['rnn-0-10-128-history-',\r\n 'rnn-11-20-512-history-',\r\n 'rnn-21-50-2048-history-'],\r\n 'SS': ['rnn-0-5-32-history-',\r\n 'rnn-6-10-64-history-',\r\n 'rnn-11-15-128-history-',\r\n 'rnn-16-20-256-history-',\r\n 'rnn-21-25-512-history-',\r\n 'rnn-26-50-2048-history-'],\r\n 'CQ': ['rnn-0-5-128-history-',\r\n 'rnn-6-10-512-history-',\r\n 'rnn-11-15-2048-history-',\r\n 'rnn-16-20-128-history-',\r\n 'rnn-21-25-512-history-',\r\n 'rnn-26-50-2048-history-']\r\n}\r\n\r\nfor i, schedule in enumerate(schedules.keys()):\r\n labels += [schedule + '_acc', schedule + '_val_acc']\r\n offset = 0\r\n for name in schedules[schedule]:\r\n for k in range(5):\r\n with open(schedule + '/' + name + str(k) + '.csv', 'r') as f:\r\n lines = f.readlines()\r\n for r in range(len(lines) - 1):\r\n terms = lines[r+1].split(',')\r\n data[offset + r][2*i] += float(terms[1]) / 5\r\n data[offset + r][2*i + 1] += float(terms[3]) / 5\r\n offset += len(lines) - 1\r\n\r\nwith open('data.csv', 'w') as f:\r\n f.write(','.join(labels) + '\\n')\r\n for i in range(size):\r\n f.write('%f,%f,%f,%f,%f,%f\\n' % (data[i][0], data[i][1], data[i][2], data[i][3], data[i][4], data[i][5]))\r\n\r\nepochs = [i for i in range(1, 51)]\r\ndata = np.array(data)\r\n\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.ylim((0, 1))\r\nlq = plt.plot(epochs, data[:, 0], 'b--')\r\nlqv = plt.plot(epochs, data[:, 1], 'b-')\r\nss = plt.plot(epochs, 
data[:, 2], 'g--')\r\nssv = plt.plot(epochs, data[:, 3], 'g-')\r\ncq = plt.plot(epochs, data[:, 4], 'r--')\r\ncqv = plt.plot(epochs, data[:, 5], 'r-')\r\nplt.legend(('LQ - Train', 'LQ - Test', 'SS - Train', 'SS - Test', 'CQ - Train', 'CQ - Test'), loc=0)\r\n\r\nplt.savefig('ag.png')\r\nplt.show()","sub_path":"graph-bs-data.py","file_name":"graph-bs-data.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"328368812","text":"from random import *\n\nclass Student:\n    \"\"\" Class defining a student, characterized by:\n    - first name\n    - last name\n    - questionnaire answers, stored in a list \"\"\"\n\n    def __init__(self, prenom, nom, reponses):\n        \"\"\" Class constructor \"\"\"\n        self.prenom = prenom\n        self.nom = nom\n        self.reponses = reponses\n\n\ndef LoveScore(Romeo,Juliet):\n    \"\"\" A very simplistic first take on the LoveScore \"\"\"\n    r1, r2 = Romeo.reponses, Juliet.reponses\n    nbReponses = len(r1)\n    return len([ 0 for k in range(nbReponses) if r1[k] + r2[k] != 1])\n\ndef MatchingScore(matchingTable,Ei1,Ei2,nbCouples):\n    \"\"\" Computes the total matching score \"\"\"\n    score = 0\n    for k in range(nbCouples):\n        score += LoveScore(Ei1[k],Ei2[matchingTable[k]])\n    return score\n\ndef MatchStudents(Ei1,Ei2):\n    \"\"\" Ei1 and Ei2 are arrays of Student of equal length. This function returns an array containing a permutation of {0,...,nbCouples-1}\"\"\"\n    nbCouples = len(Ei1)\n    matchingTable = [k for k in range(nbCouples)]\n    matchingScore = MatchingScore(matchingTable,Ei1,Ei2,nbCouples)\n    unsuccessfulIterations = 0\n    while True:\n        i,j = randrange(0,nbCouples),randrange(0,nbCouples)\n        if LoveScore(Ei1[i],Ei2[matchingTable[j]]) + LoveScore(Ei1[j],Ei2[matchingTable[i]]) > LoveScore(Ei1[i],Ei2[matchingTable[i]]) + LoveScore(Ei1[j],Ei2[matchingTable[j]]):\n            matchingScore = matchingScore - (LoveScore(Ei1[i],Ei2[matchingTable[i]]) + LoveScore(Ei1[j],Ei2[matchingTable[j]])) + LoveScore(Ei1[i],Ei2[matchingTable[j]]) + LoveScore(Ei1[j],Ei2[matchingTable[i]])\n            matchingTable[i],matchingTable[j] = matchingTable[j],matchingTable[i]\n            unsuccessfulIterations = 0\n        unsuccessfulIterations += 1\n        if unsuccessfulIterations > 10000:\n            stop = input(\"Il semble difficile de trouver un LoveScore moyen supérieur à \" + str(matchingScore/nbCouples) + \"! 
Appuyez sur entrée pour continuer quand même.\")\n            if stop:\n                return matchingTable\n            unsuccessfulIterations = 0\n\n# Test helper functions\ndef GenerateRandomAnswers(nbReponses):\n    \"\"\" Returns a random list of 0s and 1s of length nbReponses \"\"\"\n    return [randrange(0,2) for k in range(nbReponses)]\n\ndef GenerateRandomStudents(nbCouples,nbReponses):\n    \"\"\" Returns 2 lists, each containing nbCouples randomly generated student(s) \"\"\"\n    Ei1, Ei2 = [], []\n    for k in range(nbCouples):\n        Ei1.append(Student(\"Ei1 \"+str(k),\"\",GenerateRandomAnswers(nbReponses)))\n        Ei2.append(Student(\"Ei2 \"+str(k),\"\",GenerateRandomAnswers(nbReponses)))\n    return Ei1,Ei2\n\ndef DisplayCouples(Ei1,Ei2,MatchingTable):\n    nbCouples = len(Ei1)\n    for k in range(nbCouples):\n        print(Ei1[k].prenom + Ei1[k].nom + \" matches with \" + Ei2[MatchingTable[k]].prenom + Ei2[MatchingTable[k]].nom)\n","sub_path":"BuddySystem/MatchingAlgorithm.py","file_name":"MatchingAlgorithm.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"274833592","text":"#!/usr/bin/python3\n# Saif\nfrom tkinter import *\nimport tkinter as tk\n\n####################### Corinci Deployment Function ########################\ndef corinci_deployment():\n    corinci_win = tk.Toplevel(root)\n\n    def close_window():\n        corinci_win.destroy()\n        button1.config(state='normal')\n\n    button1.config(state='disabled')\n\n    corinci_win.protocol(\"WM_DELETE_WINDOW\", close_window)\n\n    corinci_win.title('Corinci Patch deployment')\n    corinci_win.geometry('{}x{}'.format(300, 150))\n    desc_title = Label(corinci_win, text='Corinci', width=45, height=1, bg=\"blue\").pack()\n\n    patch_name = Label(corinci_win, text='Patch Name :- ').place(x=0, y=30)\n    patch_entry = Entry(corinci_win).place(x=100, y=30)\n\n    target_name = Label(corinci_win, text='Target :- ').place(x=0, y=60)\n    target_entry = Entry(corinci_win).place(x=100, y=60)\n\n    def reset_cor_window():\n        corinci_win.destroy()\n        button1.config(state='normal')\n\n    submit_patch_button = Button(corinci_win, text =\"Deploy\").place(x=50, y=100)\n    reset_patch_button = Button(corinci_win, text =\"Reset\", command=reset_cor_window).place(x=150, y=100)\n\n\n####################### Adhoc Deployment Function ########################\ndef adhoc_deployment():\n    adhoc_win = tk.Toplevel(root)\n\n    def close_window():\n        adhoc_win.destroy()\n        button2.config(state='normal')\n\n    button2.config(state='disabled')\n\n    adhoc_win.protocol(\"WM_DELETE_WINDOW\", close_window)\n\n    adhoc_win.title('Adhoc Patch deployment')\n    adhoc_win.geometry('{}x{}'.format(300, 150))\n    desc_title = Label(adhoc_win, text='Adhoc', width=45, height=1, bg=\"blue\").pack()\n\n    patch_name = Label(adhoc_win, text='Patch Name :- ').place(x=0, y=30)\n    patch_entry = Entry(adhoc_win).place(x=100, y=30)\n\n    target_name = Label(adhoc_win, text='Target :- ').place(x=0, y=60)\n    target_entry = Entry(adhoc_win).place(x=100, y=60)\n\n    def reset_adc_window():\n        adhoc_win.destroy()\n        button2.config(state='normal')\n\n    submit_patch_button = Button(adhoc_win, text =\"Deploy\").place(x=50, y=100)\n    reset_patch_button = Button(adhoc_win, text =\"Reset\", command=reset_adc_window).place(x=150, y=100)\n\n\n\n################################# Main ##########################################\nroot = Tk()\nroot.title('Model Definition')\nroot.geometry('{}x{}'.format(450, 260))\n\n# create all of the main containers\ntitle_frame = Frame(root, bg='blue', width=450, height=50)\nempty1_frame 
= Frame(root, bg='white', width=450, height=20)\ncenter_frame = Frame(root, bg='gray', width=450, height=50)\nempty2_frame = Frame(root, bg='white', width=450, height=20)\nbutton_frame = Frame(root, bg='blue', width=450, height=70)\nempty3_frame = Frame(root, bg='white', width=450, height=20)\nsupport_frame = Frame(root, bg='gray', width=450, height=20)\nbtm_frame = Frame(root, bg='white', width=450, height=100)\nbtm_frame2 = Frame(root, bg='lavender', width=450, height=60)\n\n\n# -- layout all of the main containers -- #\ntitle_frame.grid(row=0, sticky=\"ew\")\nempty1_frame.grid(row=1, sticky=\"ew\")\ncenter_frame.grid(row=2, sticky=\"ew\")\nempty2_frame.grid(row=3, sticky=\"ew\")\nbutton_frame.grid(row=4, sticky=\"ew\")\nempty3_frame.grid(row=5, sticky=\"ew\")\nsupport_frame.grid(row=6, sticky=\"ew\")\n# ---------------------------------\n\n# -- Create Widgets for the Frame -- #\ntool_title = Label(title_frame, text='Patch Deployment Tool', width=45, height=3, bg=\"blue\", font=15)\n#tool_title.pack()\ntool_title.grid(row=0)\n\ntool_description = Label(center_frame, text='Tool Description Should go here', width=45, height=3, bg='gray')\ntool_description.pack()\n\nbutton1 = Button(button_frame, text =\"Corinci\", width=15, height=4, command=corinci_deployment)\nbutton1.grid(row=4, column=1)\n\nbutton2 = Button(button_frame, text =\"AdHoc\", width=15, height=4, command=adhoc_deployment)\nbutton2.grid(row=4, column=2)\n\nbutton3 = Button(button_frame, text =\"Special\", width=15, height=4)\nbutton3.grid(row=4, column=3)\n\nsupport_label = Label(support_frame, text='Support:- saifn@hcl.com')\nsupport_label.pack()\n# ---------------------------------\n\nroot.mainloop()\n\n##########################################################################################################\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"623469928","text":"\"\"\"\nName: Sean Hassett\n\nBased on code provided by https://stackoverflow.com/users/2814626/nettux443\n\"\"\"\n\nimport socket\nimport threading\nimport packet_utils\n\nTIMEOUT = 120\nIP = '127.0.0.1'\nPORT = 50002\nBUFFER_SIZE = 1024\nSTARTING_SEQUENCE_NUMBER = 0\nMAX_SEQUENCE_NUMBER = 2\n\n\nclass Server(object):\n def __init__(self, host, port):\n self.sequence_number = STARTING_SEQUENCE_NUMBER\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n self.client_dict = {}\n\n def listen_for_gateway(self):\n print(\"Server3\\nListening for gateway...\\n\")\n self.sock.listen(5)\n while True:\n gateway, address = self.sock.accept()\n gateway.settimeout(TIMEOUT)\n threading.Thread(target=self.listen, args=(gateway, address)).start()\n\n def listen(self, gateway, address):\n size = BUFFER_SIZE\n while True:\n try:\n packet = gateway.recv(size)\n if packet:\n unpacked = packet_utils.unpack(packet)\n client_sequence_num = unpacked.sequence_number\n source_ip = unpacked.source_ip\n source_port = unpacked.source_port\n data = unpacked.data\n\n client_id = source_port\n destination_ip = source_ip\n destination_port = source_port\n\n if client_id not in self.client_dict:\n self.client_dict[client_id] = 0\n\n expected_sequence_number = self.client_dict[client_id]\n\n sequence_number = self.sequence_number\n if client_sequence_num == expected_sequence_number:\n 
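# Illustrative comment (added): in-order packet — deliver the payload, then advance this client's\n                        # expected sequence number, wrapping past MAX_SEQUENCE_NUMBER (a stop-and-wait style ACK scheme).\n                        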
print(data)\n                        self.client_dict[client_id] += 1\n                        if self.client_dict[client_id] > MAX_SEQUENCE_NUMBER:\n                            self.client_dict[client_id] = 0\n                        next_sequence_number = self.client_dict[client_id]\n                        response_packet = packet_utils.create_packet(sequence_number, IP, PORT, destination_ip,\n                                                                     destination_port,\n                                                                     bytes(str(next_sequence_number), \"utf-8\"))\n                    else:\n                        response_packet = packet_utils.create_packet(sequence_number, IP, PORT, destination_ip,\n                                                                     destination_port,\n                                                                     bytes(str(expected_sequence_number), \"utf-8\"))\n                    gateway.sendall(response_packet)\n                    gateway.close()\n                    self.sequence_number += 1\n                    if self.sequence_number > MAX_SEQUENCE_NUMBER:\n                        self.sequence_number = 0\n                else:\n                    gateway.close()\n            except:\n                gateway.close()\n                return\n\nif __name__ == \"__main__\":\n    Server(IP, PORT).listen_for_gateway()\n","sub_path":"gateway/server3.py","file_name":"server3.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"307817072","text":"import sqlite3\n\n# establish a connection to the DB\nconnection = sqlite3.connect('musique.db')\n# define a cursor\ncursor = connection.cursor()\n\n# fetch all the artists\ncursor.execute(\"select * from artiste\")\nfor row in cursor:\n    # unpack all the row data into variables\n    identifier, nom, est_solo, combien = row\n    print(\"Artiste n: %d Nom : %s\\n\" %(identifier,nom))\n\nprint (\"Choisissez un artiste en entrant son id :\")\n# get the user's choice and cast it to int\nchoix = int(input())\n# not safe to use string formatting in 'where artiste_id=%d' (SQL injection risk; prefer parameterized queries)\ncursor.execute(\"select titre, annee from album where artiste_id=%d\" %choix)\nfor row in cursor:\n    titre,annee = row\n    print (\"%s %d\\n\" %(titre, annee))\n\n# never forget to close the connection\nconnection.close()","sub_path":"SQlite/lire_bd.py","file_name":"lire_bd.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"562046455","text":"'''\nCreated on 17 Aug 2016\n\n@author: james\nfrom: http://machinelearningmastery.com/handwritten-digit-recognition-using-convolutional-neural-networks-python-keras/\n'''\n\nimport numpy\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\n\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n\n# In Keras, the layers used for two-dimensional convolutions expect pixel values with the \n# dimensions [pixels][width][height].\n# In the case of RGB, the first dimension pixels would be 3 for the red, green and blue \n# components and it would be like having 3 image inputs for every color image. 
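\n# (Added illustration) For example, with this channels-first layout a single 32x32 RGB image would be shaped (3, 32, 32).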
\n# In the case of MNIST where the pixel values are gray scale, the pixel dimension is set to 1.\n \n# load data\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n# reshape to be [samples][pixels][width][height]\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32') \n\n# As before, it is a good idea to normalize the pixel values to the range 0 and 1 \n# and one hot encode the output variables.\n\n# normalize inputs from 0-255 to 0-1\nX_train = X_train / 255\nX_test = X_test / 255\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\n# Layers we are defining:\ndef baseline_model():\n # create model\n model = Sequential()\n# The first hidden layer is a convolutional layer called a Convolution2D. \n# The layer has 32 feature maps, with a filter size of 5 x 5 and a rectifier\n# activation function. This is the input layer, expecting images with the \n# structure outline above [pixels][width][height].\n model.add(Convolution2D(32, 5, 5, border_mode='valid', input_shape=(1, 28, 28), activation='relu'))\n# Next we define a pooling layer that takes the max called MaxPooling2D. \n# It is configured with a pool size of 2x2.\n model.add(MaxPooling2D(pool_size=(2, 2)))\n# The next layer is a regularization layer using dropout called Dropout. \n# It is configured to randomly exclude 20% of neurons in the layer in order \n# to reduce overfitting.\n model.add(Dropout(0.2))\n# Next is a layer that converts the 2D matrix data to a vector called Flatten. \n# It allows the output to be processed by standard fully connected layers.\n model.add(Flatten())\n# Next a fully connected layer with 128 neurons and rectifier activation function.\n model.add(Dense(128, activation='relu'))\n# Finally, the output layer has 10 neurons for the 10 classes \n# and a softmax activation function to output probability-like \n# predictions for each class.\n model.add(Dense(num_classes, activation='softmax'))\n \n# The model is trained using logarithmic loss and the ADAM gradient descent algorithm.\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n# build the model\nmodel = baseline_model()\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=10, batch_size=200, verbose=2)\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))\n\n\n\n\n\n","sub_path":"src/tutorial10b_MNSIT_CNN.py","file_name":"tutorial10b_MNSIT_CNN.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"86530116","text":"# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration\n\ninfile = 'aod/AOD-21.0.79/AOD-21.0.79-full.pool.root'\nkeys = [\n #xAOD::MuonAuxContainer_v4\n 'Muons',\n 'Staus',\n 'HLT_xAOD__MuonContainer_MuonEFInfo',\n\n #xAOD::MuonSegmentAuxContainer_v1\n 'MuonSegments',\n 'MuonTruthSegments',\n 'NCB_MuonSegments',\n\n #xAOD::SlowMuonAuxContainer_v1\n 'SlowMuons',\n ]\n\ninclude 
('AthenaPoolUtilities/TPCnvTest.py')\n","sub_path":"Event/xAOD/xAODMuonAthenaPool/share/xAODMuonAthenaPool_21.0.79_test.py","file_name":"xAODMuonAthenaPool_21.0.79_test.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"314345972","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAria's deployment Package\nPath: aria.deployment\n\nMethods:\n * modify_deployment\n * prepare_deployment_plan\n\n\"\"\"\n\nfrom ..exceptions import UnknownInputError\nfrom ..parser.models import Plan\nfrom ..parser.scan import scan_service_template\nfrom ..parser.framework.functions import plan_evaluation_handler\nfrom .exceptions import MissingRequiredInputError\n\n\nfrom .relationships_graph import (\n    build_node_graph,\n    build_deployment_node_graph,\n    extract_node_instances,\n    build_previous_deployment_node_graph,\n    extract_added_node_instances,\n    extract_removed_node_instances,\n    extract_added_relationships,\n    extract_removed_relationships,\n)\n\n__all__ = [\n    'modify_deployment',\n    'prepare_deployment_plan',\n]\n\n\ndef prepare_deployment_plan(plan, inputs=None):\n    \"\"\"\n    Prepare a plan for deployment\n    :param plan:\n    :type plan: (Plan, dict)\n    :param inputs:\n    :return:\n    \"\"\"\n    if not isinstance(plan, dict):\n        raise TypeError(\n            'plan arg must be a dict or Plan type, got: {0} [{1}]'.format(\n                plan, type(plan)))\n    if not isinstance(plan, Plan):\n        plan = Plan(plan)\n    _set_plan_inputs(plan, inputs)\n    _process_functions(plan)\n    return _create_deployment(plan)\n\n\ndef modify_deployment(\n        nodes,\n        previous_nodes,\n        previous_node_instances,\n        modified_nodes,\n        scaling_groups):\n    \"\"\"\n    Prepares a modification plan for a deployment: the expected set of nodes\n    is compared against previous_node_instances.\n    :param nodes: the entire set of expected nodes.\n    :param previous_nodes:\n    :param previous_node_instances:\n    :param modified_nodes: existing nodes whose instance number has changed\n    :param scaling_groups:\n    :return: a dict of added, extended, reduced and removed instances\n    \"\"\"\n    plan_node_graph = build_node_graph(\n        nodes=nodes,\n        scaling_groups=scaling_groups)\n    previous_plan_node_graph = build_node_graph(\n        nodes=previous_nodes,\n        scaling_groups=scaling_groups)\n    previous_deployment_node_graph, previous_deployment_contained_graph = (  # pylint: disable=invalid-name\n        build_previous_deployment_node_graph(\n            plan_node_graph=previous_plan_node_graph,\n            previous_node_instances=previous_node_instances))\n    new_deployment_node_graph, ctx = build_deployment_node_graph(\n        plan_node_graph=plan_node_graph,\n        previous_deployment_node_graph=previous_deployment_node_graph,\n        
previous_deployment_contained_graph=previous_deployment_contained_graph,\n modified_nodes=modified_nodes)\n\n # Any node instances which were added or removed\n added_and_related = extract_added_node_instances(\n previous_deployment_node_graph,\n new_deployment_node_graph,\n ctx=ctx)\n removed_and_related = extract_removed_node_instances(\n previous_deployment_node_graph,\n new_deployment_node_graph,\n ctx=ctx)\n\n # Any node instances which had a modification to their relationship.\n # (newly introduced and removed nodes)\n extended_and_related = extract_added_relationships(\n previous_deployment_node_graph,\n new_deployment_node_graph,\n ctx=ctx)\n reduced_and_related = extract_removed_relationships(\n previous_deployment_node_graph,\n new_deployment_node_graph,\n ctx=ctx)\n\n # The extracted extended and reduced relationships hold the new and old\n # node instances. These are not required, since the change is on\n # node instance level (and not the relationship level)\n return {\n 'added_and_related': added_and_related,\n 'extended_and_related': _filter_out_node_instances(\n added_and_related, extended_and_related),\n 'reduced_and_related': _filter_out_node_instances(\n removed_and_related, reduced_and_related),\n 'removed_and_related': removed_and_related,\n }\n\n\ndef _create_deployment(plan):\n \"\"\"\n Expand node instances based on number of instances to deploy and\n defined relationships\n \"\"\"\n plan_node_graph = build_node_graph(\n nodes=plan['nodes'], scaling_groups=plan['scaling_groups'])\n\n deployment_node_graph, ctx = build_deployment_node_graph(plan_node_graph)\n\n plan['node_instances'] = extract_node_instances(\n node_instances_graph=deployment_node_graph,\n ctx=ctx)\n\n return plan\n\n\ndef _set_plan_inputs(plan, inputs=None):\n inputs = inputs or {}\n missing_inputs = _missing_inputs(plan, inputs)\n if missing_inputs:\n raise MissingRequiredInputError(\n 'Required inputs {0} were not specified - expected '\n 'inputs: {1}'.format(missing_inputs, plan['inputs'].keys()))\n\n not_expected = _not_expected_inputs(inputs, plan)\n if not_expected:\n raise UnknownInputError(\n 'Unknown inputs {0} specified - '\n 'expected inputs: {1}'.format(\n not_expected, plan['inputs'].keys()))\n\n plan['inputs'] = inputs\n\n\ndef _missing_inputs(plan, inputs):\n missing_inputs = []\n for input_name, input_def in plan['inputs'].iteritems():\n if input_name not in inputs:\n if 'default' in input_def and input_def['default'] is not None:\n inputs[input_name] = input_def['default']\n else:\n missing_inputs.append(input_name)\n return missing_inputs\n\n\ndef _not_expected_inputs(inputs, plan):\n return [input_name for input_name in inputs.keys()\n if input_name not in plan['inputs']]\n\n\ndef _process_functions(plan):\n handler = plan_evaluation_handler(plan)\n scan_service_template(plan, handler, replace=True)\n\n\ndef _filter_out_node_instances(node_instances_to_filter_out, base_node_instances):\n instance_ids_to_remove = set(\n node['id']\n for node in node_instances_to_filter_out\n if 'modification' in node)\n return [\n node\n for node in base_node_instances\n if node['id'] not in instance_ids_to_remove]\n","sub_path":"aria/deployment/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"599657284","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nLinks = []\npage_number = 0 #pages for the url, separated for being more clear\nn = 
10 #this is number of pages in the website\n\nwith open(\"res2.txt\", \"w\") as output:\n for i in range(n):\n page_number +=1 \n URL = f\"http://www.parsehbook.com/category/%da%a9%d9%88%d8%af%da%a9-%d9%88-%d9%86%d9%88%d8%ac%d9%88%d8%a7%d9%86/%d9%85%d8%ac%d9%85%d9%88%d8%b9%db%80-%d9%86%db%8c%da%a9%d9%88%d9%84%d8%a7-%da%a9%d9%88%da%86%d9%88%d9%84%d9%88/?pg={page_number}\"\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n books = soup.find_all(\"li\", class_=\"fRight\")\n for book in books:\n LINK_tmp = book.find('a', href=True)\n LINK = LINK_tmp['href']\n print(LINK)\n# print(page_number)\n output.write('{}\\n'.format(LINK))\n\n#df = pd.DataFrame(ALL, columns=['Links'])\n#df.to_excel (r'/home/pilot/fun/Rekab/scrappers/output.xlsx', index = False, header=True, encoding='utf-8')","sub_path":"Parseh/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"338805200","text":"#!/usr/bin/env python3\n\"\"\"Processes a text output from docker logs \n\nRequires this:\n docker-compose logs phpfpm 2>&1 | tee /tmp/drupal-spider-logs.log\n docker cp /tmp/drupal-spider-logs.log \\\n ${BRAND}-beamly-webhop-travisci-tests:/tests/test-data/\n\n\"\"\"\n\nimport os\nimport json\nimport pprint\npp = pprint.PrettyPrinter(indent=2, depth=99)\nimport subprocess\nfrom subprocess import Popen\n\nBOLD = '\\033[1m'\nGREEN = '\\033[92m'\nAMBER = '\\033[93m'\nRED = '\\033[91m'\nENDC = '\\033[0m'\n\n\ndrupal_version = os.environ['DRUPAL_VERSION']\ndrupal_majorver = drupal_version.split(\".\")\ndrupal_majorver = drupal_majorver[0]\nenvironment = os.environ['DOCKER_ENVIRONMENT']\n\nfail_on = [\" ERROR: \", \" CORE_ERROR: \", \" COMPILE_ERROR: \", ]\n\nwarn_on = [\" WARNING: \", \" PARSE: \", \" CORE_WARNING: \", \" NOTICE: \"]\n\nfilename='/tests/test-data/drupal-spider-logs.log'\n\nprint(GREEN + \"Checking PHP logs for errors\" + ENDC + \"| test: \" + os.path.basename(__file__))\n\nlogs = []\n\ntry:\n with open(filename) as logfile:\n for line in logfile:\n logs.append(line.strip())\nexcept Exception as e:\n print(RED + BOLD + \"[ ERROR ] loading log file, ABORTING!\" + ENDC + \"\\n\" + str(e))\n quit(1)\n\n\nfails = 0\nwarnings = 0\n\nfor l in logs:\n for w in warn_on:\n if w in l:\n warnings += 1\n print(BOLD + AMBER + 'WARNING ' + l + ENDC)\n for w in fail_on:\n if w in l:\n fails += 1\n print(BOLD + RED + 'FAILED ' + l + ENDC)\n\n\nprint(\"\\n \" + AMBER + str(warnings) + \" WARNINGS\" + ENDC + \", \" + RED + str(fails) + \" FAILS\" + ENDC + \"\\n\")\n\nif fails > 0:\n quit(1)\n","sub_path":"tests/drupal-spider-logs.py","file_name":"drupal-spider-logs.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"243899053","text":"'''\n\n This script takes in a bitmap and outputs a text file that is a\n byte array used in Arduino files.\n\n It is loosely based on Spark Fun's bmp2array script.\n\n You'll need python 3.6 (the original use Python 2.7)\n\n usage: python fourbitbmp2array.py [-v] star.bmp [-o myfile.c]\n \n Create the bmp file in Gimp by :\n\n . Remove the alpha channel (if it has one) Layer -> Transparency -> Remove Alpha Channel\n . Set the mode to indexed. Image -> Mode -> Indexed...\n . Select Generate optimum palette with 16 colors (max)\n . Export the file with a .bmp extension. Options are:\n . Run-Length Encoded: not selected\n . 
Compatibility Options: \"Do not write color space information\" not selected\n . There are no Advanced Options available with these settings\n\n\n \n\n'''\n\nimport sys\nimport struct\nimport math\nimport argparse\nimport os\n\ndebug = None\n\ndef debugOut(s):\n if debug:\n print(s)\n\n# look at arguments\nparser = argparse.ArgumentParser(description=\"Convert bmp file to C array\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"debug output\", action=\"store_true\")\nparser.add_argument(\"input\", help=\"input file name\")\nparser.add_argument(\"-o\", \"--output\", help=\"output file name\")\nargs = parser.parse_args()\n\nif not os.path.exists(args.input):\n parser.print_help()\n print(\"The input file {} does not exist\".format(args.input))\n sys.exit(1)\n\nif args.output == None:\n output = os.path.basename(args.input).replace(\".bmp\", \".c\")\nelse:\n output = args.output\n\ndebug = args.verbose\n\ntry:\n #Open our input file which is defined by the first commandline argument\n #then dump it into a list of bytes\n infile = open(args.input,\"rb\") #b is for binary\n contents = bytearray(infile.read())\n infile.close()\nexcept:\n print(\"could not read input file {}\".format(args.input))\n sys.exit(1)\n\n# first two bytes should be \"BM\"\nupto = 2\n#Get the size of this image\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nfileSize = struct.unpack(\"I\", bytearray(data))\n\nupto += 4\n# four bytes are reserved\n\nupto += 4\n\ndebugOut(\"Size of file: {}\".format(fileSize[0]))\n\n#Get the header offset amount\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\noffset = struct.unpack(\"I\", bytearray(data))\n\ndebugOut(\"Offset: {}\".format(offset[0]))\nupto += 4\n\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nheadersize = struct.unpack(\"I\", bytearray(data))\nheaderLength = headersize[0]\nstartOfDefinitions = headerLength + upto\ndebugOut(\"header size: {}, up to {}, startOfDefinitions {}\".format(headersize[0], upto, startOfDefinitions))\nupto += 4\n\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"width: {}\".format(t[0]))\nwidth = t[0]\n\nupto += 4\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"height: {}\".format(t[0]))\nheight = t[0]\n\n# 26\nupto += 4\n\ndata = struct.pack(\"BB\", contents[upto], contents[upto+1])\nt = struct.unpack(\"H\", bytearray(data))\ndebugOut(\"planes: {}\".format(t[0]))\n\nupto = upto + 2\ndata = struct.pack(\"BB\", contents[upto], contents[upto+1])\nt = struct.unpack(\"H\", bytearray(data))\ndebugOut(\"bits per pixel: {}\".format(t[0]))\nbitsPerPixel = t[0]\n\nupto = upto + 2\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"biCompression: {}\".format(t[0]))\n\nupto = upto + 4\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"biSizeImage: {}\".format(t[0]))\n\nupto = upto + 4\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"biXPelsPerMeter: {}\".format(t[0]))\n\nupto = upto + 
4\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"biYPelsPerMeter: {}\".format(t[0]))\n\nupto = upto + 4\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"biClrUsed: {}\".format(t[0]))\ncolorsUsed = t\n\nupto = upto + 4\ndata = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\nt = struct.unpack(\"I\", bytearray(data))\ndebugOut(\"biClrImportant: {}\".format(t[0]))\n\nupto += 4\n\ndebugOut(\"Upto: {} Number of colors used: {} definitions start at: {}\".format(upto, colorsUsed[0], startOfDefinitions))\n\n#Create color definition array and init the array of color values\ncolorIndex = [] #(colorsUsed[0])\nfor i in range(colorsUsed[0]):\n colorIndex.append(0)\n\n#Assign the colors to the array. upto = 54\n# startOfDefinitions = upto\nfor i in range(colorsUsed[0]):\n upto = startOfDefinitions + (i * 4)\n blue = contents[upto]\n green = contents[upto + 1]\n red = contents[upto + 2]\n # ignore the alpha channel.\n\n # data = struct.pack(\"BBBB\", contents[upto], contents[upto+1], contents[upto+2], contents[upto+3])\n # t = struct.unpack(\"I\", bytearray(data))\n # colorIndex[i] = t[0]\n\n colorIndex[i] = (((red & 0xf8)<<8) + ((green & 0xfc)<<3)+(blue>>3))\n debugOut(\"color at index {0} is {1:04x}, (r,g,b,a) = ({2:02x}, {3:02x}, {4:02x}, {5:02x})\".format(i, colorIndex[i], red, green, blue, contents[upto+3]))\n\n#debugOut(the color definitions\n# for i in range(colorsUsed[0]): \n# print hex(colorIndex[i])\n\n# perfect, except upside down.\n\n#Make a string to hold the output of our script\narraySize = (len(contents) - offset[0]) \noutputString = \"/* This was generated using a script based on the SparkFun BMPtoArray python script\" + '\\n'\noutputString += \" See https://github.com/sparkfun/BMPtoArray for more info */\" + '\\n\\n'\noutputString += \"static const uint16_t palette[\" + str(colorsUsed[0]) + \"] = {\";\nfor i in range(colorsUsed[0]): \n # print hexlify(colorIndex[i])\n if i % 4 == 0:\n outputString += \"\\n\\t\"\n outputString += \"0x{:04x}, \".format(colorIndex[i])\n\noutputString = outputString[:-2]\noutputString += \"\\n};\\n\\n\"\noutputString += \"// width is \" + str(width) + \", height is \" + str(height) + \"\\n\"\noutputString += \"static const uint8_t myGraphic[\" + str(arraySize) + \"] PROGMEM = {\" + '\\n'\n\nif bitsPerPixel != 4:\n print(\"Expected 4 bits per pixel; found {}\".format(bitsPerPixel))\n sys.exit(1)\n \n#Start converting spots to values\n#Start at the offset and go to the end of the file\ndropLastNumber = True #(width % 4) == 2 or (width % 4) == 1\npaddedWidth = int(math.ceil(bitsPerPixel * width / 32.0) * 4)\ndebugOut(\"array range is {} {} len(contents) is {} paddedWidth is {} width is {}\".format(offset[0], fileSize[0], len(contents), paddedWidth, width))\n\nr = 0\nwidth = int(width / 2)\n#for i in range(offset[0], fileSize[0]): # close but image is upside down. 
Each row is correct but need to swap columns.\n#for i in range(fileSize[0], offset[0], -1):\n\nfor col in range(height-1, -1, -1):\n i = 0\n for row in range(width):\n colorCode1 = contents[row + col*paddedWidth + offset[0]] \n\n if r > 0 and r % width == 0:\n i = 0\n outputString += '\\n\\n'\n elif (i + 1) % 12 == 0 :\n outputString += '\\n'\n i = 0\n \n #debugOut(\"cell ({0}, {1})\".format(row, col)\n\n r = r + 1\n i = i + 1\n outputString += \"0x{:02x}, \".format(colorCode1)\n\n\n \n#Once we've reached the end of our input string, pull the last two\n#characters off (the last comma and space) since we don't need\n#them. Top it off with a closing bracket and a semicolon.\noutputString = outputString[:-2]\noutputString += \"};\"\n\ntry:\n #Write the output string to our output file\n outfile = open(output, \"w\")\n outfile.write(outputString)\n outfile.close()\nexcept:\n print(\"could not write output to file {}\".format(output))\n sys.exit(1)\n\ndebugOut(\"{} complete\".format(output))\ndebugOut(\"Copy and paste this array into a image.h or other header file\")\n\nif not debug:\n print(\"Completed; the output is in {}\".format(output))\n","sub_path":"firmware/Prototype_v4/libraries/TFT_eSPI/Tools/Images/bmp2array4bit.py","file_name":"bmp2array4bit.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"170796845","text":"import logging\nlogger = logging.getLogger(\"webapi\")\n\nimport socorro.database.database as db\nimport socorro.webapi.webapiService as webapi\nimport socorro.lib.util as util\nimport socorro.lib.datetimeutil as dtutil\n\nimport collections as col\nimport functools as ft\n\nfrom collections import defaultdict\n\nclass CurrentVersions(webapi.JsonServiceBase):\n def __init__(self, configContext):\n super(CurrentVersions, self).__init__(configContext)\n logger.debug('CurrentVersions __init__')\n\n # curl 'http://localhost:8085/201106/current/versions'\n uri = '/201106/current/versions/(.*)'\n\n def get(self, *args):\n\n connection = self.database.connection()\n cursor = connection.cursor()\n\n featured_only = False\n if 'featured' in args:\n featured_only = True\n\n # use the last date that we have data for as the end\n currentVersions = \"\"\"\n /* socorro.services.CurrentVersions curentVersions */\n SELECT product_name, version_string, is_featured,\n start_date, end_date\n FROM product_info\"\"\"\n\n if featured_only:\n currentVersions += \"\"\" WHERE is_featured\"\"\"\n\n cursor.execute(currentVersions)\n\n defaultdict_factory_with_list = ft.partial(col.defaultdict, list)\n\n result = col.defaultdict(defaultdict_factory_with_list)\n for product, version, featured, start, end in cursor.fetchall():\n releases = { 'version': version,\n 'start_date': str(start),\n 'end_date': str(end) }\n\n if not featured_only:\n releases['featured'] = featured\n\n result[product]['releases'].append(releases)\n\n return {'currentversions': result}\n","sub_path":"socorro/services/currentVersions.py","file_name":"currentVersions.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"174737071","text":"'''\nSamuel Weissmann\nspw2136\n2019-10-02\n\nContains the code that was collaboratively written during lab 5. I also\nadded a hamming_distance2 function which will detect the hamming distance\nfor two strings of unequal length. 
The logic is fundamentally the same,\nbut it needs to take a few more factors into consideration.\n'''\n\ndef is_palindrome(str1):\n    '''Takes in a string, tests if it is a palindrome, and returns\n    True or False'''\n    return str1 == str1[::-1]\n\n\ndef hamming_distance(str1, str2):\n    '''Find the Hamming distance between two strings of equal length, which is\n    defined as the number of character substitutions required to make the\n    two strings equal.'''\n    difference = 0\n    for i in range(len(str1)):\n        if str1[i] != str2[i]:\n            difference += 1\n\n    return difference\n\ndef hamming_distance2(str1, str2):\n    '''Find the Hamming distance between two strings of possibly unequal length.'''\n\n    #identify shorter string\n    if len(str1) < len(str2):\n        short_str, long_str = str1, str2\n    else:\n        short_str, long_str = str2, str1\n\n    #count differences over the overlapping prefix (iterating over the shorter string avoids an IndexError)\n    difference = 0\n    for i in range(len(short_str)):\n        if short_str[i] != long_str[i]:\n            difference += 1\n\n    #add on the difference that results from the shorter string missing characters\n    difference += len(long_str) - len(short_str)\n\n    return difference","sub_path":"Lab5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"554321566","text":"\"\"\"\nProject Euler problem 24:\n\nA permutation is an ordered arrangement of objects.\n For example, 3124 is one possible permutation of the digits \n 1, 2, 3 and 4. If all of the permutations are listed numerically \n or alphabetically, we call it lexicographic order. \n The lexicographic permutations of 0, 1 and 2 are:\n\n012 021 102 120 201 210\n\nWhat is the millionth lexicographic permutation of the digits\n 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n\"\"\"\n\ndef permutations(current,digits):\n\t#function to recursively find all possible permutations starting \n\t#with a given digit and using the remaining digits\n\t#takes in: current, a list of current permutations\n\t#\t\t\tdigits, a string of all the digits that can be used\n\t#returns a list of all permutations\n\tnew = []\n\ttemp = [] #creates a list of all possible digits\n\tfor digit in digits:\n\t\ttemp.append(digit)\n\t\n\tfor number in current:\n\t\tpossible = temp[:] #resets the list for every number\n\t\tfor digit in number:\n\t\t\tif digit in possible:\n\t\t\t\t#removes digits already in the number from possible, \n\t\t\t\t#so no repeats\n\t\t\t\tpossible.remove(digit)\n\n\t\tif len(possible) == 0:\n\t\t\treturn current\n\t\t\t# if all digits are used, then return current list \n\t\t\t# ---> already has all permutations\n\n\t\tfor num in possible:\n\t\t\tnew.append(number+num) \n\t\t#print \"gi\",new\n\n\treturn permutations(new,digits)\n\t# keeps calling function again to add on to the list of permutations\n\n# main loop\t#\ndigits = \"0123456789\"\noverall = []\ncurrent = []\nfor digit in digits:\n\t#goes through each digit one by one\n\tcurrent.append(digit)\n\toneDigit = permutations(current,digits)\n\toverall += oneDigit\n\t#adds all possible permutations for each digit into an overall list\n\tcurrent = [] #resets current to be empty \n\noverall.sort()\nprint( overall[999999]) #find 1 millionth\n\n\n\n##attempt 2 pseudocode\n\"\"\"\n#main loop outside:\ndigits = string of all digits 0-9\noverall = [] #all the numbers\ncurrent = [] # all numbers starting with given digit\nfor digit in digits:\n\tcurrent.append(\"digit\")\n\trecursion(current)\n\t\n\toverall+=current\n\tcurrent = [] #clear current for next 
digit\n\nrecursion(current):\n\tnew = [] #new list \n\tfor number in current: ex.0\n\t\tpossible = make a string of digits not in number (a string)\n\n\t\t# BASE CASE\n\t\tif possible == 0:\n\t\t\t# no more digits left\n\t\t\treturn current\n\n\t\tfor each num in possible ex.1\n\t\t\tappend (number+num ... so 01) to new\n\n\treturn recursion(new) #keep going to use up all possible digits\n\n## SORT + find 1 millionth\noverallINT = []\nfor number in overall:\n\t##change to int so can compare\n\toverallINT. append( int(number))\n\nx = sort(overallINT)\nprint( x[999999]) #find 1 millionth\n\n\n\n\"\"\"\n","sub_path":"lexicographic_permuations.py","file_name":"lexicographic_permuations.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"275527425","text":"#!/usr/bin/env python\nimport tf\nimport rospy\nfrom geometry_msgs.msg import Point\nfrom rospy import ServiceProxy\nimport actionlib\nimport abc\nimport math\nfrom hri_api.util import InitNode, TypeChecker\nfrom hri_api.actions import MultiGoalActionClient\nimport uuid\nimport sys\nfrom enum import Enum\nimport inspect\n\n\nclass NamingScheme(Enum):\n\n Flat = 1\n Hierarchical = 2\n\n\nclass Entity(object):\n\n \"\"\" The Entity class encapsulates functions and attributes common to all entities (objects) perceived / interacted\n with by the robot. It contains high level functions for computing spatial relationships and can be used to represent\n hierarchical objects detected by the robots perception system.\n\n \"\"\"\n\n def __init__(self, local_frame_id, parent, naming_scheme=NamingScheme.Hierarchical):\n TypeChecker.accepts(inspect.currentframe().f_code.co_name,\n (str, (None, Entity), NamingScheme),\n local_frame_id, parent, naming_scheme)\n\n InitNode()\n self.tl = tf.TransformListener()\n self.local_frame_id = local_frame_id\n self.parent = parent\n self.visible = True\n self._global_id = str(uuid.uuid4())\n self.naming_scheme = naming_scheme\n self.children = []\n\n # Adds self to parent to create parent -> child relationship\n if parent is not None:\n parent.add_child(self)\n\n def add_child(self, child):\n \"\"\"\n\n Adds a child to the entity.\n\n :param child: the child\n :type child: Entity\n :raises TypeError: child not an Entity\n \"\"\"\n\n TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), child)\n\n self.children.append(child)\n\n @property\n def global_id(self):\n \"\"\"\n\n :return: Get the entities global id. The global id is used to uniquely identify the entity. Note that this It uniquely identifies this entit\n :rtype: str\n \"\"\"\n\n return self._global_id\n\n @classmethod\n def make(cls, local_id):\n \"\"\"\n\n Instantiate a derived Entity class, e.g. Person(local_id) and return it.\n\n :param local_id: the id that uniquely identifies the entity amongst all entities of the same type\n :type local_id: int\n :return: the entity instance\n :rtype: Entity\n :raises NotImplementedError: please implement this method\n \"\"\"\n\n raise NotImplementedError('please implement this method')\n\n def default_body_part(self):\n \"\"\"\n\n Gets the default body part to use for math calculations. 
Only applicable to entities with children.\n\n        :return: the default body part to use for math calculations.\n        :rtype: Entity\n        :raises NotImplementedError: please implement this method\n        \"\"\"\n\n        if self.parent is None and len(self.children) > 0:\n            raise NotImplementedError(\"Please implement this method\")\n\n    def global_frame_id(self, depth=0):\n        \"\"\"\n\n        Gets the global tf frame id of the entity.\n\n        :return: the global frame id\n        :rtype: str\n        \"\"\"\n        TypeChecker.accepts(inspect.currentframe().f_code.co_name, (int,), depth)\n\n        if self.naming_scheme is NamingScheme.Flat:\n            return self.local_frame_id\n\n        elif self.naming_scheme is NamingScheme.Hierarchical:\n            if depth == 0 and self.parent is None and len(self.children) > 0:\n                return self.default_body_part().global_frame_id()\n\n            elif depth == 0 and self.parent is None:\n                return self.local_frame_id\n\n            else:\n                return self.parent.global_frame_id(depth + 1) + '_' + self.local_frame_id\n\n    def infront_of(self, entity):\n        \"\"\"\n\n        Check if the current instance is in front of entity\n\n        :param entity:\n        :type entity: Entity\n        :return: whether this assertion is true or not\n        :rtype: bool\n        :raises TypeError: entity is not an Entity\n        \"\"\"\n\n        TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), entity)\n\n        origin = Point()\n        other = self.__translation_to(entity)\n\n        if other.x >= origin.x:\n            return True\n        return False\n\n    def behind(self, entity):\n        \"\"\"\n\n        Check if the current instance is behind entity\n\n        :param entity:\n        :type entity: Entity\n        :return: whether this assertion is true or not\n        :rtype: bool\n        :raises TypeError: entity is not an Entity\n        \"\"\"\n\n        TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), entity)\n\n        origin = Point()\n        other = self.__translation_to(entity)\n\n        if other.x < origin.x:\n            return True\n        return False\n\n    def left_of(self, entity):\n        \"\"\"\n\n        Check if the current instance is left of entity\n\n        :param entity:\n        :type entity: Entity\n        :return: whether this assertion is true or not\n        :rtype: bool\n        :raises TypeError: entity is not an Entity\n        \"\"\"\n\n        TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), entity)\n\n        origin = Point()\n        other = self.__translation_to(entity)\n\n        if other.y >= origin.y:\n            return True\n        return False\n\n    def right_of(self, entity):\n        \"\"\"\n\n        Check if the current instance is right of entity\n\n        :param entity:\n        :type entity: Entity\n        :return: whether this assertion is true or not\n        :rtype: bool\n        :raises TypeError: entity is not an Entity\n        \"\"\"\n\n        TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), entity)\n\n        origin = Point()\n        other = self.__translation_to(entity)\n\n        if other.y < origin.y:\n            return True\n        return False\n\n    def distance_to(self, entity):\n        \"\"\"\n\n        Calculate the distance from the current instance to entity\n\n        :param entity: the entity we are finding the distance to\n        :type entity: Entity\n        :return: the distance from the instance to entity\n        :rtype: float\n        :raises TypeError: entity is not an Entity\n        \"\"\"\n\n        TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), entity)\n\n        origin = Point()\n        other = self.__translation_to(entity)\n\n        x_diff = origin.x - other.x\n        y_diff = origin.y - other.y\n        z_diff = origin.z - other.z\n        return math.sqrt(x_diff * x_diff + y_diff * y_diff + z_diff * z_diff)\n\n    def __translation_to(self, target):\n        
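# Illustrative comment (added): look up the tf transform between the two entities' frames and return\n        # the translation as a Point; falls back to the origin if the lookup fails.\n        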
TypeChecker.accepts(inspect.currentframe().f_code.co_name, (Entity,), target)\n\n try:\n (trans, rot) = self.tl.lookupTransform(self.default_body_part(), target.default_body_part(), rospy.Time())\n point = Point(trans[0], trans[1], trans[2])\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n point = Point()\n rospy.loginfo(\"Couldn't transform from '\" + self.global_frame_id() + \"' to '\" + target.global_frame_id() + \"'\")\n\n return point\n\n def __repr__(self):\n return self.global_id\n\n def __eq__(self, other):\n if self.global_id == other.global_id:\n return True\n return False\n\n # @staticmethod\n # def wait_for_services(*services):\n # for i, service in enumerate(services):\n # if not isinstance(service, ServiceProxy):\n # raise TypeError(\"wait_for_services() parameter action_servers[{0}]={1} is not a ServiceProxy\".format(i, service))\n #\n # rospy.loginfo(\"Waiting for service: %s\", service.resolved_name)\n # service.wait_for_service()\n # rospy.loginfo(\"Service found: %s\", service.resolved_name)\n #\n # rospy.loginfo(\"All services found\")\n #\n # @staticmethod\n # def wait_for_action_servers(*action_servers):\n # for i, action_server in enumerate(action_servers):\n # if not isinstance(action_server, (actionlib.SimpleActionClient, MultiGoalActionClient)):\n # raise TypeError(\"wait_for_action_servers() parameter action_servers[{0}]={1} is not a SimpleActionClient or MultiGoalActionClient\".format(i, action_server))\n #\n # name = action_server.action_client.ns\n # rospy.loginfo(\"Waiting for action server: %s\", name)\n # action_server.wait_for_server()\n # rospy.loginfo(\"Action server: %s found\", name)\n #\n # rospy.loginfo(\"All action servers found\")\n\n","sub_path":"hri_api/src/hri_api/entities/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"520165597","text":"def switch(s, i):\n l = []\n for j in range(len(s)):\n l.append(s[j])\n\n if l[i] == '9':\n l[i] = '6'\n else:\n l[i] = '9'\n\n return ''.join(l)\n\n\ns = input()\nn = int(s)\n\nl1 = []\nl1.append(n)\n\nif s=='12':\n print(s)\n \nelse:\n for i in range(len(s)):\n l1.append(int(switch(s, i)))\n\n print(max(l1))\n\n","sub_path":"Code/CodeRecords/2421/60691/270022.py","file_name":"270022.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"495088341","text":"def inside(y,x):\n return 0<=y=1000:\n return -1\n t+=1\n for i in range(k):\n y,x,d = horses[i]\n ny = y + dy[d]\n nx = x + dx[d]\n if not inside(ny,nx) or color[ny][nx]==2:\n nd = rev_d[d]\n ny,nx = y+dy[nd],x+dx[nd]\n horses[i][2] = nd\n if not inside(ny,nx) or color[ny][nx]==2:\n horses[i][2] = nd\n continue\n if color[ny][nx]==0:\n idx = a[y][x].index(i)\n temp = a[y][x][idx:]\n a[ny][nx] +=temp\n for num in temp:\n horses[num] = [ny,nx,horses[num][2]]\n del a[y][x][idx:]\n if len(a[ny][nx])>=4:return t\n continue\n if color[ny][nx]==1:\n idx = a[y][x].index(i)\n temp = a[y][x][idx:][::-1]\n\n a[ny][nx] += temp\n for num in temp:\n horses[num] = [ny,nx,horses[num][2]]\n del a[y][x][idx:]\n if len(a[ny][nx]) >= 4: return t\n continue\n\nif __name__ == '__main__':\n\n n, k = map(int, input().split())\n color = [list(map(int, input().split())) for _ in range(n)]\n horses = []\n a = [[[] * n for _ in range(n)] for _ in range(n)]\n\n for i in range(k):\n y, x, d = map(int, input().split())\n a[y - 
1][x - 1].append(i)\n horses.append([y - 1, x - 1, d - 1])\n\n dy = [0, 0, -1, 1]\n dx = [1, -1, 0, 0]\n rev_d = {0: 1, 1: 0, 2: 3, 3: 2}\n\nans = solve(color,a,horses)\nprint(ans)","sub_path":"Gold2/새로운 게임 2.py","file_name":"새로운 게임 2.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"137654714","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/prompt_tool_kit/key_binding/manager.py\n# Compiled at: 2019-08-15 23:53:39\n# Size of source mod 2**32: 3727 bytes\n\"\"\"\nDEPRECATED:\nUse `prompt_tool_kit.key_binding.defaults.load_key_bindings` instead.\n\n:class:`KeyBindingManager` is a utility (or shortcut) for loading all the key\nbindings in a key binding registry, with a logic set of filters to quickly to\nquickly change from Vi to Emacs key bindings at runtime.\n\nYou don't have to use this, but it's practical.\n\nUsage::\n\n manager = KeyBindingManager()\n app = Application(key_bindings_registry=manager.registry)\n\"\"\"\nfrom __future__ import unicode_literals\nfrom .defaults import load_key_bindings\nfrom prompt_tool_kit.filters import to_cli_filter\nfrom prompt_tool_kit.key_binding.registry import Registry, ConditionalRegistry, MergedRegistry\n__all__ = ('KeyBindingManager', )\n\nclass KeyBindingManager(object):\n __doc__ = '\\n Utility for loading all key bindings into memory.\\n\\n :param registry: Optional `Registry` instance.\\n :param enable_abort_and_exit_bindings: Filter to enable Ctrl-C and Ctrl-D.\\n :param enable_system_bindings: Filter to enable the system bindings\\n (meta-! prompt and Control-Z suspension.)\\n :param enable_search: Filter to enable the search bindings.\\n :param enable_open_in_editor: Filter to enable open-in-editor.\\n :param enable_open_in_editor: Filter to enable open-in-editor.\\n :param enable_extra_page_navigation: Filter for enabling extra page navigation.\\n (Bindings for up/down scrolling through long pages, like in Emacs or Vi.)\\n :param enable_auto_suggest_bindings: Filter to enable fish-style suggestions.\\n\\n :param enable_vi_mode: Deprecated!\\n '\n\n def __init__(self, registry=None, enable_vi_mode=None, enable_all=True, get_search_state=None, enable_abort_and_exit_bindings=False, enable_system_bindings=False, enable_search=False, enable_open_in_editor=False, enable_extra_page_navigation=False, enable_auto_suggest_bindings=False):\n if not registry is None:\n if not isinstance(registry, Registry):\n raise AssertionError\n if not get_search_state is None:\n if not callable(get_search_state):\n raise AssertionError\n enable_all = to_cli_filter(enable_all)\n defaults = load_key_bindings(get_search_state=get_search_state,\n enable_abort_and_exit_bindings=enable_abort_and_exit_bindings,\n enable_system_bindings=enable_system_bindings,\n enable_search=enable_search,\n enable_open_in_editor=enable_open_in_editor,\n enable_extra_page_navigation=enable_extra_page_navigation,\n enable_auto_suggest_bindings=enable_auto_suggest_bindings)\n self.registry = MergedRegistry([\n ConditionalRegistry(defaults, enable_all)])\n\n @classmethod\n def for_prompt(cls, **kw):\n \"\"\"\n Create a ``KeyBindingManager`` with the defaults for an input prompt.\n This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),\n incremental search and auto suggestions.\n\n (Not for full screen applications.)\n \"\"\"\n 
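        # Usage sketch for for_prompt(), following the module-level docstring
        # (illustrative only):
        #   manager = KeyBindingManager.for_prompt()
        #   app = Application(key_bindings_registry=manager.registry)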
kw.setdefault('enable_abort_and_exit_bindings', True)\n kw.setdefault('enable_search', True)\n kw.setdefault('enable_auto_suggest_bindings', True)\n return cls(**kw)\n\n def reset(self, cli):\n pass\n\n def get_vi_state(self, cli):\n return cli.vi_state","sub_path":"pycfiles/prompt_tool_kit-1.0.14-py3.6/manager.cpython-36.py","file_name":"manager.cpython-36.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"547454933","text":"## joint follow mesh silhouette with controllers (joint on skull to simulate muscle)\n## select controllers and run script\n## redefine mesh!!!!\nctrls = cmds.ls(sl=1)\nmesh = \"skull_geoShape\"\njntMainGrp = cmds.group(name = \"jnt_%s\"%mesh.replace(\"Shape\",\"_grp\"),em=1)\nutilMainGrp = cmds.group(name = \"%s_util\"%mesh.replace(\"Shape\",\"_grp\"),em=1)\nfor ctrl in ctrls:\n ##create groups, joints and locators\n prefix = ctrl.replace(\"CTRL_\",\"\")\n locGrp = cmds.group(em=1,name = \"%s_loc_grp\"%prefix)\n jntGrp = cmds.group(em=1,name = \"jnt_%s_loc_grp\"%prefix)\n loc = cmds.spaceLocator(name = \"%s_loc\"%prefix)[0]\n jnt = cmds.joint(name = \"jnt_%s\"%prefix)\n node = cmds.shadingNode(\"closestPointOnMesh\",au=1,name = \"%s_pntOnMEsh\"%prefix)\n \n locShp = cmds.listRelatives(loc,type=\"locator\")[0]\n \n ##clean up\n cmds.parent(jntGrp,jntMainGrp)\n cmds.parent(locGrp,utilMainGrp)\n cmds.parent(loc,locGrp)\n cmds.parent(jnt,jntGrp)\n \n ##relocate locator group to controller\n cmds.delete(cmds.parentConstraint(ctrl,locGrp,mo=0))\n \n #connect attribute\n cmds.connectAttr(\"%s.outMesh\"%mesh,\"%s.inMesh\"%node)\n cmds.connectAttr(\"%s.t\"%ctrl,\"%s.t\"%loc)\n cmds.connectAttr(\"%s.worldPosition\"%locShp,\"%s.inPosition\"%node)\n cmds.connectAttr(\"%s.position\"%node,\"%s.t\"%jntGrp)\n","sub_path":"jointOnMesh.py","file_name":"jointOnMesh.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"178368799","text":"from sys import argv\nimport itertools\n\nscript, isolate_list, outputdir = argv\n\nisolates = []\nwith open(isolate_list, 'r') as infile1:\n\tfor line in infile1:\n\t\tisolates.append(line.strip())\n\n\nfor i in isolates:\n\theader = \"#!/bin/bash\\n#PBS -l nodes=1:ppn=8\\n#PBS -l walltime=00:30:00\\n#PBS -N \" + i + \"_ecoli\\n#PBS -A zke-503-ab\"\n\toutput_file = outputdir + i + \".ecoli.spades.job\"\n\twith open(output_file, 'w') as outfile1:\n\t\toutfile1.write(header)\n\t\t# outfile1.write(\"\\n\" + \"\\n\" + \"cd /scratch/d/dguttman/cizydor/ecoli_assemblies/\" + \"\\n\")\n\t\t# outfile1.write(\"\\n\" + \"mkdir \" + i + \"_assembly\" + \"\\n\" + \"cd \" + i + \"_assembly\" + \"\\n\")\n\t\toutfile1.write(\"\\n\" + \"\\n\" + \"module load spades/spades3.11.1\" + \"\\n\")\n\t\toutfile1.write(\"\\n\" + \"spades.py --careful -1 /scratch/d/dguttman/cizydor/ecoli_fastq/\" + i + \"_R1.fastq -2 /scratch/d/dguttman/cizydor/ecoli_fastq/\" + i + \"_R2.fastq -o /scratch/d/dguttman/cizydor/ecoli_assemblies/\" + i + \" -t 8\" + \"\\n\")\n\n\n\n#!/bin/bash\n#PBS -l nodes=1:ppn=8\n#PBS -l walltime=48:00:00\n#PBS -N 36_pae\n#PBS -A zke-503-ab\n\n# do spades.py --careful -1 ~/Data/primary_project_3/fastq_files/G37_good_reads_rarefied/$i\"_R1.fastq\" -2 ~/Data/primary_project_3/fastq_files/G37_good_reads_rarefied/$i\"_R2.fastq\" -o ~/Data/primary_project_3/de_novo_assemblies/H51_spades_155_rarefied/$i/ -t 8 -m 50; 
done","sub_path":"masters_scripts/EC4_spades_assemblies_scinet_jobs.py","file_name":"EC4_spades_assemblies_scinet_jobs.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"39768974","text":"#coding:utf-8\r\n\r\nimport speech_recognition as sr\r\nimport re\r\nfrom jtalk import jtalk\r\nimport face_emotion_recognizer\r\nimport time\r\nfrom firestoreAPI import FireStoreDB\r\nfrom jaconv import kata2hira\r\nfrom freeeAPI import freeeAPI\r\n\r\n\r\nclass reserve_dakoku:\r\n def __init__(self):\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n self.user_db = FireStoreDB().db\r\n\r\n self.fr = face_emotion_recognizer.FaceEmotionRecognizer(self.user_db)\r\n \r\n self.dakoku_patterns = [\r\n '.*?(おはよう).*',\r\n '.*?(お疲れ|お先|失礼|さようなら).*',\r\n '.*?(休憩).*?(入り|いただきます).*',\r\n '.*?(休憩).*?(上がり|いただきました).*'\r\n ]\r\n\r\n self.cancel_pattern = '.*?(キャンセル).*'\r\n\r\n self.dakoku_message_dict = {\r\n 0: 'おはようございます',\r\n 1: 'おつかれさまでした',\r\n 2: 'きゅうけいいってらっしゃい',\r\n 3: 'がんばってください'\r\n }\r\n\r\n self.dakoku_attr_str = {\r\n 0: '出勤',\r\n 1: '退勤',\r\n 2: '休憩開始',\r\n 3: '休憩終了'\r\n }\r\n\r\n self.company_id = freeeAPI().getCompanyID()\r\n\r\n def reserve_dakoku(self, dakoku_queue):\r\n\r\n while True:\r\n print(\"\\n待受中\")\r\n\r\n with self.mic as source:\r\n self.r.adjust_for_ambient_noise(source) #雑音対策\r\n audio = self.r.listen(source, phrase_time_limit=5)\r\n\r\n print (\"処理中\")\r\n\r\n try:\r\n now = time.time()\r\n recog_result = self.r.recognize_google(\r\n audio, language='ja-JP', show_all=True)\r\n print(recog_result)\r\n print('音声認識時間: {:.3g}秒'.format(time.time() - now))\r\n\r\n # 音声認識がうまくいってないとき\r\n if not isinstance(recog_result, dict) or len(recog_result.get(\"alternative\", [])) == 0:\r\n continue;\r\n\r\n sorted_result = sorted(recog_result['alternative'], key=lambda x: x['confidence']\r\n ) if \"confidence\" in recog_result[\"alternative\"] else recog_result['alternative']\r\n recog_texts = [recog_elem['transcript']\r\n for recog_elem in sorted_result]\r\n recog_text = recog_texts[0]\r\n\r\n dakoku_results = [re.match(dakoku_pattern, recog_text) for dakoku_pattern in self.dakoku_patterns]\r\n \r\n # 打刻WORDにマッチ\r\n if any(dakoku_results):\r\n user = self.fr.authorize(num_trial=20)\r\n \r\n # マッチした打刻種類の最初のindexを取得 0:出勤 1:退勤 2:休憩始 3:休憩終\r\n dakoku_attr = None\r\n for index, dakoku_result in enumerate(dakoku_results):\r\n if dakoku_result is None:\r\n continue\r\n else:\r\n dakoku_attr = index\r\n break\r\n \r\n message = self.dakoku_message_dict[dakoku_attr] + ('、どちらさまですか' if user is None else '、' + user['last_name_kana'] + 'さん')\r\n\r\n if user is not None:\r\n message += '、' + mercy_message(dakoku_attr, user['emotion'])\r\n\r\n sec = jtalk(message)\r\n time.sleep(sec)\r\n\r\n if user is None:\r\n user = self.detect_unknown_visitor()\r\n\r\n # また失敗したとき\r\n if user is None:\r\n sec = jtalk('登録されたユーザを認識できませんでした')\r\n time.sleep(sec)\r\n continue\r\n\r\n message = '{}さんの{}を打刻します'.format(\r\n user['last_name_kana'], self.dakoku_attr_str[dakoku_attr])\r\n sec = jtalk(message)\r\n time.sleep(sec)\r\n\r\n dakoku_queue.append({'employee_id':user['employee_id'], 'dakoku_attr':dakoku_attr, 'time':time.time()})\r\n\r\n # 削除フロー\r\n elif re.match(self.cancel_pattern, recog_text):\r\n if len(dakoku_queue) != 0:\r\n sec = jtalk('打刻をキャンセルします')\r\n time.sleep(sec)\r\n dakoku_queue.pop()\r\n continue\r\n else:\r\n continue\r\n\r\n # 訂正フロー\r\n else:\r\n users_ref = 
self.user_db.collection(str(self.company_id))\r\n users = [doc.to_dict() for doc in users_ref.get()]\r\n\r\n # 前回打刻されたユーザを除き、登録ユーザ名が発話に含まれるか否か\r\n for user in users:\r\n if len(dakoku_queue):\r\n if user['employee_id'] != dakoku_queue[-1]['employee_id'] and name_in_texts(user, recog_texts):\r\n dakoku_queue[-1] = {'employee_id': user['employee_id'],\r\n 'dakoku_attr': dakoku_queue[-1]['dakoku_attr'],\r\n 'time': time.time()}\r\n\r\n message = '{}さんの{}を打刻します'.format(\r\n user['last_name_kana'], self.dakoku_attr_str[dakoku_queue[-1]['dakoku_attr']])\r\n sec = jtalk(message)\r\n time.sleep(sec)\r\n\r\n # 以下は認識できなかったときに止まらないように。\r\n except sr.UnknownValueError:\r\n print(\"音声を認識できませんでした\")\r\n except sr.RequestError as e:\r\n print(\r\n \"音声認識のリクエストが正常に完了しませんでした; {0}\".format(e))\r\n except ValueError as e:\r\n print(e)\r\n\r\n\r\n # 顔認証で失敗したユーザに対して、名前をもとに判別\r\n def detect_unknown_visitor(self):\r\n try_cnt = 0\r\n\r\n while try_cnt < 3:\r\n try_cnt += 1\r\n\r\n print(\"待受中\")\r\n\r\n with self.mic as source:\r\n self.r.adjust_for_ambient_noise(source) # 雑音対策\r\n audio = self.r.listen(source)\r\n\r\n print(\"処理中\")\r\n\r\n try:\r\n recog_result = self.r.recognize_google(audio, language='ja-JP', show_all=True)\r\n print(recog_result)\r\n\r\n # 音声認識がうまくいってないとき\r\n if not isinstance(recog_result, dict) or len(recog_result.get(\"alternative\", [])) == 0:\r\n print(\"could not understand audio\")\r\n continue\r\n\r\n sorted_result = sorted(recog_result['alternative'], key=lambda x: x['confidence']) if \"confidence\" in recog_result[\"alternative\"] else recog_result['alternative']\r\n recog_texts = [recog_elem['transcript'] for recog_elem in sorted_result]\r\n \r\n users_ref = self.user_db.collection(str(self.company_id))\r\n users = [doc.to_dict() for doc in users_ref.get()]\r\n\r\n for user in users:\r\n if name_in_texts(user, recog_texts):\r\n return user\r\n\r\n # 以下は認識できなかったときに止まらないように。\r\n except sr.UnknownValueError:\r\n print(\"音声を認識できませんでした\")\r\n except sr.RequestError as e:\r\n print(\r\n \"音声認識のリクエストが正常に完了しませんでした; {0}\".format(e))\r\n\r\n return None\r\n\r\n\r\n# userの姓名がtextに含まれるか否か\r\ndef name_in_text(user, text):\r\n return user['last_name_kanji'] in text or user['first_name_kanji'] in text or kata2hira(user['last_name_kana']) in text or kata2hira(user['first_name_kana']) in text or user['last_name_kana'] in text or user['first_name_kana'] in text\r\n\r\n# userの姓名がtextsに含まれるか否か\r\ndef name_in_texts(user, texts):\r\n return any([name_in_text(user, text) for text in texts])\r\n\r\n# 慈悲深い声掛けを追加\r\ndef mercy_message(attr, emotion):\r\n if attr == 0:\r\n return '今日も頑張りましょう' if emotion == 1 else '無理しないで下さい'\r\n elif attr == 1:\r\n return '次も頑張ってください' if emotion == 1 else 'ゆっくり休んでください'\r\n elif attr == 2:\r\n return '' if emotion == 1 else 'ゆっくり休んでください'\r\n elif attr == 3:\r\n return '' if emotion == 1 else '無理しないで下さい'\r\n else:\r\n raise ValueError('打刻種類が不正な値です')\r\n \r\n\r\n\r\ndef main():\r\n reserve_dakoku([])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"reserve_dakoku.py","file_name":"reserve_dakoku.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"207564846","text":"from random import randint\n\n# Third Party\nfrom django.db import models\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# LingoApp\nfrom accounts.models import ForeignLanguage\n\ndef getParagraphsBounds(text):\n\tparagraphsCount = 1\n\tparagraphsPoints = []\n\tstartPoint = 0\n\tendPoint = 
0\n\tfor i in range(0, len(text)-2):\n\t\tif ( text[i] == '\\n' ):\n\t\t\tparagraphsCount += 1\n\t\t\tendPoint = i\n\t\t\tparagraphsPoints.append((startPoint, endPoint))\n\t\t\tstartPoint = endPoint + 1\n\n\treturn paragraphsPoints\n\nclass TextManager(models.Manager):\n\n\tdef createPatterns(self):\n\t\tprint(\"Creating patterns\")\n\t\tallTexts = self.all()\n\t\tfor text in allTexts:\n\t\t\tif ( Pattern.objects.filter(full_text=text).exists() ):\n\t\t\t\tcontinue;\n\n\t\t\tallBounds = getParagraphsBounds(text.content)\n\t\t\tcounter = 0\n\t\t\tfor bound in allBounds:\n\t\t\t\tpatternName = text.name + \" \" + str(counter)\n\t\t\t\tcounter += 1\n\t\t\t\tnewPattern = Pattern.objects.create(\n\t\t\t\t\t\t\t\t full_text=text,\n\t\t\t\t\t\t\t\t position_start=bound[0],\n\t\t\t\t\t\t\t\t position_end=bound[1])\n\t\t\t\tnewPattern.save()\n\nclass ResourceType(models.Model):\n\t\"\"\"\n\t\n\tType of resourse of a text.\n\n\t\"\"\"\n\n\tname = models.CharField(max_length = 30,\n\t\t\t\t\t\t\tverbose_name = 'Resource type')\n\n\tlanguage = models.ForeignKey(ForeignLanguage,\n\t\t\t\t\t\t\t\t on_delete=models.CASCADE)\n\n\turl = models.CharField(max_length=100,\n\t\t\t\t\t\t verbose_name = \"Resource url\")\n\n\tdef __str__(self):\n\t\treturn self.name\n\nclass Text(models.Model):\n\t\"\"\"\n\t\n\tAuthentic original text from which \n\tpatterns are taken.\n\t\n\t\"\"\"\n\t\n\tBEGGINER_1 = 'A1'\n\tBEGGINER_2 = 'A2'\n\tINTERMEDIATE_1 = 'B1'\n\tINTERMEDIATE_2 = 'B2'\n\tADVANCED_1 = 'C1'\n\tADVANCED_2 = 'C2'\n\n\tCOMPLEXITY_CHOICES = (\n\t\t\t(BEGGINER_1, 'Begginer 1'),\n\t\t\t(BEGGINER_2, 'Begginer 2'),\n\t\t\t(INTERMEDIATE_1, 'Intermediate 1'),\n\t\t\t(INTERMEDIATE_2, 'Intermediate 2'),\n\t\t\t(ADVANCED_1, 'Advanced 1'),\n\t\t\t(ADVANCED_2, 'Advanced 2'),\n\t\t)\n\n\tname = models.CharField(max_length = 50,\n\t\t\t\t\t\t\tverbose_name = 'Text name')\n\n\tresource = models.ForeignKey(ResourceType,\n\t\t\t\t\t\t\t \t editable=True,\n\t\t\t\t\t\t\t \t null=True,\n\t\t\t\t\t\t\t \t blank=True,\n\t\t\t\t\t\t\t \t on_delete=models.CASCADE)\n\n\tcomplexity_rank = models.CharField(max_length=2,\n\t\t\t\t\t\t\t\t\t default=ADVANCED_2,\n\t\t\t\t\t\t\t\t\t choices=COMPLEXITY_CHOICES)\n\n\tcontent = models.TextField(max_length=16384)\n\n\tobjects = TextManager()\n\n\tdef __str__(self):\n\t\treturn self.name\n\nclass Pattern(models.Model):\n\t\"\"\"\n\t\t\n\tPattern of text. \n\tPatterns are studied by users.\n\n\t\"\"\"\n\t\n\tfull_text = models.ForeignKey(Text,\n\t\t\t\t\t\t\t\t on_delete=models.CASCADE)\n\n\n\tposition_start = models.IntegerField(default=0)\n\t\n\tposition_end = models.IntegerField(default=0)\n\n\tclass Meta:\n\t\tunique_together=(('full_text', \n\t\t\t\t\t\t 'position_start', \n\t\t\t\t\t\t 'position_end'),)\n\t\t\n\tdef getText(self):\n\t\treturn self.full_text.content[self.position_start:self.position_end]\n\n\tdef __str__(self):\n\t\treturn str(self.id)\n","sub_path":"texts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617355360","text":"import libconf\nimport io\n\n# The path to the verbatim log that will be compressed using logrotate depends on the coin used. 
The path can be found\n# in the provided netmine.cfg, so we will parse it and replace it in verbatim-rotate.cfg\n\n# Read verbatim-rotate\nwith open('/coinscope/tools/resources/verbatim-rotate.cfg') as vrotate:\n vrotate_data = vrotate.read()\n\n# Read netmine\nwith io.open(\"/coinscope/netmine.cfg\", encoding='utf-8') as netmine:\n cfg = libconf.load(netmine)\n path = cfg.get(\"verbatim\").get(\"logpath\")\n # Replace the place holder (absolute_path) with the actual path\n vrotate_data = vrotate_data.replace(\"absolute_path/\", path)\n\n# Write verbatim-rotate back\nwith open('/coinscope/tools/resources/verbatim-rotate.cfg', 'w') as vrotate:\n vrotate.write(vrotate_data)\n\n","sub_path":"libraries/python/verbatim_rotate.py","file_name":"verbatim_rotate.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"262247325","text":"\"\"\" Implementation of Cosmic RIM estimator\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nprint(physical_devices)\nassert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\nconfig = tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\nimport numpy as np\nimport os, sys, argparse, time\nfrom scipy.interpolate import InterpolatedUnivariateSpline as iuspline\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nfrom rim_utils import build_rim_parallel, myAdam\nfrom recon_models import Recon_DM, Recon_Bias\n\nimport flowpm\nfrom flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout\nfrom flowpm.utils import r2c3d, c2r3d\nsys.path.append('../../utils/')\nimport tools\nfrom getbiasparams import getbias\nimport diagnostics as dg\n\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('--nc', type=int, default=32, help='Grid size')\nparser.add_argument('--ncf', type=int, default=4, help='Grid size')\nparser.add_argument('--bs', type=float, default=200, help='Box Size')\nparser.add_argument('--numd', type=float, default=0.001, help='number density')\nparser.add_argument('--nsteps', type=int, default=3, help='')\nparser.add_argument('--niter', type=int, default=200, help='Number of iterations/Max iterations')\nparser.add_argument('--lr', type=float, default=0.001, help='Learning rate')\nparser.add_argument('--optimizer', type=str, default='adam', help='Which optimizer to use')\nparser.add_argument('--batch_size', type=int, default=8, help='Batch size')\nparser.add_argument('--nsims', type=int, default=100, help='Number of simulations')\nparser.add_argument('--nbody', type=str2bool, default=True, help='Number of simulationss')\nparser.add_argument('--lpt_order', type=int, default=2, help='Order of LPT Initial conditions')\nparser.add_argument('--input_size', type=int, default=8, help='Input layer channel size')\nparser.add_argument('--cell_size', type=int, default=8, help='Cell channel size')\nparser.add_argument('--rim_iter', type=int, default=20, help='Optimization iteration')\nparser.add_argument('--epochs', type=int, default=10, 
help='Number of epochs')\nparser.add_argument('--suffix', type=str, default='', help='Suffix for folder pathname')\nparser.add_argument('--batch_in_epoch', type=int, default=20, help='Number of batches in epochs')\nparser.add_argument('--posdata', type=str2bool, default=True, help='Position data')\nparser.add_argument('--prior', type=str2bool, default=True, help='Use prior as sum')\nparser.add_argument('--RRs', type=int, default=2, help='Position data')\n\n\n\nargs = parser.parse_args()\n\n\nnc, bs = args.nc, args.bs\nnumd = args.numd\nncf = args.ncf*args.nc\nniter = args.niter\noptimizer = args.optimizer\nlr = args.lr\na0, af, nsteps = 0.1, 1.0, args.nsteps\nstages = np.linspace(a0, af, nsteps, endpoint=True)\n#anneal = True\nif args.RRs == 2: RRs = [1, 0]\nelif args.RRs == 3: RRs = [2., 1., 0]\nprint(RRs)\n\n#\nklin = np.loadtxt('../../data/Planck15_a1p00.txt').T[0]\nplin = np.loadtxt('../../data//Planck15_a1p00.txt').T[1]\nipklin = iuspline(klin, plin)\n# Compute necessary Fourier kernels \nkvec = tools.fftk((nc, nc, nc), boxsize=nc, symmetric=False)\nkmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32)\npriorwt = ipklin(kmesh)\n\n\n\n#RIM params\nparams = {}\nparams['input_size'] = args.input_size\nparams['cell_size'] = args.cell_size\nparams['strides'] = 2\nparams['middle_size'] = args.input_size // params['strides'] #lets divide by strides\nparams['cell_kernel_size'] = 5\nparams['input_kernel_size'] = 5\nparams['middle_kernel_size'] = 5\nparams['output_kernel_size'] = 5\nparams['rim_iter'] = args.rim_iter\nparams['input_activation'] = 'tanh'\nparams['output_activation'] = 'linear'\nparams['nc'] = nc\n\n\nadam = myAdam(params['rim_iter'])\nadam10 = myAdam(10*params['rim_iter'])\n#fid_recon = Recon_DM(nc, bs, a0=a0, af=af, nsteps=nsteps, nbody=args.nbody, lpt_order=args.lpt_order, anneal=True)\n\nofolder = './figs/'\n\ndef get_data(nsims=args.nsims, posdata=True):\n\n path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'\n path = path + '/L%04d_N%04d_D%04d//'%(bs, nc, numd*1e4)\n #if args.nbody: dpath = '/project/projectdirs/m3058/chmodi/rim-data/L%04d_N%03d_T%02d/'%(bs, nc, nsteps)\n #else: dpath = '/project/projectdirs/m3058/chmodi/rim-data/L%04d_N%03d_LPT%d/'%(bs, nc, args.lpt_order)\n alldata = np.array([np.load(path + 'S%04d.npy'%i) for i in range(100, 100+nsims)]).astype(np.float32)\n print(alldata.shape)\n if args.posdata: traindata, testdata = alldata[:int(0.9*nsims), [0,1]], alldata[int(0.9*nsims):, [0,1]]\n else: traindata, testdata = alldata[:int(0.9*nsims), [0,2]], alldata[int(0.9*nsims):, [0,2]]\n return traindata, testdata\n\n\ntraindata, testdata = get_data()\nprint(traindata.shape, testdata.shape)\n \n\n\n@tf.function\ndef pmpos(linear):\n if args.nbody:\n print('Nobdy sim')\n state = lpt_init(linear, a0=a0, order=args.lpt_order)\n final_state = nbody(state, stages, nc)\n else:\n print('ZA/2LPT sim')\n final_state = lpt_init(linear, a0=af, order=args.lpt_order)\n tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])\n return tfinal_field, final_state[0]\n\n\n\n@tf.function\ndef biasfield(linear, bias):\n \n b1, b2 = bias[0], bias[1]\n final_field, fpos = pmpos(linear)\n w0 = tf.reshape(linear, (linear.shape[0], -1))\n w0 = w0 - tf.expand_dims(tf.reduce_mean(w0, 1), -1)\n w2 = w0*w0\n w2 = w2 - tf.expand_dims(tf.reduce_mean(w2, 1), -1)\n weight = b1*w0 + b2*w2\n bmodel = cic_paint(tf.zeros_like(linear), fpos, weight = weight)\n #d0 = cic_paint(tf.zeros_like(linear), fpos, weight = w0)\n #d2 = cic_paint(tf.zeros_like(linear), fpos, weight = w2)\n #bmodel = b1*d0 + 
b2*d2\n return bmodel\n\n\n\n@tf.function\ndef recon_model(linear, data, bias, errormesh):\n\n print('new graph')\n\n bmodel = biasfield(linear, bias)\n residual = bmodel - data\n resk = r2c3d(residual, norm=nc**3)\n reskmesh = tf.square(tf.cast(tf.abs(resk), tf.float32))\n chisq = tf.reduce_sum(tf.multiply(reskmesh, 1/errormesh))\n\n lineark = r2c3d(linear, norm=nc**3)\n priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))\n if args.prior: prior = tf.reduce_sum(tf.multiply(priormesh, 1/priorwt))\n else: prior = tf.reduce_mean(tf.multiply(priormesh, 1/priorwt))\n\n loss = chisq + prior\n\n return loss, chisq, prior\n #return loss*nc**3, chisq*nc**3, prior*nc**3\n\n\n@tf.function\ndef recon_grad(x, y, bias, errormesh):\n with tf.GradientTape() as tape:\n tape.watch(x)\n loss = recon_model(x, y, bias, errormesh)[0]\n grad = tape.gradient(loss, x)\n return grad\n\n\n\n\ndef check_im(xx, x_init, pred, fname=None):\n fig, ax = plt.subplots(1, 3, figsize = (12, 4))\n vmin, vmax = xx.sum(axis=0).min(), xx.sum(axis=0).max()\n ax[0].imshow(xx.sum(axis=0), vmin=vmin, vmax=vmax)\n ax[0].set_title('Truth')\n ax[1].imshow(x_init.sum(axis=0), vmin=vmin, vmax=vmax)\n ax[1].set_title('initial point')\n ax[2].imshow(pred.sum(axis=0), vmin=vmin, vmax=vmax)\n ax[2].set_title('RIM recon')\n if fname is not None: plt.savefig(fname)\n else: plt.savefig(ofolder + 'rim-im.png')\n plt.close()\n\n\n\ndef check_2pt(xx, yy, rim, grad_fn, grad_params, compares, nrim=10, fname=None):\n truemesh = [xx[0], yy[0]]\n rimpreds = []\n for it in range(nrim):\n x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n pred = rim(tf.constant(x_init), tf.constant(yy), grad_fn, grad_params)[-1]\n rimpreds.append([pred[0].numpy(), biasfield(pred, grad_params[0])[0].numpy()])\n\n fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True)\n for ip, preds in enumerate(rimpreds):\n k, pks = tools.get_ps(preds, truemesh, bs)\n for i in range(2):\n lbl = None\n if ip == 0 and i == 0: lbl = 'Linear'\n if ip == 0 and i == 1: lbl = 'Final'\n ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d-'%i, alpha=0.4, label=lbl)\n ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d-'%i, alpha=0.4)\n\n lss = ['-', '--', ':', '-.']\n lws = [ 1, 1, 2, 2]\n lbls = ['Adam', 'Adam 10x', 'Best recon']\n #for ip, preds in enumerate([pred_adam, pred_adam10]):\n for ip, preds in enumerate(compares):\n k, pks = tools.get_ps(preds, truemesh, bs)\n for i in range(2):\n lbl = None\n if i == 0: lbl = lbls[ip]\n ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d'%i, ls=lss[ip+1], lw=lws[ip+1])\n ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d'%i, label=lbl, ls=lss[ip+1], lw=lws[ip+1])\n \n for axis in ax: \n axis.semilogx()\n axis.grid(which='both')\n axis.legend(fontsize=12)\n axis.set_xlabel('k(h/Mpc)', fontsize=12)\n ax[0].set_ylim(-0.1, 1.2)\n ax[1].set_ylim(-0.5, 2.0)\n ax[0].set_ylabel('$r_c$', fontsize=12)\n ax[1].set_ylabel('$t_f$', fontsize=12)\n plt.tight_layout()\n if fname is not None: plt.savefig(fname)\n else: plt.savefig('rim-2pt.png')\n plt.close()\n\n\n\ndef setupbias(nsims = 2, cutoff=1.5):\n\n b1, b2, perr = [], [], []\n for i in range(nsims):\n idx = np.random.randint(0, traindata.shape[0], 1)\n idx = idx*0 + 1\n xx = traindata[idx, 0].astype(np.float32)\n yy = traindata[idx, 1].astype(np.float32)\n _, fpos = pmpos(tf.constant(xx))\n fpos = fpos[0].numpy() *bs/nc\n bparams, bmodel = 
getbias(bs, nc, yy[0]+1, xx[0], fpos)\n errormesh = yy - np.expand_dims(bmodel, 0)\n kerror, perror = tools.power(errormesh[0]+1, boxsize=bs)\n kerror, perror = kerror[1:], perror[1:]\n perr += [perror]\n b1 += [bparams[0]]\n b2 += [bparams[1]]\n print(\"b1 : %0.3f $\\pm$ %0.2f\"%(np.array(b1).mean(), np.array(b1).std()))\n print(\"b2 : : %0.3f $\\pm$ %0.2f\"%(np.array(b2).mean(), np.array(b2).std()))\n b1, b2 = np.array(b1).mean(), np.array(b2).mean()\n\n perr = np.array(perr).mean(axis=0)\n kny = nc*np.pi/bs\n perr[np.where(kerror > cutoff*kny)] = perr[np.where(kerror > cutoff*kny)[0][0]]\n \n ipkerror = lambda x: 10**np.interp(np.log10(x), np.log10(kerror), np.log10(perr))\n errormesh = tf.constant(ipkerror(kmesh), dtype=tf.float32)\n return b1, b2, errormesh\n\n\ndef main():\n \"\"\"\n Model function for the CosmicRIM.\n \"\"\"\n\n if args.posdata: suff = 'pos'\n else: suff = 'mass'\n if args.nbody: suff = suff + '-T%02d'%nsteps\n else: suff = suff + '-LPT2'\n if args.prior : pass\n else: suff = suff + '-noprior'\n if len(RRs) !=2 : suff = suff + \"-RR%d\"%(len(RRs))\n print(suff)\n\n rim = build_rim_parallel(params)\n #grad_fn = recon_dm_grad\n #\n b1, b2, errormesh = setupbias()\n bias = tf.constant([b1, b2], dtype=tf.float32)\n grad_fn = recon_grad\n grad_params = [bias, errormesh]\n\n idx = np.random.randint(0, testdata.shape[0], 1)\n idx = idx*0 + 1\n xx, yy = testdata[idx, 0].astype(np.float32), testdata[idx, 1].astype(np.float32), \n x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n fid_recon = Recon_Bias(nc, bs, bias, errormesh, a0=0.1, af=1.0, nsteps=args.nsteps, nbody=args.nbody, lpt_order=2, anneal=True, prior=args.prior)\n #minic, minfin = fid_recon.reconstruct(tf.constant(yy), RRs=[1.0, 0.0], niter=args.rim_iter*10, lr=0.1)\n \n print(\"Loss at truth : \", recon_model(tf.constant(xx), tf.constant(yy), *[bias, errormesh]))\n print(\"Loss at init : \", recon_model(tf.constant(x_init), tf.constant(yy), *[bias, errormesh]))\n\n pred_adam = adam(tf.constant(x_init), tf.constant(yy), grad_fn, [bias, errormesh])\n print(\"Loss at adam : \", recon_model(tf.constant(pred_adam), tf.constant(yy), *[bias, errormesh]))\n pred_adam = [pred_adam[0].numpy(), biasfield(pred_adam, bias)[0].numpy()]\n\n pred_adam10 = adam10(tf.constant(x_init), tf.constant(yy), grad_fn, [bias, errormesh])\n print(\"Loss at adam 10x : \", recon_model(tf.constant(pred_adam10), tf.constant(yy), *[bias, errormesh]))\n pred_adam10 = [pred_adam10[0].numpy(), biasfield(pred_adam10, bias)[0].numpy()]\n minic, minfin = fid_recon.reconstruct(tf.constant(yy), RRs=RRs, niter=args.rim_iter*10, lr=0.1)\n compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]\n\n check_im(xx[0], x_init[0], minic[0], fname= './figs/L%04d-N%03d-%s-im.png'%(bs, nc, suff))\n check_2pt(xx, yy, rim, grad_fn, grad_params, compares, fname= './figs/L%04d-N%03d-%s-2pt.png'%(bs, nc, suff))\n print('Test set generated')\n\n sys.exit()\n\n\n x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n x_pred = rim(x_init, yy, grad_fn, grad_params)\n\n \n\n #\n # @tf.function\n def rim_train(x_true, x_init, y):\n\n with tf.GradientTape() as tape:\n x_pred = rim(x_init, y, grad_fn, grad_params)\n res = (x_true - x_pred)\n loss = tf.reduce_mean(tf.square(res))\n gradients = tape.gradient(loss, rim.trainable_variables)\n return loss, gradients\n\n\n ##Train and save\n piter, testiter = 10, 20\n losses = []\n lrs = [0.001, 0.0005, 0.0001]\n liters = [201, 1001, 1001]\n trainiter = 0 \n start = time.time()\n 
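    # Staged-training note: the loop below pairs each learning rate in lrs
    # ([0.001, 0.0005, 0.0001]) with the matching iteration budget in liters
    # ([201, 1001, 1001]), printing the loss every piter steps and saving
    # plots, comparison spectra and RIM weights every testiter steps.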
x_test, y_test = None, None\n\n for il in range(3):\n print('Learning rate = %0.3e'%lrs[il])\n opt = tf.keras.optimizers.Adam(learning_rate=lrs[il])\n\n for i in range(liters[il]):\n idx = np.random.randint(0, traindata.shape[0], args.batch_size)\n xx, yy = traindata[idx, 0].astype(np.float32), traindata[idx, 1].astype(np.float32), \n x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n \n\n loss, gradients = rim_train(x_true=tf.constant(xx), \n x_init=tf.constant(x_init), \n y=tf.constant(yy))\n\n losses.append(loss.numpy()) \n opt.apply_gradients(zip(gradients, rim.trainable_variables))\n\n if i%piter == 0: \n print(\"Time taken for %d iterations : \"%piter, time.time() - start)\n print(\"Loss at iteration %d : \"%i, losses[-1])\n start = time.time()\n if i%testiter == 0: \n plt.plot(losses)\n plt.savefig(ofolder + 'losses.png')\n plt.close()\n\n if x_test is None:\n idx = np.random.randint(0, testdata.shape[0], 1)\n x_test, y_test = testdata[idx, 0].astype(np.float32), testdata[idx, 1].astype(np.float32), \n print(\"Loss at truth : \", recon_model(tf.constant(x_test), tf.constant(y_test), *[bias, errormesh]))\n print(\"Loss at init : \", recon_model(tf.constant(x_init), tf.constant(y_test), *[bias, errormesh]))\n \n pred_adam = adam(tf.constant(x_init), tf.constant(y_test), grad_fn, [bias, errormesh])\n print(\"Loss at adam : \", recon_model(tf.constant(pred_adam), tf.constant(y_test), *[bias, errormesh]))\n pred_adam = [pred_adam[0].numpy(), biasfield(pred_adam, bias)[0].numpy()]\n \n pred_adam10 = adam10(tf.constant(x_init), tf.constant(y_test), grad_fn, [bias, errormesh])\n print(\"Loss at adam 10x : \", recon_model(tf.constant(pred_adam10), tf.constant(y_test), *[bias, errormesh]))\n pred_adam10 = [pred_adam10[0].numpy(), biasfield(pred_adam10, bias)[0].numpy()]\n minic, minfin = fid_recon.reconstruct(tf.constant(y_test), RRs=[1.0, 0.0], niter=args.rim_iter*10, lr=0.1)\n compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]\n check_2pt(x_test, y_test, rim, grad_fn, grad_params, compares, fname= 'halosrecon.png')\n print('Test set generated')\n\n x_init = np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)\n #x_init = (y_test - (y_test.max() - y_test.min())/2.)/y_test.std() + np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)\n pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn, grad_params)[-1]\n check_im(x_test[0], x_init[0], pred.numpy()[0], fname=ofolder + 'rim-im-%04d.png'%trainiter)\n check_2pt(x_test, y_test, rim, grad_fn, grad_params, compares, fname=ofolder + 'rim-2pt-%04d.png'%trainiter)\n rim.save_weights(ofolder + '/%d'%trainiter)\n\n trainiter += 1\n\n\n\n\n\n#$#$\n#$#$\n#$#$ k, ps = tools.power(xx[0], boxsize=bs)\n#$#$ k, pf = tools.power(zz[0], boxsize=bs)\n#$#$ k, ph = tools.power(yy[0], boxsize=bs)\n#$#$ k, pb = tools.power(bmodel, boxsize=bs)\n#$#$ k, pxsh = tools.power(xx[0], f2=yy[0], boxsize=bs)\n#$#$ k, pxfh = tools.power(zz[0], f2=yy[0], boxsize=bs)\n#$#$ k, pxbh = tools.power(bmodel, f2=yy[0], boxsize=bs)\n#$#$\n#$#$ fig, ax = plt.subplots(1, 2, figsize = (9, 4))\n#$#$ ax[0].plot(k, (ps*ps)**0.5, 'C%d-'%0, alpha=0.7)\n#$#$ ax[0].plot(k, (pf*pf)**0.5, 'C%d-'%1, alpha=0.7)\n#$#$ ax[0].plot(k, (ph*ph)**0.5, 'C%d-'%2, alpha=0.7)\n#$#$ ax[0].plot(k, pb, 'C%d--'%3, alpha=0.7)\n#$#$ ax[0].plot(kerror, perror, 'C%d:'%4, alpha=0.7)\n#$#$ ax[1].plot(k, 
pxsh/(ps*ph)**0.5, 'C%d-'%0, alpha=0.7)\n#$#$ ax[1].plot(k, pxfh/(pf*ph)**0.5, 'C%d-'%1, alpha=0.7)\n#$#$ ax[1].plot(k, pxbh/(pb*ph)**0.5, 'C%d-'%3, alpha=0.7)\n#$#$ ax[0].loglog()\n#$#$ ax[1].semilogx()\n#$#$ ax[1].set_ylim(-0.1, 1.2)\n#$#$ for axis in ax: axis.grid(which='both')\n#$#$ plt.savefig('2pt-halos.png')\n#$#$ plt.close()\n#$#$\n#$#$ print(xx.shape, yy.shape, zz.shape)\n#$#$ fig, axar = plt.subplots(2, 3, figsize = (9, 8))\n#$#$ ax = axar[0]\n#$#$ ax[0].imshow(xx[0].sum(axis=0))\n#$#$ ax[1].imshow(zz[0].sum(axis=0))\n#$#$ ax[2].imshow(yy[0].sum(axis=0))\n#$#$ ax = axar[1]\n#$#$ ax[0].imshow(yy[0].sum(axis=0))\n#$#$ ax[1].imshow(bmodel.sum(axis=0))\n#$#$ ax[2].imshow(errormesh[0].sum(axis=0))\n#$#$ plt.savefig('testhalos.png')\n#$#$\n#$#$\n#$#$\n#$#$ idx = np.random.randint(0, traindata.shape[0], 4)\n#$#$ xx = traindata[idx, 0].astype(np.float32)\n#$#$ bmodel = biasfield(tf.constant(xx), tf.constant([b1, b2], dtype=tf.float32)).numpy()\n#$#$ yy = traindata[idx, 1].astype(np.float32)\n#$#$\n#$#$ print(xx.shape, bmodel.shape, yy.shape)\n#$#$\n#$#$ fig, ax = plt.subplots(1, 2, figsize = (9, 4))\n#$#$ for i in range(len(idx)):\n#$#$ print(i)\n#$#$ k, ps = tools.power(xx[i], boxsize=bs)\n#$#$ k, ph = tools.power(yy[i], boxsize=bs)\n#$#$ k, pb = tools.power(bmodel[i], boxsize=bs)\n#$#$ k, pxsh = tools.power(xx[i], f2=yy[i], boxsize=bs)\n#$#$ k, pxbh = tools.power(bmodel[i], f2=yy[i], boxsize=bs)\n#$#$ \n#$#$ ax[0].plot(k, ps, 'C%d-'%0, alpha=0.7)\n#$#$ ax[0].plot(k, ph, 'C%d-'%2, alpha=0.7)\n#$#$ ax[0].plot(k, pb, 'C%d--'%3, alpha=0.7)\n#$#$ ax[1].plot(k, pxsh/(ps*ph)**0.5, 'C%d-'%0, alpha=0.7)\n#$#$ ax[1].plot(k, pxbh/(pb*ph)**0.5, 'C%d-'%3, alpha=0.7)\n#$#$ ax[0].loglog()\n#$#$ ax[1].semilogx()\n#$#$ ax[1].set_ylim(-0.1, 1.2)\n#$#$ for axis in ax: axis.grid(which='both')\n#$#$ plt.savefig('2pt-halos-tf.png')\n#$#$ plt.close()\n#$#$\n#$#$\n#$#$\nif __name__==\"__main__\":\n main()\n","sub_path":"code/rim/halo_recon.py","file_name":"halo_recon.py","file_ext":"py","file_size_in_byte":20145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"44281917","text":"from flask import Flask, Markup, make_response\nfrom flask_restful import Api, Resource\nfrom flask import render_template, request, session, url_for, redirect, flash\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport configs\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\napp.debug = True\napp.config.from_object(configs)\n\napp.config.update(\n MONGO_URI='mongodb://localhost:27019/market_value',\n MONGO_USERNAME='',\n MONGO_PASSWORD=''\n)\nmongo = PyMongo(app)\n\napp_2 = Flask('dongqiudi')\napp_2.debug = True\napp_2.config.from_object(configs)\napp_2.config.update(\n MONGO_URI='mongodb://localhost:27019/player_analysis',\n MONGO_USERNAME='',\n MONGO_PASSWORD=''\n)\nmongo_2 = PyMongo(app_2)\n\n@app.route('/')\ndef index():\n return Markup('
Hello %s') % 'Flask'\n\n@app.route('/dongqiudi/')\ndef dongqiudi():\n    matchs = mongo_2.db.dongqiudi_player.find({'score': 'VS', 'support_direction': {'$ne': ''}, \"match_day\": {\"$gt\": \"2018-11-15\"}}).sort([('match_day', 1)])\n    return render_template('dongqiudi.html', matchs=matchs)\n\n@app.route('/matchs/')\ndef matchs():\n    qi_shu = mongo.db.new_realtime_matchs.find().sort([('qi_shu', -1)])[0]['qi_shu']\n    # qi_shu = 181004\n    matchs = mongo.db.new_realtime_matchs.find({'qi_shu': qi_shu}).sort([('match_time', 1)])\n    return render_template('matchs.html', matchs=matchs)\n\n@app.route('/shili_matchs/')\ndef shili_matchs():\n    qi_shu = mongo.db.shili_realtime_matchs.find().sort([('qi_shu', -1)])[0]['qi_shu']\n    # qi_shu = 180903\n    matchs = mongo.db.shili_realtime_matchs.find({'qi_shu': qi_shu}).sort([('match_time', 1)])\n    return render_template('shili_matchs.html', matchs=matchs)\n\n@app.errorhandler(404)\ndef page_not_found(error):\n    return render_template('page_not_found.html'), 404\n\n\nclass InvalidUsage(Exception):\n    status_code = 400\n\n    def __init__(self, message, status_code=400):\n        Exception.__init__(self)\n        self.message = message\n        self.status_code = status_code\n\n@app.errorhandler(InvalidUsage)\ndef invalid_usage(error):\n    response = make_response(error.message)\n    response.status_code = error.status_code\n    return response\n\n@app.route('/exception')\ndef exception():\n    app.logger.debug('Enter exception method')\n    app.logger.error('403 error happened')\n    raise InvalidUsage('No privilege to access the resource', status_code=403)\n\n# Logging section\nserver_log = TimedRotatingFileHandler('server.log', 'D')\nserver_log.setLevel(logging.DEBUG)\nserver_log.setFormatter(logging.Formatter(\n    '%(asctime)s %(levelname)s: %(message)s'\n))\nerror_log = TimedRotatingFileHandler('error.log', 'D')\nerror_log.setLevel(logging.ERROR)\nerror_log.setFormatter(logging.Formatter(\n    '%(asctime)s: %(message)s [in %(pathname)s:%(lineno)d]'\n))\napp.logger.addHandler(server_log)\napp.logger.addHandler(error_log)\n# End of logging section\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"crossOdds_web.py","file_name":"crossOdds_web.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"391264095","text":"#7-5 Finding parts\n#Each part has a unique number. Write a program that checks whether the parts a customer requested are in stock\n#Sample input\n#number of parts in stock N = 5\n#part numbers in stock [8, 3, 7, 9, 2]\n#number of requested parts M = 3\n#requested part numbers [5, 7, 9]\n#Sample output\n#no yes yes\n\n#Binary search source code\ndef binary_search(array, target, start, end):\n    if start > end:\n        return None\n    mid = (start + end) // 2\n    if array[mid] == target:\n        return mid\n    elif array[mid] > target:\n        return binary_search(array, target, start, mid - 1) #[1]\n    else:\n        return binary_search(array, target, mid + 1, end)\n\nn = int(input())\narray = list(map(int, input().split()))\narray.sort()\n\nm = int(input())\nx = list(map(int, input().split()))\n\n\nfor i in x:\n    #check whether the part is in stock\n    result = binary_search(array, i, 0, n - 1)\n    if result is not None:\n        print('yes', end = ' ')\n    else:\n        print('no', end = ' ')\n\n#[1] Why do I keep forgetting to put the return here? The value has to be returned for the next step of the computation to run...\n","sub_path":"7-5.py","file_name":"7-5.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"4143192","text":"import time\n\ndef isprime2(n):\n    '''\n    check if integer n is a prime, return True or False\n    '''\n    # 2 is the only even prime\n    if n == 2:\n        return True\n    # integers less than 2 and even numbers other than 2 are not prime\n    elif n < 2 or not n & 1:\n        return False\n    # loop looks at odd numbers 3, 5, 7, ... to sqrt(n)\n    for i in range(3, int(n**0.5)+1, 2):\n        if n % i == 0:\n            return False\n    return True\n\ndef main():\n    start = time.time()\n    prime_list = [2]\n\n    for n in range(3, 2000000, 2):\n        #for printing primes\n        if n % 5001 == 0:\n            print(\"Testing: \" + str(n))\n        if isprime2(n):\n            #print(\"Prime Found: \"+str(n))\n            prime_list.append(n)\n    \n    #print(prime_list)\n    total = sum(prime_list)\n    end = time.time()\n\n    print(\"The sum is: \"+str(total))\n    print(\"This took \"+str(end-start)+\" seconds.\")\n\nmain()\n","sub_path":"0010/solution10.py","file_name":"solution10.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"45149654","text":"import shutil\ncolumns = shutil.get_terminal_size().columns\nimport pandas as pd\nfrom sklearn.model_selection import GridSearchCV\n\nclass BestClassifier:\n    \"\"\"\n    class that fits a list of models to the training data to determine\n    the model of best fit, where best fit is defined against any scoring_metric\n    allowed by the scikit-learn API.\n\n    models: a dictionary of {modelName: modelInstance()}\n    params: a dictionary of {modelName: {modelParameters}}\n    scoring_metric: ['f1_weighted', 'precision', 'recall', 'accuracy']\n    \"\"\"\n    def __init__(self, models, params, scoring_metric):\n        if not isinstance(models, dict):\n            raise ValueError(\"please specify a dictionary of {model_name: model_instance}\")\n        if not isinstance(params, dict):\n            raise ValueError(\"please specify a dictionary of parameters\")\n        if set(models.keys())-set(params.keys()) != set():\n            raise ValueError(\"No specified parameters for model(s) {}\".format(set(models.keys())-set(params.keys())))\n\n        self.models = models \n        self.params = params \n        self.single_classifier_best = {}\n        self.scoring_metric = scoring_metric\n\n    def fit(self, train_features, train_actuals):\n        \"\"\"\n        fits the list of models to the training data, thereby obtaining in each case an evaluation score after GridSearchCV cross-validation\n        \"\"\"\n        for name in self.models.keys():\n            print('-'*shutil.get_terminal_size().columns)\n            print(\"evaluating {}\".format(name).center(columns))\n            print('-'*shutil.get_terminal_size().columns)\n            estimator = self.models[name]\n            est_params = self.params[name]\n            gscv = GridSearchCV(estimator, est_params, cv=5, scoring=self.scoring_metric)\n            gscv.fit(train_features, train_actuals)\n            print(\"best parameters are: {}\".format(gscv.best_estimator_))\n            self.single_classifier_best[name] = gscv\n    \n    def evaluation(self):\n        \"\"\"\n        prints a summary report, ranking the models in terms of highest evaluation score\n        \"\"\"\n        rows_list = []\n        for name in self.single_classifier_best.keys():\n            row = {}\n            row['algorithm'] = name 
            row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n            rows_list.append(row)\n        \n        scoring_df = pd.DataFrame(rows_list)\n        scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n        print()\n
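        # The prints below frame the ranked scores between banner lines of
        # '*' characters spanning the current terminal width.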
print('*'*shutil.get_terminal_size().columns)\n        print(scoring_sorted)\n        print('*'*shutil.get_terminal_size().columns)\n        self.evaluation_scores = scoring_sorted","sub_path":"src/bestclassifier.py","file_name":"bestclassifier.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"183974407","text":"from django.contrib import admin\nfrom .models import Cliente\nfrom guardian.shortcuts import assign_perm  # used by save_model below\n#from guardian.admin import GuardedModelAdmin\n\n\n# Register your models here.\n@admin.register(Cliente)\nclass ClienteAdmin(admin.ModelAdmin):\n    '''Admin View for Cliente '''\n    readonly_fields = ('advogado',)\n    def save_model(self, request, obj, form, change):\n        if not obj.pk:\n            obj.advogado = request.user\n        \n        \n        super().save_model(request, obj, form, change) \n        assign_perm('view_cliente',request.user,obj)","sub_path":"clientes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"431957017","text":"# -*- coding: utf-8 -*-\nimport math, io, random\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom PIL import Image\nfrom baselines.PythonClient import *\nfrom baselines.projection import *\nimport tensorflow as tf\n\nclass AirSimEnv(gym.Env):\n    metadata = {\n        'render.modes': ['human', 'rgb_array'],\n        'video.frames_per_second': 30\n    }\n\n    def __init__(self):\n        self.client = AirSimClient(port=41451)\n        self.client.confirmConnection()\n        self.client.enableApiControl(True)\n        self.client.armDisarm(True)\n        self.log_file = open('logs.txt', 'w')\n\n        self.min_X = 0.0\n        self.max_X = 1.0\n        self.min_Y = 0.0\n        self.max_Y = 1.0\n        self.rt2 = math.sqrt(2)\n        self.episodes = 0\n        self.cumulative = 0.0\n\n        MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'\n\n        # Path to frozen detection graph. 
This is the actual model that is used for the object detection.\n PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n # List of the strings that is used to add correct label for each box.\n # PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n # label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n # categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n # use_display_name=True)\n # category_index = label_map_util.create_category_index(categories)\n\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n self.sess = tf.Session(graph=self.detection_graph)\n\n\n self._reset()\n # self.min_position = -1.2\n # self.max_position = 0.6\n # self.max_speed = 0.07\n # self.goal_position = 0.45 # was 0.5 in gym, 0.45 in Arnaud de Broissia's version\n # self.power = 0.0015\n\n # self.low_state = np.array([self.min_position, -self.max_speed])\n # self.high_state = np.array([self.max_position, self.max_speed])\n\n self.viewer = None\n #elf.observation = self.image\n #self.observation = np.concatenate([self.last_image, self.image])\n # self.action_space = spaces.Box(0.0, 1.0, shape = (4,))\n self.action_space = spaces.Box(-0.5, 0.5, shape = (2,))\n #self.observation_space = spaces.Box(low=np.zeros(int(self.width),int(self.height),3), high=np.zeros(int(self.width),int(self.height),3)+255)\n self.observation_space = spaces.Box(low=np.zeros(self.observation.shape), high=np.zeros(self.observation.shape)+255)\n self.observation = None\n\n self._seed()\n\n def get_rbg(self, response):\n binary_rgb = response.image_data_uint8\n png = Image.open(io.BytesIO(binary_rgb)).convert('RGB')\n rgb = np.array(png)\n self.width = rgb.shape[1]\n self.height = rgb.shape[0]\n #rgb_vec = rgb.flatten()\n return rgb\n\n def get_depth(self, response):\n binary_rgb = response.image_data_uint8\n png = Image.open(io.BytesIO(binary_rgb)).convert('RGB')\n rgb = np.array(png)\n depth = np.expand_dims(rgb[:,:,0], axis=2)\n #w = Image.fromarray(depth, mode='L')\n #w.show()\n self.width = rgb.shape[1]\n self.height = rgb.shape[0]\n #depth_vec = depth.flatten()\n return depth\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def get_obs(self, action=None):\n if self.image is None:\n return None\n #self.observation = np.concatenate([self.last_image, self.image])\n self.observation = self.image\n\n # 5 is airplane\n # 16 is bird\n rgb = np.array([np.transpose(np.transpose(self.image)[:3])])\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n 
feed_dict={self.image_tensor: rgb})\n\n plane_box = boxes[0][5][:]\n bird_box = boxes[0][16][:]\n maybe_plane_box = boxes[0][15][:]\n maybe_bird_box = boxes[0][16][:]\n self.observation = self.observation.flatten()\n if action is not None:\n a = np.array(action).flatten()\n self.observation = np.concatenate([a,\n plane_box, bird_box, maybe_plane_box, maybe_bird_box,\n self.observation])\n return self.observation\n\n def _step(self, raw_action):\n #action = np.matrix([raw_action.item(0)*float(self.width),raw_action.item(1)*float(self.height)])\n action = np.matrix([raw_action.item(0)+0.5,raw_action.item(1)+0.5])\n #x = self.c.item(0)/self.width\n #y = self.c.item(1)/self.height\n self.reward = 1-((np.linalg.norm(action-self.last_loc))/self.rt2)\n self.cumulative += self.reward\n self.iteration += 1\n #print(self.iteration)\n\n if self.episodes % 500 == 0:\n if self.fw is None:\n self.fw = open('./images/episode_'+str(self.episodes)+'/actions.txt', 'w')\n self.fw.write('('+str(action.item(0))+','+str(action.item(1))+')\\n')\n\n # An action of 0 is the NOOP\n j = 0\n while True:\n if j > 50:\n self.done = True\n\n if self.episodes % 500 == 0:\n self.fw.close()\n self.fw = None\n self.episodes+=1\n print(str(self.episodes)+': '+str(self.cumulative/float(self.iteration))+' *'+str(self.iteration))\n self.log_file.write(str(self.episodes)+': '+str(self.cumulative/float(self.iteration))+' *'+str(self.iteration)+'\\n')\n self.cumulative = 0\n return self.observation, self.reward, self.done, self.info\n rot_inc = 5.0+float(j)/10.0\n vel_inc = 1.0+float(j)/10.0\n #print(rot_inc)\n dC = np.matrix([random.normalvariate(self.v.item(0),vel_inc/self.fps),\n random.normalvariate(self.v.item(1),vel_inc/self.fps),\n random.normalvariate(self.v.item(2),vel_inc/self.fps)]\n )\n dO = np.matrix([random.normalvariate(self.r.item(0),vel_inc/self.fps),\n random.normalvariate(self.r.item(1),rot_inc/self.fps),\n random.normalvariate(self.r.item(2),rot_inc/self.fps)]\n )\n newC = np.add(self.c, dC)\n newO = np.add(self.o, dO)\n d = np.linalg.norm(self.t-newC)\n (x, y) = projection(self.t, newC, newO, w=float(self.width), h=float(self.height))\n total_v = np.linalg.norm(dC)\n if x <= float(self.width)*0.95 and x >= float(self.width)*0.05 and y <= float(self.height)*0.95 and y >= float(self.height)*0.05 \\\n and d > 3 and d < 30 and newC.item(2) < -2 \\\n and total_v*self.fps <= 30:\n break\n j += 1\n self.c = newC\n self.v = dC\n self.o = newO\n self.r = dO\n x = x/float(self.width)\n y = y/float(self.height)\n self.last_loc = np.matrix([x, y])\n self.state = self._render()\n self.observation = self.get_obs(self.last_loc)\n self.done = (self.iteration > 100)\n info = (self.c, self.v, self.o, self.r)\n self.info = {}\n #print(action)\n #print(np.matrix([x,y]))\n #print(self.reward)\n if self.done:\n if self.episodes % 500 == 0:\n self.fw.close()\n self.fw = None\n self.episodes+=1\n print(str(self.episodes)+': '+str(self.cumulative/float(self.iteration)))\n self.log_file.write(str(self.episodes)+': '+str(self.cumulative/float(self.iteration))+'\\n')\n self.cumulative = 0\n return self.observation, self.reward, self.done, self.info\n\n def _reset(self):\n self.iteration = 0\n self.t = np.matrix([-10.0, 10.0, -10.0])\n self.o = np.matrix([0.0,0.0,0.0])\n self.c = np.matrix([-20.0, 10.0, -10.0])\n self.v = np.matrix([0.0,0.0,0.0])\n self.r = np.matrix([0.0,0.0,0.0])\n self.fps = 30.0\n self.client.simSetPose(Vector3r(self.c.item(0), self.c.item(1), self.c.item(2)), \n 
self.client.toQuaternion(math.radians(self.o.item(1)),math.radians(self.o.item(0)),math.radians(self.o.item(2))))\n self.image = None\n self.fw = None\n #response = self.client.simGetImages([ImageRequest(0, AirSimImageType.Scene)])[0]\n #self.image = self.get_rbg(response)\n\n self._render()\n\n self.observation = self.get_obs(np.matrix([0.5,0.5]))\n\n (x, y) = projection(self.t, self.c, self.o, w=float(self.width), h=float(self.height))\n x = x/float(self.width)\n y = y/float(self.height)\n self.last_loc = np.matrix([x, y])\n return self.observation\n\n def _render(self, mode='human', close=False):\n self.client.simSetPose(Vector3r(self.c.item(0), self.c.item(1), self.c.item(2)),\n self.client.toQuaternion(math.radians(self.o.item(1)),math.radians(self.o.item(0)),math.radians(self.o.item(2))))\n\n self.last_image = self.image\n responses = self.client.simGetImages([ImageRequest(0, AirSimImageType.Scene),\n ImageRequest(0, AirSimImageType.DepthVis)])\n if self.episodes % 500 == 0:\n if not os.path.exists('./images/episode_'+str(self.episodes)+'/'):\n os.makedirs('./images/episode_'+str(self.episodes)+'/')\n AirSimClient.write_file(os.path.normpath('./images/episode_'+str(self.episodes)+'/'+str(self.iteration)+'.png'),\n responses[0].image_data_uint8)\n rgb = self.get_rbg(responses[0])\n #response = self.client.simGetImages([ImageRequest(0, AirSimImageType.DepthVis)])[0]\n depth = self.get_depth(responses[1])\n self.image = np.concatenate([rgb, depth], axis=2)\n\n if self.last_image is None:\n self.last_image = self.image\n\n return self.image\n\n","sub_path":"baselines/AirSimEnv.py","file_name":"AirSimEnv.py","file_ext":"py","file_size_in_byte":11292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"133195768","text":"from rest_framework import serializers\n\nclass ContextSerializer(serializers.Serializer):\n \"\"\"\n Return context with object data.\n \"\"\"\n @property\n def data(self):\n context = self.context.get('context', {})\n context.update(self.to_native(self.object))\n return context","sub_path":"glazed_routes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"467211953","text":"from datetime import datetime\r\nimport operator\r\n\r\n\r\n\r\nplain_commands = []\r\n\r\nwith open(\"data/day4.txt\") as data:\r\n\tplain_commands=data.readlines()\r\n\r\nguard_sleep_mins = {}\r\nguard_minutes_tracker = {}\r\n\r\ni = 0\r\n\r\ncurrent_guard = 0\r\nfell_asleep_min = -199999\r\n\r\n\r\nwhile (i < (len(plain_commands))):\r\n\r\n\tcurrent_min = int(plain_commands[i].split(\":\")[1].split(\"]\")[0])\r\n\t\r\n\t# if the line is that X is now the guard, store the shift events\r\n\tif(\"begins shift\" in plain_commands[i]):\r\n\t#\tprint(\"starting a shift\")\r\n\t\t# reset events ( new guard )\r\n\t\t# update who's on shift\r\n\t\tfell_asleep_min = 100000\r\n\t\tcurrent_guard = plain_commands[i].split(\"#\")[1].split(\" \")[0]\r\n\r\n\t\tif(not guard_sleep_mins.get(current_guard)):\r\n\t\t\tguard_sleep_mins[current_guard] =0\r\n\t\t\tguard_minutes_tracker[current_guard] = [0] * 60\r\n\tif(\"falls asleep\" in plain_commands[i]):\r\n\t\tfell_asleep_min = current_min\r\n\tif(\"wakes up\" in plain_commands[i]):\r\n\t\tfor j in range(fell_asleep_min,current_min):\r\n\t\t\tguard_minutes_tracker[current_guard][j]+=1\r\n\t\t\tguard_sleep_mins[current_guard] += 1\r\n\ti+=1\r\n\r\n\r\n\r\nsleepiest_guard = 
max(guard_sleep_mins.items(), key=operator.itemgetter(1))[0]\r\nsleepiest_minute_for_sleepiest_guard = guard_minutes_tracker[sleepiest_guard].index(max(guard_minutes_tracker[sleepiest_guard]))\r\nprint(f\"pt1:\\nOverall sleepiest guard, minute: {sleepiest_guard} @ {sleepiest_minute_for_sleepiest_guard}m\")\r\nprint(f\"= {sleepiest_minute_for_sleepiest_guard * int(sleepiest_guard)}\")\r\n\r\nsleepiest_minute_amount = -1\r\nsleepiest_minute = -1\r\nsleepiest_guard_id = -1\r\n\r\nfor guard in guard_minutes_tracker:\r\n\tguards_sleepiest_minute_amount = max(guard_minutes_tracker[guard])\r\n\tguards_sleepiest_minute = guard_minutes_tracker[guard].index(max(guard_minutes_tracker[guard]))\r\n\r\n\tif guards_sleepiest_minute_amount > sleepiest_minute_amount:\r\n\t\tsleepiest_minute_amount = guards_sleepiest_minute_amount\r\n\t\tsleepiest_minute = guards_sleepiest_minute\r\n\t\tsleepiest_guard_id = int(guard)\r\n\r\nprint(f\"pt2: amount,time of guard most consistently sleeping : {sleepiest_minute_amount} @ {sleepiest_minute}m\")\r\nprint(f\"= {sleepiest_guard_id * sleepiest_minute}\")\r\n\r\n\r\n","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"268581556","text":"import math\n\n\nclass Node:\n\n    def __init__(self, n, p):\n        self.name = n\n        self.position = p\n        self.visited = False\n        self.edges = []\n\n    def get_name(self):\n        return self.name\n\n    def get_edges(self):\n        return self.edges\n\n\nclass Edge:\n\n    def __init__(self, eName, startNode, endNode, alpha, beta, road_length):\n        self.edgeName = eName\n        self.startNode = startNode\n        self.endNode = endNode\n        self.startNodeDirToEndNode = alpha\n        self.endNodeDirToStartNode = beta\n        self.road_length = road_length\n\n    def get_edgeName(self):\n        return self.edgeName\n\n    def get_startNode(self):\n        return self.startNode\n\n    def get_endNode(self):\n        return self.endNode\n\n    def get_startNode_name(self):\n        return self.startNode.name\n\n    def get_endNode_name(self):\n        return self.endNode.name\n\n    def get_startNodeDirToEndNode(self):\n        return self.startNodeDirToEndNode\n\n    def get_endNodeDirToStartNode(self):\n        return self.endNodeDirToStartNode\n\n\nclass Graph:\n    def __init__(self):\n        self.nodes_dict = None\n        self.edges_dict = None\n\n    def create_simple(self):\n        node_names = ['x', 'a', 's', 'b']\n        node_pos = [(0, 10), (-10, 0), (0, 0), (10, 0)]\n        nodes_dict = {}\n\n        # Create nodes and add them to the dictionary\n        for index in range(0, len(node_names)):\n            key = node_names[index]\n            value = Node(node_names[index], node_pos[index])\n            nodes_dict[key] = value\n\n        edges_dict = {}\n        e1 = Edge(\"e1\", nodes_dict['s'], nodes_dict['x'], 90, 270, 10)\n        e2 = Edge(\"e2\", nodes_dict['a'], nodes_dict['s'], 0, 180, 10)\n        e3 = Edge(\"e3\", nodes_dict['b'], nodes_dict['s'], 180, 0, 10)\n\n        edges_dict['sx'] = e1\n        edges_dict['as'] = e2\n        edges_dict['bs'] = e3\n\n        # add e1 to both vertices\n        nodes_dict['s'].edges.append(e1)\n        nodes_dict['x'].edges.append(e1)\n\n        # add e2 to both vertices\n        nodes_dict['a'].edges.append(e2)\n        nodes_dict['s'].edges.append(e2)\n\n        # add e3 to both vertices\n        nodes_dict['b'].edges.append(e3)\n        nodes_dict['s'].edges.append(e3)\n\n        # set class vars\n        self.nodes_dict = nodes_dict\n        self.edges_dict = edges_dict\n\n    def create_simple_different_road_lengths(self):\n        node_names = ['x', 'a', 's', 'b']\n        node_pos = [(0, 8), (5, 0), (0, 0), (2, 0)]\n        nodes_dict = {}\n\n        # Create nodes and add them to the dictionary\n        for index in range(0, 
len(node_names)):\n            key = node_names[index]\n            value = Node(node_names[index], node_pos[index])\n            nodes_dict[key] = value\n\n        edges_dict = {}\n        e1 = Edge(\"e1\", nodes_dict['s'], nodes_dict['x'], 90, 270, 8)\n        e2 = Edge(\"e2\", nodes_dict['a'], nodes_dict['s'], 0, 180, 5)\n        e3 = Edge(\"e3\", nodes_dict['b'], nodes_dict['s'], 180, 0, 2)\n\n        edges_dict['sx'] = e1\n        edges_dict['as'] = e2\n        edges_dict['bs'] = e3\n\n        # add e1 to both vertices\n        nodes_dict['s'].edges.append(e1)\n        nodes_dict['x'].edges.append(e1)\n\n        # add e2 to both vertices\n        nodes_dict['a'].edges.append(e2)\n        nodes_dict['s'].edges.append(e2)\n\n        # add e3 to both vertices\n        nodes_dict['b'].edges.append(e3)\n        nodes_dict['s'].edges.append(e3)\n\n        # set class vars\n        self.nodes_dict = nodes_dict\n        self.edges_dict = edges_dict\n\n    def create_simple_different_angles(self):\n        # 100 = 25 + x^2\n        # x^2 = 100 - 25\n        # x = square_root(75)\n        x = math.sqrt(75)\n\n        node_names = ['x', 'a', 's', 'b']\n        node_pos = [(0, 10), (-x, 5), (0, 0), (x, 5)]\n        nodes_dict = {}\n\n        # Create nodes and add them to the dictionary\n        for index in range(0, len(node_names)):\n            key = node_names[index]\n            value = Node(node_names[index], node_pos[index])\n            nodes_dict[key] = value\n\n        edges_dict = {}\n        e1 = Edge(\"e1\", nodes_dict['s'], nodes_dict['x'], 90, 270, 10) # input hyp.\n        e2 = Edge(\"e2\", nodes_dict['a'], nodes_dict['s'], 315, 135, 10)\n        e3 = Edge(\"e3\", nodes_dict['b'], nodes_dict['s'], 225, 45, 10)\n\n        edges_dict['sx'] = e1\n        edges_dict['as'] = e2\n        edges_dict['bs'] = e3\n\n        # add e1 to both vertices\n        nodes_dict['s'].edges.append(e1)\n        nodes_dict['x'].edges.append(e1)\n\n        # add e2 to both vertices\n        nodes_dict['a'].edges.append(e2)\n        nodes_dict['s'].edges.append(e2)\n\n        # add e3 to both vertices\n        nodes_dict['b'].edges.append(e3)\n        nodes_dict['s'].edges.append(e3)\n\n        # set class vars\n        self.nodes_dict = nodes_dict\n        self.edges_dict = edges_dict\n\n    # Mystery Graph CODE\n    def mystery_graph(self):\n        node_names = ['x', 'a', 's', 'b', 'y']\n        node_pos = [(-10, 10), (0, 10), (0, 0), (0, -10), (10, -10)]\n        nodes_dict = {}\n\n        # Create nodes and add them to the dictionary\n        for index in range(0, len(node_names)):\n            key = node_names[index]\n            value = Node(node_names[index], node_pos[index])\n            nodes_dict[key] = value\n\n        edges_dict = {}\n\n        e1 = Edge(\"e1\", nodes_dict['a'], nodes_dict['x'], 180, 0, 10)\n        e2 = Edge(\"e2\", nodes_dict['s'], nodes_dict['x'], 135, 315, 10)\n        e3 = Edge(\"e3\", nodes_dict['a'], nodes_dict['s'], 270, 90, 10)\n        e4 = Edge(\"e4\", nodes_dict['b'], nodes_dict['s'], 90, 270, 10)\n        e5 = Edge(\"e5\", nodes_dict['s'], nodes_dict['y'], 315, 135, 10)\n        e6 = Edge(\"e6\", nodes_dict['b'], nodes_dict['y'], 0, 180, 10)\n\n        edges_dict['ax'] = e1\n        edges_dict['sx'] = e2\n        edges_dict['as'] = e3\n        edges_dict['bs'] = e4\n        edges_dict['sy'] = e5\n        edges_dict['by'] = e6\n\n        # Node/Vertex 'S' add, all the edges\n        nodes_dict['s'].edges.append(e2)\n        nodes_dict['s'].edges.append(e3)\n        nodes_dict['s'].edges.append(e4)\n        nodes_dict['s'].edges.append(e5)\n\n        # Node/Vertex 'A' add, all the edges\n        nodes_dict['a'].edges.append(e1)\n        nodes_dict['a'].edges.append(e3)\n\n        # Node/Vertex 'B' add, all the edges\n        nodes_dict['b'].edges.append(e4)\n        nodes_dict['b'].edges.append(e6)\n\n        # Node/Vertex 'Y' add, all the edges\n        nodes_dict['y'].edges.append(e5)\n        nodes_dict['y'].edges.append(e6)\n\n        # Node/Vertex 'X' add, all the edges\n        nodes_dict['x'].edges.append(e2)\n        nodes_dict['x'].edges.append(e1)\n\n        # set class vars\n        self.nodes_dict = nodes_dict\n        self.edges_dict = 
edges_dict\n\n def create_complex(self):\n node_names = ['x', 'y', 'z', 'a', 's', 'b']\n node_pos = [(-5, 5), (0, 5), (5, 5), (-5, 0), (0, 0), (5, 0)]\n\n nodes_dict = {}\n # Create nodes and add them to the dictionary\n for index in range(0, len(node_names)):\n key = node_names[index]\n value = Node(node_names[index], node_pos[index])\n nodes_dict[key] = value\n edges_dict = {}\n\n e1 = Edge(\"e1\", nodes_dict['x'], nodes_dict['y'], 0, 180, 5)\n e2 = Edge(\"e2\", nodes_dict['y'], nodes_dict['z'], 0, 180, 5)\n e3 = Edge(\"e3\", nodes_dict['s'], nodes_dict['y'], 90, 270, 5)\n e4 = Edge(\"e4\", nodes_dict['s'], nodes_dict['z'], 45, 225, 5)\n e5 = Edge(\"e5\", nodes_dict['b'], nodes_dict['z'], 90, 270, 5)\n e6 = Edge(\"e6\", nodes_dict['a'], nodes_dict['s'], 0, 180, 5)\n e7 = Edge(\"e7\", nodes_dict['s'], nodes_dict['b'], 0, 180, 5)\n\n edges_dict['xy'] = e1\n edges_dict['yz'] = e2\n edges_dict['sy'] = e3\n edges_dict['sz'] = e4\n edges_dict['bz'] = e5\n edges_dict['as'] = e6\n edges_dict['sb'] = e7\n\n # add e1 to both vertices\n nodes_dict['x'].edges.append(e1)\n nodes_dict['y'].edges.append(e1)\n # add e2 to both vertices\n nodes_dict['y'].edges.append(e2)\n nodes_dict['z'].edges.append(e2)\n # add e3 to both vertices\n nodes_dict['s'].edges.append(e3)\n nodes_dict['y'].edges.append(e3)\n # add e4 to both vertices\n nodes_dict['s'].edges.append(e4)\n nodes_dict['z'].edges.append(e4)\n # add e5 to both vertices\n nodes_dict['b'].edges.append(e5)\n nodes_dict['z'].edges.append(e5)\n # add e6 to both vertices\n nodes_dict['a'].edges.append(e6)\n nodes_dict['s'].edges.append(e6)\n # add e7 to both vertices\n nodes_dict['s'].edges.append(e7)\n nodes_dict['b'].edges.append(e7)\n\n # set class vars\n self.nodes_dict = nodes_dict\n self.edges_dict = edges_dict\n\n def search(self, startChar, destinationChar):\n startNode = self.nodes_dict[startChar]\n destinationNode = self.nodes_dict[destinationChar]\n queue = []\n queue.append((startNode, []))\n\n while (len(queue) > 0):\n\n currNode, currRoute = queue.pop(0)\n currNode.visited = True\n\n if currNode == destinationNode:\n print(\"FOUND\")\n return currRoute\n\n for edge in currNode.edges:\n # print(\"Examining edge from ->\", edge.get_startNode_name(), \"<- to ->\", edge.get_endNode_name(), \"<-\" )\n if edge.startNode != currNode and edge.startNode.visited == False:\n new_route = currRoute.copy()\n new_route.append((edge, edge.get_endNodeDirToStartNode()))\n # new_route.append( edge.startNode )\n queue.append((edge.startNode, new_route))\n elif edge.endNode != currNode and edge.endNode.visited == False:\n new_route = currRoute.copy()\n new_route.append((edge, edge.get_startNodeDirToEndNode()))\n # new_route.append(edge.endNode)\n queue.append((edge.endNode, new_route))\n\n return \"FAILED\"\n","sub_path":"ZumiGraphCellOne.py","file_name":"ZumiGraphCellOne.py","file_ext":"py","file_size_in_byte":10006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"404001422","text":"\n# Copyright 2019 Google Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom datetime import datetime\nfrom tensorflow import keras\nimport os\nimport re\nimport sys\n\nimport run_classifier\nimport optimization\nimport tokenization\nimport modeling\n\nimport mlflow\nmlflow.set_tracking_uri(\"http://localhost:5000\")\nmlflow.set_experiment('EventDetection')\nfrom mlflow import tensorflow\ntensorflow.autolog(every_n_iter=1) #default 100\n\n# Parameters\n# ==================================================\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\"OUTPUT_DIR\", 'models/model2',\n                       \"\"\"Path to output dir\"\"\")\ntf.flags.DEFINE_string(\"train_data_file\",\"data/aclImdb/aclImdb_train.csv\",\n                       \"\"\"Path to training data. Expect 'text' and 'label' columns \"\"\")\ntf.flags.DEFINE_string(\"test_data_file\",\"data/aclImdb/aclImdb_test.csv\",\n                       \"\"\"Path to development data \"\"\")\ntf.flags.DEFINE_string(\"BERT_VOCAB\",\"data/uncased_L-12_H-768_A-12/vocab.txt\",\n                       \"\"\"Path to BERT vocab file \"\"\")\ntf.flags.DEFINE_string(\"BERT_INIT_CHKPNT\",\"data/uncased_L-12_H-768_A-12/bert_model.ckpt\",\n                       \"\"\"Path to BERT model checkpoint \"\"\")\ntf.flags.DEFINE_string(\"BERT_CONFIG\",\"data/uncased_L-12_H-768_A-12/bert_config.json\",\n                       \"\"\"Path to BERT model config file \"\"\")\ntf.flags.DEFINE_integer(\"MAX_SEQ_LENGTH\",128,\n                       \"\"\"max length of (token?) sequence. can increase up to 512 \"\"\")\n\n# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\ntf.flags.DEFINE_integer(\"BATCH_SIZE\",32,\n                       \"\"\" batch size for training \"\"\")\ntf.flags.DEFINE_float(\"LEARNING_RATE\",2e-5,\n                       \"\"\" learning rate for training \"\"\")\ntf.flags.DEFINE_float(\"NUM_TRAIN_EPOCHS\",3,\n                       \"\"\" number of training epochs \"\"\")\n# Warmup is a period of time where the learning rate\n# is small and gradually increases--usually helps training.\ntf.flags.DEFINE_float(\"WARMUP_PROPORTION\",0.1,\n                       \"\"\" proportion of training steps used for warmup \"\"\")\ntf.flags.DEFINE_integer(\"SAVE_CHECKPOINTS_STEPS\",500,\n                       \"\"\" number of steps between checkpoint saves \"\"\")\ntf.flags.DEFINE_integer(\"SAVE_SUMMARY_STEPS\",100,\n                       \"\"\" number of steps between summary saves \"\"\")\nFLAGS = tf.flags.FLAGS\nFLAGS(sys.argv, known_only=True)\n\n# note that extra FLAGS parameters are defined in run_classifier.py. These were parameters set by Google.\n\n# <--------- run specific settings\n\nFLAGS.SAVE_CHECKPOINTS_STEPS=2\nFLAGS.SAVE_SUMMARY_STEPS=1\nFLAGS.OUTPUT_DIR = 'models/model5'\nFLAGS.NUM_TRAIN_EPOCHS = 1\n\n\n\n\nfor key, values in FLAGS.flag_values_dict().items():\n    mlflow.log_param(key,values)\n\n# end of parameters\n\n\ntf.gfile.MakeDirs(FLAGS.OUTPUT_DIR)\n\ntf.logging.info('***** Model output directory: {} *****'.format(FLAGS.OUTPUT_DIR))\n\n# <------------------ Load the data\n\n# Load all files from a directory in a DataFrame.\ntrain = pd.read_csv(FLAGS.train_data_file)\ntest = pd.read_csv(FLAGS.test_data_file)\ntrain = train.sample(70)\ntest = test.sample(70)\n\n# label_list is the list of labels, i.e. 
True, False or 0, 1 or 'dog', 'cat'\nlabel_list = list(np.unique(train['label'])) #[0, 1]\n\ntf.logging.info('shape of data: train: (%d,%d), test: (%d,%d)' % (train.shape+test.shape))\ntf.logging.info('columns of train file: %s' % ','.join(train.columns))\n\n\n# <------------------ Prepare the training input\n\ntf.logging.info('prepare training input...')\n\n# Use the InputExample class from BERT's run_classifier code to create examples from the data. Each data point\n# is wrapped into an InputExample class\ntrain_InputExamples = train.apply(lambda x: run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example\n                                                                   text_a = x['text'],\n                                                                   text_b = None, \n                                                                   label = x['label']), axis = 1)\n\ntest_InputExamples = test.apply(lambda x: run_classifier.InputExample(guid=None,\n                                                                   text_a = x['text'],\n                                                                   text_b = None, \n                                                                   label = x['label']), axis = 1)\n\n\n# <------------------ Prepare tokenizer and do tokenization\n\ntf.logging.info('prepare tokenizer')\n\n# Checks whether the casing config is consistent with the checkpoint name.\ntokenization.validate_case_matches_checkpoint(do_lower_case=True,init_checkpoint=FLAGS.BERT_INIT_CHKPNT)\n\n# build the tokenizer\ntokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.BERT_VOCAB,do_lower_case=True)\n\ntf.logging.info('created tokenizer')\n#tokens_test = tokenizer.tokenize(\"This here's an example of using the BERT tokenizer\")\n#tf.logging.info('example tokenization:'+)\n\n\n\n# Convert our train and test features to InputFeatures that BERT understands.\n# creates lists with elements/sentences of type InputFeatures class\ntrain_features = run_classifier.convert_examples_to_features(train_InputExamples, label_list, FLAGS.MAX_SEQ_LENGTH, tokenizer)\ntest_features = run_classifier.convert_examples_to_features(test_InputExamples, label_list, FLAGS.MAX_SEQ_LENGTH, tokenizer)\n\n\n# <------------------ Prepare model\n\n\ndef create_model(bert_config,is_predicting, input_ids, input_mask, segment_ids, labels,\n                 num_labels):\n\n    # this initializes the BERT model, see model code in modeling module!\n    model = modeling.BertModel(\n        config=bert_config,\n        is_training=not is_predicting,\n        input_ids=input_ids,\n        input_mask=input_mask,\n        token_type_ids=segment_ids)# ,\n        #use_one_hot_embeddings=use_one_hot_embeddings)\n\n    output_layer = model.get_pooled_output()\n\n    hidden_size = output_layer.shape[-1].value\n\n    # Create our own output layer to fine-tune for our classification data.\n    output_weights = tf.get_variable(\n        \"output_weights\", [num_labels, hidden_size],\n        initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n    output_bias = tf.get_variable(\n        \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n    with tf.variable_scope(\"loss\"):\n\n        # Dropout helps prevent overfitting\n        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n        logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n        logits = tf.nn.bias_add(logits, output_bias)\n        log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n        # Convert labels into one-hot encoding\n        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n        predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n        # If we're predicting, we want predicted labels and the probabilities.\n        if is_predicting:\n            return (predicted_labels, log_probs)\n\n        # If we're train/eval, compute loss between predicted and actual label\n        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n        loss = tf.reduce_mean(per_example_loss)\n        return (loss, 
predicted_labels, log_probs)\n\n\n# model_fn_builder actually creates our model function\n# using the passed parameters for num_labels, learning_rate, etc.\ndef model_fn_builder(bert_config,init_checkpoint,num_labels, learning_rate, num_train_steps,\n                     num_warmup_steps):\n    \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n    def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n        \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n        input_ids = features[\"input_ids\"]\n        input_mask = features[\"input_mask\"]\n        segment_ids = features[\"segment_ids\"]\n        label_ids = features[\"label_ids\"]\n\n        is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n        \n        # TRAIN and EVAL\n        if not is_predicting:\n\n            # create the model, specifically for training\n            (loss, predicted_labels, log_probs) = create_model(bert_config,\n                is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n            if mode == tf.estimator.ModeKeys.TRAIN:\n                # init weights (added CR)\n                tvars = tf.trainable_variables()\n                initialized_variable_names = {}\n                if init_checkpoint:\n                    tf.logging.info('start loading weights %d from checkpoint %s' % (len(tvars),init_checkpoint))\n                    (assignment_map, initialized_variable_names\n                     ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n                    # load weight maps\n                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n                    tf.logging.info('loading weights done')\n                    tf.logging.info(\"**** Trainable Variables ****\")\n\n\n            # creates optimizer operation, based on Adam and an exponentially decaying lr\n            train_op = optimization.create_optimizer(\n                loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n            # Calculate evaluation metrics.\n            accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n            def metric_fn(label_ids, predicted_labels):\n                f1_score = tf.contrib.metrics.f1_score(\n                    label_ids,\n                    predicted_labels)\n                #mlflow.log_metric('f1_score test',f1_score[1])\n                auc = tf.metrics.auc(\n                    label_ids,\n                    predicted_labels)\n                recall = tf.metrics.recall(\n                    label_ids,\n                    predicted_labels)\n                precision = tf.metrics.precision(\n                    label_ids,\n                    predicted_labels) \n                true_pos = tf.metrics.true_positives(\n                    label_ids,\n                    predicted_labels)\n                true_neg = tf.metrics.true_negatives(\n                    label_ids,\n                    predicted_labels) \n                false_pos = tf.metrics.false_positives(\n                    label_ids,\n                    predicted_labels) \n                false_neg = tf.metrics.false_negatives(\n                    label_ids,\n                    predicted_labels)\n                return {\n                    #\"eval_accuracy\": accuracy,\n                    \"f1_score\": f1_score,\n                    \"auc\": auc,\n                    \"precision\": precision,\n                    \"recall\": recall,\n                    \"true_positives\": true_pos,\n                    \"true_negatives\": true_neg,\n                    \"false_positives\": false_pos,\n                    \"false_negatives\": false_neg\n                }\n\n            eval_metrics = metric_fn(label_ids, predicted_labels)\n            #mlflow.log_metric('eval_metrics recall',eval_metrics['recall'])\n            # this metric will be logged only during evaluation\n            eval_metrics['eval_accuracy'] = accuracy\n\n            # this metric will be logged only during training\n            tf.summary.scalar('accuracy', accuracy[1])\n\n            # two mode options: TRAIN or EVAL; train here\n            if mode == tf.estimator.ModeKeys.TRAIN:\n                return tf.estimator.EstimatorSpec(mode=mode,\n                    loss=loss,\n                    train_op=train_op)\n                    #eval_metric_ops=eval_metrics)\n            else:\n                # eval metrics are being printed to tf log output and saved to disk\n                return tf.estimator.EstimatorSpec(mode=mode,\n                    loss=loss,\n                    eval_metric_ops=eval_metrics)\n        else:\n\n\n\n            # create the model specifically for predictions\n            (predicted_labels, log_probs) = create_model(bert_config,\n                is_predicting, input_ids, 
input_mask, segment_ids, label_ids, num_labels)\n\n            predictions = {\n                'probabilities': log_probs,\n                'labels': predicted_labels\n            }\n            return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n    # Return the actual model function in the closure\n    return model_fn\n\n\n# Compute number of train and warmup steps from batch size\nnum_train_steps = int(len(train_features) / FLAGS.BATCH_SIZE * FLAGS.NUM_TRAIN_EPOCHS)\nnum_warmup_steps = int(num_train_steps * FLAGS.WARMUP_PROPORTION)\n\ntf.logging.info('num warmup steps: %d' % (num_warmup_steps))\ntf.logging.info('num training steps: %d' % (num_train_steps))\n\n\n# specifies the configurations for an Estimator run.\n# Specify output directory and number of checkpoint steps to save\n# model_dir: directory where model parameters, graph, etc are saved.\n# save_summary_steps: Save summaries every this many steps.\nrun_config = tf.estimator.RunConfig(\n    model_dir=FLAGS.OUTPUT_DIR,\n    save_summary_steps=FLAGS.SAVE_SUMMARY_STEPS,\n    save_checkpoints_steps=FLAGS.SAVE_CHECKPOINTS_STEPS)\n\n# build the model\nbert_config = modeling.BertConfig.from_json_file(FLAGS.BERT_CONFIG)\n\nmodel_fn = model_fn_builder(\n    bert_config=bert_config,\n    init_checkpoint = FLAGS.BERT_INIT_CHKPNT,\n    num_labels=len(label_list),\n    learning_rate=FLAGS.LEARNING_RATE,\n    num_train_steps=num_train_steps,\n    num_warmup_steps=num_warmup_steps)\n\n# initializes the estimator\nestimator = tf.estimator.Estimator(\n    model_fn=model_fn,\n    config=run_config,\n    params={\"batch_size\": FLAGS.BATCH_SIZE})\n\n\n\n# <------------------ Begin training\n\n# Create an input function for training. drop_remainder = True for using TPUs.\ntrain_input_fn = run_classifier.input_fn_builder(\n    features=train_features,\n    seq_length=FLAGS.MAX_SEQ_LENGTH,\n    is_training=True,\n    drop_remainder=False)\n\ntf.logging.info('Beginning Training!')\ncurrent_time = datetime.now()\nestimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\ntf.logging.info(\"Training took time %s\" % (datetime.now() - current_time))\n\n\n# <------------------ Begin evaluation on test data\n\ntf.logging.info('Beginning Evaluation!')\ntest_input_fn = run_classifier.input_fn_builder(\n    features=test_features,\n    seq_length=FLAGS.MAX_SEQ_LENGTH,\n    is_training=False,\n    drop_remainder=False)\n\ncurrent_time = datetime.now()\nestimator.evaluate(input_fn=test_input_fn, steps=None)\ntf.logging.info(\"evaluation took time %s\" % (datetime.now() - current_time))\n\n\n\n# <------------------ Prediction: Apply prediction function to text data\n\n\ndef getPrediction(in_sentences):\n    '''\n    Function to provide class predictions for list of input sentences\n    :param in_sentences:\n    :return:\n    '''\n\n    #labels = [\"Negative\", \"Positive\"]\n\n    # pre-process input\n    input_examples = [run_classifier.InputExample(guid=\"\", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, guid=\"\" and label=0 are just dummy values\n    input_features = run_classifier.convert_examples_to_features(input_examples, label_list, FLAGS.MAX_SEQ_LENGTH, tokenizer)\n\n    # create model function input\n    predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=FLAGS.MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)\n\n    # calculate the predictions\n    predictions = estimator.predict(predict_input_fn)\n\n    #\n    output = [(sentence, prediction['probabilities'], prediction['labels']) for sentence, prediction in zip(in_sentences, predictions)]\n    return output\n\ntf.logging.info(\"start prediction\")\n\n# 
input test sentences\npred_sentences = [\n \"That movie was absolutely awful\",\n \"The acting was a bit lacking\",\n \"The film was creative and surprising\",\n \"Absolutely fantastic!\"\n]\n\n# get predictions for sentences\npredictions = getPrediction(pred_sentences)\n\nprint(predictions)\n","sub_path":"predicting_movie_reviews_with_bert.py","file_name":"predicting_movie_reviews_with_bert.py","file_ext":"py","file_size_in_byte":15743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"358493057","text":"from django.conf.urls import url\n#from django.contrib import admin\nfrom articles.views import *\n\nurlpatterns = [\n\turl(r'^$', login),\n\turl(r'^login$', login),\n\turl(r'^register$',register),\n\turl(r'^showArticles$', showArticles),\n\turl(r'^detailArticle$', detailArticle),\n\turl(r'^newArticle$',newArticle),\n\turl(r'^userData$', userData),\n\turl(r'^announcement$', announcement),\n\turl(r'^about$', about),\n]\n","sub_path":"BBS/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"402725956","text":"\"\"\"\nCreated on 2 Aug 2016\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nhttps://stackoverflow.com/questions/3348460/csv-file-written-with-python-has-blank-lines-between-each-row\n\"\"\"\n\nimport csv\nimport os\nimport sys\n\nfrom scs_core.csv.csv_dict import CSVDict\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nclass CSVWriter(object):\n \"\"\"\n classdocs\n \"\"\"\n\n QUOTING = csv.QUOTE_MINIMAL\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __init__(self, filename=None, append=False, exclude_header=False):\n \"\"\"\n Constructor\n \"\"\"\n self.__filename = filename\n self.__paths = None\n\n if self.__filename is None:\n self.__append = append\n\n self.__file = sys.stdout\n self.__writer = csv.writer(self.__file, quoting=self.QUOTING)\n else:\n self.__append = append and os.path.exists(self.__filename)\n\n if self.__append:\n self.__paths = self.__append_paths()\n\n self.__file = open(self.__filename, \"a\" if self.__append else \"w\", newline='')\n self.__writer = csv.writer(self.__file, quoting=self.QUOTING)\n\n self.__exclude_header = exclude_header\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __append_paths(self):\n file = sys.stdin if self.__filename is None else open(self.__filename, \"r\")\n reader = csv.reader(file)\n\n paths = next(reader)\n\n file.close()\n\n return paths\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def write(self, jstr):\n if jstr is None:\n return False\n\n datum = CSVDict.construct_from_jstr(jstr)\n\n if datum is None:\n return False\n\n if self.__paths is None:\n self.__paths = datum.paths()\n\n # header...\n if not self.__append and not self.__exclude_header:\n self.__writer.writerow(self.__paths)\n\n # row...\n self.__writer.writerow(datum.row(self.__paths))\n\n # if self.filename is None:\n self.__file.flush()\n\n return True\n\n\n def close(self):\n if self.filename is None:\n return\n\n self.__file.close()\n\n\n # 
----------------------------------------------------------------------------------------------------------------\n\n    @property\n    def filename(self):\n        return self.__filename\n\n\n    # ----------------------------------------------------------------------------------------------------------------\n\n    def __str__(self, *args, **kwargs):\n        return \"CSVWriter:{filename:%s, append:%s, exclude_header:%s, paths:%s}\" % \\\n               (self.filename, self.__append, self.__exclude_header, self.__paths)\n","sub_path":"src/scs_core/csv/csv_writer.py","file_name":"csv_writer.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"483767402","text":"#!/usr/bin/env python\n\n# This is a thin wrapper for AWS Codebuild API to kick off a build, wait for it to finish,\n# and tail build logs while it is running.\n\nimport os\nimport json\nfrom typing import Dict, Any, List, Optional, AsyncGenerator\nfrom datetime import datetime\nimport asyncio\nimport sys\nimport argparse\nimport boto3\nfrom botocore.config import Config\n\n\nclass LogTailer:\n    \"\"\" A simple cloudwatch log tailer. \"\"\"\n\n    _next_token: Optional[str]\n\n    def __init__(self, client, log_group: str, log_stream: str):\n        self._client = client\n        self._next_token = None\n        self._log_group = log_group\n        self._log_stream = log_stream\n\n    def _get_log_events_args(self) -> Dict[str, Any]:\n        res = dict(\n            logGroupName=self._log_group,\n            logStreamName=self._log_stream,\n            limit=100,\n            startFromHead=True,\n        )\n        if self._next_token:\n            res[\"nextToken\"] = self._next_token\n        return res\n\n    async def tail_chunk(self) -> List[Dict[str, str]]:\n        max_sleep = 5.0\n        SLEEP_TIME = 0.5\n\n        while max_sleep > 0:\n            resp = self._client.get_log_events(**self._get_log_events_args())\n            events = resp[\"events\"]\n            self._next_token = resp.get(\"nextForwardToken\")\n            if events:\n                return events\n            else:\n                max_sleep -= SLEEP_TIME\n                await asyncio.sleep(SLEEP_TIME)\n        else:\n            return []\n\n    async def read_all_chunks(self) -> AsyncGenerator[List[Dict[str, str]], None]:\n        while True:\n            resp = self._client.get_log_events(**self._get_log_events_args())\n            events = resp[\"events\"]\n            self._next_token = resp.get(\"nextForwardToken\")\n            if events:\n                yield events\n            else:\n                return\n\n\nasync def _wait_build_state(\n    client, build_id, desired_phase: Optional[str], desired_states: List[str]\n) -> Dict[str, Any]:\n    \"\"\" Wait until the build is in one of the desired states, or in the desired phase. \"\"\"\n    while True:\n        resp = client.batch_get_builds(ids=[build_id])\n        assert len(resp[\"builds\"]) == 1\n        build = resp[\"builds\"][0]\n        if build[\"buildStatus\"] in desired_states:\n            return build\n        for phase in build[\"phases\"]:\n            if desired_phase and (phase[\"phaseType\"] == desired_phase):\n                return build\n\n        await asyncio.sleep(2)\n\n\ndef print_log_event(event) -> None:\n    print(\n        str(datetime.fromtimestamp(event[\"timestamp\"] / 1000.0)),\n        event[\"message\"],\n        end=\"\",\n    )\n\n\nasync def main() -> None:\n    parser = argparse.ArgumentParser(description=\"Run an AWS CodeBuild build and tail its logs.\")\n    parser.add_argument(\n        \"--project-name\", default=\"feast-ci-project\", type=str, help=\"Project name\"\n    )\n    parser.add_argument(\n        \"--source-location\",\n        type=str,\n        help=\"Source location, e.g. https://github.com/feast/feast.git\",\n    )\n    parser.add_argument(\n        \"--source-version\", type=str, help=\"Source version, e.g. 
master\"\n )\n parser.add_argument(\n \"--location-from-prow\", action='store_true', help=\"Infer source location and version from prow environment variables\"\n )\n args = parser.parse_args()\n\n if args.location_from_prow:\n job_spec = json.loads(os.getenv('JOB_SPEC', ''))\n source_location = job_spec['refs']['repo_link']\n source_version = source_version_from_prow_job_spec(job_spec)\n else:\n source_location = args.source_location\n source_version = args.source_version\n\n await run_build(\n project_name=args.project_name,\n source_location=source_location,\n source_version=source_version,\n )\n\ndef source_version_from_prow_job_spec(job_spec: Dict[str, Any]) -> str:\n pull = job_spec['refs']['pulls'][0]\n return f'refs/pull/{pull[\"number\"]}/head^{{{pull[\"sha\"]}}}'\n\nasync def run_build(project_name: str, source_version: str, source_location: str):\n print(f\"Building {project_name} at {source_version}\", file=sys.stderr)\n\n config = Config(\n retries = {\n 'max_attempts': 10,\n }\n )\n\n logs_client = boto3.client(\"logs\", region_name=\"us-west-2\", config=config)\n codebuild_client = boto3.client(\"codebuild\", region_name=\"us-west-2\")\n\n print(\"Submitting the build..\", file=sys.stderr)\n build_resp = codebuild_client.start_build(\n projectName=project_name,\n sourceLocationOverride=source_location,\n sourceVersion=source_version,\n )\n\n build_id = build_resp[\"build\"][\"id\"]\n\n try:\n print(\n \"Waiting for the INSTALL phase to start before tailing the log\",\n file=sys.stderr,\n )\n build = await _wait_build_state(\n codebuild_client,\n build_id,\n desired_phase=\"INSTALL\",\n desired_states=[\"SUCCEEDED\", \"FAILED\", \"STOPPED\", \"TIMED_OUT\", \"FAULT\"],\n )\n\n if build[\"buildStatus\"] != \"IN_PROGRESS\":\n print(\n f\"Build failed before install phase: {build['buildStatus']}\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n log_tailer = LogTailer(\n logs_client,\n log_stream=build[\"logs\"][\"streamName\"],\n log_group=build[\"logs\"][\"groupName\"],\n )\n\n waiter_task = asyncio.get_event_loop().create_task(\n _wait_build_state(\n codebuild_client,\n build_id,\n desired_phase=None,\n desired_states=[\"SUCCEEDED\", \"FAILED\", \"STOPPED\", \"TIMED_OUT\", \"FAULT\"],\n )\n )\n\n while not waiter_task.done():\n events = await log_tailer.tail_chunk()\n for event in events:\n print_log_event(event)\n\n build_status = waiter_task.result()[\"buildStatus\"]\n if build_status == \"SUCCEEDED\":\n print(f\"Build {build_status}\", file=sys.stderr)\n else:\n print(f\"Build {build_status}\", file=sys.stderr)\n sys.exit(1)\n except KeyboardInterrupt:\n print(f\"Stopping build {build_id}\", file=sys.stderr)\n codebuild_client.stop_build(id=build_id)\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","sub_path":"infra/scripts/codebuild_runner.py","file_name":"codebuild_runner.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568812522","text":"\n\"\"\"\n逆向運動學共輸出8組解\n\nBy : ya000000000000\n\n\"\"\"\n\n\nimport math\nimport numpy as np\n\n# Transfer Function\n# -------------------------------------------------------\n\n\ndef RPY2Rot(A, B, C):\n # Deg to Rad\n Deg2Rad = math.pi/180\n # Deg to Rad\n alpha = A * Deg2Rad\n beta = B * Deg2Rad\n gamma = C * Deg2Rad\n#### 尤拉角轉旋轉矩陣順序 zyx\n Rzyx = [[math.cos(alpha)*math.cos(beta), math.cos(alpha)*math.sin(beta)*math.sin(gamma) - math.cos(gamma)*math.sin(alpha), 
math.sin(alpha)*math.sin(gamma) + math.cos(alpha)*math.cos(gamma)*math.sin(beta)],\n            [math.cos(beta)*math.sin(alpha), math.cos(alpha)*math.cos(gamma) + math.sin(alpha)*math.sin(beta) *\n             math.sin(gamma), math.cos(gamma)*math.sin(alpha)*math.sin(beta) - math.cos(alpha)*math.sin(gamma)],\n            [-math.sin(beta), math.cos(beta)*math.sin(gamma), math.cos(beta)*math.cos(gamma)]]\n\n#### Euler angles to rotation matrix, order xyz\n    Rxyz = [[math.cos(gamma) * math.cos(beta), -math.cos(beta) * math.sin(gamma) ,math.sin(beta) ],\n            [math.cos(alpha)*math.sin(gamma)+math.cos(gamma)*math.sin(alpha)*math.sin(beta),math.cos(alpha)*math.cos(gamma)-math.sin(alpha)*math.sin(beta)*math.sin(gamma), math.cos(beta)*math.sin(alpha)],\n            [math.sin(alpha)*math.sin(gamma)-math.cos(alpha)*math.cos(gamma)*math.sin(beta),math.cos(gamma)*math.sin(alpha)+math.cos(alpha)*math.sin(beta)*math.sin(gamma),math.cos(alpha)*math.cos(beta)]]\n\n    return Rzyx\n\n# Adjust if angle > 180 or angle < -180\n# -------------------------------------------------------------\n\n\ndef over180_rad(input_rad):\n    angle = input_rad\n\n    if input_rad > math.pi:\n        # print('bigger')\n        angle = input_rad - 2*math.pi\n\n    if input_rad < -math.pi:\n        # print('smaller')\n        angle = input_rad + 2*math.pi\n\n    return angle\n\n# Calculate T: output T\n# --------------------------------------------------------\n\n\ndef calculateT(theta, alpha, a, d):\n    ### RotZ(4, 4) ###################\n    RotZ = np.zeros((4, 4))\n\n    RotZ[0] = [1, 0, 0, 0]\n    RotZ[1] = [0, math.cos(alpha), -math.sin(alpha), 0]\n    RotZ[2] = [0, math.sin(alpha), math.cos(alpha), 0]\n    RotZ[3] = [0, 0, 0, 1]\n\n    ### TranZ(4, 4) ###################\n    TransZ = np.zeros((4, 4))\n\n    TransZ[0] = [1, 0, 0, a]\n    TransZ[1] = [0, 1, 0, 0]\n    TransZ[2] = [0, 0, 1, 0]\n    TransZ[3] = [0, 0, 0, 1]\n\n    ### TransX(4, 4) ##################\n    TransX = np.zeros((4, 4))\n\n    TransX[0] = [1, 0, 0, 0]\n    TransX[1] = [0, 1, 0, 0]\n    TransX[2] = [0, 0, 1, d]\n    TransX[3] = [0, 0, 0, 1]\n\n    ### RotX(4, 4) ###################\n    RotX = np.zeros((4, 4))\n\n    RotX[0] = [math.cos(theta), -math.sin(theta), 0, 0]\n    RotX[1] = [math.sin(theta), math.cos(theta), 0, 0]\n    RotX[2] = [0, 0, 1, 0]\n    RotX[3] = [0, 0, 0, 1]\n\n\n\n    T = RotX@TransX@TransZ@RotZ\n\n    return T\n\n# Calculate J1: output J1, J1p\n# use function: over180_rad\n# -------------------------------------------------------------\n\n\ndef calculateJ1(EEx, EEy, EEz, z1, z2, z3, DH_table):\n\n\n    # Read DH_table\n    dof = len(DH_table)\n\n    a = np.zeros(dof)\n    alpha = np.zeros(dof)\n    d = np.zeros(dof)\n    theta = np.zeros(dof)\n\n    for i in range(dof):\n        a[i] = DH_table[i][2]\n        alpha[i] = DH_table[i][3]\n        d[i] = DH_table[i][1]\n        theta[i] = DH_table[i][0]\n\n    # Solve Angle1\n\n    Ex = EEx - d[5]*z1\n    Ey = EEy - d[5]*z2\n    Ez = EEz - d[5]*z3\n\n    J1 = math.atan2(Ey, Ex)\n\n\n    J1p = math.atan2(Ey, Ex) + math.pi\n\n    # keep within +/-180 degrees\n    J1 = over180_rad(J1)\n\n    J1p = over180_rad(J1p)\n\n    return J1, J1p\n\n# Calculate J2 J3: output J2 J2p J3 J3p\n# call function: calculateT over180_rad\n# -----------------------------------------------------\n\n\ndef calculateJ2J3(Ex, Ey, Ez, J1, DH_table):\n    # Read DH_table\n    singular = False\n    dof = len(DH_table)\n\n    a = np.zeros(dof)\n    alpha = np.zeros(dof)\n    d = np.zeros(dof)\n    theta = np.zeros(dof)\n\n    for i in range(dof):\n        a[i] = DH_table[i][2]\n        alpha[i] = DH_table[i][3]\n        d[i] = DH_table[i][1]\n        theta[i] = DH_table[i][0]\n\n    T1 = calculateT(J1, alpha[0], a[0], d[0])\n\n    A1 = T1\n\n    Bx = A1[0][3]\n    By = A1[1][3]\n    Bz = A1[2][3]\n\n    P = 0\n    AB = a[0]\n    # print('AB',AB)\n\n    # if abs(Bx) > 0.01:\n    
if Ex*Bx < 0: #(2,3)\n        P = -math.sqrt(Ex*Ex+Ey*Ey) - AB\n\n    else: #(1,4)\n        P = math.sqrt(Ex*Ex+Ey*Ey) - AB\n\n    OA = d[0]\n    Q = Ez - OA\n    BC = a[1]\n    CD = a[2]\n    DE = d[3]\n    BE = math.sqrt(P*P + Q*Q)\n    CE = math.sqrt(CD*CD + DE*DE)\n\n    # check singular\n    if((BC + CE < BE) or (CE - BC >BE)):\n        J2, J2p, J3, J3p = 0, 0, 0, 0\n        singular = True\n        # break\n    else:\n        angleCBE = math.acos((BC * BC + BE * BE - CE * CE) / (2 * BC * BE))\n\n        # use the arctan2 function to correct the angle given by arctan\n        angleEBU = math.atan2(Q, P)\n\n        J2 = -(math.pi/2 - (angleCBE + angleEBU))\n        J2p = -(math.pi/2 - (angleEBU - angleCBE))\n        # keep within +/-180 degrees\n        J2 = over180_rad(J2)\n        J2p = over180_rad(J2p)\n\n        ans2=(BC**2 + CE**2 - BE**2)/(2*BC*CE)\n        angleBCE = math.acos(ans2)\n        angleECD = math.acos(CD/CE)\n        J3 = angleBCE + angleECD - math.pi # elbow up\n        J3p = math.pi - (angleBCE - angleECD) # elbow down\n        # keep within +/-180 degrees\n        J3 = over180_rad(J3)\n        J3p = over180_rad(J3p)\n\n        singular = False\n    return J2, J2p, J3, J3p,singular\n\n# Calculate J4, J5, J6: output J4 J4p J5 J5p J6 J6p\n# call function:\n# ------------------------------------------------------\n\n\ndef calculateJ4J5J6(EEx, EEy, EEz, x1, x2, x3, J1, J2, J3, DH_table):\n    dof = len(DH_table)\n\n    a = np.zeros(dof)\n    alpha = np.zeros(dof)\n    d = np.zeros(dof)\n    theta = np.zeros(dof)\n\n    for i in range(dof):\n        a[i] = DH_table[i][2]\n        alpha[i] = DH_table[i][3]\n        d[i] = DH_table[i][1]\n        theta[i] = DH_table[i][0]\n\n    # Solve Angle4, Angle5, Angle6\n\n    T1 = calculateT(J1, alpha[0], a[0], d[0])\n    T2 = calculateT(J2+(math.pi/2), alpha[1], a[1], d[1])\n    T3 = calculateT(J3, alpha[2], a[2], d[2])\n    A3 = T1@T2@T3\n\n    newEEx = EEx - A3[0][3]\n    newEEy = EEy - A3[1][3]\n    newEEz = EEz - A3[2][3]\n\n    xProjection = newEEx*A3[0][0] + newEEy*A3[1][0] + newEEz*A3[2][0]\n    yProjection = newEEx*A3[0][1] + newEEy*A3[1][1] + newEEz*A3[2][1]\n    zProjection = newEEx*A3[0][2] + newEEy*A3[1][2] + newEEz*A3[2][2]\n\n    J4 = math.atan2(yProjection, xProjection)\n    J4p = math.atan2(yProjection, xProjection) + math.pi\n    # add a mechanism: if over 180 degrees, automatically flip the sign\n    J4 = over180_rad(J4)\n    J4p = over180_rad(J4p)\n\n\n    EExdir1 = xProjection*A3[0][0]\n    EExdir2 = xProjection*A3[1][0]\n    EExdir3 = xProjection*A3[2][0]\n\n    EEydir1 = yProjection*A3[0][1]\n    EEydir2 = yProjection*A3[1][1]\n    EEydir3 = yProjection*A3[2][1]\n\n    EEinXYprojection1 = EExdir1 + EEydir1\n    EEinXYprojection2 = EExdir2 + EEydir2\n    EEinXYprojection3 = EExdir3 + EEydir3\n\n    EEinXYprojectionValue = math.sqrt(EEinXYprojection1*EEinXYprojection1 +\n                                      EEinXYprojection2*EEinXYprojection2 + EEinXYprojection3*EEinXYprojection3)\n\n    # 102 is used because the end-effector length is 102; using newEEvalue would give the end-effector length plus the previous link length\n    J5temp1cos = (zProjection - d[3])/d[5]\n    J5temp2cos = (newEEx*EEinXYprojection1 + newEEy*EEinXYprojection2 +\n                  newEEz*EEinXYprojection3) / (d[5]*EEinXYprojectionValue)\n\n    if J5temp1cos > 1:\n        J5temp1cos = 1\n\n    if J5temp2cos > 1:\n        J5temp2cos = 1\n\n    if J5temp1cos < -1:\n        J5temp1cos = -1\n\n    if J5temp2cos < -1:\n        J5temp2cos = -1\n\n    # if there is any arithmetic error, eliminate it\n    J5temp1 = math.acos(J5temp1cos)\n    J5temp2 = math.acos(J5temp2cos)\n\n    if J5temp2 > math.pi/2:\n        J5 = J5temp1\n    else:\n        J5 = J5temp1\n\n    J5p = -J5\n\n    # add a mechanism: if over 180 degrees, automatically flip the sign\n    J5 = over180_rad(J5)\n    J5p = over180_rad(J5p)\n\n    T4 = calculateT(J4, alpha[3], a[3], d[3])\n    T5 = calculateT(J5, alpha[4], a[4], d[4])\n\n    A5 = T1@T2@T3@T4@T5\n\n    J6temp1cos = x1*A5[0][0] + x2*A5[1][0] + x3*A5[2][0]\n    J6temp2cos = x1*A5[0][1] + x2*A5[1][1] + x3*A5[2][1]\n\n    if J6temp1cos > 1:\n        J6temp1cos = 1\n\n    if J6temp2cos > 1:\n        J6temp2cos = 1\n\n    if J6temp1cos < -1:\n        J6temp1cos = -1\n\n    if J6temp2cos < 
-1:\n        J6temp2cos = -1\n\n    # if there is any arithmetic error, eliminate it\n    J6temp1 = math.acos(J6temp1cos)\n    J6temp2 = math.acos(J6temp2cos)\n\n    if J6temp2 > math.pi/2:\n        J6 = -J6temp1\n        J6p = -J6temp1 + math.pi\n\n    else:\n        J6 = J6temp1\n        J6p = J6temp1 + math.pi\n\n    # add a mechanism: if over 180 degrees, automatically flip the sign\n    J6 = over180_rad(J6)\n    J6p = over180_rad(J6p)\n\n    return J4, J4p, J5, J5p, J6, J6p\n\n\ndef InverseKinematics(EulerAngle, Position, DH_table):\n    SingularFlag = 0 # check whether we are at a singularity\n    SingularFlag1 = False\n    SingularFlag2 = False\n    Singularplace = np.zeros(8)\n\n    # read the DH parameters\n    dof = len(DH_table)\n\n    a = np.zeros(dof)\n    alpha = np.zeros(dof)\n    d = np.zeros(dof)\n    theta = np.zeros(dof)\n\n    for i in range(dof):\n        a[i] = DH_table[i][2]\n        alpha[i] = DH_table[i][3]\n        d[i] = DH_table[i][1]\n        theta[i] = DH_table[i][0]\n\n    ### calculate wrist center position ############################\n\n    TEEFPosture = np.array([EulerAngle[0], EulerAngle[1], EulerAngle[2]])\n    EEF_Matr = RPY2Rot(TEEFPosture[0], TEEFPosture[1], TEEFPosture[2])\n## position here is in cm\n    nsdt = np.array([[EEF_Matr[0][0], EEF_Matr[0][1], EEF_Matr[0][2], Position[0]],\n                     [EEF_Matr[1][0], EEF_Matr[1][1], EEF_Matr[1][2], Position[1]],\n                     [EEF_Matr[2][0], EEF_Matr[2][1], EEF_Matr[2][2], Position[2]],\n                     [0, 0, 0, 1, ]])\n\n\n\n    z1 = nsdt[0][2]\n    z2 = nsdt[1][2]\n    z3 = nsdt[2][2]\n    x1 = nsdt[0][0]\n    x2 = nsdt[1][0]\n    x3 = nsdt[2][0]\n    EEx = nsdt[0][3]\n    EEy = nsdt[1][3]\n    EEz = nsdt[2][3]\n\n    # -----------------------------------------------\n\n    J1s = np.zeros(2)\n    J2s = np.zeros(4)\n    J3s = np.zeros(4)\n    J4s = np.zeros(8)\n    J5s = np.zeros(8)\n    J6s = np.zeros(8)\n\n    J1s[0], J1s[1] = calculateJ1(EEx, EEy, EEz, z1, z2, z3, DH_table)\n\n\n    Ex = EEx - d[5]*z1\n    Ey = EEy - d[5]*z2\n    Ez = EEz - d[5]*z3\n\n    J2s[0], J2s[1], J3s[0], J3s[1], SingularFlag1 = calculateJ2J3(Ex, Ey, Ez, J1s[0], DH_table)\n    J2s[2], J2s[3], J3s[2], J3s[3], SingularFlag2 = calculateJ2J3(Ex, Ey, Ez, J1s[1], DH_table)\n\n\n    JointAngle = np.zeros((8, 6))\n    if not SingularFlag1:\n        J4s[0], J4s[1], J5s[0], J5s[1], J6s[0], J6s[1] = calculateJ4J5J6( EEx, EEy, EEz, x1, x2, x3, J1s[0], J2s[0], J3s[0], DH_table)\n        J4s[2], J4s[3], J5s[2], J5s[3], J6s[2], J6s[3] = calculateJ4J5J6( EEx, EEy, EEz, x1, x2, x3, J1s[0], J2s[1], J3s[1], DH_table)\n\n        # # return the real part of the parameters\n        # J1s = np.real(J1s)\n        # J2s = np.real(J2s)\n        # J3s = np.real(J3s)\n        # J4s = np.real(J4s)\n\n        JointAngle[0] = [J1s[0], J2s[0], J3s[0], J4s[0], J5s[0], J6s[0]]\n        JointAngle[1] = [J1s[0], J2s[0], J3s[0], J4s[1], J5s[1], J6s[1]]\n        JointAngle[2] = [J1s[0], J2s[1], J3s[1], J4s[2], J5s[2], J6s[2]]\n        JointAngle[3] = [J1s[0], J2s[1], J3s[1], J4s[3], J5s[3], J6s[3]]\n        Singularplace[0] = 1\n        Singularplace[1] = 1\n        Singularplace[2] = 1\n        Singularplace[3] = 1\n\n    if not SingularFlag2:\n        J4s[4], J4s[5], J5s[4], J5s[5], J6s[4], J6s[5] = calculateJ4J5J6(EEx, EEy, EEz, x1, x2, x3, J1s[1], J2s[2], J3s[2], DH_table)\n        J4s[6], J4s[7], J5s[6], J5s[7], J6s[6], J6s[7] = calculateJ4J5J6(EEx, EEy, EEz, x1, x2, x3, J1s[1], J2s[3], J3s[3], DH_table)\n\n        # J5s = np.real(J5s)\n        # J6s = np.real(J6s)\n\n        JointAngle[4] = [J1s[1], J2s[2], J3s[2], J4s[4], J5s[4], J6s[4]]\n        JointAngle[5] = [J1s[1], J2s[2], J3s[2], J4s[5], J5s[5], J6s[5]]\n        JointAngle[6] = [J1s[1], J2s[3], J3s[3], J4s[6], J5s[6], J6s[6]]\n        JointAngle[7] = [J1s[1], J2s[3], J3s[3], J4s[7], J5s[7], J6s[7]]\n\n        Singularplace[4] = 1\n        Singularplace[5] = 1\n        Singularplace[6] = 1\n        Singularplace[7] = 1\n\n    if(SingularFlag1 and SingularFlag2):\n        SingularFlag = True\n\n    return JointAngle, SingularFlag,Singularplace\n\n\n\ndef InverseKinematics_Analytical(EulerAngle, 
Position, DH_table):\n    inf = float('Inf')\n\n    J1 = np.zeros(2)\n    J2 = np.zeros(4)\n    J3 = np.zeros(4)\n    J4 = np.zeros(8)\n    J5 = np.zeros(8)\n    J6 = np.zeros(8)\n\n\n    \"\"\" read the DH parameters \"\"\"\n    dof = len(DH_table)\n\n    a = np.zeros(dof)\n    alpha = np.zeros(dof)\n    d = np.zeros(dof)\n    theta = np.zeros(dof)\n\n    for i in range(dof):\n        a[i] = DH_table[i][2]\n        alpha[i] = DH_table[i][3]\n        d[i] = DH_table[i][1]\n        theta[i] = DH_table[i][0]\n\n    TEEFPosture = np.array([EulerAngle[0], EulerAngle[1], EulerAngle[2]])\n    EEF_Matr = RPY2Rot(TEEFPosture[0], TEEFPosture[1], TEEFPosture[2])\n\n    \"\"\" position here is in cm \"\"\"\n    nsdt = np.array([[EEF_Matr[0][0], EEF_Matr[0][1], EEF_Matr[0][2], Position[0]],\n                     [EEF_Matr[1][0], EEF_Matr[1][1], EEF_Matr[1][2], Position[1]],\n                     [EEF_Matr[2][0], EEF_Matr[2][1], EEF_Matr[2][2], Position[2]],\n                     [0, 0, 0, 1, ]])\n    t11, t12, t13, t14 = nsdt[0][0], nsdt[0][1], nsdt[0][2], nsdt[0][3]\n    t21, t22, t23, t24 = nsdt[1][0], nsdt[1][1], nsdt[1][2], nsdt[1][3]\n    t31, t32, t33, t34 = nsdt[2][0], nsdt[2][1], nsdt[2][2], nsdt[2][3]\n\n    J5x = t14 - d[5] * t13\n    J5z = t34 - d[5] * t33\n\n    Limitation = np.array([[-170, 170],\n                           [-82.79, 135],\n                           [-74.88, 104],\n                           [-190, 190],\n                           [-118.88, 118.88],\n                           [-360, 360]])\n    Limitation = Limitation * math.pi/180\n\n    \"\"\" singular check J5x = J5y = 0 \"\"\"\n    if J5x == 0 :\n        JointAngle = np.ones((8,6)) * inf\n    else:\n        \"\"\" J1 \"\"\"\n        J1[0] = math.atan2(t24 - d[5]*t23,t14 - d[5]*t13)\n        J1[1] = math.atan2(t24 - d[5]*t23,t14 - d[5]*t13) + math.pi\n        J1[0] = over180_rad(J1[0])\n        J1[1] = over180_rad(J1[1])\n\n        \"\"\" J3 \"\"\"\n","sub_path":"vrep/SAC_camera_version2/inverseKinematics.py","file_name":"inverseKinematics.py","file_ext":"py","file_size_in_byte":14368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"22607539","text":"#!/usr/bin/env python3\n\n# Initiate code with the necessary imports for different functional use\nimport json\nimport numpy as np\n\n# Function definition for handling json streams of data\ndef json_handler(file):\n\n    # arg is a json file path\n    # read json file\n    with open(file) as json_file:\n\n        # parse json data\n        # provide full file path\n        data = json.load(json_file)\n\n    # Return json as dictionary to be used\n    return data\n\n\n# Function definition for using input name of staff member to search through dict from json file\ndef input(staff):\n\n    print(staff)\n    selected_staff_req = []\n    dd = []\n    cc = []\n    bb = []\n    places_to_visit = []\n    print('------------------------------------------------------')\n    # Parse args to function\n    # Loop through json_handler dict to find key value pair for input\n    # Parse the json file locations to function\n    users_json = 'C:\\\\Users\\\\gerar\\\\source\\\\repos\\\\master\\\\g_python\\\\python_tdd\\\\users.json'\n    venues_json = 'C:\\\\Users\\\\gerar\\\\source\\\\repos\\\\master\\\\g_python\\\\python_tdd\\\\venues.json'\n\n    # Extract dictionaries from json data\n    users_data = json_handler(users_json)\n    venues_data = json_handler(venues_json)\n\n    # Loop through dictionary of users data from json files\n    for item in users_data:\n        # Loop through staff members inputted\n        for staff_members in staff:\n\n            # Extract first value of dictionary\n            values = item.values()\n            value_iter = iter(values)\n            first_value = next(value_iter)\n\n            # Compare first value with inputted staff members\n            if first_value == staff_members:\n                selected_staff_req.append(item)\n    print(selected_staff_req)\n    print('------------------------------------------------------')\n    \n    # Looping 
through venues to extract places to visit\n for item in venues_data:\n\n # Extracting second values of wont eats to make comparison\n venues_values = item.values()\n venues_values_iter = iter(venues_values)\n first_venues_value = next(venues_values_iter)\n second_venues_value = next(venues_values_iter)\n dd.append(second_venues_value)\n #print(second_venues_value)\n #print('---------------------------------------------------------')\n # Loop through selected staff requirements\n for each_staff_req in selected_staff_req:\n\n # Extract second value from selected staff req\n values = each_staff_req.values()\n value_iter = iter(values)\n first_value = next(value_iter)\n second_value = next(value_iter)\n cc.append(second_value)\n #print(second_value)\n print(dd)\n print('--------------------------------------------------')\n print(cc) \n print('--------------------------------------------------')\n\n # Next step would be to compare the matrices and parse the difference on to determine place to visit \n\n # After this I will again compare the matrices to determine which places to not visit\n \n # This can then be appended using the necessary text to form a list of dictionary\n \n # Then use the list of dictionaries to dump into a json file to output at the end of the script\n\n\ndef main(): # main function\n print(\"Welcome to Staff Outing Decider\")\n #input(json_handler())\n\nif __name__ == '__main__':\n main()","sub_path":"TimeOutDecider.py","file_name":"TimeOutDecider.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463816449","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport tensorflow as tf\r\nfrom tensorflow.python.ops import rnn,rnn_cell\r\nimport numpy as np\r\n\r\npath = os.getcwd()\r\n\r\ni='000001_0_1'\r\nfile_traindata = path + '/'+i+'/x_train_'+i+'.txt'\r\nfile_trainlabel = path + '/'+i+'/label_'+i+'.txt'\r\nfile_testdata = path + '/'+i+'/x_test_'+i+'.txt'\r\n# test_label = path + '\\\\all_user_data\\\\特征值\\\\base_0_sim_test_label.txt'\r\noutput_file_path = path + '/'+i+'/result_'+i+'.txt'\r\noutput_file = open(output_file_path, 'w+')\r\ntraindata = np.loadtxt(file_traindata)\r\ntrainlabel = np.loadtxt(file_trainlabel)\r\ntestdata = np.loadtxt(file_testdata)\r\n\r\nprint(i)\r\n# model_path = path + \"\\\\model\\\\base_0_sim_lstm.ckpt\"\r\nmodel_path = path+\"/\"+i+\"/model/lstm.ckpt\"\r\n\r\nlearning_rate = 0.000001\r\n# training_iters = 10000\r\n\r\n\r\nn_input = 1 # MNIST data input (img shape: 28*28)\r\nn_steps = 10 # timesteps\r\nn_hidden = 128 # hidden layer num of features\r\nn_classes = 2\r\n\r\n# tf Graph input\r\nx = tf.placeholder(\"float\", [1, n_steps, n_input])\r\ny = tf.placeholder(\"float\", [1, n_classes])\r\n\r\n# Define weights\r\nweights = {\r\n 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\r\n}\r\nbiases = {\r\n 'out': tf.Variable(tf.random_normal([n_classes]))\r\n}\r\n\r\n\r\ndef RNN(x, weights, biases):\r\n x = tf.transpose(x, [1, 0, 2])\r\n # used to be (batch_size, n_stpe, n_input)\r\n # Reshaping to (n_steps*batch_size, n_input)\r\n x = tf.reshape(x, [-1, n_input])\r\n # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\r\n x = tf.split(0, n_steps, x)\r\n # Define a lstm cell with tensorflow\r\n lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)\r\n # lstm_cell = rnn_cell.MultiRNNCell(lstm_cell,state_is_tuple=True)\r\n\r\n # Get lstm cell output\r\n outputs, 
states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\r\n\r\n # Linear activation, using rnn inner loop last output\r\n return tf.matmul(outputs[-1], weights['out']) + biases['out']\r\n\r\n\r\npre = RNN(x, weights, biases)\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pre, y))\r\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\ninit = tf.initialize_all_variables()\r\nsaver = tf.train.Saver()\r\n\r\nistrain=1\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n if istrain == 0:\r\n saver.restore(sess, model_path)\r\n print(traindata.shape[0])\r\n count = 1\r\n for i in range(testdata.shape[0]):\r\n data = traindata[i]\r\n label = trainlabel[i]\r\n # print(data.shape)\r\n data = data.reshape(1, n_steps, n_input)\r\n # print(label)\r\n label = label.reshape(1, n_classes)\r\n # print(data.shape)\r\n # print(label.shape)\r\n\r\n # print(\"sess run\")\r\n if istrain == 1:\r\n sess.run(optimizer, feed_dict={x: data, y: label})\r\n if i % 50 == 0:\r\n loss = sess.run(cost, feed_dict={x: data, y: label})\r\n print(i, loss)\r\n else:\r\n data_test = testdata[i]\r\n data_test = data_test.reshape(1, n_steps, n_input)\r\n result = sess.run(pre, feed_dict={x: data_test})\r\n\r\n print(result)\r\n num = np.argmax(result)\r\n print(count, num)\r\n output_file.write(str(num))\r\n output_file.write('\\n')\r\n count += 1\r\n\r\n if istrain == 1:\r\n save_path = saver.save(sess, model_path)\r\n\r\nistrain=0\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n if istrain == 0:\r\n saver.restore(sess, model_path)\r\n print(traindata.shape[0])\r\n count = 1\r\n for i in range(testdata.shape[0]):\r\n data = traindata[i]\r\n label = trainlabel[i]\r\n # print(data.shape)\r\n data = data.reshape(1, n_steps, n_input)\r\n # print(label)\r\n label = label.reshape(1, n_classes)\r\n # print(data.shape)\r\n # print(label.shape)\r\n\r\n # print(\"sess run\")\r\n if istrain == 1:\r\n sess.run(optimizer, feed_dict={x: data, y: label})\r\n if i % 50 == 0:\r\n loss = sess.run(cost, feed_dict={x: data, y: label})\r\n print(i, loss)\r\n else:\r\n data_test = testdata[i]\r\n data_test = data_test.reshape(1, n_steps, n_input)\r\n result = sess.run(pre, feed_dict={x: data_test})\r\n\r\n print(result)\r\n num = np.argmax(result)\r\n print(count, num)\r\n output_file.write(str(num))\r\n output_file.write('\\n')\r\n count += 1\r\n\r\n if istrain == 1:\r\n save_path = saver.save(sess, model_path)\r\n","sub_path":"data/lstm/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"549052917","text":"#\r\n# [188] Best Time to Buy and Sell Stock IV\r\n#\r\n# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iv\r\n#\r\n# Hard (24.17%)\r\n# Total Accepted: \r\n# Total Submissions: \r\n# Testcase Example: '2\\n[]'\r\n#\r\n# Say you have an array for which the ith element is the price of a given stock\r\n# on day i.\r\n#\r\n# Design an algorithm to find the maximum profit. 
You may complete at most k\r\n# transactions.\r\n#\r\n# Note:\r\n# You may not engage in multiple transactions at the same time (ie, you must\r\n# sell the stock before you buy again).\r\n#\r\n# Credits:Special thanks to @Freezen for adding this problem and creating all\r\n# test cases.\r\n#\r\n\r\n\r\nclass Solution(object):\r\n def maxProfit(self, k, prices):\r\n \"\"\"\r\n :type k: int\r\n :type prices: List[int]\r\n :rtype: int\r\n \"\"\"\r\n if not prices:\r\n return 0\r\n\r\n n = len(prices)\r\n if k > n // 2:\r\n return self.quick(prices)\r\n\r\n dp = [0] * n\r\n for i in range(1, k + 1):\r\n buy = -prices[0]\r\n preprofit = 0\r\n for j in range(1, n):\r\n temp = dp[j]\r\n dp[j] = max(dp[j - 1], buy + prices[j])\r\n buy = max(buy, preprofit - prices[j])\r\n preprofit = temp\r\n return dp[-1]\r\n\r\n def quick(self, prices):\r\n profit = 0\r\n for i in range(1, len(prices)):\r\n if prices[i] > prices[i - 1]:\r\n profit += prices[i] - prices[i - 1]\r\n return profit\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sol = Solution()\r\n assert(sol.maxProfit(2, [7, 1, 5, 3, 6, 4]) == 7)\r\n","sub_path":"accepted/188.best-time-to-buy-and-sell-stock-iv.2.py","file_name":"188.best-time-to-buy-and-sell-stock-iv.2.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"246655847","text":"# -*- encoding: utf-8 -*-\nfrom experimental import *\n\n\ndef test_CounttimeComponentSelectExpression__set_spanner_01():\n r'''Set crescendo spanner.\n '''\n\n score_template = scoretemplatetools.GroupedStavesScoreTemplate(staff_count=1)\n score_specification = musicexpressiontools.ScoreSpecificationInterface(score_template)\n score_specification.set_time_signatures(5 * [(2, 8)])\n score_specification.set_rhythm(library.note_tokens)\n score_specification.select_leaves('Voice 1').set_spanner(spannertools.CrescendoSpanner())\n score = score_specification.interpret()\n\n current_function_name = introspectiontools.get_current_function_name()\n testtools.write_test_output(score, __file__, current_function_name)\n assert score.lilypond_format == testtools.read_test_output(__file__, current_function_name)\n","sub_path":"abjad/experimental/tools/musicexpressiontools/test/test_CounttimeComponentSelectExpression__set_spanner.py","file_name":"test_CounttimeComponentSelectExpression__set_spanner.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614491120","text":"from datetime import datetime\nimport random\nfrom flask import Blueprint\n\nmod_two = Blueprint('ali', __name__)\n\ndef int_checker(func):\n def wrapper(*args, **kwargs):\n try:\n int(kwargs['num_max'])\n return func(kwargs['num_max'])\n except Exception:\n return 'Invalid Input!'\n finally:\n pass \n return wrapper\n\n\n@mod_two.route('/simple_random_with_check/')\n@int_checker\ndef simple_random_with_check(num_max):\n num = int(num_max)\n current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n return 'Randomized ' + str(random.randint(1, num)) + ' at ' + current_time\n","sub_path":"flaskexer/app/modules/math_two/math_two.py","file_name":"math_two.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"178176219","text":"import logging\nfrom file_Handler import FileHandler\nimport csv\nfrom prettytable import PrettyTable\nimport pandas as pd\nimport 
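The space-optimized DP above keeps one row `dp` plus two scalars, where `preprofit` holds the previous transaction count's `dp[j-1]` before it is overwritten. For clarity, a short expansion to the explicit two-dimensional recurrence — same algorithm, more memory; `dp[i][j]` is the best profit using at most `i` transactions through day `j`, and `buy` tracks the best `dp[i-1][m] - prices[m]` seen so far.

```python
# Sketch: the k-transaction DP written with an explicit 2-D table.
def max_profit_2d(k, prices):
    if not prices:
        return 0
    n = len(prices)
    dp = [[0] * n for _ in range(k + 1)]
    for i in range(1, k + 1):
        buy = -prices[0]                 # best (dp[i-1][m] - prices[m]) so far
        for j in range(1, n):
            dp[i][j] = max(dp[i][j - 1], buy + prices[j])   # hold vs sell today
            buy = max(buy, dp[i - 1][j] - prices[j])        # consider buying today
    return dp[k][-1]

assert max_profit_2d(2, [7, 1, 5, 3, 6, 4]) == 7
assert max_profit_2d(2, [3, 2, 6, 5, 0, 3]) == 7
```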
os\n\nlogging.basicConfig(level=logging.INFO, filename=\"logging.log\", filemode=\"a\",\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", datefmt='%m/%d/%Y %I:%M:%S %p')\n\n\ndef access_ResponsibleTraining():\n h = True\n while h:\n print('''\n *******************************************\n * You Came in as Responsible for Training *\n ******************************************* \n 1-Definition of Lesson\n 2-View List of Lessons\n 3-View List of Students\n 4-Search into List of Lessons\n 5-Search into List of Students\n 6-View Total Presented Units\n 7-View Selected Lessons by Students\n 8-View Students Who Have Chosen a Specified Lesson\n 9-View Specified Student Caretable\n 10-Reject or Confirm confirmed lessons\n 11-For Exit, Press 11 or any key\n ''')\n # try:\n choice = input(\"Please Select Your Choice: \")\n if choice == \"1\": # Definition of Lesson\n y = True\n while y:\n dic = {}\n a = input(\"Enter Lesson Name: \").capitalize()\n reader = FileHandler(\"list_of_lessons.csv\")\n result = reader.read_file()\n for i in result:\n while i[\"Name_Lesson\"] == a:\n logging.info(f'{a} is Repetitive')\n print('''\n this lesson name exist,\n please enter another lesson\n ''')\n a = input(\"Enter Lesson Name: \").capitalize()\n b = int(input(\"Enter Unit of Lesson: \"))\n c = int(input(\"Enter Total Capacity for This Lesson: \"))\n d = input(\"Enter Professor's Name: \").lower().replace(\" \", \"\")\n dic[\"Name_Lesson\"] = a\n dic[\"Unit\"] = b\n if d == \"\":\n dic[\"Professor's Name\"] = \"None\"\n else:\n dic[\"Professor's Name\"] = d\n dic[\"Total_Capacity\"] = c\n dic[\"Remain_Capacity\"] = c\n dic[\"Number_of_obtained\"] = 0\n with open(\"list_of_lessons.csv\", \"a\") as f:\n write = csv.DictWriter(f, fieldnames=[\"Name_Lesson\", \"Unit\", \"Professor's Name\", \"Total_Capacity\",\n \"Remain_Capacity\", \"Number_of_obtained\"])\n if f.tell() == 0:\n write.writeheader()\n write.writerow(dic)\n logging.info(f'{a} lesson added to list of lessons ')\n q = input(\"Do You Want to Register other New Lesson: select(Y/N) \").lower()\n if q == \"y\":\n y = True\n else:\n y = False\n\n elif choice == \"2\": # View List of Lessons\n df = pd.read_csv(\"list_of_lessons.csv\")\n print(df)\n # from prettytable import from_csv\n # with open(\"list_of_lessons.csv\", \"r\") as fp:\n # mytable = from_csv(fp)\n # print(mytable)\n\n\n\n elif choice == \"3\": # View List of Students\n\n df = pd.read_csv(\"List_of_Student.csv\")\n print(df)\n # from prettytable import from_csv\n # with open(\"List_of_Student.csv\", \"r\") as fp:\n # mytable = from_csv(fp)\n # print(mytable)\n elif choice == \"4\": # Search into List of Lessons\n lesson_name = input(\"Enter your desired lesson: \").capitalize()\n read_lessons = pd.read_csv(\"list_of_lessons.csv\")\n for ind, item in read_lessons.iterrows():\n if item[\"Name_Lesson\"] == lesson_name:\n mytable = PrettyTable(\n ['Name_Lesson', 'Unit', \"Professor's Name\", \"Total_Capacity\", \"Remain_Capacity\",\n \"Number_of_obtained\"])\n lis = [item['Name_Lesson'], item[\"Unit\"], item[\"Professor's Name\"], item[\"Total_Capacity\"],\n item[\"Remain_Capacity\"], item[\"Number_of_obtained\"]]\n mytable.add_row(lis)\n print(mytable)\n logging.info(f\"search based on {lesson_name} done successfully\")\n else:\n print(\"This Lesson don't Exist\")\n logging.info(f\"search based on {lesson_name} done Unsuccessfully\")\n\n elif choice == \"5\": # Search into List of Students\n print('''\n 1-Student Name\n 2-Student Code\n ''')\n search = input(\"Your Search Based 
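The lesson-definition branch above opens the CSV in append mode and writes the header only when `f.tell() == 0`, and the same open/DictWriter/tell pattern recurs several more times later in Access_Level.py. A small helper that factors it out; path and field names below are taken from the surrounding code.

```python
# Sketch: reusable "append a row, write the header only for a new file"
# helper, factoring out the f.tell() == 0 pattern used throughout the file.
import csv

def append_row(path, fieldnames, row):
    with open(path, "a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if f.tell() == 0:          # empty file -> header not written yet
            writer.writeheader()
        writer.writerow(row)

append_row("list_of_lessons.csv",
           ["Name_Lesson", "Unit", "Professor's Name", "Total_Capacity",
            "Remain_Capacity", "Number_of_obtained"],
           {"Name_Lesson": "Math", "Unit": 3, "Professor's Name": "None",
            "Total_Capacity": 30, "Remain_Capacity": 30,
            "Number_of_obtained": 0})
```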
on: \")\n find = False\n if search == \"1\":\n search_name = input(\"Enter Your The Desired Name: \").lower().replace(\" \", \"\")\n reader = FileHandler(\"List_of_Student.csv\")\n result = reader.read_file()\n for i in result:\n if i[\"FirstName\"] + i[\"LastName\"] == search_name:\n logging.info(f'the student with username {search_name}is found')\n find = True\n mytable = PrettyTable(['FirstName', 'LastName', 'UserName', 'StudentId'])\n lis = [i['FirstName'], i['LastName'], i['UserName'], i[\"StudentId\"]]\n mytable.add_row(lis)\n print(mytable)\n\n if find:\n print(\"There is not such a username!\")\n else:\n search_student_code = input(\"Enter Your The Desired Student Code: \").lower().replace(\" \", \"\")\n reader = FileHandler(\"List_of_Student.csv\")\n result = reader.read_file()\n for i in result:\n find = False\n if i[\"StudentId\"] == search_student_code:\n logging.info(f'the student with this code {search_student_code} is found')\n find = True\n mytable = PrettyTable(['FirstName', 'LastName', 'UserName', 'StudentId'])\n lis = [i['FirstName'], i['LastName'], i['UserName'], i[\"StudentId\"]]\n mytable.add_row(lis)\n print(mytable)\n\n if find:\n print(\"There is not such a student!\")\n elif choice == \"6\": # View Total Presented Units\n df2 = pd.read_csv('list_of_lessons.csv')\n summ = df2['Unit'].sum()\n print(f'Sum Total Presented Units is : {summ} ')\n\n\n elif choice == \"7\": # View Selected Lessons by Students\n data = pd.read_csv(\"list_of_lessons.csv\")\n data = data[data[\"Number_of_obtained\"] > 0]\n print(data)\n\n elif choice == \"8\": # View Students Who Have Chosen a Specified Lesson\n name_lesson = input(\"Enter Your The Desired Lesson \")\n data = pd.read_csv(\"list_student_lesson.csv\")\n data = data[data[\"Name_Lesson\"] == name_lesson]\n print(data)\n if not data.empty:\n print(f'''Students Who Have Chosen {name_lesson}:\n {data[\"First and Last Name\"]}\n ''')\n\n else:\n print(\"No One!!..\")\n\n elif choice == \"9\": # View Specified Student Caretable\n name_student = input(\"Enter Name of Your The Desired Student\").lower().replace(\" \", \"\")\n data = pd.read_csv(\"list_student_lesson.csv\")\n data = data[data[\"First and Last Name\"] == name_student]\n if not data.empty:\n print(data)\n else:\n print(\"this student don't select lesson yet \")\n\n elif choice == \"10\": # Reject or Confirm confirmed lessons\n df = pd.read_csv(\"list_student_lesson.csv\")\n df = df[df[\"Lesson_Status\"] == \"Unknown\"]\n print(df)\n if not df.empty:\n q = input(\"Are You Want To Change Status: (Y/N) \").lower()\n if q == \"y\":\n q2 = input(\"Select Student Name: \").lower().replace(\" \", \"\")\n df = df[(df[\"First and Last Name\"] == q2) & (df[\"Lesson_Status\"] == \"Unknown\")]\n print(df)\n pl=pd.read_csv(\"list_student_lesson.csv\")\n for index, row in df.iterrows():\n dd=input(\"Enter '1-Confirm' or '2-Reject'\")\n if dd==\"1\":\n pl.loc[index, \"Lesson_Status\"] = \"Confirm\"\n else:\n pl.loc[index, \"Lesson_Status\"] = \"Reject\"\n pl.to_csv(\"list_student_lesson.csv\", index=False)\n gh = pd.read_csv(f'Students_files/cartabl{row[\"UserName\"]}.csv')\n for ind,ro in gh.iterrows():\n if ro[\"Name_Lesson\"]==row[\"Name_Lesson\"]:\n if dd == \"1\":\n gh.loc[ind,\"Lesson_Status\"]=\"Confirm\"\n else:\n gh.loc[ind, \"Lesson_Status\"] =\"Reject\"\n gh.to_csv(f'Students_files/cartabl{row[\"UserName\"]}.csv',index=False)\n else:\n print(\"Nothing Exist\")\n\n else:\n print(\"Exit\")\n logging.info(\"User Exit from The Program\")\n h = False\n # except Exception as e:\n # 
print(e)\n\n\ndef access_Student(a):\n storage_path = 'Students_files'\n if os.path.exists(storage_path):\n print('Directory exist')\n\n else:\n os.mkdir(storage_path)\n print('Directory Created')\n\n def ret_user(a):\n with open(\"List_of_Student.csv\", \"r\") as f:\n read = csv.DictReader(f)\n for item in read:\n if item[\"UserName\"] == a:\n return item[\"FirstName\"] + item[\"LastName\"]\n\n h = True\n while h:\n print('''\n **************************\n * You Came in as Student *\n ************************** \n 1-View List of Lessons\n 2-Select Lesson\n 3-Search into List of Lessons\n 4-View Selected Lessons and Summation of Units\n 5-View Reject or Confirm of Selected lessons\n 6-Press any Key for Exit\n ''')\n\n choice = input(\"Please Select Your Choice: \")\n if choice == \"1\": # View List of Lessons\n df = pd.read_csv(\"list_of_lessons.csv\")\n print(df)\n # from prettytable import from_csv\n # with open(\"list_of_lessons.csv\", \"r\") as fp:\n # mytable = from_csv(fp)\n # print(mytable)\n elif choice == \"2\": # Select Lesson\n df = pd.read_csv(\"list_of_lessons.csv\")\n print(df)\n user_input = input(\"Enter Your Desirable Lesson: \")\n for index, row in df.iterrows():\n if row[\"Name_Lesson\"] == user_input and row[\"Remain_Capacity\"] > 0:\n if os.path.exists(f'Students_files/cartabl{a}.csv'):\n check = pd.read_csv(f'Students_files/cartabl{a}.csv')\n for Index, Row in check.iterrows():\n if Row[\"Name_Lesson\"] == user_input or (check['Unit'].sum() + row[\"Unit\"]) > 20:\n raise Exception(\n f\"You aren't Allowed to Select more than 20 Unit or a Duplicate lesson \")\n\n dic = {}\n dic2 = {}\n select_lesson = {}\n select_lesson[\"Name_Lesson\"] = user_input\n select_lesson[\"Unit\"] = row[\"Unit\"]\n select_lesson[\"Total_Capacity\"] = row[\"Total_Capacity\"]\n select_lesson[\"Number_of_obtained\"] = row[\"Number_of_obtained\"] + 1\n select_lesson[\"Remain_Capacity\"] = row[\"Total_Capacity\"] - select_lesson[\n \"Number_of_obtained\"]\n df.loc[index, \"Remain_Capacity\"] = select_lesson[\"Remain_Capacity\"]\n df.loc[index, \"Number_of_obtained\"] = select_lesson[\"Number_of_obtained\"]\n df.to_csv(\"list_of_lessons.csv\", index=False)\n dic[\"Name_Lesson\"] = select_lesson[\"Name_Lesson\"]\n dic[\"Unit\"] = select_lesson[\"Unit\"]\n dic2[\"Name_Lesson\"] = select_lesson[\"Name_Lesson\"]\n dic2[\"Unit\"] = select_lesson[\"Unit\"]\n dic2[\"UserName\"] = a\n dic2[\"First and Last Name\"] = ret_user(a)\n dic2[\"Lesson_Status\"] = \"Unknown\" # \"Unknown\",\"Confirmed\",\"Rejected\"\n dic[\"Lesson_Status\"] = dic2[\"Lesson_Status\"] # \"Unknown\",\"Confirmed\",\"Rejected\"\n s = os.path.join(storage_path, f'cartabl{a}.csv')\n with open(s, \"a\", newline=\"\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"Name_Lesson\", \"Unit\", \"Lesson_Status\"])\n if f.tell() == 0:\n writer.writeheader()\n writer.writerow(dic)\n with open(\"list_student_lesson.csv\", \"a\", newline=\"\") as k:\n writ = csv.DictWriter(k, fieldnames=[\"Name_Lesson\", \"Unit\", \"UserName\",\n \"First and Last Name\", \"Lesson_Status\"])\n if k.tell() == 0:\n writ.writeheader()\n writ.writerow(dic2)\n\n else:\n dic = {}\n dic2 = {}\n select_lesson = {}\n select_lesson[\"Name_Lesson\"] = user_input\n select_lesson[\"Unit\"] = row[\"Unit\"]\n select_lesson[\"Total_Capacity\"] = row[\"Total_Capacity\"]\n select_lesson[\"Number_of_obtained\"] = row[\"Number_of_obtained\"] + 1\n select_lesson[\"Remain_Capacity\"] = row[\"Total_Capacity\"] - select_lesson[\"Number_of_obtained\"]\n df.loc[index, \"Remain_Capacity\"] = 
select_lesson[\"Remain_Capacity\"]\n df.loc[index, \"Number_of_obtained\"] = select_lesson[\"Number_of_obtained\"]\n df.to_csv(\"list_of_lessons.csv\", index=False)\n dic[\"Name_Lesson\"] = select_lesson[\"Name_Lesson\"]\n dic[\"Unit\"] = select_lesson[\"Unit\"]\n dic2[\"Name_Lesson\"] = select_lesson[\"Name_Lesson\"]\n dic2[\"Unit\"] = select_lesson[\"Unit\"]\n dic2[\"UserName\"] = a\n dic2[\"First and Last Name\"] = ret_user(a)\n dic2[\"Lesson_Status\"] = \"Unknown\" # \"Unknown\",\"Confirmed\",\"Rejected\"\n dic[\"Lesson_Status\"] = dic2[\"Lesson_Status\"] # \"Unknown\",\"Confirmed\",\"Rejected\"\n s = os.path.join(storage_path, f'cartabl{a}.csv')\n with open(s, \"a\", newline=\"\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"Name_Lesson\", \"Unit\", \"Lesson_Status\"])\n if f.tell() == 0:\n writer.writeheader()\n writer.writerow(dic)\n with open(\"list_student_lesson.csv\", \"a\", newline=\"\") as k:\n writ = csv.DictWriter(k,\n fieldnames=[\"Name_Lesson\", \"Unit\", \"UserName\", \"First and Last Name\",\n \"Lesson_Status\"])\n if k.tell() == 0:\n writ.writeheader()\n writ.writerow(dic2)\n elif choice == \"3\":\n lesson_name = input(\"Enter your desired lesson: \")\n read_lessons = pd.read_csv(\"list_of_lessons.csv\")\n for ind, item in read_lessons.iterrows():\n if item[\"Name_Lesson\"] == lesson_name:\n mytable = PrettyTable(\n ['Name_Lesson', 'Unit', \"Professor's Name\", \"Total_Capacity\", \"Remain_Capacity\",\n \"Number_of_obtained\"])\n lis = [item['Name_Lesson'], item[\"Unit\"], item[\"Professor's Name\"], item[\"Total_Capacity\"],\n item[\"Remain_Capacity\"], item[\"Number_of_obtained\"]]\n mytable.add_row(lis)\n print(mytable)\n logging.info(f\"search based on {lesson_name} done successfully\")\n else:\n print(\"This Lesson don't Exist\")\n logging.info(f\"search based on {lesson_name} done Unsuccessfully\")\n\n elif choice == \"4\":\n jj = pd.read_csv(f'Students_files/cartabl{a}.csv')\n print(jj)\n summ = jj['Unit'].sum()\n print(f'Sum Total Units is : {summ} ')\n\n elif choice == \"5\":\n gh=pd.read_csv(f'Students_files/cartabl{a}.csv')\n print(gh)\n\n else:\n print(\"Exit\")\n logging.info(\"User Exit from The Program\")\n h = False\n\n\ndef access_Professor(a):\n def ret_user(a):\n with open(\"List_of_Professor.csv\", \"r\") as f:\n read = csv.DictReader(f)\n for item in read:\n if item[\"UserName\"] == a:\n return item[\"FirstName\"] + item[\"LastName\"]\n\n h = True\n while h:\n print('''\n **************************\n * You Came in as Professor *\n ************************** \n 1-View List of Lessons \n 2-Search into List of Lessons\n 3-View Selected Lessons\n 4-Select Lesson\n 5-View List of Students Based on Selected Lesson\n 6-For Exit, Press 6 or any key\n ''')\n choice = input(\"Please Select Your Choice: \")\n if choice == \"1\": # View List of Lessons\n\n df = pd.read_csv(\"list_of_lessons.csv\")\n print(df)\n\n elif choice == \"2\": # Search into List of Lessons\n lesson_name = input(\"Enter your desired lesson: \").capitalize()\n data = pd.read_csv(\"list_of_lessons.csv\")\n data = data[data[\"Name_Lesson\"] == lesson_name]\n print(data)\n\n elif choice == \"3\": # View Selected Lessons\n d = ret_user(a)\n data = pd.read_csv(\"list_of_lessons.csv\")\n data = data[data[\"Professor's Name\"] == d]\n print(data)\n elif choice == \"4\": # Select Lesson\n data = pd.read_csv(\"list_of_lessons.csv\")\n data = data[data[\"Professor's Name\"] == \"None\"]\n print(data)\n summ = data['Unit'].sum()\n count = 0\n df = pd.read_csv(\"list_of_lessons.csv\")\n\n if summ 
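`access_Student`'s choice "2" above repeats the whole capacity-update/CSV-write block verbatim in both the `if os.path.exists(...)` branch and the `else`; only the duplicate-lesson/20-unit guard differs. A sketch of one shared helper, mirroring the script's column names and files and reusing the `append_row` helper sketched earlier; `ret_user` is the closure defined in the function.

```python
# Sketch: single registration helper replacing the two copied branches in
# access_Student (same field names and CSV files as the original; relies on
# the append_row helper sketched above).
import os

def register_lesson(df, index, row, username, storage_path, ret_user):
    taken = row["Number_of_obtained"] + 1
    df.loc[index, "Number_of_obtained"] = taken
    df.loc[index, "Remain_Capacity"] = row["Total_Capacity"] - taken
    df.to_csv("list_of_lessons.csv", index=False)
    entry = {"Name_Lesson": row["Name_Lesson"], "Unit": row["Unit"],
             "Lesson_Status": "Unknown"}
    append_row(os.path.join(storage_path, f"cartabl{username}.csv"),
               ["Name_Lesson", "Unit", "Lesson_Status"], entry)
    append_row("list_student_lesson.csv",
               ["Name_Lesson", "Unit", "UserName", "First and Last Name",
                "Lesson_Status"],
               {**entry, "UserName": username,
                "First and Last Name": ret_user(username)})
```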
>= 10:\n c = True\n else:\n c = False\n while c:\n sel = input(\"Select Name of Your The Desired Lesson From Above List: \")\n for index, row in df.iterrows():\n if row[\"Name_Lesson\"] == sel:\n count += row[\"Unit\"]\n if count <= 15:\n df.loc[index, \"Professor's Name\"] = ret_user(a)\n df.to_csv(\"list_of_lessons.csv\", index=False)\n data = pd.read_csv(\"list_of_lessons.csv\")\n data = data[data[\"Professor's Name\"] == \"None\"]\n print(data)\n Que = input(\"Do You Want to Continue? (Y/N) \").lower()\n if Que == \"y\":\n continue\n else:\n c = False\n else:\n print(\"you aren't allowed to select more 15 units \")\n c = False\n\n elif choice == \"5\": # View List of Students Based on Selected Lesson\n select_name = input(\"Enter Your Desired Lesson Name: \")\n data = pd.read_csv(\"list_student_lesson.csv\")\n data = data[data[\"Name_Lesson\"] == select_name]\n if not data.empty:\n print(data)\n else:\n print(\"No one chose this lesson\")\n else:\n print(\"Exit\")\n logging.info(\"User Exit from The Program\")\n h = False\n\n\n\n\n\n# access_ResponsibleTraining()\n# access_Student(\"jahani\")\n\n# access_Professor(\"anoshe\")\n","sub_path":"Access_Level.py","file_name":"Access_Level.py","file_ext":"py","file_size_in_byte":21102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"85688511","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Borja Gete'\n__email__ = 'borjagete90@outlook.es'\n\nimport time\nimport re\n\nfrom bs4 import BeautifulSoup\n\nfrom connection.login_page import login\nfrom trainer import game\n\n\ndef enter_calendar(auth, db, team_id):\n \"\"\" Almacena en BD las estadisticas de los partidos\n disputados por el equipo\n\n Keyword arguments:\n auth -- Cadena de autenticacion a la web.\n db -- Objeto de conexion a la BD.\n team_id -- Id del equipo\n \"\"\"\n\n year = 2019\n for i in range(1, 4):\n n = str(i).zfill(2)\n print('[' + time.strftime(\"%H:%M:%S\") + '] > Analizando Mes: \\\n ' + str(year) + '-' + str(n))\n # http://es.ibasketmanager.com/proximos_partidos.php?\\\n # mes=2019-01&id_equipo=14612\n calendar_url = 'http://es.ibasketmanager.com/proximos_partidos.php?'\n calendar_url = calendar_url + 'mes=2019-' + n\n calendar_url = calendar_url + '&id_equipo=' + str(team_id)\n game_data = analyze_calendar(calendar_url, auth, i, year)\n for v_game in game_data:\n analyze_game(v_game, auth, team_id, db)\n\n\ndef analyze_calendar(calendar_url, auth, month, year):\n \"\"\" Devolvemos un array con los datos iniciales de cada partido\n\n Keyword arguments:\n calendar_url -- URL del calendario del equipo\n auth -- Cadena de autenticacion a la web.\n month -- Mes del calendario.\n year -- Año del calendario\n \"\"\"\n session = login(auth)\n\n r = session.get(calendar_url)\n load_status = 0\n while load_status != 200:\n load_status = r.status_code\n\n soup = BeautifulSoup(r.content, 'html.parser')\n trs = soup.find('table', {\"id\": \"calendario\"}).find_all('tr')\n game_data = []\n for tr in trs: # Semanas\n tds = tr.find_all('td')\n for td in tds: # Dias\n # print('\\t' + str(td))\n if(td.find('div', {\"class\": \"numdia\"}) and td.find(\n 'div', {\"class\": \"tipopart\"})):\n dia = td.find('div', {\"class\": \"numdia\"}).text\n partido_id = str(td.find_all(\n 'a')[0]['href']).split('=')[1].strip()\n rival_id = 0\n if(len(td.find_all('a')) > 1):\n rival_id = td.find_all(\n 'a')[1]['href'].split('=')[1].strip()\n tipo = td.find('div', {\"class\": \"tipopart\"}).text\n tipo = re.search(r'([A-Z])\\w\\D+', 
tipo).group(0)\n home = 'Away'\n if(td.find('div', {\"style\": \"background: \\\n url('/img/as/ihouse.png') 100% 100% no-repeat; width: 86px;\\\n height: 44px;\"})):\n home = 'Home'\n # print('\\t' + str([partido_id,str(str(dia) + '/\\\n # ' + str(month) + '/' + str(year)),rival_id,tipo, home]))\n game_data.append([partido_id, str(str(dia) + '/\\\n ' + str(month) + '/' + str(year)), rival_id, tipo, home])\n\n return game_data\n\n\ndef analyze_game(game_data, auth, team_id, db):\n \"\"\" Insertamos los datos de partido y estadisticas en la BD\n\n Keyword arguments:\n game_data -- Array de datos iniciales del partido\n auth -- Cadena de autenticacion a la web.\n team_id -- Id del equipo\n db -- Objeto de conexion a la BD.\n \"\"\"\n\n # http://es.ibasketmanager.com/partido.php?id=24441704\n if(game_data[3] != 'All Star'):\n game_url_data = 'http://es.ibasketmanager.com/partido.php?id='\n game_url_data = game_url_data + game_data[0] + '&accion=datos'\n game_url_stats = 'http://es.ibasketmanager.com/partido.php?id='\n game_url_stats = game_url_stats + game_data[0] + '&accion=alineaciones'\n game_url_mvp = 'http://es.ibasketmanager.com/partido.php?id='\n game_url_mvp = game_url_mvp + game_data[0] + '&accion=jugador'\n\n mvp_id = analyze_mvp(game_url_mvp, auth)\n v_game = analyze_game_data(\n game_url_data, auth, game_data, team_id, mvp_id) # Data-Home-Away\n\n if(v_game[0] is not None):\n if (db.games.find_one(\n {\"id_game\": int(v_game[0].id_game)}) is None):\n db.games.insert_one(game[0].to_db_collection())\n if(v_game[1] is not None):\n if (db.team_stats.find_one(\n {\"$and\": [{\"game_id\": int(v_game[1].game_id)},\n {\"team_id\": int(v_game[1].team_id)}]}) is None):\n db.team_stats.insert_one(game[1].to_db_collection())\n if(v_game[2] is not None):\n if (db.team_stats.find_one(\n {\"$and\": [{\"game_id\": int(v_game[2].game_id)},\n {\"team_id\": int(game[2].team_id)}]}) is None):\n db.team_stats.insert_one(game[2].to_db_collection())\n if(v_game[1] is not None and game[2] is not None):\n player_stats = analyze_player_stats(\n game_url_stats, v_game[1].team_id, v_game[2].team_id,\n auth, v_game[0].id_game)\n for player_stat in player_stats:\n if (db.player_stats.find_one(\n {\"$and\": [{\"game_id\": int(player_stat.game_id)},\n {\"player_id\": int(player_stat.player_id)}]}\n ) is None):\n db.player_stats.insert_one(player_stat.to_db_collection())\n\n\ndef analyze_player_stats(url, home_team_id, away_team_id, auth, game_id):\n \"\"\" Analizamos las estadisticas de los jugadores en el partido y devolvemos\n un array de instancias de Player_Stats\n\n Keyword arguments:\n url -- URL de datos del partido -- Cadena de autenticacion a la web.\n home_team_id -- Id del equipo izquierdo\n away_team_id -- Id del equipo derecho\n auth -- Cadena de autenticacion a la web.\n game_id -- Id del partido disputado\n \"\"\"\n session = login(auth)\n\n r = session.get(url)\n\n load_status = 0\n while load_status != 200:\n load_status = r.status_code\n\n # print('[' + time.strftime(\"%H:%M:%S\") + '] > ' + str(url))\n soup = BeautifulSoup(r.content, 'html.parser')\n tables = soup.find_all('table', {\"id\": \"pagetabla\"})\n home_trs = tables[0].find('tbody').find_all('tr')\n away_trs = tables[1].find('tbody').find_all('tr')\n stats = []\n for tr in home_trs:\n if(tr.has_attr(\"class\")):\n # print(tr['class'])\n tds = tr.find_all('td')\n if(tds[0].text != 'TOTAL'):\n # print(tds[5])\n player_id = tds[3].find(\n 'a')['href'].split('id_jugador=')[1].strip()\n if(player_id == ''):\n player_id = '0'\n t2_m = 
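Each fetch in calendar_page.py follows `session.get(url)` with `while load_status != 200: load_status = r.status_code`. Since the response object is already complete at that point, the loop either exits immediately or spins forever on a non-200 reply — it never re-issues the request. A hedged retry sketch that actually repeats the call:

```python
# Sketch: a real retry loop to replace the status-polling pattern above.
# `session` is the authenticated requests session returned by login().
import time

def get_with_retry(session, url, attempts=3, delay=2.0):
    for _ in range(attempts):
        r = session.get(url)
        if r.status_code == 200:
            return r
        time.sleep(delay)            # back off, then re-issue the request
    raise RuntimeError(f"{url} still failing after {attempts} attempts "
                       f"(last status {r.status_code})")
```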
tds[5].text.split('/')[0].strip()\n t2_a = tds[5].text.split('/')[1].strip()\n t3_m = tds[7].text.split('/')[0].strip()\n t3_a = tds[7].text.split('/')[1].strip()\n ft_m = tds[9].text.split('/')[0].strip()\n ft_a = tds[9].text.split('/')[1].strip()\n reb_def = tds[11].text.strip()\n reb_off = tds[12].text.strip()\n steal = tds[14].text.strip()\n turnover = tds[15].text.strip()\n assist = tds[16].text.strip()\n block = tds[17].text.strip()\n opp_block = tds[18].text.strip()\n foul = tds[19].text.strip()\n rec_foul = tds[20].text.strip()\n minutes = tds[21].text.split(':')[0].strip()\n seconds = tds[21].text.split(':')[1].strip()\n # print(PlayerStats(game_id, home_team_id, player_id, t2_m,\n # t2_a, t3_m, t3_a, ft_m, ft_a, reb_def, reb_off, assist,\n # steal, turnover, block, opp_block, foul, rec_foul, minutes,\n # seconds))\n stats.append(\n game.PlayerStats(game_id, home_team_id, player_id, t2_m,\n t2_a, t3_m, t3_a, ft_m, ft_a, reb_def,\n reb_off, assist, steal, turnover, block,\n opp_block, foul, rec_foul, minutes,\n seconds))\n for tr in away_trs:\n if(tr.has_attr(\"class\")):\n # print(tr['class'])\n tds = tr.find_all('td')\n if(tds[0].text != 'TOTAL'):\n # print(tds[5])\n player_id = tds[3].find(\n 'a')['href'].split('id_jugador=')[1].strip()\n if(player_id == ''):\n player_id = '0'\n t2_m = tds[5].text.split('/')[0].strip()\n t2_a = tds[5].text.split('/')[1].strip()\n t3_m = tds[7].text.split('/')[0].strip()\n t3_a = tds[7].text.split('/')[1].strip()\n ft_m = tds[9].text.split('/')[0].strip()\n ft_a = tds[9].text.split('/')[1].strip()\n reb_def = tds[11].text.strip()\n reb_off = tds[12].text.strip()\n steal = tds[14].text.strip()\n turnover = tds[15].text.strip()\n assist = tds[16].text.strip()\n block = tds[17].text.strip()\n opp_block = tds[18].text.strip()\n foul = tds[19].text.strip()\n rec_foul = tds[20].text.strip()\n minutes = tds[21].text.split(':')[0].strip()\n seconds = tds[21].text.split(':')[1].strip()\n # print(PlayerStats(game_id, away_team_id, player_id, t2_m,\n # t2_a, t3_m, t3_a, ft_m, ft_a, reb_def, reb_off, assist,\n # steal, turnover, block, opp_block, foul, rec_foul, minutes,\n # seconds))\n stats.append(\n game.PlayerStats(game_id, away_team_id, player_id, t2_m,\n t2_a, t3_m, t3_a, ft_m, ft_a, reb_def,\n reb_off, assist, steal, turnover, block,\n opp_block, foul, rec_foul, minutes,\n seconds))\n\n return stats\n\n\ndef analyze_game_data(url, auth, game_data, team_id, mvp_id):\n \"\"\" Devuelve una instancia de Game con los datos del partido\n\n Keyword arguments:\n url -- URL de datos del partido\n auth -- Cadena de autenticacion a la web.\n game_data -- Array de datos iniciales del partido\n team_id -- Id del equipo\n mvp_id -- Id del MVP\n \"\"\"\n session = login(auth)\n\n r = session.get(url)\n\n load_status = 0\n while load_status != 200:\n load_status = r.status_code\n\n # print('[' + time.strftime(\"%H:%M:%S\") + '] > ' + str(url))\n soup = BeautifulSoup(r.content, 'html.parser')\n trs = soup.find('table', {\"class\": \"datos_partido\"}).find_all('tr')\n # Data\n if(game_data[4] == 'Home'):\n home_team_id = team_id\n away_team_id = game_data[2]\n else:\n home_team_id = game_data[2]\n away_team_id = team_id\n attendance = str(trs[1].find_all('td')[0].text).replace('.', '').strip()\n money1 = str(trs[2].find_all('td')[0].text).replace(\n '.', '').replace('€', '').strip()\n if(money1 == ''):\n money1 = 0\n money2 = str(trs[2].find_all('td')[2].text).replace(\n '.', '').replace('€', '').strip()\n if(money2 == ''):\n money2 = 0\n money = int(money1) + 
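`analyze_player_stats` above parses `home_trs` and `away_trs` with two identical ~30-line loops that differ only in the team id passed to `game.PlayerStats`. A sketch of one shared row parser applied to both rosters; the column indices and constructor argument order are copied from the original, and `game.PlayerStats` is the class the file already imports.

```python
# Sketch: shared roster parser for analyze_player_stats (same td indices
# and PlayerStats argument order as the duplicated loops above).
def parse_stat_rows(trs, team_id, game_id):
    stats = []
    for tr in trs:
        if not tr.has_attr("class"):
            continue
        tds = tr.find_all("td")
        if tds[0].text == "TOTAL":
            continue
        player_id = tds[3].find("a")["href"].split("id_jugador=")[1].strip() or "0"
        t2_m, t2_a = (v.strip() for v in tds[5].text.split("/"))
        t3_m, t3_a = (v.strip() for v in tds[7].text.split("/"))
        ft_m, ft_a = (v.strip() for v in tds[9].text.split("/"))
        minutes, seconds = (v.strip() for v in tds[21].text.split(":"))
        stats.append(game.PlayerStats(
            game_id, team_id, player_id, t2_m, t2_a, t3_m, t3_a, ft_m, ft_a,
            tds[11].text.strip(), tds[12].text.strip(), tds[16].text.strip(),
            tds[14].text.strip(), tds[15].text.strip(), tds[17].text.strip(),
            tds[18].text.strip(), tds[19].text.strip(), tds[20].text.strip(),
            minutes, seconds))
    return stats

# stats = (parse_stat_rows(home_trs, home_team_id, game_id)
#          + parse_stat_rows(away_trs, away_team_id, game_id))
```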
int(money2)\n if(mvp_id == ''):\n mvp_id = 0\n\n # Home\n t2 = trs[4].find_all('td')[0].text\n if(t2 == '0'):\n t2_m = 0\n t2_a = 0\n else:\n t2_m = t2[:t2.find('/')].strip()\n t2_a = t2[t2.find('/')+1:t2.find('(')].strip()\n t3 = trs[5].find_all('td')[0].text\n if(t3 == '0'):\n t3_m = 0\n t3_a = 0\n else:\n t3_m = t3[:t3.find('/')].strip()\n t3_a = t3[t3.find('/')+1:t3.find('(')].strip()\n ft = trs[6].find_all('td')[0].text\n if(ft == '0'):\n ft_m = 0\n ft_a = 0\n else:\n ft_m = ft[:ft.find('/')].strip()\n ft_a = ft[ft.find('/')+1:ft.find('(')].strip()\n reb_def = trs[7].find_all('td')[0].text\n reb_off = trs[8].find_all('td')[0].text\n assist = trs[10].find_all('td')[0].text\n steal = trs[11].find_all('td')[0].text\n turnover = trs[12].find_all('td')[0].text\n block = trs[13].find_all('td')[0].text\n foul = trs[15].find_all('td')[0].text\n\n home = game.TeamStats(game_data[0], home_team_id, t2_m, t2_a, t3_m, t3_a,\n ft_m, ft_a, reb_def, reb_off, assist, steal,\n turnover, block, foul)\n # print(home)\n # Away\n t2 = trs[4].find_all('td')[2].text\n if(t2 == '0'):\n t2_m = 0\n t2_a = 0\n else:\n t2_m = t2[:t2.find('/')].strip()\n t2_a = t2[t2.find('/')+1:t2.find('(')].strip()\n t3 = trs[5].find_all('td')[2].text\n if(t3 == '0'):\n t3_m = 0\n t3_a = 0\n else:\n t3_m = t3[:t3.find('/')].strip()\n t3_a = t3[t3.find('/')+1:t3.find('(')].strip()\n ft = trs[6].find_all('td')[2].text\n if(ft == '0'):\n ft_m = 0\n ft_a = 0\n else:\n ft_m = ft[:ft.find('/')].strip()\n ft_a = ft[ft.find('/')+1:ft.find('(')].strip()\n reb_def = trs[7].find_all('td')[2].text\n reb_off = trs[8].find_all('td')[2].text\n assist = trs[10].find_all('td')[2].text\n steal = trs[11].find_all('td')[2].text\n turnover = trs[12].find_all('td')[2].text\n block = trs[13].find_all('td')[2].text\n foul = trs[15].find_all('td')[2].text\n\n away = game.TeamStats(game_data[0], away_team_id, t2_m, t2_a, t3_m, t3_a,\n ft_m, ft_a, reb_def, reb_off, assist, steal,\n turnover, block, foul)\n # print(away)\n v_game = game.Game(game_data[0], game_data[3], game_data[1], home_team_id,\n away_team_id, attendance, money, int(mvp_id))\n\n return [v_game, home, away]\n\n\ndef analyze_mvp(url, auth):\n \"\"\" Devolvemos el id del mvp del partido\n\n Keyword arguments:\n url -- URL del MVP del partido\n auth -- Cadena de autenticacion a la web.\n \"\"\"\n session = login(auth)\n\n r = session.get(url)\n\n load_status = 0\n while load_status != 200:\n load_status = r.status_code\n\n # print('[' + time.strftime(\"%H:%M:%S\") + '] > ' + str(url))\n soup = BeautifulSoup(r.content, 'html.parser')\n src = soup.find('div', {\"class\": \"mrx\"}).find_all('img')[0]['src']\n\n return src[str(src).find('=')+1:str(src).find('&')]\n","sub_path":"ibm-auto-manager/trainer/calendar_page.py","file_name":"calendar_page.py","file_ext":"py","file_size_in_byte":14445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"466124618","text":"'''\nCreated on Nov 26, 2017\n\n@author: henryliu\n'''\nimport sys, csv\n\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef main(argv): \n\n # read in vehicles csv\n vehicles_df = pd.read_csv(\"../data/vehicles.csv\", low_memory=False)\n \n vehicles_displ_mpg_all = vehicles_df[['displ', 'UHighway']]\n vehicles_displ_mpg = vehicles_displ_mpg_all[vehicles_displ_mpg_all.displ > 0]\n \n half = int(len (vehicles_displ_mpg) / 2)\n \n # create the training set with the 
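`analyze_game_data` pulls apart cells like `"12/25 (48%)"` with chained `find('/')`/`find('(')` slices, repeated once per shooting stat and per side. A small regex helper condenses that; the cell format is inferred from the slicing logic, not documented by the site.

```python
# Sketch: parse "made/attempts (pct%)" cells such as "12/25 (48%)" in one
# place, covering the scraper's bare-'0' empty-stat case.
import re

def made_attempted(cell):
    m = re.match(r"\s*(\d+)\s*/\s*(\d+)", cell)
    return (int(m.group(1)), int(m.group(2))) if m else (0, 0)

assert made_attempted("12/25 (48%)") == (12, 25)
assert made_attempted("0") == (0, 0)
```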
first half of data\n first_half = vehicles_displ_mpg [:half]\n second_half = vehicles_displ_mpg [half:]\n \n first_half_sorted = first_half.sort_values(by=['displ', 'UHighway'])\n first_half_grouped_by_mean = pd.DataFrame({'train_mean' : \\\n first_half_sorted.groupby('displ')['UHighway'].mean()}).reset_index() \n \n first_half_x = first_half_grouped_by_mean ['displ'].values.reshape(-1,1)\n print(type(first_half_x))\n print(first_half_x.shape)\n first_half_y = first_half_grouped_by_mean ['train_mean'].values.reshape(-1,1)\n print(first_half_y.shape)\n \n #ax = first_half_grouped_by_median.plot (x = \"displ\", y = \"train_median\", c = \"b\")\n #plt.show ()\n \n second_half_sorted = second_half.sort_values(by=['displ', 'UHighway'])\n second_half_grouped_by_mean = pd.DataFrame({'test_mean' : \\\n second_half_sorted.groupby('displ')['UHighway'].mean()}).reset_index() \n \n second_half_x = second_half_grouped_by_mean ['displ'].values.reshape(-1,1)\n second_half_y = second_half_grouped_by_mean ['test_mean'].values.reshape(-1,1)\n #second_half_grouped_by_median.plot (ax=ax, x = \"displ\", y = \"test_median\", c = \"gold\")\n #plt.show ()\n \n # Create linear regression object\n regr = linear_model.LinearRegression()\n\n # Train the model using the training sets\n regr.fit(first_half_x, first_half_y)\n\n # Make predictions using the testing set\n second_half_y_pred = regr.predict(second_half_x)\n\n # intercept & coefficients\n print('Intercept: ', regr.intercept_)\n print('Coefficients: ', regr.coef_)\n\n # mean squared error\n mse = mean_squared_error(second_half_y, second_half_y_pred)\n rmse = np.sqrt(mse)\n print(\"Mean squared error: %.2f\" % mse)\n print(\"Root mean squared error: %.2f\" % rmse)\n # Explained variance score: 1 is perfect prediction\n print('R-squared score: %.2f' % r2_score(second_half_y, second_half_y_pred))\n\n # Plot outputs\n plt_train = plt.scatter(first_half_x, first_half_y, marker='s', color='green')\n plt_test = plt.scatter(second_half_x, second_half_y, marker='o', color='red')\n plt.plot(second_half_x, second_half_y_pred, color='blue', linewidth=3)\n plt.legend ((plt_train, plt_test), ('train', 'test'))\n plt.xlabel (\"Engine displacement (liter)\")\n plt.ylabel (\"Fuel economy (MPG)\")\n\n plt.show()\n \n# entry point to the main function \nif __name__ == '__main__':\n main (sys.argv)\n","sub_path":"samples/ch02/vehicles_linear_regression_mean.py","file_name":"vehicles_linear_regression_mean.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"205978512","text":"from selenium import webdriver\nimport time\n\nfrom selenium import webdriver\nimport time\n\nfirefox_profile = webdriver.FirefoxProfile()\nfirefox_profile.set_preference(\"permissions.default.stylesheet\", 2) # 禁用样式表文件\nfirefox_profile.set_preference(\"permissions.default.image\", 2) # 不加载图片\nfirefox_profile.set_preference(\"javascript.enabled\", False) # 禁止JS\nfirefox_profile.update_preferences() # 更新设置\nfirefox = webdriver.Firefox(firefox_profile, executable_path=\"/Users/yons/webdriver/geckodriver\")\n\nurl = 'https://www.zhihu.com'\nprint(\"开始加载\")\nt_start = time.time()\nfirefox.get(url)\nt_end = time.time()\nprint(\"加载时间是:\", t_end - t_start)\ntime.sleep(10)\nfirefox.quit()\n\n\n\n\n#\n# 模拟登陆微博\n# browser.get(\"http://www.weibo.com/\")\n#\n# # 等待10秒,等待页面全部加载完成\n# time.sleep(10)\n#\n# browser.find_element_by_css_selector(\"#loginname\").send_keys(\"13419516267\")\n# 
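The regression script above splits the vehicles rows positionally into first and second halves, so whatever ordering the CSV ships with leaks into the train/test partition. A hedged alternative using scikit-learn's randomized split on the same dataframe; column names are taken from the script.

```python
# Sketch: randomized split instead of the positional halves above
# (same columns; random_state pinned for reproducibility).
from sklearn.model_selection import train_test_split

train_df, test_df = train_test_split(
    vehicles_displ_mpg, test_size=0.5, random_state=42)

train_mean = train_df.groupby('displ')['UHighway'].mean().reset_index()
test_mean = test_df.groupby('displ')['UHighway'].mean().reset_index()

X_train = train_mean[['displ']].values   # shape (n, 1), as sklearn expects
y_train = train_mean['UHighway'].values
```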
browser.find_element_by_css_selector(\".info_list.password input[name='password']\").clear().send_keys(\"ssjusher123\")\n# browser.find_element_by_css_selector(\".info_list.login_btn a[node-type='submitBtn']\").clear().click()\n\n# 模拟鼠标下拉\n# browser.get(\"https://www.oschina.net/blog\")\n# time.sleep(5)\n# for i in range(3):\n# # 执行js代码模拟下拉动作\n# browser.execute_script(\n# \"window.scrollTo(0, document.body.scrollHeight); var lenofPage=document.body.scrollHeight; return lenofPage\")\n# time.sleep(3)\n#\n# browser.quit()\n","sub_path":"ArticleSpider/tools/selenium_test.py","file_name":"selenium_test.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"344170898","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n s = 0 \n maximum = -999999999999\n for i in range(len(nums)):\n s = max(s+nums[i],nums[i])\n if s > maximum :\n maximum = s \n return maximum\n \n ","sub_path":"Leet Code/30 Days of Code/week_01/Maximum_Subarray.py","file_name":"Maximum_Subarray.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"508300180","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/5/11 11:15\n# @Author : DarrenZhang\n# @FileName: 01_TensorBoard.py\n# @Software: PyCharm\n# @Blog :https://www.yuque.com/darrenzhang\n# @Brief : 测试tensorboard\n\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nwriter = SummaryWriter(comment='test_tensorboard')\n\nfor x in range(100):\n writer.add_scalar('y=2x', x * 2, x)\n writer.add_scalar('y=pow(2, x)', 2 ** x, x)\n\n writer.add_scalars('data/scalar_group', {\"xsinx\": x * np.sin(x),\n \"xcosx\": x * np.cos(x),\n \"arctanx\": np.arctan(x)}, x)\nwriter.close()\n","sub_path":"10_tensorboard/01_TensorBoard.py","file_name":"01_TensorBoard.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433394489","text":"\"\"\"\n============================================\nWindow Generator\n============================================\n\n@author: happle@arch.ethz.ch\n\nApril 2016\n\n\"\"\"\n\nfrom __future__ import division\nimport geopandas\nimport rtree\n\nfrom geopandas import GeoDataFrame as gpdf\nimport descartes\nfrom matplotlib import pyplot\n\nimport gdal\nimport struct\n\nfrom shapely.geometry import Polygon, Point\nfrom rtree import index\n\nimport arcpy\n\n\ndef building_footprint_real_to_box(building_footprint_real ):\n\n # building_footprint_real: shape file containing building real footprint\n\n footprint_box = building_footprint_real.geometry.envelope\n # TODO: find smallest box with real orientation\n\n building_footprint_box = building_footprint_real # copy shape file\n building_footprint_box.geometry = footprint_box # change geometry from real footprint to box\n\n return building_footprint_box\n\ndef building_centroid_DEM_height( building, dem ):\n\n # code inspired by: http://www.gis.usu.edu/~chrisg/python/2009/lectures/ospy_slides4.pdf\n\n\n building_centroid = building.geometry.centroid\n\n # get dem raster size\n size_x = dem.RasterXSize\n size_y = dem.RasterYSize\n\n # get geo reference matrix\n georef = dem.GetGeoTransform()\n origin_x = georef[0]\n origin_y = georef[3]\n pixel_width = georef[1]\n pixel_height = georef[5]\n\n # for all building footprint centroids\n for i in range(0,building_centroid.size):\n\n # get centroid coordinates\n 
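The `maxSubArray` solution above is Kadane's algorithm: `s = max(s + x, x)` either extends the running window or restarts it at the current element. A standalone sketch with the `-999999999999` sentinel replaced by `float('-inf')`, plus quick checks including the all-negative case:

```python
# Sketch: Kadane's algorithm as a standalone function.
def max_subarray(nums):
    best = float('-inf')
    running = 0
    for x in nums:
        running = max(running + x, x)   # extend the window or restart at x
        best = max(best, running)
    return best

assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6   # [4, -1, 2, 1]
assert max_subarray([-3, -1, -2]) == -1                     # all negative
```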
x = building_centroid.get_value(i).x\n y = building_centroid.get_value(i).y\n\n # compute pixel offset\n offset_x = int((x - origin_x) / pixel_width)\n offset_y = int((y - origin_y) / pixel_height)\n\n # read the height at the building centroid\n data = band.ReadAsArray(offset_x, offset_y, 1, 1)\n value = data[0, 0]\n\n print(value)\n\n\n return\n\n\n\n\n\n# TESTING\nif __name__ == '__main__':\n\n # testing building file read\n # **************************\n path_geometry = 'C:/reference-case/baseline/1-inputs/1-buildings/building_geometry.shp'\n\n building_geometry = gpdf.from_file(path_geometry)\n\n path_dem = 'C:/reference-case/baseline/1-inputs/2-terrain/terrain/w001001.adf'\n\n gdal_layer = gdal.Open(path_dem) # http://www.itopen.it/how-to-read-a-raster-cell-with-python-qgis-and-gdal/\n\n print(gdal_layer.GetMetadata())\n\n gt = gdal_layer.GetGeoTransform() # get geo transform matrix\n\n print(gt)\n\n # get image size\n rows = gdal_layer.RasterYSize\n cols = gdal_layer.RasterXSize\n bands = gdal_layer.RasterCount\n print(rows, cols, bands)\n\n xo, xs, xr, yo, yr, ys = gt\n band = gdal_layer.GetRasterBand(1)\n gdal_value = struct.unpack('f', band.ReadRaster(1, 1, 1, 1, buf_type=band.DataType))[0]\n\n\n print(gdal_value)\n\n building_geometry\n\n print(building_geometry.head(1))\n\n print(building_geometry.get_value(0, 'geometry'))\n\n building_footprint_box = building_footprint_real_to_box(building_geometry)\n\n print(building_footprint_box.get_value(0, 'geometry'))\n print(building_footprint_box.head(1))\n\n building_centroid_DEM_height(building_footprint_box, gdal_layer)\n\n building_geometry.plot()\n\n # testing some code with rtree from stackoverflow\n # ***********************************************\n\n # List of non-overlapping polygons\n polygons = [\n Polygon([(0, 0), (0, 1), (1, 1), (0, 0)]),\n Polygon([(0, 0), (1, 0), (1, 1), (0, 0)]),\n ]\n\n # Populate R-tree index with bounds of polygons\n idx = index.Index()\n for pos, poly in enumerate(polygons):\n idx.insert(pos, poly.bounds)\n\n # Query a point to see which polygon it is in\n # using first Rtree index, then Shapely geometry's within\n point = Point(0.5, 0.2)\n poly_idx = [i for i in idx.intersection((point.coords[0]))\n if point.within(polygons[i])]\n for num, idx in enumerate(poly_idx, 1):\n print(\"%d:%d:%s\" % (num, idx, polygons[idx]))\n","sub_path":"sandbox/ghapple/window_generator.py","file_name":"window_generator.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"563106121","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n\nsys.path.append(os.path.abspath('.'))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_settings')\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n]\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = 'django-howl'\ncopyright = 'Benjamin Banduhn'\nversion = '0.1.0'\nrelease = '0.1.0'\nexclude_patterns = ['_build']\npygments_style = 'sphinx'\nhtml_theme = 'default'\n# html_static_path = ['_static']\nhtmlhelp_basename = 'howldoc'\nlatex_documents = [(\n 'index', 'howl.tex', 'django-howl Documentation', 'Benjamin Banduhn', 'manual')]\n\nman_pages = [('index', 'howl', 'django-howl Documentation', ['Benjamin Banduhn'], 1)]\n\ntexinfo_documents = [(\n 'index', 'howl', 'django-howl Documentation',\n 'Benjamin Banduhn', 'django-howl', 'Generate thumbnails of anything.', 
'Miscellaneous'\n)]\n\nintersphinx_mapping = {'http://docs.python.org/': None}\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"443379492","text":"#!/usr/bin/env python3\nimport argparse\nimport json\nfrom typing import NamedTuple, List, Any\n\nfrom github import Github\n\n\nfrom .exporthelpers.export_helper import Json\n\n\nclass GithubData(NamedTuple):\n profile: Json\n\n events: List[Json]\n followers: List[Json]\n following: List[Json]\n # TODO keys? not sure if worth keeping?\n orgs: List[Json]\n received_events: List[Json]\n repos: List[Json]\n starred: List[Json]\n subscriptions: List[Json]\n watched: List[Json]\n\n\nclass Exporter:\n def __init__(self, *args, **kwargs) -> None:\n kwargs['login_or_token'] = kwargs['token']\n del kwargs['token']\n self.api = Github(*args, **kwargs)\n\n def export_json(self) -> Json:\n login = self.api.get_user().login\n user = self.api.get_user(login) # need to get NamedUser first\n\n fields = list(GithubData._fields)\n fields.remove('profile')\n\n gd = GithubData(\n profile=user._rawData, # type: ignore[attr-defined]\n **{f: [x._rawData for x in getattr(user, 'get_' + f)()] for f in fields},\n )\n\n ## get traffic (it's only kept for 14 days :( )\n for r in gd.repos:\n # todo not ideal that we retrieve it all over again..\n repo = self.api.get_repo(r['full_name'])\n\n fields = ['views', 'clones', 'popular/referrers', 'popular/paths']\n # todo ugh. this vvv doesn't quite work because returned types are different (lists vs github. objects)\n # [x._rawData for x in getattr(repo, 'get_' + f)()]\n # ad github library doesn't expose raw api properly...\n traffic = {\n f: repo._requester.requestJsonAndCheck('GET', repo.url + '/traffic/' + f)[1] # type: ignore[attr-defined]\n for f in fields\n }\n\n assert 'traffic' not in r # just in case..\n r['traffic'] = traffic\n # TODO not sure if this is a good way to keep it...\n ##\n\n return gd._asdict()\n\n\ndef get_json(**params):\n return Exporter(**params).export_json()\n\n\ndef main():\n parser = make_parser()\n args = parser.parse_args()\n\n params = args.params\n dumper = args.dumper\n\n j = get_json(**params)\n js = json.dumps(j, ensure_ascii=False, indent=1)\n dumper(js)\n\n\ndef make_parser():\n from .exporthelpers.export_helper import setup_parser, Parser\n parser = Parser('''\nExport your Github personal data: issues, PRs, comments, followers and followings, etc.\n\n*Note*: this only deals with metadata. 
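Because PyGithub does not expose repository traffic as the raw dicts the exporter wants, the code above calls the private `_requester` directly. A sketch of that call pattern as a function; note `_requester` is private PyGithub API and may change between library versions.

```python
# Sketch: the raw-API traffic fetch the exporter performs per repo.
# `repo` is a github.Repository object; requestJsonAndCheck returns a
# (headers, data) tuple, so [1] keeps only the JSON payload.
def fetch_traffic(repo):
    fields = ['views', 'clones', 'popular/referrers', 'popular/paths']
    return {
        f: repo._requester.requestJsonAndCheck(
            'GET', repo.url + '/traffic/' + f)[1]
        for f in fields
    }
```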
If you want a download of actual git repositories, I recommend using [[https://github.com/josegonzalez/python-github-backup][python-github-backup]].\n'''.strip())\n # TODO repositories?\n setup_parser(\n parser=parser,\n params=['token'],\n extra_usage='''\nYou can also import ~ghexport.export~ as a module and call ~get_json~ function directly to get raw JSON.\n ''',\n )\n return parser\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/ghexport/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"138355445","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import ColaboradorProjeto, Projeto, ProjetoTag, Tag\n\n\n# Create your views here.\ndef listar(request):\n projetos = Projeto.objects.all()\n context = {'projetos': projetos}\n return render(request, 'projeto/list.html', context)\n\ndef exibir(request, projeto_id):\n projeto = get_object_or_404(Projeto, pk = projeto_id) \n coolaboradores = ColaboradorProjeto.objects.filter(projeto=projeto)\n tags = ProjetoTag.objects.filter(projeto=projeto)\n\n context = {\n 'projeto': projeto,\n 'colaboradores': coolaboradores,\n 'tags': tags,\n }\n\n return render(request, 'projeto/detail.html', context)\n\n\n","sub_path":"mvc/sgc/projeto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"339447910","text":"import mariadb\nimport dbconn\nimport traceback\nfrom flask import Response\n\n\ndef loop_items(cursor, rows):\n # Takes column headers from cursor.description, and rows returned from the query, and creates a zip object with them, pairing each header index with\n # the same index from row as tuples, then we do data conversion to a dictionary by wrapping the result of zip() in dict() and append it to result, which\n # starts as a empty list.\n headers = [i[0] for i in cursor.description]\n result = []\n for row in rows:\n result.append(dict(zip(headers, row)))\n return result\n\n\ndef run_query(sql, params=[]):\n # This function will run all of the queries, keeping it DRY, params starts as an empty list because we don't always need them.\n # sql argument will be the sql statement\n # params is for the prepared statements variables, must be passed in order, as a list.\n\n # setting defaults for the result\n result = {\n 'success': True,\n 'error': None,\n 'data': None\n }\n conn = dbconn.open_connection()\n cursor = dbconn.create_cursor(conn)\n # try block with a conditional inside to check what kind of statement we are passing based on how it starts.\n try:\n cursor.execute(sql, params)\n if(sql.startswith('SELECT')):\n sel_data = cursor.fetchall()\n # set the data key inside result to the result of the loop_items function from above.\n # this will be the data from the fetchall above and stictes the column names to them as keys in a dictionary.\n # we don't set success or error here because the defaults from above are correct.\n result['data'] = loop_items(cursor, sel_data)\n elif(sql.startswith('INSERT')):\n conn.commit()\n # Set data key in result to the lastrowid, this will be primary key from the table.\n result['data'] = cursor.lastrowid\n elif(sql.startswith('UPDATE') or sql.startswith('DELETE')):\n # we want rowcount from update and delete statements, so they share an elif block.\n conn.commit()\n result['data'] = cursor.rowcount\n else:\n # else for 
error catching if the sql statement does not start with SELECT, INSERT, UPDATE or DELETE\n result['success'] = False\n result['error'] = Response(\n \"Error: Method Not Allowed!\", mimetype=\"text/plain\", status=405)\n # MariaDB exceptions with a catchAll as the last exception\n # they all set the success key to False, and the error key to the Response.\n except mariadb.InternalError:\n result['success'] = False\n result['error'] = Response(\n \"Internal Server Error, Please try again later!\", mimetype=\"text/plain\", status=500)\n traceback.print_exc()\n except mariadb.IntegrityError:\n result['success'] = False\n result['error'] = Response(\n \"Error: Possible duplicate data or foreign key conflict!\", mimetype=\"text/plain\", status=409)\n traceback.print_exc()\n except mariadb.DataError:\n result['success'] = False\n result['error'] = Response(\n \"Internal Server Error, Please try again later!\", mimetype=\"text/plain\", status=500)\n traceback.print_exc()\n except:\n result['success'] = False\n result['error'] = Response(\n \"Internal Server Error, Please try again later!\", mimetype=\"text/plain\", status=500)\n traceback.print_exc()\n\n dbconn.close_all(conn, cursor)\n\n return result\n\n\ndef input_handler(endpoint_args, u_inputs=[]):\n '''\n u_inputs should be a list of dicts for each input, which can be optional or required.\n endpoint_args will be either request.args or request.json\n\n # ? this is how we pass the inputs for each endpoint.\n [\n {\n required: True,\n name: var_name,\n type: str\n },\n {\n required: True,\n name: var_name,\n type: int\n }\n ]\n '''\n # payload will be the result we return\n payload = {\n 'success': True,\n 'error': None,\n 'data': {}\n }\n for u_input in u_inputs:\n # u_input will be a dictionry\n try:\n if(u_input.get('rule') != None and endpoint_args.get(u_input['name']) != None):\n result = u_input['rule'](endpoint_args[u_input['name']])\n\n if(result['success'] == False):\n payload['success'] = False\n payload['error'] = result['message']\n return payload\n\n # if the user input is required, we add a key value pair inside the data dictionary in payload. 
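Taken together, `input_handler` validates and coerces request data while `run_query` executes a prepared statement and normalizes errors into Flask responses. A hypothetical endpoint wiring the two; the route, table, and field names are illustrative, not from the source, and MariaDB Connector/Python's `?` placeholder style is assumed as in `run_query`'s params list.

```python
# Sketch: hypothetical Flask endpoint combining input_handler + run_query.
from flask import Flask, request

app = Flask(__name__)

@app.route("/users/search")
def user_search():
    checked = input_handler(request.args, [
        {"required": True, "name": "email", "type": str},
        {"required": False, "name": "limit", "type": int},
    ])
    if not checked["success"]:
        return checked["error"]          # already a flask Response object
    result = run_query("SELECT id, email FROM users WHERE email = ?",
                       [checked["data"]["email"]])
    if not result["success"]:
        return result["error"]
    return {"users": result["data"]}
```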
key will be the value from name, and value will be\n # the the value of name as a key in endpoint_args, wrapped in the type value.\n # ie: login_token: str(request.json['login_token'])\n if(u_input['required'] == True):\n # added another error handler due to an issue with KeyError, works for now.\n if(endpoint_args[u_input['name']] == ''):\n payload['success'] = False\n payload['error'] = Response(\n f\"Required field {u_input['name']} is empty!\", mimetype=\"text/plain\", status=422)\n\n payload['data'][u_input['name']] = u_input['type'](\n endpoint_args[u_input['name']])\n else:\n # if user input is not required, and the user did pass data we do the same as above, we dont use .get in the setting of the key\n # because we know there was some data passed.\n if(endpoint_args.get(u_input['name']) != None and endpoint_args.get(u_input['name']) != ''):\n payload['data'][u_input['name']] = u_input['type'](\n endpoint_args[u_input['name']])\n # exceptions that set the success key to False and the response to f strings to be more specific about what went wrong.\n except ValueError:\n traceback.print_exc()\n payload['success'] = False\n payload['error'] = Response(\n f\"Error: {u_input['name']} is invalid!\", mimetype=\"text/plain\", status=422)\n except KeyError:\n traceback.print_exc()\n payload['success'] = False\n payload['error'] = Response(\n f\"Required field {u_input['name']} is empty!\", mimetype=\"text/plain\", status=422)\n except:\n traceback.print_exc()\n payload['success'] = False\n payload['error'] = Response(\n f\"Error: Unknown data error with {u_input['name']}\", mimetype=\"text/plain\", status=400)\n\n return payload\n","sub_path":"dbh.py","file_name":"dbh.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"102994631","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom account.forms import editProfileForm\n@login_required(login_url='/')\ndef editProfile(request):\n if request.method == 'POST':\n form=editProfileForm(request.POST, request.FILES, instance=request.user)\n if form.is_valid():\n form.save()\n messages.success(request,'Profile Edited')\n else:\n form=editProfileForm(instance= request.user) \n return render(request,'pages/editProfile.html', context={\n 'form':form\n })\n","sub_path":"account/views/editProfile.py","file_name":"editProfile.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"540598186","text":"from .sql import Sql\nfrom sdbys.items import SdbysItem\n\n\nclass SdbysPipeline(object):\n def process_item(self, item, spider):\n if isinstance(item, SdbysItem):\n guid = item['guid']\n ret = Sql.select_guid(guid)\n # 去重\n if ret[0] == 1:\n print('已经存在了')\n pass\n else:\n name = item['name']\n xueli = item['xueli']\n zhuanye = item['zhuanye']\n school = item['school']\n year = item['year']\n sex = item['sex']\n time = item['time']\n guid = item['guid']\n Sql.insert_sdbys(name, xueli, zhuanye, school, year, sex, time, guid)\n print('成功存储 '+name)\n","sub_path":"sdbys/mysqlpipelines/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"536873105","text":"import click\nfrom datetime import datetime\nimport psutil\nimport subprocess\nimport 
re\nimport sh\nimport psycopg2\nfrom pathlib import Path\nimport os\nimport pyperclip\nimport shlex\nimport sys\nimport logging\n\n# import ptvsd\n\n# # 5678 is the default attach port in the VS Code debug configurations\n# print(\"Waiting for debugger attach\")\n# ptvsd.enable_attach(address=('localhost', 5679), redirect_output=True)\n# ptvsd.wait_for_attach()\n\n\nCOLORS = {\n 'black': '#011627',\n 'blue': '#05668d',\n 'white': '#fdfffc',\n 'green': '#2ec4b6',\n 'red': '#e71d36',\n 'yellow': '#ff9f1c',\n 'grey': '#7c7c7c',\n 'gold': '#efc88b'\n}\n\nSUPPORT_DIR = Path(\"/home/odoo/support\")\nSUPPORT_TOOLS_DIR = SUPPORT_DIR / \"support-tools\"\nSRC_DIR = SUPPORT_DIR / \"src\"\nODOO_DIR = SRC_DIR / \"odoo\"\nENTERPRISE_DIR = SRC_DIR / \"enterprise\"\nDESIGN_THEMES_DIR = SRC_DIR / \"design-themes\"\nINTERNAL_DIR = SUPPORT_DIR / \"internal\"\nOE_SUPPORT = SUPPORT_TOOLS_DIR / \"oe-support.py\"\nODOO = lambda version: ODOO_DIR / (\"odoo.py\" if version <= 9 else \"odoo-bin\")\nENVS_DIR = Path('/home/odoo/miniconda3/envs')\nDB_PREFIX = 'oe_support_'\nVERSION_MAP = {\n 8: ['8.0', 'saas-6'],\n 9: ['9.0'] + ['saas-%s' % se for se in [7, 8, 9, 10, 11]],\n 10: ['10.0'] + ['saas-%s' % se for se in [12, 13, 14, 15]],\n 11: ['11.0'] + ['saas-%s' % se for se in [11.1, 11.2, 11.3, 11.4]],\n 12: ['12.0'] + ['saas-%s' % se for se in [11.5]],\n}\nVERSION_MAP = {v: s for (s, vl) in VERSION_MAP.items() for v in vl} # inverse the map for easier use\n\n@click.group()\n@click.pass_context\ndef cli(ctx):\n \"\"\"\n My personal commands in the terminal.\n \"\"\"\n pass\n\n@cli.group()\ndef i3():\n pass\n\n@cli.command(\"hello\")\n@click.option('--name', default=\"World\")\ndef hello(name):\n click.echo(f\"Hello {name}!\")\n\n@i3.command(\"date\")\n@click.option(\"--format\", default=\"%Y-%m-%d (%A)\")\ndef date(format):\n click.echo(datetime.now().strftime(format))\n\n@i3.command(\"time\")\n@click.option(\"--format\", default=\"%H:%M\")\ndef time(format):\n click.echo(datetime.now().strftime(format))\n\n@i3.command(\"mem\")\ndef mem():\n percent=int(psutil.virtual_memory().percent)\n if percent > 80:\n color = COLORS.get('red')\n elif percent > 40:\n color = COLORS.get('gold')\n else:\n color = COLORS.get('green')\n click.echo(f\"MEM{percent:>3}%\")\n\n@i3.command(\"cpu\")\ndef cpu():\n percent=int(psutil.cpu_percent(interval=1))\n if percent > 80:\n color = COLORS.get('red')\n elif percent > 40:\n color = COLORS.get('gold')\n else:\n color = COLORS.get('green')\n click.echo(f\"CPU{percent:>3}%\")\n\n@i3.command(\"volume\")\ndef volume():\n master = subprocess.check_output(['amixer', 'get', 'Master'], universal_newlines=True)\n output = re.search('Mono: [A-z0-9\\s]*\\[([0-9]*)%\\].*\\[(on|off)\\]', master)\n if output:\n status = output.group(2)\n vol = int(output.group(1))\n\n headphone = subprocess.check_output(['amixer', 'get', 'Headphone'], universal_newlines=True)\n headphone_status = re.search('Front Left: [A-z,0-9,\\s]*\\[([0-9]*)%\\].*\\[([a-z]*)\\]', headphone).group(2)\n\n device = \"HEADPHONE\" if headphone_status == 'on' else \"SPEAKER\"\n if vol > 90:\n color = COLORS.get('red')\n elif vol > 50:\n color = COLORS.get('gold')\n else:\n color = COLORS.get('green')\n \n if status == 'off' or vol == 0:\n color = COLORS.get('grey')\n\n click.echo(f\"{device}{'' if status == 'off' else f'{vol:>3}%'}\")\n\n@i3.command(\"battery\")\ndef battery():\n battery = subprocess.check_output(['acpi', '-b'], universal_newlines=True)\n pattern = \"Battery 0: (?P\\w*), (?P\\d*)%, (?P\\d\\d)\\:(?P\\d\\d)\\:\\d\\d 
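Several of the i3 status commands above derive a `color` from the same percent thresholds (`> 80` red, `> 40` gold, else green, with variants for battery and disk), but the subsequent `click.echo` f-strings never include it, so the value is currently unused. A small shared mapper, sketched on the mem/cpu thresholds; how the color is ultimately injected into the bar output is left to the status-bar markup in use.

```python
# Sketch: shared percent -> color mapper for the i3 status commands.
# Note the commands above compute `color` but never emit it.
def level_color(percent, warn=40, crit=80):
    if percent > crit:
        return COLORS['red']
    if percent > warn:
        return COLORS['gold']
    return COLORS['green']

# e.g. in mem(): color = level_color(percent), then include it in the
# bar markup alongside f"MEM{percent:>3}%".
```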
[\\w\\s]+\"\n output = re.search(pattern, battery)\n percent = int(output.group('percent'))\n state = output.group('state')\n hour = output.group('hour')\n minute = output.group('min')\n\n if percent > 80:\n color = COLORS.get('green')\n elif percent > 20:\n color = COLORS.get('gold')\n else:\n color = COLORS.get('red')\n\n if state != 'Discharging':\n color = COLORS.get('green')\n click.echo(f\"CHARGING{percent:>3}% ({hour}:{minute})\")\n else:\n click.echo(f\"BAT{percent:>3}% ({hour}:{minute})\")\n\n@i3.command(\"disk_usage\")\ndef disk_usage():\n disk = psutil.disk_usage('/')\n\n if disk.percent > 80:\n color = COLORS.get('red')\n elif disk.percent > 20:\n color = COLORS.get('gold')\n else:\n color = COLORS.get('green')\n\n free_gb = int(disk.free / (1024 ** 3))\n total_gb = int(disk.total / (1024 ** 3))\n\n click.echo(f\"DISK{int(disk.percent):>3}% ({free_gb}gb)\") \n\n@i3.command(\"keyboard_layout\")\ndef keyboard_layout():\n out = sh.setxkbmap('-query')\n layout = re.split(\"\\s+\", re.split(\"\\n\", out.strip())[-1])[-1]\n color = COLORS.get(\"green\") if layout == \"us\" else COLORS.get(\"red\")\n click.echo(f\"{layout}\")\n\n@cli.command(\"change-lockscreen\")\n@click.option(\"-r\", \"--resolution\", help=\"Resolution (3520x1080 default)\")\n@click.option(\"-d\", \"--wallpapers-dir\")\ndef change_lockscreen(resolution, wallpapers_dir):\n if not wallpapers_dir:\n wallpapers_dir = \"/home/odoo/Wallpapers\"\n if not resolution:\n resolution = \"3520x1080\"\n cmd = f\"betterlockscreen -u {wallpapers_dir} -r {resolution}\"\n subprocess.check_call(shlex.split(cmd))\n\nclass DBDoesntExistError(Exception):\n pass\n\ndef list_database():\n with psycopg2.connect(\"dbname=postgres\").cursor() as cur:\n cur.execute(\"select datname from pg_database;\")\n return [row[0] for row in cur.fetchall()]\n\ndef db_name(db):\n dbs = list_database()\n if f\"oe_support_{db}\" in dbs:\n return f\"oe_support_{db}\"\n return db\n # raise DBDoesntExistError(f\"{db}{f' and oe_support_{db}' if not db.startswith('oe_support') else ''} don't exist.\")\n\ndef _get_logins(db, limit):\n query = \"select login from res_users where active order by id\"\n if limit:\n query += f\" limit {limit}\"\n db = db_name(db)\n with psycopg2.connect(f\"dbname={db}\").cursor() as cur:\n cur.execute(f\"{query};\")\n return [row[0] for row in cur.fetchall()]\n\ndef _get_admin(db):\n admin_id_query = \"select res_id from ir_model_data where module = 'base' and name = 'user_admin';\"\n root_id_query = \"select res_id from ir_model_data where module = 'base' and name = 'user_root';\"\n login_query = \"select login from res_users where id = {}\"\n db = db_name(db)\n with psycopg2.connect(f\"dbname={db}\").cursor() as cur:\n # TODO refactor by determining the db version\n cur.execute(admin_id_query)\n admin = cur.fetchall()\n if len(admin)==1:\n cur.execute(login_query.format(admin[0][0]))\n return cur.fetchall()[0][0]\n cur.execute(root_id_query)\n root = cur.fetchall()\n cur.execute(login_query.format(root[0][0]))\n return cur.fetchall()[0][0]\n\ndef get_version(db):\n \"\"\"Return the version of the database in git compatible notation (i.e. 
9.0, saas-11, etc.).\"\"\"\n query = \"select replace((regexp_matches(latest_version, '^\\d+\\.0|^saas~\\d+\\.\\d+|saas~\\d+'))[1], '~', '-') from ir_module_module where name='base'\"\n cmd = ['psql','-tAqX', '-d', '%s' % (db,), '-c', query]\n try:\n return subprocess.check_output(cmd).decode('utf-8').replace('\\n','')\n except subprocess.CalledProcessError:\n logging.info(\"Database not present on system, how about you fetch it first, hum ?\")\n sys.exit(0)\n\ndef db_exists(db):\n dbs = list_database()\n return f\"oe_support_{db}\" in dbs or db in dbs\n\n@cli.command(\"support\")\n@click.argument('db', metavar='')\n@click.option('--get-logins', is_flag=True)\n@click.option('--get-admin', is_flag=True)\n@click.option('--update', '-u', is_flag=True, help=\"Updates the base module. Useful when custom modules' states are set to 'to remove'.\")\n@click.option('--vscode', '-v', is_flag=True, help=\"Debug using vscode. Don't forget to attach the process.\")\n@click.option('--silent', '-s', is_flag=True, help=\"Do not show INFO messages in the log.\")\n@click.option('--restore', '-r', is_flag=True, help=\"Restore the initial state of the .\")\n@click.option('--dump', '-d', type=click.Path(), help=\"Restore a given downloaded [sh] database to the given .\")\n@click.option('--info', '-i', is_flag=True, help=\"Shows the metadata of the .\")\n@click.option('--copy-command', '-c', is_flag=True, help=\"Copies the command to the clipboard.\")\n@click.option('--port', '-p', type=str)\n@click.option('--fetch', is_flag=True, help=\"Fetch new database.\")\n@click.option('--shell', is_flag=True, help=\"Run shell instance on the given db.\")\n@click.option('--init', help=\"Initialize a database.\")\n@click.pass_context\ndef support(ctx, db, get_logins, get_admin, silent, restore, update, vscode, dump, info, copy_command, port, fetch, shell, init):\n if get_logins:\n show_logins(db, None)\n return\n if get_admin:\n show_admin(db)\n return\n if info:\n show_info(db) \n return\n if dump:\n load_dump(db, dump)\n if not db_exists(db) and not init:\n fetch_cmd(db)\n start(db, silent, restore, update, vscode, copy_command, port, fetch, shell, init)\n\ndef load_dump(db, dump_relative_path):\n dump_path = Path.cwd() / dump_relative_path\n cmd = shlex.split(f\"{OE_SUPPORT} restore-dump {db} {dump_path.absolute()} --no-start\")\n try:\n subprocess.check_call(cmd)\n except Exception as err:\n click.echo(str(err), err=True)\n \ndef show_info(db):\n cmd = shlex.split(f\"{OE_SUPPORT} info {db}\")\n subprocess.check_call(cmd)\n\ndef fetch_cmd(db):\n cmd = shlex.split(f\"{OE_SUPPORT} fetch {db} --no-start\")\n subprocess.check_call(cmd)\n\ndef get_python(db, version=None):\n if not version:\n version = get_version(db)\n return ENVS_DIR / str(version) / 'bin/python'\n\ndef start(db, silent, restore, update, vscode, copy_command, port, fetch, shell, init):\n if db_name(db).startswith(\"oe_support_\"):\n python = get_python(f\"{DB_PREFIX}{db}\")\n subcommand = fetch and \"fetch\" or f\"{'restore' if restore else 'start'}\"\n server_cmd = shlex.split(f\"{OE_SUPPORT} {subcommand} {db} {'--update' if update else ''} {'--vscode' if vscode else ''} {'--shell' if shell else ''} {'--debug' if silent else ''} --python {str(python)}\")\n chrome_cmd = shlex.split(f\"google-chrome http://localhost:8569/web/login?debug\")\n else:\n server_cmd = test_db_command(db, update, vscode, port, shell, init)\n chrome_cmd = shlex.split(f\"google-chrome http://localhost:{port or '8069'}/web/login\")\n if copy_command:\n pyperclip.copy(\" 
\".join(server_cmd))\n return\n if not init:\n get_admin_cmd = shlex.split(f\"perc support {db} --get-admin\")\n proc_list = [subprocess.Popen(cmd) for cmd in [server_cmd, get_admin_cmd] + (not shell and [chrome_cmd] or [])]\n for proc in proc_list:\n proc.wait()\n\ndef show_logins(db, limit):\n \"\"\"This command prints the logins of .\"\"\"\n try:\n logins = _get_logins(db, limit)\n click.echo(\"\\n\".join(logins))\n except DBDoesntExistError as err:\n click.echo(str(err), err=True) \n\ndef show_admin(db):\n \"\"\"This command prints the login of the admin of .\"\"\"\n try:\n admin = _get_admin(db)\n pyperclip.copy(admin)\n click.echo(f\"{admin}\")\n except DBDoesntExistError as err:\n click.echo(str(err), err=True)\n\ndef test_db_command(db, update, vscode, port, shell, init):\n if not init:\n version = get_version(db)\n else:\n version = init\n python_script = [f\"{get_python(db, version)}\"] + (vscode and \"-m ptvsd --host localhost --port 5678\".split(\" \") or [])\n odoo_script = [f\"{get_odoo_script(version)}\"] + (shell and ['shell'] or [])\n default_options = f\"--xmlrpc-port={port or '8069'} --max-cron-threads=0 --load=saas_worker,web --db-filter=^{db}$\".split(\" \")\n addons_path_option = [f\"--addons-path=/home/odoo/support/src/{version}/enterprise,/home/odoo/support/src/{version}/design-themes,/home/odoo/support/internal/default,/home/odoo/support/internal/trial,/home/odoo/support/src/{version}/odoo/addons\"]\n db_options = f\"-d {db}\".split(\" \") + (update and \"-u base\".split(\" \") or [])\n return python_script + odoo_script + addons_path_option + default_options + db_options\n\ndef get_odoo_script(version):\n serie = VERSION_MAP[version]\n odoobin = \"odoo.py\" if serie <= 9.0 else \"odoo-bin\"\n return SRC_DIR / version / \"odoo\" / odoobin","sub_path":"perc/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":12770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"476212897","text":"# import matplotlib.pyplot as plt\n\n# plt.plot([3,4,7,6,2,8,9])\n# plt.savefig('test')\n\n# import matplotlib.pyplot as plt\n# import numpy as np\n\n# t = np.arange(0.,4.,0.1)\n# plt.bar(t,t**2)\n# plt.show()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n \n# x = np.linspace(0, 1)\n# y = np.sin(4 * np.pi * x) * np.exp(-5 * x)\n# plt.title('The graph')\n# plt.xlabel('x')\n# plt.ylabel('y')\n# plt.plot(x, y,'r--')\n# plt.show()\n\nx = np.linspace(-np.pi,np.pi,300)\n# plt.figure(1)\n# plt.subplot(211)\n# plt.plot(x,np.sin(x),'r')\n# plt.subplot(212)\n# plt.plot(x,np.cos(x),'g')\n# plt.show()\n\nfig,(ax0,ax1) = plt.subplots(2,1)\nax0.plot(x,np.sin(x),'r')\nax0.set_title('subplots1')\nplt.subplots_adjust(hspace=0.5)\nax1.plot(x,np.cos(x),'g')\nax1.set_title('subplots2')\nplt.show()\n\n","sub_path":"playwith/5.2 pyplot.py","file_name":"5.2 pyplot.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"69528026","text":"\"\"\"\r\n3. 
Write a Python program to get a string from a given string where all occurrences of its first\r\nchar have been changed to '$', except the first char itself.\r\nSample String : 'restart'\r\nExpected Result : 'resta$t'\r\n\"\"\"\r\nstr_1 = input(\"Enter a string: \")\r\n\r\nnew_list = list()\r\nnew_list.append(str_1[0])\r\nfor each in str_1[1:]:\r\n if each == str_1[0]:\r\n new_list.append('$')\r\n else:\r\n new_list.append(each)\r\nnew_str = \"\".join(new_list)\r\nprint(new_str)\r\n\"\"\"\r\nlist_1 = []\r\nfor each in str_1:\r\n list_1.append(each)\r\nindex = 1\r\nfor each in list_1[1:]:\r\n if each == list_1[0]:\r\n list_1[index] = '$'\r\n index += 1\r\nnew_str = ''.join(list_1)\r\nprint(new_str)\r\n\"\"\"\r\n","sub_path":"Jan 19/Assignment/String/Jan19_String_Qsn3.py","file_name":"Jan19_String_Qsn3.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"196566584","text":"from queue import Empty\nfrom ArrayStack import ArrayStack\n\ndef infix_to_postfix(string):\n stack = ArrayStack()\n tokens = string.split(\" \")\n precedence = {\"+\":1, \"-\":1, \"*\":2, \"/\":2, \"(\":0, \")\":0}\n for i in tokens:\n if i not in precedence:\n print(i,end =' ')\n elif i =='(':\n stack.push(i)\n elif i==')':\n while stack.top()!='(':\n print(stack.pop(),end = ' ')\n stack.pop()\n else:\n if stack.is_empty() or precedence[i]>precedence[stack.top()]:\n stack.push(i)\n else:\n while (not stack.is_empty()) and precedence[i]<=precedence[stack.top()]:\n print(stack.pop(),end = ' ')\n stack.push(i)\n while not stack.is_empty():\n print(stack.pop(), end=' ')\n print()\n\n\ninfix_to_postfix(\"( 3 + 2 ) / 4 + ( 3 * 2 + 4 )\") # OUTPUTS: 3 2 + 4 / 3 2 * 4 + +\ninfix_to_postfix(\"X + Y / ( 5 * Z ) + 10\") # OUTPUTS: X Y 5 Z * / + 10 +\n","sub_path":"Stack&Queue/Infix_to_postfix.py","file_name":"Infix_to_postfix.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"53926255","text":"import talib\nimport numpy\nimport math\nfrom config.cst import START_PENDING_EVAL_NOTE\n\nfrom evaluator.Util.abstract_util import AbstractUtil\n\n\nclass TrendAnalyser(AbstractUtil):\n\n # trend < 0 --> Down trend\n # trend > 0 --> Up trend\n @staticmethod\n def get_trend(data_frame, averages_to_use):\n trend = 0\n inc = round(1 / len(averages_to_use), 2)\n averages = []\n\n # Get averages\n for average_to_use in averages_to_use:\n averages.append(data_frame.tail(average_to_use).values.mean())\n\n for a in range(0, len(averages) - 1):\n if averages[a] - averages[a + 1] > 0:\n trend -= inc\n else:\n trend += inc\n\n return trend\n","sub_path":"evaluator/Util/trend_analyser.py","file_name":"trend_analyser.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"615987120","text":"from First_Project.games.cards.Card import Card\nfrom First_Project.games.cards.Player import Player\nfrom First_Project.games.cards.DeckOfCards import DeckOfCards\nfrom unittest import TestCase\n\n\nclass TestPlayer(TestCase):\n\n def setUp(self):\n self.card1 = Card('J',u'\\u2663')\n self.card2 = Card('Q', u'\\u2663')\n self.card3 = Card('K', u'\\u2663')\n self.r_card = Card('A', u'\\u2663')\n self.player1 = Player('name',3)\n self.player2 = Player('name',3)\n self.player3 = Player(\"name\",0)\n self.player4 = Player(\"name\", 26)\n self.player1.pack = [self.card1, self.card2, 
self.card3]\n\n def tearDown(self):\n print('Test is completed')\n\n def test_init(self):\n player5 = Player(\"name\", -4)\n player6 = Player(\"name\", 27)\n self.assertEqual(self.player2.len_pack, 3)\n self.assertEqual(self.player3.len_pack, 0)\n self.assertEqual(self.player4.len_pack, 26)\n self.assertEqual(player5.len_pack, 26)\n self.assertEqual(player6.len_pack, 26)\n with self.assertRaises(TypeError):\n player7 = Player(\"name\", \"abc\")\n\n\n def test_set_hand(self):\n deck1 = DeckOfCards()\n self.player2.set_hand(deck1)\n deck2 = DeckOfCards()\n self.player3.set_hand(deck2)\n deck3 = DeckOfCards()\n self.player4.set_hand(deck3)\n self.assertEqual(len(self.player2.pack),3)\n self.assertEqual(len(self.player3.pack),0)\n self.assertEqual(len(self.player4.pack),26)\n deck4 = DeckOfCards()\n deck4.deck = []\n player_1 = Player('name', 10)\n player_1.set_hand(deck4)\n self.assertEqual(len(player_1.pack), 0)\n\n\n\n\n def test_get_card(self):\n copy_pack = self.player1.pack.copy()\n rand_card = self.player1.get_card()\n self.assertIn(rand_card, copy_pack)\n self.assertIsInstance(rand_card, Card)\n self.assertNotIn(rand_card, self.player1.pack)\n self.assertEqual(len(self.player1.pack) + 1,len(copy_pack))\n\n\n def test_add_card(self):\n self.player1.add_card(self.r_card)\n self.assertIn(self.r_card,self.player1.pack)\n self.assertEqual(self.player1.len_pack + 1, len(self.player1.pack))\n with self.assertRaises(TypeError):\n self.player1.add_card(4)\n with self.assertRaises(TypeError):\n self.player1.add_card('Four')\n deck1 = DeckOfCards()\n with self.assertRaises(TypeError):\n self.player1.add_card(deck1)\n\n","sub_path":"games/cards/Test_Player.py","file_name":"Test_Player.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"291217914","text":"import torch.nn as nn\nimport torch\nfrom torch.autograd import Variable\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n \nclass Encode_Vis_Att(nn.Module):\n def __init__(self, opt):\n super(Encode_Vis_Att, self).__init__()\n self.fc1 = nn.Linear(opt.resSize, opt.ngh)\n self.fc2 = nn.Linear(opt.ngh, opt.ngh)\n self.fc3 = nn.Linear(opt.ngh, opt.ngh)\n self.fc4 = nn.Linear(opt.ngh, opt.attSize)\n self.lrelu = nn.LeakyReLU(0.2, True)\n self.relu = nn.ReLU(True)\n self.apply(weights_init)\n\n def forward(self, feat): \n h = self.lrelu(self.fc1(feat))\n mix_feature = self.lrelu(self.fc2(h))\n h = self.lrelu(self.fc3(mix_feature))\n h = self.lrelu(self.fc4(h))\n return h, mix_feature\n\nclass Decode_Att_Vis(nn.Module):\n def __init__(self, opt):\n super(Decode_Att_Vis, self).__init__()\n self.fc1 = nn.Linear(opt.attSize, opt.ngh)\n self.fc2 = nn.Linear(opt.ngh, opt.ngh)\n self.fc3 = nn.Linear(opt.ngh, opt.ngh)\n self.fc4 = nn.Linear(opt.ngh, opt.resSize)\n self.lrelu = nn.LeakyReLU(0.2, True)\n self.relu = nn.ReLU(True)\n self.apply(weights_init)\n\n def forward(self, feat): \n h = self.lrelu(self.fc1(feat))\n mix_feature = self.lrelu(self.fc2(h))\n h = self.lrelu(self.fc3(mix_feature))\n h = self.lrelu(self.fc4(h))\n return h, mix_feature\n \nclass ODDetector(nn.Module):\n def __init__(self, input_dim, h_size, num_classes):\n super(ODDetector, self).__init__()\n self.relu = nn.ReLU(True)\n self.fc1 = nn.Linear(input_dim, 
h_size)\n self.fc2 = nn.Linear(h_size,h_size)\n self.classifier = nn.Linear(h_size, num_classes)\n \n def forward(self,x,center_loss=False):\n h = self.relu(self.fc1(x))\n h = self.relu(self.fc2(h))\n pred = self.classifier(h)\n return pred\n","sub_path":"online_action_ucf/my_model.py","file_name":"my_model.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"133676765","text":"import smtplib\n\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport sys\n\n## This is just a small wrapper around the standard python email module.\n## It defaults to being sent from me to me, but this can be altered. \n\ndef email(text=\"\", \n TO=\"\",\n FROM=\"\",\n subject=\"TEST\",\n attachment_images=[],\n smtp_server=\"\"):\n try:\n msg=MIMEMultipart()\n msg[\"To\"]=TO\n msg[\"From\"]=FROM\n msg[\"Subject\"]=subject\n txt=MIMEText(text,'plain')\n msg.attach(txt)\n for image in attachment_images:\n fp=open(image,\"rb\")\n img=MIMEImage(fp.read())\n fp.close()\n msg.attach(img)\n s = smtplib.SMTP(smtp_server)\n s.sendmail(TO, FROM, msg.as_string())\n s.quit()\n return \"Email succesfully sent\"\n except:\n return \"Error in sending email\"\n \n \n \n \nautofocus_device = self.mmc.getAutoFocusDevice\nself.mmc.waitForDevice(autofocus_device)\n\n\ntext=\"test email from microscope machine\"\nTO=\"nate@calicolabs.com\"\nFROM=\"nate@calicolabs.com\"\nsubject=\"Microscope computer as server\"\nattachment_images=[]\n\nmsg=MIMEMultipart()\nmsg[\"To\"]=TO\nmsg[\"From\"]=FROM\nmsg[\"Subject\"]=subject\ntxt=MIMEText(text,'plain')\nmsg.attach(txt)\ns = smtplib.SMTP()\ns.sendmail(TO, FROM, msg.as_string())\ns.quit()","sub_path":"NotInUse/EmailFromNTHAYER.py","file_name":"EmailFromNTHAYER.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"11857028","text":"import argparse\nimport json\n\nimport matplotlib.pyplot as plt\n\n\ndef log_play(filename1, filename2, filename3):\n \n\t# file1 - data1\n with open(filename1, 'r') as f1:\n data1 = json.load(f1)\n nb_steps1 = data1['nb_steps']\n\n # file2 - data2\n with open(filename2, 'r') as f2:\n data2 = json.load(f2)\n nb_steps2 = data2['nb_steps']\n\n # file3 - data3\n with open(filename3, 'r') as f3:\n data3 = json.load(f3)\n nb_steps3 = data3['nb_steps']\n\n # Get value keys. 
The x axis is shared and is the number of episodes.\n keys = sorted(list(set(data1.keys()).difference(set(['nb_steps','mean_absolute_error','mean_eps','nb_episode_steps']))))\n \n figsize = (150., 5.)\n \n\n for i in range(0,len(keys)):\n \t#plt.figure(figsize=figsize)\n \tfig, ax = plt.subplots(figsize = figsize)\n \tax.plot(nb_steps1[0:4676],data1[keys[i]][0:4676],label ='seed 111')\n \tax.plot(nb_steps2,data2[keys[i]],label = 'seed 555')\n \tax.plot(nb_steps3[0:4676],data3[keys[i]][0:4676],label = 'seed 999')\n \tax.legend()\n \tplt.xlabel('number of steps')\n \tplt.ylabel(keys[i])\n \n plt.show()\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('filename1', type=str)\nparser.add_argument('filename2', type=str)\nparser.add_argument('filename3', type=str)\nargs = parser.parse_args()\n\n# Call plotting function\nlog_play(args.filename1,args.filename2,args.filename3)\n","sub_path":"DQN/log_play.py","file_name":"log_play.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"193492914","text":"import pymysql\nimport random\n\nFILE_NAME= \"founded.sql\"\nTABLE = \"publishers\"\nNEW_COLUMN = \"founded\"\nNEW_COLUMN_TYPE=NEW_COLUMN+\" YEAR\"\n\nrandom.seed()\n\nfieldNameIndex = 0\nconn = pymysql.Connect(host=\"127.0.0.1\",user=\"atlas\",database=\"gamedb\")\ncurs = conn.cursor()\ncurs.execute(\"SELECT id FROM \"+TABLE)\npubs=[]\nfor line in curs:\n pubs.append(line[0])\nconn.close()\n\nwith open(FILE_NAME,\"w\") as sql:\n sql.write(\"ALTER TABLE \"+TABLE+\"\\n ADD \"+NEW_COLUMN_TYPE+\";\\n\")\n for pub_id in pubs:\n if (random.randint(0,1) == 1):\n rand = \"19\"+str(random.randint(0,2)+7)+str(random.randint(0,9))\n else:\n rand = \"200\"+str(random.randint(0,6))\n\n sql.write(\"UPDATE \"+TABLE+\"\\n SET \"+NEW_COLUMN+\" = \"+rand\n +\"\\n WHERE id = \"+str(pub_id)+\";\\n\")\n","sub_path":"data/generateSql.py","file_name":"generateSql.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"191691441","text":"import time\nfrom keras.models import load_model\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nimport cv2 \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport os\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dropout, Flatten, Dense, Conv2D, MaxPooling2D\nfrom importlib import reload\n\n\nclass MaskDetection:\n\n def __init__(self):\n self.with_mask = []\n self.without_mask = []\n self.y = []\n self.model_trained = False\n self.model = load_model(\"./mask_detection.h5\")\n self.results = {0: 'without mask', 1: 'with mask'}\n self.color = {0: (0, 0, 255), 1: (0, 255, 0)}\n self.rect_size = 4\n self.haarcascade = cv2.CascadeClassifier(\n './haarcascade_frontalface_default.xml')\n\n\n def read_data(self):\n # Data path\n train_dir = './Dataset/train/'\n test_dir = './Dataset/test/'\n train_with_mask = os.path.join(train_dir, 'with_mask')\n train_without_mask = os.path.join(train_dir, 'without_mask')\n img_with_mask = os.listdir(train_with_mask)\n img_without_mask = os.listdir(train_without_mask)\n\n # Read Data\n for img in img_with_mask:\n image = cv2.imread(os.path.join(train_with_mask, img))\n image = cv2.resize(image, (120, 120))\n self.with_mask.append(image)\n self.y.append(1)\n for img in 
img_without_mask:\n image = cv2.imread(os.path.join(train_without_mask, img))\n image = cv2.resize(image, (120, 120))\n self.without_mask.append(image)\n self.y.append(0)\n\n def preprocessing_data(self):\n #Preprocessing Data\n self.y = np.array(self.y)\n self.with_mask = np.array(self.with_mask)\n self.with_mask = self.with_mask.reshape(658, 120, 120, 3)\n self.without_mask = np.array(self.without_mask)\n self.without_mask = self.without_mask.reshape(657, 120, 120, 3)\n self.x = np.concatenate((self.with_mask, self.without_mask), axis=0)\n\n # split data\n self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(\n self.x, self.y, test_size=0.2, random_state=42)\n\n # one_hot encoding\n self.y_train_scaled = to_categorical(self.y_train)\n self.y_test_scaled = to_categorical(self.y_test)\n\n # rescaling\n self.x_train = self.x_train/255\n self.x_test = self.x_test/255\n\n def create_model(self):\n #Modeling\n early_stop = EarlyStopping(monitor='val_loss',patience=2)\n self.model = Sequential()\n self.model.add(Conv2D(filters=24, kernel_size=(4, 4),\n activation='relu', input_shape=(120, 120, 3)))\n self.model.add(MaxPooling2D(pool_size=(4, 4)))\n\n self.model.add(Conv2D(filters=24, kernel_size=(4, 4),\n activation='relu', input_shape=(120, 120, 3)))\n self.model.add(MaxPooling2D(pool_size=(4, 4)))\n\n self.model.add(Flatten())\n self.model.add(Dense(128, activation='relu'))\n self.model.add(Dropout(0.5))\n\n self.model.add(Dense(2, activation='softmax'))\n self.model.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n self.model.summary()\n self.model.fit(self.x_train, self.y_train_scaled,\n epochs=8, validation_data=(self.x_test, self.y_test_scaled),\n callbacks=[early_stop])\n self.model.save('mask_detection.h5')\n\n def evaluate_model(self):\n if(not self.model_trained):\n self.read_data()\n self.preprocessing_data()\n test_scores = self.model.evaluate(self.x_test, self.y_test_scaled, verbose=0)\n train_scores = self.model.evaluate(self.x_train, self.y_train_scaled, verbose=0)\n train_acc=train_scores[1]\n train_loss=train_scores[0]\n test_acc = test_scores[1]\n test_loss =test_scores[0]\n\n return [train_acc,train_loss,test_acc,test_loss]\n\n def train(self):\n self.read_data()\n self.preprocessing_data()\n self.create_model()\n self.model_trained= True\n\n def predict_live(self):\n cap = cv2.VideoCapture(0)\n while (cap.isOpened()):\n rval, im= cap.read()\n if rval:\n im = cv2.flip(im, 1, 1)\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n rerect_size = cv2.resize(\n gray, (im.shape[1] // self.rect_size, im.shape[0] // self.rect_size))\n faces = self.haarcascade.detectMultiScale(rerect_size)\n for f in faces:\n (x, y, w, h) = [v * self.rect_size for v in f]\n\n face_img = im[y:y+h, x:x+w]\n rerect_sized = cv2.resize(face_img, (120, 120))\n normalized = rerect_sized/255.0\n reshaped = np.reshape(normalized, (1, 120, 120, 3))\n reshaped = np.vstack([reshaped])\n result = self.model.predict(reshaped)\n\n label = np.argmax(result, axis=1)[0]\n\n cv2.rectangle(im, (x, y), (x+w, y+h),\n self.color[label], 2)\n cv2.rectangle(im, (x, y-40), (x+w, y),\n self.color[label], -1)\n cv2.putText(im, self.results[label], (x, y-10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\n cv2.imshow('LIVE', im)\n if cv2.waitKey(1) & 0xFF == 27:\n break\n else:\n break\n cap.release()\n cv2.waitKey(0)\n\n cv2.destroyAllWindows()\n \n\n \n def predict_video(self,video):\n cap = cv2.VideoCapture(video)\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n \n while 
(True):\n rval, im= cap.read()\n \n if rval:\n time.sleep(1/fps)\n if cv2.waitKey(1) & 0xFF ==27:\n break\n else:\n break\n im = cv2.flip(im, 1, 1)\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n rerect_size = cv2.resize(\n gray, (im.shape[1] // self.rect_size, im.shape[0] // self.rect_size))\n faces = self.haarcascade.detectMultiScale(rerect_size)\n for f in faces:\n (x, y, w, h) = [v * self.rect_size for v in f]\n\n face_img = im[y:y+h, x:x+w]\n rerect_sized = cv2.resize(face_img, (120, 120))\n normalized = rerect_sized/255.0\n reshaped = np.reshape(normalized, (1, 120, 120, 3))\n reshaped = np.vstack([reshaped])\n result = self.model.predict(reshaped)\n\n label = np.argmax(result, axis=1)[0]\n\n cv2.rectangle(im, (x, y), (x+w, y+h), self.color[label], 2)\n cv2.rectangle(im, (x, y-40), (x+w, y), self.color[label], -1)\n cv2.putText(im, self.results[label], (x, y-10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\n cv2.imshow('Video', im) \n \n cap.release()\n cv2.waitKey(0)\n\n cv2.destroyAllWindows()\n \n\n\n def predict_img(self,img):\n\n img=cv2.imread(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = self.haarcascade.detectMultiScale(gray, 1.1 , 3)\n for (x, y, w, h) in faces:\n face_img = img[y:y+h, x:x+w]\n rerect_sized = cv2.resize(face_img, (120, 120))\n normalized = rerect_sized/255.0\n reshaped = np.reshape(normalized, (1, 120, 120, 3))\n reshaped = np.vstack([reshaped])\n result = self.model.predict(reshaped)\n\n label = np.argmax(result, axis=1)[0]\n\n cv2.rectangle(img, (x, y), (x+w, y+h), self.color[label], 2)\n cv2.rectangle(img, (x, y-40), (x+w, y), self.color[label], -1)\n\n\n cv2.putText(img, self.results[label], (x, y-10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"Mask_Detection.py","file_name":"Mask_Detection.py","file_ext":"py","file_size_in_byte":8391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"492677782","text":"#!/usr/bin/python\n\n#! Internal Imports !#\nimport os\nimport sys\n\n'''\n\n Author: Mipsel / Sprite\n Date: 7/28/17 - July 28th, 2017.\n\n'''\n\n#! 
Color Codes !#\nW = '\\033[0m' # white (normal)\nR = '\\033[31m' # red\nG = '\\033[32m' # green\nO = '\\033[33m' # orange\nB = '\\033[34m' # blue\nP = '\\033[35m' # purple\nC = '\\033[1;36m' # cyan\nGR = '\\033[37m' # gray\nT = '\\033[93m' # tan\nY = '\\033[1;33m' # yellow\n\nif os.name == 'posix':\n\tcs = 'clear'\nelse:\n\tcs = 'cls'\n\nreadFile = sys.argv[1]\nwriteFile = sys.argv[1]\n\ndef get_banner():\n\tos.system(cs)\n\tprint(R+\"-\" * 41)\n\tprint(R+\"|\"+C+\" ______ __ \t\"+R+\"|\"+W)\n\tprint(R+\"|\"+C+\" / ____// /____ _ ____ ___ ___ \t\"+R+\"|\"+W)\n\tprint(R+\"|\"+C+\" / /_ / // __ `// __ `__ \\ / _ \\ \t\"+R+\"|\"+W)\n\tprint(R+\"|\"+C+\" / __/ / // /_/ // / / / / // __/ \t\"+R+\"|\"+W)\n\tprint(R+\"|\"+C+\" /_/ /_/ \\__,_//_/ /_/ /_/ \\___/ \t\"+R+\"|\"+W)\n\tprint(R+\"-\" * 41)\n\n\ndef read_file():\n\tget_banner()\n\tfileName = raw_input(\"File: \")\n\tfile = open(fileName, \"r\")\n\tfor line in file:\n\t\tprint (line)\n\n\ndef write_file():\n\tget_banner()\n\tfileName = raw_input(\"File: \")\n\twhile True:\n\t\tfile = open(fileName, \"a\")\n\t\tfileWrite = raw_input(\": \")\n\t\tfile.write(fileWrite+\"\\r\\n\")\n\t\tfile.close()\n\n\nif sys.argv[1] == 'read':\n\tread_file()\n\nif sys.argv[1] == 'write':\n\twrite_file()\n","sub_path":"File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"7458531","text":"import pdb\nfrom models.book import Book\nfrom models.author import Author\n\nimport repositories.book_repository as book_repository\nimport repositories.author_repository as author_repository\n\nbook_repository.delete_all()\nauthor_repository.delete_all()\n\nauthor1 = Author(\"Stephen\", \"King\")\nauthor_repository.save(author1)\n\nauthor2 = Author(\"Bret\", \"Easton-Ellis\")\nauthor_repository.save(author2)\n\nauthor_repository.select_all()\n\nbook_1 = Book(\"IT\", author1, \"Horror\", True)\nbook_repository.save(book_1)\n\nbook_2 = Book(\"Less Than Zero\", author2, \"Americana\", True)\nbook_repository.save(book_2)\n\nbook_3 = Book(\"The Shining\", author1, \"Psychological Thriller\", True)\nbook_repository.save(book_3)\n\npdb.set_trace()","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"168274370","text":"# -*- coding:utf-8 -*-\nfrom unit.survey import AnonymousSurvey\n\nmy_survey = AnonymousSurvey(\"about python\")\nmy_survey.show_question()\nwhile True:\n    response = input(\"language:\")\n    if response == 'q':\n        break\n    my_survey.store_response(response)\n\nmy_survey.show_result()\n","sub_path":"unit/language_survey.py","file_name":"language_survey.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"507213993","text":"# coding=utf-8\n\nimport unittest\nfrom framework.logger import Logger\nfrom framework.browser_engine import BrowserEngine\nfrom fky_common.login import Login\nfrom fky_common.logout import Logout\nfrom fky_pageobjects.feecontrolManagement import FeecontrolManage\n\nlogger = Logger(logger=\"AuxiliaryItems\").getlog()\n\n\nclass AuxiliaryItems (unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        browser = BrowserEngine(cls)\n        cls.driver = browser.open_browser(cls)\n        # 登录\n        login = Login()\n        login.log_in(cls)\n        feecontrol = FeecontrolManage(cls.driver)\n        feecontrol.into_feikonggl()\n
feecontrol.into_fuzhuhsx()\n\n @classmethod\n def tearDownClass(cls):\n # 登出\n logout = Logout()\n logout.log_out(cls)\n cls.driver.quit()\n\n def test1_into_auxiliary(self):\n feecontrol = FeecontrolManage(self.driver)\n title = feecontrol.get_hs_title()\n try:\n self.assertEqual(\"辅助核算项\", title, \"进入辅助核算项页面失败!\")\n logger.info(\"进入辅助核算项页面成功。\")\n except Exception as e:\n logger.error(\"执行失败!\", e)\n feecontrol.get_windows_img()\n\n def test2_save_auxiliary(self):\n feecontrol = FeecontrolManage(self.driver)\n feecontrol.click_hs_save()\n tishi = feecontrol.get_tishi()\n try:\n self.assertEqual(\"保存成功!\", tishi, \"保存失败!\")\n logger.info(\"保存成功。\")\n except Exception as e:\n logger.error(\"执行失败!\", e)\n feecontrol.get_windows_img()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"fky_testsuits/test12_auxiliary_items.py","file_name":"test12_auxiliary_items.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"91904057","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\n__author__ = 'Nb'\n\nimport threading\nimport queue\n\nfrom NbHelper import NbThread\nfrom NbHelper.Experimental import NbExtension\n\n\nclass NbBackEnd(threading.Thread):\n def __init__(self, function):\n super(NbBackEnd, self).__init__()\n self.function = function\n\n def run(self):\n self.function()\n\n\nPUBLIC_QUEUE = queue.Queue()\n\n\ndef NbBackEndImplementation():\n while True:\n if not PUBLIC_QUEUE.empty():\n message = PUBLIC_QUEUE.get()\n assert isinstance(message, NbThread.ThreadMessage)\n if message.flag == NbThread.TASK:\n result = NbExtension.intercept(message)\n PUBLIC_QUEUE.put(NbThread.ThreadMessage(\n NbThread.RESULT,\n NbThread.ResultMessage(\n result,\n message.mid\n )\n ))\n elif message.flag == NbThread.HALT:\n break\n else:\n PUBLIC_QUEUE.put(message)\n\n\nNbBackEnd(NbBackEndImplementation).start()","sub_path":"Experimental/NbBackEnd.py","file_name":"NbBackEnd.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"250413529","text":"\"\"\"RentalCar URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom user import views as UserViews\nfrom home import views\nfrom rent.views import admin_send_pdf_rent_detail_to_email\n\nurlpatterns = [\n path('logout/', UserViews.logout_view, name='logout_view'),\n path('login/', UserViews.login_view, name='login_view'),\n path('signup/', UserViews.signup_view, name='signup_views'),\n\n\n path('', include('home.urls')),\n path('home/', include('home.urls')),\n path('car/', include('car.urls')),\n path('user/', include('user.urls')),\n path('rent', include('rent.urls')),\n path('contact', views.contact, name='contact'),\n path('references', views.references, name='references'),\n path('about', views.about, name='about'),\n path('admin/', admin.site.urls),\n path('ckeditor/', include('ckeditor_uploader.urls')),\n path('search/', views.search, name='search.urls'),\n path('category//', views.ListCar, name='ListCar'),\n path('admin/rent//send-pdf/', admin_send_pdf_rent_detail_to_email, name='send_pdf_to_email'),\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"RentalCar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"211206928","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n if head is None:\n return head\n\n a, b = head, head.next\n prev = None\n head = b or a\n while a is not None and b is not None:\n c = b.next\n a.next = c\n b.next = a\n if prev is not None:\n prev.next = b\n prev = a\n if c is None:\n a, b = None, None\n else:\n a, b = c, c.next\n return head\n\n\ndef make_link_list(*vals):\n if not vals:\n return None\n start = curr = ListNode(vals[0])\n for idx in range(1, len(vals)):\n curr.next = ListNode(vals[idx])\n curr = curr.next\n return start\n\n\ndef display_link_list(node):\n from io import StringIO\n vals = []\n while node is not None:\n vals.append(node.val)\n node = node.next\n return \" -> \".join([str(val) for val in vals])\n\nif __name__ == '__main__':\n sol = Solution()\n l = make_link_list(1, 2, 3, 4)\n print(display_link_list(sol.swapPairs(l))) # 2 -> 1 -> 4 -> 3\n l = make_link_list(1, 2, 3, 4, 5)\n print(display_link_list(sol.swapPairs(l))) # 2 -> 1 -> 4 -> 3 -> 5\n l = make_link_list(1)\n print(display_link_list(sol.swapPairs(l))) # 1\n l = make_link_list(1, 2)\n print(display_link_list(sol.swapPairs(l))) # 2 -> 1\n","sub_path":"leetcode/24.Swap_Nodes_in_Pairs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"596388952","text":"from flask import Blueprint, render_template, url_for\nfrom flask.views import View, MethodView\n\nfrom app.mod_builder.models import Role, Alliance, GameSize\n\n# Define the blueprint\nmod_admin = Blueprint('mod_admin', __name__, url_prefix='/mod_admin')\n\n\n@mod_admin.route('/')\ndef index():\n return 'Admin index'\n\n\nclass CRUDView(MethodView):\n\n list_template = 'admin/list_view.html'\n\n def __init__(self, model, endpoint, list_template=None):\n self.model = model\n self.endpoint = endpoint\n self.path = 
url_for('.%s' % self.endpoint)\n if list_template:\n self.list_template = list_template\n\n def get(self):\n obj = self.model.query.all()\n return render_template(self.list_template, obj=obj, path=self.path)\n\ngamesize_view = CRUDView.as_view('gamesizes', model=GameSize, endpoint='gamesizes')\nalliance_view = CRUDView.as_view('alliances', model=Alliance, endpoint='alliances')\nrole_view = CRUDView.as_view('roles', model=Role, endpoint='roles')\n\nmod_admin.add_url_rule('/gamesizes/', view_func=gamesize_view)\nmod_admin.add_url_rule('/alliances/', view_func=alliance_view)\nmod_admin.add_url_rule('/roles/', view_func=role_view)","sub_path":"app/mod_admin/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"171543850","text":"from json import load\nfrom os.path import abspath, dirname\n\n\ndef append_format(origin: str, content: dict) -> str:\n for k, v in content.items():\n origin += '{{ \"address\": \"{}\", \"backed_ring\": {} }},'.format(k, v)\n\n return origin\n\n\nroot_dir = dirname(dirname(dirname(abspath(__file__))))\n\nwith open(''.join([root_dir, '/.maintain', '/utility', '/crab_claims_list.json'])) as f:\n j = load(f)\n\nn_j = '{ \"dot\": ['\nn_j = append_format(n_j, j['dot'])\nn_j = n_j[:-1] + '],'\n\nn_j += '\"eth\": ['\nn_j = append_format(n_j, j['eth'])\nn_j = n_j[:-1] + '],'\n\nn_j += '\"tron\": ['\nn_j = append_format(n_j, j['tron'])\nn_j = n_j[:-1] + ']}'\n\nwith open(''.join([root_dir, '/service', '/res', '/crab_claims_list.json']), 'w') as f:\n f.write(n_j)\n","sub_path":".maintain/utility/format-claims-list.py","file_name":"format-claims-list.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"576554882","text":"#-*- coding:utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport codecs\nimport re\nimport tensorflow\nimport numpy as np\n\n\n\nfrom keras.preprocessing.text import text_to_word_sequence\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n\n#Parameters\nembedding_size = 200\nglove_embedding_filename = 'data/glove.twitter.27B.200d.txt'\nquestion_filename = 'data/Computer/Computers&Internet.txt' #'question-simple.txt'\nans_filename = 'data/Computer/Computers&Internet_ans.txt' #'question-simple.txt'\n\nprocessed_filename = 'data/Computer/question-vec.txt'\nprocessed_ques_len = 'data/Computer/question-len.txt'\nprocessed_ansname = 'data/Computer/answer-vec.txt'\nprocessed_ans_len = 'data/Computer/answer-len.txt'\nprocessed_glove = 'data/Computer/glove-vec'\nindex_to_word = 'data/Computer/index_to_word.txt'\n\n\nques = []\nMAX_LENGTH = 0\nfile = open(question_filename,'r')\nfor line in file.readlines():\n row = 'starttrats ' + line.strip() + ' enddne'\n row_ = text_to_word_sequence(row)\n MAX_LENGTH = max(MAX_LENGTH, len(row_))\n ques.append(row)\nfile.close()\nans = []\nANS_MAX_LENGTH = 0\nfile = open(ans_filename,'r')\nfor line in file.readlines():\n row = 'starttrats ' + line.strip() + ' enddne'\n row_ = text_to_word_sequence(row)\n ANS_MAX_LENGTH = max(ANS_MAX_LENGTH, len(row_))\n ans.append(row)\nfile.close()\n\nembedding_index = {}\nfopen = codecs.open(glove_embedding_filename, 'r', 'utf-8')\ni=0\nfor eachLine in fopen.readlines():\n # First element in each line is the word\n values = eachLine.split()\n if len(values) < 
2:\n print(i)\n word = values[0]\n # Word vectors\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n i+=1\nfopen.close()\nembedding_index['starttrats'] = np.asarray(['0' for _ in range(embedding_size)], dtype='float32')\nembedding_index['enddne'] = np.asarray(['0' for _ in range(embedding_size)], dtype='float32')\n\nprint('Found %s word vectors.' % len(embedding_index))\n\ntotal_texts = []\ntotal_texts = ques + ans\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(total_texts)\nsequences_train = tokenizer.texts_to_sequences(ques)\nans_train = tokenizer.texts_to_sequences(ans)\nques_len = codecs.open(processed_ques_len,'w', 'utf-8')\nques_len_static = [0,0,0,0,0,0,0]\nfor seq in sequences_train:\n if len(seq) < 50: \n ques_len_static[0] += 1\n ques_len.write(str(len(seq)))\n ques_len.write(\" \")\n elif len(seq) < 100:\n ques_len_static[1] += 1\n ques_len.write(str(len(seq)))\n ques_len.write(\" \")\n elif len(seq) < 200:\n ques_len_static[2] += 1\n ques_len.write(\"100\")\n ques_len.write(\" \")\n elif len(seq) < 300:\n ques_len_static[3] += 1\n ques_len.write(\"100\")\n ques_len.write(\" \")\n elif len(seq) < 400:\n ques_len_static[4] += 1\n ques_len.write(\"100\")\n ques_len.write(\" \")\n elif len(seq) < 500:\n ques_len_static[5] += 1\n ques_len.write(\"100\")\n ques_len.write(\" \")\n else:\n ques_len_static[6] += 1\n ques_len.write(\"100\")\n ques_len.write(\" \")\nques_len.close()\nprint(\"ques_len_static:\\n\", ques_len_static)\n\nans_len = codecs.open(processed_ans_len,'w', 'utf-8')\nans_len_static = [0,0,0,0,0,0,0]\nfor seq in ans_train:\n if len(seq) < 50: \n ans_len_static[0] += 1\n ans_len.write(str(len(seq)))\n ans_len.write(\" \")\n elif len(seq) < 100:\n ans_len_static[1] += 1\n ans_len.write(str(len(seq)))\n ans_len.write(\" \")\n elif len(seq) < 200:\n ans_len_static[2] += 1\n ans_len.write(\"100\")\n ans_len.write(\" \")\n elif len(seq) < 300:\n ans_len_static[3] += 1\n ans_len.write(\"100\")\n ans_len.write(\" \")\n elif len(seq) < 400:\n ans_len_static[4] += 1\n ans_len.write(\"100\")\n ans_len.write(\" \")\n elif len(seq) < 500:\n ans_len_static[5] += 1\n ans_len.write(\"100\")\n ans_len.write(\" \")\n else:\n ans_len_static[6] += 1\n ans_len.write(\"100\")\n ans_len.write(\" \")\nans_len.close()\nprint(\"ans_len_static:\\n\", ans_len_static)\n\n#print(ques[0])\n#print(sequences_train[0])\n# # Auto filled with 0\n# remove MAX_LENGTH setting below to use the max length of all sentences.\nMAX_LENGTH = 100\ndata_train = pad_sequences(sequences_train, maxlen = MAX_LENGTH, padding='post', truncating='post')\nANS_MAX_LENGTH = 200\nans_train = pad_sequences(sequences_train, maxlen = ANS_MAX_LENGTH, padding='post', truncating='post')\n\n\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\n\n# Prepare embedding matrix\nnum_words = len(word_index)+1\nembedding_matrix = np.zeros((num_words, embedding_size))\nin_to_word = {}\nfor word, i in word_index.items():\n #print(word)\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n # Words not found in embedding index will be all zeros\n embedding_matrix[i] = embedding_vector\n in_to_word[i] = word\n\nin_w = codecs.open(index_to_word,'w', 'utf-8')\nfor i, word in in_to_word.items():\n in_w.write(str(i) + ' ' + unicode(word, 'utf8')+'\\n')\nin_w.close()\n\nnp.save(processed_glove,embedding_matrix)\nnp.savetxt(processed_filename,data_train, fmt=\"%d\", delimiter=' ')\nnp.savetxt(processed_ansname, ans_train, fmt=\"%d\", delimiter=' ')\n\nprint(\"Processing done.\")\nprint(\"Max length: \", MAX_LENGTH)\nprint(\"Embedding shape: \", embedding_matrix.shape)\nprint(\"Data shape: \", data_train.shape)\n\n\n# #Word embedding\n# def loadGloVe(filename):\n# vocab = []\n# embd = []\n# vocab.append('unk') #装载不认识的词\n# embd.append([0]*embedding_size) #这个emb_size可能需要指定\n# file = codecs.open(filename, 'r', 'utf-8')\n# for line in file.readlines():\n# row = line.strip().split(' ')\n# vocab.append(row[0])\n# embd.append(row[1:])\n# print('GloVe loaded.')\n# file.close()\n# return vocab,embd\n\n\n# vocab,embd = loadGloVe(glove_embedding_filename)\n# embedding_size = len(embd[0])\n# #Add start & end & unknown & pad token\n# PAD_TOKEN = 0\n# vocab.insert(0, '')\n# embd.insert(0, ['0' for _ in range(embedding_size)])\n# START_TOKEN = len(vocab)\n# vocab.append('')\n# embd.append(['0' for _ in range(embedding_size)])\n# END_TOKEN = len(vocab)\n# vocab.append('')\n# embd.append(['0' for _ in range(embedding_size)])\n# UKNOWN_TOKEN = len(vocab)\n# vocab.append('')\n# embd.append(['0' for _ in range(embedding_size)])\n# src_vocab_size = len(vocab)\n\n# #vocab to int\n# vocab_to_int = {}\n# for i in range(src_vocab_size):\n# vocab_to_int[vocab[i]] = i\n\n# print('Glove vector loaded. Total vocab: ', src_vocab_size, '. embedding_size: ', embedding_size)\n\n# ques = []\n# MAX_LENGTH = 0\n# file = codecs.open(question_filename,'r', 'utf-8')\n# for line in file.readlines():\n# row = line.strip()\n# row = text_to_word_sequence(row)\n# MAX_LENGTH = max(MAX_LENGTH, len(row))\n# ques.append(row)\n# file.close()\n\n# #to int & reconstruct embedding\n# re-embed = []\n# re-vocab = []\n# for q in ques:\n# for word in q:\n \n# embedding = np.asarray(embd)\n\n# tokenizer = Tokenizer()\n# tokenizer.fit_on_texts(ques)\n\n# sequences_ques = tokenizer.texts_to_sequences(ques)\n# word_index = tokenizer.word_index\n# print('Found %s unique tokens.' 
% len(word_index))\n# sequences_ques = pad_sequences(sequences_ques)\n# print('questions shape: ', sequences_ques.shape)\n\n# # Prepare embedding matrix\n# num_words = len(word_index) + 4\n# embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))\n# for word, i in word_index.items():\n# if i == 0:\n# print('impossible!!!')\n# if word in vocab:\n# embedding_vector = embedding[vocab.index(word)]\n# embedding_matrix[i] = embedding_vector\n","sub_path":"process_questions.py","file_name":"process_questions.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"32469592","text":"#!/usr/bin/python\n# -*- coding: GBK -*- \n\nfrom os import getcwd, walk\nfrom os.path import join\nfrom re import match\n\nfrom xlrd import open_workbook\nfrom xlwt import Workbook\n\n\nclass PayReader(object):\n\tdef __init__(self, dirpath, fields):\n\t\tself.dirpath = dirpath\n\t\tself.fields = fields\n\t\tself.headers = [\"No\", \"Month\", \"Depart\", \"Name\"] + list(self.fields)\n\n\tdef __cell(self, sheet, row, col):\n\t\tval = sheet.cell(row, col).value\n\t\treturn val.encode(\"GBK\") if type(val) == unicode else val\n\n\tdef __books(self):\n\t\tfor dirpath, _, filenames in walk(self.dirpath):\n\t\t\tfor filename in filenames:\n\t\t\t\tmatched = match(\"(\\d{4})\\.(\\d{2})\\.xlsx?\", filename)\n\t\t\t\tif matched:\n\t\t\t\t\tyield open_workbook(join(dirpath, filename))\n\n\tdef __iter__(self):\n\t\tfor book in self.__books():\n\t\t\tfor sheet in book.sheets():\n\t\t\t\tidxs = {}\n\t\t\t\tfor col in range(sheet.ncols):\n\t\t\t\t\tkey = self.__cell(sheet, 3, col)\n\t\t\t\t\tif key in self.fields:\n\t\t\t\t\t\tidxs[key] = col\n\n\t\t\t\tfor row in range(4, sheet.nrows):\n\t\t\t\t\torder = self.__cell(sheet, row, 0)\n\t\t\t\t\tif match(\"\\d+\", str(order)):\n\t\t\t\t\t\tvals = [order]\n\t\t\t\t\t\tvals.append(self.__cell(sheet, row, 1))\n\t\t\t\t\t\tvals.append(self.__cell(sheet, row, 2))\n\t\t\t\t\t\tvals.append(self.__cell(sheet, row, 3))\n\t\t\t\t\t\tfor field in self.fields:\n\t\t\t\t\t\t\tif field in idxs:\n\t\t\t\t\t\t\t\tvals.append(self.__cell(sheet, row, idxs[field]))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tvals.append(\"#NA\")\n\t\t\t\t\t\tyield vals\n\n\tdef export(self, filename):\n\t\tbook = Workbook(encoding=\"GBK\")\n\t\tsheet = book.add_sheet(\"Payroll\")\n\t\trowIdx = 0\n\t\tcolIdx = 0\n\t\tfor header in reader.headers:\n\t\t\tsheet.write(rowIdx, colIdx, header)\n\t\t\tcolIdx += 1\n\t\trowIdx += 1\n\t\tfor recored in self:\n\t\t\tcolIdx = 0\n\t\t\tfor field in recored:\n\t\t\t\tsheet.write(rowIdx, colIdx, field)\n\t\t\t\tcolIdx += 1\n\t\t\trowIdx += 1\n\t\tbook.save(join(self.dirpath, filename))\n\nif __name__ == \"__main__\":\n\treader = PayReader(getcwd(), (\"Amount1\", \"Amount2\"))\n\treader.export(\"Payroll.xls\")\n","sub_path":"scripts/python/terminal/payroller.py","file_name":"payroller.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"349378861","text":"#Creates a GUI and plots stock graph + 10-Q functions\n#First searches the NYSE and then NASDAQ for the ticker\n\nfrom datapackage import Package\nfrom Tkinter import *\nfrom math import *\n\nfrom alpha_vantage.timeseries import TimeSeries\n\nimport matplotlib\nimport numpy as np\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot\nfrom matplotlib.ticker import 
MaxNLocator\n\n\nimport get_10_Q_K\n\n\npackageNYSE = Package('https://datahub.io/core/nyse-other-listings/datapackage.json')\npackageNASDAQ = Package('https://datahub.io/core/nasdaq-listings/datapackage.json')\n\n#Main window\nclass stockClass:\n def __init__(self, window):\n self.window = window\n self.box = Entry()\n self.button = Button(self.window, text=\"Search\", command=self.plotGraph)\n self.box.grid(row=0,column=1, sticky=\"NW\")\n self.button.grid(row=0,column=2, sticky=\"NW\")\n self.outx = np.array([0])\n \n self.comp_name = \"\"\n self.buttonEdgar = Button(self.window, text=\"get a report\", command = lambda : get_10_Q_K.get_10_Q_K(self.comp_name))\n self.buttonEdgar.grid(row=4, column=4, sticky=\"NE\")\n \n \n self.title = Label(self.window, text=\"Company ticker\", relief=FLAT).grid(row=0,column=0, sticky=\"NW\")\n \n #Finds the company through Alpha Vantage\n def findComp(self, name): \n for resource in packageNYSE.resources:\n #if resource.descriptor['datahub']['type'] == 'derived/csv':\n if resource.tabular:\n listNYSE = resource.read()\n\n for resource in packageNASDAQ.resources:\n #if resource.descriptor['datahub']['type'] == 'derived/csv':\n if resource.tabular:\n listNASDAQ = resource.read()\n\n\n found = False\n company = \"\"\n for companyNames in listNYSE:\n if companyNames[0] == name:\n found = True\n company = companyNames[0] + \", \" + companyNames[1]\n\n if found == False:\n for companyNames in listNASDAQ:\n if companyNames[0] == name:\n found = True\n company = companyNames[0] + \", \" + companyNames[1]\n if found == False:\n return 'Could not find the company'\n \n return company\n\n \n def plotGraph(self):\n \n self.comp_name = self.box.get().upper()\n \n info_label = self.findComp(self.comp_name) \n \n #Get your own API key\n ts = TimeSeries(key='Z4GYC8KFAQZCUZ51', output_format='pandas')\n data, meta_data = ts.get_weekly_adjusted(symbol=self.comp_name)\n price_values = np.array(data['4. close'])\n time_values = np.array(list(data.index))\n \n #Short and long moving average filters to detect momentum\n short_mva = (data['4. close']).rolling(window=20).mean()\n short_mva = np.array(short_mva)\n \n long_mva = (data['4. close']).rolling(window=50).mean()\n long_mva = np.array(long_mva)\n\n\n #Graph plotting\n time_ratio = 2.1 #This magic number is for time range. 
I should use ['date'] instead\n fig = Figure(figsize=(10,6))\n main_window = fig.add_subplot(111)\n main_window.plot(time_values[int(len(time_values)/time_ratio):len(time_values)],price_values[int(len(price_values)/time_ratio):len(price_values)],color='red')\n main_window.plot(time_values[int(len(time_values)/time_ratio):len(time_values)],short_mva[int(len(short_mva)/time_ratio):len(short_mva)],color='blue')\n main_window.plot(time_values[int(len(time_values)/time_ratio):len(time_values)],long_mva[int(len(long_mva)/time_ratio):len(long_mva)], color = 'green')\n\n main_window.set_title (info_label, fontsize=16)\n main_window.set_ylabel(\"Price\", fontsize=14)\n main_window.set_xlabel(\"Date\", fontsize=14)\n main_window.xaxis.set_major_locator(MaxNLocator(6))\n\n canvas = FigureCanvasTkAgg(fig, master=self.window)\n canvas.get_tk_widget().grid(row=3,column=0, rowspan=4, columnspan=3)\n canvas.draw() \n \n #Some additional info\n Label(self.window, text=\"Current Price:\", relief=GROOVE).grid(row=3, column=3, sticky=\"NW\")\n Label(self.window, text=str(price_values[len(price_values)-1]) + '$', relief=GROOVE).grid(row=3, column=4, sticky='NE')\n","sub_path":"terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"61479258","text":"from appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nuserName = \"igoryaremchuk2\"\naccessKey = \"2f4HcvKqq8UsvEkE7z7z\"\ndesired_cap = {\n 'device': 'iPhone 8 Plus',\n 'os_version': '11'\n}\ndesired_cap['app'] = \"bs://444bd0308813ae0dc236f8cd461c02d3afa7901d\"\n\ndriver = webdriver.Remote(\"http://\" + userName + \":\" + accessKey + \"@hub-cloud.browserstack.com/wd/hub\", desired_cap)\n\ntext_button = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Text Button\"))\n)\ntext_button.click()\n\ntext_input = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Text Input\"))\n)\ntext_input.send_keys(\"hello@browserstack.com\" + \"\\n\")\n\ntime.sleep(5)\n\ntext_output = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Text Output\"))\n)\n\nif text_output!=None and text_output.text==\"hello@browserstack.com\":\n print (\"Test Passed\")\nelse:\n print (\"Test Failed\")\n\ndriver.quit()","sub_path":"test_IOS_Emulator.py","file_name":"test_IOS_Emulator.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"487858801","text":"import random\r\nimport time\r\n\r\nprint(\"\"\"***********\r\nsayi tahmin oyunu\r\n1 ile 40 arasinda bir sayi tahmin edin.\r\n************\r\n\"\"\")\r\n\r\nrastgele_sayi=random.randint(1,40)\r\ntahminsayisi=7\r\nwhile True:\r\n tahmin=int(input(\"Bir tahminde bulunun :\"))\r\n\r\n if (tahminrastgele_sayi):\r\n print(\"lutfen asagi inin..\")\r\n print(\"Kalan tahmin hakkiniz:\", tahminsayisi, \"`dir.\")\r\n tahminsayisi-=1\r\n\r\n elif(tahmin==rastgele_sayi):\r\n print(\"Tebrikler bildiniz...\")\r\n break\r\n if (tahminsayisi == 0):\r\n print(\"Tahmin hakkiniz kalmamistir,,\")\r\n break","sub_path":"sayi tahmin oyunu.py","file_name":"sayi tahmin 
oyunu.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"94972960","text":"#p125\nfrom statistics import mean, variance\nfrom math import sqrt\n\ndataset = [2, 4, 5, 6, 1, 8]\n\n# (1) 산술평균\ndef Avg(data):\n avg = mean(data)\n return avg\n\nprint('산술평균=', Avg(dataset))\n\n# (2) 분산/표준편차\ndef var_sd(date):\n avg = Avg(date)\n #list 내포\n diff = [(d-avg)**2 for d in date]\n\n var = sum(diff)/(len(data) - 1)\n sd = sqrt(var)\n\n return var, sd\n\n# (3) 함수 호출\nv, s = var_sd(dataset)\nprint('분산=', v)\nprint('표준편차=', s)\n\n#p127\n#피타고라스 정리\ndef pytha(s,t):\n a = s**2 - t**2\n b = 2 * s * t\n c = s**2+t**2\n print(\"3변의 길이:\", a, b, c)\n\npytha(2,1)\n\n#p127\n#단계1: 동전 앞면과 뒷면의 난수 확률분포 함수 정의\ndef coin(n):\n result = []\n for i in range(n):\n r = rnadom. randint(0, 1)\n if (r == 1):\n result.append(1)\n else:\n result.append(0)\n return result\n print(coin(10))\n\n# 단계 2 :몬테카를로 시뮬레이션 함수 정의\ndef montacoin(n):\n cnt = 0\n for i in range(n):\n cnt += coin(1)[0]\n result = cnt/n\n\n# 단계 3 :몬테카를로 시뮬레이션 함수 호출\n print(montacoin(10))\n print(montacoin(30))\n print(montacoin(100))\n print(montacoin(1000))\n print(montacoin(10000))\n\n\n\n","sub_path":"ch05/chapter05.lecture.step02_func_app.py","file_name":"chapter05.lecture.step02_func_app.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"118958169","text":"#!/usr/bin/env python\n# By Litrin J \n# http://www.litrin.net/\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport time, redis, hashlib, random\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\n\nclass LongPolling(tornado.web.RequestHandler):\n\n minWaitTime = 15\n maxWaitTime = 900\n\n RedisHost = \"172.16.10.132\"\n RedisPrefix = \"Q/MSG%s\"\n\n SignSalt = \"salt\"\n\n StopCode = \"{'stop':1}\"\n\n @tornado.web.asynchronous\n def get(self):\n sUid = self.get_argument(\"uid\", None)\n sSign = self.get_argument(\"sign\", None)\n sJsonCall = self.get_argument(\"jsonCallback\", None)\n\n if (sUid is None or sSign is None \n or self.checkSign(sUid, sSign) == False):\n raise tornado.web.HTTPError(404)\n self.clear()\n\n else:\n self.doLongPolling(callback=self.onWaitting)\n\n def checkSign(self, sUid, sSign):\n return True\n\n def doLongPolling(self, callback):\n # Check if the client close the connection\n if self.request.connection.stream.closed():\n self.clear()\n\n sKey = self.RedisPrefix % self.uid\n res = redis.Redis(self.RedisHost)\n sMessage = res.rpop(sKey)\n del res # close the redis at 1st time\n\n callback(sMessage)\n\n def onWaitting(self, sMessage):\n if (sMessage is not None):\n self.onRespones(sMessage)\n else:\n iNextPollingTime = time.time() + self.minWaitTime\n if self.minWaitTime < self.maxWaitTime:\n self.minWaitTime *= 2\n # Can't useing time.sleep for no-bloking mode\n tornado.ioloop.IOLoop.instance().add_timeout(\n iNextPollingTime ,\n lambda: self.doLongPolling(callback=self.onWaitting)\n )\n else:\n self.onRespones(self.StopCode)\n\n def onRespones(self, sMessage):\n sJsonCall = self.get_argument(\"jsonCallback\", None)\n if sJsonCall is not None:\n sMessage = sJsonCall + \"( \" + sMessage.decode('utf-8') + \")\"\n\n self.write(sMessage)\n self.finish()\n\ndef main():\n tornado.options.parse_command_line()\n application = tornado.web.Application([\n (r\"/\", 
LongPolling),\n ])\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"137686640","text":"import requests\nimport json\n\ncontroller = '10.0.192.123'\n\ndef post_tokens():\n url = 'http://%s:5000/v3/auth/tokens' % controller\n headers = {'Content-Type': 'application/json'}\n data = {\n \"auth\": {\n \"scope\": {\n \"project\": {\n \"domain\": {\n \"name\": \"default\"\n },\n \"name\": \"admin\"\n }\n },\n \"identity\": {\n \"password\": {\n \"user\": {\n \"domain\": {\n \"name\": \"default\"\n },\n \"password\": \"12345\",\n \"name\": \"admin\"\n }\n },\n \"methods\": [\"password\"]\n }\n }\n }\n r = requests.post(url, data=json.dumps(data), headers=headers)\n print('post tokens request status_token %s' % r.status_code)\n return r\n\ndef get_url_from_catalogs(catalogs, catalog_type, interface):\n for catalog in catalogs:\n if catalog['type'] == catalog_type:\n endpoints = catalog['endpoints']\n for endpoint in endpoints:\n if endpoint['interface'] == interface:\n return endpoint['url']\n raise AttributeError('%s %s' % (catalog_type, interface))\n\ndef server_list(compute_url, token):\n url = '%s/servers' % compute_url\n headers = {\n 'User-Agent': 'python-novaclient',\n 'Accept': 'application/json',\n 'X-Auth-Token': token\n }\n r = requests.get(url, headers=headers)\n print(r.status_code)\n return r\n\nif __name__ == '__main__':\n r = post_tokens()\n token = r.headers['X-Subject-Token']\n catalogs = r.json()['token']['catalog']\n compute_url = get_url_from_catalogs(catalogs, 'compute', 'public')\n r = server_list(compute_url, token)\n servers = r.json()['servers']\n for server in servers:\n print(server)\n","sub_path":"python/openstack/api/nova/server_list.py","file_name":"server_list.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"64492031","text":"import numpy as np\nimport pandas as pd\n\nfrom ram.utils.time_funcs import convert_date_array\n\n\ndef import_sql_output(path):\n data = pd.read_csv(path, header=0, skiprows=[1],\n sep='|', skipinitialspace=True)\n # Strip white space from columns\n data.columns = [x.strip() for x in data.columns]\n # Replace all commas\n data.replace(',', '', regex=True, inplace=True)\n # Strip whitespace from columns that are still objects\n obj_cols = data.columns[np.where(data.dtypes == object)[0]].values\n # Replace remaining NULL values\n for oc in obj_cols:\n data[oc] = data[oc].str.strip()\n data[oc] = data[oc].replace('NULL', np.nan)\n\n # Convert date columns\n def _strip_date(date):\n try:\n return date[:10]\n except:\n return date\n\n date_cols = [x for x in data.columns if x.find('Date') >= 0]\n for dc in date_cols:\n data[dc] = data[dc].apply(_strip_date)\n data[dc] = convert_date_array(data[dc])\n return data\n","sub_path":"ram/utils/read_write.py","file_name":"read_write.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433000429","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.template import loader\nfrom django.http import JsonResponse\n\nfrom datetime import datetime, timedelta\n\nfrom 
academy.apps.accounts.models import Instructor\nfrom academy.apps.accounts.models import User\nfrom academy.apps.offices.models import LogoPartner,LogoSponsor\nfrom academy.apps.students.models import Student\nfrom academy.apps.graduates.models import Graduate\n\nfrom .forms import CertificateVerifyForm\n\ndef index(request):\n context = {\n 'title': 'Home',\n 'instructors': Instructor.objects.order_by('order'),\n 'pendaftar': User.objects.registered().count(),\n 'pengguna': User.objects.actived().count(),\n 'peserta': Student.objects.participants().count(),\n 'lulus': Student.objects.graduated().count(),\n 'logo_partners': LogoPartner.objects.filter(is_visible=True).order_by('display_order'),\n 'logo_sponsors': LogoSponsor.objects.filter(is_visible=True).order_by('display_order')\n }\n return render(request, 'website/home.html', context)\n\n\ndef faq(request):\n context = {\n 'title': 'Tilil (Q&A)'\n }\n return render(request, 'website/faq.html', context)\n\n\ndef certificate_verify(request):\n form = CertificateVerifyForm(request.POST or None)\n result = None\n valid_date = None\n\n if form.is_valid():\n student = form.verification()\n if student:\n result = student\n valid_date = student.created + timedelta(days=1095)\n else:\n result = \"\"\n\n context = {\n 'title': 'Verifikasi Sertifikat',\n 'form': form,\n 'result': result,\n 'valid_date': valid_date\n }\n\n if request.is_ajax():\n html = loader.render_to_string('website/result-verify.html', context)\n return JsonResponse({'html': html})\n return render(request, 'website/cert-verify.html', context)\n\n\ndef home(request):\n context = {\n 'title': 'Home 2'\n }\n return render(request, 'website/home2.html', context)\n\n\ndef error_404(request):\n return render(request, '404.html', {})\n\n\ndef error_500(request):\n return render(request, '500.html', {})","sub_path":"academy/website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"57804000","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef rata2(x, col=None):\n sumRow = []\n for j in range(len(x[0])):\n sumCol = 0\n for k in range(len(x)):\n sumCol += x[k][j]\n sumCol = sumCol / len(x)\n sumRow.append(sumCol)\n if col is None:\n result = 0\n for i in sumRow:\n result += i\n elif col is not None and type(col) is int:\n result = sumRow[col]\n\n return result\n\n\ndef varian(x):\n sigma = 0\n for i in range(len(x)):\n sigma += (x[i][0]-rata2(x, 0)) ** 2\n result = sigma / (len(x) - 1)\n return result\n\n\ndef cov(x):\n sigma = 0\n for i in range(len(x)):\n perkalian = 1\n for j in range(len(x[0])):\n perkalian *= (x[j][i])\n sigma += perkalian\n result = sigma / (len(x) - 1)\n return result\n\n\ndef w1(x):\n return cov(x) / varian(x)\n\n\ndef w0(x, y):\n return rata2(y) - (w1(x, y) * rata2(x))\n\n\n# data = np.random.rand(3, 2) * 10\n\ndata = np.array([[1, 1],\n [2, 3],\n [4, 3],\n [3, 2],\n [5, 5]])\nx = [1, 2, 4, 3, 5]\ny = [1, 3, 3, 2, 5]\nplt.plot(x, y, 'ro')\nplt.axis([0, 6, 0, 6])\nplt.show()\n","sub_path":"linierRegresion(scikit).py","file_name":"linierRegresion(scikit).py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"75557310","text":"\r\nimport xbmc,xbmcaddon,xbmcgui,xbmcplugin,os,time\r\n\r\ndef check(url):\r\n\r\n\tREPO_FOLDER,INFO = url.split('|SPLIT|')\r\n\t\r\n\tif not os.path.exists(REPO_FOLDER):\r\n\t\tf = open(INFO,mode='r'); 
msg = f.read(); f.close()\r\n\t\tTextBoxes(\"%s\" % msg)\r\n\t\tquit()\r\n\telse: return\r\n\t\r\ndef TextBoxes(announce):\r\n\tclass TextBox():\r\n\t\tWINDOW=10147\r\n\t\tCONTROL_LABEL=1\r\n\t\tCONTROL_TEXTBOX=5\r\n\t\tdef __init__(self,*args,**kwargs):\r\n\t\t\txbmc.executebuiltin(\"ActivateWindow(%d)\" % (self.WINDOW, )) # activate the text viewer window\r\n\t\t\tself.win=xbmcgui.Window(self.WINDOW) # get window\r\n\t\t\txbmc.sleep(500) # give window time to initialize\r\n\t\t\tself.setControls()\r\n\t\tdef setControls(self):\r\n\t\t\tself.win.getControl(self.CONTROL_LABEL).setLabel('[COLOR snow]Sports[/COLOR]') # set heading\r\n\t\t\ttry: f=open(announce); text=f.read()\r\n\t\t\texcept: text=announce\r\n\t\t\tself.win.getControl(self.CONTROL_TEXTBOX).setText(str(text))\r\n\t\t\treturn\r\n\tTextBox()\r\n\twhile xbmc.getCondVisibility('Window.IsVisible(10147)'):\r\n\t\ttime.sleep(.5)\r\n\r\n","sub_path":"script.snsports/resources/lib/modules/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"253662637","text":"import urllib,re,requests\nimport sys\nimport util\nimport json\nimport os\nfrom bs4 import BeautifulSoup\n\nroutes = ['https://www.msn.com/en-ca/video/superbowl',\n'https://www.msn.com/en-ca/video/animals',\n'https://www.msn.com/en-ca/video/diyandrecipes',\n'https://www.msn.com/en-ca/video/autos',\n'https://www.msn.com/en-ca/video']\nurlCollection = []\n# If failed hit for 3 times, break\nMaxFailedHit = 5\n\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n 'Cookie':'MUID=0DA4D4BD498D6CFB3AA9D9BE4D8D6820; _SS=SID=00; videoCookiesLastCategory=en-ca=animals; _cb_ls=1; _cb=DsAPZCJmzJ0BiCB2c; _chartbeat2=.1550504320201.1550509083431.11.uAsG96ZMUeDgWcawC2JWWNCmna0Z.1; ANON=A=E43533FD3C93526D33F4F5C4FFFFFFFF&E=164d&W=1; NAP=V=1.9&E=15f3&C=gBm5NGQ6hq9_2JLke_9M_uUX-nhzUVJp3UwliRTchLXC4pE05Iv2DA&W=1; vidvol=10; adoptout={\"msaOptOut\":0,\"adIdOptOut\":0}; videoerrorcount=0; trg=0%7C0%7C0; ecasession=v2_9a22cfec7b49fc3893239e7b074a63fd_ba74fcd1-eff2-48d8-b6af-082423d58358-tuct35eea8e_1550666026_1550666984_CNawjgYQqLw-GP6e37rd9J7zBiACKAYwMDjK_QdA_qAQSI7OHlCJxAlYAGAC'\n}\n\n# Parse video info by url\ndef parseVedioInfoByMSNUrl(url):\n r = requests.get(url, headers = headers)\n res = requests.get(url)\n if res.status_code == 200:\n res.encoding = 'utf8'\n soup = BeautifulSoup(res.text, 'html.parser')\n lists = soup.select('.dplayer-video-wrap video')\n videoInfo = None\n if len(lists) > 0:\n try:\n videoInfo = json.loads(lists[0].text)\n except:\n util.log(\"failed found:\" + lists[0].text)\n if len(nextVideoList) > 0 and videoInfo is not None:\n videoInfo[\"nextUrl\"] = nextVideoList[0][\"href\"]\n return videoInfo\n\n#check and download video\ndef downloadVideo(url, path):\n if (not os.path.exists(path)):\n urllib.request.urlretrieve(url, path)\n\n#check file existed or not, create file\ndef checkAndCreateDescFile(path, title, desc):\n if (not os.path.exists(path)):\n fo = open(path, \"a\")\n fo.write(desc)\n fo.close()\n\n\n# check folder existed or not, if not create it\ndef checkAndCreateFolder(folderName):\n if (not os.path.exists(folderName)):\n os.makedirs(folderName)\n\n# handle video by videoinfo\ndef handleVideoInfo(videoInfo):\n try:\n # Create video folder\n videoDir = \"H:\\\\自媒体\\\\2019-02\"\n # + videoInfo['uploadDate'][:10]\n 
checkAndCreateFolder(videoDir) \n # Translate name and description\n name = util.translate(videoInfo['name'])\n desc = util.translate(videoInfo['description'])\n url = videoInfo['contentUrl']\n duration = videoInfo['duration']\n videoPath = videoDir + \"\\\\\"+name+\".mp4\"\n descPath = videoDir + \"\\\\\" + name + \".txt\"\n #download video\n downloadVideo(url, videoPath)\n # write desc\n checkAndCreateDescFile(descPath, name, desc)\n except:\n util.log(json.dumps(videoInfo))\n\ndef changeRoute(routeNum):\n if routeNum < len(routes):\n return routes[routeNum]\n\nif __name__ == \"__main__\":\n failedHit = 0\n initurl = \"https://www.msn.com/en-us/video/animals/the-most-adorable-hedgehogs/vi-BBTR6KW\"\n urlCollection.append(initurl)\n while failedHit < MaxFailedHit:\n try:\n videoInfo = parseVedioInfoByMSNUrl(initurl)\n handleVideoInfo(videoInfo)\n initurl = videoInfo[\"nextUrl\"]\n if (initurl in urlCollection):\n initurl = changeRoute(failedHit)\n failedHit = failedHit + 1\n else:\n urlCollection.append(initurl)\n except:\n initurl = changeRoute(failedHit)\n failedHit = failedHit + 1\n","sub_path":"Python/Spider/downloadCKTV.py","file_name":"downloadCKTV.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"457665747","text":"#Project Euler Problem 1\n#If we list all the natural numbers below 10 that are multiples of 3 or 5, \n#we get 3, 5, 6 and 9. The sum of these multiples is 23.\n#\n#Find the sum of all the multiples of 3 or 5 below 1000.\n\n\ndef main(max_number=1000):\n total = 0\n \n for number in range(max_number):\n if number%3==0 or number%5==0:\n total += number\n \n print(total)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","sub_path":"euler/euler01.py","file_name":"euler01.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"176192038","text":"import json\n\ndef query_article(path='./jawiki-country.json', query='イギリス'):\n with open(path, 'r', encoding='utf-8') as src:\n data = [json.loads(line) for line in src]\n for item in data:\n if query in item['title']:\n yield item\n\nif __name__ == '__main__':\n results = query_article()\n for i, article in enumerate(results):\n for line in article['text'].split('\\n'):\n print(line)\n","sub_path":"tosho/chapter03/knock20.py","file_name":"knock20.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"418166470","text":"\n\n#calss header\nclass _GRUB():\n\tdef __init__(self,): \n\t\tself.name = \"GRUB\"\n\t\tself.definitions = [u'to search for something by digging or turning over earth: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_grub.py","file_name":"_grub.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"464754806","text":"class Solution:\n def largestPerimeter(self, A):\n A.sort()\n for i in range(len(A) - 3, -1, -1):\n if A[i] + A[i + 1] > A[i + 2]:\n return A[i] + A[i + 1] + A[i + 2]\n return 0\n\n\nif __name__ == '__main__':\n A = [3, 6, 2, 3]\n solution = Solution()\n result = solution.largestPerimeter(A=A)\n print(result)\n","sub_path":"Easy/976. 
Largest Perimeter Triangle/976. Largest Perimeter Triangle.py","file_name":"976. Largest Perimeter Triangle.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"363433718","text":"import qrcode, datetime, requests\n\ndef create_qr_code(data):\n generator = qrcode.QRCode(\n version = 1,\n error_correction = qrcode.constants.ERROR_CORRECT_M,\n box_size = 10,\n border=4,\n )\n\n generator.add_data(data)\n generator.make()\n\n return generator.make_image()\n\n \n","sub_path":"server/services/qr_service.py","file_name":"qr_service.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"216899153","text":"# coding: utf-8\n# 2018.08.28\n\nimport requests\nimport json\nimport uuid\n\n\ndef getUserInformation():\n accessToken = \"\"\n roomUuid = \"\"\n\n with open('data/userInfo.txt') as f:\n # ファイルからテキストを取得して改行コードでlistに格納\n data = f.read()\n data = data.split(\"\\n\")\n accessToken = data[0]\n roomUuid = data[1]\n\n return accessToken, roomUuid\n\n\nif __name__ == \"__main__\":\n getUserInformation()\n","sub_path":"getUserInformation.py","file_name":"getUserInformation.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"352925315","text":"name = \"ege\"\n\nskins = [\n (\"avadefault\", 'AVA padrão'),\n (\"avaalternative\", 'AVA alternativo'),\n (\"egedefault\", 'EGE padrão'),\n (\"egealternative\", 'EGE alternativo'),\n (\"highcontrast\", 'Alto contraste'),\n (\"dark\", 'Dark'),\n (\"contrast\", 'Contraste'),\n (\"golden\", 'Dourado'),\n (\"purple\", 'Púrpura'),\n (\"navy\", 'Marinha'),\n (\"coral\", 'Coral'),\n]\n","sub_path":"ege_theme/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"564520908","text":"from sklearn import linear_model, decomposition\nimport sys\nimport logging\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nroot = '/projects/francisco/repositories/NI-ML/'\nsys.path.insert(0, root)\n\n# Load repo-specific imports:\nfrom adni_utils.experiment import experiment\nfrom adni_utils.dataset_constants import *\nfrom adni_utils.evaluate_model import evaluate\n\n\ndef pca_lr(params, n_classes):\n C = np.exp(params['log_C'])\n n_components = params['n_components']\n mclass = 'multinomial' if n_classes > 2 else 'ovr'\n solver = 'lbfgs' if n_classes > 2 else 'liblinear'\n\n logistic = linear_model.LogisticRegression(C=C, multi_class=mclass, solver=solver, penalty='l2')\n\n pca = decomposition.RandomizedPCA(n_components=n_components)\n pca_lr_classifier = Pipeline(steps=[('pca', pca), ('logistic', logistic)])\n return pca_lr_classifier, 'PCA Logistic Regression'\n\n\ndef main(job_id, params):\n \"\"\"\n Main hook for Spearmint.\n :param job_id:\n :param params:\n :return:\n \"\"\"\n score = experiment(job_id=job_id, params=params, classifier_fn=pca_lr, n=default_n_trials, test=False, **dataset_args[default_dataset])\n\n return score\n\n\nif __name__ == \"__main__\":\n # Entry point when running the script manually. 
Not run by Spearmint.\n held_out_test = True\n job_id = 0\n params = {\n 'n_components': 64,\n 'log_C': 0.5,\n }\n evaluate(params=params, classifier_fn=pca_lr, n=default_n_trials, test=False, model_metrics=None, **dataset_args[default_dataset])\n\n","sub_path":"models/LR/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"350708518","text":"import copy\nimport time\nimport random\nimport pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom pprint import pprint\nimport utils\nimport scipy.sparse as sp\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default=\"CiteULike\", help='Dataset to use.')\nparser.add_argument('--data_dir', type=str, default=\"./data/process/\", help='Director of the dataset.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed')\nparser.add_argument('--cold_object', type=str, default='item')\nargs = parser.parse_args()\npprint(vars(args))\n\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\nt0 = time.time()\n\n\"\"\"read data from file\"\"\"\ndf_val = pd.read_csv(args.data_dir + args.dataset + f'/cold_{args.cold_object}_val.csv', dtype=np.int)\ndf_test = pd.read_csv(args.data_dir + args.dataset + f'/cold_{args.cold_object}_test.csv', dtype=np.int)\ndf_pos = pd.concat([df_val, df_test])\ninfo_dict = pickle.load(open(args.data_dir + args.dataset + '/info.pkl', 'rb'))\nuser_num = info_dict['user_num']\nitem_num = info_dict['item_num']\n\n\n\"\"\"Generate users' neighboring items.\"\"\"\nval_nb = utils.df_get_neighbors(df_val)\ntest_nb = utils.df_get_neighbors(df_test)\npos_nb = utils.df_get_neighbors(df_pos)\n\nval_nb_reverse = utils.df_get_neighbors(df_val, 'item')\ntest_nb_reverse = utils.df_get_neighbors(df_test, 'item')\npos_nb_reverse = utils.df_get_neighbors(df_pos, 'item')\n\n\n\"\"\"Save results\"\"\"\npara_dict = {}\npara_dict['user_num'] = info_dict['user_num']\npara_dict['item_num'] = info_dict['item_num']\npara_dict['val_nb'] = val_nb\npara_dict['test_nb'] = test_nb\npara_dict['pos_nb'] = pos_nb\npara_dict['val_nb_reverse'] = val_nb_reverse\npara_dict['test_nb_reverse'] = test_nb_reverse\npara_dict['pos_nb_reverse'] = pos_nb_reverse\n\n\npickle.dump(para_dict, open(args.data_dir + args.dataset + f'/cold_{args.cold_object}_dict.pkl', 'wb'), protocol=4)\nprint('Process %s in %.2f s' % (args.dataset, time.time() - t0))\n","sub_path":"convert_cold.py","file_name":"convert_cold.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"339069822","text":"import codecs\nfrom time import time\nimport multiprocessing as mp\n\nletters = {}\nnumberLetters = 0\nspecialCharacters =['à', 'â', 'é', 'è', 'ê', '\\r', '\\n', 'î', 'û', 'ô', 'ç', 'ï', 'ë', 'ü', 'ä', 'ö', 'ù', '-']\n\nnumberCores = mp.cpu_count()\npool = mp.Pool(mp.cpu_count())\njobs = []\n\nfilePath = r\"C:\\Users\\ext_bantoine\\Downloads\\words.txt\"\nencoding = \"utf-8\"\n\ndef process(line):\n for letter in line:\n if(letter not in specialCharacters):\n if(letter.lower() not in letters):\n letters.update({letter.lower():1})\n else:\n letters[letter.lower()] += 1\n numberLetters += 1\n\ndef process_wrapper(lineByte):\n with codecs.open(filePath, encoding=encoding) as file:\n file.seek(lineByte)\n line = file.readline()\n process(line)\n \ndef sortDict(dicto):\n \n def findMaxDict(dicto):\n keyMax, valueMax = 
list(dicto.items())[0]\n        for key, value in dicto.items():\n            if (value > valueMax):\n                valueMax = value\n                keyMax = key\n        return (keyMax, valueMax)\n\n    sortedDict = []\n    for i in range(len(dicto)):\n        letter, frequency = findMaxDict(dicto)\n        sortedDict.append((letter, frequency))\n        del dicto[letter]\n    return sortedDict\n\ntic = time()\n# create jobs\nwith codecs.open(filePath, encoding=encoding) as file:\n    nextLineByte = file.tell()\n    print('Ligne : ' + str(nextLineByte))\n    for line in file:\n        jobs.append(pool.apply_async(process_wrapper, (nextLineByte,)))\n        nextLineByte = file.tell()\n\n# wait for all jobs to finish\nfor job in jobs:\n    job.get()\n\n# clean up\npool.close()\ntac = time()\n\nfor key in letters:\n    letters[key] /= numberLetters\n    letters[key] *= 100\n\nprint(\"Temps écoulé : \" + str(round((tac - tic)*1000, 2)) + \"ms\")\nletters = sortDict(letters)\nfor letter, frequency in letters:\n    print(letter + \" : \" + str(round(frequency, 2)))\n","sub_path":"Linguistic analysis/linguistic_analysis_mp.py","file_name":"linguistic_analysis_mp.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"74104355","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nx = list(range(1,6))\r\ny = list(range(10,51,10))\r\n\r\nplt.title('bar chart example')\r\nplt.xlabel('x label')\r\nplt.ylabel('y label')\r\n\r\n# BAR 형태의 차트 생성 방법\r\n# bar, barh 함수를 사용\r\n# bar(세로방향), barh(가로방향)\r\n# bar(x데이터, y데이터)\r\n\r\n# 참고사이트\r\n# https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar\r\n\r\nplt.bar(x, y)\r\nplt.show()\r\n","sub_path":"day_17/matplotlib_13.py","file_name":"matplotlib_13.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"32923659","text":"import numpy as np\nfrom irlc.ex08.small_gridworld import GridworldEnv\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom gym.envs.toy_text.discrete import DiscreteEnv\n\n\nclass MDPBuilder(DiscreteEnv):\n    states = []\n\n    def __init__(self):\n        nS = len(self.states)\n        nA = len({a for s in self.states for a in self.A(s)})\n\n        P = {}\n        for s in self.states:\n            ps_ = {}\n            for a in self.A(s):\n                ps_[a] = [(p, sp, r, False) for (sp, r), p in self.Psr(s, a).items()]\n            P[s] = ps_\n\n        isd = np.ones(nS) / nS  # initial state distribution\n        super().__init__(nS, nA, P, isd)\n\n    def A(self, s):\n        raise NotImplementedError\n\n    def Psr(self, s, a):\n        '''\n        Represents P(sp,r | s, a)\n\n        We will represent this as a dict. 
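The multiprocessing letter-frequency script above has its workers mutate the module-level letters and numberLetters, but each worker process gets its own copy of those objects, so nothing is aggregated back in the parent. A minimal sketch of the usual fix, under the assumption that per-line counting is the goal (the two-line corpus and function name are illustrative only):

from collections import Counter
from multiprocessing import Pool

def count_letters(line):
    # each worker returns its own partial tally instead of mutating shared state
    return Counter(ch.lower() for ch in line if ch.isalpha())

if __name__ == '__main__':
    lines = ['Bonjour tout le monde', 'Hello world']  # stand-in for lines read from the file
    with Pool() as pool:
        total = sum(pool.map(count_letters, lines), Counter())
    print(total.most_common(5))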
In other words:\n\n P(sp,r | s, a) = { (sp1,r1): p1, (sp2,r2): p2, ...}\n\n where the probabilities sum to 1 and (sp,r) is a tuple of observed next states and rewards.\n '''\n raise NotImplementedError\n\nclass GamblerE(MDPBuilder):\n def __init__(self, goal, p_heads):\n self.states = range(100)\n self.goal = goal\n self.p_heads = p_heads\n super().__init__()\n\n def A(self, s):\n return range(0, min(s, self.goal - s) + 1)\n\n def Psr(self, s, a):\n r = 1 if s + a == 100 and s < 100 else 0\n return {(s + a, r): self.p_heads, (s - a, 0): 1 - self.p_heads}\n\nclass RentalEnv(MDPBuilder):\n def __init__(self, goal, p_heads):\n self.states = range(100)\n self.max_cars = 20\n self.max_move = 5\n self.MOVE_COST = -2\n self.ADDITIONAL_PARK_COST = -4\n\n self.RENT_REWARD = 10\n # expectation for rental requests in first location\n self.RENTAL_REQUEST_FIRST_LOC = 3\n # expectation for rental requests in second location\n self.RENTAL_REQUEST_SECOND_LOC = 4\n # expectation for # of cars returned in first location\n self.RETURNS_FIRST_LOC = 3\n # expectation for # of cars returned in second location\n self.RETURNS_SECOND_LOC = 2\n import itertools\n k = np.arange(self.max_cars + 1)\n # cartesian product\n self.states = ((i, j) for i, j in itertools.product(k, k))\n super().__init__()\n\n def A(self, s):\n return range(0, min(s, self.goal - s) + 1)\n\n def Psr(self, s, a):\n r = 1 if s + a == 100 and s < 100 else 0\n return {(s + a, r): self.p_heads, (s - a, 0): 1 - self.p_heads}\n\n\ndef value_iteration(env, theta=0.0001, gamma=1.0):\n # V = defaultdict(float)\n V = defaultdict(float)\n Qs = {}\n while True:\n delta = 0\n #!b\n for s in env.P.keys():\n v = V[s]\n # Update the value function \\cite[Eq. (4.10)]{sutton}\n Qs[s] = {a: sum([p * (r + gamma * V[sp]) for p, sp, r, _ in Psa]) for a, Psa in env.P[s].items()}\n V[s] = max(Qs[s].values())\n delta = max(delta, np.abs(v - V[s]))\n #!b\n print(delta)\n if delta < theta:\n break\n\n policy = {}\n #!b\n for s in range(env.nS):\n Q_ = {a: q - a*theta/10 for a,q in Qs[s].items()}\n policy[s] = max(Q_, key=Q_.get)\n #!b\n return policy, V\n\nif __name__ == \"__main__\":\n \"\"\"\n CAR RENTAL problem\n \"\"\"\n env = RentalEnv()\n\n pi, V = value_iteration(env, gamma=.9, theta=1e-1)\n\n a = 234\n \"\"\"\n GAMBLER problem\n \"\"\"\n env = GamblerE(goal=100, p_heads=0.4)\n pi,V = value_iteration(env, gamma=1)\n\n pi = [pi[s] for s in env.states]\n V = [V[s] for s in env.states]\n\n plt.plot(env.states, V)\n plt.xlabel('Capital')\n plt.ylabel('Value Estimates')\n plt.title('Final Policy (action stake) vs State (Capital)')\n plt.show()\n\n y = [pi[s] for s in env.states]\n x = env.states\n # plotting the bars\n plt.bar(x, y, align='center', alpha=0.5)\n plt.xlabel('Capital')\n plt.ylabel('Final policy (stake)')\n plt.title('Capital vs Final Policy')\n plt.show()\n\n\n\n\n\n\n","sub_path":"irlc/ex08/old/mdp2.py","file_name":"mdp2.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"332639313","text":"import sys\nsys.path.insert(0,'./utils/')\n\nimport albumentations as A\nimport numpy as np\nfrom affine_transforms import *\n\ndef add_gaussian_noise(x, sigma):\n x += np.random.randn(*x.shape) * sigma\n x = np.clip(x, 0., 1.)\n return x\n\n\ndef _evaluate_ratio(ratio):\n if ratio <= 0.:\n return False\n return np.random.uniform() < ratio\n\n\ndef apply_aug(aug, image):\n return aug(image=image)['image']\n\n\nclass Transform:\n def __init__(self, affine=True, 
crop=True, size=(64, 64),\n normalize=True, train=True, threshold=40.,\n sigma=-1., blur_ratio=0., noise_ratio=0., cutout_ratio=0.,\n grid_distortion_ratio=0., elastic_distortion_ratio=0., random_brightness_ratio=0.,\n piece_affine_ratio=0., ssr_ratio=0.):\n self.affine = affine\n self.crop = crop\n self.size = size\n self.normalize = normalize\n self.train = train\n self.threshold = threshold / 255.\n self.sigma = sigma / 255.\n\n self.blur_ratio = blur_ratio\n self.noise_ratio = noise_ratio\n self.cutout_ratio = cutout_ratio\n self.grid_distortion_ratio = grid_distortion_ratio\n self.elastic_distortion_ratio = elastic_distortion_ratio\n self.random_brightness_ratio = random_brightness_ratio\n self.piece_affine_ratio = piece_affine_ratio\n self.ssr_ratio = ssr_ratio\n\n def __call__(self, example):\n if self.train:\n x, y = example\n else:\n x = example\n # --- Augmentation ---\n if self.affine:\n x = affine_image(x)\n\n # --- Train/Test common preprocessing ---\n if self.crop:\n x = crop_char_image(x, threshold=self.threshold)\n if self.size is not None:\n x = resize(x, size=self.size)\n if self.sigma > 0.:\n x = add_gaussian_noise(x, sigma=self.sigma)\n\n # albumentations...\n x = x.astype(np.float32)\n assert x.ndim == 2\n # 1. blur\n if _evaluate_ratio(self.blur_ratio):\n r = np.random.uniform()\n if r < 0.25:\n x = apply_aug(A.Blur(p=1.0), x)\n elif r < 0.5:\n x = apply_aug(A.MedianBlur(blur_limit=5, p=1.0), x)\n elif r < 0.75:\n x = apply_aug(A.GaussianBlur(p=1.0), x)\n else:\n x = apply_aug(A.MotionBlur(p=1.0), x)\n\n if _evaluate_ratio(self.noise_ratio):\n r = np.random.uniform()\n if r < 0.50:\n x = apply_aug(A.GaussNoise(var_limit=5. / 255., p=1.0), x)\n else:\n x = apply_aug(A.MultiplicativeNoise(p=1.0), x)\n\n if _evaluate_ratio(self.cutout_ratio):\n # A.Cutout(num_holes=2, max_h_size=2, max_w_size=2, p=1.0) # Deprecated...\n x = apply_aug(A.CoarseDropout(max_holes=8, max_height=8, max_width=8, p=1.0), x)\n\n if _evaluate_ratio(self.grid_distortion_ratio):\n x = apply_aug(A.GridDistortion(p=1.0), x)\n\n if _evaluate_ratio(self.elastic_distortion_ratio):\n x = apply_aug(A.ElasticTransform(\n sigma=50, alpha=1, alpha_affine=10, p=1.0), x)\n\n if _evaluate_ratio(self.random_brightness_ratio):\n # A.RandomBrightness(p=1.0) # Deprecated...\n # A.RandomContrast(p=1.0) # Deprecated...\n x = apply_aug(A.RandomBrightnessContrast(p=1.0), x)\n\n if _evaluate_ratio(self.piece_affine_ratio):\n x = apply_aug(A.IAAPiecewiseAffine(p=1.0), x)\n\n if _evaluate_ratio(self.ssr_ratio):\n x = apply_aug(A.ShiftScaleRotate(\n shift_limit=0.0625,\n scale_limit=0.1,\n rotate_limit=30,\n p=1.0), x)\n\n if self.normalize:\n x = (x.astype(np.float32) - 0.0692) / 0.2051\n if x.ndim == 2:\n x = x[None, :, :]\n x = x.astype(np.float32)\n if self.train:\n y = y.astype(np.int64)\n return x, y\n else:\n return x\n","sub_path":"Bengali AI handwritten recognition/SResNext/utils/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159715839","text":"# Credit https://www.learnopencv.com/barcode-and-qr-code-scanner-using-zbar-and-opencv/\nimport pyzbar.pyzbar as pyzbar\nimport numpy as np\nimport cv2\nimport struct\n\n\ndef decode_qr(im):\n return pyzbar.decode(im)\n\n\n# Display barcode and QR code location\ndef display_qr(im, decoded_objects):\n # Loop over all decoded objects\n for decoded_object in decoded_objects:\n points = decoded_object.polygon\n\n # If the points do not form 
a quad, find convex hull\n        if len(points) > 4:\n            hull = cv2.convexHull(np.array([point for point in points], dtype=np.float32))\n            hull = list(map(tuple, np.squeeze(hull)))\n        else:\n            hull = points\n\n        # Number of points in the convex hull\n        n = len(hull)\n\n        # Draw the convex hull\n        for j in range(0, n):\n            cv2.line(im, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)\n\n    for obj in decoded_objects:\n        qr_txt = obj.data\n        #print 'QR code: ', qr_txt\n\n\ndef distance_analyzer(rx, ry, cx, cy):\n    dx = cx-rx\n    dy = cy-ry\n    cord = [dx, dy]\n    return cord\n\n\ndef float_to_bits(f):\n    s = struct.pack('>f', f)\n    return struct.unpack('>l', s)[0]\n\n\ndef get_rect_points(polygon):\n    tr = br = max(polygon)\n    polygon.remove(tr)\n    point = max(polygon)\n    if point.y > br.y:\n        br = point\n    else:\n        tr = point\n    polygon.remove(point)\n    tl = bl = polygon[0]\n    polygon.remove(tl)\n    point = polygon[0]\n    if point.y > bl.y:\n        bl = point\n    else:\n        tl = point\n    return [tr, br, bl, tl]\n\n\ndef get_rect_padding(tr, br, bl, tl, width, height):\n    min_x = min(tl.x, bl.x)\n    max_x = max(tr.x, br.x)\n    min_y = min(tl.y, tr.y)\n    max_y = min(bl.y, br.y)\n\n    padding_top = min_y\n    padding_bottom = height - max_y\n    padding_left = min_x\n    padding_right = width - max_x\n\n    return [padding_top, padding_right, padding_bottom, padding_left]\n","sub_path":"src/modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"293615739","text":"class mumu:\n    store = list()\n\ndef fibBa(n: int, p: int, temp: int):\n    if (n == 0):\n        return 0\n    else:\n        n = n - 1\n        mumu.store.append(str(p))\n        old = p\n        p = p + temp\n        temp = old\n        return fibBa(n, p, temp)\n\nif __name__ == '__main__':\n    t = int(input())\n    temp = dict()\n    for i in range(t):\n        fibBa(int(input()), 1, 0)\n        temp[str(i)] = ' '.join(mumu.store)\n        mumu.store.clear()\n    for i in temp:\n        print(temp[i])\n","sub_path":"Progress/Python/PPo/PPo.py","file_name":"PPo.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"320388450","text":"#!/usr/bin/env python3.5\n\n\nfrom datetime import datetime\nimport time\nimport os\nimport cnn_db_loader\n\nimport tf_utils\nimport cnn_tf_graphs\n\nimport tensorflow as tf\nfrom tensorflow.contrib import learn\n\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('experiment_folder', '21',\n                           \"\"\"Directory where to write event logs \"\"\"\n                           \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 1000000,\n                            \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n                            \"\"\"Whether to log device placement.\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 100,\n                            \"\"\"Size of a batch.\"\"\")\n\ncnn_db_loader.NUMBER_ALPHAS = 0\ncnn_db_loader.NUMBER_IMAGES = 1\ncnn_db_loader.NUMBER_XYZ = 0\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\nMOMENTUM = 0.9\nLEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.\nINITIAL_LEARNING_RATE = 0.001  # Initial learning rate.\n\n\nPaSC_still_BASE = '/user/HS204/m09113/my_project_folder/PaSC/still/multi_fit_CCR_iter75_reg30_256/'\n#PaSC_still_BASE = '/user/HS204/m09113/my_project_folder/PaSC/still/face_boxes/'\nPaSC_video_BASE = '/user/HS204/m09113/my_project_folder/PaSC/video/multi_fit_CCR_iter75_reg30_256/'\n#PaSC_video_BASE = 
'/user/HS204/m09113/my_project_folder/PaSC/video/face_boxes/'\nCASIA_BASE = '/user/HS204/m09113/my_project_folder/CASIA_webface/multi_fit_CCR_iter75_reg30_256/'\n#CASIA_BASE = '/user/HS204/m09113/my_project_folder/CASIA_webface/face_boxes/'\nExperint_BASE = '/user/HS204/m09113/my_project_folder/cnn_experiments/'\nexperiment_dir = Experint_BASE+FLAGS.experiment_folder\ndb_dir = experiment_dir+'/db_input/'\ntrain_dir = experiment_dir+'/train'\n\ntf.logging.set_verbosity(tf.logging.DEBUG)\n\ndef train():\n\t\"\"\"Train CIFAR-10 for a number of steps.\"\"\"\n\twith tf.Graph().as_default():\n\t\tglobal_step = tf.contrib.framework.get_or_create_global_step()\n\n\t\t#cnn_db_loader.IMAGE_FILE_ENDING = '/*'\n\t\tcnn_db_loader.IMAGE_FILE_ENDING = '/*isomap.png'\n\t\tpasc_still = cnn_db_loader.PaSC_still_loader(outputfolder=db_dir, db_base=PaSC_still_BASE)\n\t\t#pasc_still.analyse_isomaps()\n\t\t#pasc_still.remove_bad_isomaps()\n\t\tpasc_video = cnn_db_loader.PaSC_video_loader(outputfolder=db_dir, db_base=PaSC_video_BASE)\n\t\t#pasc_video.analyse_isomaps()\n\t\t#pasc_video.remove_bad_isomaps()\n\t\t\n\t\t#cnn_db_loader.IMAGE_FILE_ENDING = '/*.jpg'\t\t\n\t\tcasia = cnn_db_loader.CASIA_webface_loader(outputfolder=db_dir, db_base=CASIA_BASE)\n\t\t#casia.analyse_isomaps()\n\t\t#casia.remove_bad_isomaps()\n\n\t\tcnn_db_loader.IMAGE_FILE_ENDING = '/*'\n\t\tpasc_still_merges = cnn_db_loader.PaSC_still_loader(outputfolder=experiment_dir+'/db_input_merges/', db_base='/user/HS204/m09113/my_project_folder/PaSC/still/random_merges_256_conf13/')\n\t\tpasc_video_merges = cnn_db_loader.PaSC_video_loader(outputfolder=experiment_dir+'/db_input_merges/', db_base='/user/HS204/m09113/my_project_folder/PaSC/video/random_merges_256_conf13/')\n\t\tcasia_merges = cnn_db_loader.CASIA_webface_loader(outputfolder=experiment_dir+'/db_input_merges/', db_base='/user/HS204/m09113/my_project_folder/CASIA_webface/random_merges_256_conf13/')\n\n\t\tpasc_still_merges.set_all_as_train()\n\t\tpasc_video_merges.split_train_eval(train_proportion=0.8)\n\t\tcasia_merges.set_all_as_train()\n\n\n\t\tpasc_still.set_all_as_train()\n\t\tcasia.set_all_as_train()\n\t\tpasc_video.split_train_eval(train_proportion=0.8)\n\t\t#db_loader = cnn_db_loader.Aggregator(pasc_video, pasc_still, casia)\n\t\tdb_loader = cnn_db_loader.Aggregator(pasc_still, pasc_still_merges, pasc_video, pasc_video_merges, casia, casia_merges)\n\t\t#db_loader = cnn_db_loader.Aggregator(casia)\n\t\t#db_loader.make_sure_nothings_empty()\n\n\t\tnum_batches_per_epoch = len(db_loader.examples_train) / FLAGS.batch_size\n\n\t\tif cnn_db_loader.NUMBER_ALPHAS > 0 and cnn_db_loader.NUMBER_XYZ == 0:\n\t\t\timage_list, alphas_list, labels_list = db_loader.get_training_image_alphas_and_label_lists()\n\n\t\t\timages, alphas, labels = tf_utils.inputs_with_alphas(image_list, alphas_list, labels_list, FLAGS.batch_size, db_loader.get_mean_image_path())\n\n\t\t\t# Build a Graph that computes the logits predictions from the inference model.\n\t\t\tlogits, _ = cnn_tf_graphs.inference(network=\"alex_with_alpha\", mode=learn.ModeKeys.TRAIN, batch_size=FLAGS.batch_size, num_classes=db_loader.number_ids, input_image_tensor=images, input_alpha_tensor=alphas)\t\t\t\n\n\t\telif cnn_db_loader.NUMBER_ALPHAS == 0 and cnn_db_loader.NUMBER_IMAGES == 1 and cnn_db_loader.NUMBER_XYZ == 0:\n\t\t\timage_list, labels_list = db_loader.get_training_image_and_label_lists()\n\n#\t\t\timage_name_tensor = tf.placeholder(tf.string)\n#\t\t\timage_contents = tf.read_file(image_name_tensor)\n#\t\t\timage = 
tf.image.decode_image(image_contents, channels=3)\n#\t\t\tinit_op = tf.initialize_all_tables()\n#\t\t\twith tf.Session() as sess:\n#\t\t\t\tsess.run(init_op)\n#\t\t\t\tfor image_name in image_list[210000:]:\n#\t\t\t\t\tprint (image_name)\n#\t\t\t\t\ttmp = sess.run(image, feed_dict={image_name_tensor: image_name})\n\n\n\t\t\timages, labels = tf_utils.inputs(image_list, labels_list, FLAGS.batch_size, db_loader.get_mean_image_path(), image_size=256)\n\n\t\t\t# Build a Graph that computes the logits predictions from the inference model.\n\t\t\tlogits, _ = cnn_tf_graphs.inference(network=\"alex\", mode=learn.ModeKeys.TRAIN, batch_size=FLAGS.batch_size, num_classes=db_loader.number_ids, input_image_tensor=images, image_size=256)\n\n\t\telif cnn_db_loader.NUMBER_ALPHAS == 0 and cnn_db_loader.NUMBER_IMAGES == 0 and cnn_db_loader.NUMBER_XYZ == 1:\n\t\t\timage_list, labels_list = db_loader.get_training_xyz_and_label_lists()\n\t\t\n\t\t\timages, labels = tf_utils.inputs(image_list, labels_list, FLAGS.batch_size, db_loader.get_mean_xyz_path())\n\n\t\t\t# Build a Graph that computes the logits predictions from the inference model.\n\t\t\tlogits, _ = cnn_tf_graphs.inference(network=\"alex\", mode=learn.ModeKeys.TRAIN, batch_size=FLAGS.batch_size, num_classes=db_loader.number_ids, input_image_tensor=images)\n\n\t\telif cnn_db_loader.NUMBER_ALPHAS == 0 and cnn_db_loader.NUMBER_IMAGES == 1 and cnn_db_loader.NUMBER_XYZ == 1:\n\t\t\timage_list, xyz_list, labels_list = db_loader.get_training_image_xyz_and_label_lists()\n\n\t\t\tisomap_stacks, labels = tf_utils.inputs_stack_image_and_xyz(image_list, xyz_list, labels_list, FLAGS.batch_size, db_loader.get_mean_image_path(), db_loader.get_mean_xyz_path())\n\n\t\t\t# Build a Graph that computes the logits predictions from the inference model.\n\t\t\tlogits, _ = cnn_tf_graphs.inference(network=\"alex\", mode=learn.ModeKeys.TRAIN, batch_size=FLAGS.batch_size, num_classes=db_loader.number_ids, input_image_tensor=isomap_stacks)\n\n\n\t\t#exit(0)\n\t\t# Calculate loss.\n\t\t#loss = cnn_tf_graphs.l2_loss(logits, labels)\n\t\tloss = cnn_tf_graphs.softmax_loss(logits, labels, db_loader.number_ids)\n\n\t\ttop_k_op = tf.nn.in_top_k(logits, labels, 1)\n\t\tsum_correct = tf.reduce_sum(tf.cast(top_k_op, tf.float32))\n\t\taccuracy = tf.divide(tf.multiply(sum_correct,tf.constant(100.0)),tf.constant(float(FLAGS.batch_size)))\n\t\t#accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(tf.argmax(logits,1), tf.argmax(labels, 1))\n\n\t\tlr = tf.constant(INITIAL_LEARNING_RATE, tf.float32)\n\t\ttf.summary.scalar('learning_rate', lr)\n\t\ttf.summary.scalar('momentum', MOMENTUM)\n\t\ttf.summary.scalar('batch_size', FLAGS.batch_size)\n\t\ttf.summary.scalar('accuracy', accuracy)\n\n\t\toptimizer=tf.train.MomentumOptimizer(learning_rate=lr, momentum=MOMENTUM)\n\t\t#optimizer=tf.train.AdadeltaOptimizer(learning_rate=lr)\n\n\t\ttrain_op = tf.contrib.layers.optimize_loss(\n\t\t\t\t\tloss=loss,\n\t\t\t\t\tglobal_step=tf.contrib.framework.get_global_step(),\n\t\t\t\t\tlearning_rate=lr,\n\t\t\t\t\toptimizer=optimizer)\n\t\n\n\t\tlogging_hook = tf.train.LoggingTensorHook(\n\t\t\t\t\t\ttensors={'step': tf.contrib.framework.get_global_step(),\n\t\t\t\t\t\t\t\t 'loss': loss,\n\t\t\t\t\t\t\t\t 'lr': lr,\n\t\t\t\t\t\t\t\t 'acc': accuracy},\n\t\t\t\t\t\tevery_n_iter=100)\n\n\t\t#saver = tf.train.Saver(var_list=None, keep_checkpoint_every_n_hours=1)\n\t\tsaver = tf.train.Saver(var_list=None, max_to_keep=None)\n\n\t\tclass _LearningRateSetterHook(tf.train.SessionRunHook):\n\t\t\t\"\"\"Sets 
learning_rate based on global step.\"\"\"\n\n\t\t\tdef begin(self):\n\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**6\n\t\t\t\t#print(self.num_batches_per_epoch)\n\t\n\t\t\tdef before_run(self, run_context):\n\t\t\t\treturn tf.train.SessionRunArgs(\n\t\t\t\t\ttf.contrib.framework.get_global_step(), # Asks for global step value.\n\t\t\t\t\tfeed_dict={lr: self._lrn_rate}) # Sets learning rate\n\t\n\t\t\tdef after_run(self, run_context, run_values):\n\t\t\t\ttrain_step = run_values.results\n\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE\n\t\t\t\t#training_epoch = int(train_step/num_batches_per_epoch)\n\t\t\t\t#self._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**int(train_step/num_batches_per_epoch/2.7)\n\t\t\t\tif train_step < 2*num_batches_per_epoch:\n\t\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE\n\t\t\t\telif train_step < 4*num_batches_per_epoch:\n\t\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**1\n\t\t\t\telif train_step < 6*num_batches_per_epoch:\n\t\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**2\n\t\t\t\telif train_step < 9*num_batches_per_epoch:\n\t\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**3\n\t\t\t\telif train_step < 12*num_batches_per_epoch:\n\t\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**4\n\t\t\t\telse:\n\t\t\t\t\tself._lrn_rate = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR**5\n\n\t\t\t\t\t\t\t\n\n\t\tconfig = tf.ConfigProto( allow_soft_placement=False, log_device_placement=FLAGS.log_device_placement)\n\t\tconfig.gpu_options.allow_growth = True\n\t\n\t\twith tf.train.MonitoredTrainingSession(\n\t\t\t\tcheckpoint_dir=train_dir,\n\t\t\t\thooks=[ tf.train.StopAtStepHook(last_step=FLAGS.max_steps),\n\t\t\t\t\t\ttf.train.NanTensorHook(loss),\n\t\t\t\t\t\ttf.train.CheckpointSaverHook(checkpoint_dir=train_dir, save_steps=num_batches_per_epoch, saver=saver),\n\t\t\t\t\t\tlogging_hook,\n\t\t\t\t\t\t_LearningRateSetterHook()],\n\t\t\t\tconfig=config,\n\t\t\t\tsave_checkpoint_secs=3600)\tas mon_sess:\n\t\t\t#saver.restore(mon_sess,'/vol/vssp/facer2vm/people/Philipp/cnn_experiments/03/train/model.ckpt-21575')\n\t\t\twhile True: # not mon_sess.should_stop():\n\t\t\t\tmon_sess.run(train_op)\n\t\t\t\t#mon_sess.run(train_op)\n\t\t#my_summary_op = tf.summary.merge_all()\n\t\t#sv = tf.train.Supervisor(logdir=\"/my/training/directory\", summary_op=None) # Do not run the summary service\n\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n\n\tif not os.path.exists(experiment_dir):\n\t\tos.mkdir(experiment_dir)\n\n\n\tif not os.path.exists(train_dir):\n\t\tos.mkdir(train_dir)\n\n\tif not os.path.exists(db_dir):\n\t\tos.mkdir(db_dir)\n\n\twith tf.device('/gpu:0'):\n\t\ttrain()\n\n\nif __name__ == '__main__':\n\ttf.app.run()\n","sub_path":"cnn_experiment_train_alex.py","file_name":"cnn_experiment_train_alex.py","file_ext":"py","file_size_in_byte":10539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"24457946","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 23 00:02:47 2018\n\n@author: aksha\n\"\"\"\n\nimport random\n\nb=[]\nwith open(\"sam2.txt\", \"w\") as f:\n for i in range(10):\n a =random.random()\n b.append(a)\n f.write(str(a) + \"\\n\")\n\na = b.sort()\n\nwith open(\"OUTPUT2.txt\", \"w\") as f:\n for i in range(len(b)):\n f.write(str(b[i]) + \"\\n\")","sub_path":"bb/file 
handling/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"129407214","text":"#!/usr/bin/env python3\n\nfrom bottle import template\nfrom datetime import datetime\nimport json\nfrom os import environ\nimport requests\n\nforecastURL = environ[\"forecastURL\"]\nhourlyURL = environ[\"hourlyURL\"]\n\ndef getWeatherData(url):\n user_agent = {'User-agent': 'weather forecast google cloud function'}\n response = requests.get(url, headers=user_agent)\n if response.status_code != 200:\n # raise an error\n print(\"Error - non-200 status code: %s\" % response.status_code)\n return \"error with API response\"\n\n try:\n responseData = json.loads(response.content)\n except Exception as e:\n print(e)\n return \"error parsing JSON\"\n\n return responseData\n\n\nhtmlTemplate = \"\"\"\n\n\n\nweather\n\n\n\n\n
\n{{!generalData}}\n
\n\n\n \n \n \n\n{{!hourlyData}}\n
TimeSummaryTemp
\n
\n\n\"\"\"\n\ngeneralTemplate = \"\"\"\n

{name}

\n

{detailedForecast}

\"\"\"\n\nhourlyTemplate = \"\"\"\n\n {time}\n {summary}\n {temp}\n\"\"\"\n\n\ndef weatherForecast(request):\n if request.user_agent.platform == \"android\" or request.user_agent.platform == \"iphone\":\n bodySizeCSS = \"font-size: 1.7em;\"\n else:\n bodySizeCSS = \"\"\n\n general = getWeatherData(forecastURL)\n if type(general) == str:\n # then there was an error\n return(\"an error occurred while gathering data\")\n\n generalData = \"\"\n for i in range(4):\n generalData = generalData + generalTemplate.format(\n name=general['properties']['periods'][i]['name'],\n detailedForecast=general['properties']['periods'][i]['detailedForecast']\n )\n\n hourly = getWeatherData(hourlyURL)\n if type(hourly) == str:\n # then there was an error\n return(\"an error occurred while gathering data\")\n\n hourlyData = \"\"\n hourList = list(range(12)) + list(range(12, 36, 2))\n for i in hourList:\n timeRFC3339 = hourly['properties']['periods'][i]['startTime']\n t = datetime.strptime(timeRFC3339[:timeRFC3339.rfind('-')], '%Y-%m-%dT%H:%M:%S').strftime(\"%a %m-%d %H:%M\")\n hourlyData = hourlyData + hourlyTemplate.format(\n time=t,\n summary=hourly['properties']['periods'][i]['shortForecast'],\n temp=hourly['properties']['periods'][i]['temperature']\n )\n\n return template(htmlTemplate, generalData=generalData, hourlyData=hourlyData, bodySize=bodySizeCSS)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"208644327","text":"#\nfrom django.urls import path\nfrom . import views\n\napp_name = \"entrada_app\"\n\nurlpatterns = [\n path(\n 'entradas/', \n views.EntryListView.as_view(),\n name='entry-lista',\n ),\n path(\n 'entradas//', \n views.EntryDetailView.as_view(),\n name='entry-detail',\n ),\n path(\n 'nueva-entrada', \n views.NewEntry.as_view(),\n name='new-entry',\n ),\n]","sub_path":"blog/applications/entrada/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"599750068","text":"from db import News, session, engine\nfrom math import log\nfrom nltk.tokenize import RegexpTokenizer\nfrom pprint import pprint as pp\nfrom operator import itemgetter\nimport time\n\n\nclass NaiveBayesClassifier:\n\n def __init__(self, alpha=0.05):\n self.alpha = alpha\n self.labels = []\n self.classified_words = [] # list of word_info dicts\n\n self.count = {}\n\n def fit(self, X, y):\n \"\"\" Fit Naive Bayes classifier according to X, y. 
\n X - news title\n y - news label (upvoted, downvoted, maybe'ed)\n \"\"\"\n t0 = time.time()\n\n # List unique classes (labels)\n self.labels = list(set(y))\n\n tokenizer = RegexpTokenizer(r'\\w+')\n\n words = []\n sanitized_titles = []\n for index, title in enumerate(X):\n # Remove punctuation and lowercase words\n sanitized_title = list(map(str.lower, tokenizer.tokenize(title)))\n\n # Count words that occur in this class\n self.count_words_in_class(sanitized_title, y[index])\n\n sanitized_titles.append(sanitized_title)\n words.extend(sanitized_title)\n\n unique_words = list(set(words))\n for word in unique_words:\n word_info = {\n 'word': word,\n 'occur_in_class': [], # list of dicts\n 'prob_in_class': [], # list of dicts\n }\n\n # Dynamically count word occurences in classes (labels)\n for label in self.labels:\n count = 0\n for title_index, title in enumerate(sanitized_titles):\n if y[title_index] == label:\n count += title.count(word)\n\n word_info['occur_in_class'].append({\n f'{label}': count,\n })\n\n # Dynamically count word probabilities for appearing in classes (labels)\n for label in self.labels:\n for occur in word_info['occur_in_class']:\n if list(occur.keys())[0] == label:\n occur_in_class = occur[f'{label}']\n\n # Calculationg probability of a word appearing in class (label)\n # Formula: https://i.imgur.com/oaym6LY.png\n prob = log((occur_in_class + self.alpha) /\n (self.count[f'{label}'] + self.alpha * len(unique_words)))\n\n word_info['prob_in_class'].append({\n f'{label}': prob\n })\n\n self.classified_words.append(word_info)\n\n t1 = time.time()\n total = t1-t0\n print('Fitted in %.2f' % total, 'seconds')\n\n def count_words_in_class(self, title, label):\n\n if label not in self.count:\n self.count.update({f'{label}': 0})\n\n self.count[f'{label}'] += len(title)\n\n def predict(self, X):\n \"\"\" Perform classification on an array of test vectors X. \"\"\"\n t0 = time.time()\n\n tokenizer = RegexpTokenizer(r'\\w+')\n\n predictions = []\n for title in X:\n # Remove punctuation and lowercase words\n sanitized_title = list(map(str.lower, tokenizer.tokenize(title)))\n\n prob_sums = []\n for label in self.labels:\n prob_sum = log(1 / len(self.labels))\n\n for word in sanitized_title:\n word_info = list(\n filter(lambda word_info: word_info['word'] == word, self.classified_words))\n\n if word_info:\n word_info = word_info[0]\n for prob in word_info['prob_in_class']:\n if list(prob.keys())[0] == label:\n prob_in_class = prob[f'{label}']\n prob_sum += prob_in_class\n prob_sums.append((label, prob_sum))\n\n # ref: https://stackoverflow.com/questions/13145368/find-the-maximum-value-in-a-list-of-tuples-in-python\n prediction = {\n 'title': title,\n 'label': max(prob_sums, key=itemgetter(1))[0],\n 'prob_sum': max(prob_sums, key=itemgetter(1))[1],\n }\n\n predictions.append(prediction)\n\n t1 = time.time()\n total = t1-t0\n print('Predicted in %.2f' % total, 'seconds')\n return predictions\n\n def score(self, X_test, y_test):\n \"\"\" Returns the mean accuracy on the given test data and labels. 
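The classifier above scores a word in a class with the additively smoothed log-probability log((occurrences_in_class + alpha) / (tokens_in_class + alpha * vocabulary_size)); a tiny worked check of that formula with invented counts (none of these numbers come from the record's data):

from math import log

alpha, vocab_size = 0.05, 4
# 'free' appears 3 times among 10 spam tokens and 0 times among 8 ham tokens
p_spam = log((3 + alpha) / (10 + alpha * vocab_size))
p_ham = log((0 + alpha) / (8 + alpha * vocab_size))
assert p_spam > p_ham  # smoothing keeps the unseen-word score finite but small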
\"\"\"\n success = 0\n fail = 0\n predictions = self.predict(X_test)\n\n for index, _ in enumerate(predictions):\n # Compare predicted label and true label\n if predictions[index]['label'] == y_test[index]:\n success += 1\n else:\n fail += 1\n total = success + fail\n accuracy = success / total\n\n print('Result accuracy: %.6f' % accuracy, f'with α={self.alpha}')\n\n\n# Score on SMS Spam Collection\nif __name__ == '__main__':\n import csv\n with open(\"SMSSpamCollection\", encoding=\"utf8\") as f:\n data = list(csv.reader(f, delimiter=\"\\t\"))\n labels = []\n texts = []\n for pair in data:\n label, text = pair\n labels.append(label)\n texts.append(text)\n bayers = NaiveBayesClassifier()\n bayers.fit(texts[:3900], labels[:3900])\n bayers.score(texts[3900:], labels[3900:])\n\n# Score on SMS Spam Collection by sklearn\n# if __name__ == '__main__':\n# import csv\n# with open(\"SMSSpamCollection\", encoding=\"utf8\") as f:\n# data = list(csv.reader(f, delimiter=\"\\t\"))\n# labels = []\n# texts = []\n# for pair in data:\n# label, text = pair\n# labels.append(label)\n# texts.append(text)\n# from sklearn.naive_bayes import MultinomialNB\n# from sklearn.pipeline import Pipeline\n# from sklearn.feature_extraction.text import TfidfVectorizer\n\n# model = Pipeline([\n# ('vectorizer', TfidfVectorizer()),\n# ('classifier', MultinomialNB(alpha=0.05)),\n# ])\n\n# model.fit(texts[:3900], labels[:3900])\n# print(model.score(texts[3900:], labels[3900:]))\n","sub_path":"homework06/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"203460108","text":"from bot.views.lesson import lessons_list, add_lesson, delete_lesson, add_lesson_record\nfrom bot.views.version import version_list, change_subscription_rules\nfrom bot.views.video import add_video, video_list, delete_video, video_statistics, get_video\nfrom config import token\nfrom django.conf.urls import url\nfrom bot.views.views import *\nfrom bot.views.order import *\nfrom bot.views.statistic import *\nfrom bot.views.user import *\nfrom bot.views.admin_user import *\nfrom bot.views.sent_message import *\n\nurlpatterns = [\n url(r'^' + token + '$', webhook, name='webhook'),\n url(r'^$', index, name='index'),\n url(r'^yandex_money$', ym, name='ym'),\n\n url(r'^user/$', user_list, name='user_list'),\n url(r'^user/active/(?P[0-9]+)/$', active_users, name='active_users'),\n url(r'^user/(?P[0-9]+)/$', user_payments, name='user_payments'),\n url(r'^user/(?P[0-9]+)/discount$', add_discount, name='add_discount'),\n url(r'^user/(?P[0-9]+)/lessons$', set_lessons, name='set_lessons'),\n url(r'^user/(?P[0-9]+)/delete$', delete_bot_user, name='delete_bot_user'),\n\n url(r'^user/add_day$', user_add_day, name='user_add_day'),\n #\n url(r'^video/$', video_list, name='video_list'),\n url(r'^video/add$', add_video, name='add_video'),\n url(r'^video/(?P[0-9]+)/delete$', delete_video, name='delete_video'),\n url(r'^video/(?P[0-9]+)/statistics', video_statistics, name='video_statistics'),\n\n url(r'^application/$', application_list, name='application_list'),\n url(r'^application/(?P[0-9]+)/delete/$', application_delete, name='application_delete'),\n\n url(r'^payments/$', all_payments, name='payments'),\n\n url(r'^statistics/add/$', add_statistics, name='add_statistics'),\n url(r'^statistics/(?P[0-9]+)/edit/$', edit_stat, name='edit_stat'),\n url(r'^statistics/$', stat_list, name='stat_list'),\n\n url(r'^message/$', messages_list, 
name='static_messages'),\n url(r'^message/(?P[0-9]+)/edit/', edit_message, name='edit_message'),\n url(r'^message/market/(?P[0-9]+)/edit/', edit_market_description, name='edit_market_description'),\n\n url(r'^message/insight/$', insight_message, name='insight'),\n url(r'^message/insight/all$', insight_list, name='insight_list'),\n url(r'^message/insight/(?P[0-9]+)/edit/$', edit_insight, name='edit_insight'),\n\n\n url(r'^message/all/$', message_to_all, name='message_to_all'),\n url(r'^message/subscribed/$', message_to_subscribed, name='message_to_subscribed'),\n url(r'^message/sent/(?P[0-9]+)/edit/', edit_sent_message, name='edit_sent_message'),\n url(r'^message/sent/', sent_messages_list, name='sent_messages_list'),\n\n url(r'^price/$', price_list, name='price_list'),\n url(r'^price/edit/(?P[0-9]+)$', price_edit, name='price_edit'),\n\n url(r'^oauth$', oauth, name='oauth'),\n url(r'^payment$', payment_webhook, name='payment_webhook'),\n # url(r'^pay$', payment, name='user_payment'),\n url(r'^subscription/(?P[0-9]+)$', give_subscription, name='subscription'),\n url(r'^subscription/(?P[0-9]+)/delete', delete_subscription, name='delete_subscription'),\n url(r'^user/(?P[0-9]+)/payment$', add_payment, name='add_payment'),\n url(r'^payment/(?P[0-9]+)/edit$', edit_payment, name='edit_payment'),\n url(r'^payment/(?P[0-9]+)/delete$', delete_payment, name='delete_payment'),\n\n url(r'^staff/$', admin_user_list, name='admin_user_list'),\n url(r'^staff/(?P[0-9]+)/delete$', delete_staff_user, name='delete_staff_user'),\n url(r'^staff/create/$', create_user, name='create_user'),\n\n url(r'^lessons/$', lessons_list, name='lessons_list'),\n url(r'^lessons/add/$', add_lesson, name='add_lesson'),\n url(r'^lessons/(?P[0-9]+)/add_record', add_lesson_record, name='add_lesson_record'),\n\n url(r'^lessons/(?P[0-9]+)/delete', delete_lesson, name='delete_lesson'),\n\n url(r'^version/$', version_list, name='version_list'),\n url(r'^version/subscription/change$', change_subscription_rules, name='change_subscription_rules'),\n\n]\n\n","sub_path":"bot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"54930024","text":"'''\n\n\tV-Ray/Blender\n\n\thttp://vray.cgdo.ru\n\n\tAuthor: Andrey M. Izrantsev (aka bdancer)\n\tE-Mail: izrantsev@cgdo.ru\n\n\tThis program is free software; you can redistribute it and/or\n\tmodify it under the terms of the GNU General Public License\n\tas published by the Free Software Foundation; either version 2\n\tof the License, or (at your option) any later version.\n\n\tThis program is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with this program. If not, see .\n\n\tAll Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.\n\n'''\n\nimport os\nimport bpy\nimport inspect\n\n\n# https://gist.github.com/techtonik/2151727\n#\ndef caller_name(skip=2):\n \"\"\"Get a name of a caller in the format module.class.method\n \n `skip` specifies how many levels of stack to skip while getting caller\n name. 
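caller_name above walks inspect.stack() to recover the name of a caller a configurable number of frames up; a minimal stdlib-only sketch of the same idea (function names are illustrative):

import inspect

def who_called_me():
    # frame 0 is this function, frame 1 is whoever called it
    return inspect.stack()[1].function

def outer():
    return who_called_me()

assert outer() == 'outer'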
skip=1 means \"who calls me\", skip=2 \"who calls my caller\" etc.\n \n An empty string is returned if skipped levels exceed stack height\n \"\"\"\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n if module:\n name.append(module.__name__)\n if 'self' in parentframe.f_locals:\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '':\n name.append(codename)\n del parentframe\n return \".\".join(name)\n\n\ndef msg(msg=\"\"):\n if bpy.app.debug:\n print(\"...\", caller_name())\n print(\"......\", msg)\n","sub_path":"All_In_One/addons/vb25/dbg.py","file_name":"dbg.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"22579041","text":"\n\n#calss header\nclass _ZODIAC():\n\tdef __init__(self,): \n\t\tself.name = \"ZODIAC\"\n\t\tself.definitions = [u'(in the study of the planets and their influence on life) an area of the sky through which the sun, moon, and most of the planets appear to move, divided into twelve equal parts, each with a name and symbol, and each connected with an exact time of year: ', u'a circular drawing representing the zodiac']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_zodiac.py","file_name":"_zodiac.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"640564305","text":"class SentimentData:\n \"\"\"\n A class which reads in and stores data from files containing information about\n aspects of certain words, specifically their valence (pleasure) and arousal (intensity)\n Current data coming from https://link.springer.com/article/10.3758%2Fs13428-012-0314-x#Notes\n \"\"\"\n def __init__(self, _filename):\n self.filename = _filename\n self.data_dict = None\n self.word_list = None\n\n def readLineData(self, _filename = None):\n\n \"\"\"\n Reads sentiment data line-by-line from a .csv file\n :param _filename: The name of the file to read data from\n :return: A two-dimensional array containing arrays for each line in the file, seperated by commas\n \"\"\"\n if _filename == None:\n _filename = self.filename\n data_arr = []\n data_file = open(_filename, 'r+')\n for line in data_file:\n arr = line.split(',')\n data_arr.append(arr)\n return data_arr\n\n def restructureData(self, data_arr):\n \"\"\"\n Takes in a two-dimensional array of words returned by readLineData and restructures it in a more usable\n format.\n :param data_arr: Two-dimensional array of words we want to use to measure overall sentiment of a tweet.\n :return: A dictionary of dictionaries. 
","sub_path":"All_In_One/addons/vb25/dbg.py","file_name":"dbg.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"22579041","text":"\n\n# class header\nclass _ZODIAC():\n\tdef __init__(self,): \n\t\tself.name = \"ZODIAC\"\n\t\tself.definitions = [u'(in the study of the planets and their influence on life) an area of the sky through which the sun, moon, and most of the planets appear to move, divided into twelve equal parts, each with a name and symbol, and each connected with an exact time of year: ', u'a circular drawing representing the zodiac']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_zodiac.py","file_name":"_zodiac.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"640564305","text":"class SentimentData:\n    \"\"\"\n    A class which reads in and stores data from files containing information about\n    aspects of certain words, specifically their valence (pleasure) and arousal (intensity)\n    Current data coming from https://link.springer.com/article/10.3758%2Fs13428-012-0314-x#Notes\n    \"\"\"\n    def __init__(self, _filename):\n        self.filename = _filename\n        self.data_dict = None\n        self.word_list = None\n\n    def readLineData(self, _filename = None):\n\n        \"\"\"\n        Reads sentiment data line-by-line from a .csv file\n        :param _filename: The name of the file to read data from\n        :return: A two-dimensional array containing arrays for each line in the file, separated by commas\n        \"\"\"\n        if _filename is None:\n            _filename = self.filename\n        data_arr = []\n        data_file = open(_filename, 'r+')\n        for line in data_file:\n            arr = line.split(',')\n            data_arr.append(arr)\n        return data_arr\n\n    def restructureData(self, data_arr):\n        \"\"\"\n        Takes in a two-dimensional array of words returned by readLineData and restructures it in a more usable\n        format.\n        :param data_arr: Two-dimensional array of words we want to use to measure overall sentiment of a tweet.\n        :return: A dictionary of dictionaries. Structured as word : {number, valence_mean, valence_deviation, arousal_mean,\n        arousal_deviation}\n        \"\"\"\n        ret_dict = {}\n        for word_arr in data_arr:\n            data_dict = {}\n            word = word_arr[1]\n            data_dict['number'] = word_arr[0]\n            data_dict['valence_mean'] = word_arr[2]\n            data_dict['valence_deviation'] = word_arr[3]\n            data_dict['arousal_mean'] = word_arr[5]\n            data_dict['arousal_deviation'] = word_arr[6]\n            ret_dict[word] = data_dict\n        return ret_dict\n\n    def getWordList(self, data_dict = None):\n        \"\"\"\n        Gets a list of words being used from a dict\n        :param data_dict: A dict of dicts from restructureData containing information about words being used in\n        analysis.\n        :return: An array of words\n        \"\"\"\n\n        if data_dict is None:\n            data_dict = self.data_dict\n        ret_data = []\n        for word in data_dict:\n            ret_data.append(word)\n        return ret_data\n\n    def getSentimentData(self, _filename = None):\n        if _filename is None:\n            _filename = self.filename\n        else:\n            self.filename = _filename\n        raw_data = self.readLineData(_filename)\n        struct_data = self.restructureData(raw_data)\n\n        self.data_dict = struct_data\n        self.word_list = self.getWordList(self.data_dict)\n        return self\n\n    def containsEntry(self, entry):\n        if self.word_list is None:\n            return False\n        return entry.lower() in self.word_list\n\n    def getEntry(self, entry):\n        if not self.containsEntry(entry):\n            return None\n        else:\n            # lower-case the key so lookups match the case handling in containsEntry\n            return self.data_dict[entry.lower()]\n
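\n# Hypothetical usage, assuming a CSV file laid out as restructureData expects\n# (word in column 1, valence mean/sd in columns 2-3, arousal mean/sd in 5-6);\n# the filename here is a placeholder:\n#\n# sentiment = SentimentData(\"word_ratings.csv\").getSentimentData()\n# if sentiment.containsEntry(\"happy\"):\n#     print(sentiment.getEntry(\"happy\")['valence_mean'])\n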
","sub_path":"SentimentData.py","file_name":"SentimentData.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"361410053","text":"\"\"\"\nCopyright (c) 2018 Cisco Systems, Inc.\nAuthor: \"Christian Oeien\" \n\"\"\"\nfrom tng.api import runner\nfrom tng_sl.contrib.pitstop_helper import PitstopTestCase\nfrom pitstop.exp import (\n    Receive, Poll, Wait,\n    Command, Send, LocalOffer, RemoteAnswer, NoTransition, Flag)\n\n\nclass Test(PitstopTestCase):\n\n    def test_accept_then_retrans_200_until_ack(self):\n        '''\n        Verify that the TU in the DUT retransmits the 200 to INVITE\n        until it is ACK'ed.\n        Accommodates for MPP: FAILS on TCP\n        as DUT shall retransmit in that case too\n        '''\n        self.spec.update({\"test\": [\n            Wait(\"idle\").then([\n                LocalOffer(\"localSdp\"),\n                Send(\"INVITE\", {\"\\n\": \"$localSdp\"}, transaction_label=\"i\")]),\n            Receive(\"18.\", {}, on_transaction=\"i\").then([\n                NoTransition(\"i\"),\n                Command(self.dut.accept_call)]),\n            Receive(\"200\", {}, on_transaction=\"i\").then([]),\n            Receive(\"200\", {}, on_transaction=\"i\", dialog_label=\"d\", captures={\n                \"remoteSdp\": \"\\n(^v=0.+)\"}).then([\n                    RemoteAnswer(\"$remoteSdp\"),\n                    Send(\"ACK\", {}, in_dialog=\"d\")]),\n            Poll(self.dut.is_active_call).then([\n                Command(self.dut.end_call)]),\n            Receive(\"BYE\", {}, in_dialog=\"d\", transaction_label=\"b\").then([\n                Send(\"200\", {}, on_transaction=\"b\")]),\n            Poll(self.dut.is_line_idle).then([Flag(\"idle\")])]})\n\n        self.pitstop([\"udp\"])\n\n\ndef main():\n    runner()\n","sub_path":"pitstop_tests/basic/accept_then_retrans_200_until_ack.py","file_name":"accept_then_retrans_200_until_ack.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"646723466","text":"import os\n# os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\nimport pygame\npygame.init()\nimport random \nimport numpy as np\nimport math\nimport time\nimport cv2\nfrom game_config import *\nfrom game_models import *\nimport skvideo.io\n\nclass GameManager:\n    def __init__(self, bullet_mode, explode_mode, plane_show, score_show, test_mode = False):\n        \n        self.reset()\n\n        self.bullet_mode = bullet_mode\n        self.explode_mode = explode_mode\n        self.plane_show = plane_show \n        self.score_show = score_show\n        self.test_mode = test_mode\n\n        self.dead = False\n        self.action_space = 5\n        self.obs_resize_shape = [RESIZE_SIZE[0], RESIZE_SIZE[1], 3]\n        self.obs_shape = [WINDOW_WIDTH, WINDOW_HEIGHT, 3]\n\n        # set title \n        pygame.display.set_caption('bullet hell drill')\n        \n    def resize_state(self, size):\n        arr = pygame.surfarray.array3d(pygame.display.get_surface())\n        image = cv2.resize(arr, size)\n        image = np.ascontiguousarray(image, dtype = np.float32) /255\n        return image\n\n    def reset(self, resize = True, size = (80,80)):\n        self.window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n        self.clock = pygame.time.Clock()\n        self.score = 0\n        self.render_score = 0  # running total shown when score_show is enabled\n        self.run = True\n        self.plane = Plane(WINDOW_WIDTH//2, WINDOW_HEIGHT//2)\n        self.bullets = []\n        self.collision = False\n        self.explosion = None\n        self.font = pygame.font.SysFont(\"comicsans\", 30, True)\n        self.dead = False\n\n        if resize == False:\n            return pygame.surfarray.array3d(pygame.display.get_surface())\n        else:\n            return self.resize_state(size)\n\n    def render(self):\n        # reset \n        self.window.fill((0, 0, 0))\n\n        # border\n        # left up coordinate\n        border_x = WINDOW_WIDTH/2 - BORDER_WIDTH/2\n        border_y = WINDOW_HEIGHT/2 - BORDER_HEIGHT/2\n        pygame.draw.rect(self.window, WHITE, pygame.Rect(int(border_x), int(border_y),BORDER_WIDTH, BORDER_HEIGHT),BORDER_LEN)\n\n        # plane\n        self.plane.render(self.window, self.collision, self.plane_show, is_warning=IS_WARNING)\n        \n        # bullet\n        for bullet in self.bullets:\n            bullet.render(self.window)\n        \n        # explosion\n        if self.explode_mode and self.collision:\n            self.explosion.render(self.window)\n        \n        # score \n        if self.score_show == True:\n            self.render_score += self.score\n            self.window.blit(self.font.render(f'Score(s): {self.render_score}', 1, WHITE), (50, 10))\n        \n        # update\n        pygame.display.update()\n\n    def is_collision(self):\n        boundary = PLANE_HITBOX_RADIUS + BULLET_RADIUS\n        for bullet in self.bullets:\n            distance = math.hypot(self.plane.x - bullet.x, self.plane.y - bullet.y)\n            if distance < (boundary-COLL_TOLERANCE):\n                return True\n        return False\n    \n    def in_warning_zone(self):\n        boundary = PLANE_WARNING_RADIUS + BULLET_RADIUS\n        in_count = 0\n        for bullet in self.bullets:\n            distance = math.hypot(self.plane.x - bullet.x, self.plane.y - bullet.y)\n            if distance < (boundary-COLL_TOLERANCE):\n                in_count += 1\n        return in_count\n\n    \n    def score_update(self, invincible):\n        # self.score += time.time()-start_tick\n        # in_warning_count = self.in_warning_zone()\n\n        self.score = SURVIVE_SCORE\n        # self.score += in_warning_count*WARNING_PUNISH\n\n        if self.run == False or self.dead == True:\n            self.score = 0\n\n        if self.collision == True and self.dead == False:\n            self.score = DEAD_PUNISH\n            self.dead = True\n        \n        if invincible:\n            self.dead = False\n\n    def step(self, actions, resize = True, size = (80,80), invincible = False):\n        self.clock.tick(FPS)\n\n        for event in pygame.event.get():  # This will loop through a list of any keyboard or mouse events.\n            if event.type == pygame.QUIT: # Checks if the red button in the corner of the self.window is clicked\n                self.run = False  # Ends the game loop \n        \n        if invincible == True:\n            self.collision = False\n        \n        # check explosion\n        if not self.collision:\n            # plane move\n            self.plane.move(actions)\n            # bullets move\n            for bullet in list(self.bullets):  # iterate over a copy; bullets may be removed below\n                bullet_exist = False\n                if 0 <= bullet.x and 
bullet.x <= WINDOW_WIDTH and 0 <= bullet.y and bullet.y <= WINDOW_HEIGHT:\n                    bullet.move()\n                    bullet_exist = True\n\n                if bullet_exist == False:\n                    self.bullets.pop(self.bullets.index(bullet))\n            \n            addin_bullets = 0\n            while len(self.bullets) < MAX_BULLETS and addin_bullets <= MAX_ADDIN_BULLETS and self.test_mode != True:\n                if self.bullet_mode == 'random':\n                    self.bullets.append(Bullet_2(YELLOW))\n                elif self.bullet_mode == 'aim':\n                    self.bullets.append(Bullet(WHITE, self.plane.x, self.plane.y))\n                addin_bullets += 1\n            addin_bullets = 0\n            \n            # check collision\n            if self.is_collision():\n                self.collision = True\n                if self.explode_mode:\n                    self.explosion = Explode(self.plane.x, self.plane.y)\n                else:\n                    if invincible == False:\n                        self.run = False\n        else:\n            # end game\n            if self.explode_mode:\n                if self.explosion.is_stop():\n                    self.run = False\n            else:\n                self.run = False\n        \n        # update frame\n        if self.run:\n            self.render()\n\n        # return \n        if resize == False:\n            screen_shot = pygame.surfarray.array3d(pygame.display.get_surface())\n        else:\n            screen_shot = self.resize_state(size)\n\n\n        self.score_update(invincible)\n        return screen_shot, self.score, self.collision, self.run\n\nclass ReplaySaver:\n    def __init__(self):\n        self.frame_array = []\n        self.best_frame_array = []\n    \n    def reset(self):\n        self.frame_array = []\n    \n    def save_best(self):\n        self.best_frame_array = self.frame_array\n    \n    def get_current_frame(self):\n        frame = pygame.surfarray.array3d(pygame.display.get_surface())\n        self.frame_array.append(frame)\n    \n    def make_video(self, path, size = (750,750), fps = 60):\n        out = cv2.VideoWriter(path,cv2.VideoWriter_fourcc(*'mp4v'), fps, size)\n        print('Vid length: {}'.format(len(self.best_frame_array)))\n        for i in self.best_frame_array:\n            i = np.rot90(i,3)\n            i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)\n            out.write(i)\n        \n        print('Writing video...')\n        out.release()\n        # out = cv2.VideoWriter(path,cv2.VideoWriter_fourcc(*'mp4v'), fps, size)\n        #rate = '60'\n        #writer = skvideo.io.FFmpegWriter(path, inputdict = {'-r':rate}, outputdict={'-r':rate})\n        #print('Vid length: {}'.format(len(self.best_frame_array)))\n        #for i in self.best_frame_array:\n        #    i = np.rot90(i,3)\n        #    # i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)\n        #    writer.writeFrame(i)\n        #    # out.write(i)\n        #    \n        #print('Writing video...')\n        #writer.close()\n        # out.release()\n\n
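# A minimal random-play loop, sketched as a usage hint (the constructor flags\n# are illustrative; constants such as FPS come from game_config as above):\n#\n# game = GameManager(bullet_mode='aim', explode_mode=True,\n#                    plane_show=True, score_show=True)\n# obs = game.reset()\n# while game.run:\n#     action = random.randrange(game.action_space)\n#     obs, reward, collided, running = game.step(action)\n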
","sub_path":"game/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159381725","text":"\"\"\"Project Euler Problem 50\"\"\"\nimport math\n\n\ndef problem_50() -> int:\n    \"\"\"Consecutive prime sum\"\"\"\n    # Find the largest prime below 1000000 that is a sum of consecutive primes\n    # ex) primes = 2, 3, 5, ...\n    # term 1 = [2] : sum 2 == prime\n    # term 2 = [2, 3] : sum 5 == prime\n    # term 3 = [2, 3, 5] : sum 10 != prime\n    # ...\n    # term 6 = [2, 3, 5, 7, 11, 13] : sum 41 == prime\n    # ...\n    # term z = [2, 3, ..., zzzz] : sum ZZZZ == prime (ZZZZ < 1000000)\n    # term y = [2, 3, ..., yyyy] : sum YYYY != prime (YYYY < 1000000)\n    # term x = [2, 3, ..., xxxx] : sum XXXX == prime (XXXX > 1000000)\n    # * y=z+1, x=y+1, ZZZZ < YYYY < 1000000 < XXXX\n    # Find the ZZZZ above. YYYY is excluded because its sum is not prime\n    prim_num_list = list()\n    sum_prim_num_list = list()\n    check_sum_p_num = 0\n    for check_p_num in range(2, 1000000):\n        if judge_prim_number(check_p_num):\n            prim_num_list.append(check_p_num)\n            check_sum_p_num = sum(prim_num_list)\n            if judge_prim_number(check_sum_p_num):\n                sum_prim_num_list.append(check_sum_p_num)\n                if sum_prim_num_list[-1] > 1000000:\n                    return sum_prim_num_list[-2]\n    return -1\n\n\ndef judge_prim_number(num: int) -> bool:\n    \"\"\"\n    Determine whether the argument num is prime\n    True -> num is prime\n    False -> num is not prime\n    \"\"\"\n    for i in range(2, int(math.sqrt(num)) + 1):\n        if num % i == 0:\n            return False\n    return True\n\n\nif __name__ == \"__main__\":\n    print(problem_50())\n","sub_path":"sampleproject/www/Project_Euler/problem001_050/problem050.py","file_name":"problem050.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"438376347","text":"#othello_ui_tkinter\r\n#\r\n#Project 5 CSE 42 Winter 2016\r\n#Kausthub Raj Jadhav\r\n\r\n\r\nimport tkinter\r\nimport point\r\n\r\n\r\nclass GuiOthello:\r\n    def __init__(self, rectangles: [(float, float, float, float)]):\r\n        # First, store the list of rectangle coordinates in an attribute,\r\n        # so they'll be available to us when we need to draw them.\r\n        self._rectangles = rectangles\r\n\r\n        # set up the window.\r\n        self._root_window = tkinter.Tk()\r\n        self._root_window.configure(background ='black')\r\n\r\n        self._canvas = tkinter.Canvas(\r\n            master = self._root_window,\r\n            width = 500, height = 400,\r\n            background = 'black')\r\n        \r\n        self._canvas.bind('<Button-1>', self._on_canvas_clicked)\r\n\r\n        # * The canvas is in grid cell (0, 0), which is the only grid\r\n        #   cell in our window.\r\n        # * As the size of grid cell (0, 0) changes, the size of our\r\n        #   canvas changes accordingly, because it's \"stuck\" to all\r\n        #   four edges of the cell (north, south, west, and east).\r\n        # * There are 30 pixels of padding (empty space) inside of the\r\n        #   grid cell (0, 0) horizontally and vertically, with the canvas\r\n        #   surrounded by the padding.\r\n        # * As the size of the window changes, all of the added or removed\r\n        #   space is added to (or taken from) the size of grid cell (0, 0).\r\n        #   (That's what the calls to rowconfigure and columnconfigure do;\r\n        #   they set the \"weights\" on the row and column, which specifies\r\n        #   what proportion of newly-available space should be allocated\r\n        #   to rows and columns in the grid when the size of the window\r\n        #   changes.)\r\n\r\n        self._canvas.grid(\r\n            row = 1, column = 0, padx = 30, pady = 30,\r\n            sticky = tkinter.N + tkinter.S + tkinter.W + tkinter.E)\r\n\r\n        self._canvas.bind('<Configure>', self._on_canvas_resized)\r\n\r\n        self._root_window.rowconfigure(0, weight = 1)\r\n        self._root_window.columnconfigure(0, weight = 1)\r\n\r\n\r\n    def start(self) -> None:\r\n        self._root_window.mainloop()\r\n\r\n\r\n    # Because of the call we made to bind() in the __init__ method,\r\n    # this method is called whenever the size of the canvas changes.\r\n    # We respond by calling our own _draw_rectangles() method to redraw the\r\n    # image, given the new size of the canvas.\r\n    \r\n    def _on_canvas_resized(self, event: tkinter.Event) -> None:\r\n        self._draw_rectangles()\r\n\r\n\r\n    def _draw_rectangles(self) -> None:\r\n        # Remove all of the shapes currently in the canvas. (For a fun\r\n        # effect, comment this line out and re-run the program.)\r\n        self._canvas.delete(tkinter.ALL)\r\n\r\n        # Draw the rectangles. 
We always want the size of the rectangles to be\r\n        # in the same proportions as the size of the canvas, so we're\r\n        # passing \"fractional coordinates\" instead of \"pixel coordinates\".\r\n        # When we actually draw shapes on the canvas, we'll convert the\r\n        # fractional coordinates (ranging from 0.0 to 1.0 in the x and y\r\n        # directions) to pixel coordinates (with the range changing as\r\n        # the size of the canvas changes).\r\n        for frac_x1, frac_y1, frac_x2, frac_y2 in self._rectangles:\r\n            self._draw_rectangle(frac_x1, frac_y1, frac_x2, frac_y2)\r\n\r\n\r\n    def _draw_rectangle(self, frac_x1: float, frac_y1: float, frac_x2: float, frac_y2: float) -> None:\r\n        # Given the fractional coordinates representing the top-left and\r\n        # bottom-right points of the bounding box around the rectangle we want\r\n        # to draw, draw the corresponding rectangle. We have to convert the\r\n        # coordinates from fractional to pixel in order to draw it,\r\n        # since Canvas' create_rectangle() method expects pixel coordinates.\r\n        # We can do that by multiplying the fractional coordinate by the\r\n        # width or height, respectively.\r\n\r\n        # Though, first, we'll need to find out how big the canvas is, in\r\n        # terms of pixels.\r\n        canvas_width = self._canvas.winfo_width()\r\n        canvas_height = self._canvas.winfo_height()\r\n\r\n        # Now we can do the multiplication and draw the rectangle.\r\n        \r\n        self._canvas.create_rectangle(\r\n            canvas_width * frac_x1, canvas_height * frac_y1,\r\n            canvas_width * frac_x2, canvas_height * frac_y2,\r\n            outline = 'black', fill= 'green', width =3)\r\n\r\n\r\n        self._display_score(\"sd\",\"sd\")\r\n        self._display_turn()\r\n\r\n        self._draw_circles(self._rectangles)  # use the stored list, not the module-level global\r\n        \r\n        \r\n\r\n\r\n    def _display_score(self, tally_black: str, tally_white: str):\r\n        score_board=tkinter.Label(text=\"B: 2 W: 2\", font= ('Helvetica', 20))\r\n        score_board.grid(\r\n            row = 0, column = 0, columnspan = 2, padx = 10, pady = 10,\r\n            sticky = tkinter.N)\r\n        \r\n    def _display_turn(self):\r\n        display_turn=tkinter.Label(text=\"TURN: BLACK\", font= ('Helvetica', 20))\r\n        display_turn.grid(\r\n            row = 2, column = 0, columnspan = 2, padx = 10, pady = 10,\r\n            sticky = tkinter.S)\r\n\r\n    def _on_canvas_clicked(self, event: tkinter.Event) -> None:\r\n        width = self._canvas.winfo_width()\r\n        height = self._canvas.winfo_height()\r\n\r\n        click_point = point.from_pixel(\r\n            event.x, event.y, width, height)\r\n\r\n        print(\"Click point =\", click_point.frac()[0], \", \", click_point.frac()[1])\r\n\r\n    def _draw_circles(self, rectangles):\r\n        canvas_width = self._canvas.winfo_width()\r\n        canvas_height = self._canvas.winfo_height()\r\n        radius=40\r\n        othello_Game_State=[[0, 0, 0, 0],\r\n                            [0, 1, 2, 0],\r\n                            [0, 2, 1, 0],\r\n                            [0, 0, 0, 0]\r\n                            ]\r\n        \r\n        for row in range(len(othello_Game_State)):\r\n            for col in range(len(othello_Game_State[0])):\r\n                row_factor=len(othello_Game_State[0])\r\n                \r\n                if othello_Game_State[row][col]==1:\r\n                    center_x, center_y = self._get_center_coords(row, col, row_factor, rectangles)\r\n                    \r\n                    self._canvas.create_oval(\r\n                        (center_x*canvas_width)-radius , (center_y*canvas_height)- radius,\r\n                        (center_x*canvas_width)+ radius , (center_y*canvas_height)+ radius,\r\n                        fill = 'black', outline = 'white', width=3)\r\n\r\n                    \r\n                elif othello_Game_State[row][col]==2:\r\n                    center_x, center_y = self._get_center_coords(row, col, row_factor, rectangles)\r\n                    \r\n                    self._canvas.create_oval(\r\n                        (center_x*canvas_width)-radius , (center_y*canvas_height)- radius,\r\n                        (center_x*canvas_width)+ radius , (center_y*canvas_height)+ radius,\r\n                        fill = 'white', 
outline = 'black', width =3)\r\n                    \r\n                    \r\n\r\n    def _get_center_coords(self, row: int,col: int, row_factor: int, rectangles)->'center coords (center_x, center_y)':\r\n        \r\n        center_x=(rectangles[row_factor*row+col][0]+rectangles[row_factor*row+col][2])/2\r\n        center_y=(rectangles[row_factor*row+col][1]+rectangles[row_factor*row+col][3])/2\r\n        return(center_x, center_y)\r\n        \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    rectangles = [(0, 0, 0.25, 0.25), (0.25, 0, 0.5, 0.25), (0.5, 0, 0.75, 0.25), (0.75, 0, 1.0, 0.25), (0, 0.25, 0.25, 0.5), (0.25, 0.25, 0.5, 0.5), (0.5, 0.25, 0.75, 0.5), (0.75, 0.25, 1.0, 0.5), (0, 0.5, 0.25, 0.75), (0.25, 0.5, 0.5, 0.75), (0.5, 0.5, 0.75, 0.75), (0.75, 0.5, 1.0, 0.75), (0, 0.75, 0.25, 1.0), (0.25, 0.75, 0.5, 1.0), (0.5, 0.75, 0.75, 1.0), (0.75, 0.75, 1.0, 1.0)]\r\n    othello_Game_State=[[0, 0, 0, 0], [0, 1, 2, 0], [0, 2, 1, 0], [0, 0, 0, 0]]\r\n    \r\n\r\n    '''\r\n    othello_Game_State=[\r\n    \r\n    ]\r\n    '''\r\n\r\n    '''\r\n    (0,0)= (0,0) and (1,1)\r\n    center_coords = ((rectangles[row][col][0]+rectangles[row+1][col+1][0])/2, (rectangles[row][col][1]+rectangles[row+1][col+1][1])/2)\r\n    '''\r\n\r\n    '''\r\n    Logic for generating grids\r\n    rectangles=[]\r\n    x=0\r\n    y=0\r\n    for i in range(4):\r\n        for j in range(4):\r\n            rectangles.append((x,y,(x+1/4),(y+1/4)))\r\n            x+=1/4\r\n        y+=1/4\r\n        x=0\r\n    \r\n    '''\r\n    app = GuiOthello(rectangles)\r\n    app.start()\r\n","sub_path":"othello_game_logic.py","file_name":"othello_game_logic.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"265399183","text":"'''\nHere you create a simple calculator application\nthat asks the user for two values and a mathematical operation,\nperforms the calculation, and then formats and displays the output.\n'''\n\nprint('Simple calculator!')\nfirst_number = input('First number? ')\noperation = input('Operation? ')\nsecond_number = input('Second number? 
')\n\nif not (first_number.isnumeric() and second_number.isnumeric()):\n    print(\"Please input a number\")\n    exit()\nif not (operation == '+' or operation == '-' or operation == '*'\n        or operation == '/'):\n    print('Operation not recognized.')\n    exit()\n\nresult = 0\nif operation == '+':\n    result = int(first_number) + int(second_number)\nelif operation == '-':\n    result = int(first_number) - int(second_number)\nelif operation == '*':\n    result = int(first_number) * int(second_number)\nelif operation == '/':\n    result = int(first_number) / int(second_number)\n\nprint(f'result of {first_number} {operation} {second_number} equals {result}')\n","sub_path":"COURSE/Microsoft/01 Выполните первые шаги с помощью Python/06 Выполнение математических операций с числовыми данными в Python/06/challenge2.py","file_name":"challenge2.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"260692440","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('companies', '0014_auto_20160421_1436'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='xfsformat',\n            name='is_day_first',\n            field=models.BooleanField(default=True, verbose_name='Is day first?'),\n        ),\n    ]\n","sub_path":"atm_analytics/companies/migrations/0015_xfsformat_is_day_first.py","file_name":"0015_xfsformat_is_day_first.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"493273224","text":"import webbrowser\n\n\nclass Movie():\n    \"\"\"This class represents a movie object. Movies tend to have a title,\n    storyline, poster image, and a trailer.\"\"\"\n    def __init__(self, movie_title, movie_storyline, poster_image,\n                 trailer_youtube):\n        \"\"\"self is the object being created. We can establish fields\n        by using the self reference with a '.' and the name of the\n        field to establish being set to a value. A helpful convention\n        is to name the field similar to the passed in parameters.\n        Ex. : 'self.title = movie_title' \"\"\"\n        self.title = movie_title\n        self.storyline = movie_storyline\n        self.poster_image_url = poster_image\n        self.trailer_youtube_url = trailer_youtube\n\n    def show_trailer(self):\n        \"\"\"The show_trailer function opens up a browser and displays the\n        content in a new tab.\"\"\"\n        webbrowser.open(self.trailer_youtube_url)\n
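\n# Hypothetical instantiation (the title, storyline, and URLs are placeholders,\n# not real assets):\n#\n# toy_story = Movie('Toy Story', 'A story of a boy and his toys',\n#                   'https://example.com/toy_story_poster.jpg',\n#                   'https://www.youtube.com/watch?v=example')\n# toy_story.show_trailer()  # opens the trailer URL in a new browser tab\n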
","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"169955980","text":"##\n# From flask quickstart:\n# http://flask.pocoo.org/docs/0.12/quickstart/\n#\n# Rendering Templates - Case 1: a module\n#\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\nfrom flask import render_template\n\napp = Flask(__name__)\n\n##\n# If we get a name in the url,\n#   use that as the name\n#   set it in a cookie\n# else\n#   if the name is set in the cookie\n#     use the name in the cookie\n#   else\n#     user is anonymous\n#\n# A value of 'unset??' means there is a logic error\n#\ndef try_to_get_cookie( url_name='' ):\n    users_name = 'unset??'\n    if url_name == '':\n        # cookies.get() returns None rather than raising when the cookie is absent\n        users_name = request.cookies.get( 'users_name' )\n        if users_name is None:\n            users_name = 'Anonymous'\n        return render_template( 'index.html', name=users_name )\n    else:\n        users_name = url_name\n        response = make_response(render_template( 'index.html', name=users_name) )\n        response.set_cookie( 'users_name', users_name )\n        return response\n\n@app.route('/')\n@app.route('/<url_name>')\ndef index(url_name=''):\n    return try_to_get_cookie( url_name )\n\n@app.route('/hello/')\n@app.route('/hello/<url_name>')\ndef hello(url_name=''):\n    return try_to_get_cookie( url_name )\n\n","sub_path":"08-real_python_class/2017_01_24-Lesson_1/homework/1-flask_quickstart/3-cookies/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"233903357","text":"from django.shortcuts import render\nfrom .models import Post\n# Create your views here.\ndef blog_list(request):\n    post = Post.objects.all()\n    context = {\n        'blog_list':post\n    }\n    return render(request, \"blog/blog_list.html\", context)\n\n\n","sub_path":"project/prototype/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543292632","text":"\"\"\"\nutilities for signal processing\n\"\"\"\n\nfrom numpy import sqrt, fix, pi, median, std, sum, mean, shape, zeros, roll, dot, angle, abs\nfrom scipy.linalg import norm\nfrom scipy.io import loadmat\nfrom numpy.fft import fft\n\n\nclass SigProcessingMethod(object):\n    \"\"\"class for doing signal processing\"\"\"\n\n    @staticmethod\n    def load(method, **opts):\n        return SIGPROCESSING_METHODS[method](**opts)\n\n    def get(self, y):\n        pass\n\n    def calc(self, data):\n        result = data.mapValues(lambda x: self.get(x))\n        return result\n\n\nclass FourierMethod(SigProcessingMethod):\n    \"\"\"class for computing fourier transform\"\"\"\n\n    def __init__(self, freq):\n        \"\"\"get frequency\"\"\"\n\n        self.freq = freq\n\n    def get(self, y):\n        \"\"\"compute fourier amplitude (coherence) and phase\"\"\"\n\n        y = y - mean(y)\n        nframes = len(y)\n        ft = fft(y)\n        ft = ft[0:int(fix(nframes/2))]\n        amp_ft = 2*abs(ft)/nframes\n        amp = amp_ft[self.freq]\n        amp_sum = sqrt(sum(amp_ft**2))\n        co = amp / amp_sum\n        ph = -(pi/2) - angle(ft[self.freq])\n        if ph < 0:\n            ph += pi * 2\n        return co, ph\n\n\nclass StatsMethod(SigProcessingMethod):\n    \"\"\"class for computing simple summary statistics\"\"\"\n\n    def __init__(self, statistic):\n        \"\"\"get statistic\"\"\"\n        self.func = {\n            'median': lambda x: median(x),\n            'mean': lambda x: mean(x),\n            'std': lambda x: std(x),\n            'norm': lambda x: norm(x - mean(x)),\n        }[statistic]\n\n    def get(self, y):\n        \"\"\"compute the chosen summary statistic\"\"\"\n\n        return self.func(y)\n\n\nclass QueryMethod(SigProcessingMethod):\n    \"\"\"class for computing averages over indices\"\"\"\n\n    def __init__(self, indsfile):\n        \"\"\"get indices\"\"\"\n        if type(indsfile) is str:\n            inds = loadmat(indsfile)['inds'][0]\n        else:\n            inds = indsfile\n        self.inds = inds\n        self.n = len(inds)\n
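\n\n# A small dispatch sketch (illustrative values; 'stats' resolves through the\n# SIGPROCESSING_METHODS registry defined at the bottom of this module):\n#\n# method = SigProcessingMethod.load('stats', statistic='median')\n# method.get([1.0, 2.0, 4.0])  # -> 2.0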
\n\nclass CrossCorrMethod(SigProcessingMethod):\n    \"\"\"class for computing lagged cross correlations\"\"\"\n\n    def __init__(self, sigfile, lag):\n        \"\"\"load parameters. sigfile can be an array, or a string;\n        if it's a string, assumes the signal is a MAT file\n        with name sigfile_X\n        \"\"\"\n        if type(sigfile) is str:\n            x = loadmat(sigfile + \"_X.mat\")['X'][0]\n        else:\n            x = sigfile\n        x = x - mean(x)\n        x = x / norm(x)\n\n        if lag != 0:\n            shifts = range(-lag, lag+1)\n            d = len(x)\n            m = len(shifts)\n            x_shifted = zeros((m, d))\n            for ix in range(0, len(shifts)):\n                tmp = roll(x, shifts[ix])\n                if shifts[ix] < 0:  # zero padding\n                    tmp[(d+shifts[ix]):] = 0\n                if shifts[ix] > 0:\n                    tmp[:shifts[ix]] = 0\n                x_shifted[ix, :] = tmp\n            self.x = x_shifted\n        else:\n            self.x = x\n\n    def get(self, y):\n        \"\"\"compute cross correlation between y and x\"\"\"\n\n        y = y - mean(y)\n        n = norm(y)\n        if n == 0:\n            b = zeros((shape(self.x)[0],))\n        else:\n            y /= norm(y)\n            b = dot(self.x, y)\n        return b\n\n\nSIGPROCESSING_METHODS = {\n    'stats': StatsMethod,\n    'fourier': FourierMethod,\n    'crosscorr': CrossCorrMethod,\n    'query': QueryMethod\n}","sub_path":"python/thunder/sigprocessing/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"503178398","text":"# Problem 14: scoville\nimport heapq\ndef scov(scoville, K):\n    answer = 0\n    heapq.heapify(scoville)\n    while scoville[0] < K and len(scoville) > 1:\n        heapq.heappush(scoville, heapq.heappop(scoville) + heapq.heappop(scoville)*2)\n        answer += 1\n\n    if scoville[0] < K:\n        return -1\n    return answer\n\ndef gen_serial_ports() -> Iterator[Tuple[str, str, str]]:\n    \"\"\"Return all available serial ports.\"\"\"\n    ports = QSerialPortInfo.availablePorts()\n    return ((p.description(), p.portName(), p.systemLocation()) for p in ports)\n\n#https://stackoverflow.com/a/50914550\ndef resource_path(relative_path):\n    \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n    base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n    return os.path.join(base_path, relative_path)\n\n#https://stackoverflow.com/questions/20324804/how-to-use-qthread-correctly-in-pyqt-with-movetothread\ndef logthread(caller):\n    print('%-25s:\\t %s, %s,' % (caller, threading.current_thread().name,\n                                threading.current_thread().ident))\n\n# ///// START of code taken from artemis_svl.py\n# Commands\nSVL_CMD_VER = 0x01  # version\nSVL_CMD_BL = 0x02  # enter bootload mode\nSVL_CMD_NEXT = 0x03  # request next chunk\nSVL_CMD_FRAME = 0x04  # indicate app data frame\nSVL_CMD_RETRY = 0x05  # request re-send frame\nSVL_CMD_DONE = 0x06  # finished - all data sent\nSVL_CMD_MSG = 0x07  # message\nSVL_CMD_DATE = 0x08  # update date/time\n\ncrcTable = (\n    0x0000, 0x8005, 0x800F, 0x000A, 0x801B, 0x001E, 0x0014, 0x8011,\n    0x8033, 0x0036, 0x003C, 0x8039, 0x0028, 0x802D, 0x8027, 0x0022,\n    0x8063, 0x0066, 0x006C, 0x8069, 0x0078, 0x807D, 0x8077, 0x0072,\n    0x0050, 0x8055, 0x805F, 0x005A, 0x804B, 0x004E, 0x0044, 0x8041,\n    0x80C3, 0x00C6, 0x00CC, 0x80C9, 0x00D8, 0x80DD, 0x80D7, 0x00D2,\n    0x00F0, 0x80F5, 0x80FF, 0x00FA, 0x80EB, 0x00EE, 0x00E4, 0x80E1,\n    0x00A0, 0x80A5, 0x80AF, 0x00AA, 0x80BB, 0x00BE, 0x00B4, 0x80B1,\n    0x8093, 0x0096, 0x009C, 0x8099, 0x0088, 0x808D, 0x8087, 0x0082,\n    0x8183, 0x0186, 0x018C, 0x8189, 0x0198, 0x819D, 0x8197, 0x0192,\n    0x01B0, 0x81B5, 0x81BF, 0x01BA, 0x81AB, 0x01AE, 0x01A4, 0x81A1,\n    0x01E0, 0x81E5, 0x81EF, 0x01EA, 0x81FB, 0x01FE, 0x01F4, 0x81F1,\n    0x81D3, 0x01D6, 0x01DC, 0x81D9, 0x01C8, 0x81CD, 0x81C7, 0x01C2,\n    0x0140, 0x8145, 0x814F, 0x014A, 0x815B, 0x015E, 0x0154, 0x8151,\n    0x8173, 0x0176, 0x017C, 0x8179, 0x0168, 0x816D, 0x8167, 0x0162,\n    0x8123, 0x0126, 0x012C, 0x8129, 0x0138, 0x813D, 0x8137, 
0x0132,\n 0x0110, 0x8115, 0x811F, 0x011A, 0x810B, 0x010E, 0x0104, 0x8101,\n 0x8303, 0x0306, 0x030C, 0x8309, 0x0318, 0x831D, 0x8317, 0x0312,\n 0x0330, 0x8335, 0x833F, 0x033A, 0x832B, 0x032E, 0x0324, 0x8321,\n 0x0360, 0x8365, 0x836F, 0x036A, 0x837B, 0x037E, 0x0374, 0x8371,\n 0x8353, 0x0356, 0x035C, 0x8359, 0x0348, 0x834D, 0x8347, 0x0342,\n 0x03C0, 0x83C5, 0x83CF, 0x03CA, 0x83DB, 0x03DE, 0x03D4, 0x83D1,\n 0x83F3, 0x03F6, 0x03FC, 0x83F9, 0x03E8, 0x83ED, 0x83E7, 0x03E2,\n 0x83A3, 0x03A6, 0x03AC, 0x83A9, 0x03B8, 0x83BD, 0x83B7, 0x03B2,\n 0x0390, 0x8395, 0x839F, 0x039A, 0x838B, 0x038E, 0x0384, 0x8381,\n 0x0280, 0x8285, 0x828F, 0x028A, 0x829B, 0x029E, 0x0294, 0x8291,\n 0x82B3, 0x02B6, 0x02BC, 0x82B9, 0x02A8, 0x82AD, 0x82A7, 0x02A2,\n 0x82E3, 0x02E6, 0x02EC, 0x82E9, 0x02F8, 0x82FD, 0x82F7, 0x02F2,\n 0x02D0, 0x82D5, 0x82DF, 0x02DA, 0x82CB, 0x02CE, 0x02C4, 0x82C1,\n 0x8243, 0x0246, 0x024C, 0x8249, 0x0258, 0x825D, 0x8257, 0x0252,\n 0x0270, 0x8275, 0x827F, 0x027A, 0x826B, 0x026E, 0x0264, 0x8261,\n 0x0220, 0x8225, 0x822F, 0x022A, 0x823B, 0x023E, 0x0234, 0x8231,\n 0x8213, 0x0216, 0x021C, 0x8219, 0x0208, 0x820D, 0x8207, 0x0202)\n\ndef get_crc16(data) -> int:\n \"\"\"Compute CRC on a byte array\"\"\"\n #logthread('Global.get_crc16')\n\n #Table and code ported from Artemis SVL bootloader\n crc = 0x0000\n data = bytearray(data)\n for ch in data:\n tableAddr = ch ^ (crc >> 8)\n CRCH = (crcTable[tableAddr] >> 8) ^ (crc & 0xFF)\n CRCL = crcTable[tableAddr] & 0x00FF\n crc = CRCH << 8 | CRCL\n return crc\n\ndef wait_for_packet(ser) -> dict:\n \"\"\"Wait for a packet\"\"\"\n #logthread('Global.wait_for_packet')\n\n packet = {'len':0, 'cmd':0, 'data':0, 'crc':1, 'timeout':1}\n\n n = ser.read(2) # get the length bytes\n if(len(n) < 2):\n return packet\n\n packet['len'] = int.from_bytes(n, byteorder='big', signed=False)\n\n if(packet['len'] == 0): # Check for an empty packet\n return packet\n\n payload = ser.read(packet['len']) #read bytes (or timeout)\n\n if(len(payload) != packet['len']):\n return packet\n\n packet['timeout'] = 0 # all bytes received, so timeout is not true\n packet['cmd'] = payload[0] # cmd is the first byte of the payload\n packet['data'] = payload[1:packet['len']-2] # the data is the part of the payload that is not cmd or crc\n packet['crc'] = get_crc16(payload) # performing the crc on the whole payload should return 0\n\n return packet\n\n\ndef send_packet(ser, cmd, data) -> None:\n \"\"\"Send a packet\"\"\"\n #logthread('Global.send_packet')\n data = bytearray(data)\n num_bytes = 3 + len(data)\n payload = bytearray(cmd.to_bytes(1,'big'))\n payload.extend(data)\n crc = get_crc16(payload)\n payload.extend(bytearray(crc.to_bytes(2,'big')))\n\n ser.write(num_bytes.to_bytes(2,'big'))\n ser.write(bytes(payload))\n\n# ///// END of code taken from artemis_svl.py\n\n# noinspection PyArgumentList\n\nclass MainWindow(QMainWindow):\n \"\"\"Main Window\"\"\"\n def __init__(self, parent: QMainWindow = None) -> None:\n super().__init__(parent)\n\n self.ser = None\n self.isSerialSettingChanged = False\n self.setupUi()\n self.showDatetime()\n\n ## Create uploader object and thread\n #self.uploader = Uploader()\n #self.upload_thread = QThread()\n #self.uploader.moveToThread(self.upload_thread)\n #self.uploader.addMessage[str].connect(self.addMessage)\n #self.uploader.addMessageRemote[str].connect(self.addMessageRemote)\n #self.upload_thread.started.connect(self.uploader.upload_main)\n #self.uploader.finished.connect(self.done)\n\n logthread('mainwin.__init__')\n \n def setupUi(self):\n # File location line 
edit\n msg_label = QLabel(self.tr('Firmware File:'))\n self.fileLocation_lineedit = QLineEdit()\n msg_label.setBuddy(self.fileLocation_lineedit)\n self.fileLocation_lineedit.setEnabled(False)\n self.fileLocation_lineedit.returnPressed.connect(\n self.on_browse_btn_pressed)\n\n # Browse for new file button\n self.browse_btn = QPushButton(self.tr('Browse'))\n self.browse_btn.setEnabled(True)\n self.browse_btn.pressed.connect(self.on_browse_btn_pressed)\n\n # Port Combobox\n self.isCOMPortsUpdated = False\n port_label = QLabel(self.tr('COM Port:'))\n self.port_combobox = QComboBox()\n port_label.setBuddy(self.port_combobox)\n self.port_combobox.currentIndexChanged.connect(self.on_combobox_changed)\n self.update_com_ports()\n\n # Refresh Button\n self.refresh_btn = QPushButton(self.tr('Refresh'))\n self.refresh_btn.pressed.connect(self.on_refresh_btn_pressed)\n\n # Clear Button\n self.clear_btn = QPushButton(self.tr('Clear Log'))\n self.clear_btn.pressed.connect(self.on_clear_btn_pressed)\n\n # Baudrate Combobox\n self.isBaudUpdated = False\n baud_label = QLabel(self.tr('Baud Rate:'))\n self.baud_combobox = QComboBox()\n baud_label.setBuddy(self.baud_combobox)\n self.baud_combobox.currentIndexChanged.connect(self.on_combobox_changed)\n self.update_baud_rates()\n\n myFont=QFont()\n myFont.setBold(True)\n # Update Datetime Button\n self.update_dt_btn = QPushButton(self.tr(' Update Date/Time '))\n self.update_dt_btn.setFont(myFont)\n self.update_dt_btn.setFixedWidth(150)\n self.update_dt_btn.pressed.connect(self.on_update_dt_btn_pressed)\n\n # Upload Button\n self.upload_btn = QPushButton(self.tr(' Upload Firmware '))\n self.upload_btn.setFont(myFont)\n self.upload_btn.setFixedWidth(150)\n self.upload_btn.pressed.connect(self.on_upload_btn_pressed)\n\n # Connect UART Button\n self.connect_btn = QPushButton(self.tr(' Connect '))\n self.connect_btn.setFont(myFont)\n self.connect_btn.setFixedWidth(150)\n self.connect_btn.pressed.connect(self.on_connect_btn_pressed)\n\n ## Upload Bootloader Button\n #self.updateBootloader_btn = QPushButton(self.tr(' Update Bootloader '))\n #self.updateBootloader_btn.pressed.connect(self.on_update_bootloader_btn_pressed)\n\n # Messages Bar\n messages_label = QLabel(self.tr('Status / Warnings:'))\n\n # Messages Window\n self.messages = QPlainTextEdit()\n # Attempting to reduce window size\n #self.messages.setMinimumSize(1, 2)\n #self.messages.resize(1, 2)\n\n # Remote Messages Bar\n messages_label_remote = QLabel(self.tr('Remote Status / Warnings:'))\n\n # Remote Messages Window\n self.messages_remote = QPlainTextEdit()\n # Attempting to reduce window size\n #self.messages_remote.setMinimumSize(1, 2)\n #self.messages_remote.resize(1, 2)\n\n ## Menu Bar\n #menubar = self.menuBar()\n #boardMenu = menubar.addMenu('Board Type')\n \n #boardGroup = QActionGroup(self)\n\n #self.artemis = QAction('Artemis', self, checkable=True)\n #self.artemis.setStatusTip('Artemis-based boards including the OLA and AGT')\n #self.artemis.setChecked(True) # Default to artemis\n #a = boardGroup.addAction(self.artemis)\n #boardMenu.addAction(a)\n \n #self.apollo3 = QAction('Apollo3', self, checkable=True)\n #self.apollo3.setStatusTip('Apollo3 Blue development boards including the SparkFun Edge')\n #a = boardGroup.addAction(self.apollo3)\n #boardMenu.addAction(a)\n\n # Status Bar\n self.statusBar()\n\n # Arrange Layout\n layout = QGridLayout()\n \n layout.addWidget(msg_label, 1, 0)\n layout.addWidget(self.fileLocation_lineedit, 1, 1)\n layout.addWidget(self.browse_btn, 1, 2)\n\n 
layout.addWidget(port_label, 2, 0)\n layout.addWidget(self.port_combobox, 2, 1)\n layout.addWidget(self.refresh_btn, 2, 2)\n\n layout.addWidget(baud_label, 3, 0)\n layout.addWidget(self.baud_combobox, 3, 1)\n layout.addWidget(self.clear_btn, 3, 2)\n\n layout.addWidget(messages_label, 4, 0)\n layout.addWidget(self.messages, 5, 0, 5, 3)\n\n layout.addWidget(messages_label_remote, 15, 0)\n layout.addWidget(self.messages_remote, 16, 0, 16, 3)\n\n layout.addWidget(self.update_dt_btn, 36, 0)\n layout.addWidget(self.upload_btn, 36, 1)\n layout.addWidget(self.connect_btn, 36, 2)\n #layout.addWidget(self.updateBootloader_btn, 36, 1)\n\n widget = QWidget()\n widget.setLayout(layout)\n self.setCentralWidget(widget)\n\n #self._clean_settings() # This will delete all existing settings! Use with caution!\n \n self._load_settings()\n\n # Make the text edit window read-only\n self.messages.setReadOnly(True)\n self.messages.clear() # Clear the message window\n self.messages_remote.setReadOnly(True)\n self.messages_remote.clear() # Clear the message window\n\n def showDatetime(self):\n currentDT = datetime.datetime.now()\n self.addMessage(currentDT.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n def addMessage(self, msg: str) -> None:\n \"\"\"Add msg to the messages window, ensuring that it is visible\"\"\"\n #logthread('mainwin.addMessage')\n self.messages.moveCursor(QTextCursor.End)\n #self.messages.ensureCursorVisible()\n self.messages.appendPlainText(msg)\n #self.messages.ensureCursorVisible()\n self.messages.repaint() # Update/refresh the message window\n \n def addMessageRemote(self, msg: str) -> None:\n \"\"\"Add msg to the remote messages window, ensuring that it is visible\"\"\"\n #logthread('mainwin.addMessageRemote')\n self.messages_remote.moveCursor(QTextCursor.End)\n #self.messages_remote.ensureCursorVisible()\n self.messages_remote.appendPlainText(msg)\n #self.messages_remote.ensureCursorVisible()\n self.messages_remote.repaint() # Update/refresh the remote message window\n\n def _load_settings(self) -> None:\n \"\"\"Load settings on startup.\"\"\"\n logthread('mainwin._load_settings')\n settings = QSettings()\n\n port_name = settings.value(SETTING_PORT_NAME)\n if port_name is not None:\n index = self.port_combobox.findData(port_name)\n if index > -1:\n self.port_combobox.setCurrentIndex(index)\n\n lastFile = settings.value(SETTING_FILE_LOCATION)\n if lastFile is not None:\n self.fileLocation_lineedit.setText(lastFile)\n\n baud = settings.value(SETTING_BAUD_RATE)\n if baud is not None:\n index = self.baud_combobox.findData(baud)\n if index > -1:\n self.baud_combobox.setCurrentIndex(index)\n\n #checked = settings.value(SETTING_ARTEMIS)\n #if checked is not None:\n # if (checked == 'True'):\n # self.artemis.setChecked(True)\n # self.apollo3.setChecked(False)\n # else:\n # self.artemis.setChecked(False)\n # self.apollo3.setChecked(True)\n\n def _save_settings(self) -> None:\n \"\"\"Save settings on shutdown.\"\"\"\n logthread('mainwin._save_settings')\n settings = QSettings()\n settings.setValue(SETTING_PORT_NAME, self.port)\n settings.setValue(SETTING_FILE_LOCATION, self.fileLocation_lineedit.text())\n settings.setValue(SETTING_BAUD_RATE, self.baudRate)\n #if (self.artemis.isChecked()): # Convert isChecked to str\n # checkedStr = 'True'\n #else:\n # checkedStr = 'False'\n #settings.setValue(SETTING_ARTEMIS, checkedStr)\n\n def _clean_settings(self) -> None:\n \"\"\"Clean (remove) all existing settings.\"\"\"\n logthread('mainwin._clean_settings')\n settings = QSettings()\n settings.clear()\n\n def 
show_error_message(self, msg: str) -> None:\n        \"\"\"Show a Message Box with the error message.\"\"\"\n        logthread('mainwin.show_error_message')\n        QMessageBox.critical(self, QApplication.applicationName(), str(msg))\n\n    def update_com_ports(self) -> None:\n        \"\"\"Update COM Port list in GUI.\"\"\"\n        logthread('mainwin.update_com_ports')\n        previousPort = self.port # Record the previous port before we clear the combobox\n\n        self.port_combobox.clear()\n\n        index = 0\n        indexOfCH340 = -1\n        indexOfPrevious = -1\n        for desc, name, sys in gen_serial_ports():\n            longname = desc + \" (\" + name + \")\"\n            self.port_combobox.addItem(longname, sys)\n            if(\"CH340\" in longname):\n                # Select the first available CH340\n                # This is likely to only work on Windows. Linux port names are different.\n                if (indexOfCH340 == -1):\n                    indexOfCH340 = index\n                    # it could be too early to call\n                    #self.addMessage(\"CH340 found at index \" + str(indexOfCH340))\n                    # as the GUI might not exist yet\n            if(sys == previousPort): # Previous port still exists so record it\n                indexOfPrevious = index\n            index = index + 1\n\n        if indexOfPrevious > -1: # Restore the previous port if it still exists\n            self.port_combobox.setCurrentIndex(indexOfPrevious)\n        if indexOfCH340 > -1: # If we found a CH340, let that take priority\n            self.port_combobox.setCurrentIndex(indexOfCH340)\n\n    def update_baud_rates(self) -> None:\n        \"\"\"Update baud rate list in GUI.\"\"\"\n        logthread('mainwin.update_baud_rates')\n        # Lowest speed first so code defaults to that\n        # if settings.value(SETTING_BAUD_RATE) is None\n        self.baud_combobox.clear()\n        self.baud_combobox.addItem(\"115200\", 115200)\n        self.baud_combobox.addItem(\"460800\", 460800)\n        self.baud_combobox.addItem(\"921600\", 921600)\n\n    @property\n    def port(self) -> str:\n        \"\"\"Return the current serial port.\"\"\"\n        logthread('mainwin.port')\n        return self.port_combobox.currentData()\n\n    @property\n    def baudRate(self) -> str:\n        \"\"\"Return the current baud rate.\"\"\"\n        logthread('mainwin.baudRate')\n        return self.baud_combobox.currentData()\n\n    def closeEvent(self, event: QCloseEvent) -> None:\n        \"\"\"Handle Close event of the Widget.\"\"\"\n        logthread('mainwin.closeEvent')\n        self._save_settings()\n\n        event.accept()\n\n    def on_combobox_changed(self):\n        self.isSerialSettingChanged = True\n        if ((self.ser != None) and (self.ser.is_open == True)):\n            self.addMessage(\"Port \" + self.port + \" is open, going to close it\\n\")\n            self.ser.close()\n            self.connect_btn.setText(\"Connect\")\n\n    def on_browse_btn_pressed(self) -> None:\n        \"\"\"Open dialog to select bin file.\"\"\"\n        logthread('mainwin.on_browse_btn_pressed')\n        options = QFileDialog.Options()\n        fileName, _ = QFileDialog.getOpenFileName(\n            None,\n            \"Select Firmware to Upload\",\n            \"\",\n            \"Firmware Files (*.bin);;All Files (*)\",\n            options=options)\n        if fileName:\n            self.fileLocation_lineedit.setText(fileName)\n\n    def on_refresh_btn_pressed(self) -> None:\n        logthread('mainwin.on_refresh_btn_pressed')\n        self.update_com_ports()\n        self.addMessage(\"Ports Refreshed\\n\")\n\n    def on_clear_btn_pressed(self) -> None:\n        logthread('mainwin.on_clear_btn_pressed')\n        self.messages.clear()\n        self.messages_remote.clear()\n        self.showDatetime()\n\n    def on_connect_btn_pressed(self) -> None:\n        logthread('mainwin.on_connect_btn_pressed')\n        if ((self.ser == None) or (self.isSerialSettingChanged == True)):\n            self.addMessage(\"Updated settings of port \" + str(self.port))\n            self.ser = serial.Serial(None, self.baudRate, timeout=1)\n            self.ser.port = self.port\n            self.isSerialSettingChanged = 
False\n try:\n if (self.ser.is_open == True):\n self.addMessage(\"Closed port \" + self.port)\n self.ser.close()\n self.connect_btn.setText(\"Connect\")\n else:\n # Open the serial port\n self.addMessage(\"Opened port \" + self.port + \" at \" + str(self.baudRate) + \" Baud\")\n self.ser.open()\n self.connect_btn.setText(\"Disconnect\")\n except IOError:\n self.addMessage(\"Failed to connect to port \" + str(self.port))\n\n def on_update_dt_btn_pressed(self) -> None:\n logthread('mainwin.on_update_dt_btn_pressed')\n currDT = datetime.datetime.now()\n if (self.ser != None):\n if (self.ser.is_open == True):\n self.addMessage(\"Updated remote date/time to \" + currDT.strftime(\"%Y-%m-%d %H:%M:%S\"))\n DT = str(currDT.year - 2000) + \" \" + \\\n str(currDT.month) + \" \" + \\\n str(currDT.day) + \" \" + \\\n str(currDT.hour) + \" \" + \\\n str(currDT.minute) + \" \" + \\\n str(currDT.second) + \"\\r\\n\"\n payload = bytes('DateTime ' + DT, encoding='utf8')\n self.ser.write(payload) \n #remoteReply = self.ser.read(30)\n #self.addMessage(\"Received reply \" + str(remoteReply))\n else:\n self.addMessage(\"Serial port is not open, connect a port first\")\n else:\n self.addMessage(\"Serial port is not open, connect a port first\")\n\n def on_upload_btn_pressed(self) -> None:\n \"\"\"Check if port is available\"\"\"\n logthread('mainwin.on_upload_btn_pressed')\n portAvailable = False\n for desc, name, sys in gen_serial_ports():\n if (sys == self.port):\n portAvailable = True\n if (portAvailable == False):\n self.addMessage(\"Port No Longer Available\")\n return\n\n \"\"\"Check if file exists\"\"\"\n fileExists = False\n try:\n f = open(self.fileLocation_lineedit.text())\n fileExists = True\n except IOError:\n fileExists = False\n finally:\n if (fileExists == False):\n self.addMessage(\"File Not Found\")\n return\n f.close()\n\n self.addMessage(\"\\nUploading firmware\")\n\n # Create uploader and thread\n self.uploader = Uploader(self.ser, self.fileLocation_lineedit.text())\n self.upload_thread = QThread()\n self.uploader.moveToThread(self.upload_thread)\n\n #self.uploader.set_serial_connection(self.ser)\n #self.uploader.set_file_location(self.fileLocation_lineedit.text())\n\n # Connect signals and slots\n self.uploader.addMessage[str].connect(self.addMessage)\n self.uploader.addMessageRemote[str].connect(self.addMessageRemote)\n self.upload_thread.started.connect(self.uploader.upload_main)\n self.uploader.finished.connect(self.done)\n\n # start uploader\n self.upload_thread.start()\n\n self.browse_btn.setEnabled(False)\n self.refresh_btn.setEnabled(False)\n self.clear_btn.setEnabled(False)\n self.upload_btn.setEnabled(False)\n self.update_dt_btn.setEnabled(False)\n\n def done(self):\n logthread('mainwin.done')\n\n # Enable buttons when done\n self.browse_btn.setEnabled(True)\n self.refresh_btn.setEnabled(True)\n self.clear_btn.setEnabled(True)\n self.upload_btn.setEnabled(True)\n self.update_dt_btn.setEnabled(True)\n\n self.upload_thread.quit()\n self.uploader.deleteLater()\n self.upload_thread.deleteLater()\n\nclass Uploader(QObject):\n addMessage = pyqtSignal(str)\n addMessageRemote = pyqtSignal(str)\n finished = pyqtSignal()\n\n def __init__(self, ser, fileLocation):\n #QThread.__init__(self)\n super(QObject, self).__init__()\n logthread('Uploader.__init__')\n \n self.ser = ser\n self.fileLocation = fileLocation\n self.installed_bootloader = -1 # Use this to record the bootloader version\n self.barWidthInCharacters = 50 # Width of progress bar, ie [###### % complete (NOT USED)\n\n #def 
__del__(self):\n    #    logthread('Uploader.__del__')\n    #    self.upload_thread.wait()\n\n    def set_serial_connection(self, ser):\n        self.ser = ser\n\n    def set_file_location(self, fileLocation):\n        self.fileLocation = fileLocation\n\n    def phase_setup(self) -> bool:\n        \"\"\"Setup: signal baud rate, get version, and command BL enter\"\"\"\n        logthread('Uploader.phase_setup')\n        upgrade_cmd = b'Upgrade'\n        baud_detect_byte = b'U'\n\n        version_pkg_received = False\n        setup_failed = False\n        packet_counter = 0\n\n        self.addMessage.emit(\"Phase:\\tSetup\")\n\n        self.ser.write(bytes(upgrade_cmd)) # send the upgrade command\n\n        self.addMessage.emit(\"\\tSent upgrade_cmd\")\n        time.sleep(5) # wait five seconds for ama3 to enter bootloader mode\n\n        self.ser.reset_input_buffer() # Handle the serial startup blip\n        self.addMessage.emit(\"\\tCleared startup blip\")\n\n        self.ser.write(baud_detect_byte) # send the baud detection character\n        self.addMessage.emit(\"\\tSent baud_detect_byte\")\n\n        while((not version_pkg_received) and (not setup_failed)):\n\n            packet = wait_for_packet(self.ser)\n\n            if(packet['timeout']):\n                self.addMessage.emit(\"\\twait_for_packet timeout\")\n                setup_failed = True\n                break \n            if(packet['crc']):\n                self.addMessage.emit(\"\\twait_for_packet crc error\")\n                setup_failed = True\n                break\n            \n            if(packet['cmd'] == SVL_CMD_VER):\n                self.addMessage.emit(\"\\twait_for_packet complete\")\n                self.installed_bootloader = int.from_bytes(packet['data'], 'big')\n                self.addMessage.emit(\"\\tGot SVL Bootloader Version: \" + str(self.installed_bootloader))\n                self.addMessage.emit(\"\\tSending \\'enter bootloader\\' command\")\n\n                version_pkg_received = True\n\n                send_packet(self.ser, SVL_CMD_BL, b'')\n                #self.addMessage.emit(\"\\tfinished send_packet\")\n                break\n\n            if(packet['cmd'] == SVL_CMD_MSG):\n                self.addMessageRemote.emit(packet['data'].decode('ascii'))\n\n            packet_counter += 1\n            if(packet_counter > 10): # There should be fewer than 10 message packets before the version packet\n                self.addMessage.emit(\"\\tNo version packet received in time\")\n                setup_failed = True\n                break\n        \n        return setup_failed\n        # Now enter the bootload phase\n\n    def phase_bootload(self) -> bool:\n        \"\"\"Bootloader phase (Artemis is locked in)\"\"\"\n        logthread('Uploader.phase_bootload')\n        startTime = time.time()\n        frame_size = 512*4\n\n        resend_max = 4\n        resend_count = 0\n\n        self.addMessage.emit(\"Phase:\\tBootload\")\n\n        with open(self.fileLocation, mode='rb') as binfile:\n            application = binfile.read()\n            total_len = len(application)\n\n            total_frames = math.ceil(total_len/frame_size)\n            curr_frame = 0\n            progressChars = 0\n\n            self.addMessage.emit(\"\\tSending \" + str(total_len) +\n                                 \" bytes in \" + str(total_frames) + \" frames\")\n\n            bl_done = False\n            bl_failed = False\n            done_sent = False\n\n            while((not bl_done) and (not bl_failed)):\n\n                packet = wait_for_packet(self.ser) # wait for indication by Artemis\n\n                #print(packet)\n\n                if( packet['cmd'] == SVL_CMD_MSG ):\n                    self.addMessageRemote.emit(packet['data'].decode('ascii'))\n                elif( packet['cmd'] == SVL_CMD_DONE ):\n                    bl_done = True\n                    break\n                elif( not done_sent ):\n                    if((packet['timeout'] or packet['crc'])):\n                        self.addMessage.emit(\"\\tError receiving packet\")\n                        bl_failed = True\n                        bl_done = True\n                        break\n\n                    if( packet['cmd'] == SVL_CMD_NEXT ):\n                        self.addMessage.emit(\"\\tGot frame request\")\n                        curr_frame += 1\n                        resend_count = 0\n                    elif( packet['cmd'] == SVL_CMD_RETRY ):\n                        self.addMessage.emit(\"\\tRetrying...\")\n                        resend_count += 1\n                        if( resend_count >= resend_max ):\n                            bl_failed = True\n                            bl_done = True\n                            
break\n                    else:\n                        self.addMessage.emit(\"\\tUnknown error\")\n                        bl_failed = True\n                        bl_done = True\n                        break\n\n                    if( curr_frame <= total_frames ):\n                        frame_data = application[((curr_frame-1)*frame_size):((curr_frame-1+1)*frame_size)]\n                        self.addMessage.emit(\"\\tSending frame #\" + str(curr_frame) + \", length: \" + str(len(frame_data)))\n                        send_packet(self.ser, SVL_CMD_FRAME, frame_data)\n                    else:\n                        send_packet(self.ser, SVL_CMD_DONE, b'')\n                        done_sent = True\n\n            if( bl_failed == False ):\n                self.addMessage.emit(\"Upload complete!\")\n                endTime = time.time()\n                bps = total_len / (endTime - startTime)\n                self.addMessage.emit(\"Nominal bootload \" + str(round(bps, 2)) + \" bytes/sec\\n\")\n            else:\n                self.addMessage.emit(\"Upload failed!\\n\")\n                if (self.ser.baudrate > 115200): # Uploader keeps no baud-rate field of its own; read the port's setting\n                    self.addMessage.emit(\"Please try a slower Baud Rate\\n\")\n\n        return bl_failed\n\n    @pyqtSlot()\n    def upload_main(self) -> None:\n        \"\"\"SparkFun Variable Loader (Variable baud rate bootloader for Artemis Apollo3 modules)\"\"\"\n        logthread('Uploader.upload_main')\n        try:\n            num_tries = 3\n\n            #self.messages.clear() # Clear the message window\n\n            self.addMessage.emit(\"\\nLocaSafe UT221 SVL Uploader\\n\")\n\n            for _ in range(num_tries):\n\n                bl_failed = False\n\n                bl_failed = self.phase_setup()      # Perform baud rate negotiation\n                if( bl_failed == False ):\n                    bl_failed = self.phase_bootload()   # Bootload\n\n                if( bl_failed == False ):\n                    break\n            if ((self.installed_bootloader >= 0) and (self.installed_bootloader < BOOTLOADER_VERSION)):\n                self.addMessage.emit(\"\\nYour bootloader is out of date.\\nPlease click Update Bootloader.\")\n\n        except:\n            self.addMessage.emit(\"Could not communicate with board!\")\n\n        #try:\n        #    self.ser.close()\n        #except:\n        #    self.addMessage.emit(\"Failed to close serial port!\")\n        #    pass\n\n        self.finished.emit()\n\n    # ///// END of code taken from artemis_svl.py\n\nif __name__ == '__main__':\n    import sys\n    app = QApplication(sys.argv)\n    app.setOrganizationName('Locatechs')\n    app.setApplicationName('LocaSafe UT221 Firmware Uploader ' + guiVersion)\n    app.setWindowIcon(QIcon(resource_path(\"Artemis-Logo-Rounded.png\")))\n    w = MainWindow()\n    w.show()\n    sys.exit(app.exec_())\n","sub_path":"tools/artemis_firmware_uploader_gui.py","file_name":"artemis_firmware_uploader_gui.py","file_ext":"py","file_size_in_byte":34288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"70403185","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport gwf\n\nimport zmq\nimport json\nimport logging\nlog = logging.getLogger('gwf.server')\n\n\nclass server:\n    ''' The world server core runs a world instance supplied at initialization\n        and accomplishes the following needs:\n        - fair server: it's possible to let every attached client do the same\n          amount of instructions in the same time\n        - ensures proper instruction scheduling\n        - handles communication with clients and world\n        - exceptions:\n          - json\n          - steps\n    '''\n    def __init__(self, world_instance,\n                 player_endpoint: str,\n                 supervisor_rep_endpoint: str,\n                 supervisor_pub_endpoint: str):\n        self._world = world_instance\n        self._zmq_context = zmq.Context()\n        self._player_endpoint = player_endpoint\n        self._supervisor_rep_endpoint = supervisor_rep_endpoint\n        self._supervisor_pub_endpoint = supervisor_pub_endpoint\n\n        self._client_socket = None\n        self._supervisor_rep_socket = None\n        self._supervisor_pub_socket = None\n\n        self._clients = {}\n\n        self._world_step_count = 10\n        self._wait_for_all = False\n        self._max_player_count = -1\n        self._initialized 
= False\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n return\n\n def _supervisor_io(self) -> None:\n try:\n _msgs = self._supervisor_rep_socket.recv_json(flags=zmq.NOBLOCK)\n except zmq.error.Again:\n return\n\n if not 'cmd' in _msgs:\n raise gwf.error.request_malformed('\"cmd\" keyword missing')\n\n _result = {'step': self._world.step()}\n if _msgs['cmd'] == 'hello':\n _result.update(\n {'server_version': gwf.version_info,\n 'pub_endpoint': self._supervisor_pub_endpoint})\n else:\n _result.update(self._world.supervisor_request(_msgs))\n self._supervisor_rep_socket.send_json(_result)\n\n def _client_io(self, step: int) -> None:\n _count = 0\n for _ in range(10):\n if not self._client_recv(self._client_socket):\n break\n _count += 1\n log.debug(\"recieved %d requests\", _count)\n\n for _name, _client in self._clients.items():\n if not _client.has_pending_request():\n continue\n if _client.is_in_turn(step, self._world_step_count):\n _rep_msg = self._world.client_request_reply(\n _client.uid(), _client.pop_request(step))\n self._client_socket.send_multipart(\n (_name, b'', json.dumps(_rep_msg).encode()))\n\n def _client_recv(self, socket) -> bool:\n try:\n _msgs = socket.recv_multipart(flags=zmq.NOBLOCK)\n except zmq.error.Again:\n return False\n\n _name, _, _req = _msgs\n if not _name in self._clients:\n self._clients[_name] = client_stub(uid=self._world.register_client())\n log.info('new client: %s', ' '.join(\"{:02x}\".format(b) for b in _name))\n self._clients[_name].push_request(json.loads(_req.decode()))\n return True\n\n def _supervisor_publish(self, message):\n self._supervisor_pub_socket.send_json(message)\n\n def initialize(self):\n if self._initialized:\n return\n\n try:\n self._client_socket = self._zmq_context.socket(zmq.ROUTER)\n self._client_socket.bind(self._player_endpoint)\n\n self._supervisor_rep_socket = self._zmq_context.socket(zmq.REP)\n self._supervisor_rep_socket.bind(self._supervisor_rep_endpoint)\n\n self._supervisor_pub_socket = self._zmq_context.socket(zmq.PUB)\n self._supervisor_pub_socket.bind(self._supervisor_pub_endpoint)\n\n except zmq.error.ZMQError as ex:\n # todo: research re-raise\n # todo: show process\n raise gwf.error.address_in_use()\n\n self._initialized = True\n\n def run(self):\n log.info('run')\n\n self.initialize()\n\n while True:\n _step = self._world.step()\n log.debug('step: %d', _step)\n self._supervisor_io()\n self._client_io(_step)\n self._supervisor_publish({'step': _step})\n\n def shutdown(self):\n log.info('server shutdown')\n return\n\n\nclass client_stub:\n def __init__(self, uid: int) -> None:\n self._timestamp = None\n self._request = None\n self._uid = uid\n\n def uid(self) -> int:\n return self._uid\n\n def push_request(self, request: dict) -> None:\n self._request = request\n\n def pop_request(self, timestamp: int):\n self._timestamp = timestamp\n _swap, self._request = self._request, None\n return _swap\n\n def has_pending_request(self) -> bool:\n return self._request is not None\n\n def is_in_turn(self, now: int, duration: int) -> bool:\n return self._timestamp is None or self._timestamp + duration <= now\n","sub_path":"gwf/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"313377629","text":"# INTERROMPENDO O WHILE\n\n\n#1 \n# cont = 1 \n\n# while cont <= 10:\n# print(cont, '-> ', end='')\n# cont += 1\n# print('Acabou')\n\n#2 \n# n = cont = 0\n# while cont < 3 : \n# n = 
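The gwf server record above answers supervisor requests over a ZeroMQ REQ/REP pair: each request is a JSON object carrying a 'cmd' key, and every reply includes the current world step ('hello' additionally returns the server version and the PUB endpoint). A minimal supervisor-client sketch against that protocol; the connect address is an assumption and must match the supervisor_rep_endpoint the server was started with:

import zmq

# Sketch of a supervisor client for the gwf server above.
# The endpoint below is an assumption; use the server's actual
# supervisor_rep_endpoint.
ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect("tcp://127.0.0.1:5555")  # assumed address
sock.send_json({"cmd": "hello"})
reply = sock.recv_json()  # per _supervisor_io(): 'step', 'server_version', 'pub_endpoint'
print(reply)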
int(input('Leia um número: '))\n# cont += 1\n\n#3 Break\n\nn = s= 0\nwhile True:\n n = int(input( 'Digite um número: '))\n if n == 999:\n break\n s += n\nprint('A soma vale {}'.format(s))\nprint(f'A soma vale {s}')","sub_path":"mundo2/aula015.py","file_name":"aula015.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"282855744","text":"# read training, validation, test data\nimport pickle\n\ntrain_file = \"./traffic-sign/train.p\"\nvalidation_file = \"./traffic-sign/valid.p\"\ntest_file = \"./traffic-sign/test.p\"\n\nwith open(train_file, \"rb\") as f:\n train = pickle.load(f)\nwith open(validation_file, \"rb\") as f:\n valid = pickle.load(f)\nwith open(test_file, \"rb\") as f:\n test = pickle.load(f)\n\nX_train, y_train = train[\"features\"], train[\"labels\"]\nX_valid, y_valid = valid[\"features\"], valid[\"labels\"]\nX_test, y_test = test[\"features\"], test[\"labels\"]\n\nn_train = len(X_train)\nn_test = len(X_test)\nn_valid = len(X_valid)\n\nn_classes = len(set(y_train))\n\n# display number of training, validation, test examples, and unique classes\nprint(\"Number of training examples = {}\".format(n_train))\nprint(\"Number of validation examples = {}\".format(n_valid))\nprint(\"Number of test examples = {}\".format(n_test))\nprint(\"Number of classes = {}\".format(n_classes))\n\n# Data exploration visualization code goes here\nimport random\nimport matplotlib.pyplot as plt\n\nimport csv\nsignnames = {}\nwith open(\"./signnames.csv\", 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n k, v = row\n signnames[k] = v\n\n'''\nindex = random.randint(0, n_train)\nimage = X_train[index]\nplt.figure(figsize=(1,1))\nplt.title(signnames[str(y_train[index])])\nplt.imshow(image)\nplt.show()\n'''\nimport numpy as np\n# Normalize data\nX_train = np.subtract(np.divide(X_train, 127), 1)\nX_valid = np.subtract(np.divide(X_valid, 127), 1)\nX_test = np.subtract(np.divide(X_test, 127), 1)\n\nfrom helper import *\nfrom sklearn.utils import shuffle\n\nEPOCHS = 40\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n print(\"start training ...\")\n print()\n num_examples = len(X_train)\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x:batch_x, y:batch_y, keep_prob:0.5})\n\n validation_loss, validation_acc = evaluate(X_valid, y_valid) \n print(\"EPOCH {} ...\".format(i + 1))\n print(\"Validation Loss = {:.3f}\".format(validation_loss))\n print(\"Validation Acc = {:.3f}\".format(validation_acc))\n print()\n\n try:\n saver\n except NameError:\n saver = tf.train.Saver()\n save_path = saver.save(sess, saved_file)\n print (\"Model saved at {}\".format(save_path))\n\nwith tf.Session() as sess:\n saver = tf.train.Saver()\n saver.restore(sess, saved_file)\n\n test_loss, test_acc = evaluate(X_test, y_test)\n print(\"Train Loss = {:.3f}\".format(test_loss))\n print(\"Test Acc = {:.3f}\".format(test_acc))\n\n","sub_path":"Traffic_Sign_Classifier.py","file_name":"Traffic_Sign_Classifier.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"95828135","text":"\"\"\"\n\nA file that creates a structured dataset\n data\n |\n |--- train\n |\n |--- test\n |\n |--- 
validation\n\n\n\"\"\"\n\nimport os\n\ndef createDatadir(path, parentdir, subdirs):\n\n    os.chdir(path)\n    print(\"The current working directory is {}\".format(path))\n    #os.chdir(path)\n    check_dir = os.path.isdir('./data')\n    subfolders = []\n\n    if check_dir == False:\n        print(\"There is no data dir,\\ncreating the directories now\")\n        parent_dir = os.makedirs('data/')\n        parent_path = os.chdir('./data')\n        #subfolders = []\n        if subdirs == 2:\n            subfolders.extend(('train', 'test'))\n        elif subdirs == 3:\n            subfolders.extend(('train', 'test', 'validation'))\n        else:\n            print(\"Set a valid number of directories\\n2 for train and test set and\\n3 for train, test and validation sets\")\n        for subfolder in subfolders:\n            if not os.path.exists(subfolder):\n                os.makedirs(subfolder)\n        created_folders = os.listdir()\n        print(\"The subfolders {} have been created\".format(created_folders))\n    else:\n        print(\"Data directory exists\")\n\n\n\nif __name__ == '__main__':\n    path = input(\"Enter the directory path: \")\n    parentdir = str(input(\"What is the name of your parent directory? \"))\n    subdirs = int(input(\"How many subdirectories need to create? \"))\n    createDatadir(path, parentdir, subdirs)\n","sub_path":"work_with_files/create_file_dir.py","file_name":"create_file_dir.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"615233953","text":"#!/bin/python3\n\nmyfileObj = open(\"myfile.txt\", \"w+\") # r, w , a \n#l = [1,2,3,4]\n#myfileObj.write(\"Hi This is the content in my file.\\n\")\n#myfileObj.write(\"Adding some more things in file \\n\")\n\nfor i in range(5):\n\tmyfileObj.write(\" My line number is \" + str(myfileObj.tell()) + \"\\n\")\n\nmyfileObj.close()\n\n#myfileObj = open(\"myfile.txt\", \"r\") # r, w , a \nmyfileObj = open(\"myfile.txt\", \"r\") # r, w , a \n\nmyfileObj.seek(5)\nline1 = myfileObj.read(100)\n\nprint(line1)\n#print(line1, str(myfileObj.tell()))\nmyfileObj.close()\n\n#myfileObj.tell()\n#print(help(myfileObj.read))\n#content = myfileObj.readlines(70)\n#for i in content:\n#\tprint(i)\n\n#print( myfileObj.tell())\n#\n#line2 = myfileObj.readline()\n#print(line2)\n#print( myfileObj.tell())\n#\n#line3 = myfileObj.readline()\n#print(line3)\n#print( myfileObj.tell())\n#myfileObj.close()\n","sub_path":"pythonscripts/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"527494783","text":"import urllib.parse\nfrom urllib.parse import parse_qs, urlparse\nfrom typing import Dict, Any, Match, Tuple\n\nfrom markdown import Markdown\nfrom markdown.extensions import Extension\nfrom markdown.inlinepatterns import InlineProcessor\nfrom markdown.util import etree\n\n\nclass EmbedInlineProcessor(InlineProcessor):\n    # Don't worry about re.compiling this, markdown.inlinepatterns.Pattern.__init__ does that for us\n    EMBED_RE = r'\\[\\[(?P<url>.+?)\\]\\]'\n\n    # Prefixes of the iframe src attributes we generate\n    YOUTUBE_SRC_PREFIX = '//www.youtube-nocookie.com/embed/'\n    IFRAME_SRC_PREFIXES = [YOUTUBE_SRC_PREFIX]\n\n    # Other iframe attributes we generate\n    IFRAME_ATTRIBS = ['width', 'height', 'frameborder', 'allowfullscreen']\n\n    def __init__(self, md: Markdown, configs: Dict[str, Any]) -> None:\n        super().__init__(self.EMBED_RE, md)\n        self.config = configs\n\n    def handleMatch(self, m: Match[str], data: str) -> Tuple[etree.Element, int, int]:  # type: ignore[override]\n        d = m.groupdict()\n        url = 
d.get('url')\n if not url:\n el = etree.Element('span')\n el.text = \"[[]]\"\n return el, m.start(0), m.end(0)\n try:\n link = urlparse(url)\n host = link.hostname\n except:\n el = etree.Element('span')\n el.text = \"[[\" + url + \"]]\"\n return el, m.start(0), m.end(0)\n el = None\n try:\n if host == 'youtube.com' or host == 'www.youtube.com' or host == 'youtu.be':\n el = self._embed_youtube(self._get_youtube_id(link))\n except:\n pass\n if el is None:\n el = etree.Element('span')\n el.text = \"[[\" + url + \"]]\"\n return el, m.start(0), m.end(0)\n\n def _get_youtube_id(self, link: urllib.parse.ParseResult) -> str:\n return (link.path if link.netloc == 'youtu.be'\n else parse_qs(link.query)['v'][0])\n\n def _embed_youtube(self, vid_id: str) -> etree.Element:\n el = etree.Element('iframe')\n el.set('width', '100%')\n el.set('height', '600')\n el.set('frameborder', '0')\n el.set('allowfullscreen', '')\n el.set('src', self.YOUTUBE_SRC_PREFIX + vid_id + '?rel=0')\n return el\n\n\nclass KerbDown(Extension):\n def __init__(self, **kwargs: str) -> None:\n super().__init__(**kwargs) # type: ignore[arg-type]\n self.config: Dict[str, Any] = {}\n\n # noinspection PyMethodOverriding\n def extendMarkdown(self, md: Markdown) -> None:\n # BUG: the base method signature is INVALID, it's a bug in flask-markdown\n md.inlinePatterns.register(EmbedInlineProcessor(md, self.config), 'embed', 200)\n md.registerExtension(self)\n","sub_path":"KerbalStuff/kerbdown.py","file_name":"kerbdown.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538150977","text":"import serial\n\ndef distance(serial_inst_inp):\n\toutput = [0]*3\n\tn = 0\n\tdata0 = serial_inst_inp.readline()\n\tAddrs = data0[0]\n\tfor i in range(2,len(data0)):\n\t\tif data0[i]==32:\n\t\t\tn = i-1\n\t\t\tbreak\n\n\tdata = 0\n\tfor j in range(2,n):\n\t\tdata = data + ( (data0[j] - 48)*(10**(n-j)) )\n\n\tdata = data + (data0[n] - 48)\n\n\toutput[0] = Addrs\n\toutput[1] = data\n\n\treturn(output)\n","sub_path":"Files_On_RPi/coop_con_loc_nano/src/coop_con_loc/src/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"417534750","text":"import arcgisscripting\nimport gc\nimport zipfile\nimport os\nimport datetime\nimport binascii\n\ndef timeticks():\n try:\n return str(datetime.datetime.now()).replace(\"-\",\"\").replace(\":\",\"\").replace(\".\",\"\").replace(\" \",\"\")[:14]\n except Exception:\n raise\n\ntry: \n gp = arcgisscripting.create(9.3)\n\n gp.AddMessage(\"Iniciado\")\n\n ids = gp.GetParameterAsText(0)\n table = gp.GetParameterAsText(1) \n\n fileName = timeticks()\n \n TEMP_GEOM = \"in_memory\\\\\" + fileName\n\n path = \"D:/arcgisserver/toolboxes/GetShape/\"\n\n gp.AddMessage(\"Abrindo workspace\")\n \n gp.Workspace = path + \"IDAFGEO.sde\"\n\n gp.MakeFeatureLayer_management(\"IDAFGEO.\" + table,TEMP_GEOM, \"ID IN (\" + ids + \")\")\n\n gp.AddMessage(\"Gerando centroids\")\n\n rows = gp.SearchCursor(TEMP_GEOM)\n\n row = rows.next()\n\n returnCentroid = \"[\"\n\n while row:\n returnCentroid += \"{\"+str(row.geometry.Centroid).split()[0].replace(\",\",\".\") + \",\" + str(row.geometry.Centroid).split()[1].replace(\",\",\".\")+\"}\"\n row = rows.next()\n if row != None:\n returnCentroid += \",\"\n\n returnCentroid += \"]\"\n\n gp.FeatureClassToShapefile(TEMP_GEOM, path)\n\n gp.AddMessage(\"Criando zip\")\n \n zf = 
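The kerbdown record above hooks EmbedInlineProcessor into Python-Markdown through the KerbDown extension; a short usage sketch (the video URL is illustrative only):

import markdown

from KerbalStuff.kerbdown import KerbDown  # module path taken from the record's sub_path

# Render a [[...]] embed; the URL is illustrative.
md = markdown.Markdown(extensions=[KerbDown()])
html = md.convert("[[https://youtu.be/dQw4w9WgXcQ]]")
print(html)  # expected to contain an <iframe> with a youtube-nocookie.com src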
zipfile.ZipFile(path + fileName + '.zip', mode='w')\n\n gp.AddMessage(\"Adicionando arquivos\")\n \n zf.write(path + fileName + '.shp', fileName + '.shp')\n zf.write(path + fileName + '.dbf', fileName + '.dbf')\n zf.write(path + fileName + '.prj', fileName + '.prj')\n zf.write(path + fileName + '.sbn', fileName + '.sbn')\n zf.write(path + fileName + '.shp.xml', fileName + '.shp.xml')\n zf.write(path + fileName + '.sbx', fileName + '.sbx')\n zf.write(path + fileName + '.shx', fileName + '.shx')\n zf.close()\n\n os.remove(path + fileName + \".shp\")\n os.remove(path + fileName + \".dbf\")\n os.remove(path + fileName + \".prj\")\n os.remove(path + fileName + \".sbn\")\n os.remove(path + fileName + \".shp.xml\")\n os.remove(path + fileName + \".sbx\")\n os.remove(path + fileName + \".shx\")\n\n in_file = open(path + fileName + \".zip\", \"rb\")\n data = in_file.read()\n in_file.close()\n\n os.remove(path + fileName + \".zip\")\n \n gp.SetParameterAsText(2, binascii.b2a_base64(data))\n gp.SetParameterAsText(3, returnCentroid)\n \n \nexcept Exception:\n raise\nfinally:\n del TEMP_GEOM\n gc.collect()\n \n","sub_path":"Código Fonte/VERSAO-ATUAL/src/ArcGIS/Python/GetShape/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"574541685","text":"\nimport argparse\nimport numpy as np\nimport cv2\nimport os\nimport time\n\nimport serve\nimport json\n\nimport edge_detect\nimport face_landmark_detect\n\n\ndef inference_cam(input_model, output_dir, cam_id, cam_width, cam_height, perf_mode, *args):\n\n\n #set CUDA_VISIBLE_DEVICES\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n print(\"\\nSet CUDA_VISIBLE_DEVICES = 1\\n\")\n\n\n\n ##### get res from input model dir\n options_s = {\"scale_size\"}\n with open(os.path.join(input_model, \"options.json\")) as f:\n for key, val in json.loads(f.read()).items():\n if key in options_s:\n #print(\"loaded\", key, \"=\", val)\n img_res = int(val)\n print(\"\\nModel Resolution retrieved from Exported Model Data and set to:\", img_res, \"px\\n\")\n\n\n #cam capture \n cam_cap = cv2.VideoCapture(cam_id)\n #set res\n cam_cap.set(3, cam_width);\n cam_cap.set(4, cam_height);\n\n #cam_cap.set(14, 10)\n\n\n\n out__ = os.path.join(os.path.dirname(__file__), '_temp_imgs/temp_out.jpg')\n input_image_queue, output_image_queue, live_process, lifetime_end = serve.process_handler(input_model, out__)\n\n\n #vars\n _last = None\n n = 1\n use_preproc = 0\n preproc_mode = 0\n use_zoom = 0\n use_inf = 0\n\n\n\n #resize\n if (img_res == 1024) and (perf_mode == 1):\n target_size = int(img_res / 2)\n else:\n target_size = img_res\n\n\n\n #create bg image first outside loop\n bg_img = face_landmark_detect.create_bg(target_size)\n\n\n while (True):\n #print (True)\n ret, frame = cam_cap.read()\n\n\n frame_rec = frame.shape\n frame_height = frame_rec[0]\n frame_width = frame_rec[1]\n #print (frame_width, frame_height, target_size)\n\n\n if use_zoom == 0:\n\n frame_scale = target_size / frame_height\n frame_width_s = int(frame_width * frame_scale)\n width_offset = int((frame_width_s - target_size) / 2)\n\n #rescale\n frame = cv2.resize(frame, (frame_width_s, target_size))\n #crop square\n frame = frame[0:target_size, width_offset:(frame_width_s-width_offset)]\n #post fix scale\n frame = cv2.resize(frame, (target_size, target_size))\n #print (frame.shape)\n\n else:\n\n width_offset = int((frame_width - target_size) / 2)\n height_offset = int((frame_height - target_size) / 
2)\n\n #crop square\n frame = frame[height_offset:(frame_height-height_offset), width_offset:(frame_width-width_offset)]\n #print (frame.shape)\n #post fix scale\n frame = cv2.resize(frame, (target_size, target_size))\n #print (frame.shape)\n\n\n\n\n \n\n #image pre processing, filters\n if use_preproc:\n if preproc_mode == 0:\n frame = face_landmark_detect.face_landmark_detect(frame, bg_img, target_size, 0)\n\n else:\n frame = edge_detect.edge_detect_filter(frame)\n \n \n\n #post upscale to match target res\n if (img_res == 1024) and (perf_mode == 1):\n frame = cv2.resize(frame, (img_res, img_res))\n\n\n\n #inference\n\n #write temp file\n temp = os.path.join(os.path.dirname(__file__), '_temp_imgs/temp.jpg')\n cv2.imwrite(temp, frame)\n\n\n #check if prediction process can receive img, if empty\n if input_image_queue.empty():\n #if yes add temp img \n input_image_queue.put(temp)\n\n #sleep for a while \n #time.sleep(0.1)\n\n #read predict output from process tunnel\n if not output_image_queue.empty():\n #get signal\n pred_img = output_image_queue.get()\n #save img\n _last = cv2.imread(pred_img, 1)\n \n #check if numpy \n if isinstance(_last, np.ndarray):\n #overwrite frame\n \n #check if use inference\n if(use_inf):\n frame = _last\n \n\n #display\n cv2.imshow('q Close, s Save, d Prepocess, m P. Mode, p Perf. Mode, f Inference, z Zoom', frame)\n key = cv2.waitKey(1) & 0xFF\n\n\n\n #write image function\n if key == ord('s'):\n\n image_file = os.path.join(output_dir, \"img_{0}.png\".format(str(n).zfill(4)))\n cv2.imwrite(image_file, frame)\n\n print(\"Snapshot created:\", image_file, \"\\n\")\n\n #increase img counter\n n += 1\n\n\n #use image preprocesing\n if key == ord('d'):\n if(use_preproc):\n use_preproc = 0\n else:\n use_preproc = 1\n\n #toggle image preprocesing mode\n if key == ord('m'):\n if(preproc_mode):\n preproc_mode = 0\n else:\n preproc_mode = 1\n\n\n #toggle inferenceing\n if key == ord('f'):\n if(use_inf):\n use_inf = 0\n else:\n use_inf = 1 \n\n #toggle inferenceing\n if key == ord('z'):\n if(use_zoom):\n use_zoom = 0\n else:\n use_zoom = 1 \n\n\n #quit process\n if key == ord('q'):\n #set value to break BG while loop\n lifetime_end.value = True\n\n for process in live_process:\n #kill process in BG\n process.join()\n\n #break cv\n break\n\n\n #close video capture\n cam_cap.release()\n\n\n\n\n#arg parser\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Pix2Pix Inference Module')\n\n parser.add_argument('-m', '--input_model',\n dest='input_model',\n help='Input Model Dir',\n required=True)\n parser.add_argument('-o', '--output_dir',\n dest='output_dir',\n help='Image Output Dir',\n required=False)\n parser.add_argument('-c', '--cam_id',\n dest='cam_id',\n help='Webcam ID',\n required=False,\n type=int,\n default=0)\n parser.add_argument('-x', '--cam_width',\n dest='cam_width',\n help='Webcam Px Width',\n required=False,\n type=int,\n default=960)\n parser.add_argument('-y', '--cam_height',\n dest='cam_height',\n help='Webcam Px Height',\n required=False,\n type=int,\n default=720)\n parser.add_argument('-p', '--perf_mode', #zoom doesn't work in non_perf_mode yet\n dest='perf_mode',\n help='1024Px Performance Mode',\n required=False,\n type=int,\n default=1)\n\n \n results = parser.parse_args()\n \n\n #call function\n inference_cam(results.input_model, results.output_dir, results.cam_id, results.cam_width, results.cam_height, 
results.perf_mode)","sub_path":"inference_webcam.py","file_name":"inference_webcam.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568892350","text":"import time\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom mrcnn.config import Config\nfrom mrcnn.model import MaskRCNN\nfrom mrcnn.visualize import display_instances\n\nclass_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'CN', 'DT', '/']\n\n\n# define the test configuration\nclass CardConfig(Config):\n NAME = 'number'\n\n IMAGES_PER_GPU = 1\n\n NUM_CLASSES = 1 + 13\n\n STEPS_PER_EPOCH = 100\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.1\n\n\ndef main(path_to_weight):\n ###################################################################################\n # load this on boot\n config = CardConfig()\n # path_to_weight = 'weights/weights.h5'\n path_to_image = 'dataset/marlvinmbinga/test4.jpeg'\n rcnn = MaskRCNN(mode='inference', model_dir='./load_weights', config=config)\n rcnn.load_weights(path_to_weight, by_name=True)\n ####################################################################################\n\n ####################################################################################\n\n # When hit the endpoint does this\n img = img_to_array(load_img(path_to_image))\n results = rcnn.detect([img], verbose=1)\n r = results[0]\n ####################################################################################\n\n width = img.shape[1]\n xyz = zip(r['class_ids'], [list(i) for i in r['rois']], r['scores'])\n sortedXYZ = [list(i) for i in sorted(xyz, key=lambda item: item[1][1])]\n if 11 in r['class_ids']:\n CN_COD = r['rois'][list(r['class_ids']).index(11)]\n CN_COD[0] -= 20\n CN_COD[1] = 10\n CN_COD[2] += 15\n CN_COD[3] = width - 10\n res = filter(lambda x: True if CN_COD[0] < x[1][0] < CN_COD[2] else False, sortedXYZ)\n getIndexOf(res, CN_COD)\n # To see what the model is seeing comment the line below\n display_instances(img, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])\n\n\ndef getIndexOf(sortedList, CN):\n print('Card Number')\n valid = [str(i) for i in range(0, 11)]\n cardNumber = ''\n prevDigit = [11, CN, 0]\n for item in sortedList:\n if str(item[0]) not in valid:\n continue\n if CN[0] < item[1][0] and item[1][2] < CN[2] and item[1][3] - item[1][1] < 500:\n overlap = lambda x, y: False if (y[1][1] - x[1][1]) / (x[1][3] - x[1][1]) > 0.7 or x[0] == 11 or y[\n 0] == 11 else True\n if prevDigit is not None:\n if overlap(prevDigit, item):\n resolve = lambda digit1, digit2: digit1 if digit1[-1] > digit2[-1] else digit2\n candidate = resolve(prevDigit, item)\n if prevDigit[0] == 5 and item[0] == 6 or prevDigit[0] == 6 and item[0] == 5:\n candidate = prevDigit if prevDigit[0] == 5 else item\n class_id = candidate[0]\n cardNumber = cardNumber[:len(cardNumber) - 1]\n prevDigit = candidate\n print('overlap >> ', candidate)\n else:\n class_id = item[0]\n prevDigit = item\n if class_id == 10:\n class_id = 0\n cardNumber += str(class_id)\n\n chunks = [cardNumber[i:i + 4] for i in range(0, len(cardNumber), 4)]\n try:\n print(f'{chunks[0]} {chunks[1]} {chunks[2]} {chunks[3]}')\n except IndexError:\n pass\n print(cardNumber)\n return cardNumber\n\n\nif __name__ == '__main__':\n 
main('weights/weights.h5')  # weights path is required; value taken from the commented-out default above\n","sub_path":"IntelliVision-main/api/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220685957","text":"import os\nimport dotenv\n\n\nclass Config:\n    BASE_DIR = os.path.dirname(os.path.dirname(__file__))\n    dotenv_file = os.path.join(BASE_DIR, \".env\")\n    if os.path.isfile(dotenv_file):\n        dotenv.load_dotenv(dotenv_file)\n\n    SECRET_KEY = os.environ['SECRET_KEY']\n    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n    MAIL_SERVER = os.environ['MAIL_SERVER']\n    MAIL_PORT = os.environ['MAIL_PORT']\n    MAIL_USE_TLS = os.environ['MAIL_USE_TLS']\n    MAIL_USERNAME = os.environ['MAIL_USERNAME']","sub_path":"flaskblog/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"605832030","text":"# coding=utf8\n# -*- coding: utf8 -*-\n\nfrom __future__ import absolute_import\nimport random, string\nimport os\nfrom datetime import date\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.text import slugify\nfrom django.core.files import File\nfrom django.conf import settings\n\nfrom places.models import Place\n\nUser = get_user_model()\n\n\nclass Command(BaseCommand):\n\n    USERS = [\n        # name, phone number, is active, can order\n        (u\"Mateusz\", \"\", True, False),\n        (u\"Dariusz\", \"\", True, False),\n        (u\"Sebastian\", \"\", True, False),\n        (u\"Tomasz\", \"\", True, False),\n        (u\"Adam\", \"\", True, False),\n        (u\"Mariusz\", \"\", True, False),\n        (u\"Marzena\", \"\", True, True),\n        (u\"Krystian\", \"\", True, False),\n        (u\"Rafał\", \"\", True, False),\n        (u\"Łukasz T\", \"\", True, False),\n        (u\"Maciej\", \"\", True, False),\n        (u\"Witold\", \"\", True, False),\n        (u\"Łukasz K\", \"\", True, False)\n    ]\n\n    PLACES = [\n        (u\"Pierogarnia Kubiel\", \"https://pyszne.pl/pierogarnia-kubiel-wroclaw\"),\n        (u\"Nudle i Sałaty\", \"http://nudle.com.pl/\"),\n        (u\"Obiadomek\", \"http://obiadomek.pl/\"),\n        (u\"Dragon Box\", \"https://pyszne.pl/dragon-box-piotra-skargi-wroclaw\"),\n        (u\"Woo Thai\", \"http://woothai.pl/\"),\n        (u\"Gyros Hot\", \"http://gyroshotwroclaw.pl/\"),\n    ]\n\n    def __create_users(self):\n        User.objects.all().delete()\n        for name, phone, is_active, can_order in self.USERS:\n            username = name.lower().replace(' ', '.')\n            email = '%s@tivix.com' % username\n            user = User.objects.create_user(email=email, password='test')\n            name_parts = name.split(' ', 1)  # some entries are a single name with no surname\n            user.first_name = name_parts[0]\n            user.last_name = name_parts[1] if len(name_parts) > 1 else ''\n            user.is_active = is_active\n            user.can_send_order = can_order\n            user.save()\n\n    def __create_places(self):\n        Place.objects.all().delete()\n        for name, menu_url in self.PLACES:\n            Place.objects.create(name=name, menu_url=menu_url)\n\n    def handle(self, *args, **options):\n        # self.core = options.get('core', False)\n\n        self.__create_users()\n        self.__create_places()\n","sub_path":"server/bnw/orders/management/commands/fakedata.py","file_name":"fakedata.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"55308874","text":"# Imports\nimport numpy as np\n\nfrom ctf.functions2d.function2d import Function2D\n\n\n\n# Problem\nclass SumOfDifferentPowers(Function2D):\n    \"\"\" Sum of Different Powers 
Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. \"\"\"\n # Information\n self.min = np.array([0.0, 0.0])\n self.value = 0.0\n self.domain = np.array([[-1, 1], [-1, 1]])\n self.n = 2\n self.smooth = True\n self.info = [True, False, False]\n # Description\n self.latex_name = \"Sum of Different Powers Function\"\n self.latex_type = \"Bowl-Shaped\"\n self.latex_cost = r\"\\[ f(\\mathbf{x}) = \\sum_{i=0}^d |x_i|^{i+2} \\]\"\n self.latex_desc = \" The Sum of Different Powers function is unimodal. It is shown here in its two-dimensional\" \\\n \" form. \"\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n # Cost\n c = np.zeros(x.shape[1:])\n # Calculate Cost\n c = np.sum([np.abs(x[i])**(i+2) for i in range(0, 2)], axis=0)\n # Return Cost\n return c\n","sub_path":"ctf/functions2d/sum_of_different_powers.py","file_name":"sum_of_different_powers.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"272128781","text":"from django.urls import path\nfrom . import views\n\napp_name='rest'\n\nurlpatterns = [\n\n path('create',views.Restcreate.as_view(),name='restform'),\n path('myrest',views.rest,name='myrest'),\n\n path('update/',views.Restupdate.as_view(),name='update'),\n\n\n]","sub_path":"website/rest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"92450718","text":"# -*- coding: utf-8 -*-\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom plone.batching.browser import BatchView\nfrom ZTUtils import make_query\n\n\nclass TemasBatchView(BatchView):\n \"\"\"\n \"\"\"\n\n index = ViewPageTemplateFile(\"templates/navigation.pt\")\n\n def __call__(self, batch, batchformkeys=None,\n minimal_navigation=False,\n show_page_range=False,\n ajaxcontentid='content-batch'):\n super(BatchView, self).__call__(\n batch, batchformkeys, minimal_navigation)\n self.ajaxcontentid = ajaxcontentid\n self.show_page_range = show_page_range\n return self.index()\n\n def make_link(self,\n pagenumber=0,\n pagesize=None):\n form = self.request.form\n\n if self.batchformkeys:\n batchlinkparams = dict([(key, form[key])\n for key in self.batchformkeys\n if key in form])\n else:\n batchlinkparams = form.copy()\n\n if not pagesize:\n pagesize = self.batch.pagesize\n\n start = max(pagenumber - 1, 0) * pagesize\n return '%s?%s' % (self.request.ACTUAL_URL, make_query(batchlinkparams,\n {self.batch.b_start_str: start,\n 'b_size': pagesize}))\n\n\n","sub_path":"src/governo/mdh/portal/browser/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"622005832","text":"#Step 1: Split the sentence into words\r\n#Step 2: Tag the words\r\nimport sqlite3\r\nimport sys\r\nfrom nltk import *\r\n\r\nsentence = \"\"\r\nfor i in range(1,len(sys.argv)):\r\n\tsentence = sentence + str(sys.argv[i]).lower() + \" \"\r\n\r\nconnection = sqlite3.connect(\"inventory.db\")\r\ncursor = connection.cursor()\r\n\r\nif __name__ == \"__main__\":\r\n\twords = word_tokenize(sentence)\r\n\tcontext = pos_tag(words)\r\n\t#Searching for of in the sentence\r\n\tgrammar = \"CHUNK: {}\"\r\n\tcp = RegexpParser(grammar)\r\n\tresult = cp.parse(context)\r\n\tfor subtree in result.subtrees():\r\n\t\tif subtree.label() == 'CHUNK':\r\n\t\t\t#It returns as a list of tuples, so accessing just the 
word\r\n\t\t\tquery = str(subtree.leaves()[0][0])\r\n\t\t\tproduct = str(subtree.leaves()[2][0])\r\n\t\t\tcursor.execute(\"SELECT \" + query + \" FROM table1 WHERE name=?\", [product])\r\n\t\t\tprint(cursor.fetchone()[0]) \r\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"434760715","text":"import os, time, csv, glob\nfrom PIL import Image\n\nclass Timer:\n\n start_time = None\n end_time = None\n formats = {\n \"pretty\" : \"%02dd %02dh %02dm %02ds\"\n }\n milestones = []\n\n def __init__(self):\n self.set_start_time()\n\n def set_start_time(self):\n self.start_time = self.get_timestamp()\n\n def set_end_time(self):\n self.end_time = self.get_timestamp()\n\n def get_timestamp(self):\n return time.time()\n\n def add_milestone(self,label):\n self.milestones.append({ \"label\" : label, \"timestamp\" : self.get_timestamp() })\n\n def get_milestones(self):\n return self.milestones\n\n def reset_milestones(self):\n self.milestones = []\n\n def get_time_passed(self,format=\"pretty\"):\n if None is self.end_time:\n self.set_end_time()\n time = float(self.end_time - self.start_time)\n day = time // (24 * 3600)\n time = time % (24 * 3600)\n hour = time // 3600\n time %= 3600\n minutes = time // 60\n time %= 60\n seconds = time\n return self.formats[format] % (day, hour, minutes, seconds)\n\n\nclass ImageVerifier:\n\n root_dir = None\n bad_files = []\n image_list = []\n current_image_file = None\n image_list_file = None\n filepath_col = None\n override_image_root_folder = None\n prepend_image_root_folder = None\n progress = 0\n\n def set_root_dir(self,root_dir):\n self.root_dir = root_dir\n\n def set_current_image_file(self,current_image_file):\n self.current_image_file = current_image_file\n\n def set_image_list_file(self,image_list_file):\n self.image_list_file = image_list_file\n\n def set_image_list_filepath_column(self,filepath_col):\n self.filepath_col = filepath_col\n\n def set_override_image_root_folder(self,folder):\n self.override_image_root_folder = folder\n\n def set_prepend_image_root_folder(self,folder):\n self.prepend_image_root_folder = folder\n\n def get_bad_files(self):\n return self.bad_files\n\n def verify_image(self):\n # img = Image.open(filename) # open the image file\n # img.verify() # verify that it is, in fact an image\n im = Image.open(self.current_image_file)\n im.verify() #I perform also verify, don't know if he sees other types o defects\n # im.close() #reload is necessary in my case\n im = Image.open(self.current_image_file)\n im.transpose(Image.FLIP_LEFT_RIGHT)\n # im.close()\n\n def verify_images_from_folder(self):\n self.progress = 0\n for filename in glob.iglob(self.root_dir + '/**/*.jpg', recursive=True):\n if os.path.isfile(filename):\n # print(filename)\n try:\n self.set_current_image_file(filename)\n self.verify_image()\n except Exception as e:\n self.bad_files.append({ \"filename\": filename, \"error\": str(e)})\n self.progress += 1\n self._print_progress()\n\n def verify_images_from_image_list(self):\n self.progress = 0\n self.image_list = []\n self.read_image_list_file()\n for filename in self.image_list:\n try:\n self.set_current_image_file(filename)\n self.verify_image()\n except Exception as e:\n self.bad_files.append({ \"filename\": filename, \"error\": str(e)})\n self.progress += 1\n self._print_progress()\n\n def _print_progress(self):\n if self.progress % 1000 == 0:\n print(\"{} / {} 
\".format(len(self.bad_files),self.progress))\n\n def read_image_list_file(self):\n with open(self.image_list_file) as csv_file:\n # reader = csv.reader(csv_file, delimiter=utils._determine_csv_separator(self.downloaded_images_file,\"utf-8-sig\"))\n reader = csv.reader(csv_file, delimiter=_determine_csv_separator(self.image_list_file,\"utf-8-sig\"))\n for row in reader:\n file = row[self.filepath_col]\n\n if self.override_image_root_folder:\n file = os.path.join(self.override_image_root_folder, os.path.basename(file))\n\n if self.prepend_image_root_folder:\n file = os.path.join(self.prepend_image_root_folder, file)\n\n if row[0] and file:\n self.image_list.append(file)\n\n print(\"read image list {}, found {} entries\".format(self.image_list_file,len(self.image_list)))\n\n\n\ndef _determine_csv_separator(filepath,encoding):\n f = open(filepath, \"r\", encoding=encoding)\n line = f.readline()\n if line.count('\\t') > 0:\n sep = '\\t'\n else:\n sep = ','\n return sep\n","sub_path":"code/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189228312","text":"\"\"\"\r\ntraverse backwards + go from last node to first node or vice versa\r\n\r\ncreate\r\ntime = O(1)\r\nspace = O(1)\r\n\r\ninsert\r\ntime = O(n)\r\nspace = O(1)\r\n\r\ntraverse\r\ntime = O(n)\r\nspace = O(1)\r\n\r\nsearch\r\ntime = O(n)\r\nspace = O(1)\r\n\r\ndelete\r\ntime = O(n)\r\nspace = O(1)\r\n\r\ndelete entire\r\ntime = O(n)\r\nspace = O(1)\r\n\"\"\"\r\n\r\nclass Node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.prev = None\r\n self.next = None\r\n\r\nclass CDLL:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def __iter__(self):\r\n node = self.head\r\n while node:\r\n yield node.value\r\n node = node.next\r\n if node == self.tail.next:\r\n break\r\n \r\n def create(self, nodevalue):\r\n newnode = Node(nodevalue)\r\n self.head = newnode\r\n self.tail = newnode\r\n newnode.next = newnode\r\n newnode.prev = newnode\r\n\r\n def insert(self, value, location):\r\n if not self.head:\r\n print(\"cdll does not exist\")\r\n else:\r\n newnode = Node(value)\r\n if location == 0:\r\n newnode.next = self.head\r\n newnode.prev = self.tail\r\n self.head.prev = newnode\r\n self.head = newnode\r\n self.tail.next = newnode\r\n elif location == -1:\r\n newnode.next = self.head\r\n newnode.prev = self.tail\r\n self.head.prev = newnode\r\n self.tail.next = newnode\r\n self.tail = newnode\r\n else:\r\n tempnode = self.head\r\n index = 0\r\n while index < location - 1:\r\n tempnode = tempnode.next\r\n index += 1\r\n newnode.next = tempnode.next\r\n newnode.prev = tempnode\r\n newnode.next.prev = newnode\r\n tempnode.next = newnode\r\n\r\n def traverse(self):\r\n if not self.head:\r\n print(\"cdll does not exist\")\r\n else:\r\n node = self.head\r\n while node:\r\n print(node.value)\r\n if node == self.tail:\r\n break\r\n node = node.next\r\n\r\n def reverse_traverse(self):\r\n if not self.head:\r\n print(\"cdll does not exist\")\r\n else:\r\n node = self.tail\r\n while node:\r\n print(node.value)\r\n if node == self.head:\r\n break\r\n node = node.prev\r\n\r\n def search(self, nodevalue):\r\n if not self.head:\r\n print(\"cdll does not exist\")\r\n else:\r\n node = self.head\r\n while node:\r\n if node.value == nodevalue:\r\n return node.value\r\n if node == self.tail:\r\n return \"not found\"\r\n node = node.next\r\n\r\n def delete(self, location):\r\n if not self.head:\r\n 
print(\"cdll does not exist\")\r\n else:\r\n if location == 0:\r\n if self.head == self.tail:\r\n self.head.prev = None\r\n self.head.next = None\r\n self.head = None\r\n self.tail = None \r\n else:\r\n self.head = self.head.next\r\n self.head.prev = self.tail\r\n self.tail.next = self.head\r\n elif location == -1:\r\n if self.head == self.tail:\r\n self.head.prev = None\r\n self.head.next = None\r\n self.head = None\r\n self.tail = None \r\n else:\r\n self.tail = self.tail.prev\r\n self.tail.next = self.head\r\n self.head.prev = self.tail\r\n else:\r\n if not self.head:\r\n return \"cdll does not exist\"\r\n else:\r\n tempnode = self.head\r\n index = 0\r\n while index < location - 1:\r\n tempnode = tempnode.next\r\n tempnode.next = tempnode.next.next\r\n tempnode.next.prev = tempnode\r\n\r\n def delete_entire(self):\r\n if not self.head:\r\n print(\"csll does not exist\") \r\n else:\r\n self.tail.next = None\r\n tempnode = self.head\r\n while tempnode:\r\n tempnode.prev = None\r\n tempnode = tempnode.next \r\n self.head = None\r\n self.tail = None\r\n\r\ncdll = CDLL()\r\ncdll.create(4)\r\nprint([i for i in cdll])\r\ncdll.insert(3, 0)\r\nprint([i for i in cdll])\r\ncdll.insert(2, 0)\r\nprint([i for i in cdll])\r\ncdll.insert(2.5,1)\r\nprint([i for i in cdll])\r\ncdll.insert(5, -1)\r\nprint([i for i in cdll])\r\ncdll.traverse()\r\ncdll.reverse_traverse()\r\nprint(cdll.search(2))\r\nprint(cdll.search(100))\r\ncdll.delete(0)\r\nprint([i for i in cdll])\r\ncdll.delete(-1)\r\nprint([i for i in cdll])\r\ncdll.delete(1)\r\nprint([i for i in cdll])\r\ncdll.delete_entire()\r\nprint([i for i in cdll])\r\n","sub_path":"zzz_dsa/python_dsa_1/042_circular_doubly_linked_list_create_ins_trav_reversetrav_search_del_delentire.py","file_name":"042_circular_doubly_linked_list_create_ins_trav_reversetrav_search_del_delentire.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"367368289","text":"from unittest.mock import patch\n\nimport pytest\n\nfrom tests_practice.definitions import (\n task11_time_converter,\n task12_largest_word,\n task13_words_backward,\n task14_fibonacci,\n task15_even_only,\n task16_sum_up_until,\n task17_factorial,\n task18_letter_replacement,\n task19_alpha_order,\n task20_num2_check)\n\n\n@pytest.mark.parametrize(\"check, result\",\n [(63, '1:3'), (48, '0:48'), (140, '2:20')])\ndef test_task11_time_converter(check, result):\n assert task11_time_converter(check) == result\n\n\ndef test_task11_time_converter_raises():\n with pytest.raises(ValueError):\n task11_time_converter(-5)\n\n\n@pytest.mark.parametrize(\"check, result\",\n [(\"fun&!! 
&!!&!!54 times time\", 'times'),\n (\"I love dogs\", 'love')])\ndef test_task12_largest_word(check, result):\n assert task12_largest_word(check) == result\n\n\ndef test_task12_largest_word_raises():\n with pytest.raises(ValueError):\n task12_largest_word(59)\n\n\n@pytest.mark.parametrize(\"check, result\",\n [(\"My name is Michele\", 'Michele is name My'),\n (\"I love dogs\", 'dogs love I')])\ndef test_task13_words_backward(check, result):\n assert task13_words_backward(check) == result\n\n\ndef test_task13_words_backward_raises():\n with pytest.raises(ValueError):\n task13_words_backward(59)\n\n\n@pytest.mark.parametrize(\"test_input, result\",\n [(7, [1, 1, 2, 3, 5, 8, 13]),\n (10, [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]),\n (2, [1, 1]),\n (1, [1])])\ndef test_task14_fibonacci(test_input, result):\n assert task14_fibonacci(test_input) == result\n\n\n@pytest.mark.parametrize(\"check, result\",\n [([1, 4, 9, 16, 25, 36, 49, 64, 81, 100], [4, 16, 36, 64, 100]),\n ([1, 9, 25], [])])\ndef test_task15_even_only(check, result):\n assert task15_even_only(check) == result\n\n\n@pytest.mark.parametrize(\"check\",\n [59, []])\ndef test_task15_even_only_raises(check):\n with pytest.raises(ValueError):\n task15_even_only(check)\n\n\n@pytest.mark.parametrize(\"test_input, result\",\n [(4, 10),\n (2, 3)])\ndef test_task16_sum_up_until(test_input, result):\n with patch('builtins.input', return_value=test_input):\n assert task16_sum_up_until() == result\n\n\ndef test_task16_sum_up_until_raises():\n with patch('builtins.input', return_value='some text'):\n with pytest.raises(ValueError):\n task16_sum_up_until()\n\n\n@pytest.mark.parametrize(\"check, result\",\n [(4, 24),\n (3, 6)])\ndef test_task17_factorial(check, result):\n assert task17_factorial(check) == result\n\n\n@pytest.mark.parametrize(\"check\",\n ['59', [], 's'])\ndef test_task17_factorial_raises(check):\n with pytest.raises(TypeError):\n task17_factorial(check)\n\n\n@pytest.mark.parametrize(\"check, result\",\n [('abcd', 'bcdE'),\n ('efghi', 'fghIj')])\ndef test_task18_letter_replacement(check, result):\n assert task18_letter_replacement(check) == result\n\n\n@pytest.mark.parametrize(\"check\",\n [59, []])\ndef test_task18_letter_replacement_raises(check):\n with pytest.raises(ValueError):\n task18_letter_replacement(check)\n\n\n@pytest.mark.parametrize(\"check, result\",\n [('hello', 'ehllo'),\n ('adbec', 'abcde')])\ndef test_task19_alpha_order(check, result):\n assert task19_alpha_order(check) == result\n\n\n@pytest.mark.parametrize(\"check\",\n [59, []])\ndef test_task19_alpha_order_raises(check):\n with pytest.raises(ValueError):\n task19_alpha_order(check)\n\n\ndef test_task20_num2_check():\n assert task20_num2_check(5, 6)\n assert not task20_num2_check(6, 5)\n assert task20_num2_check(5, 5) == '-1'\n\n\n@pytest.mark.parametrize(\"check, result\",\n [(59, '6'),\n (5, [])])\ndef test_task20_num2_check_raises(check, result):\n with pytest.raises(ValueError):\n task20_num2_check(check, result)\n","sub_path":"tests_practice/pytests.py","file_name":"pytests.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"76831767","text":"import os\nfrom gevent.pywsgi import WSGIServer\nfrom app import app\n\nFLASK_ENV = os.environ.get(\"FLASK_ENV\", \"production\")\n\n### App parameters\n# I should investigate what this does further\napp.config.update(\n SECRET_KEY = os.urandom(16)\n)\n\n\n### Check if in development or production\nif FLASK_ENV == 
\"development\":\n app.config[\"TESTING\"] = True\n\nhttp_server = WSGIServer(('', 2890), app)\nhttp_server.serve_forever()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"455890642","text":"# Add a sphere and a cube as a multiblock dataset to a plotter and then\n# change the visibility and color of the blocks.\n#\n# Note index ``1`` and ``2`` are used to access the individual blocks of\n# the composite dataset. This is because the :class:`pyvista.MultiBlock`\n# is the root node of the \"tree\" and is index ``0``. This allows you to\n# access individual blocks or the entire composite dataset itself in the\n# case of multiple nested composite datasets.\n#\nimport pyvista as pv\ndataset = pv.MultiBlock(\n [pv.Cube(), pv.Sphere(center=(0, 0, 1))]\n)\npl = pv.Plotter()\nactor, mapper = pl.add_composite(dataset)\nmapper.block_attr[1].color = 'b'\nmapper.block_attr[1].opacity = 0.5\nmapper.block_attr[2].color = 'r'\npl.show()\n","sub_path":"version/0.39/api/plotting/_autosummary/pyvista-Plotter-add_composite-1.py","file_name":"pyvista-Plotter-add_composite-1.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"308351211","text":"import collections\nimport datetime\n\nfrom django.utils.translation import gettext_lazy as _\n\n__author__ = 'andriy'\n\nSTATUS_PUBLISHED = 'P'\nSTATUS_HOLDED = 'H'\nSTATUS_DRAFT = 'N'\nSTATUS_HIDDEN = 'I'\n\nSTATUSES = {\n STATUS_DRAFT: _(\"Draft\"),\n STATUS_PUBLISHED: _(\"Published\"),\n STATUS_HOLDED: _(\"Holded\"),\n STATUS_HIDDEN: _(\"Hidden\"),\n}\n\nTYPE_PUBLICATION = 'Note'\nTYPE_RSS = 'Rss'\nTYPE_PHOTOGALLERY = 'Photo'\n\nTYPES = {\n TYPE_PUBLICATION: _(\"Publication\"),\n TYPE_RSS: _(\"Rss\"),\n TYPE_PHOTOGALLERY: _(\"Photogallery\"),\n}\nMIN_DATE = datetime.date(1970, 1, 1)\n\n\nclass Pager(collections.namedtuple(\"Pager\", \"page_nr pages page\")):\n def __new__(cls, *args, **kwargs):\n return super(Pager, cls).__new__(cls, *args, **kwargs)\n\n def replace_page(self, new_page):\n return self._replace(page=new_page)\n\n # def replace_page\n\n\n","sub_path":"src/publications/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"566434244","text":"\n\nfrom xai.brain.wordbase.nouns._inflection import _INFLECTION\n\n#calss header\nclass _INFLECTIONS(_INFLECTION, ):\n\tdef __init__(self,): \n\t\t_INFLECTION.__init__(self)\n\t\tself.name = \"INFLECTIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"inflection\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_inflections.py","file_name":"_inflections.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67859034","text":"import unittest\nimport json\nimport nuget\n\nclass TestRegistrations(unittest.TestCase):\n\n def test_const(self):\n response = json.load(open(\"./tests/sampledata/sample_nuget_service_index.json\", \"r\")) \n registration = nuget.Registrations(response)\n self.assertIsNotNone(registration)\n \nif __name__ == '__main__':\n 
unittest.main()","sub_path":"tests/registrations_test.py","file_name":"registrations_test.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"565769289","text":"from django.urls import path\nfrom django.views.generic import TemplateView\nfrom django.conf.urls import url\nfrom django.urls import include\nfrom . import views\nfrom . import forms\n\n\nurlpatterns = [\n\n    path('', views.index, name='index'),\n    path('index/', views.index, name='index'),\n    path('database/', views.database, name = 'database'),\n    path('results/', views.results, name='results'),\n    path('accountPage/', views.accountPage, name ='accountPage'),\n    path( 'following/', views.follow, name = 'following'),\n    path( 'unfollow/', views.unfollow, name = 'unfollow'),\n    path( 'updateComments/', views.updateComments, name = 'updateComments'),\n    path( 'deleteComment/', views.deleteComment, name = 'deleteComment'),\n    path( 'updateLike/', views.updateLike, name = 'updateLike'),\n    path( 'removeLike/', views.removeLike, name = 'removeLike'),\n    #path('login/', views.indexAP, name='login_view'),\n    #path('logout/', views.index, name='index'),\n    path('register_page/', views.register_page, name = 'register_page'),\n    path('recipe_register/', views.recipe_register, name = 'recipe_register'),\n    url(r'^user/(?P\\d+)/$', views.user_detail, name='user_detail'),\n    url(r'^recipe/(?P\\d+)/$', views.recipe_detail, name='recipe_detail'),\n]\n","sub_path":"Open_Fridge/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"110201717","text":"#! /usr/bin/python\n#\n# Ogonek\n#\n# Written in 2016 by Martinho Fernandes \n#\n# To the extent possible under law, the author(s) have dedicated all copyright and related\n# and neighboring rights to this software to the public domain worldwide. This software is\n# distributed without any warranty.\n#\n# You should have received a copy of the CC0 Public Domain Dedication along with this software.\n# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.\n#\n# Codepage compiler\n\nimport sys\nimport string\nimport re\nimport os\nimport codecs\n\nif len(sys.argv) != 6:\n    print('usage: ' + os.path.basename(sys.argv[0]) + ' <format> <codepage> <header> <impl> <name>
')\n sys.exit(17)\n\nfmt = sys.argv[1]\ncodepage = sys.argv[2]\nheader = sys.argv[3]\nimpl = sys.argv[4]\nname = sys.argv[5]\n\ndef parse_0x(lines):\n mapping = {}\n for line in source:\n line = line.split('#', 1)[0]\n parts = line.split()\n if(len(parts) < 2):\n continue\n\n l = int(parts[0], 16)\n r = int(parts[1], 16)\n mapping[l] = r\n return mapping\n\nformats = {\n '0x' : parse_0x,\n #'0x_var' : parse_0x_var,\n #'0x_mult' : parse_0x_mult,\n #'0x_bidi' : parse_0x_bidi,\n #'0x_indic' : parse_0x_indic,\n #'0x_ascii' : parse_0x_ascii,\n #'0x_var_mult' : parse_0x_var_mult,\n #'inverse' : parse_inverse,\n}\n\nif fmt not in formats:\n print('formats available ' + ', '.join(formats.keys()))\n sys.exit(17)\n\nsource = codecs.open(codepage, 'r', 'utf-8')\nmapping = formats[fmt](line for line in source)\n\nto_str = ''\nfor i in xrange(0, 256):\n if i in mapping.keys():\n to_str += ' ' + hex(mapping[i]) + ',\\n'\n else:\n to_str += ' 0xFFFFFFFF,\\n'\nto_str = to_str.rstrip()\n\nfrom_str = sorted(mapping.iteritems(), key=lambda e: e[1])\nfrom_str = map(lambda e: ' { static_cast(' + hex(e[0]) + '), ' + hex(e[1]) + ' },', from_str)\nfrom_str = '\\n'.join(from_str)\n\ncxx_name = os.path.splitext(os.path.basename(header))[0]\nname_lower = re.sub(r'[ -]', '_', cxx_name.lower());\nname_caps = name_lower.upper()\n\ncopyright_tmpl = string.Template('''// Ogonek\n//\n// Written in 2017 by Martinho Fernandes \n//\n// To the extent possible under law, the author(s) have dedicated all copyright and related\n// and neighboring rights to this software to the public domain worldwide. This software is\n// distributed without any warranty.\n//\n// You should have received a copy of the CC0 Public Domain Dedication along with this software.\n// If not, see .\n\n// This file was automatically generated.\n\n// ${name} encoding form\n\n''')\n\nheader_tmpl = string.Template('''#ifndef OGONEK_ENCODINGS_${pp_symbol}_HPP\n#define OGONEK_ENCODINGS_${pp_symbol}_HPP\n\n#include \n#include \n#include \n#include \n\nnamespace ogonek {\n struct OGONEK_PUBLIC ${identifier}_codepage {\n static OGONEK_PUBLIC code_point to_unicode[256];\n static OGONEK_PUBLIC detail::simple_byte_mapping from_unicode[${size}];\n };\n\n using ${identifier} = detail::simple_byte_mapping_encoding<${identifier}_codepage>;\n\n CONCEPT_ASSERT(EncodingForm<${identifier}>());\n} // namespace ogonek\n\n#endif // OGONEK_${pp_symbol}_HPP\n''')\n\nwith open(header, 'w') as header_file:\n header_file.write(copyright_tmpl.substitute(name=name))\n header_file.write(header_tmpl.substitute(pp_symbol=name_caps, identifier=name_lower, size=len(mapping)))\n\nimpl_tmpl = string.Template('''#include \n#include \n\nnamespace ogonek {\n code_point ${identifier}_codepage::to_unicode[256] = {\n$to_str\n };\n detail::simple_byte_mapping ${identifier}_codepage::from_unicode[] = {\n$from_str\n };\n} // namespace ogonek\n''')\n\nwith open(impl, 'w') as impl_file:\n impl_file.write(copyright_tmpl.substitute(name=name))\n impl_file.write(impl_tmpl.substitute(header=os.path.basename(header), identifier=name_lower, from_str=from_str, to_str=to_str))\n","sub_path":"tool/transforms/codepage.py","file_name":"codepage.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"340626255","text":"import numpy as np\nimport samplernn_pase.utils as utils\nimport math\nimport torch\nimport torch.nn.functional as F\n\n\ndef lecun_uniform(tensor):\n fan_in = torch.nn.init._calculate_correct_fan(tensor, 
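parse_0x in the codepage record above turns lines of 'byte codepoint' hex pairs into a dict, discarding '#' comments and short lines; a self-contained demo (written for Python 3, while the original script targets Python 2):

def parse_0x(lines):
    # Same parsing rule as the codepage compiler's parse_0x above.
    mapping = {}
    for line in lines:
        line = line.split('#', 1)[0]
        parts = line.split()
        if len(parts) < 2:
            continue
        mapping[int(parts[0], 16)] = int(parts[1], 16)
    return mapping

sample = [
    "0x41\t0x0041 # LATIN CAPITAL LETTER A",
    "0x80\t0x0402 # CYRILLIC CAPITAL LETTER DJE",
]
print(parse_0x(sample))  # {65: 65, 128: 1026}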
'fan_in')\n torch.nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))\n\n\ndef concat_init(tensor, inits):\n try:\n tensor = tensor.data\n except AttributeError:\n pass\n\n (length, fan_out) = tensor.size()\n fan_in = length // len(inits)\n\n chunk = tensor.new(fan_in, fan_out)\n for (i, init) in enumerate(inits):\n init(chunk)\n tensor[i * fan_in: (i + 1) * fan_in, :] = chunk\n\n\nclass CondsMixer(torch.nn.Module):\n conds_speaker_type = None\n conds_speaker_size = None\n conds_utterance_type = None\n conds_utterance_size = None\n conds_utterance_expanded_size = None\n\n def __init__(self, conds_speaker_type, conds_speaker_n, conds_speaker_size, conds_utterance_type,\n conds_utterance_linguistic_n, conds_utterance_linguistic_emb_size, conds_size):\n super(CondsMixer, self).__init__()\n self.conds_speaker_type = conds_speaker_type\n self.conds_utterance_type = conds_utterance_type\n self._init_dynamic_params(conds_utterance_linguistic_emb_size)\n self.speaker_embedding = torch.nn.Embedding(conds_speaker_n, conds_speaker_size)\n if self.conds_utterance_type in ['linguistic', 'linguistic_lf0']:\n emb_size = conds_utterance_linguistic_emb_size\n self.conds_utt_phonemes_emb = torch.nn.Embedding(conds_utterance_linguistic_n[0], emb_size)\n self.conds_utt_vowels_emb = torch.nn.Embedding(conds_utterance_linguistic_n[1], emb_size)\n self.conds_utt_gpos_emb = torch.nn.Embedding(conds_utterance_linguistic_n[2], emb_size)\n self.conds_utt_tobi_emb = torch.nn.Embedding(conds_utterance_linguistic_n[3], emb_size)\n self.conds_mix = torch.nn.Linear(self.conds_utterance_expanded_size + conds_speaker_size, conds_size)\n\n def _init_dynamic_params(self, conds_utterance_linguistic_emb_size):\n if self.conds_utterance_type == 'acoustic':\n self.conds_utterance_size = self.conds_utterance_expanded_size = 43\n elif self.conds_utterance_type == 'linguistic':\n self.conds_utterance_size = 55\n self.conds_utterance_expanded_size = 55 - 10 + 10 * conds_utterance_linguistic_emb_size\n elif self.conds_utterance_type == 'linguistic_lf0':\n self.conds_utterance_size = 57\n self.conds_utterance_expanded_size = 57 - 10 + 10 * conds_utterance_linguistic_emb_size\n\n def forward(self, utt_conds, info):\n speaker_conds = self._forward_speaker_conds(info, utt_conds.device).expand(\n utt_conds.size(0), utt_conds.size(1), -1\n )\n utt_conds = self._forward_linguistic_features(utt_conds)\n return self.conds_mix(torch.cat((speaker_conds, utt_conds), dim=2))\n\n def _forward_speaker_conds(self, info, device):\n if self.conds_speaker_type == 'embedding':\n speakers_ids = torch.tensor(\n [info_item['speaker']['index'] if info_item is not None else 0 for info_item in info], dtype=torch.int64\n ).to(device)\n return self.speaker_embedding(speakers_ids).unsqueeze(1)\n elif self.conds_speaker_type == 'pase':\n raise NotImplemented\n\n def _forward_linguistic_features(self, utt_conds):\n if self.conds_utterance_type not in ['linguistic', 'linguistic_lf0']:\n return utt_conds\n embedded_features = []\n for i in [2, 3, 4, 5, 6]:\n embedded_features.append(self.conds_utt_phonemes_emb(utt_conds[:, :, i].long()))\n embedded_features.append(self.conds_utt_vowels_emb(utt_conds[:, :, 27].long()))\n for i in [31, 33, 41]:\n embedded_features.append(self.conds_utt_gpos_emb(utt_conds[:, :, i].long()))\n embedded_features.append(self.conds_utt_tobi_emb(utt_conds[:, :, 49].long()))\n embedded_features.append(utt_conds[:, :, 0:2])\n embedded_features.append(utt_conds[:, :, 7:27])\n embedded_features.append(utt_conds[:, :, 
28:31])\n embedded_features.append(utt_conds[:, :, 32:33])\n embedded_features.append(utt_conds[:, :, 34:41])\n embedded_features.append(utt_conds[:, :, 42:49])\n embedded_features.append(utt_conds[:, :, 50:])\n return torch.cat(embedded_features, dim=2)\n\n\nclass FrameLevelLayer(torch.nn.Module):\n input_samples = None\n ratio = None\n rnn_layers = None\n rnn_hidden_size = None\n\n def __init__(self, input_samples, conds_size, ratio, rnn_layers, rnn_hidden_size):\n super(FrameLevelLayer, self).__init__()\n self.input_samples = input_samples\n self.ratio = ratio\n self.rnn_layers = rnn_layers\n self.rnn_hidden_size = rnn_hidden_size\n self.x_expand = torch.nn.Conv1d(input_samples, rnn_hidden_size, 1)\n self.conds_expand = torch.nn.Conv1d(conds_size, rnn_hidden_size, 1)\n self.rnn = torch.nn.GRU(rnn_hidden_size, rnn_hidden_size, rnn_layers, batch_first=True)\n self.rnn_h0 = torch.nn.Parameter(torch.zeros(rnn_layers, rnn_hidden_size))\n self.upsample = torch.nn.ConvTranspose1d(rnn_hidden_size, rnn_hidden_size, ratio, stride=ratio, bias=False)\n self.upsample_bias = torch.nn.Parameter(torch.zeros(rnn_hidden_size, ratio))\n self.upsample.reset_parameters()\n self._init_weights()\n self._init_weights_norm()\n\n def _init_weights(self):\n torch.nn.init.kaiming_uniform_(self.x_expand.weight)\n torch.nn.init.kaiming_uniform_(self.conds_expand.weight)\n torch.nn.init.constant_(self.x_expand.bias, 0)\n torch.nn.init.constant_(self.conds_expand.bias, 0)\n torch.nn.init.constant_(self.upsample_bias, 0)\n torch.nn.init.uniform_(\n self.upsample.weight, -np.sqrt(6 / self.rnn_hidden_size), np.sqrt(6 / self.rnn_hidden_size)\n )\n for i in range(self.rnn_layers):\n torch.nn.init.constant_(getattr(self.rnn, 'bias_ih_l{}'.format(i)), 0)\n torch.nn.init.constant_(getattr(self.rnn, 'bias_hh_l{}'.format(i)), 0)\n concat_init(getattr(self.rnn, 'weight_ih_l{}'.format(i)),\n [lecun_uniform, lecun_uniform, lecun_uniform])\n concat_init(getattr(self.rnn, 'weight_hh_l{}'.format(i)),\n [lecun_uniform, lecun_uniform, torch.nn.init.orthogonal_])\n\n def _init_weights_norm(self):\n self.x_expand = torch.nn.utils.weight_norm(self.x_expand)\n self.conds_expand = torch.nn.utils.weight_norm(self.conds_expand)\n self.upsample = torch.nn.utils.weight_norm(self.upsample)\n\n def forward(self, x, conds, upper_conditioning, rnn_state):\n b, t, _ = x.size()\n if t != conds.shape[1]:\n upscale_ratio = int(x.shape[1] / conds.shape[1])\n conds = conds.unsqueeze(2).expand(b, conds.shape[1], upscale_ratio, conds.shape[2]) \\\n .reshape(b, t, conds.shape[2])\n x = self.x_expand(x.permute(0, 2, 1)).permute(0, 2, 1)\n conds = self.conds_expand(conds.permute(0, 2, 1)).permute(0, 2, 1)\n x = x + conds + upper_conditioning if upper_conditioning is not None else x + conds\n hidden_state_tensor = torch.cat([\n self.rnn_h0.unsqueeze(1) if state is None else state.unsqueeze(1) for _, state in enumerate(rnn_state)\n ], dim=1)\n rnn_output, rnn_state_new = self.rnn(x, hidden_state_tensor)\n upsampling_bias = self.upsample_bias.unsqueeze(0).unsqueeze(2).expand(b, self.rnn_hidden_size, t, self.ratio) \\\n .contiguous().view(b, self.rnn_hidden_size, t * self.ratio)\n upsampling_output = (self.upsample(rnn_output.permute(0, 2, 1)) + upsampling_bias).permute(0, 2, 1)\n return upsampling_output, rnn_state_new\n\n\nclass SampleLevelLayer(torch.nn.Module):\n input_samples = None\n q_levels = None\n\n def __init__(self, input_samples, conds_size, rnn_hidden_size, q_levels):\n super(SampleLevelLayer, self).__init__()\n self.input_samples = input_samples\n 
self.q_levels = q_levels\n self.emb_layer = torch.nn.Embedding(q_levels, q_levels)\n self.emb_layer_expand = torch.nn.Conv1d(q_levels, rnn_hidden_size, input_samples, bias=False)\n self.conds_expand = torch.nn.Conv1d(conds_size, rnn_hidden_size, 1)\n self.comb_layer = torch.nn.Linear(rnn_hidden_size * 3, rnn_hidden_size)\n self.comb_layer_expand = torch.nn.Conv1d(rnn_hidden_size, rnn_hidden_size, 1)\n self.adapt = torch.nn.Conv1d(rnn_hidden_size, q_levels, 1)\n self._init_weights()\n self._init_weights_norm()\n\n def _init_weights(self):\n torch.nn.init.kaiming_uniform_(self.emb_layer_expand.weight)\n torch.nn.init.kaiming_uniform_(self.comb_layer.weight)\n torch.nn.init.constant_(self.comb_layer.bias, 0)\n lecun_uniform(self.adapt.weight)\n torch.nn.init.constant_(self.adapt.bias, 0)\n\n def _init_weights_norm(self):\n self.emb_layer_expand = torch.nn.utils.weight_norm(self.emb_layer_expand)\n self.comb_layer_expand = torch.nn.utils.weight_norm(self.comb_layer_expand)\n self.adapt = torch.nn.utils.weight_norm(self.adapt)\n\n def forward(self, x, conds, upper_tier_conditioning):\n upscale_ratio = int(upper_tier_conditioning.shape[1] / conds.shape[1])\n conds = conds.unsqueeze(2).expand(x.size(0), conds.shape[1], upscale_ratio, conds.shape[2]) \\\n .reshape(x.size(0), upper_tier_conditioning.shape[1], conds.shape[2])\n embedding_output = self.emb_layer(x.contiguous().view(-1)).view(x.size(0), -1, self.q_levels)\n embedding_expand_output = self.emb_layer_expand(embedding_output.permute(0, 2, 1))\n conds_expand_output = self.conds_expand(conds.permute(0, 2, 1))\n inputs_comb_output = F.relu(\n self.comb_layer(torch.cat(\n (embedding_expand_output.permute(0, 2, 1), conds_expand_output.permute(0, 2, 1),\n upper_tier_conditioning), dim=2)\n )\n )\n global_expand_output = F.relu(self.comb_layer_expand(inputs_comb_output.permute(0, 2, 1)))\n adaptation_output = self.adapt(global_expand_output)\n return F.log_softmax(adaptation_output.permute(0, 2, 1), dim=2)\n\n\nclass SampleRNNModel(torch.nn.Module):\n frame_size = None\n receptive_field = None\n quantizer = None\n rnn_states = None\n\n def __init__(self, conds_speaker_type, conds_speaker_n, conds_speaker_size, conds_utterance_type,\n conds_utterance_linguistic_n, conds_utterance_linguistic_emb_size, conds_size, sequence_length, ratios,\n rnn_layers, rnn_hidden_size, q_type_ulaw, q_levels):\n super(SampleRNNModel, self).__init__()\n self.frame_size = np.prod(ratios)\n self.receptive_field = np.prod(ratios) * sequence_length\n self.quantizer = utils.SampleRNNQuantizer(q_type_ulaw, q_levels)\n\n # Initialize conditionants mixer\n self.conds_mixer = CondsMixer(conds_speaker_type, conds_speaker_n, conds_speaker_size, conds_utterance_type,\n conds_utterance_linguistic_n, conds_utterance_linguistic_emb_size, conds_size)\n\n # Initialize frame level layers\n self.frames_layers = torch.nn.ModuleList()\n frame_layers_fs = list(map(int, np.cumprod(ratios)))\n for layer_n in range(0, len(frame_layers_fs)):\n self.frames_layers.append(\n FrameLevelLayer(frame_layers_fs[layer_n], conds_size, ratios[layer_n], rnn_layers[layer_n],\n rnn_hidden_size[layer_n])\n )\n\n # Initialize sample level layer\n self.sample_layer = SampleLevelLayer(ratios[0], conds_size, rnn_hidden_size[0], self.quantizer.q_levels)\n\n def _init_rnn_states(self, batch_size):\n self.rnn_states = {rnn: [None] * batch_size for rnn in self.frames_layers}\n\n def _get_rnn_states(self, layer, reset):\n return [\n self.rnn_states[layer][reset_index] if reset_element == 0 else None\n for 
reset_index, reset_element in enumerate(reset)\n ]\n\n def _set_rnn_states(self, new_hidden_state_tensor, frame_level_layer, reset):\n for reset_index, reset_element in enumerate(reset):\n if reset_element == 0 or reset_element == 1:\n self.rnn_states[frame_level_layer][reset_index] = new_hidden_state_tensor[:, reset_index, :]\n else:\n self.rnn_states[frame_level_layer][reset_index] = None\n\n def forward(self, x, y, utt_conds, info, reset):\n b, t, _ = utt_conds.size()\n\n # Init RNN states, if not done\n if not hasattr(self, 'rnnstates'):\n self._init_rnn_states(b)\n\n # Quantize both x and y\n x, y = self.quantizer.quantize(x), self.quantizer.quantize(y)\n\n # Mix both the speaker and utterance conditionants\n conds = self.conds_mixer(utt_conds, info)\n\n # Propagate through frame level layers\n upper_tier_conditioning = None\n for layer in reversed(self.frames_layers):\n from_index = self.frames_layers[-1].input_samples - layer.input_samples\n to_index = -layer.input_samples + 1\n input_samples = self.quantizer.dequantize(x[:, from_index: to_index])\n input_samples = input_samples.contiguous().view(x.size(0), -1, layer.input_samples)\n rnn_states = self._get_rnn_states(layer, reset)\n upper_tier_conditioning, rnn_states_new = layer(\n input_samples, conds, upper_tier_conditioning, rnn_states\n )\n self._set_rnn_states(rnn_states_new.detach(), layer, reset)\n\n # Propagate through sample level layers\n input_samples = x[:, (self.frames_layers[-1].input_samples - self.sample_layer.input_samples):]\n y_hat = self.sample_layer(input_samples, conds, upper_tier_conditioning)\n\n # Return only valid samples\n y_hat = y_hat[reset != 2]\n y = y[reset != 2]\n\n # Return both y_hat and y, even this last one is not used (just quantized for loss computation)\n return y_hat, y\n\n def test(self, utt_conds, info):\n b, t, _ = utt_conds.size()\n\n # Mix both the speaker and utterance conditionants\n conds = self.conds_mixer(utt_conds, [info])\n\n # Create a Tensor to store the generated samples in\n y_hat = torch.zeros(\n 1, (t + 1) * self.frame_size, dtype=torch.int64\n ).fill_(self.quantizer.quantize_zero()).to(utt_conds.device)\n\n # Init hidden states\n self._init_rnn_states(1)\n\n # Create a list to store the conditioning\n frame_level_outputs = [None for _ in self.frames_layers]\n\n # Iterate over the samples\n for xi in range(self.frame_size, y_hat.shape[1]):\n conds_indx, _ = divmod(xi, self.frame_size)\n conds_indx -= 1\n\n # Iterate over Frame Level layers\n for layer_index, layer in reversed(list(enumerate(self.frames_layers))):\n\n # If the generated sample is not a multiple of the input size, skip\n if xi % layer.input_samples != 0:\n continue\n\n # Prepare the input samples to enter the model\n input_samples = self.quantizer.dequantize(y_hat[:, xi - layer.input_samples:xi].unsqueeze(1)) \\\n .to(utt_conds.device)\n\n # Check conditioning (first layer does not have)\n if layer_index == len(self.frames_layers) - 1:\n upper_tier_conditioning = None\n else:\n frame_index = (xi // layer.input_samples) % self.frames_layers[layer_index + 1].ratio\n upper_tier_conditioning = frame_level_outputs[layer_index + 1][:, frame_index, :].unsqueeze(1)\n\n # Propagate through current frame level layer\n frame_level_outputs[layer_index], rnn_states_new = \\\n layer(\n input_samples, conds[:, conds_indx, :].unsqueeze(1), upper_tier_conditioning,\n self._get_rnn_states(layer, [1 if xi == self.frame_size else 0])\n )\n\n # Set the new frame level hidden state\n self._set_rnn_states(rnn_states_new.detach(), 
layer, [0])\n\n # Prepare the input samples Sample Level Layer\n input_samples = y_hat[:, xi - self.sample_layer.input_samples:xi].to(utt_conds.device)\n\n # Prepare conditioning\n upper_tier_conditioning = frame_level_outputs[0][:, xi % self.sample_layer.input_samples, :].unsqueeze(1)\n\n # Store generated samples\n y_hat[:, xi] = self.sample_layer(\n input_samples, conds[:, conds_indx, :].unsqueeze(1), upper_tier_conditioning\n ).squeeze(1).exp_().multinomial(1).squeeze(1)\n\n # Return generated samples\n return y_hat\n","sub_path":"samplernn_pase/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"631944889","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\n\nfrom sensor_msgs.msg import Imu\t\t\t\t\t# IMU (orientation)\nimport matplotlib.pyplot as plt\nimport pdb\n\nfrom tf import transformations\n\narr_t = []\narr_r = []\narr_p = []\narr_y = []\n\ndef sub_imu(msg):\n\tglobal arr_r, arr_p, arr_y, arr_t\n\tori = msg.orientation\n\tquaternion = (ori.x, ori.y, ori.z, ori.w)\n\t(roll_raw, pitch_raw, yaw_raw) = transformations.euler_from_quaternion(quaternion)\n\n\ttm = msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs\n\n\tarr_t.append(tm)\n\tarr_r.append(roll_raw)\n\tarr_p.append(pitch_raw)\n\tarr_y.append(yaw_raw)\n\ndef end_callback():\n\tplt.figure()\n\tts = [x - arr_t[0] for x in arr_t]\n\tplt.plot(ts, arr_y, 'r')\n\tplt.plot(ts, arr_p, 'g')\n\tplt.plot(ts, arr_r, 'b')\n\tplt.show()\n\t#pdb.set_trace()\n\nif __name__==\"__main__\":\n\trospy.init_node('imu_listener')\n\t#rospy.Subscriber('/xsens/imu/data', Imu, sub_imu, queue_size=10)\n\trospy.Subscriber('/imu_raw', Imu, sub_imu, queue_size=10)\n\trospy.on_shutdown(end_callback)\n\trospy.spin()","sub_path":"scripts/old/imu_orientation_test.py","file_name":"imu_orientation_test.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"317931988","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Digital Library — a digital book management system\n# Copyright (C) 2015 Igor Tarakanov ,\n# Yuriy Syrovetskiy \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\n\nfrom threading import current_thread, main_thread, Thread \nfrom PySide.QtCore import QObject, Qt, Signal\nfrom PySide.QtGui import QApplication\nfrom PySide.QtWebKit import QWebView\nfrom time import sleep\nimport configparser\nimport errno\nimport os\n\n\ndef current_thread_is_main():\n\treturn current_thread() is main_thread()\n\n\nclass Browser(QObject):\n\t# pylint: disable=too-many-public-methods\n\n\t__execute_script_called = Signal(str)\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.app = QApplication([])\n\n\t\tself.webview = QWebView()\n\t\tself.webview.setAttribute(Qt.WA_DeleteOnClose)\n\t\tself.webview.destroyed.connect(self.app.quit)\n\n\t\tself.__execute_script_called.connect(self.__execute_script)\n\n\tdef __execute_script(self, javascript_code: str):\n\t\tassert current_thread_is_main()\n\t\tself.webview.page().mainFrame().evaluateJavaScript(javascript_code)\n\n\tdef execute_script(self, javascript_code: str):\n\t\tif current_thread_is_main():\n\t\t\tself.__execute_script(javascript_code)\n\t\telse:\n\t\t\tself.__execute_script_called.emit(javascript_code)\n\n\tdef run(self, url):\n\t\tassert current_thread_is_main()\n\t\tself.webview.showFullScreen()\n\t\tself.webview.load(url)\n\t\tself.app.exec_()\n\n\ndef opener_nonblock(path, mode):\n\treturn os.open(path, mode | os.O_NONBLOCK)\n\n\ndef user_scanner(config, browser):\n\tscanner = open(config[\"user_scanner\"])\n\twhile True:\n\t\tdata = scanner.readline()\n\t\tnew_user = data.strip(\"\\0\\2\\3\\r\\n\")\n\t\tif new_user == '':\n\t\t\ttry:\n\t\t\t\tscanner = open(config[\"user_scanner\"])\n\t\t\texcept FileNotFoundError:\n\t\t\t\tbrowser.execute_script('user_scanner_off()')\n\t\t\t\tsleep(0.1)\n\t\t\t\tcontinue\n\t\t\tbrowser.execute_script('user_scanner_on()')\n\t\t\tsleep(0.1)\n\t\t\tcontinue\n\t\tprint('user:', repr(new_user))\n\t\tbrowser.execute_script(\"user('\" + new_user + \"')\")\n\n\ndef book_scanner(config, browser):\n\tsleep(2)\n\tbrowser.execute_script('angular.element(document.getElementById(\"signin\")).scope().setdata(\"terminal\", \"terminal123\");')\n\twhile True:\n\t\ttry:\n\t\t\tscanner = open(config[\"book_scanner\"], \"rb\")\n\t\texcept FileNotFoundError:\n\t\t\tsleep(0.1)\n\t\t\tbrowser.execute_script('book_scanner_off()')\n\t\t\tcontinue\n\t\tbrowser.execute_script('book_scanner_on()')\n\t\ti = 0\n\t\tbarcode = \"\"\n\t\twhile True:\n\t\t\ti += 1\n\t\t\ttry:\n\t\t\t\tscanner.read(12)\n\t\t\texcept OSError as e:\n\t\t\t\tif e.errno == errno.ENODEV:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\traise\n\t\t\tnumber = int.from_bytes(scanner.read(1), byteorder='big') - 29\n\t\t\tscanner.read(3)\n\t\t\tif i % 2 == 0:\n\t\t\t\tcontinue\n\t\t\tif number < 0:\n\t\t\t\tcontinue\n\t\t\tif number == 11:\n\t\t\t\tbreak\n\t\t\tbarcode += str(number % 10)\n\t\tif barcode == '':\n\t\t\tcontinue\n\t\tprint('barcode:', repr(barcode))\n\t\tif barcode != '':\n\t\t\tbrowser.execute_script(\"book('\" + barcode + \"')\")\n\n\ndef main():\n\tdef load_config():\n\t\tconfig = configparser.ConfigParser()\n\t\tconfig.read('config')\n\t\treturn config['Terminal']\n\n\tconfig = load_config()\n\n\tbrowser = Browser()\n\n\tuser = Thread(target=user_scanner, args=(config, browser), daemon=True)\n\tbook = Thread(target=book_scanner, args=(config, browser), daemon=True)\n\tuser.start()\n\tbook.start()\n\n\tprint(config['operations_url'])\n\n\tbrowser.run(config['operations_url'])\n\tsleep(4)\n\n\tbrowser.execute_script('angular.element(document.getElementById(\"signin\")).scope().setdata(\"terminal\", 
\"terminal123\");'\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"328849933","text":"from multiprocessing import Pool\nimport os\nimport time\nimport random\n\n\ndef child_task(name):\n    print('Child process %s with pid %s is running' % (name, os.getpid()))\n    start = time.time()\n    time.sleep(random.random() * 3)  # sleep for a random interval\n    end = time.time()\n    print('Child process %s ran for %0.2f seconds' % (name, (end - start)))\n\n\nif __name__ == '__main__':\n    print('Current parent process id is %s' % os.getpid())\n    p = Pool(4)  # create a process pool instance with 4 worker processes\n    for i in range(5):  # loop 5 times, submitting 5 tasks\n        p.apply_async(child_task, args=(i,))\n    print('All child tasks submitted; waiting for the child processes to run')\n    p.close()  # close the pool; no new tasks can be submitted afterwards\n    p.join()  # with a pool, close() must be called before join(); join() waits for all children to finish\n    print('All processes finished')\n\n","sub_path":"three/thread/processpool.py","file_name":"processpool.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"386526988","text":"from .controller import Controller\nfrom service.picture import PictureService, PictureSaveResult\n\n\nclass PictureController(Controller):\n    def post(self, request):\n        # check whether a file was sent with the request\n        if 'file' not in request.files:\n            return self.format_response({'error': \"No file received\"}), 400\n\n        file = request.files['file']\n        service = PictureService(self.config)\n        res = service.save(file)\n        body, status = self.create_response(res, {\n            PictureSaveResult.OK: {},\n            PictureSaveResult.INVALID_PICTURE_TYPE: (\n                \"The file must be a JPEG image\", 400\n            )\n        })\n        return self.format_response(body), status\n","sub_path":"api/controller/picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"571589196","text":"# -*- coding: utf-8 -*-\r\nimport findspark\r\nfindspark.init()\r\n\r\nfrom pyspark import SparkContext\r\nimport os\r\n\r\nsc = SparkContext(\"local\", \"map\")\r\n\r\ndata_path = \"C:\\\\PySpark\\\\data\"\r\n\r\ndirPath = os.path.join(data_path, \"files\")\r\nos.mkdir(dirPath)\r\n\r\nwith open(os.path.join(dirPath, \"1.txt\"), \"w\") as file1:\r\n    _ = file1.write(\"1.. some text\")\r\n    \r\nwith open(os.path.join(dirPath, \"2.txt\"), \"w\") as file2:\r\n    _ = file2.write(\"2.. some other text\")\r\n    \r\ntextFiles = sc.wholeTextFiles(dirPath)\r\n\r\nsorted(textFiles.collect())\r\n\r\nsc.stop()","sub_path":"spark_core/rdd_actions/wholeTextFiles.py","file_name":"wholeTextFiles.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"626556181","text":"names_of_stuff = \"Banana Republic. I try to do as much as I can to understand stuff\"\nlen_of_string = len(names_of_stuff)\n\ndef slice_and_dice(x):\n    if x < len_of_string:\n        return names_of_stuff[:x]\n    else:\n        return error_out()\n\ndef error_out():\n    return (\"Error. Try again.\")\n\nx = int(input(\"Number? 
\"))\nprint(\"New list: \", slice_and_dice(x))\n","sub_path":"missing_char.py","file_name":"missing_char.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"389760621","text":"import forma4\nfrom forma4 import llenar_forma\nfrom alimentar import datos\nimport time, random\nfrom selenium import webdriver\n\n#nombres = ['AARON GARCIA', 'EDEL SAID PABLO BIBIANO', 'ANA SOFIA RODRIGUEZ LUNA']\n#pos = ['14', '15', '23']\nnombres = ['AARON GARCIA', 'EDEL SAID PABLO BIBIANO']\npos = ['14', '15']\nespera = 60\ncooldown_secs = 2\n\ndef cooldown():\n    print(\"\\nCooldown:\")\n    espera_query = cooldown_secs + random.randrange(0,4)\n    for l in range(espera_query):\n        print(str(espera_query-l), end=\" \", flush=True)\n        time.sleep(1)\n\ndef init_rank():\n    rank_browser = webdriver.Chrome('/home/mr/Documents/corona/chromedriver')\n    rank_browser.get('https://coronacapital10.com.mx/sites/all/themes/bootstrap_barrio/img/logo.png')\n    rank_browser.add_cookie({'name' : 'OptanonAlertBoxClosed',\n            'value' : '2019-10-31T06:37:07.575Z',\n            'domain' : '.coronacapital10.com.mx',\n            'path':'/'})\n    rank_browser.get('https://coronacapital10.com.mx/ranking')\n\n    try:\n        elem = rank_browser.find_element_by_id('age_checker_day')\n        elem.send_keys('12')\n\n        elem = rank_browser.find_element_by_id('age_checker_month')\n        elem.send_keys('02')\n\n        elem = rank_browser.find_element_by_id('age_checker_year')\n        elem.send_keys('1996')\n\n        elem = rank_browser.find_element_by_id('edit-submit')\n        elem.click()\n\n        print(\"it worked!\")\n\n        return rank_browser\n    except:\n        print(\"no luck\")\n\ndef main():\n\n#    rank_browser = init_rank()\n    \n    while True:\n        k=0\n        for i in range(len(nombres)):\n#            p=0\n            p=1\n            while p==0:\n                try:\n                    rank = rank_browser.find_element_by_xpath('/html/body[1]/div[2]/div[1]/div[2]/div[1]/section[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[2]/div['+ pos[i] + ']/p[1]').text\n                    p=1\n                except:\n                    try:\n                        print(\"\\nERROR: RESTARTING RANKING\\n\")\n                        rank_browser.quit()\n                        rank_browser = init_rank()\n                    except:\n                        print(\"\\nERROR: REOPENING RANKING\\n\")\n                        rank_browser = init_rank()\n\n\n#            if nombres[i]==rank:\n#                print('\\n' + nombres[i] + ' IN POSITION ' + pos[i])\n#                k=k+1\n#            else:\n#                print('\\nRanking ' + nombres[i] + ' to position ' + pos[i])\n#                par = datos()\n#                llenar_forma(par[0], par[1], par[2], i)\n            print('\\n\\nRanking ' + nombres[i] + ' to position ' + pos[i])\n            par=datos()\n            try:\n                llenar_forma(par[0], par[1], par[2], i)\n            except:\n                print(\"NO HAHAHAHAHALOLOLOLOLO\")\n                None\n\n#            cooldown()\n\n        if k==len(nombres):\n            print('\\nAll ranked, checking again in ', espera, ' seconds')\n            time.sleep(espera)\n\n#        n=0\n        n=1\n        while n==0:\n            try:\n                rank_browser.refresh()\n                n=1\n            except:\n                None\n\nif __name__ == \"__main__\":\n    main()","sub_path":"main4.py","file_name":"main4.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"407399601","text":"import pygame\nimport os\n\n# Define some colors\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\ngreen = (0, 255, 0)\nred = (255, 0, 0)\nblue = (0, 0, 255)\n\n\ndef get_font_dir_location():\n    s = os.path.dirname(os.path.abspath(__file__))\n    s2 = s.partition(\"Display\")\n    return ''.join([s2[0], \"resources\\\\fonts\\\\\"])\n\n\ndef get_font(font_name, size):\n    font_dir_location = get_font_dir_location()\n    try:\n        if font_name == 'Comic Sans':\n            return 
pygame.font.Font(''.join([font_dir_location, 'comic.ttf']), size)\n        if font_name == 'Calibri':\n            return pygame.font.Font(''.join([font_dir_location, 'calibri.ttf']), size)\n        if font_name == 'Lucida Console':\n            return pygame.font.Font(''.join([font_dir_location, 'lucon.ttf']), size)\n    except OSError:\n        return pygame.font.SysFont('Calibri', size, True, False)\n\n\ndef draw_new_text(text, font, colour, screen, center_point):\n    text_surface = font.render(text, True, colour)  # create a surface object containing the desired text\n    text_rect = text_surface.get_rect()  # create a rectangle of the text_surface area, with position 0,0\n    text_rect.center = (center_point[0], center_point[1])  # move the rectangle to the desired position (not on screen)\n    screen.blit(text_surface, text_rect)  # blit the text onto the rectangle's position onto the screen\n\n\ndef get_rectangle(text, font, colour, center_point):\n    text_surface = font.render(text, True, colour)  # create a surface object containing the desired text\n    text_rect = text_surface.get_rect()  # create a rectangle of the text_surface area, with position 0,0\n    text_rect.center = (center_point[0], center_point[1])  # move the rectangle to the desired position (not on screen)\n    return text_rect\n","sub_path":"retro_games/Display/display_manager.py","file_name":"display_manager.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"39548092","text":"import requests\nimport urllib.parse\nimport pandas as pd\n\nif __name__ == '__main__':\n    #######################################\n    # To Load Raw Data from NOAA\n    # Takes a while...\n    #######################################\n\n    url = 'https://www.ncei.noaa.gov/access/services/data/v1'\n    st = pd.read_csv('noaa_stations2.csv')\n\n    st_list = st.STATION.to_list()\n\n    tt = pd.DataFrame()\n    for s in st_list:\n\n        params = urllib.parse.urlencode(dict(\n            dataset='daily-summaries',\n            stations=s,\n            startDate='2015-01-01',\n            endDate='2021-04-15',\n            format='json',\n            includeStationName='false',\n            units='metric',\n            includeStationLocation='false'\n        ))\n\n        r = requests.get(url, params=params)\n\n        tt = tt.append(pd.DataFrame(r.json()))\n\n    tt.to_pickle('tx_temp_raw.pkl')\n\n    ######################################\n    # Transform Raw Temp Data to Daily Mean\n    ######################################\n\n    tt = tt[['DATE','STATION','TMAX','TMIN']].dropna()\n\n    tt.DATE = pd.to_datetime(tt.DATE)\n\n    tt.TMAX = pd.to_numeric(tt.TMAX)\n    tt.TMIN = pd.to_numeric(tt.TMIN)\n\n    tt = tt.merge(st, on='STATION')\n\n    tt['TEMP'] = (tt.TMAX + tt.TMIN)/2\n\n    tt.to_pickle('tx_temp.pkl')\n","sub_path":".ipynb_checkpoints/download_raw_temp-checkpoint.py","file_name":"download_raw_temp-checkpoint.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"211430621","text":"import os, os.path, sys\n\ndef showMenu(heading, itemlist):\n    while True:\n        print (\"\\n *** \" + heading + \" ***\\n\")\n        for i in range(len(itemlist)):\n            print (\"%4d: %s\" % (i, itemlist[i]))\n\n        print(\"\\n Choice (or 'q')\", end= \" \")\n        reply = input(\": \")\n        if reply in ['q', 'Q']:\n            return None\n        else:\n            try:\n                option = int(reply)\n                if 0 <= option < len(itemlist):\n                    return option\n            except:\n                pass\n\n\ndef pickAFile(thisDir = '.', extension = '*'):\n    namesInThisDir = os.listdir(thisDir)\n\n    filelist = []\n    for item in namesInThisDir:\n        fullname = os.path.join(thisDir, item)\n        # fullname = item\n        if 
os.path.isfile(fullname) and \\\n (item.endswith(extension) or extension == '*'):\n filelist.append(item)\n filelist.sort()\n choice = showMenu(\"Pick a file\", filelist)\n if choice is not None:\n return os.path.join(thisDir, filelist[choice])\n else:\n return None\n\n\nif __name__ == \"__main__\":\n while True:\n choice = pickAFile('.', '*')\n if choice:\n print(\"You chose\", choice)\n else:\n sys.exit(0)\n","sub_path":"fileChooser.py","file_name":"fileChooser.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"187494154","text":"import unittest\nfrom random import randint\n\nfrom acrylic import Color, RANDOM\n\n\nclass TestColor(unittest.TestCase):\n\n def test_basic(self):\n color = Color()\n self.assertEqual(color.rgb, (0, 0, 0))\n\n color = Color(rgb=(83, 237, 229))\n self.assertEqual(color.rgb, (83, 237, 229))\n\n def test_str_repr(self):\n self.assertEqual(str(Color(rgb=(61, 245, 245))), 'r=61, g=245, b=245')\n self.assertEqual(str(Color(hex='#3DF5F5')), \"hex='#3DF5F5'\")\n\n new_color = Color(rgb=(61, 245, 245))\n self.assertEqual(eval(repr(new_color)), new_color)\n new_color = Color(hsl=(180, 90, 60))\n self.assertEqual(eval(repr(new_color)), new_color)\n new_color = Color(hex='#3DF5F5')\n self.assertEqual(eval(repr(new_color)), new_color)\n\n def test_eq(self):\n new_color = Color(rgb=(61, 245, 245))\n self.assertEqual(new_color, Color(hsl=(180, 90, 60)))\n self.assertEqual(new_color, Color(rgb=(61, 245, 245)))\n self.assertEqual(new_color, Color(hex='#3DF5F5'))\n self.assertNotEqual(new_color, Color(rgb=(62, 245, 245)))\n\n def test_hash(self):\n new_color = Color(rgb=(61, 245, 245))\n self.assertEqual(hash(new_color), hash(Color(hsl=(180, 90, 60))))\n self.assertEqual(hash(new_color), hash(Color(rgb=(61, 245, 245))))\n self.assertEqual(hash(new_color), hash(Color(hex='#3DF5F5')))\n self.assertNotEqual(hash(new_color), hash(Color(rgb=(62, 245, 245))))\n\n test_dict = {new_color: 'test!'}\n self.assertEqual(test_dict[Color(hsl=(180, 90, 60))], 'test!')\n self.assertEqual(test_dict[Color(hsl=(180, 90.1, 60))], 'test!')\n self.assertEqual(test_dict[Color(hsl=(180, 89.8, 60))], 'test!')\n\n test_set = set([\n Color(rgb=(61, 245, 245)), Color(hsl=(180, 90, 60)),\n Color(hsl=(180, 89.8, 60)), Color(hex='#3DF5F5')\n ])\n self.assertEqual(len(test_set), 1)\n\n def test_validation_inplist(self):\n with self.assertRaises(TypeError):\n _ = Color(rgb=1)\n _ = Color(rgb='123')\n _ = Color(rgb='test!')\n _ = Color(rgb=(0, 0.5))\n _ = Color(rgb=(1, 2, 3, 4))\n\n def test_validation_type(self):\n with self.assertRaises(TypeError):\n _ = Color(rgb=(25.123, 50, 200))\n _ = Color(rgb=('test!', 50, 200))\n _ = Color(rgb=((25.123, 24), 50, 200))\n _ = Color(rgb=((24, 25.123), 50, 200))\n _ = Color(rgb=(('test!', 24), 50, 200))\n _ = Color(rgb=((24, 'test!'), 50, 200))\n\n def test_validation_value(self):\n lower, upper = Color._LIMITS['rgb'].r\n with self.assertRaises(ValueError):\n _ = Color(rgb=(lower - 10, 50, 200))\n _ = Color(rgb=(upper + 10, 50, 200))\n _ = Color(rgb=((lower - 10, 24), 50, 200))\n _ = Color(rgb=((24, lower - 10), 50, 200))\n _ = Color(rgb=((upper + 10, 24), 50, 200))\n _ = Color(rgb=((23, upper + 10), 50, 200))\n _ = Color(rgb=((24, 42, 64), 50, 200))\n _ = Color(rgb=((24,), 50, 200))\n\n def test_single_values(self):\n limits = Color._LIMITS['rgb']\n for _ in range(50):\n values = [randint(limits[i][0], limits[i][1]) for i in range(3)]\n color = Color(rgb=values)\n 
self.assertEqual(color.rgb, tuple(values))\n [self.assertIsInstance(x, int) for x in color.rgb]\n\n def test_random(self):\n limits = Color._LIMITS['rgb']\n for _ in range(50):\n color = Color(rgb=RANDOM)\n check = lambda i, x: limits[i][0] <= x <= limits[i][1]\n self.assertTrue(all(check(i, x) for i, x in enumerate(color.rgb)))\n [self.assertIsInstance(x, int) for x in color.rgb]\n\n def test_range_values(self):\n for _ in range(50):\n lower, upper = Color._LIMITS['rgb'].r\n a, b = randint(lower, upper // 2), randint(upper // 2, upper)\n color = Color(rgb=([a, b], 42, 64))\n self.assertTrue(a <= color.rgb.r <= b)\n color = Color(rgb=([b, a], 42, 64))\n self.assertTrue(a <= color.rgb.r <= b)\n color = Color(rgb=([RANDOM, a], 42, 64))\n self.assertTrue(color.rgb.r <= a)\n color = Color(rgb=([b, RANDOM], 42, 64))\n self.assertTrue(b <= color.rgb.r)\n color = Color(rgb=([RANDOM, RANDOM], 42, 64))\n self.assertTrue(lower <= color.rgb.r <= upper)\n\n def test_validation_hex(self):\n with self.assertRaises(TypeError):\n _ = Color(hex=1)\n _ = Color(hex=(5, 0.5, 1))\n\n with self.assertRaises(ValueError):\n _ = Color(hex='test')\n _ = Color(hex='#0000')\n _ = Color(hex='#0000xx')\n\n def test_hex(self):\n tests = [\n '#3DF5F5', '3DF5F5', '#3df5f5', '3df5f5', '#3DF5F5FF',\n '3DF5F5FF', '#3df5f5ff', '3df5f5ff'\n ]\n for test in tests:\n self.assertEqual(Color(hex=test).hex, '#3DF5F5')\n\n def test_conversion(self):\n def test_all(color):\n self.assertEqual(color.hsl, (176.88, 81.05, 62.75))\n self.assertEqual(color.rgb, (83, 237, 229))\n self.assertEqual(color.hsv, (176.88, 64.98, 92.94))\n self.assertEqual(color.hex, '#53EDE5')\n self.assertEqual(color.ryb, (18, 97, 172))\n\n test_all(Color(hsl=(176.88, 81.05, 62.75)))\n test_all(Color(rgb=(83, 237, 229)))\n test_all(Color(hsv=(176.88, 64.98, 92.94)))\n test_all(Color(hex='#53EDE5'))\n test_all(Color(ryb=(18, 97, 172)))\n\n def test_immutability(self):\n with self.assertRaises(AttributeError):\n color = Color(rgb=(83, 237, 229))\n color.hsl = (176.88, 81.05, 62.75)\n color.rgb = (83, 237, 229)\n color.hsv = (176.88, 64.98, 92.94)\n color.hex = '#53EDE5'\n color.ryb = (18, 97, 172)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"241604499","text":"\"\"\"\nThis algorithm is meant to be implemented using a paper-trading strategy to eliminate forward looking bias.\n\"\"\"\nfrom quantopian.algorithm import attach_pipeline, pipeline_output\nfrom quantopian.pipeline import Pipeline\nfrom quantopian.pipeline.data.builtin import USEquityPricing\nfrom quantopian.pipeline.factors import AverageDollarVolume\nfrom quantopian.pipeline.filters.morningstar import Q1500US\n \ndef initialize(context):\n \"\"\"\n Called once at the start of the algorithm.\n \"\"\" \n context.features = {}\n context.stocks = symbols('XLY', # XLY Consumer Discrectionary SPDR Fund \n 'XLF', # XLF Financial SPDR Fund \n 'XLK', # XLK Technology SPDR Fund \n 'XLE', # XLE Energy SPDR Fund \n 'XLV', # XLV Health Care SPRD Fund \n 'XLI', # XLI Industrial SPDR Fund \n 'XLP', # XLP Consumer Staples SPDR Fund \n 'XLB', # XLB Materials SPDR Fund \n 'XLU') # XLU Utilities SPRD Fund\n \n # Rebalance every day, when market opens.\n schedule_function(my_rebalance,\n date_rule = date_rules.every_day(),\n time_rule = time_rules.market_open())\n \n # Record tracking variables at the end of each day.\n schedule_function(my_record_vars, date_rules.every_day(), 
time_rules.market_close())\n \n # Create our dynamic stock selector.\n attach_pipeline(make_pipeline(), 'my_pipeline')\n \ndef make_pipeline():\n \"\"\"\n A function to create our dynamic stock selector (pipeline). Documentation on\n pipeline can be found here: https://www.quantopian.com/help#pipeline-title\n \"\"\"\n \n # Base universe set to the Q500US\n base_universe = Q1500US()\n\n # Factor of yesterday's close price.\n yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n screen = base_universe,\n columns = {\n 'close': yesterday_close,\n }\n )\n return pipe\n \ndef before_trading_start(context, data):\n \"\"\"\n Called every day before market open.\n \"\"\"\n context.output = pipeline_output('my_pipeline')\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \ndef my_assign_weights(context, data):\n \"\"\"\n Assign weights to securities that we want to order.\n \"\"\"\n pass\n \ndef my_rebalance(context,data):\n \"\"\"\n Execute orders according to our schedule_function() timing. \n \"\"\"\n for stock in context.stocks:\n price_hist = data.history(stock, 'price', 100, '1d')\n ma1 = price_hist.mean() \n\n price_hist = data.history(stock, 'price', 300, '1d')\n ma2 = price_hist.mean() \n\n \n if ma1 > ma2:\n order_target_percent(stock, 0.11)\n \n elif ma1 < ma2:\n order_target_percent(stock, -0.11) \n \ndef my_record_vars(context, data):\n \"\"\"\n Plot variables at the end of each day.\n \"\"\"\n pass\n\n","sub_path":"quantopian33(10)/misc-files/house-money2.py","file_name":"house-money2.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"117836178","text":"import numpy as np\r\nimport cv2 as cv\r\n\r\nhaar_cascade = cv.CascadeClassifier('harr_face.xml')\r\npeople = ['Female 1', 'Female 2', 'Ishihara Satomi', 'Male 1', 'Male 2', 'Male 3']\r\n\r\n\r\nface_recognizer = cv.face.LBPHFaceRecognizer_create()\r\nface_recognizer.read('face_trained.yml')\r\nimg = cv.imread(r'C:\\workspace\\OPENCV\\Faces\\Val\\image.jfif')\r\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\ncv.imshow('Person', gray)\r\n\r\n#detect the face\r\nfaces_rect = haar_cascade.detectMultiScale(gray, 1.1, minNeighbors=4)\r\nfor (x, y, w, h) in faces_rect:\r\n faces_roi = gray[y:y+h, x:x+w]\r\n labels, confidence = face_recognizer.predict(faces_roi)\r\n print(f'Label = {people[labels]} with a confidence of {confidence}')\r\n cv.putText(img, str(people[labels]), (20,20), cv.FONT_HERSHEY_COMPLEX, 1.0, (0,255,0), 1)\r\n cv.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\r\n\r\ncv.imshow('Detected Face', img)\r\ncv.waitKey(0)\r\n\r\n","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"547387892","text":"from keras import backend as K\nfrom keras.engine import Layer\nfrom keras import regularizers, constraints, initializers\n\n\nclass GlobalAttentionPool(Layer):\n \"\"\"\n A gated attention global pooling layer as presented by\n [Li et al. 
(2017)](https://arxiv.org/abs/1511.05493).\n Note that this layer assumes the `'channels_last'` data format, and cannot\n be used otherwise.\n\n **Mode**: single, batch.\n\n **Input**\n\n - node features of shape `(batch, num_nodes, num_features)`, depending on\n the mode;\n\n **Output**\n\n - a pooled feature matrix of shape `(batch, channels)`;\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `kernel_regularizer`: regularization applied to the gating networks; \n\n **Usage**\n\n ```py\n X = Input(shape=(num_nodes, num_features))\n Z = GlobalAttentionPool(channels)(X)\n ```\n \"\"\"\n def __init__(self, channels=32, kernel_regularizer=None, **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(GlobalAttentionPool, self).__init__(**kwargs)\n self.channels = channels\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n self.lg_kernel = self.add_weight('LG_kernel',\n (input_shape[-1], self.channels),\n initializer='glorot_uniform',\n regularizer=self.kernel_regularizer)\n self.lg_bias = self.add_weight('LG_bias',\n (self.channels,),\n initializer='zeros')\n self.attn_kernel = self.add_weight('attn_kernel',\n (input_shape[-1], self.channels),\n initializer='glorot_uniform',\n regularizer=self.kernel_regularizer)\n self.attn_bias = self.add_weight('attn_bias',\n (self.channels,),\n initializer='zeros')\n self.built = True\n\n def call(self, inputs):\n # Note that the layer assumes the 'channels_last' data format.\n inputs_linear = K.dot(inputs, self.lg_kernel) + self.lg_bias\n attn_map = K.dot(inputs, self.attn_kernel) + self.attn_bias\n attn_map = K.sigmoid(attn_map)\n masked_inputs = inputs_linear * attn_map\n output = K.sum(masked_inputs, 1)\n return output\n\n def compute_output_shape(self, input_shape):\n if len(input_shape) == 2:\n return input_shape[:-1] + (self.channels,)\n else:\n return (input_shape[0], self.channels)\n\n def get_config(self):\n config = {}\n base_config = super(GlobalAttentionPool, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass NodeAttentionPool(Layer):\n \"\"\"\n A node-attention global pooling layer. 
Pools a graph by learning attention\n coefficients to sum node features.\n Note that this layer assumes the `'channels_last'` data format, and cannot\n be used otherwise.\n\n **Mode**: single, batch.\n\n **Input**\n\n - node features of shape `(batch, num_nodes, num_features)`;\n\n **Output**\n\n - a pooled feature matrix of shape `(batch, num_features)`;\n\n **Arguments**\n\n - `attn_kernel_initializer`: initializer for the attention kernel matrix;\n - `kernel_regularizer`: regularization applied to the kernel matrix; \n - `attn_kernel_regularizer`: regularization applied to the attention kernel \n matrix;\n - `attn_kernel_constraint`: constraint applied to the attention kernel\n matrix;\n\n **Usage**\n ```py\n X = Input(shape=(num_nodes, num_features))\n Z = NodeAttentionPool()(X)\n ```\n \"\"\"\n def __init__(self,\n attn_kernel_initializer='glorot_uniform',\n kernel_regularizer=None,\n attn_kernel_regularizer=None,\n attn_kernel_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(NodeAttentionPool, self).__init__(**kwargs)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)\n self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)\n self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n # Attention kernels\n self.attn_kernel = self.add_weight(shape=(input_shape[-1], 1),\n initializer=self.attn_kernel_initializer,\n regularizer=self.attn_kernel_regularizer,\n constraint=self.attn_kernel_constraint,\n name='attn_kernel')\n\n self.built = True\n\n def call(self, inputs):\n input_shape = K.int_shape(inputs)\n # Note that the layer assumes the 'channels_last' data format.\n features = K.dot(inputs, self.attn_kernel)\n features = K.squeeze(features, -1)\n attn_coeff = K.softmax(features) # TODO: maybe sigmoid?\n if len(input_shape) == 2:\n output = K.dot(attn_coeff, inputs)\n else:\n output = K.batch_dot(attn_coeff, inputs)\n\n return output\n\n def compute_output_shape(self, input_shape):\n if len(input_shape) == 2:\n return input_shape[-1:]\n else:\n return (input_shape[0], input_shape[-1])\n\n def get_config(self):\n config = {}\n base_config = super(NodeAttentionPool, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n","sub_path":"spektral/layers/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"85232217","text":"import requests\r\nimport socket\r\nfrom bs4 import BeautifulSoup\r\nfrom DouYuBarrageMonitor import settings\r\nimport threading\r\n\r\n\r\ndef get_room_status():\r\n # get live room status by html of douyu search result\r\n res = requests.get('https://www.douyu.com/search/?kw={}'.format(settings.APP_ROOM_ID))\r\n res.encoding = 'utf-8'\r\n if 'icon_live' in res.text:\r\n room_status = 1\r\n else:\r\n room_status = 2\r\n return room_status\r\n\r\n\r\nclass MyClient:\r\n # _instance_lock = threading.Lock()\r\n\r\n def __init__(self):\r\n # Configure the socket IP and port\r\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.host = socket.gethostbyname(\"openbarrage.douyutv.com\")\r\n self.port = 8601\r\n\r\n # def __new__(cls, *args, **kwargs):\r\n # if not hasattr(MyClient, \"_instance\"):\r\n # with 
MyClient._instance_lock:\r\n # if not hasattr(MyClient, \"_instance\"):\r\n # MyClient._instance = object.__new__(cls)\r\n # return MyClient._instance\r\n\r\n def connect(self):\r\n self.client.connect((self.host, self.port))\r\n\r\n def cancel_connect(self):\r\n self.client.close()\r\n\r\n def sendmsg(self, msgstr):\r\n \"\"\"\r\n Client to send a request to the server function, the integration of sending protocol header function\r\n msgHead: The protocol header before sending the data, twice the length of the message, and the message type, encryption field, and confidentiality field\r\n Use a while loop to send specific data, making sure you're sending it all out\r\n \"\"\"\r\n msg = msgstr.encode('utf-8')\r\n data_length = len(msg) + 8\r\n code = 689\r\n msgHead = int.to_bytes(data_length, 4, 'little') + int.to_bytes(data_length, 4, 'little') + int.to_bytes(code,\r\n 4,\r\n 'little')\r\n try:\r\n self.client.send(msgHead)\r\n except:\r\n self.client.close()\r\n sent = 0\r\n while sent < len(msg):\r\n tn = self.client.send(msg[sent:])\r\n sent = sent + tn\r\n\r\n def keeplive(self):\r\n \"\"\"\r\n Sending heartbeat information to maintain a long TCP connection\r\n Add \\0 to the end of the heartbeat message\r\n \"\"\"\r\n # msg = 'type@=keeplive/tick@=' + str(int(time.time())) + '/\\0'\r\n msg = 'type@=mrkl/\\0'\r\n self.sendmsg(msg)\r\n\r\n def get_name(self, roomid):\r\n \"\"\"\r\n BeautifulSoup is used to get live room titles\r\n \"\"\"\r\n r = requests.get(\"http://www.douyu.com/\" + roomid)\r\n soup = BeautifulSoup(r.text, 'lxml')\r\n return soup.find('h3', {'class', 'Title-headlineH2'}).string\r\n","sub_path":"DouYuBarrageMonitor/webServer/BarrageContent/NetworkContent.py","file_name":"NetworkContent.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"377803411","text":"\n\n#calss header\nclass _POTSHERD():\n\tdef __init__(self,): \n\t\tself.name = \"POTSHERD\"\n\t\tself.definitions = [u'a broken piece of an object made of baked clay, especially one found by an archaeologist (= someone who studies the buildings, graves, objects, etc. of people who lived in the past): ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_potsherd.py","file_name":"_potsherd.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"626629052","text":"\"\"\"\nTest sender.py module\n\"\"\"\n\nimport pytest\n\nfrom edunotice.sender import send_email\n\nfrom edunotice.constants import (\n SG_FROM_EMAIL,\n SG_TEST_EMAIL,\n)\n\nTEST_EMAIL_API = pytest.mark.skipif(\n not SG_TEST_EMAIL, reason=\"Testing Email API is switched off\"\n)\n\n\n@TEST_EMAIL_API\ndef test_send_email1():\n \"\"\"\n Tests send_email function.\n\n \"\"\"\n\n subject = \"Test message\"\n html_content = \"
This is a test message!
\"\n\n # One recipient\n success, error = send_email(SG_FROM_EMAIL, subject, html_content)\n assert success, error\n","sub_path":"tests/test_sender.py","file_name":"test_sender.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"279775844","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 26 12:44:33 2018\n@author: mpcr\n\"\"\"\n\n##self-organizing map##\n#DESeq2 data for MMP9 t-cell inhibititor conditions#\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport time\nimport cv2\nfrom scipy.misc import bytescale\n\nfilename = 'DE0.csv'\ndata = np.genfromtxt(filename, delimiter=',', missing_values='NA', filling_values=1, usecols=range(1,7))\nremoveNA = data[:, -1] != 1\ndata = data[removeNA, :]\nprint(data.shape)\n\ngenedata = np.genfromtxt(filename, delimiter=',', missing_values='NA', filling_values=1, usecols=range(0,7))\nremoveNA = genedata[:, -1] != 1\ngenedata = genedata[removeNA, :]\ngenedata = genedata[:, 0]\n#print(genedata.shape)\n\nnamedata = np.genfromtxt(filename, delimiter=',', dtype=str, usecols=0)\nprint(namedata)\n\nnamedata = namedata[removeNA]\nnamedata = namedata[:] \nprint(namedata.shape)\n\nfor name in namedata:\n input = name\n input = input.lower()\n numbername = []\n for character in input:\n number = ord(character)\n numbername.append(number)\n\nnamedata.shape=(7905, 1)\nnamedata = np.hstack((data,namedata))\nprint(namedata.shape)\n\ntestnum = int(0.1 * namedata.shape[0])\nrandtestind = np.random.randint(0, namedata.shape[0], testnum)\ntestdata = namedata[randtestind, :]\n\nnamedata = np.delete(arr=namedata, obj=randtestind, axis= 0)\n\nnamedata -= np.mean(namedata, 0)\nnamedata /= np.std(namedata, 0)\n\nn_in = namedata.shape[1]\n\nw = np.random.randn(3, n_in) * 0.1\n\nlr = 0.025\nn_iters = 10000\n\nfor i in range(n_iters):\n randsamples = np.random.randint(0, namedata.shape[0], 1)[0] \n rand_in = namedata[randsamples, :] \n difference = w - rand_in\n dist = np.sum(np.absolute(difference), 1)\n best = np.argmin(dist)\n w_eligible = w[best,:]\n w_eligible += (lr * (rand_in - w_eligible))\n w[best,:] = w_eligible\n cv2.namedWindow('weights', cv2.WINDOW_NORMAL)\n cv2.imshow('weights', bytescale(w))\n cv2.waitKey(100)\n###############################################################################\nnode1w = w[0, :]\nnode2w = w[1, :]\nnode3w = w[2, :]\ndifference1 = node1w - testdata\ndist1 = np.sum(np.absolute(difference1), 1)\ndifference2 = node2w - testdata\ndist2 = np.sum(np.absolute(difference2), 1)\ndifference3 = node3w - testdata\ndist3 = np.sum(np.absolute(difference3), 1)\n\n#???convert queried gene-number-name back into gene-letter-name\n#chr(ord('x'))\n###############################################################################\nprint (filename)\nprint (w)\n\n \n ","sub_path":"SparseCoding/SOMMMP9-2.py","file_name":"SOMMMP9-2.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220753357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 24 16:38:24 2019\n\n@author: mvb\n\"\"\"\nfrom data_visualization.showrawdata.preprocess import preProcessing\nfrom data_visualization.mathfunction import interpolation\nimport data_visualization.data_io as dio\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nimport sys, os\n\nfrom 
pyqtgraph.dockarea import DockArea, Dock\nfrom pyqtgraph.parametertree import Parameter, ParameterTree\nimport pyqtgraph.parametertree.parameterTypes as pTypes\nfrom scipy.ndimage.filters import gaussian_filter1d\n\n\nclass ScalableGroup(pTypes.GroupParameter):\n def __init__(self, **opts):\n opts['type'] = 'group'\n pTypes.GroupParameter.__init__(self, **opts)\n\n def addNew(self, name):\n self.addChild(\n dict(name=\"%s\" % (str(name)), type='bool', value=True, removable=True, renamable=True,\n expanded=False))\n\n\nclass Clarity(QtGui.QMainWindow):\n\n def __init__(self):\n super(Clarity, self).__init__()\n self.pathRootData = '/home/mvb/0_ETH/01_MasterThesis/Logs_GoKart/LogData/dynamics_newFormat/cuts/'\n testDays = dio.getDirectories(self.pathRootData)\n testDays.sort()\n defaultDay = testDays[1]\n self.pathTestDay = self.pathRootData + '/' + defaultDay\n logNrs = dio.getDirectories(self.pathTestDay)\n logNrs.sort()\n defaultLogNr = logNrs[13]\n self.pathLogNr = self.pathTestDay + '/' + defaultLogNr\n\n params = [\n {'name': 'Testing day', 'type': 'list', 'values': testDays, 'value': defaultDay},\n {'name': 'Log Nr.', 'type': 'list', 'values': logNrs, 'value': defaultLogNr},\n {'name': 'Add to plot -->', 'type': 'action'},\n ScalableGroup(name='Data in Plot')\n ]\n self.p = Parameter.create(name='params', type='group', children=params)\n self.p.param('Testing day').sigValueChanged.connect(self.testDayChange)\n self.p.param('Log Nr.').sigValueChanged.connect(self.logNrChange)\n self.p.param('Add to plot -->').sigActivated.connect(self.addToPlot)\n self.p.param('Data in Plot').sigTreeStateChanged.connect(self.treeChange)\n\n self.dataList = QtGui.QListWidget()\n self.dataList.setSelectionMode(self.dataList.ExtendedSelection)\n self.plotfield = pg.PlotWidget()\n self.histogramfield = pg.PlotWidget()\n self.legend = pg.LegendItem()\n self.tree = ParameterTree(showHeader=False)\n self.tree.setParameters(self.p, showTop=False)\n\n self.dataList.itemSelectionChanged.connect(self.dataSelectionChanged)\n\n self.availableData = []\n self.plotDataList = []\n self.createUI()\n\n def createUI(self):\n dockArea = DockArea()\n self.setCentralWidget(dockArea)\n windowWidth = 1200\n windowHeight = 675\n self.setGeometry(100, 100, windowWidth, windowHeight)\n self.setWindowTitle('Clarity')\n\n pg.setConfigOptions(antialias=True)\n\n d1 = Dock(\"Display Data\", size=(windowWidth / 3., windowHeight / 3.))\n d2 = Dock(\"Plot 1\", size=(windowWidth * 2 / 3., windowHeight*2./3.))\n d3 = Dock(\"Parameter Tree\", size=(windowWidth / 3., windowHeight * 2. 
/ 3))\n d4 = Dock(\"Histogram 1\", size=(windowWidth * 2 / 3., windowHeight/3.))\n\n dockArea.addDock(d1, 'left')\n dockArea.addDock(d2, 'right')\n dockArea.addDock(d3, 'bottom', d1)\n dockArea.addDock(d4, 'bottom', d2)\n\n item = QtGui.QListWidgetItem(self.dataList)\n item.setText('helloworld')\n\n d1.addWidget(self.dataList)\n\n d2.addWidget(self.plotfield)\n\n self.setListItems()\n d3.addWidget(self.tree)\n \n d4.addWidget(self.histogramfield)\n\n self.show()\n\n def filterChange(self, group, param):\n print('filter changed!')\n for param, value, data in param:\n if param.name() == 'sigma':\n for item in self.dataList.findItems(group.name(), QtCore.Qt.MatchExactly):\n item.info[1] = data\n item.info[2] = 10 * data\n if param.name() == 'scale':\n for item in self.dataList.findItems(group.name(), QtCore.Qt.MatchExactly):\n item.info[4] = data\n self.updatePlot()\n\n def testDayChange(self, param):\n self.plotfield.clear()\n self.p.param('Data in Plot').clearChildren()\n self.plottedData = []\n self.tempX = []\n self.tempY = []\n\n value = param.value()\n self.pathTestDay = self.pathRootData + '/' + value\n logNrs = dio.getDirectories(self.pathTestDay)\n logNrs.sort()\n self.p.param('Log Nr.').remove()\n child = Parameter.create(name='Log Nr.', type='list', values=logNrs, value=logNrs[0])\n self.p.insertChild(1, child)\n self.p.param('Log Nr.').sigValueChanged.connect(self.logNrChange)\n self.pathLogNr = self.pathTestDay + '/' + logNrs[0]\n\n def logNrChange(self, param):\n self.plotfield.clear()\n self.p.param('Data in Plot').clearChildren()\n self.availableData = []\n self.tempX = []\n self.tempY = []\n value = param.value()\n if value != None:\n self.pathLogNr = self.pathTestDay + '/' + value\n self.setListItems()\n\n def treeChange(self, group, param):\n print('Tree changed!')\n for param, value, data in param:\n print(' action:', value)\n if param.name() == 'Data in Plot':\n if value == 'childAdded':\n for item in self.dataList.findItems(data[0].name(), QtCore.Qt.MatchExactly):\n self.plotDataList.append(data[0].name())\n\n if value == 'childRemoved' and len(self.plotDataList) > 0:\n for i in range(len(self.plotDataList)):\n if self.plotDataList[i] == data.name():\n deleteElement = i\n del self.plotDataList[deleteElement]\n self.updatePlot()\n elif value == 'value':\n for item in self.dataList.findItems(param.name(), QtCore.Qt.MatchExactly):\n item.info[0] = data\n self.updatePlot()\n\n def dataSelectionChanged(self):\n print('dataselection changed')\n selected = self.dataList.selectedItems()\n if len(selected) > 2:\n self.dataList.blockSignals(True)\n try:\n for item in selected[1:-1]:\n item.setSelected(False)\n finally:\n self.dataList.blockSignals(False)\n if len(selected) > 0:\n self.updatePlot()\n\n def addToPlot(self):\n sel = list([str(item.text()) for item in self.dataList.selectedItems()])\n dispName = 0\n if len(sel) == 1:\n dispName = sel[0]\n if len(sel) == 2:\n dispName = str(sel[0]) + ' / ' + str(sel[1])\n\n if (not (any([item.name() == dispName for item in\n self.p.param('Data in Plot').children()])) and dispName != 0):\n self.p.param('Data in Plot').addNew(dispName)\n for item in self.dataList.findItems(dispName, QtCore.Qt.MatchExactly):\n sigma = item.info[1]\n scale = item.info[4]\n OptionVars = [\n {'name': 'sigma', 'type': 'float', 'value': sigma, 'step': 0.1},\n {'name': 'scale', 'type': 'float', 'value': scale, 'step': 0.1},\n ]\n # child = Parameter.create(name='Gaussian Filter', type='group', children\n # = filterVars)\n self.p.param('Data in 
Plot').param(dispName).addChildren(OptionVars)\n self.p.param('Data in Plot').param(dispName).sigTreeStateChanged.connect(\n self.filterChange)\n elif dispName == 0:\n print('No Data Selected!')\n else:\n print(dispName + ' is already plotted!')\n\n def updateData(self, dataNames):\n availableDataList = [item[0] for item in self.availableData]\n for name in dataNames:\n if name in self.needsPreprocessing:\n for item in self.dataList.findItems(name, QtCore.Qt.MatchExactly):\n nameDependency = item.dependencies[0]\n for depend in nameDependency:\n if depend not in dataNames:\n index = dataNames.index(name)\n dataNames = dataNames[:index] + [depend] + dataNames[index:]\n # print('dataNames', dataNames)\n for name in dataNames:\n if name in self.needsPreprocessing:\n preProcessing(self, name)\n availableDataList = [item[0] for item in self.availableData]\n for item in self.dataList.findItems(name, QtCore.Qt.MatchExactly):\n sigma = item.info[1]\n width = item.info[2]\n index = availableDataList.index(name)\n yOld = self.availableData[index][2]\n if sigma == 0:\n yNew = yOld\n else:\n trunc = (((width - 1) / 2) - 0.5) / sigma\n yNew = gaussian_filter1d(yOld, sigma, truncate=trunc)\n self.availableData[index][2] = yNew\n else:\n for item in self.dataList.findItems(name, QtCore.Qt.MatchExactly):\n sigma = item.info[1]\n width = item.info[2]\n xOld = item.data[0]\n yOld = item.data[1]\n if sigma == 0:\n yNew = yOld\n else:\n trunc = (((width - 1) / 2) - 0.5) / sigma\n yNew = gaussian_filter1d(yOld, sigma, truncate=trunc)\n if name in availableDataList:\n index = availableDataList.index(name)\n self.availableData[index][2] = yNew\n else:\n self.availableData.append([name, xOld, yNew])\n\n def updatePlot(self):\n self.plotfield.removeItem(self.legend)\n self.plotfield.clear()\n self.histogramfield.clear()\n self.legend = pg.LegendItem()\n # self.plotfield.addLegend()\n sel = list([str(item.text()) for item in self.dataList.selectedItems()])\n unite = self.plotDataList\n print('unite', unite)\n plotNow = []\n for elem in unite:\n for item in self.dataList.findItems(elem, QtCore.Qt.MatchExactly):\n order_new = item.info[3]\n if len(plotNow) != 0:\n for it in range(len(plotNow)):\n for item in self.dataList.findItems(plotNow[it], QtCore.Qt.MatchExactly):\n order = item.info[3]\n if order_new <= order:\n plotNow.insert(it, elem)\n break\n elif it == len(plotNow) - 1:\n plotNow.append(elem)\n else:\n plotNow.append(elem)\n\n self.updateData(plotNow + sel)\n colorIndex = 0\n print('plotNow', plotNow)\n availableDataList = [item[0] for item in self.availableData]\n for child in self.p.param('Data in Plot').children():\n name = child.name()\n for item in self.dataList.findItems(name, QtCore.Qt.MatchExactly):\n visible = item.info[0]\n scale = item.info[4]\n index = availableDataList.index(name)\n if visible:\n c = self.plotfield.plot(self.availableData[index][1],\n np.multiply(self.availableData[index][2], scale), pen=(colorIndex),\n name=name)\n #histogram\n y,x = np.histogram(self.availableData[index][2], bins=100)\n _ = self.histogramfield.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),name=name)\n \n self.legend.addItem(c, name=c.opts['name'])\n colorIndex += 1\n if sel[0] not in plotNow:\n index = availableDataList.index(sel[0])\n c = self.plotfield.plot(self.availableData[index][1], self.availableData[index][2],\n pen=(0.5), name=self.availableData[index][0])\n #histogram \n y,x = np.histogram(self.availableData[index][2], bins=100)\n _ = self.histogramfield.plot(x, y, stepMode=True, 
fillLevel=0, brush=(0,0,255,150),name=self.availableData[index][0])\n self.histogramfield.addLine(x = np.mean(self.availableData[index][2]), y = None, pen=pg.mkPen('r', width=3))\n self.histogramfield.addLine(x = np.mean(self.availableData[index][2]) +np.std(self.availableData[index][2]) , y = None, pen=pg.mkPen(color = (200,200,200), width=1))\n self.histogramfield.addLine(x = np.mean(self.availableData[index][2]) -np.std(self.availableData[index][2]) , y = None, pen=pg.mkPen(color = (200,200,200), width=1))\n \n self.legend.addItem(c, name=c.opts['name'])\n axX = self.plotfield.getAxis('bottom')\n axY = self.plotfield.getAxis('left')\n xrange = axX.range\n yrange = axY.range\n\n self.legend.setPos(\n self.legend.mapFromItem(self.legend, QtCore.QPointF(xrange[0], yrange[1])))\n self.plotfield.showGrid(x=True, y=True, alpha=1)\n self.plotfield.autoRange(padding=0)\n self.plotfield.addItem(self.legend)\n\n def setListItems(self):\n files = []\n groups = []\n # r=root, d=directories, f = files\n for r, d, f in os.walk(self.pathLogNr):\n for file in f:\n if '.csv' in file:\n files.append(os.path.join(r, file))\n\n for name in files:\n if 'pose.lidar' in name:\n groups.append(['pose x atvmu', 0, 1, name, True, 0, 0, 0, 1])\n groups.append(['pose y atvmu', 0, 2, name, True, 0, 0, 0, 1])\n groups.append(['pose theta', 0, 3, name, True, 5, 50, 0, 1])\n groups.append(['pose quality', 0, 4, name, True, 0, 0, 0, 1])\n groups.append(['vehicle vx', 0, 5, name, True, 5, 50, 0, 1])\n groups.append(['vehicle vy atvmu', 0, 6, name, True, 5, 50, 0, 1])\n elif 'steer.put' in name:\n groups.append(['steer torque cmd', 0, 2, name, True, 0, 0, 0, 1])\n elif 'steer.get' in name:\n groups.append(['steer torque eff', 0, 5, name, True, 0, 0, 0, 1])\n groups.append(['steer position raw', 0, 8, name, True, 0, 0, 0, 1])\n elif 'status.get' in name:\n groups.append(['steer position cal', 0, 1, name, True, 0, 0, 0, 1])\n elif 'linmot.put' in name:\n groups.append(['brake position cmd', 0, 1, name, True, 0, 0, 0, 1])\n elif 'linmot.get' in name:\n groups.append(['brake position effective', 0, 1, name, True, 0, 0, 0, 1])\n elif 'rimo.put' in name:\n groups.append(['motor torque cmd left', 0, 1, name, True, 0, 0, 0, 1])\n groups.append(['motor torque cmd right', 0, 2, name, True, 0, 0, 0, 1])\n elif 'rimo.get' in name:\n groups.append(['motor rot rate left', 0, 2, name, True, 0, 0, 0, 1])\n groups.append(['motor rot rate right', 0, 9, name, True, 0, 0, 0, 1])\n elif 'vmu931' in name:\n groups.append(['vmu ax atvmu (forward)', 0, 2, name, True, 70, 700, 0, 1])\n groups.append(['vmu ay atvmu (left)', 0, 3, name, True, 70, 700, 0, 1])\n groups.append(['vmu vtheta', 0, 4, name, True, 5, 50, 0, 1])\n\n self.dataList.clear()\n groups.sort()\n rawDataNames = []\n for name, timeIndex, dataIndex, fileName, vis, sig, wid, order, scale in groups:\n rawDataNames.append(name)\n try:\n dataFrame = dio.dataframe_from_csv(fileName)\n xRaw = dataFrame.iloc[:, timeIndex]\n yRaw = dataFrame.iloc[:, dataIndex]\n\n if name == 'vmu vtheta':\n if int(self.pathLogNr[-14:-10]) > 509:\n yRaw = -yRaw\n\n # if name == 'vehicle vy atvmu':\n # offset = -0.045\n # yRaw += offset\n\n if name == 'pose theta':\n # for i in range(len(yRaw)):\n # if yRaw[i] < -np.pi:\n # yRaw[i] = yRaw[i] + 2 * np.pi\n # if yRaw[i] > np.pi:\n # yRaw[i] = yRaw[i] - 2 * np.pi\n # for i in range(len(yRaw)-1):\n # if np.abs(yRaw[i + 1] - yRaw[i]) > 1:\n # yRaw[i + 1:] = yRaw[i + 1:] - np.sign((yRaw[i + 1] - yRaw[i])) * 2 * np.pi\n\n dy = np.abs(np.subtract(np.array(yRaw[1:]), 
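# (editor's note) absolute difference of adjacent samples; jumps > 1 rad\n # mark +/-2*pi wraparounds of the heading angle, unwrapped just below\n 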
np.array(yRaw[:-1])))\n indices = np.where(dy > 1)\n for index in indices[0]:\n yRaw[index + 1:] = yRaw[index + 1:] - np.sign((yRaw[index + 1] - yRaw[index])) * 2 * np.pi\n\n if name in ['vmu ax atvmu (forward)', 'vmu ay atvmu (left)', 'vmu vtheta']:\n xRaw, yRaw = interpolation(xRaw, yRaw, xRaw.iloc[0], xRaw.iloc[-1], 0.001)\n except:\n print('EmptyDataError: could not read data \\'', name, '\\' from file ', fileName)\n xRaw, yRaw = [0], [0]\n # raise\n\n item = QtGui.QListWidgetItem(name)\n item.setText(name)\n \n\n item.data = [xRaw, yRaw] # item.data = [x_data, y_data]\n item.info = [vis, sig, wid, order, scale] # item.info = [visible, filter_sigma, filter_width, order, scale]\n item = self.dataList.addItem(item)\n # self.availableData.append([name,xRaw, yRaw])\n if len(groups) == 18:\n print('Data status: complete')\n else:\n print('ACHTUNG! Missing Data!')\n\n # Add Preprocessed Data\n groups = []\n groups.append(['pose x', ['pose x atvmu', 'pose theta'], True, 5, 50, 1, 1])\n groups.append(['pose y', ['pose y atvmu', 'pose theta'], True, 5, 50, 1, 1])\n groups.append(['xy trace', ['pose x', 'pose y'], True, 0, 0, 1, 1])\n groups.append(['xy trace atvmu', ['pose x atvmu', 'pose y atvmu'], True, 0, 0, 1, 1])\n groups.append(['pose vx', ['pose x'], True, 5, 50, 1, 1])\n groups.append(['pose vy', ['pose y'], True, 5, 50, 1, 1])\n groups.append(['pose vtheta', ['pose theta'], True, 5, 50, 1, 1])\n groups.append(['vehicle vy', ['vehicle vy atvmu', 'pose vtheta'], True, 0, 0, 2, 1])\n groups.append(['vehicle vx from pose', ['pose x', 'pose y', 'pose vx', 'pose vy', 'pose theta'], True, 0, 0, 2, 1])\n groups.append(['vehicle vy from pose', ['pose x', 'pose y', 'pose vx', 'pose vy', 'pose theta'], True, 0, 0, 2, 1])\n groups.append(['vehicle ax local', ['vehicle vx'], True, 5, 50, 1, 1])\n groups.append(['vehicle ay local', ['vehicle vy'], True, 8, 80, 1, 1])\n groups.append(['pose ax', ['pose x', 'pose vx'], True, 20, 200, 2, 1])\n groups.append(['pose ay', ['pose y', 'pose vy'], True, 20, 200, 2, 1])\n groups.append(['pose atheta', ['pose vtheta'], True, 0, 0, 2, 1])\n groups.append(['slip ratio left', ['motor rot rate left', 'vehicle vx'], True, 0, 0, 1, 1])\n groups.append(['slip ratio right', ['motor rot rate right', 'vehicle vx'], True, 0, 0, 1, 1])\n groups.append(['vehicle slip angle', ['pose theta', 'pose x', 'pose y', 'pose vx', 'pose vy'], True, 0, 0, 2, 1])\n groups.append(['vmu ax', ['vmu ax atvmu (forward)', 'pose theta','pose vtheta','pose atheta'], True, 0, 0, 3, 1])\n groups.append(['vmu ay', ['vmu ay atvmu (left)', 'pose theta','pose vtheta','pose atheta'], True, 0, 0, 3, 1])\n groups.append(['vehicle ax total',\n ['pose theta', 'pose x', 'pose y', 'pose vtheta', 'pose vx', 'pose vy', 'vehicle slip angle',\n 'vehicle vx', 'vehicle vy'], True, 0, 0, 3, 1])\n groups.append(['vehicle ay total',\n ['pose theta', 'pose x', 'pose y', 'pose vtheta', 'pose vx', 'pose vy', 'vehicle slip angle',\n 'vehicle vx', 'vehicle vy'], True, 0, 0, 3, 1])\n groups.append(['vehicle ax only transl',\n ['pose theta', 'pose x', 'pose y', 'pose vx', 'pose vy', 'pose ax', 'pose ay'], True, 0, 0, 3, 1])\n groups.append(['vehicle ay only transl',\n ['pose theta', 'pose x', 'pose y', 'pose vx', 'pose vy', 'pose ax', 'pose ay'], True, 0, 0, 3, 1])\n groups.append(['MH power accel rimo left',\n ['motor torque cmd left', 'pose x', 'pose y', 'pose vx', 'pose vy', 'vehicle slip angle', 'vehicle vx'],\n True, 0, 0, 4, 1])\n groups.append(['MH power accel rimo right',\n ['motor torque cmd right', 'pose x', 
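# (editor's note) each groups entry is [name, dependencies, visible,\n # filter_sigma, filter_width, order, scale] -- see the unpacking loop below\n 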
'pose y', 'pose vx', 'pose vy', 'vehicle slip angle', 'vehicle vx'],\n True, 0, 0, 4, 1])\n groups.append(['MH AB',\n ['brake position effective', 'pose x', 'pose y', 'pose vx', 'pose vy', 'vehicle slip angle', 'vehicle vx', 'MH power accel rimo left', 'MH power accel rimo right'],\n True, 0, 0, 5, 1])\n groups.append(['MH TV',\n ['pose x', 'pose y', 'pose vx', 'pose vy', 'vehicle slip angle', 'vehicle vx', 'MH power accel rimo left', 'MH power accel rimo right'],\n True, 0, 0, 5, 1])\n groups.append(['MH BETA',\n ['steer position cal'], \n True, 0, 0, 1, 1])\n\n self.needsPreprocessing = []\n for name, dep, vis, sig, wid, order, scale in groups:\n self.needsPreprocessing.append(name)\n item = QtGui.QListWidgetItem(name)\n item.setText(name)\n item.info = [vis, sig, wid, order, scale] # item.info = [visible, filter_sigma,\n # filter_width, order, scale]\n item.dependencies = [dep]\n item = self.dataList.addItem(item)\n self.dataList.sortItems()\n\n self.updateData(rawDataNames)\n\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n _ = Clarity()\n app.exec_()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/data_visualization/showrawdata/Clarity_V01.py","file_name":"Clarity_V01.py","file_ext":"py","file_size_in_byte":22585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"72656649","text":"\n\n#calss header\nclass _MARQUISE():\n\tdef __init__(self,): \n\t\tself.name = \"MARQUISE\"\n\t\tself.definitions = [u'the wife of a marquis (= a man of high social rank), or a woman who holds a high social rank in her own right']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_marquise.py","file_name":"_marquise.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"469724268","text":"#-*-coding:utf-8-*- \r\n__author__ = 'yxt'\r\nimport csv\r\nimport os\r\nimport numpy as np\r\nimport scipy.stats.mstats\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\ndef get_filename(path):\r\n filename_list=[]\r\n for root, subdirs, files in os.walk(path, True):\r\n for file_name in files:\r\n filename_list.append(os.path.join(path,file_name))\r\n return filename_list\r\ndef get_dir_filename(dir_path, listdir=False):\r\n filename_list = []\r\n for root, subdirs, files in os.walk(dir_path, True):\r\n for file_name in files:\r\n filename_list.append(file_name)\r\n if listdir:\r\n for subdir in subdirs:\r\n filename_list.append(subdir + '/')\r\n return filename_list\r\ndef get_data():\r\n file_list=get_filename('data/')\r\n for file in file_list:\r\n dic={}\r\n sets=set()\r\n lis=[]\r\n with open(file)as f:\r\n f.readline()\r\n read=csv.reader(f)\r\n for line in read:\r\n lis.append(line[1])\r\n for a in lis:\r\n if a not in sets:\r\n sets.add(a)\r\n dic[a]=1\r\n else:\r\n dic[a]+=1\r\n key=dic.keys()\r\n value=dic.values()\r\n plt.bar(key,value)\r\n plt.show()\r\ndef mark(datafram,thres_up,thres_down):\r\n d=datafram['count']\r\n # datafram['isAnamaly']=(d>d.quantile(thres_up)) or (dd.quantile(thres_up)\r\n down=d class_no = 1\n # color: in BGR color space\n # iou : appearance area\n \n # https://stackoverflow.com/questions/39206986/numpy-get-rectangle-area-just-the-size-of-mask?rq=1\n target = list()\n for i in range(total_circles):\n \n # create mask for bbox 
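(one binary mask per circle)\n # (editor's note) cv2.inRange keeps exactly the pixels painted in this\n # circle's unique label color, so np.sum(mask)/np.sum(full) below is the\n # circle's visible fraction\n # create mask for bbox 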
and iou\n color = colors[i]\n c1 = tuple(color)\n c2 = tuple([c+1 for c in color])\n mask = cv2.inRange(color_image, c1, c2)\n \n # create mask for iou\n x, y, radius = circles[i]\n full = np.zeros(shape=[image_h, image_w], dtype=np.uint8)\n cv2.circle(full, (x,y), radius, 255, thickness = -1) \n iou = np.sum(mask) / np.sum(full)\n \n # check appearance\n if iou > .2:\n where = np.array(np.where(mask))\n y1, x1 = np.amin(where, axis=1)\n y2, x2 = np.amax(where, axis=1)\n class_no = 1\n target.append([x1, y1, x2, y2, class_no, iou] + color)\n \n # cast to numpy array and normalization\n target = np.asarray(target, dtype=np.float64)\n target /= np.array([1, 1, 1, 1,\n 1, 1, 255, 255, 255])\n \n # overlay color on augmented image (transparent 0%)\n image = color_image\n image[image==0] = augmented[image==0]\n \n # do preprocessing (not required)\n if self.preproc is not None:\n image, target = self.preproc(image, target)\n \n return image, target\n\n def __len__(self):\n return len(self.samples)\n\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n\n self._write_voc_results_file(all_boxes)\n aps, map = self._do_python_eval(output_dir)\n return aps, map\n\n def _get_voc_results_file_template(self):\n filename = 'comp4_det_test' + '_{:s}.txt'\n filedir = \"drive/My Drive/Datasets/Pills_Datasets/mAP_result\"\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(Circles_CLASSES):\n cls_ind = cls_ind \n if cls == '__background__':\n continue\n # print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.ids):\n # This line almost kill me\n # index = index[1]\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n aps = []\n\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True\n # print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if output_dir is not None and not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n print(\"a\")\n\n for i, cls in enumerate(Circles_CLASSES):\n if cls == '__background__':\n continue\n\n filename = self._get_voc_results_file_template().format(cls)\n\n print(\"b\")\n rec, prec, ap = pills_eval(\n filename, self.dir_lists, cls, self.image_sets,\n ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n # print('AP for {} = {:.4f}'.format(cls, ap))\n if output_dir is not None:\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n # valid_aps = [ap for ap in aps if ap != 0]\n # mean_aps = np.mean(valid_aps) if len(valid_aps) > 0 else 0\n \n print(\"c\")\n print(aps)\n\n mean_aps = np.mean([aps[0], aps[1], aps[2], aps[7], aps[12]])\n print('Mean AP = {:.4f}'.format(mean_aps))\n # print('~~~~~~~~')\n # print('Results:')\n # for ap in aps:\n # print('{:.3f}'.format(ap))\n # print('{:.3f}'.format(np.mean(aps)))\n # print('~~~~~~~~')\n # print('')\n # print('--------------------------------------------------------------')\n # print('Results computed with the **unofficial** Python eval code.')\n # print('Results should be very close to the official MATLAB eval code.')\n # print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n # print('-- Thanks, The Management')\n # print('--------------------------------------------------------------')\n return aps, mean_aps\n\n def show(self, index):\n img, target = self.__getitem__(index)\n for obj in target:\n obj = obj.astype(np.int)\n cv2.rectangle(img, (obj[0], obj[1]), (obj[2], obj[3]), (255,0,0), 3)\n cv2.imwrite('./image.jpg', img)","sub_path":"ml/lib_dataset_circles.py","file_name":"lib_dataset_circles.py","file_ext":"py","file_size_in_byte":8176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"365918910","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 18:32:55 2018\n\n@author: Stefan Draghici\n\"\"\"\n\nimport numpy as np\n\n# the classes at the output layer\na=np.random.randn(5)\n\n# exponentiate the outputs\nexpo=np.exp(a)\n\n# divide by the sub of the expo\nanswer=expo/expo.sum()\n\n# the softmax result will add up to 1\ntest=answer.sum()","sub_path":"softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"494803146","text":"import urllib.request\nimport json\n\nres = urllib.request.urlopen(\n \"https://api.github.com/search/repositories?q=language:java&sort=stars&page=1&per_page=100&order=desc\"\n)\nres = json.loads(res.read().decode('utf-8'))\n\nfor item in res['items']:\n print(item['full_name'])\n\n","sub_path":"crawler/toprepos.py","file_name":"toprepos.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"503429742","text":"# Created by Tanner Bornemann\n# 04:24PM 09 Oct 2016\n\nimport csv\nimport statistics\nfrom datetime import datetime\n\n# region AUXILIARY FUNCTIONS\n\n\ndef get_parsable_data(splitline):\n \"\"\"\n This function will split up the line read from the raw output of the arduino to PuTTY.\n Then return the data from the line in a 'parsable' format.\n :param splitline: the line to split\n :return: potentiometer sensor readings, milliseconds passed since arduino initialized.\n \"\"\"\n splitline = 
splitline.split(\" , \")\n if splitline[0].startswith(\"pot: \") and splitline[1].startswith(\"millis: \"):\n # we have found a valid line of data\n pot = splitline[0][4:]\n millis = splitline[1][8:]\n tup = [float(pot), int(millis)]\n return tup # return tupple with pot sensor data and time stamp data\n\n\ndef print_progress(current_progress, total):\n percent = current_progress / total * 100 # do the maths to get percentage to show current progress\n # make print output pretty\n progress = \"\\rProgress: %{:.2f} | Elapsed: {:}\".format(percent, datetime.now() - start_time)\n print(progress, end=\"\") # PRINT!!\n\n\n# endregion AUXILIARY FUNCTIONS\n\n# region IMPORT AND SORT DATA\n\nlines = []\nposition1 = [] # list of position 1 data\nposition2 = [] # list of position 2 data\ncurrent_position = 0 # 1 = the first position data, 2 = the second position data\n\ndata_file = \"C:/github/Flex/R&D/SoftPotentiometerTesting/Data/RawOutput/potRecordTXT_6_49hours.txt\"\n# for test data:\n# data_file = \"C:/github/Flex/R&D/SoftPotentiometerTesting/Data/RawOutput/testData.txt\"\noutput_file_position1 = \\\n \"C:\\\\github\\\\Flex\\\\R&D\\\\SoftPotentiometerTesting\\\\Data\\\\position1data{:%d-%b-%Y}.csv\".format(datetime.now())\noutput_file_position2 = \\\n \"C:\\\\github\\\\Flex\\\\R&D\\\\SoftPotentiometerTesting\\\\Data\\\\position2data{:%d-%b-%Y}.csv\".format(datetime.now())\n\ncurrent_progress = 0 # current progress of total items to parse\n\n# get current time for progress\nstart_time = datetime.now()\nprint(\"\\nSorting....\")\n# sort through the data\nwith open(data_file) as input:\n\n print(\"\\nOpening file: \", data_file)\n all = input.readlines()\n total = len(all)\n print(total, \"total lines found.\\n\")\n\n for line in all:\n current_progress += 1\n\n # check for position 1 data\n if line.startswith(\"POS 1 CONNECTION ESTABLISHED\"): # start new set of data for position 1\n current_position = 1\n # check for position 2 data\n elif line.startswith(\"POS 2 CONNECTION ESTABLISHED\"): # start new set of data for position 1\n current_position = 2\n else:\n if current_position is 1:\n tup = get_parsable_data(line)\n if tup is not None:\n position1.append([tup[0], tup[1]])\n elif current_position is 2:\n tup = get_parsable_data(line)\n if tup is not None:\n position2.append([tup[0], tup[1]])\n\n if current_progress % 100 is 0: # only print progress every 100 items\n print_progress(current_progress, total)\n\nprint(\"\\rSorting Complete. 
| Elapsed: {:}\\n\\n\".format(datetime.now() - start_time))\n\n# endregion IMPORT AND SORT DATA\n\n# region ORGANIZE AND EXPORT TO CSV\n\nprint(\"Parsing raw data to export to csv files...\\n\\n\")\nheaders = [\"Analog Sensor Reading\", \"Time Elapsed (milliseconds)\"]\n\nwith open(output_file_position1, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(headers)\n writer.writerows(position1)\n\nwith open(output_file_position2, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(headers)\n writer.writerows(position2)\n\n# endregion ORGANIZE AND EXPORT TO CSV\n\n# region ANALYSIS AND REPORT\n\nprint(\"Analysing data and creating report...\\n\\n\")\n\n# make some statistics\npos1sum = 0\npos2sum = 0\npos1list = []\npos2list = []\nmillis1list = []\nmillis2list = []\n\nfor item in position1:\n pos1sum += item[0]\n pos1list.append(item[0])\n millis1list.append(item[1])\n if item[0] < 500: # see if data is way outta range or something fishy, do not modify data here.\n stop = 1\n raise ValueError('value may not be for position 1')\n\nfor item in position2:\n pos2sum += item[0]\n pos2list.append(item[0])\n millis2list.append(item[1])\n if item[0] > 400:\n stop = 1\n raise ValueError('value may not be for position 2')\n\npos1min = min(pos1list)\npos1max = max(pos1list)\npos2min = min(pos2list)\npos2max = max(pos2list)\n\npos1avg = pos1sum / len(position1)\npos2avg = pos2sum / len(position2)\n\npos1std = statistics.stdev(pos1list)\npos2std = statistics.stdev(pos2list)\npos1pvar = statistics.pvariance(pos1list)\npos2pvar = statistics.pvariance(pos2list)\npos1var = statistics.variance(pos1list)\npos2var = statistics.variance(pos2list)\n\n# format milliseconds elapsed to show time elapsed for data collection\nhours = max(millis1list) / 1000 / 60 / 60\n\nprint(\"\\n## Flex Soft Potentiometer Data Report [{:%d %b %Y %H:%M}]\\n\".format(datetime.now()))\n# print total amount of time elapsed of data collection\nprint(\"- Total time elapsed for collection data: {:.2f} hours\".format(hours))\nprint(\"- Total data points: {:}\".format(len(position1) + len(position2)))\n\nprint(\"\\n### Position 1 \")\nprint(\"- Flex step position: 500\")\nprint(\"- Data points: \", len(position1))\nprint(\"- Average Potentiometer Reading: \", \"{:.4f}\".format(pos1avg)) # format to only 4 decimal places\nprint(\"- Standard Deviation(σ):\", \"{:.4f}\".format(pos1std)) # format to only 4 decimal places\nprint(\"- Variance: \", \"{:.4f}\".format(pos1var)) # format to only 4 decimal places\nprint(\"- Max; \", pos1max)\nprint(\"- Min: \", pos1min)\nprint(\"- Range: \", pos1max - pos1min)\n\nprint(\"\\n### Position 2 \")\nprint(\"- Flex step position: 3800\")\nprint(\"- Data points: \", len(position2))\nprint(\"- Average Potentiometer Reading: \", \"{:.4f}\".format(pos2avg)) # format to only 4 decimal places\nprint(\"- Standard Deviation(σ):\", \"{:.4f}\".format(pos2std)) # format to only 4 decimal places\nprint(\"- Variance: \", \"{:.4f}\".format(pos2var)) # format to only 4 decimal places\nprint(\"- Max; \", pos2max)\nprint(\"- Min: \", pos2min)\nprint(\"- Range: \", pos2max - pos2min)\n\nprint(\"\\nNotes:\")\nprint(\"\\n> The Flex Test schedule used to generate the data: \"\n \"\\n - [C:\\github\\Flex\\R&D\\SoftPotentiometerTesting\\\\FlexTestSchedule_SoftPot.xml]\\n\"\n \"\\n> The raw data used is located these files: \\n\" +\n \"> - [{:}]\\n> - [{:}]\".format(output_file_position1, output_file_position2) +\n \"\\n\\n> This data was parsed from the file:\\n> - [{:}] \\n\\n> using the 
python\".format(data_file) +\n \" script \\\"potSensorOutputConversion.py\\\" (in the \\Python Files folder).\")\n\n# format date example: 08-Oct-2016 => \"{:%d-%b-%Y}\".format(datetime.now())\n\nprint(\"\\n\\nTanner Bornemann [{:%d %b %Y %H:%M}]\\n\\n----\".format(datetime.now()))\n\n# endregion ANALYSIS AND REPORT\n","sub_path":"Python Files/potSensorOutputConversion.py","file_name":"potSensorOutputConversion.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220427587","text":"import numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport itertools\nimport os\nfrom Segmentation.utils.losses import dice_coef, iou_loss\n\ndef iou_loss_eval(y_true, y_pred):\n\n y_true = tf.slice(y_true, [0, 0, 0, 1], [-1, -1, -1, 6])\n y_pred = tf.slice(y_pred, [0, 0, 0, 1], [-1, -1, -1, 6])\n iou = iou_loss(y_true, y_pred)\n\n return iou\n\ndef dice_coef_eval(y_true, y_pred):\n\n y_true = tf.slice(y_true, [0, 0, 0, 1], [-1, -1, -1, 6])\n y_pred = tf.slice(y_pred, [0, 0, 0, 1], [-1, -1, -1, 6])\n\n dice = dice_coef(y_true, y_pred)\n\n return dice\n\ndef get_confusion_matrix(y_true, y_pred, classes=None):\n\n y_true = np.reshape(y_true, (y_true.shape[0] * y_true.shape[1] * y_true.shape[2], y_true.shape[3]))\n y_pred = np.reshape(y_pred, (y_pred.shape[0] * y_pred.shape[1] * y_pred.shape[2], y_pred.shape[3]))\n y_true_max = np.argmax(y_true, axis=1)\n y_pred_max = np.argmax(y_pred, axis=1)\n\n if classes is None:\n cm = confusion_matrix(y_true_max, y_pred_max)\n else:\n cm = confusion_matrix(y_true_max, y_pred_max, labels=classes)\n print(cm)\n\n return cm\n\ndef plot_confusion_matrix(cm, savefig, classes, normalise=True, title='confusion matrix', cmap=plt.cm.Blues):\n\n if normalise:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalise else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()\n if savefig is not None:\n plt.savefig(savefig)\n","sub_path":"Segmentation/utils/evaluation_metrics.py","file_name":"evaluation_metrics.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"110544217","text":"\"\"\"\n1335. Minimum Difficulty of a Job Schedule\nHard\n\nYou want to schedule a list of jobs in d days. Jobs are dependent (i.e To work on the i-th job, you have to finish all the jobs j where 0 <= j < i).\n\nYou have to finish at least one task every day. The difficulty of a job schedule is the sum of difficulties of each day of the d days. The difficulty of a day is the maximum difficulty of a job done in that day.\n\nGiven an array of integers jobDifficulty and an integer d. The difficulty of the i-th job is jobDifficulty[i].\n\nReturn the minimum difficulty of a job schedule. 
If you cannot find a schedule for the jobs return -1.\n\nExample 1:\n\nInput: jobDifficulty = [6,5,4,3,2,1], d = 2\nOutput: 7\nExplanation: First day you can finish the first 5 jobs, total difficulty = 6.\nSecond day you can finish the last job, total difficulty = 1.\nThe difficulty of the schedule = 6 + 1 = 7 \n\nExample 2:\n\nInput: jobDifficulty = [9,9,9], d = 4\nOutput: -1\nExplanation: If you finish a job per day you will still have a free day. You cannot find a schedule for the given jobs.\n\nExample 3:\n\nInput: jobDifficulty = [1,1,1], d = 3\nOutput: 3\nExplanation: The schedule is one job per day. Total difficulty will be 3.\n\nExample 4:\n\nInput: jobDifficulty = [7,1,7,1,7,1], d = 3\nOutput: 15\n\nExample 5:\n\nInput: jobDifficulty = [11,111,22,222,33,333,44,444], d = 6\nOutput: 843\n\nConstraints:\n\n1 <= jobDifficulty.length <= 300\n0 <= jobDifficulty[i] <= 1000\n1 <= d <= 10\n\"\"\"\n\nfrom typing import List\n\n###############################################################################\n\"\"\"\nSolution 0: basic recursion, the most naive version, to show the idea.\n\nHow do we partition n elements into d contiguous groups so that the sum of the\ngroup maxima is minimized?\n\"\"\"\nclass Solution0:\n #def minDifficulty(self, jobDifficulty: List[int], d: int) -> int:\n def minDifficulty(self, arr: List[int], d: int) -> int:\n def rec(arr, d):\n n = len(arr) # assume >= 1\n\n if n < d: # too few jobs to allocate to d days\n return -1\n if n == d: # one job per day\n return sum(arr)\n if d == 1: # all jobs on the same day\n return max(arr)\n\n # Now n > d.\n # Want to split n jobs into d days.\n\n min_diffic = float('inf')\n m = n - d + 1 # max number of jobs in one day\n # because the remaining (d-1) days can still have 1 job each:\n # n - (d-1)*1 = n - d + 1\n\n for i in range(1, m+1): # number of jobs to do today\n # For today, do jobs 0 to i-1, and recurse to find difficulty\n # for the remaining jobs i..n-1 over the remaining d-1 days.\n diffic = max(arr[:i]) + rec(arr[i:], d - 1)\n\n min_diffic = min(min_diffic, diffic)\n \n return min_diffic\n \n return rec(arr, d)\n\n###############################################################################\n\"\"\"\nSolution 1: basic recursion with some improvements.\n\nAvoid some array copying (list slicing).\n1. To get rid of slicing in \"rec(arr[i:], d - 1)\" within the loop, pass the \nstart index \"i\" to the recursive function instead of a copied array.\n\n2. To get rid of slicing in \"max(arr[:i])\", now \"max(arr[start:i+1])\", in the\nloop, we can either cache these values or introduce another loop and index.\nThis is what memoization and tabulation would do, respectively. 
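Concretely, a table maxes[i, j] = max(arr[i..j]) for all i <= j costs O(n^2)\ntime and space to build, but it turns every range maximum needed inside the\nloop into an O(1) lookup instead of an O(n) rescan. 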
Here, we\nuse a pre-calculated cache.\n\nThis still TLE's on LeetCode.\n\"\"\"\nclass Solution:\n #def minDifficulty(self, jobDifficulty: List[int], d: int) -> int:\n def minDifficulty(self, arr: List[int], d: int) -> int:\n def rec(d, start=0):\n nonlocal maxes\n\n n = n_total - start # assume >= 1\n\n if n == d: # one job per day\n return sum(arr[start:])\n if d == 1: # all jobs on the same day\n #return max(arr[start:])\n return maxes[start, n_total-1]\n\n # Now n > d.\n # Want to split n jobs into d days.\n\n min_diffic = INF\n m = n - d + 1 # max number of jobs in one day\n\n for i in range(start, start + m):\n #diffic = max(arr[:i]) + rec(arr[i:], d - 1)\n #diffic = max(arr[start:i+1]) + rec(d - 1, i + 1)\n diffic = maxes[start, i] + rec(d - 1, i + 1)\n\n min_diffic = min(min_diffic, diffic)\n \n return min_diffic\n \n n_total = len(arr)\n if n_total < d: # too few jobs to allocate to d days\n return -1\n\n INF = float('inf')\n\n maxes = {} # maxes[i,j] is max of arr[i] to arr[j], inclusive\n for i in range(n_total):\n mx = arr[i]\n maxes[i, i] = mx\n\n for j in range(i+1, n_total):\n mx = max(mx, arr[j])\n maxes[i, j] = mx\n\n return rec(d)\n\n###############################################################################\n\"\"\"\nSolution 2: memoization\n\nRuntime: 1204 ms, faster than 44.30% of Python3 online submissions\nMemory Usage: 61.5 MB, less than 100.00% of Python3 online submissions\n\"\"\"\nclass Solution2:\n def minDifficulty(self, arr: List[int], d: int) -> int:\n def rec(d, start=0):\n nonlocal cache, sums, maxes\n if (d, start) in cache: return cache[d, start]\n\n n = n_total - start # assume >= 1\n\n if n == d: return sums[start] # one job per day\n if d == 1: return maxes[start, n_total-1] # all jobs on same day\n\n min_diffic = INF\n m = n - d + 1 # max number of jobs in one day\n\n for i in range(start, start + m):\n diffic = maxes[start, i] + rec(d - 1, i + 1)\n min_diffic = min(min_diffic, diffic)\n\n cache[d, start] = min_diffic\n return min_diffic\n\n n_total = len(arr)\n if n_total < d: return -1 # too few jobs to allocate to d days\n \n cache = {}\n INF = float('inf')\n \n s = 0\n sums = {} # sums[i] is sum from arr[i] to end of array\n for i in range(n_total-1, -1, -1):\n s += arr[i]\n sums[i] = s\n\n maxes = {} # maxes[i,j] is max of arr[i] to arr[j], inclusive\n for i in range(n_total):\n mx = arr[i]\n maxes[i, i] = mx\n\n for j in range(i+1, n_total):\n mx = max(mx, arr[j])\n maxes[i, j] = mx\n\n return rec(d)\n\n###############################################################################\n\"\"\"\nSolution 3: with @functools.lru_cache\n\nThis is much faster and uses much less space than memoization.\n\nRuntime: 776 ms, faster than 86.89% of Python3 online submissions\nMemory Usage: 13.6 MB, less than 100.00% of Python3 online submissions\n\nUse \"if\" instead of \"max\" and \"min\" \nand replacing start+m with end=start+n-d+1=n_total-d+1:\nRuntime: 416 ms, faster than 97.82% of Python3 online submissions\n\"\"\"\nimport functools\n\nclass Solution3:\n def minDifficulty(self, arr: List[int], d: int) -> int:\n \n @functools.lru_cache(None)\n def rec(d, start=0):\n n = n_total - start # assume >= 1\n\n #if n < d: return -1 # too few jobs to allocate to d days\n if n == d: return sum(arr[start:]) # one job per day\n if d == 1: return max(arr[start:]) # all jobs on same day\n\n min_diffic = INF\n #m = n - d + 1 # max number of jobs in one day\n # note: start + m = start + (n - d + 1) = n_total - d + 1\n end = n_total - d + 1\n\n maxd = 0\n #for i in 
range(start, start + m):\n for i in range(start, end):\n maxd = max(maxd, arr[i])\n diffic = maxd + rec(d - 1, i + 1)\n min_diffic = min(min_diffic, diffic)\n\n return min_diffic\n\n n_total = len(arr)\n if n_total < d: return -1 # too few jobs to allocate to d days\n \n INF = float('inf')\n\n return rec(d)\n\n###############################################################################\n\"\"\"\nSolution 4: tabulation using 2d table.\n\nTry to get rid of recursion in the memoization solution.\n\nWe want to start with smaller values of d.\n Case d = 0 is impossible (no solutions).\n Case d = 1 is trivial (all remaining jobs go on one day, so take their max).\n Case d = 2: decide how to split a list into two nonempty sublists.\n Cases d >= 3: decide the first sublist, then use a dp table to look up\n the solution for the rest of the list.\n\nOur recursive function was rec(d, start=0), and was called using rec(d-1, i+1).\nConvert rec(d-1, i+1) to a table lookup dp[days-1][end+1]. The state variables:\n\n days: the number of days left to allocate jobs for\n start: the start index of the array of jobs to start considering\n\nOur recursive relation is:\n\n dp[days][start] = min( max(arr[start:end+1]) + dp[days-1][end+1] )\n for end = start..m-1, where m = n - days + 1 (at most m jobs in one day)\n\nConsider diffic = maxes[start, end] + dp[days-1][end+1].\n\nBoundary cases: days=1 or days-1=0\n We want diffic to be just maxes[start, end]. So dp[0][end+1] should be 0.\n This is only for the whole array, so start=0 and end=n-1, so dp[0][n]=0.\n If it's not the whole array, then diffic=-1 (given by problem statement),\n but for calculation purposes with min, let it be float('inf'). \n So dp[0][i] = float('inf') for i=0..n-1.\n\nO(dnn) time\nO(dn) extra space\n\nLeetCode: if using \"max\" and \"min\" instead of \"if\", lookup using \"maxes\" was faster \nthan calculating it smartly within the loops, but used much more memory (but still \nmuch less than memoization). In both cases, time was a little faster than using \nmemoization, but used much less space. 
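As a worked check on Example 1 (arr = [6,5,4,3,2,1], d = 2): with one day left,\ndp[1][start] is just the suffix maximum, so the candidates for dp[2][0] are\n6+5, 6+4, 6+3, 6+2 and 6+1, and the table keeps the minimum, 7, as expected.\n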
\n\nRuntime: 1024 ms, faster than 56.62% of Python3 online submissions\nMemory Usage: 20.7 MB, less than 100.00% of Python3 online submissions\n\nPrecalculate \"maxes\" and use \"if\" instead of \"max\" and \"min\":\nRuntime: 652 ms, faster than 93.71% of Python3 online submissions\nMemory Usage: 20.7 MB, less than 100.00% of Python3 online submissions\n\nDont precalculate \"maxes\" and use \"if\" instead of \"max\" and \"min\":\nRuntime: 464 ms, faster than 97.12% of Python3 online \nMemory Usage: 12.7 MB, less than 100.00% of Python3 online submissions\n\"\"\"\nclass Solution4:\n def minDifficulty(self, arr: List[int], d: int) -> int: \n n = len(arr)\n INF = float('inf')\n dp = [[INF]*n + [0] for _ in range(d+1)] # dp[days][start]\n\n ### precalculate maxes\n maxes = {} # maxes[i,j] is max of arr[i] to arr[j], inclusive\n for i in range(n):\n mx = arr[i]\n maxes[i, i] = mx\n\n for j in range(i+1, n):\n mx = max(mx, arr[j])\n maxes[i, j] = mx\n\n for days in range(1, d+1):\n m = n - days + 1 # max number of jobs in one day\n \n for start in range(m): # was recursive parameter\n #maxd = 0\n min_diffic = INF\n\n for end in range(start, m): # was index i in recursion\n #maxd = max(maxd, arr[end])\n #diffic = maxd + dp[days-1][end+1]\n \n diffic = maxes[start, end] + dp[days-1][end+1]\n\n #dp[days][start] = min(dp[days][start], diffic)\n min_diffic = min(min_diffic, diffic)\n\n dp[days][start] = min_diffic\n\n #for row in dp: print(row)\n\n # d days left, starting with job 0\n return dp[d][0] if dp[d][0] < INF else -1\n\n###############################################################################\n\"\"\"\nSolution 5: tabulation using 1d table.\n\nRecurrence relation from tabulation for 2d table:\ndp[days][start] = maxes[start, end] + dp[days-1][end+1]\n\nNote that row \"days\" for dp table only depended on the previous row.\n\nO(dnn) time as for tabulation using 2d table, but uses less space:\nO(n) extra space\n\nRuntime: 928 ms, faster than 67.76% of Python3 online submissions\nMemory Usage: 12.8 MB, less than 100.00% of Python3 online submissions\n\nUsing \"if\" instead of \"max\" and \"min\":\nRuntime: 384 ms, faster than 97.90% of Python3 online submissions\n\"\"\"\nclass Solution5:\n def minDifficulty(self, arr: List[int], d: int) -> int:\n n = len(arr)\n if n < d: return -1\n INF = float('inf')\n\n dp = [INF]*n + [0] # for days = 0\n\n for days in range(1, d+1):\n m = n - days + 1 # max number of jobs in one day\n \n for start in range(m):\n maxd = 0\n min_diffic = INF\n\n for end in range(start, m):\n maxd = max(maxd, arr[end])\n min_diffic = min(min_diffic, maxd + dp[end+1])\n\n dp[start] = min_diffic\n\n return dp[0]\n\n###############################################################################\n\"\"\"\nSolution 6: use contiguous partitions of given list into d sublists.\n\"\"\"\nclass Solution6:\n def partitions_contig(self, n):\n if n == 1: return [[[0]]]\n \n parts = self.partitions_contig(n-1)\n new_parts = []\n # new element is n-1\n\n for p in parts: # eg, [[0,1],[1,2]]\n # add single-element list [n-1] to all sublists in \"parts\"\n new_parts.append(p + [[n-1]])\n\n new_p = []\n for s in p: # look for the sublist that contains n-2\n if n-2 in s: # eg, s = [0,1,2] when n = 4\n # want to create copy of p such that s has n-1 appended\n new_p.append(s + [n-1])\n else: \n new_p.append(s)\n \n new_parts.append(new_p)\n \n return new_parts\n\n def minDifficulty(self, arr: List[int], d: int) -> int: \n n = len(arr)\n\n # Want to generate all partitions of arr into d subsets.\n 
parts = [p for p in self.partitions_contig(n) if len(p)==d]\n #print(parts)\n min_diffic = float('inf')\n \n for p in parts:\n diffic = 0\n for s in p: # eg, [0,1,2]\n diffic += max(arr[i] for i in s)\n \n if diffic < min_diffic:\n min_diffic = diffic\n\n return min_diffic if min_diffic < float('inf') else -1\n\n###############################################################################\n\"\"\"\nSolution 7: use DP + monotone stack\n\ndp[i] table holds increasing values\n\nstack\n\n\nO(dn) time\nO(n) extra space\n\nhttps://leetcode.com/problems/minimum-difficulty-of-a-job-schedule/discuss/495000/C%2B%2B-0ms!-O(d*n)-time-O(n)-space.-DP-%2B-MonotonicMinimum-Stack\n\nRuntime: 48 ms, faster than 100.00% of Python3 online submissions\nMemory Usage: 12.6 MB, less than 100.00% of Python3 online submissions\n\"\"\"\nclass Solution7:\n def minDifficulty(self, arr: List[int], d: int) -> int: \n n = len(arr)\n if n < d: return -1\n \n # for \n dp = [0]*n\n dp[0] = arr[0]\n for i in range(1, n):\n dp[i] = max(dp[i-1], arr[i]) # max so far up to index i\n print(f\"dp = {dp}\")\n\n # day = 1:\n for day in range(1, d):\n stack = [] # holds tuples (imax_d, min_d)\n t = dp[day-1] # max up to previous day\n print(\"=\"*80)\n print(f\"day = {day}\")\n\n for j in range(day, n):\n m = t # max up to previous day\n t = dp[j] # max up to current day (j)\n print(\"\\n=====\")\n print(f\"j = {j}, arr[j] = {arr[j]}, m = {m}, t = {t}\")\n\n # Found a higher diffic job, so pop all the jobs with\n # lower difficulty.\n # while max diffic of job on top of stack <= curr diffic\n while stack and arr[stack[-1][0]] <= arr[j]:\n m = min(m, stack[-1][1])\n print(f\"pop {stack[-1]}\")\n stack.pop()\n \n\n if stack:\n # take min diffic b/w (1) taking all jobs up to local\n # max this segment ??? 
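-- (editor's note) option (1) is dp[stack[-1][0]]: since\n # arr[stack[-1][0]] > arr[j] after the pops, extending that job's segment\n # to include job j leaves the segment max unchanged, so its dp value\n # can be reused --\n # 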
and (2) all prev jobs in one day\n # and today's job in the next day\n dp[j] = min(dp[stack[-1][0]], m + arr[j])\n else:\n dp[j] = m + arr[j] # add prev max and current value\n\n # j = index of max complexity job this segment\n # m = local min this segment\n stack.append((j, m))\n print(f\"push ({j}, {m})\")\n print(f\"stack = {stack}\")\n print(f\"\\ndp = {dp}\")\n\n return dp[-1]\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(arr, d, comment=None):\n min_diffic = s.minDifficulty(arr, d)\n \n print(\"=\"*80)\n if comment:\n print(comment)\n \n print(f\"\\n{arr}\")\n print(f\"d = {d}\")\n print(f\"\\nmin difficulty = {min_diffic}\")\n\n #s = Solution0() # basic recursion\n #s = Solution() # basic recursion\n #s = Solution2() # memoization\n #s = Solution3() # use @functool.lru_cache\n #s = Solution4() # tabulation using 2d table\n #s = Solution5() # tabulation using 1d table\n #s = Solution6() # partitions using contiguous sublists\n s = Solution7() # use DP + monotonic/minimum stack\n\n # comment = \"LC ex1; answer = 7\" \n # arr = [6,5,4,3,2,1]\n # d = 2\n # test(arr, d, comment)\n\n # comment = \"LC ex2; answer = -1\"\n # arr = [9,9,9]\n # d = 4\n # test(arr, d, comment)\n\n # comment = \"LC ex3; answer = 3\"\n # arr = [1,1,1]\n # d = 3\n # test(arr, d, comment)\n \n # comment = \"LC ex4; answer = 15\"\n # arr = [7,1,7,1,7,1]\n # d = 3\n # test(arr, d, comment)\n \n comment = \"LC ex5; answer = 843\"\n arr = [11,111,22,222,33,333,44,444]\n d = 6\n test(arr, d, comment)\n\n # comment = \"LC test case; TLE's basic recursion; answer = 3807\"\n # arr = [380,302,102,681,863,676,243,671,651,612,162,561,394,856,601,30,6,257,921,405,716,126,158,476,889,699,668,930,139,164,641,801,480,756,797,915,275,709,161,358,461,938,914,557,121,964,315]\n # d = 10\n # test(arr, d, comment)\n\n","sub_path":"dp/1335_min_difficulty_of_job_schedule.py","file_name":"1335_min_difficulty_of_job_schedule.py","file_ext":"py","file_size_in_byte":18210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"566587855","text":"from datetime import time\nfrom functools import reduce\nfrom typing import Tuple\nimport operator\nimport datetime\nimport traceback\n\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom utils.enum import CLASS_TYPES, WEEKDAYS\n\n\nclass Semester(models.Model):\n year = models.IntegerField(db_index=True)\n semester = models.IntegerField(db_index=True)\n beginning = models.DateTimeField()\n end = models.DateTimeField()\n\n courseDesciptionSubmission = models.DateTimeField(null=True)\n\n courseRegistrationPeriodStart = models.DateTimeField(null=True)\n courseRegistrationPeriodEnd = models.DateTimeField(null=True)\n # Beginning goes here in timeline\n courseAddDropPeriodEnd = models.DateTimeField(null=True)\n courseDropDeadline = models.DateTimeField(null=True)\n courseEvaluationDeadline = models.DateTimeField(null=True)\n # End goes here in timeline\n gradePosting = models.DateTimeField(null=True)\n\n class Meta:\n unique_together = [[\"year\", \"semester\"]]\n\n def get_cache_key(self):\n return \"semester:%d-%d\" % (self.year, self.semester)\n\n def to_json(self):\n cache_id = self.get_cache_key()\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n return result_cached\n\n result = {\n \"year\": self.year,\n \"semester\": self.semester,\n \"beginning\": 
self.beginning,\n \"end\": self.end,\n \"courseDesciptionSubmission\": self.courseDesciptionSubmission,\n \"courseRegistrationPeriodStart\": self.courseRegistrationPeriodStart,\n \"courseRegistrationPeriodEnd\": self.courseRegistrationPeriodEnd,\n \"courseAddDropPeriodEnd\": self.courseAddDropPeriodEnd,\n \"courseDropDeadline\": self.courseDropDeadline,\n \"courseEvaluationDeadline\": self.courseEvaluationDeadline,\n \"gradePosting\": self.gradePosting,\n }\n\n cache.set(cache_id, result, 60 * 60)\n\n return result\n \n def get_name(self, language: str=\"kr\"):\n if \"en\" in language:\n season_name = [\"spring\", \"summer\", \"fall\", \"winter\"][self.semester-1]\n else:\n season_name = [\"봄\", \"여름\", \"가을\", \"겨울\"][self.semester-1]\n return f\"{self.year} {season_name}\"\n\n # SYNC: Keep synchronized with React src/utils/semesterUtils.js getOngoingSemester()\n @classmethod\n def get_ongoing_semester(cls):\n now = timezone.now()\n try:\n ongoing_semester = cls.objects.get(beginning__lt=now, end__gt=now)\n except cls.DoesNotExist:\n return None\n except cls.MultipleObjectsReturned:\n # TODO: Use a logger instead\n print(\n \"WARNING: Semester.get_ongoing_semester() catched multiple Semester instances with overlapping period.\"\n \"Please check beginning and end fields of the instances.\",\n )\n ongoing_semester = cls.objects.filter(beginning__lt=now, end__gt=now).first()\n return ongoing_semester\n\n @classmethod\n def get_semester_to_default_import(cls):\n now = timezone.now()\n return cls.objects.filter(courseDesciptionSubmission__lt=now) \\\n .order_by(\"courseDesciptionSubmission\").last()\n\n # TODO: Change methods below to receive and return Semester class instance instead of\n # integer type year and semester value\n # See issue #845\n\n @classmethod\n def get_prev_semester(cls, year: int, semester: int) -> Tuple[int, int]:\n if semester == 1:\n return year - 1, 4\n else:\n return year, semester - 1\n\n @classmethod\n def get_next_semester(cls, year: int, semester: int) -> Tuple[int, int]:\n if semester == 4:\n return year + 1, 1\n else:\n return year, semester + 1\n\n @classmethod\n def get_offsetted_semester(cls, year: int, semester: int, offset: int) -> Tuple[int, int]:\n for _ in range(abs(offset)):\n if offset > 0:\n year, semester = Semester.get_next_semester(year, semester)\n else:\n year, semester = Semester.get_prev_semester(year, semester)\n return year, semester\n\n\nclass Lecture(models.Model):\n # Fetched from KAIST Scholar DB\n code = models.CharField(max_length=10, db_index=True)\n old_code = models.CharField(max_length=10, db_index=True)\n year = models.IntegerField(db_index=True)\n semester = models.SmallIntegerField(db_index=True)\n department = models.ForeignKey(\"Department\", on_delete=models.PROTECT, db_index=True)\n class_no = models.CharField(max_length=4, blank=True)\n title = models.CharField(max_length=100, db_index=True)\n title_en = models.CharField(max_length=200, db_index=True)\n type = models.CharField(max_length=12)\n type_en = models.CharField(max_length=36, db_index=True)\n audience = models.IntegerField()\n credit = models.IntegerField(default=3)\n num_classes = models.IntegerField(default=3)\n num_labs = models.IntegerField(default=0)\n credit_au = models.IntegerField(default=0)\n limit = models.IntegerField(default=0)\n professors = models.ManyToManyField(\"Professor\",\n related_name=\"lectures\", blank=True, db_index=True)\n is_english = models.BooleanField()\n deleted = models.BooleanField(default=False, db_index=True)\n\n course = 
models.ForeignKey(\"Course\", on_delete=models.PROTECT, related_name=\"lectures\")\n\n # Updated by signal timetable_lecture_saved, timetable_deleted\n num_people = models.IntegerField(default=0, blank=True, null=True)\n\n # Updated by method update_class_title\n common_title = models.CharField(max_length=100, null=True)\n common_title_en = models.CharField(max_length=100, null=True)\n class_title = models.CharField(max_length=100, null=True)\n class_title_en = models.CharField(max_length=100, null=True)\n\n # Updated by view when reviews are added/deleted/modified\n grade_sum = models.FloatField(default=0)\n load_sum = models.FloatField(default=0)\n speech_sum = models.FloatField(default=0)\n review_total_weight = models.FloatField(default=0)\n grade = models.FloatField(default=0.0)\n load = models.FloatField(default=0.0)\n speech = models.FloatField(default=0.0)\n\n def get_cache_key(self, nested):\n return \"lecture:%d:%s\" % (self.id, \"nested\" if nested else \"normal\")\n\n def to_json(self, nested=False):\n if self.deleted:\n print(\"WARNING: You are serializing DELETED lecture: %s. Please check your query\" % self)\n traceback.print_stack()\n\n cache_id = self.get_cache_key(nested)\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n return result_cached\n\n # Don't change this into model_to_dict: for security and performance\n result = {\n \"id\": self.id,\n \"title\": self.title,\n \"title_en\": self.title_en,\n \"course\": self.course.id,\n \"old_code\": self.old_code,\n \"class_no\": self.class_no,\n \"year\": self.year,\n \"semester\": self.semester,\n \"code\": self.code,\n \"department\": self.department.id,\n \"department_code\": self.department.code,\n \"department_name\": self.department.name,\n \"department_name_en\": self.department.name_en,\n \"type\": self.type,\n \"type_en\": self.type_en,\n \"limit\": self.limit,\n \"num_people\": self.num_people,\n \"is_english\": self.is_english,\n \"num_classes\": self.num_classes,\n \"num_labs\": self.num_labs,\n \"credit\": self.credit,\n \"credit_au\": self.credit_au,\n \"common_title\": self.common_title,\n \"common_title_en\": self.common_title_en,\n \"class_title\": self.class_title,\n \"class_title_en\": self.class_title_en,\n \"review_total_weight\": self.review_total_weight,\n }\n\n professors = self.professors.all().order_by(\"professor_name\")\n result.update({\"professors\": [p.to_json(nested=True) for p in professors]})\n\n if nested:\n cache.set(cache_id, result, 60 * 10)\n return result\n\n result.update(\n {\n \"grade\": self.grade,\n \"load\": self.load,\n \"speech\": self.speech,\n \"classtimes\": [ct.to_json(nested=True) for ct in self.classtimes.all()],\n \"examtimes\": [et.to_json(nested=True) for et in self.examtimes.all()],\n },\n )\n\n cache.set(cache_id, result, 60 * 10)\n\n return result\n\n def recalc_score(self):\n from apps.review.models import Review # pylint: disable=import-outside-toplevel\n\n reviews = Review.objects.filter(lecture__course=self.course,\n lecture__professors__in=self.professors.all())\n _, self.review_total_weight, sums, avgs = Review.calc_average(reviews)\n self.grade_sum, self.load_sum, self.speech_sum = sums\n self.grade, self.load, self.speech = avgs\n self.save()\n\n def update_class_title(self):\n # Finds logest common string from front of given strings\n def _lcs_front(lecture_titles):\n if len(lecture_titles) == 0:\n return \"\"\n result = \"\"\n for i in range(len(lecture_titles[0]), 0, -1): # [len(ls[0]),...,2,1]\n target_substring = 
lecture_titles[0][0:i]\n if all(t[0:i] == target_substring for t in lecture_titles):\n result = target_substring\n break\n result = result.rstrip(\"<([{\")\n return result\n\n # Add common and class title for lectures like 'XXX', 'XXX'\n def _add_title_format(lecture_list):\n if len(lecture_list) == 1:\n title = lecture_list[0].title\n if title[-1] == \">\":\n common_title = title[: title.find(\"<\")]\n else:\n common_title = title\n else:\n common_title = _lcs_front([lecture.title for lecture in lecture_list])\n\n for lecture in lecture_list:\n lecture.common_title = common_title\n if lecture.title != common_title:\n lecture.class_title = lecture.title[len(common_title) :]\n elif len(lecture.class_no) > 0:\n lecture.class_title = lecture.class_no\n else:\n lecture.class_title = \"A\"\n lecture.save(update_fields=[\"common_title\", \"class_title\"])\n\n # Add common and class title for lectures like 'XXX', 'XXX'\n def _add_title_format_en(lecture_list):\n if len(lecture_list) == 1:\n title = lecture_list[0].title_en\n if title[-1] == \">\":\n common_title = title[: title.find(\"<\")]\n else:\n common_title = title\n else:\n common_title = _lcs_front([lecture.title_en for lecture in lecture_list])\n\n for lecture in lecture_list:\n lecture.common_title_en = common_title\n if lecture.title_en != common_title:\n lecture.class_title_en = lecture.title_en[len(common_title) :]\n elif len(lecture.class_no) > 0:\n lecture.class_title_en = lecture.class_no\n else:\n lecture.class_title_en = \"A\"\n lecture.save(update_fields=[\"common_title_en\", \"class_title_en\"])\n\n lectures = Lecture.objects.filter(course=self.course, deleted=False,\n year=self.year, semester=self.semester)\n _add_title_format(lectures)\n _add_title_format_en(lectures)\n\n # SYNC: Keep synchronized with React src/utils/lectureUtils.js getProfessorsShortStr()\n def get_professors_short_str(self):\n professors = self.professors.all().order_by(\"professor_name\")\n prof_name_list = [p.professor_name for p in professors]\n if len(prof_name_list) <= 2:\n return \", \".join(prof_name_list)\n return f\"{prof_name_list[0]} 외 {len(prof_name_list) - 1} 명\"\n\n # SYNC: Keep synchronized with React src/utils/lectureUtils.js getProfessorsShortStr()\n def get_professors_short_str_en(self):\n professors = self.professors.all().order_by(\"professor_name\")\n prof_name_list = [p.professor_name_en for p in professors]\n if len(prof_name_list) <= 2:\n return \", \".join(prof_name_list)\n return f\"{prof_name_list[0]} 외 {len(prof_name_list) - 1} 명\"\n\n @classmethod\n def get_query_for_research(cls):\n return Q(type_en__in=[\"Individual Study\", \"Thesis Study(Undergraduate)\",\n \"Thesis Research(MA/phD)\"])\n\n @classmethod\n def get_query_for_review_writable(cls):\n now = timezone.now()\n not_writable_semesters = Semester.objects.filter(Q(courseAddDropPeriodEnd__gte=now)\n | Q(beginning__gte=now))\n query = reduce(operator.and_,\n (~Q(year=s.year, semester=s.semester) for s in not_writable_semesters),\n Q())\n return query\n\n def __str__(self):\n re_str = \"%s(%s %s)\" % (self.title, self.old_code, self.class_no)\n return re_str\n\n\nclass ExamTime(models.Model):\n \"\"\"Lecture에 배정된 시험시간\"\"\"\n\n lecture = models.ForeignKey(Lecture, on_delete=models.CASCADE, related_name=\"examtimes\")\n day = models.SmallIntegerField(choices=WEEKDAYS) # 시험요일\n begin = models.TimeField() # hh:mm 형태의 시험시작시간 (24시간제)\n end = models.TimeField() # hh:mm 형태의 시험시작시간 (24시간 제)\n\n def __str__(self):\n return \"[%s] %s, %s-%s\" % (\n self.lecture.code,\n 
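# (editor's note) get_day_display() is the accessor Django generates for\n # fields declared with choices (here day uses WEEKDAYS)\n 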
self.get_day_display(),\n self.begin.strftime(\"%H:%M\"),\n self.end.strftime(\"%H:%M\"),\n )\n\n def to_json(self, nested=False):\n def _format_day(day: int) -> str:\n DAY_STR = [\"월요일\", \"화요일\", \"수요일\", \"목요일\", \"금요일\", \"토요일\", \"일요일\"]\n return DAY_STR[day]\n\n def _format_day_en(day: int) -> str:\n DAY_STR_EN = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\",\n \"Sunday\"]\n return DAY_STR_EN[day]\n\n def _format_time(time_: datetime.time) -> str:\n return time_.strftime(\"%H:%M\")\n\n result = {\n \"day\": self.day,\n \"str\": \\\n f\"{_format_day(self.day)} {_format_time(self.begin)} ~ {_format_time(self.end)}\",\n \"str_en\": \\\n f\"{_format_day_en(self.day)} {_format_time(self.begin)} ~ {_format_time(self.end)}\",\n \"begin\": self.get_begin_numeric(),\n \"end\": self.get_end_numeric(),\n }\n\n return result\n\n def get_begin_numeric(self):\n \"\"\"0시 0분을 기준으로 분 단위로 계산된 시작 시간을 반환한다.\"\"\"\n begin_numeric = self.begin.hour * 60 + self.begin.minute\n return begin_numeric\n\n def get_end_numeric(self):\n \"\"\"0시 0분을 기준으로 분 단위로 계산된 종료 시간을 반환한다.\"\"\"\n end_numeric = self.end.hour * 60 + self.end.minute\n return end_numeric\n\n\nclass ClassTime(models.Model):\n \"\"\"Lecture 에 배정된강의시간, 보통 하나의 Lecture 가 여러개의 강의시간을 가진다.\"\"\"\n\n lecture = models.ForeignKey(Lecture,\n on_delete=models.CASCADE, related_name=\"classtimes\", null=True)\n day = models.SmallIntegerField(choices=WEEKDAYS) # 강의 요일\n begin = models.TimeField() # hh:mm 형태의 강의 시작시각 (24시간제)\n end = models.TimeField() # hh:mm 형태의 강의 끝나는 시각 (24시간 제)\n type = models.CharField(max_length=1, choices=CLASS_TYPES) # 강의 or 실험\n building_id = models.CharField(max_length=10, blank=True, null=True) # 건물 고유 ID\n building_full_name = models.CharField(max_length=60, blank=True,\n null=True) # 건물 이름(ex> (E11)창의학습관)\n building_full_name_en = models.CharField(max_length=60, blank=True,\n null=True) # 건물 이름(ex> (E11)Creative learning Bldg.)\n room_name = models.CharField(max_length=20, null=True) # 강의실 호실(ex> 304, 1104, 1209-1, 터만홀)\n unit_time = models.SmallIntegerField(null=True) # 수업 교시\n\n def to_json(self, nested=False):\n building_code, room_name, classroom, classroom_en, classroom_short, classroom_short_en = self.get_classroom_strs()\n\n result = {\n \"building_code\": building_code,\n \"classroom\": classroom,\n \"classroom_en\": classroom_en,\n \"classroom_short\": classroom_short,\n \"classroom_short_en\": classroom_short_en,\n \"room_name\": room_name,\n \"day\": self.day,\n \"begin\": self.get_begin_numeric(),\n \"end\": self.get_end_numeric(),\n }\n\n return result\n\n def get_begin_numeric(self):\n \"\"\"0시 0분을 기준으로 분 단위로 계산된 시작 시간을 반환한다.\"\"\"\n \"\"\"30분 단위로 내림한다\"\"\"\n begin_numeric = self.begin.hour * 60 + self.begin.minute\n if begin_numeric % 30 != 0:\n begin_numeric = begin_numeric - (begin_numeric % 30)\n return begin_numeric\n\n def get_end_numeric(self):\n \"\"\"0시 0분을 기준으로 분 단위로 계산된 종료 시간을 반환한다.\"\"\"\n \"\"\"30분 단위로 올림한다\"\"\"\n end_numeric = self.end.hour * 60 + self.end.minute\n if end_numeric % 30 != 0:\n end_numeric = end_numeric + (30 - (end_numeric % 30))\n return end_numeric\n \n def get_classroom_strs(self):\n building_full_name = self.building_full_name\n building_full_name_en = self.building_full_name_en\n # No classroom info\n if building_full_name is None or len(building_full_name) == 0:\n building_code = \"\"\n room_name = \"\"\n classroom = \"정보 없음\"\n classroom_en = \"Unknown\"\n classroom_short = \"정보 없음\"\n classroom_short_en = \"Unknown\"\n # Building name has form of 
\"(N1) xxxxx\"\n elif building_full_name[0] == \"(\":\n building_code = building_full_name[1 : building_full_name.find(\")\")]\n building_name = building_full_name[len(building_code) + 2 :]\n building_name_en = building_full_name_en[len(building_code) + 2 :]\n room_name = self.room_name if (self.room_name is not None) else \"\"\n classroom = \"(\" + building_code + \") \" + building_name + \" \" + room_name\n classroom_en = \"(\" + building_code + \") \" + building_name_en + \" \" + room_name\n classroom_short = \"(\" + building_code + \") \" + room_name\n classroom_short_en = \"(\" + building_code + \") \" + room_name\n # Building name has form of \"xxxxx\"\n else:\n building_code = \"\"\n room_name = self.room_name if (self.room_name is not None) else \"\"\n classroom = building_full_name + \" \" + room_name\n classroom_en = building_full_name_en + \" \" + room_name\n classroom_short = building_full_name + \" \" + room_name\n classroom_short_en = building_full_name_en + \" \" + room_name\n return building_code, room_name, classroom, classroom_en, classroom_short, classroom_short_en\n\n\n def get_location(self):\n if self.room_name is None:\n return \"%s\" % (self.building_full_name)\n try:\n int(self.room_name)\n return \"%s %s호\" % (self.building_full_name, self.room_name)\n except ValueError:\n return \"%s %s\" % (self.building_full_name, self.room_name)\n\n def get_location_en(self):\n if self.room_name is None:\n return \"%s\" % (self.building_full_name_en)\n try:\n int(self.room_name)\n return \"%s %s\" % (self.building_full_name_en, self.room_name)\n except ValueError:\n return \"%s %s\" % (self.building_full_name_en, self.room_name)\n\n @staticmethod\n def numeric_time_to_str(numeric_time):\n return \"%s:%s\" % (numeric_time // 60, numeric_time % 60)\n\n @staticmethod\n def numeric_time_to_obj(numeric_time):\n return time(hour=numeric_time // 60, minute=numeric_time % 60)\n\n\nclass Department(models.Model):\n id = models.IntegerField(primary_key=True, db_index=True)\n num_id = models.CharField(max_length=4, db_index=True)\n code = models.CharField(max_length=5, db_index=True)\n name = models.CharField(max_length=60, db_index=True)\n name_en = models.CharField(max_length=60, null=True, db_index=True)\n visible = models.BooleanField(default=True)\n\n def __str__(self):\n return self.code\n\n def get_cache_key(self, nested):\n return \"department:%d:%s\" % (self.id, \"nested\" if nested else \"normal\")\n\n def to_json(self, nested=False):\n cache_id = self.get_cache_key(nested)\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n return result_cached\n\n result = {\n \"id\": self.id,\n \"name\": self.name,\n \"name_en\": self.name_en,\n \"code\": self.code,\n }\n\n cache.set(cache_id, result, 60 * 30)\n\n return result\n\n\nclass Course(models.Model):\n # Fetched from KAIST Scholar DB\n old_code = models.CharField(max_length=10, db_index=True)\n department = models.ForeignKey(\"Department\", on_delete=models.PROTECT, db_index=True)\n professors = models.ManyToManyField(\"Professor\", db_index=True)\n type = models.CharField(max_length=12)\n type_en = models.CharField(max_length=36)\n title = models.CharField(max_length=100, db_index=True)\n title_en = models.CharField(max_length=200, db_index=True)\n\n # Updated by command update_course_summary\n summury = models.CharField(max_length=4000, default=\"\")\n\n related_courses_prior = models.ManyToManyField(\"Course\", related_name=\"+\")\n related_courses_posterior = models.ManyToManyField(\"Course\", 
related_name=\"+\")\n\n # Updated by view when reviews are added/deleted/modified\n grade_sum = models.FloatField(default=0)\n load_sum = models.FloatField(default=0)\n speech_sum = models.FloatField(default=0)\n review_total_weight = models.FloatField(default=0)\n grade = models.FloatField(default=0.0)\n load = models.FloatField(default=0.0)\n speech = models.FloatField(default=0.0)\n\n latest_written_datetime = models.DateTimeField(default=None, null=True)\n\n def get_cache_key(self, nested):\n return \"course:%d:%s\" % (self.id, \"nested\" if nested else \"normal\")\n\n def to_json(self, nested=False, user=None):\n def add_userspecific_data(result, user):\n # Add user read info\n if user is None or not user.is_authenticated:\n is_read = False\n else:\n try:\n course_user = self.read_users_courseuser.get(user_profile__user=user)\n except CourseUser.DoesNotExist:\n course_user = None\n\n if course_user is None:\n is_read = False\n elif self.latest_written_datetime is None:\n is_read = True\n else:\n is_read = self.latest_written_datetime < course_user.latest_read_datetime\n\n result.update(\n {\n \"userspecific_is_read\": is_read,\n },\n )\n\n cache_id = self.get_cache_key(nested)\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n if not nested:\n add_userspecific_data(result_cached, user)\n return result_cached\n \n representative_lecture = self.lectures.order_by(\"-year\", \"-semester\").first()\n\n # Don't change this into model_to_dict: for security and performance\n result = {\n \"id\": self.id,\n \"old_code\": self.old_code,\n \"department\": self.department.to_json(nested=True),\n \"type\": self.type,\n \"type_en\": self.type_en,\n \"title\": self.title,\n \"title_en\": self.title_en,\n \"summary\": self.summury if len(self.summury) > 0 else \"등록되지 않았습니다.\",\n \"review_total_weight\": self.review_total_weight,\n \"credit\": representative_lecture.credit if representative_lecture else 0,\n \"credit_au\": representative_lecture.credit_au if representative_lecture else 0,\n \"num_classes\": representative_lecture.num_classes if representative_lecture else 0,\n \"num_labs\": representative_lecture.num_labs if representative_lecture else 0,\n }\n\n if nested:\n cache.set(cache_id, result, 60 * 10)\n return result\n\n result.update(\n {\n \"related_courses_prior\": [c.to_json(nested=True)\n for c in self.related_courses_prior.all()],\n \"related_courses_posterior\": [c.to_json(nested=True)\n for c in self.related_courses_posterior.all()],\n \"professors\": [p.to_json(nested=True)\n for p in self.professors.all().order_by(\"professor_name\")],\n \"grade\": self.grade,\n \"load\": self.load,\n \"speech\": self.speech,\n },\n )\n\n cache.set(cache_id, result, 60 * 10)\n\n add_userspecific_data(result, user)\n\n return result\n\n def recalc_score(self):\n from apps.review.models import Review # pylint: disable=import-outside-toplevel\n\n reviews = Review.objects.filter(lecture__course=self)\n _, self.review_total_weight, sums, avgs = Review.calc_average(reviews)\n self.grade_sum, self.load_sum, self.speech_sum = sums\n self.grade, self.load, self.speech = avgs\n self.save()\n\n def update_related_courses(self):\n pass\n\n def __str__(self):\n return \"%s(%s)\" % (self.title, self.old_code)\n\n\nclass Professor(models.Model):\n STAFF_ID = 830\n\n # Fetched from KAIST Scholar DB\n professor_name = models.CharField(max_length=100, db_index=True)\n professor_name_en = models.CharField(max_length=100, blank=True, null=True)\n professor_id = models.IntegerField()\n major = 
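# Shape of the caching pattern in Course.to_json above: the user-independent
# dict is cached per object, and the per-user fields are layered on after
# every cache hit, so one cached entry serves all users. Minimal sketch run
# inside the same Django app (build_result and is_read_for are hypothetical
# stand-ins for the inline logic above):
from django.core.cache import cache

def cached_course_json(course, user, build_result, is_read_for):
    key = "course:%d:normal" % course.id
    result = cache.get(key)
    if result is None:
        result = build_result(course)        # expensive, user-independent part
        cache.set(key, result, 60 * 10)
    result["userspecific_is_read"] = is_read_for(course, user)
    return result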
models.CharField(max_length=30)\n course_list = models.ManyToManyField(\"Course\", db_index=True)\n\n # Updated by view when reviews are added/deleted/modified\n grade_sum = models.FloatField(default=0)\n load_sum = models.FloatField(default=0)\n speech_sum = models.FloatField(default=0)\n review_total_weight = models.FloatField(default=0)\n grade = models.FloatField(default=0.0)\n load = models.FloatField(default=0.0)\n speech = models.FloatField(default=0.0)\n\n def to_json(self, nested=False):\n result = {\n \"name\": self.professor_name,\n \"name_en\": self.professor_name_en,\n \"professor_id\": self.professor_id,\n \"review_total_weight\": self.review_total_weight,\n }\n\n if nested:\n return result\n\n # Add course information\n result.update(\n {\n \"courses\": [c.to_json(nested=True) for c in self.course_list.all()],\n \"grade\": self.grade,\n \"load\": self.load,\n \"speech\": self.speech,\n },\n )\n\n return result\n\n def recalc_score(self):\n from apps.review.models import Review # pylint: disable=import-outside-toplevel\n\n reviews = Review.objects.filter(lecture__professors=self)\n _, self.review_total_weight, sums, avgs = Review.calc_average(reviews)\n self.grade_sum, self.load_sum, self.speech_sum = sums\n self.grade, self.load, self.speech = avgs\n self.save()\n\n def __str__(self):\n return \"%s(id:%d)\" % (self.professor_name, self.professor_id)\n\n\nclass CourseUser(models.Model):\n course = models.ForeignKey(\"Course\",\n related_name=\"read_users_courseuser\", on_delete=models.CASCADE)\n user_profile = models.ForeignKey(\"session.UserProfile\", on_delete=models.CASCADE)\n latest_read_datetime = models.DateTimeField(auto_now=True)\n\n class Meta:\n unique_together = [[\"course\", \"user_profile\"]]\n","sub_path":"apps/subject/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":28907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"347208049","text":"from odd.artifact import Issue, Location, Position, XMLDocument\nfrom odd.plugin import Plugin\n\nELEMENTS = {\"record\", \"template\", \"act_window\", \"menuitem\", \"report\"}\nXPATH_EXPR = \"|\".join(f\"//{el}[not(@id)]\" for el in ELEMENTS)\n\n\nclass XMLOperationNoID(Plugin):\n _handles = {\"xml_document\"}\n _emits = {\"issue\"}\n\n def on_xml_document(self, xml_document: XMLDocument):\n for el in xml_document.node.xpath(XPATH_EXPR):\n yield Issue(\n \"xml_operation_without_id\",\n f\"XML operation `<{el.tag}>` has no `id` attribute\",\n xml_document.addon,\n locations=[Location(xml_document.path, Position(el.sourceline))],\n categories=[\"maintainability\"],\n )\n","sub_path":"odd_bunch/plugin/xml_operation_no_id.py","file_name":"xml_operation_no_id.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"89346890","text":"import socket\nimport sys\nimport time\n\nTCP_IP = \"127.0.0.1\"\nFILE_PORT = 5005\nDATA_PORT = 5006\nbuf = 1024\n\nclass TCP:\n def __init__(self, IP, FILE_PORT, DATA_PORT):\n self.ip = IP\n self.file_port = FILE_PORT\n self.data_port = DATA_PORT\n\n def send(self, file_name):\n try:\n startTime = time.time()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((TCP_IP, self.file_port))\n sock.send(file_name.encode())\n sock.close()\n\n print(\"Sending file : %s\" % file_name)\n\n f = open(file_name, \"rb\")\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((TCP_IP, self.data_port))\n data 
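# Quick check of the XPath union built by XMLOperationNoID above, run through
# lxml directly (the library such XML tooling typically parses with):
from lxml import etree

elements = {"record", "template", "act_window", "menuitem", "report"}
xpath_expr = "|".join("//%s[not(@id)]" % el for el in elements)

doc = etree.fromstring(b"<odoo><record id='ok'/><menuitem/></odoo>")
print([el.tag for el in doc.xpath(xpath_expr)])   # ['menuitem'] - missing id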
= f.read()\n sock.send(data)\n\n finally:\n sock.close()\n f.close()\n total = time.time() - startTime\n with open(\"Results.txt\", \"a\") as myfile:\n myfile.write(file_name +\":\"+str(total)+\"\\n\")\n\n# Create new instance of TCP Sender\ntcpSender = TCP(TCP_IP, FILE_PORT, DATA_PORT)\n\n# Send the files\ntcpSender.send(\"1MB.txt\")\ntcpSender.send(\"2MB.txt\")\ntcpSender.send(\"4MB.txt\")\ntcpSender.send(\"8MB.txt\")\ntcpSender.send(\"16MB.txt\")","sub_path":"TCP/TCPSender.py","file_name":"TCPSender.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"643728484","text":"#!/usr/bin/python2.7\n# -*- coding: UTF-8 -*- \n\n## 用于测试内容的提取和关键字的匹配\n\nimport os\nfrom cStringIO import StringIO\n\nDIRS = [\"2012\", \"2013\", \"2014\", \"2015\", \"2016\"]\n\ndef load_data(path):\n data = []\n asid = str()\n line = str()\n text = str()\n cnt = 1\n with open(path, \"rt\") as f:\n for i in f:\n if cnt == 1:\n asid = i.strip()\n elif cnt == 2:\n line = i.strip()\n else:\n text = i.strip()\n data.append([asid, line, text])\n cnt = 0\n cnt += 1\n return data\n\ndef write_io(text, idd):\n with open(\"res.txt\", \"at\") as f:\n f.write(\"%d %s\\n\" % (idd, text))\n\ndef parse_data(path):\n y = []\n data = load_data(path)\n for cell in data:\n x = cell[2].replace(\" \",\"\").find(r\"重大风险提示\")\n y.append([x, cell[2]])\n\n# if x < 100:\n# print cell[0]\n# print cell[2]\n return y\n\n\nabc =[]\nfor i in [\"2012\",\"2013\",\"2014\",\"2015\",\"2016\"]:\n y = parse_data(\"result-%s.txt\" % i) \n abc.extend(y)\nabc.sort()\nfor i in abc:\n write_io(i[1], i[0])\n\n","sub_path":"06-parse-segment.py","file_name":"06-parse-segment.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"632562875","text":"import backtrader as bt\nimport btfd.util as util\n\nclass TweetsShitpostFlow(bt.Indicator):\n lines = ('tsf',)\n params = (('period', 0),)\n\n def next(self):\n self.lines.tsf[0] = self.data.tweet[0]\n\nclass TwiggsMoneyFlow(bt.Indicator):\n lines = ('tmf',)\n params = (('period', 24*4*21),)\n trh, trl, ad, vol = 0, 0, 0, 0\n\n def __init__(self):\n self.addminperiod(self.p.period)\n\n def next(self):\n i = self.p.period\n trh, trl, ad, vol = 0, 0, 0, 0\n while i > 0:\n trh = max(self.data.high[0-i], self.data.close[-1-i])\n trl = min(self.data.low[0-i], self.data.close[-1-i])\n tr = trh - trl\n if tr == 0:\n tr = 999999\n ad = ad + ((((self.data.close[0-i] - trl) - (trh - self.data.close[0-i]))/tr) * self.data.volume[0-i])\n vol = vol + self.data.volume[0-i]\n i = i - 1\n\n trh = max(self.data.high[0], self.data.close[-1])\n trl = min(self.data.low[0], self.data.close[-1])\n tr = trh - trl\n if tr == 0:\n tr = 999999\n ad = (ad * (self.p.period-1 / self.p.period)) + ((((self.data.close[0] - trl) - (trh - self.data.close[0]))/tr) * self.data.volume[0])\n vol = (vol * (self.p.period-1 / self.p.period)) + self.data.volume[0]\n\n if vol == 0:\n self.lines.tmf[0] = 0\n else:\n self.lines.tmf[0] = ad / vol\n\nclass OHLCT(bt.feeds.GenericCSVData):\n lines = ('tweet',)\n\n params = (\n ('nullvalue', 0.0),\n ('dtformat', '%Y-%m-%d %H:%M:%S'),\n ('datetime', 0),\n ('open', 1),\n ('high', 2),\n ('low', 3),\n ('close', 4),\n ('volume', 5),\n ('openinterest', -1),\n ('tweet', -1)\n 
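# The TCP sender above pushes each file with a single sock.send(data); for
# large buffers, send() may transmit only part of the payload. sendall()
# loops until every byte is written - a safer sketch of the same transfer:
import socket

def send_file(path: str, ip: str, port: int) -> None:
    with open(path, "rb") as f, socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((ip, port))
        sock.sendall(f.read())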
)\n","sub_path":"btfd/myclasses.py","file_name":"myclasses.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"489470812","text":"#!/usr/bin/env python\nimport rospy, roslib, sys, cv2, os\nfrom std_msgs.msg import Int16MultiArray \nimport numpy as np\nimport math as m\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom rospkg import RosPack\nrp = RosPack()\npath = rp.get_path('air_hockey')\n\nframe = np.zeros((480,640,3), np.uint8)\npts1 = np.float32([[0,0],[0,480],[640,0],[480,640]])\nlower_puck = np.array([40,33,0])\nupper_puck = np.array([93,182,255])\ndilate_x = 1\ny_prev = 0\nvirtual = cv2.imread(path+\"/scripts/Template_Virtual_AH.jpg\")\nvirtual_copy = virtual.copy()\n\nend = [150.0,500.0]\nstart = [150.0,600.0]\n\narm_1 = [151,0,0,0,0]\narm_2 = [151,0,0,0,0]\n\nclass air_hockey:\n def __init__(self):\n self.bridge = CvBridge() \n self.image_rgb = rospy.Subscriber(\"/usb_cam/image_raw\",Image,self.callback_frame)\n #self.image_rgb = rospy.Subscriber(\"/video/AH\",Image,self.callback_frame)\n self.pers_co = rospy.Subscriber(\"/air_hockey/pers_co\",Int16MultiArray ,self.callback_pers_co)\n self.pub_perspective = rospy.Publisher('/air_hockey/perspective', Image, queue_size=1)\n self.pub_puck = rospy.Publisher('/air_hockey/puck', Image, queue_size=1)\n self.pub_circle = rospy.Publisher('/air_hockey/circle', Image, queue_size=1)\n self.pub_cms = rospy.Publisher('/air_hockey/position', Int16MultiArray, queue_size=1)\n self.color = rospy.Subscriber(\"/air_hockey/color\",Int16MultiArray ,self.callback_color)\n \n def callback_frame(self,data):\n global frame\n frame = self.bridge.imgmsg_to_cv2(data,\"bgr8\")\n\n def callback_pers_co(self,ros_data):\n global pts1\n data = ros_data.data\n pts1 = np.float32([[data[0],data[1]],[data[2],data[3]],[data[4],data[5]],[data[6],data[7]]])\n\n def callback_color(self,ros_data):\n global lower_puck, upper_puck, dilate_x\n data = ros_data.data\n lower_puck = np.array([data[0],data[1],data[2]])\n upper_puck = np.array([data[3],data[4],data[5]])\n dilate_x = data[6]\n \ndef perspective():\n global frame,pts1\n pts2 = np.float32([[300,0],[0,0],[300,600],[0,600]])\n #pts2 = np.float32([[0,0],[0,600],[300,0],[600,300]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n arena = cv2.warpPerspective(frame,M,(300,600))\n air_hockey.pub_perspective.publish(air_hockey.bridge.cv2_to_imgmsg(arena,\"bgr8\"))\n return arena\n \ndef mask(arena):\n hsv_arena = cv2.cvtColor(arena, cv2.COLOR_BGR2HSV)\n puck = cv2.inRange(hsv_arena, lower_puck, upper_puck)\n puck = cv2.erode(puck, np.ones((2,2),np.uint8),iterations = dilate_x)\n puck = cv2.dilate(puck, np.ones((2,2),np.uint8),iterations = dilate_x)\n puck = cv2.cvtColor(puck, cv2.COLOR_GRAY2BGR)\n air_hockey.pub_puck.publish(air_hockey.bridge.cv2_to_imgmsg(puck,\"bgr8\"))\n return puck\n\ndef detect(arena,puck):\n global y_prev,virtual,virtual_copy,end\n puck = cv2.cvtColor(puck, cv2.COLOR_BGR2GRAY)\n circles = cv2.HoughCircles(puck,cv2.cv.CV_HOUGH_GRADIENT,1,5,param1=2,param2=2,minRadius=8,maxRadius=12)\n if circles is not None:\n for i in circles[0,:]:\n virtual = virtual_copy.copy()\n cv2.circle(virtual,(i[0],i[1]),i[2],(0,255,0),-1)\n end = [i[0],500]\n x = int((i[0]*0.3048))\n y = int((-i[1]+300)*0.3048)\n xy = np.asarray([x,y])\n air_hockey.pub_cms.publish(data = xy.tolist())\n break\n\n \nif __name__ == '__main__':\n rospy.init_node('air_hockey', anonymous=True)\n air_hockey = 
air_hockey()\n while not rospy.is_shutdown():\n rate = rospy.Rate(60)\n arena = perspective()\n puck = mask(arena)\n detect(arena,puck)\n rate.sleep()\n","sub_path":"scripts/Air_Hockey.py","file_name":"Air_Hockey.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475032884","text":"import configparser\nimport os\nimport time\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport click\nimport yaml\nfrom pydantic import BaseModel, ValidationError # type: ignore\n\nfrom treebeard.util import fatal_exit\n\n\nclass TreebeardEnv(BaseModel):\n notebook_id: Optional[\n str\n ] = None # Not present when CLI is not in notebook directory\n project_id: Optional[str] = None # Not present when initially installing\n run_id: str\n email: Optional[str] = None # Not present at build time\n api_key: Optional[str] = None # Not present at build time\n\n def __str__(self) -> str:\n dict_obj = self.dict()\n if self.api_key:\n unredacted_length = 4\n dict_obj[\"api_key\"] = (\n \"x\" * (len(self.api_key) - unredacted_length)\n + self.api_key[-unredacted_length:]\n )\n return str(dict_obj)\n\n\nclass TreebeardConfig(BaseModel):\n notebook: str = \"main.ipynb\"\n output_dirs: Tuple[str, ...] = tuple([\"output\"])\n ignore: Tuple[str, ...] = ()\n secret: Tuple[str, ...] = ()\n\n\nenv = \"production\"\nif os.getenv(\"TREEBEARD_ENVIRONMENT\"):\n env = os.getenv(\"TREEBEARD_ENVIRONMENT\")\n\n\nif env == \"development\":\n click.echo(\"WARNING: RUNNING IN LOCAL MODE\", err=True)\n url = \"http://localhost:8080\"\n treebeard_web_url = \"https://localhost:8000\"\nelse:\n url = \"https://scheduler-cvee2224cq-ew.a.run.app\"\n treebeard_web_url = \"https://treebeard.io\"\n\n\ndef get_run_path(treebeard_env: TreebeardEnv):\n return (\n f\"{treebeard_env.project_id}/{treebeard_env.notebook_id}/{treebeard_env.run_id}\"\n )\n\n\ndef get_time():\n return datetime.now().strftime(\"%H:%M:%S\")\n\n\ndef get_config_path():\n home = str(Path.home())\n return f\"{home}/.treebeard\"\n\n\ndef validate_notebook_directory(\n treebeard_env: TreebeardEnv, treebeard_config: TreebeardConfig\n):\n if treebeard_env.project_id is None:\n click.echo(\n click.style(\n \"No account config detected! 
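# In the main loop above, rospy.Rate(60) is rebuilt on every iteration; the
# conventional form creates it once, so sleep() can track the loop period:
import rospy

rospy.init_node('air_hockey', anonymous=True)
rate = rospy.Rate(60)                 # 60 Hz target
while not rospy.is_shutdown():
    # arena = perspective(); puck = mask(arena); detect(arena, puck)
    rate.sleep()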
Please run treebeard configure\", fg=\"red\",\n ),\n err=True,\n )\n\n if not os.path.exists(treebeard_config.notebook):\n fatal_exit(\n f\"Cannot run non-existent notebook '{treebeard_config.notebook}', you should be in a project directory with a treebeard.yaml file\"\n )\n\n\ndef get_treebeard_config() -> TreebeardConfig:\n notebook_config = \"treebeard.yaml\"\n if not os.path.exists(notebook_config):\n return TreebeardConfig()\n\n with open(notebook_config) as f:\n conf = yaml.load(f, Loader=yaml.FullLoader)\n if not conf:\n fatal_exit(\"treebeard.yaml config file exists but is empty\")\n try:\n return TreebeardConfig(**conf)\n except ValidationError as e: # type: ignore\n fatal_exit(f\"Error parsing treebeard.yaml\\n{e.json()}\")\n\n\ndef get_treebeard_env():\n \"\"\"Reads variables from a local file, credentials.cfg\"\"\"\n treebeard_project_id = os.getenv(\"TREEBEARD_PROJECT_ID\")\n run_id = os.getenv(\"TREEBEARD_RUN_ID\")\n if run_id is None:\n run_id = f\"local-{int(time.time())}\"\n\n notebook_id = os.getenv(\"TREEBEARD_NOTEBOOK_ID\")\n if not notebook_id:\n notebook_id = Path(os.getcwd()).name\n\n email = None\n api_key = None\n\n # .treebeard config is present in CLI and Runtime\n if os.path.exists(config_path):\n config = configparser.RawConfigParser()\n config.read(config_path)\n email = config.get(\"credentials\", \"treebeard_email\")\n treebeard_project_id = config.get(\"credentials\", \"treebeard_project_id\")\n api_key = config.get(\"credentials\", \"treebeard_api_key\")\n\n return TreebeardEnv(\n notebook_id=notebook_id,\n project_id=treebeard_project_id,\n run_id=run_id,\n email=email,\n api_key=api_key,\n )\n\n\nconfig_path = get_config_path()\ntreebeard_config = get_treebeard_config()\ntreebeard_env = get_treebeard_env()\nrun_path = get_run_path(treebeard_env)\nsecrets_endpoint = f\"{url}/projects/{treebeard_env.project_id}/notebooks/{treebeard_env.notebook_id}/secrets\"\nnotebooks_endpoint = f\"{url}/notebooks/{treebeard_env.notebook_id}\"\nsignup_endpoint = f\"{url}/cli_signup\"\nservice_status_endpoint = f\"{url}/service_status\"\n","sub_path":"treebeard-lib/treebeard/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"407136387","text":"from facenet import FacenetModel\r\nimport os\r\nimport tensorflow as tf\r\nimport sys\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport tensorboard_helper as tbh\r\nimport time\r\n\r\nFLAGS = tf.app.flags.FLAGS\r\ntf.app.flags.DEFINE_string('image_path', '/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/', \"root directory including images and tfrecords\")\r\n\r\ntf.app.flags.DEFINE_string('mode', 'test', \"Select train or pred mode\")\r\n\r\n\r\n# Set tensorboard folder name\r\nsummary_name_str = 'facenet2'\r\n\r\ncurrent_epoch_return = 0;\r\n\r\ndef training():\r\n \"\"\" #####################################\r\n ####### Hyper parameters starts ######\"\"\"\r\n\r\n epoch = 60\r\n batch = 20\r\n learning_rate = 0.001\r\n\r\n num_outputs = len(list(\r\n filter(lambda y: os.path.isdir(y), list(\r\n #map(lambda x: os.path.join(FLAGS.image_path+\"images\", x), os.listdir(FLAGS.image_path+\"images\"))))))\r\n map(lambda x: os.path.join('/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/'+\"images\", x), os.listdir('/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/'+\"images\"))))))\r\n\r\n model = FacenetModel(epoch, 
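# The treebeard.yaml path above is yaml.load -> TreebeardConfig(**conf), with
# pydantic doing the validation. Self-contained sketch of that round trip
# (pydantic v1 API, matching the imports above):
import yaml
from typing import Tuple
from pydantic import BaseModel, ValidationError

class TreebeardConfig(BaseModel):
    notebook: str = "main.ipynb"
    output_dirs: Tuple[str, ...] = ("output",)

conf = yaml.safe_load("notebook: analysis.ipynb\noutput_dirs: [out]")
try:
    print(TreebeardConfig(**conf))
except ValidationError as e:
    print(e.json())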
batch, learning_rate, num_outputs) # Initialize FacenetModel class to set shapes of layers\r\n\r\n model.block1[\"unit1\"] = [64, 64, 32]\r\n model.block1[\"unit2\"] = [64, 64, 32]\r\n model.block1[\"unit3\"] = [64, 64, 32]\r\n\r\n model.block2[\"unit1\"] = [128, 128, 64]\r\n model.block2[\"unit2\"] = [128, 128, 64]\r\n model.block2[\"unit3\"] = [128, 128, 64]\r\n model.block2[\"unit4\"] = [128, 128, 64]\r\n\r\n model.block3[\"unit1\"] = [256, 256, 128]\r\n model.block3[\"unit2\"] = [256, 256, 128]\r\n model.block3[\"unit3\"] = [256, 256, 128]\r\n model.block3[\"unit4\"] = [256, 256, 128]\r\n model.block3[\"unit5\"] = [256, 256, 128]\r\n model.block3[\"unit6\"] = [256, 256, 128]\r\n\r\n model.block4[\"unit1\"] = [512, 512, 256]\r\n model.block4[\"unit2\"] = [512, 512, 256]\r\n model.block4[\"unit3\"] = [512, 512, 256]\r\n\r\n \"\"\" ###### Hyper parameters ends #########\r\n ###################################### \"\"\"\r\n \r\n model('/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/', 224, 224)\r\n\r\n # Add ops to save and restore all the variables.\r\n saver = tf.train.Saver()\r\n\r\n config_proto = tf.ConfigProto()\r\n #config_proto.gpu_options.allow_growth = True\r\n config_proto.gpu_options.per_process_gpu_memory_fraction = 0.6\r\n with tf.Session(config=config_proto) as sess:\r\n\r\n #summary_writer = tf.summary.FileWriter(os.path.join('summaries','facenet'), sess.graph) # Write tensorboard\r\n summary_writer = tbh.summary_writer_fn(summary_name_str, sess)\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n training_handle = sess.run(model.train_iterator.string_handle())\r\n validation_handle = sess.run(model.validation_iterator.string_handle())\r\n for current_epoch_int in tqdm(range(epoch)):\r\n try:\r\n print(\"=== Training ===\"); sys.stdout.flush()\r\n sess.run(model.train_iterator.initializer)\r\n total_accuracy = 0\r\n count = 0\r\n #print(\"Processing: \", end=\"\"); sys.stdout.flush()\r\n while True:\r\n count += 1\r\n #print(\".\", end=\"\"); sys.stdout.flush()\r\n \r\n extracted_data = sess.run(model.get_next_in_interators, feed_dict={model.handle_placeholder: training_handle})\r\n \r\n if len(extracted_data[1]) > 1:\r\n #extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n loss, _ = sess.run([model.loss, model.loss_minimizer], feed_dict={model.input_dataset_placeholder:extracted_data[0], model.labels_dataset_placeholder:extracted_data[1], model.train_valid_placeholder:True})\r\n pred = sess.run(model.predictions, feed_dict={model.input_dataset_placeholder:extracted_data[0], model.train_valid_placeholder:True})\r\n total_accuracy += np.sum(np.equal(pred, extracted_data[1])) / batch\r\n \r\n current_epoch_return = current_epoch_int # Set current itoration as current epoch state\r\n #print(\"{0} accuracy: \\t{1}\".format(np.sum(np.equal(pred, extracted_data[1])), np.equal(pred, extracted_data[1]))); sys.stdout.flush()\r\n #print(\"expected:\\t{0} \\npredicted:\\t{1}\".format(extracted_data[1], pred)); sys.stdout.flush()\r\n \r\n except tf.errors.OutOfRangeError:\r\n # Execute the summaries defined above\r\n tbh.write_loss_and_accuracy_summary_fn(summary_writer, sess, loss, total_accuracy, count, epoch)\r\n\r\n print(\"\\n{0} %\".format((total_accuracy/count)*100)); sys.stdout.flush()\r\n pass\r\n\r\n try:\r\n print(\"=== Validation ===\"); sys.stdout.flush()\r\n sess.run(model.validation_iterator.initializer)\r\n total_accuracy = 0\r\n count = 0\r\n #print(\"Processing: \", end=\"\"); sys.stdout.flush()\r\n while True:\r\n 
count += 1\r\n #print(\".\", end=\"\"); sys.stdout.flush()\r\n\r\n extracted_data = sess.run(model.get_next_in_interators, feed_dict={model.handle_placeholder: validation_handle})\r\n\r\n if len(extracted_data[1]) > 1:\r\n pred = sess.run(model.predictions, feed_dict={model.input_dataset_placeholder:extracted_data[0], model.train_valid_placeholder:False})\r\n total_accuracy += np.sum(np.equal(pred, extracted_data[1])) / batch\r\n\r\n current_epoch = count # Set current itoration as current epoch state\r\n \r\n except tf.errors.OutOfRangeError:\r\n print(\"\\n{0} %\".format((total_accuracy/count)*100)); sys.stdout.flush()\r\n\r\n # Execute the summaries defined above\r\n tbh.write_accuracy(summary_writer, sess, total_accuracy, count, epoch)\r\n\r\n # Save the variables to disk.\r\n save_file_name = 'trained_tf_data/{0}_{1:05d}-of-{2:05d}'.format(\"facenet\", current_epoch_int+1, epoch)\r\n save_path = saver.save(sess, os.path.join('/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/', save_file_name))\r\n #save_path = saver.save(sess, \"/tmp/model.ckpt\")\r\n #print(\"Model saved in path: %s\" % save_path)\r\n\r\n pass\r\n\r\ndef testing():\r\n # Read stred data\r\n batch = 50\r\n\r\n trained_tf_data_folder = os.path.join('/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/', \"trained_tf_data\")\r\n save_file_name = 'facenet_00060-of-00060.meta'\r\n saver = tf.train.import_meta_graph(os.path.join(trained_tf_data_folder, save_file_name))\r\n \r\n sess = tf.Session()\r\n sess.__enter__()\r\n #with tf.Session() as sess:\r\n saver.restore(sess,tf.train.latest_checkpoint(trained_tf_data_folder))\r\n #summary_writer = tf.summary.FileWriter(os.path.join('summaries','facenet'), sess.graph) # Write tensorboard\r\n summary_writer = tbh.summary_writer_fn('loaded_session', sess)\r\n\r\n graph = tf.get_default_graph()\r\n input_dataset_placeholder = graph.get_tensor_by_name(\"placeholders/input:0\")\r\n train_valid_placeholder = graph.get_tensor_by_name(\"placeholders/tv_mode_selector_placeholder:0\")\r\n\r\n prediction = graph.get_tensor_by_name(\"predictions/ArgMax:0\")\r\n\r\n\r\n def _read_image(filepath):\r\n # Convert filepath string to string tensor\r\n tf_filepath = tf.convert_to_tensor(filepath, dtype=tf.string)\r\n\r\n # Read .JPEG image\r\n tf_image_string = tf.read_file(tf_filepath)\r\n image_tf = tf.image.decode_jpeg(tf_image_string, channels=3)\r\n\r\n # Rescale image and convert to float\r\n image_tf = tf.to_float(image_tf)\r\n #image_tf = tf.image.resize_images(image_tf, [224, 224], method=tf.image.ResizeMethod.AREA, align_corners=True)\r\n image_tf = tf.image.resize_images(image_tf, [224, 224])\r\n image_tf = image_tf * (1./255) # Normalization\r\n image_tf = tf.expand_dims(image_tf, 0)\r\n\r\n return image_tf\r\n\r\n image_tf = _read_image(\"/home/user170/shared-data/SJC_Dev/Projects/SJC_Git/Face-Detector/SJC-Face-Data/images/166/36.jpg\")\r\n \r\n #sess.run(tf.global_variables_initializer())\r\n image_tf = sess.run(image_tf)\r\n\r\n for i in range(batch-1):\r\n if i < 1:\r\n stacked_image = np.vstack([image_tf, image_tf])\r\n else:\r\n stacked_image = np.vstack([stacked_image, image_tf])\r\n\r\n prev_time = time.time()\r\n result = sess.run(prediction, feed_dict={input_dataset_placeholder: stacked_image, train_valid_placeholder:False})\r\n \r\n print(\"Execution time: \", time.time() - prev_time)\r\n \r\n print(result)\r\n\r\n return result[0]\r\n\r\nif __name__==\"__main__\":\r\n 
testing()\r\n","sub_path":"TensorFlow/Face-Recognition/recognition_phase.py","file_name":"recognition_phase.py","file_ext":"py","file_size_in_byte":9056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"606651688","text":"import argparse\r\nimport os\r\nimport os.path as osp\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport torch.utils.data as util_data\r\nfrom torch.autograd import Variable\r\n\r\nimport time\r\nimport json\r\nimport random\r\n\r\nfrom data_list import ImageList\r\nimport network\r\nimport loss\r\nimport pre_process as prep\r\nimport lr_schedule\r\nfrom gcn.gcn import GCN\r\n\r\noptim_dict = {\"SGD\": optim.SGD}\r\n\r\n\r\ndef image_classification_test(iter_test,len_now, base, class1,bottelneck, gpu=True):\r\n start_test = True\r\n COR = 0.\r\n Total = 0.\r\n print('Testing ...')\r\n for i in range(len_now):\r\n data = iter_test.next()\r\n inputs = data[0]\r\n labels = data[1]\r\n if gpu:\r\n inputs = Variable(inputs.cuda())\r\n labels = Variable(labels.cuda())\r\n else:\r\n inputs = Variable(inputs)\r\n labels = Variable(labels)\r\n output = base(inputs)\r\n outputs = class1(output)\r\n if start_test:\r\n all_output = outputs.data.float()\r\n all_label = labels.data.float()\r\n _, predict = torch.max(all_output, 1)\r\n COR = COR + torch.sum(torch.squeeze(predict).float() == all_label)\r\n Total = Total + all_label.size()[0]\r\n accuracy = float(COR)/float(Total)\r\n return accuracy\r\n\r\ndef train_classification(config):\r\n ## set pre-process\r\n prep_train = prep.image_train(resize_size=256, crop_size=224)\r\n prep_test = prep.image_test(resize_size=256, crop_size=224)\r\n \r\n ## set loss\r\n class_criterion = nn.CrossEntropyLoss()\r\n transfer_criterion = loss.loss_dict[\"LP\"]\r\n\r\n ## prepare data\r\n TEST_LIST = 'data/new_AwA2_common.txt'#AWA_T.txt'#'data/WEB_72.txt'\r\n TRAIN_LIST = 'data/I2AWA2_40.txt'#'AWA_SS.txt#'data/new_AwA2_common.txt'\r\n BSZ = args.batch_size\r\n\r\n dsets_train1 = ImageList(open(TRAIN_LIST).readlines(), shape = (args.img_size,args.img_size), transform=prep_train, train=True)\r\n loaders_train1 = util_data.DataLoader(dsets_train1, batch_size=BSZ, shuffle=True, num_workers=8, pin_memory=True)\r\n\r\n dsets_test = ImageList(open(TEST_LIST).readlines(), shape = (args.img_size,args.img_size),transform=prep_test, train=False)\r\n loaders_test = util_data.DataLoader(dsets_test, batch_size=BSZ, shuffle=True, num_workers=4, pin_memory=True)\r\n net_config = config[\"network\"]\r\n base_network = network.network_dict[net_config[\"name\"]]()\r\n classifier_layer1 = nn.Linear(base_network.output_num(), class_num)\r\n ## initialization\r\n for param in base_network.parameters():\r\n param.requires_grad = False\r\n for param in base_network.layer4.parameters():\r\n param.requires_grad = True\r\n for param in base_network.layer3.parameters():\r\n param.requires_grad = True\r\n \r\n use_gpu = torch.cuda.is_available()\r\n if use_gpu:\r\n classifier_layer1 = classifier_layer1.cuda()\r\n base_network = base_network.cuda()\r\n\r\n ## collect parameters\r\n parameter_list = [{\"params\":classifier_layer1.parameters(), \"lr\":10},\r\n {\"params\": base_network.layer3.parameters(), \"lr\":1},\r\n {\"params\": base_network.layer4.parameters(), \"lr\":5}]\r\n\r\n \r\n ## set optimizer\r\n optimizer_config = config[\"optimizer\"]\r\n optimizer = optim_dict[optimizer_config[\"type\"]](parameter_list, 
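# The facenet accuracy bookkeeping above divides every batch's hit count by
# the fixed batch size, which under-weights the final, smaller batch.
# Counting examples instead gives the exact figure (numpy arrays stand in
# for the sess.run outputs):
import numpy as np

hits, seen = 0, 0
for pred, labels in [(np.array([1, 2, 0]), np.array([1, 0, 0]))]:  # toy batch
    hits += int(np.sum(pred == labels))
    seen += labels.size
print("accuracy: %.2f%%" % (100.0 * hits / seen))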
**(optimizer_config[\"optim_params\"]))\r\n param_lr = []\r\n for param_group in optimizer.param_groups:\r\n param_lr.append(param_group[\"lr\"])\r\n schedule_param = optimizer_config[\"lr_param\"]\r\n lr_scheduler = lr_schedule.schedule_dict[optimizer_config[\"lr_type\"]]\r\n \r\n \r\n len_train_source = len(loaders_train1) - 1\r\n len_test_source = len(loaders_test) - 1\r\n optimizer.zero_grad()\r\n for i in range(config[\"num_iterations\"]):\r\n if (i + 1) % config[\"test_interval\"] == 0:\r\n base_network.train(False)\r\n classifier_layer1.train(False)\r\n print(str(i)+' ACC:')\r\n iter_target = iter(loaders_test)\r\n print(image_classification_test(iter_target,len_test_source, base_network, classifier_layer1, bottelneck, gpu=use_gpu))\r\n iter_target = iter(loaders_test)\r\n if not osp.exists(osp.join('model',args.save_name)):\r\n os.mkdir(osp.join('model',args.save_name))\r\n torch.save(base_network.state_dict(),osp.join('model',args.save_name,'base_net%d.pkl'%(i+1)))\r\n torch.save(classifier_layer1.state_dict(),osp.join('model',args.save_name,'class%d.pkl'%(i+1)))\r\n\r\n classifier_layer1.train(True)\r\n base_network.train(True)\r\n \r\n optimizer = lr_scheduler(param_lr, optimizer, i, **schedule_param)\r\n\r\n if i % (len_train_source-1) == 0:\r\n iter_source = iter(loaders_train1)\r\n if i % (len_test_source ) == 0:\r\n iter_target = iter(loaders_test)\r\n\r\n inputs_source, labels_source, labels_source_father, inputs_target = iter_source.next()\r\n\r\n if use_gpu:\r\n inputs_source, labels_source, inputs_target = Variable(inputs_source).cuda(), Variable(labels_source).cuda(), Variable(inputs_target).cuda()\r\n else:\r\n inputs_source, labels_source, inputs_target = Variable(inputs_source), Variable(labels_source),Variable(inputs_target)\r\n \r\n features_source = base_network(inputs_source)\r\n features_target = base_network(inputs_target)\r\n \r\n outputs_source1 = classifier_layer1(features_source)\r\n outputs_target1 = classifier_layer1(features_target)\r\n \r\n\r\n cls_loss = class_criterion(outputs_source1, labels_source)\r\n \r\n transfer_loss = transfer_criterion(features_source, features_target)\r\n \r\n\r\n\r\n total_loss = cls_loss + transfer_loss * args.w_align\r\n print(\"Step \"+str(i)+\": cls_loss: \"+str(cls_loss.cpu().data.numpy())+\r\n \" transfer_loss: \"+str(transfer_loss.cpu().data.numpy()))\r\n\r\n total_loss.backward(retain_graph=True)\r\n if (i+1)% config[\"opt_num\"] ==0:\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description='Transfer Learning')\r\n parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help=\"device id to run\")\r\n parser.add_argument('--batch_size', type=int, nargs='?', default=32, help=\"batch size\")\r\n parser.add_argument('--img_size', type=int, nargs='?', default=256, help=\"image size\")\r\n parser.add_argument('--save_name', type=str, nargs='?', default='base', help=\"loss name\")\r\n args = parser.parse_args()\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id \r\n\r\n config = {}\r\n config[\"num_iterations\"] = 3000\r\n config[\"test_interval\"] = 200\r\n config[\"save_num\"] = 200\r\n config[\"opt_num\"] = 1\r\n config[\"network\"] = {\"name\":\"ResNet50\"}\r\n config[\"optimizer\"] = {\"type\":\"SGD\", \"optim_params\":{\"lr\":1.0, \"momentum\":0.9, \"weight_decay\":0.0001, \"nesterov\":True}, \"lr_type\":\"inv\", \"lr_param\":{\"init_lr\":0.001, \"gamma\":0.001, \"power\":0.75} }\r\n print(config)\r\n print(args)\r\n 
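# The parameter_list above gives each group its own learning-rate multiplier
# (classifier 10x, layer3 1x, layer4 5x). Minimal PyTorch sketch of per-group
# learning rates with toy modules standing in for the real layers:
import torch.nn as nn
import torch.optim as optim

backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)
optimizer = optim.SGD(
    [{"params": backbone.parameters(), "lr": 0.001},
     {"params": head.parameters(), "lr": 0.01}],
    lr=0.001, momentum=0.9, weight_decay=0.0001, nesterov=True)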
train_classification(config)\r\n","sub_path":"DA_pmd.py","file_name":"DA_pmd.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"238683433","text":"from django.conf.urls import patterns, url\n#import List.views\nfrom List import views\n\n\nurlpatterns = patterns('List.views',\n\turl(r'^$', views.index, name='index'),\n\t#url(r'^list/complete/$', views.complete, name='complete'),\n\turl(r'^list/$', views.list, name='list'),\n\turl(r'^register/$', views.register, name='register'),\n\turl(r'^login/$', views.user_login, name='login'),\n\turl(r'^logout/$', views.user_logout, name='logout'),\n\turl(r'^login_fail/$', views.login_fail, name='login_fail'),\n)\t\n","sub_path":"ToDoList/List/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"502433750","text":"import pandas as pd \n\n\no_data = pd.read_csv(\"C:/Dropbox (LANCIS)/CARPETAS_TRABAJO/vhernandez/geo_lancis/centroide/clasificaciones.csv\") \ndata = o_data.sort_values('rank_order',ascending=True).copy()\none_n = 1/len(data.alternatives)\n\ndata2=data.assign(one_k=1/data.rank_order)\nprint (data2)\ndata2['w']=0\nfor i in range(len(data.alternatives)):\n data2.w.loc[i]=one_n*sum(data2.one_k[i:len(data.alternatives)])\nprint (data2)\ndata3 =data2.filter(['rank_order','alternatives','w'])\n\ndata3.to_csv(\"C:/Dropbox (LANCIS)/CARPETAS_TRABAJO/vhernandez/geo_lancis/centroide/clasificaiones_w_centroid.csv\",index=False)","sub_path":"codigos/secundarios/centroide.py","file_name":"centroide.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"253831049","text":"from flask import render_template, flash, redirect, session, url_for, request, g\nfrom app import app, db, lm, oid\nfrom flask.ext.login import login_user, logout_user, current_user, login_required\nfrom .forms import LoginForm, EditForm, PostForm, SearchForm\nfrom .models import User, Post\nfrom datetime import datetime\nfrom config import POSTS_PER_PAGE, MAX_SEARCH_RESULTS, DATABASE_QUERY_TIMEOUT\nfrom flask.ext.sqlalchemy import get_debug_queries\n\n@app.route('/', methods =['GET','POST'])\n@app.route('/index', methods =['GET','POST'])\n@app.route('/index/', methods =['GET','POST'])\n@login_required\ndef index(page = 1):\n\tform = PostForm()\n\tif form.validate_on_submit():\n\t\tpost = Post(body = form.post.data, timestamp = datetime.utcnow(), author = g.user)\n\t\tdb.session.add(post)\n\t\tdb.session.commit()\n\t\tflash('Post published.')\n\t\treturn redirect(url_for('index'))\t\t#this request makes sure that on a refresh we\n\t\t\t\t\t\t\t\t\t\t\t\t# don't do a Post, if user refreshes we get the \n\t\t\t\t\t\t\t\t\t\t\t\t#GET request for the index post and not the POST \n\t\t\t\t\t\t\t\t\t\t\t\t#request which will post the new post again\n\tposts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)\n\treturn render_template('index.html',\n\t\ttitle = 'Home', \n\t\tform = form,\n\t\tposts = posts)\n\n\n@app.route('/login', methods = ['GET', 'POST'])\n@oid.loginhandler\ndef login():\n\tif g.user is not None and g.user.is_authenticated():\n\t\treturn redirect(url_for('index'))\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tsession['remember_me'] = form.remember_me.data\n\t\treturn oid.try_login(form.openid.data, ask_for=['nickname', 'email'])\n\treturn 
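# The centroid script above computes rank-order-centroid style weights:
# w_i = (1/n) * sum_{k=i}^{n} 1/k for the alternative ranked i of n.
# Plain-Python sketch, no pandas:
def roc_weights(n: int) -> list:
    return [sum(1.0 / k for k in range(i, n + 1)) / n for i in range(1, n + 1)]

print(roc_weights(3))   # [0.611..., 0.277..., 0.111...] - sums to 1.0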
render_template('login.html', \n\t\ttitle ='Sign In',\n\t\tform = form,\n\t\tproviders = app.config['OPENID_PROVIDERS']\n\t\t)\n\n\n@oid.after_login\ndef after_login(resp):\n\tif resp.email is None or resp.email == \"\":\n\t\tflash('Invaid login. Please try again.')\n\t\treturn redirect(url_for('login'))\n\tuser = User.query.filter_by(email = resp.email).first()\n\tif user is None:\n\t\tnickname = resp.nickname\n\t\tif nickname is None or nickname == \"\":\n\t\t\tnickname = resp.email.split('@')[0]\n\t\tnickname = User.make_unique_nickname(nickname)\n\t\tuser = User(nickname = nickname, email = resp.email)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\tdb.session.add(user.follow(user))\n\t\tdb.session.commit()\n\tremember_me = False\n\t\n\tif'remember_me' in session:\n\t\tremember_me = session['remember_me']\n\t\tsession.pop('remember_me', None)\n\tlogin_user(user, remember = remember_me)\n\treturn redirect(request.args.get('next') or url_for('index'))\n\n@app.before_request\ndef before_request():\n\tg.user = current_user\n\tif g.user.is_authenticated():\n\t\tg.user.last_seen = datetime.utcnow()\n\t\tdb.session.add(g.user)\n\t\tdb.session.commit()\n\t\tg.search_form = SearchForm()\n\n@app.after_request\ndef after_request(resp):\n\tfor query in get_debug_queries():\n\t\tif query.duration >= DATABASE_QUERY_TIMEOUT:\n\t\t\tapp.logger.warning(\"SLOW QUERY: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n\" % (query.statement, query.parameters, query.duration, query.context))\n\treturn resp\n\n@app.route('/logout')\ndef logout():\n\tlogout_user()\n\treturn redirect(url_for('index'))\n\n@lm.user_loader\ndef load_user(id):\n\treturn User.query.get(int(id))\n\n@app.route('/user/')\n@app.route('/user//')\n@login_required\ndef user(nickname, page = 1):\n\tuser = User.query.filter_by(nickname = nickname).first()\n\tif user == None:\n\t\tflash('User %s not found.' 
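# after_login above relies on User.make_unique_nickname; only the call site
# is visible in this file, but the usual body (assumed here) appends a
# counter until the name is free. Framework-free sketch with a predicate
# standing in for the User.query lookup:
def make_unique_nickname(nickname, taken):
    if not taken(nickname):
        return nickname
    version = 2
    while taken("%s%d" % (nickname, version)):
        version += 1
    return "%s%d" % (nickname, version)

assert make_unique_nickname("kim", {"kim", "kim2"}.__contains__) == "kim3"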
% nickname)\n\t\treturn redirect(url_for('index'))\n\tposts = user.posts.paginate(page, POSTS_PER_PAGE, False)\n\treturn render_template('user.html',\n\t\t\t\t\t\t\tuser = user,\n\t\t\t\t\t\t\tposts = posts)\n\n@app.route('/edit', methods = ['GET', 'POST'])\n@login_required\ndef edit():\n\tform = EditForm(g.user.nickname)\n\tif form.validate_on_submit():\n\t\tg.user.nickname = form.nickname.data\n\t\tg.user.about_me = form.about_me.data\n\t\tdb.session.add(g.user)\n\t\tdb.session.commit()\n\t\tflash('Profile has been updated successfully!')\n\t\treturn redirect(url_for('edit'))\n\telse:\n\t\tform.nickname.data = g.user.nickname\n\t\tform.about_me.data = g.user.about_me\n\treturn render_template('edit.html', form = form)\n\n\n@app.route('/follow/')\n@login_required\ndef follow(nickname):\n\tuser = User.query.filter_by(nickname=nickname).first()\n\tif user is None:\n\t\tflash(\"User %s not found\" % nickname)\n\t\treturn redirect(url_for('index'))\n\tif user == g.user:\n\t\tflash(\"Following self isn't allowed\")\n\t\treturn redirect(url_for('index'))\n\tu = g.user.follow(user)\n\tif u is None:\n\t\tflash(\"Can't follow\" + nickname + '.')\n\t\treturn redirect(url_for('index'))\n\tdb.session.add(u)\n\tdb.session.commit()\n\tflash('Following ' + nickname + ' successful!')\n\treturn redirect(url_for('user', nickname= nickname))\n\n\n@app.route('/unfollow/')\n@login_required\ndef unfollow(nickname):\n\tuser = User.query.filter_by(nickname= nickname).first()\n\tif user is None:\n\t\tflash(\"User %s not found\" % nickname)\n\t\treturn redirect(url_for('index'))\n\tif user == g.user:\n\t\tflash(\"Unfollowing self not allowed\")\n\t\treturn redirect(url_for('index'))\n\tu = g.user.unfollow(user)\n\tif u is None:\n\t\tflash(\"Can't unfollow \" + nickname +'.')\n\t\treturn redirect(url_for('user',nickname=nickname))\n\tdb.session.add(u)\n\tdb.session.commit()\n\tflash('Unfollowing ' + nickname + ' successful!')\n\treturn redirect(url_for('user', nickname= nickname))\n\n@app.route('/search', methods =['POST'])\n@login_required\ndef search():\n\tif not g.search_form.validate_on_submit():\n\t\treturn redirect(url_for('index'))\n\treturn redirect(url_for('search_results', query = g.search_form.search.data))\n\n@app.route('/search_results/')\n@login_required\ndef search_results(query):\n\tresults = Post.query.whoosh_search(query, MAX_SEARCH_RESULTS).all()\n\treturn render_template('search_results.html',\n\t\t\t\t\t\t\tquery = query,\n\t\t\t\t\t\t\tresults = results\n\t\t\t\t\t\t\t)\n\n\n@app.route('/delete/')\n@login_required\ndef delete(id):\n\tpost = Post.query.get(id)\n\tif post is None:\n\t\tflash('Post Not Found.')\n\t\treturn redirect(url_for('index'))\n\tif post.author.id != g.user.id:\n\t\tflash('Can only delte your own posts!')\n\t\treturn redirect(url_for('index'))\n\tdb.session.delete(post)\n\tdb.session.commit()\n\tflash('Post deleted.')\n\treturn redirect(url_for('index'))\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef internal_error(error):\n db.session.rollback()\n return render_template('500.html'), 500\n","sub_path":"microblog/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482743456","text":"CONFIG = {\n #OFFENSE\n \"Passing Yards\" : 0.04,\n \"Passing Touchdowns\" : 4,\n \"Interceptions Thrown\" : -1,\n \"Rushing Yards\" : 0.1,\n \"Rushing Touchdowns\" : 6,\n \"Receiving 
Yards\" : 0.1,\n \"Receiving Touchdowns\" : 6,\n \"Two Point Conversion\" : 2,\n \"Fumbles Lost\" : -2,\n \"Receiving Reception\" : 1,\n\n #KICKERS\n \"0-39 Yd\" : 3,\n \"40-49 Yd\" : 4,\n \"50+ Yd\" : 5,\n \"Point After Touchdown\" : 1,\n\n #Team Defense/Special Teams\n \"Sacks\" : 1,\n \"INTERCEPTION\" : 2,\n \"Fumbles Recovered\" : 2,\n \"Touchdowns\" : 6,\n \"Safeties\" : 2,\n \"Blocked Kicks\" : 2,\n \"Returned for Touchdown\" : 6,\n \"0 Points Against\" : 10,\n \"1-6 Points Against\": 7,\n \"7-13 Points Against\": 4,\n \"14-20 Points Against\": 1,\n \"21-27 Points Against\": 0,\n \"28-34 Points Against\": -1,\n \"35+ Points Against\": -4,\n}\n\nPOSITIONS = {\n \"QUARTERBACK\" : 1,\n \"FULLBACK\" : 2,\n \"RUNNING BACK\" : 3,\n \"WIDE RECEIVER\" : 4,\n \"WIDE RECEIVER 1\" : 5,\n \"WIDE RECEIVER 2\" : 6,\n \"WIDE RECEIVER 3\" : 7,\n \"WIDE RECEIVER 4\" : 8,\n \"SPLIT END\" : 9,\n \"FLANKER\" : 10,\n \"TIGHT END\" : 11,\n \"KICKER\" : 12,\n \"PUNTER\" : 13,\n}\n\nTEAMS = {\n \"ARI\" : 1,\n \"ATL\" : 2,\n \"BAL\" : 3,\n \"BUF\" : 4,\n \"CAR\" : 5,\n \"CHI\" : 6,\n \"CIN\" : 7,\n \"CLE\" : 8,\n \"DAL\" : 9,\n \"DEN\" : 10,\n \"DET\" : 11,\n \"GB\" : 12,\n \"HOU\" : 13,\n \"IND\" : 14,\n \"JAC\" : 15,\n \"KC\" : 16,\n \"LA\" : 17,\n \"MIA\" : 18,\n \"MIN\" : 19,\n \"NE\" : 20,\n \"NO\" : 21,\n \"NYG\" : 22,\n \"NYJ\" : 23,\n \"OAK\" : 24,\n \"PHI\" : 25,\n \"PIT\" : 26,\n \"SD\" : 27,\n \"SEA\" : 28,\n \"SF\" : 29,\n \"STL\" : 30,\n \"TB\" : 31,\n \"TEN\" : 32,\n \"WAS\" : 33,\n \"BYE\" : 00,\n}\n\n\nALL_STATS = {\n #Punting\n \"punting_blk\" : \"Punt Blocked\",\n #Team Stats\n \"first_down\" : \"First Down\",\n \"rushing_first_down\" : \"Rushing First Down\"\n}\n","sub_path":"database/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"502544858","text":"\r\n# Phase 1\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport urls\r\nimport json\r\n\r\n\r\nDATA_SCRAPPED = []\r\n \r\ndef scrapTOI(url):\r\n src = requests.get(url)\r\n response = json.loads(src.content) \r\n required_data = [] # data where we store data which is required (title and link to follow)\r\n #print(response['stories'])\r\n v = ''\r\n for k, v in response.items():\r\n v = k\r\n \r\n if v=='items':\r\n v1 = None\r\n for e in response[v]:\r\n for k, v1 in e.items():\r\n if k=='stories':\r\n v1 = v1\r\n break\r\n for data in v1:\r\n for k, v in data.items():\r\n if k=='title' or k=='link':\r\n if k=='link':\r\n v = \"https://timesofindia.indiatimes.com\"+v\r\n required_data.append({\r\n k: v\r\n })\r\n return required_data\r\n else:\r\n for data in response[v]:\r\n for k, v in data.items():\r\n if k=='title' or k=='link':\r\n required_data.append({\r\n k: v\r\n })\r\n #print(required_data)\r\n return required_data\r\n \r\n \r\n'''TOI_SCRAPPED_DATA = []\r\n# url = urls.TOI_URLS[0]\r\n# print(url)\r\n\r\nfor url in urls.toi_urls:\r\n data = scrapTOI(url)\r\n for d in data:\r\n TOI_SCRAPPED_DATA.append(d)\r\n \r\n# print(TOI_SCRAPPED_DATA)'''\r\n \r\n\r\nHT_SCRAPPED_DATA=[]\r\ndef scrapHT(url):\r\n src = requests.get(url, headers=urls.headers)\r\n soup = bs(src.content, 'html.parser')\r\n data = soup.text\r\n data = data.split(\"\\n\")\r\n data = set(data)\r\n return list(data)\r\n\r\n\r\nfor url in urls.HT_URLS:\r\n HT_SCRAPPED_DATA.append(scrapHT(url))\r\n\r\n# print(HT_SCRAPPED_DATA)\r\nfor data in HT_SCRAPPED_DATA:\r\n for e in data:\r\n #print(\">> \", e)\r\n if len(e) > 20:\r\n 
DATA_SCRAPPED.append(e)\r\n\r\n\r\n\r\nTH_SCRAPPED_DATA = []\r\ndef scrapTH(url):\r\n src = requests.get(url)\r\n soup = bs(src.content, 'html.parser')\r\n #print(soup.text)\r\n data = soup.text\r\n data = data.split(\"\\n\")\r\n data = set(data)\r\n return list(data)\r\n\r\n\r\nfor url in urls.TH_URLS:\r\n TH_SCRAPPED_DATA.append(scrapTH(url))\r\n\r\n#print(TH_SCRAPPED_DATA)\r\nfor data in TH_SCRAPPED_DATA:\r\n for e in data:\r\n #print(\" >> \",e)\r\n if len(e) > 20:\r\n DATA_SCRAPPED.append(e)\r\n\r\n\r\n\"\"\"for e in DATA_SCRAPPED:\r\n print(\" ++++++>>>> \", e)\"\"\"\r\n \r\n \r\nprint(len(DATA_SCRAPPED))\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"FNDphase1webscrap.py","file_name":"FNDphase1webscrap.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"116884514","text":"import os\r\nimport cv2\r\nimport random\r\n\r\nneg_img_path = \"C:/Users/Pasca/OneDrive/Dokumente/Master_3.Semester/Master_3.Semester/SYSL/Cascade_Classifier/Negativ_images/\"\r\n\r\nfor num, path in enumerate(os.listdir(neg_img_path)):\r\n\r\n for i in range(1):\r\n img = cv2.imread(neg_img_path + path)\r\n #print(img.shape)\r\n x1 = random.randint(0, img.shape[1] - 33)\r\n y1 = random.randint(0, img.shape[0] - 33)\r\n x2 = random.randint(x1+32, img.shape[1])\r\n y2 = random.randint(y1+32, img.shape[0])\r\n\r\n img = img[y1:y2, x1:x2]\r\n\r\n x = min(img.shape[0], 224)\r\n y = min(img.shape[1], 224)\r\n\r\n img = cv2.resize(img, (x,y), interpolation=cv2.INTER_NEAREST)\r\n\r\n #print(x1, y1, x2, y2)\r\n cv2.imwrite(\"C:/Users/Pasca/OneDrive/Dokumente/Master_3.Semester/Master_3.Semester/SYSL/Cascade_Classifier/Concept_Whitening_new_class/\" + \"_\" + path, img)\r\n print(num)\r\n","sub_path":"Scripts/Viola and Jones Object Detection/scripts/create_samples_for_false_examples.py","file_name":"create_samples_for_false_examples.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"297550360","text":"\n\n#calss header\nclass _SLANDER():\n\tdef __init__(self,): \n\t\tself.name = \"SLANDER\"\n\t\tself.definitions = [u\"to damage someone's reputation by making a false spoken statement about them\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_slander.py","file_name":"_slander.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"279968166","text":"#!/usr/bin/env python\ndef main():\n f1_list = []\n f2_list = []\n common = []\n same_user = []\n sendFile('shadow-rh7.rhel7',f1_list)\n sendFile('shadow-rh5.rhel5', f2_list)\n print(\"\\nOriginal File1 has {} lines\\nOriginal File2 has {} lines\\n\".format(len(f1_list),len(f2_list)))\n for element in f2_list:\n if not element.startswith(\"root:\") and element not in f1_list:\n common.append(element)\n print(\"Number of lines that are different from RH5 and RH7 are {}:\\n{}\\n\".format(len(common),60*'+'))\n if len(common) > 0:\n for lg in f1_list:\n lg_user = lg.strip().split(':')[0]\n for cname in common:\n cname_user = cname.strip().split(':')[0]\n if cname_user == lg_user:\n user_combine = cname + ','+lg\n same_user.append(user_combine)\n f1_list.remove(lg)\n userPrint(common)\n print(\"\\nUsers already present in 
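# Caveat on the crop sampler above: numpy's img.shape is (height, width,
# channels) while cv2.resize takes dsize as (width, height), so its x/y end
# up swapped for non-square inputs. Crop-and-resize sketch with the axes
# kept straight:
import random
import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)            # h=480, w=640
h, w = img.shape[:2]
x1, y1 = random.randint(0, w - 33), random.randint(0, h - 33)
crop = img[y1:y1 + 32, x1:x1 + 32]                 # rows are y, cols are x
out = cv2.resize(crop, (224, 224), interpolation=cv2.INTER_NEAREST)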
RH7 file and RH5 are {},lines from RH5 and RH7 are combined using delimiter comma in case for comparing two lines :\\n{}\\n\".format(len(same_user),150*'+'))\n userPrint(same_user)\n print(\"\\n\")\n print(\"{} lines after removing common users from RH7 file \\n\".format(len(f1_list)))\n f1_list.extend(common)\n print(\"{} lines after appending new lines from RH5 file\".format(len(f1_list)))\n print (\"\\nOverwriting RH7 file now\\n\")\n with open('shadow-rh7.rhel7','w') as wfile1:\n for item in f1_list:\n wfile1.write(item + '\\n')\n print(\"Overwriting completed\")\n else:\n print(\"Nothing found to overwrite RH7 file\")\ndef sendFile(f,flist):\n with open(f, 'r') as file:\n fh = file.readlines()\n for fline in fh:\n line_list = fline.strip()\n flist.append(line_list)\ndef userPrint(ulist):\n for lc in ulist:\n print(\"{}\".format(lc))\n return lc\n print(\"\\n\")\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"shadow_comparefiles_overwrite.py","file_name":"shadow_comparefiles_overwrite.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"194628000","text":"from __future__ import annotations\n\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nimport yaml\n\nfrom pacco.manager.abstracts.remote import RemoteAbstract\nfrom pacco.manager.abstracts.remote_factory import create_remote_object\n\nDEFAULT_REMOTE_NAME = 'default'\n\n\nclass RemoteManager:\n \"\"\"\n Function to manage ``.pacco_config`` file as the storage for remote lists\n With ``RemoteManager``, you can manage multiple ``Remote`` s\n \"\"\"\n remotes: Dict[str, RemoteAbstract]\n\n def __init__(self):\n self.__pacco_config = os.path.join(str(Path.home()), '.pacco_config')\n if not os.path.exists(self.__pacco_config):\n self.remotes = {}\n self.default_remotes = []\n\n else:\n with open(self.__pacco_config, \"r\") as f:\n pacco_config = yaml.load(f, Loader=yaml.Loader)\n\n remotes_serialized = pacco_config['remotes']\n default_remotes = pacco_config['default']\n\n remotes = {name: create_remote_object(remotes_serialized[name])\n for name in remotes_serialized}\n\n self.remotes = remotes\n self.default_remotes = default_remotes\n self.save()\n\n def save(self) -> None:\n \"\"\"\n Save the current state to \".pacco_config\", this will also be done in the ``__del__``\n method, such that even if you forget to save, it will be auto saved when the program closes.\n \"\"\"\n serialized_remotes = {name: self.remotes[name].configuration for name in self.remotes}\n with open(self.__pacco_config, \"w\") as f:\n yaml.dump({'remotes': serialized_remotes, 'default': self.default_remotes}, stream=f)\n\n def get_remote(self, name: str) -> RemoteAbstract:\n \"\"\"\n Get the ``Remote`` based on the remote name.\n\n Args:\n name: the name of the remote\n Return:\n the package manager object\n \"\"\"\n if name not in self.remotes:\n raise KeyError(\"The remote named {} is not found\".format(name))\n return self.remotes[name]\n\n def list_remote(self) -> List[str]:\n \"\"\"\n Get the list of the remote names\n \"\"\"\n return list(self.remotes.keys())\n\n def add_remote(self, name: str, configuration: Dict[str, str]) -> None:\n \"\"\"\n Add/register a new remote. 
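# The shadow merge above is quadratic (nested scans of both files); keying a
# dict on the username field does the same job in one pass. RH5 entries
# overwrite RH7 ones for shared non-root users, matching the script's
# remove-then-extend logic:
def merge_shadow(rh7_lines, rh5_lines):
    merged = {line.split(":")[0]: line for line in rh7_lines}
    for line in rh5_lines:
        user = line.split(":")[0]
        if user != "root" and merged.get(user) != line:
            merged[user] = line
    return list(merged.values())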
Currently there is two possible configuration:\n\n Local client: ::\n\n {\n 'remote_type': 'local',\n 'path': '[PATH]', (optional, will use ~/.pacco/ if not declared)\n }\n\n Nexus site client: ::\n\n {\n 'remote_type': 'nexus_site',\n 'url': '[URL]',\n 'username': '[USERNAME]',\n 'password': '[PASSWORD]',\n }\n\n Args:\n name: the name of the new remote\n configuration: a dictionary of the configuration as described above.\n \"\"\"\n if name in self.list_remote():\n raise NameError(\"The remote with name {} already exists\".format(name))\n self.remotes[name] = create_remote_object(configuration)\n self.save()\n\n def remove_remote(self, name: str) -> None:\n \"\"\"\n De-register a remote from this remote manager.\n\n Args:\n name: the name of the remote to be de-registered\n \"\"\"\n if name in self.default_remotes:\n raise ValueError(\"The remote {} is still in default remote, remove it first\".format(name))\n if name not in self.remotes:\n raise KeyError(\"The remote {} is not registered\".format(name))\n del self.remotes[name]\n self.save()\n\n def get_default(self) -> List[str]:\n \"\"\"\n Get the list of the default remotes to be used in the default download\n\n Returns:\n the list of remotes in order to be tried. (index 0 will be tried first)\n \"\"\"\n return list(self.default_remotes)\n\n def set_default(self, remotes: List[str]) -> None:\n \"\"\"\n Set the default remote list as the \"try list\" for the default download\n\n Args:\n remotes: list of the remote names\n Exception:\n KeyError: when the Remote name does not exists\n \"\"\"\n for remote in remotes:\n if remote not in self.remotes:\n raise KeyError(\"remote {} does not exist\".format(remote))\n self.default_remotes = remotes\n self.save()\n\n def default_download(self, package_name: str, assignment: Dict[str, str],\n dir_path: str, fresh_download: Optional[bool] = False) -> None:\n \"\"\"\n Try to download a package binary from the remotes in the default remote list.\n\n Args:\n package_name: package registry name of the binary\n assignment: the dictionary of the binary configuration\n dir_path: the download destination\n fresh_download: will not use cache if True\n \"\"\"\n for remote_name in self.default_remotes:\n remote = self.get_remote(remote_name)\n if remote.try_download(package_name, assignment, fresh_download, dir_path):\n return\n raise FileNotFoundError(\"Such binary does not exist in any remotes in the default remote list\")\n","sub_path":"pacco/manager/remote_manager.py","file_name":"remote_manager.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568495150","text":"#!/usr/bin/python\n\n# coding: utf-8\n\n# Copyright 2018 AstroLab Software\n# Author: Chris Arnault\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nThe indextry.py script has to be present on the machine\n\nwhere the minimal HTML server has been activated as\n\n> python server.py\n\nThen, call in a web navigator the 
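# --- Editor's note: a standalone sketch of the "try each default remote in order"
# --- fallback implemented by RemoteManager.default_download() above; the Remote stub
# --- is a hypothetical stand-in for the real RemoteAbstract implementations.
class Remote:
    def __init__(self, name, has_binary):
        self.name = name
        self._has_binary = has_binary

    def try_download(self, package, dir_path):
        # the real method downloads files and reports success
        return self._has_binary

def download_from_first(remotes, package, dir_path):
    for remote in remotes:
        if remote.try_download(package, dir_path):
            return remote.name
    raise FileNotFoundError("no remote holds {}".format(package))

print(download_from_first([Remote("a", False), Remote("b", True)], "pkg", "./out"))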
URL\n\nhttp://:24701/indextry.py\n\nhttps://python-django.dev/page-python-serveur-web-creer-rapidement\n\"\"\"\n\n# coding: utf-8\n\nimport cgi\nfrom pylivy.session import *\nfrom pylivy.client import *\n\n\n\"\"\"\nDemo of using the pylivy library\n\nhttps://pylivy.readthedocs.io/en/latest/index.web\n\n\"\"\"\n\n\n# Initialize post variables\nclass Variable:\n def __init__(self, name, type=\"int\"):\n self.name = name\n self.type = type\n self.reset()\n\n def read(self):\n try:\n if self.type == \"int\":\n self.value = int(form.getvalue(self.name))\n else:\n value = form.getvalue(self.name)\n if value is None:\n value = \"\"\n self.value = value\n pass\n except:\n self.reset()\n pass\n\n def to_form(self):\n out = \"\"\"\"\"\".format(self.name, self.value)\n return out\n\n def debug(self):\n out = \" {} = {}\\n\".format(self.name, self.value)\n return out\n\n def reset(self):\n if self.type == \"int\":\n self.value = -1\n else:\n self.value = \"\"\n pass\n\n def set(self, value):\n if self.type == \"int\":\n try:\n self.value = int(value)\n except:\n self.value = -1\n else:\n self.value = value\n\n def is_set(self):\n if self.type == \"int\":\n try:\n if self.value >= 0:\n return True\n except:\n pass\n else:\n try:\n if len(self.value) > 0:\n return True\n except:\n pass\n\n return False\n\n def incr(self):\n if self.type == \"int\":\n self.value += 1\n\n def above(self, threshold):\n if self.type == \"int\":\n try:\n if self.value > threshold:\n return True\n except:\n pass\n\n return False\n\n\nclass VariableSet:\n def __init__(self, names, str_names):\n self.base = dict()\n\n type = \"int\"\n\n for name in names:\n if name in str_names:\n type = \"str\"\n else:\n type = \"int\"\n self.base[name] = Variable(name, type)\n\n def variable(self, name):\n return self.base[name]\n\n def read(self):\n for v in self.base:\n self.base[v].read()\n\n def to_form(self):\n out = \"\"\n for v in self.base:\n out += self.base[v].to_form()\n return out\n\n def debug(self):\n out = \"\"\n for v in self.base:\n out += self.base[v].debug()\n return out\n\n\n# ======================================================\nLIVY_URL = \"http://vm-75222.lal.in2p3.fr:21111\"\n\nform = cgi.FieldStorage()\nprint(\"Content-type: text/html; charset=utf-8\\n\")\n\nclient = LivyClient(LIVY_URL)\n\n# init data\nvariables = VariableSet([\"start\",\n \"simul\",\n \"change_simul\",\n \"livy_session\",\n \"waiting_session\",\n \"waiting_statement\",\n \"livy_statement\",\n \"new_statement\",\n \"kill_session\",\n \"result\"], [\"new_statement\", \"result\"])\n\nstart = variables.base[\"start\"]\nsimul = variables.base[\"simul\"]\nchange_simul = variables.base[\"change_simul\"]\nlivy_session = variables.base[\"livy_session\"]\nwaiting_session = variables.base[\"waiting_session\"]\nwaiting_statement = variables.base[\"waiting_statement\"]\nlivy_statement = variables.base[\"livy_statement\"]\nkill_session = variables.base[\"kill_session\"]\nnew_statement = variables.base[\"new_statement\"]\nresult = variables.base[\"result\"]\n\nvariables.read()\n\nif not start.is_set():\n simul.set(1)\n start.set(1)\n\n\n# ======================================================\n\nhtml = \"\"\"\n\n\n \n Mon programme test\n\n\n
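# --- Editor's note: the CGI page below simulates waiting across page reloads; a plain
# --- blocking version of the same pylivy flow is sketched here. It assumes the pylivy
# --- names used in this file (LivyClient, SessionKind, SessionState, StatementState)
# --- and a reachable Livy server, so treat it as illustrative only.
import time
from pylivy.client import LivyClient
from pylivy.session import SessionKind, SessionState, StatementState

def run_statement_blocking(livy_url, code, poll=1.0):
    client = LivyClient(livy_url)
    session = client.create_session(SessionKind.PYSPARK)
    # wait for the session to come up instead of counting page reloads
    while client.get_session(session.session_id).state != SessionState.IDLE:
        time.sleep(poll)
    statement = client.create_statement(session.session_id, code)
    while True:
        st = client.get_statement(session.session_id, statement.statement_id)
        if st.state == StatementState.AVAILABLE:
            break
        time.sleep(poll)
    client.delete_session(session.session_id)
    return st.output.text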
\n\nFink\nAlert dataset monitor\n\"\"\"\n\n# manage Livy simulation\n\nwill_change_simul = change_simul.is_set()\nchange_simul.reset()\n\nprint(\"change simul = {}\".format(will_change_simul))\n\nif will_change_simul:\n    if simul.is_set():\n        html += \"\"\"\n\nCurrently using real Livy\"\"\"\n        simul.reset()\n        html += variables.to_form()\n        html += \"\"\"\n\n    \"\"\"\n    else:\n        html += \"\"\"\n\nCurrently simulate Livy\"\"\"\n        simul.set(1)\n        html += variables.to_form()\n        html += \"\"\"\n\n    \"\"\"\nelse:\n    if simul.is_set():\n        html += \"\"\"\n\nCurrently simulate Livy\"\"\"\n        change_simul.set(1)\n        html += variables.to_form()\n        html += \"\"\"\n\n    \"\"\"\n    else:\n        html += \"\"\"\n\nCurrently using real Livy\"\"\"\n        change_simul.set(1)\n        html += variables.to_form()\n        html += \"\"\"\n\n    \"\"\"\n\nchange_simul.reset()\n\n# Manage Livy session & Spark statements\nhtml += \"\"\"\n\n    \"\"\"\n\nif simul.is_set():\n    if waiting_session.above(5):\n        print(\"session is now idle\")\n        waiting_session.reset()\n        waiting_statement.reset()\n        livy_statement.reset()\n        livy_session.set(1)\n\n    if waiting_statement.above(5):\n        print(\"statement just finished\")\n        waiting_session.reset()\n        waiting_statement.reset()\n        livy_statement.incr()\n\n# debugging\n# print(\"\")\n# print(\"Keys = [\", \",\".join(form.keys()), \"]\")\nprint(variables.debug())\n\n\"\"\"\nCommand interface\n- select Livy simulation\n- open session & wait for idle\n- start statement & wait for completion\n\"\"\"\n\nif kill_session.is_set():\n    id = livy_session.value\n    try:\n        client.delete_session(id)\n    except:\n        print(\"error killing session \", id)\n\n    livy_session.reset()\n    waiting_session.reset()\n    kill_session.reset()\n\nif livy_session.is_set():\n    # statement management\n    if not waiting_statement.is_set():\n        html += \"\"\"session is idle: we may start a statement\"\"\"\n        waiting_statement.set(0)\n        html += variables.to_form()\n        html += \"\"\"\n            Enter a Spark statement \n            \n            \n            \n            \"\"\".format(new_statement.value, result.value)\n    else:\n        html += \"\"\"session is idle, we do wait a statement to complete\"\"\"\n        waiting_statement.incr()\n        id = livy_session.value\n        s = client.get_session(id)\n        if not livy_statement.is_set():\n            st = client.create_statement(s.session_id, new_statement.value)\n            livy_statement.set(st.statement_id)\n        else:\n            st = client.get_statement(s.session_id, livy_statement.value)\n            if st.state == StatementState.AVAILABLE:\n                waiting_statement.reset()\n                result.set(st.output.text)\n                print(\"\", result.value)\n                livy_statement.reset()\n\n        html += variables.to_form()\n        html += \"\"\"\"\"\"\nelse:\n    # session management\n    if not waiting_session.is_set():\n        html += \"\"\"No session\"\"\"\n        waiting_session.set(0)\n\n        print(waiting_session.debug())\n\n        waiting_statement.reset()\n        html += variables.to_form()\n        html += \"\"\"\"\"\"\n    else:\n        # we have requested a new session thus waiting_session is set\n\n        if simul.is_set():\n            waiting_session.incr()\n        else:\n\n            if not livy_session.is_set():\n                print(\"Create a session \")\n                s = client.create_session(SessionKind.PYSPARK)\n                print(\"session {}\".format(s.session_id))\n                livy_session.set(s.session_id)\n\n            # we test if the session is already idle\n            id = livy_session.value\n            s = client.get_session(id)\n            if s.state == SessionState.IDLE:\n                print(\"session is now idle\")\n                waiting_session.reset()\n                waiting_statement.reset()\n                livy_statement.reset()\n                new_statement.reset()\n\n        html += \"\"\"Waiting session to become idle\"\"\"\n        html += variables.to_form()\n        html += \"\"\"\"\"\"\n\nhtml += \"\"\"\"\"\"\n\nif livy_session.is_set():\n    html += \"\"\"\n\"\"\"\n\n    kill_session.set(1)\n    html += variables.to_form()\n    html += \"\"\"\n        \n\n    \"\"\"\n\n\n\n\nhtml += \"\"\"\n\n© AstroLab Software 2018-2019\n\n
\n\n\n\n\"\"\"\n\n\nprint(html)\n\n\n","sub_path":"tuto/html/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":10481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"243139064","text":"import ta\nimport talib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom talib.abstract import *\n\ndf = pd.read_csv('/media/hoangnt/Data/DATASET/HistoryStockDATA/HistoryData/HNX/AAV.txt', sep=' ', names=[\"Timestamps\",\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"])\n\ninputs = {\n 'open': df['Open'],\n 'high': df['High'],\n 'low': df['Low'],\n 'close': df['Close'],\n 'volume': df['Volume'],\n}\n\nMA5 = SMA(inputs, timeperiod=5, price='close')\nMA20 = SMA(inputs, timeperiod=20, price='close')\n\nsignals = []\nfor i in range(len(MA20)):\n x1 = MA5[i-1]\n x2 = MA5[i]\n y1 = MA20[i-1]\n y2 = MA20[i]\n if (x1 > y1) and (x2 < y2):\n #print(\"x1 %f x2 %f\" % (x1, x2))\n #print(\"y1 %f y2 %f\" % (y1, y2))\n signals.append({df['Timestamps'].iloc[i] : 'SELL'})\n if (x1 < y1) and (x2 > y2):\n signals.append({df['Timestamps'].iloc[i] : 'BUY'})\n\nprint(signals)","sub_path":".ipynb_checkpoints/rules-checkpoint.py","file_name":"rules-checkpoint.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"60847506","text":"\n\nfrom GorceryDelivery.models import *\n\nclass ItemsInOrder:\n\n def __init__(self):\n\n self.errors = []\n\n def get_errors(self):\n return self.errors\n\n\n def order_item_details(self, request):\n order_item = {}\n\n measures_array = []\n presentation_array = []\n index = 0\n\n #******From here\n # use the post value of itemID to retrieve item from db\n # compute the detailed dict\n\n if request.POST.get('itemID', ''):\n order_item_id = request.POST.get('itemID', '')\n\n\n try:\n item_details = Item.objects.get(id=int(order_item_id)) # retrive item by id\n\n order_item['name'] = item_details.get_name()\n order_item['price'] = request.POST.get('price', '')\n order_item['img'] = item_details.img.url\n order_item['description'] = item_details.get_description()\n order_item['id'] = order_item_id\n order_item['qty'] = 1\n\n\n except:\n self.errors.append('item not in db')\n else:\n self.errors.append('the item ID is not found')\n\n if request.POST.get('measure', ''):\n market_measures = SalesMeasure.objects.filter(item_id=int(order_item['id']))\n\n selected_measure = None\n for measure in market_measures:\n if request.POST.get('measure', '') == measure.get_name():\n selected_measure = measure\n\n if selected_measure:\n order_item['market_measure'] = selected_measure.id\n order_item['price']= str(selected_measure.get_price()) # overide initial price with price of measure\n #update description to reflect market measure\n order_item['description'] = '\\n [Measure-' + selected_measure.get_description() + '; price -' + str(selected_measure.get_price()) +']'\n\n\n if request.POST.get('presentation', ''):\n item_presentation = Presentation.objects.filter(item_id=int(order_item['id']))\n\n selected_presentation = None\n for presentation in item_presentation:\n if request.POST.get('presentation', '') == presentation.get_name():\n selected_presentation = presentation\n\n\n if selected_presentation:\n order_item['presentation'] = selected_presentation.id\n order_item['description'] = order_item['description'] + '\\n [Presentation: '\n #for option in presentation_array:\n order_item['description'] = 
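# --- Editor's note: a pandas-only sketch of the MA5/MA20 crossover scan done with an
# --- explicit index loop in rules-checkpoint.py above; the synthetic price series and
# --- column names are illustrative assumptions.
import pandas as pd

def crossover_signals(close):
    ma5 = close.rolling(5).mean()
    ma20 = close.rolling(20).mean()
    above = ma5 > ma20
    # a signal fires whenever the fast average crosses the slow one
    buy = above & ~above.shift(1, fill_value=False)
    sell = ~above & above.shift(1, fill_value=False)
    return pd.DataFrame({"buy": buy, "sell": sell})

if __name__ == "__main__":
    close = pd.Series([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3, 4] * 3, dtype=float)
    sig = crossover_signals(close)
    print(sig[sig.buy | sig.sell])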
order_item['description'] + selected_presentation.get_description() +']'\n\n        if len(self.errors) > 0:\n            return False\n        else:\n            return order_item.copy()\n\n\n\n","sub_path":"GorceryDelivery/modules/ItemsInOrder.py","file_name":"ItemsInOrder.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"428401210","text":"\"\"\"\nGiven an array nums of n integers and a target value target,\nfind three integers in nums such that their sum is closest to target.\nReturn the sum of the three integers. You may assume that each input has exactly one solution.\n\nFor example, given the array nums = [-1,2,1,-4] and target = 1, the sum closest to target is 2. (-1 + 2 + 1 = 2).\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/3sum-closest\n\"\"\"\n\nclass Solution:\n    def threeSumClosest(self, nums: List[int], target: int) -> int:\n\n        nums.sort()\n        n = len(nums)\n        diff = float('inf')\n        for i in range(n-2):\n            if i > 0 and nums[i] == nums[i-1]:\n                continue\n            left = i + 1\n            right = n - 1\n            while left < right:\n                s = nums[i] + nums[left] + nums[right]\n                if abs(s - target) < diff:\n                    diff = abs(s - target)\n                    res = s\n                if s > target:\n                    right -= 1\n                elif s < target:\n                    left += 1\n                else:\n                    return target\n        return res\n\nif __name__ == \"__main__\":\n    nums = [-1, 2, 1, -4]\n    s = Solution()\n    print(s.threeSumClosest(nums, 2))\n","sub_path":"016 threeSumClosest.py","file_name":"016 threeSumClosest.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"172191810","text":"'''\r\n\r\nWe define as adjacent to a pixel p of an image the pixels adjacent to p horizontally or vertically.\r\nIf a pixel is on the border of the image, its neighborhood does not include pixels outside the image.\r\nThe pixel of the image with coordinates (x,y) therefore has as adjacent pixels those\r\nwith coordinates (x-1,y),(x+1,y),(x,y-1),(x,y+1) that belong to the image.\r\n\r\nWe call two pixels connected if it is possible to reach one from the other by moving only over\r\nadjacent pixels of the same color (obviously, for this to be possible it is necessary\r\nthat the two pixels have the same color).\r\n\r\nTo load and save PNG images use the load and save functions we prepared in the immagini.py module.\r\n\r\nWrite a function ricolora(fname, lista, fnameout) that, given:\r\n- the path of a file containing an image in PNG format\r\n- a list of quadruples (x,y,c1,c2) where x and y are coordinates of a pixel of the image and c1 and c2 are two RGB color triples\r\n- the path of a file (fnameout) to create,\r\nreads the image in fname, performs a recoloring operation on some pixels of the image and\r\nwrites the recolored image to the file fnameout.\r\n\r\nThe recoloring operation is the following. For each of the quadruples (x,y,c1,c2) in the list (in order),\r\n- all the pixels connected to the pixel with coordinates (x,y) must be recolored with color c1,\r\n- all the pixels of the perimeter (those on the 'border') of the area just colored must be recolored with color c2.\r\nThe perimeter of the colored area is the set of pixels that do not have all 4 neighbors inside the recolored area\r\n(that is, at least one neighbor has a color different from the one being recolored, or at least one does not exist because it would fall outside the image).\r\n\r\nConsider for example the image 'I1.png': the call ricolora('I1.png',[(10,10,(255,0,0), (0,0,255))],'OUT1.png')\r\nwill produce the image 'OUT1.png', identical to the starting image except that\r\nall the pixels connected to the pixel with coordinates (10,10) (which is green) are recolored\r\nred ((255,0,0)), while the pixels on the border of the initially green area are recolored blue.\r\n\r\nFor each recolored area we must also compute the inner area and the perimeter, defined as follows:\r\n- the inner area is the number of pixels recolored with color c1\r\n- the perimeter is the number of pixels recolored with color c2\r\n\r\nThe function must return the list of pairs (inner area, perimeter) in the same order in which the areas were colored.\r\n\r\nFor other examples see the file grade03.txt\r\n'''\r\n\r\nfrom immagini import *\r\nimport png\r\n\r\ndef load(filename):\r\n    with open(filename,mode='rb') as f:\r\n        r=png.Reader(file=f)\r\n        iw,ih,png_img,_=r.asRGB8()\r\n        img=[]\r\n        for png_row in png_img:\r\n            row=[]\r\n            for i in range(0,len(png_row),3):\r\n                row.append((png_row[i+0],\r\n                            png_row[i+1],\r\n                            png_row[i+2]))\r\n            img.append(row)\r\n    return img,iw,ih\r\n\r\ndef create(iw,ih):\r\n    matrix=[]\r\n    for _ in range(ih+1):\r\n        row=[]\r\n        for _ in range(iw+1):\r\n            row.append(0)\r\n        matrix.append(row)\r\n    return matrix\r\n\r\ndef save(filename,img):\r\n    pngimg=png.from_array(img,'RGB')\r\n    pngimg.save(filename)\r\n\r\ndef ricolora(fname, lista, fnameout):\r\n    '''Implement the function here'''\r\n    img,iw,ih=load(fname)\r\n    listav=[]\r\n    for h in lista:\r\n        Area=0\r\n        Perimetro=0\r\n        x,y,c1,c2=h\r\n        matrix=create(iw,ih)\r\n        c=img[x][y]\r\n        for i in range(len(img)):\r\n            for j in range(len(img[0])):\r\n                if img[i][j]==c:\r\n                    matrix[i][j]=1\r\n                if img[i][j]!=c:\r\n                    matrix[i][j]=0\r\n        matrix[x][y]+=1\r\n        Yessa=matrix[x][y]\r\n        Found=True\r\n        while Found:\r\n            allah=Yessa\r\n            Yes=False\r\n            for i in range(len(img)):\r\n                for j in range(len(img[0])):\r\n                    control=[(i+1,j),(i-1,j),(i,j+1),(i,j-1)]\r\n                    if matrix[i][j]==allah:\r\n                        for (k,l) in control:\r\n                            if matrix[k][l]==1:\r\n                                matrix[k][l]+=allah\r\n                                Yes=True\r\n                                Yessa=matrix[k][l]\r\n                    if i==len(img)-1 and j==len(img[0])-1 and matrix[i][j]!=allah and Yes==False:\r\n                        Found=False\r\n            if Yes==True:\r\n                allah=Yessa\r\n        for i in range(len(img)):\r\n            for j in range(len(img[0])):\r\n                if matrix[i][j]==1:\r\n                    matrix[i][j]=0\r\n        for i in range(len(img)):\r\n            for j in range(len(img[0])):\r\n                if matrix[i][j]>1:\r\n                    control=[(i+1,j),(i-1,j),(i,j+1),(i,j-1)]\r\n                    for (k,l) in control:\r\n                        if matrix[k][l]==0:\r\n                            matrix[i][j]=1\r\n        for i in range(len(img)):\r\n            for j in range(len(img[0])):\r\n                if matrix[i][j]>1:\r\n                    Area+=1\r\n                    img[j][i]=c1\r\n                if matrix[i][j]==1:\r\n                    Perimetro+=1\r\n                    img[j][i]=c2\r\n        listav.append((Area,Perimetro))\r\n    save(fnameout,img)\r\n    return listav","sub_path":"students/1769031/homework03/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"58764822","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 8/15/19 2:53 AM\n\n@author: nirav\n\"\"\"\nimport sys\nsys.path.insert(0, '../DB/')\n\nimport pandas as pd\nimport GetNWPData, GetGridsLandData, NWPDataProcessing\n\nclass INWPDataProcessing:\n    '''\n    Description:\n    ------------\n    Interface for NWPDataProcessing class and perform operations\n    '''\n\n    def __init__(self):\n        # Instantiate objects to get data from DB\n        self.getNWPData = GetNWPData.GetNWPData()\n        self.getGridsLandData = GetGridsLandData.GetGridsLandData()\n\n    def iNWPDataProcessing(self):\n        nwpData = self.getNWPData.getNWPData()\n        communityData = self.getNWPData.getCommunityData()\n        print('NWPDataProcessing Task: 1/5 completed')\n\n        self.nwpDataProcessing = NWPDataProcessing.NWPDataProcessing()\n        nwpData, uniqueMMSI = self.nwpDataProcessing.filterData(nwpData)\n        print ('NWPDataProcessing Task: 2/5 completed')\n\n        nwpData = self.nwpDataProcessing.convertLatLng(nwpData)\n        print('NWPDataProcessing Task: 3/5 completed')\n\n        nwpData = self.nwpDataProcessing.splitDateTime(nwpData)\n        print('NWPDataProcessing Task: 4/5 completed')\n\n        nwpData = self.nwpDataProcessing.sortValuesByDateTime(nwpData, uniqueMMSI, communityData)\n        print('NWPDataProcessing Task: 5/5 completed')\n        return nwpData\n\n    # For mapping grids to processed NWP data\n    def iMapGridstoNWPData(self, processedNWPData):\n        #processedNWPData = self.getNWPData.getProcessedNWPdata()\n        gridsData = self.getGridsLandData.getGridsData(tablename='processedgrids')\n        mappedNWPData = self.nwpDataProcessing.mapGridstoNWPData(processedNWPData, gridsData)\n        print('Mapping of Grids to NWP data completed')\n        return mappedNWPData\n\n    # For finding distance from mappedNWPData\n    def iProcessedNWPDataCalculation(self, mappedNWPData):\n        #mappedNWPData = self.getNWPData.getGridsMappedNWPData()\n        nwpData = self.nwpDataProcessing.processedNWPDataCalculation(mappedNWPData)\n        nwpData.to_csv('processedNWPDataCalculation.csv', sep=',', encoding='utf-8', index=False)\n        print('Final NWP Data is ready, written in processedNWPData.csv file')\n\n\nif __name__ == '__main__':\n    iNWPDataProcessing = INWPDataProcessing()\n    processedNWPData = iNWPDataProcessing.iNWPDataProcessing()\n    processedNWPData.to_csv('processedNWPData.csv', sep=',', encoding='utf-8', index=False)\n    print('processedNWPData.csv COMPLETED')\n\n    mappedNWPData = iNWPDataProcessing.iMapGridstoNWPData(processedNWPData)\n    mappedNWPData.to_csv('mappedNWPData.csv', sep=',', encoding='utf-8', index=False)\n    print('mappedNWPData.csv COMPLETED')\n\n    iNWPDataProcessing.iProcessedNWPDataCalculation(mappedNWPData)\n\n","sub_path":"Scripts/DataProcessing/INWPDataProcessing.py","file_name":"INWPDataProcessing.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"127679546","text":"# imports for reading the data\nimport os\nimport csv\nfrom sklearn.model_selection import train_test_split\n\n# path to data Folder\npath = './PATH/'\n\n# read in the csv\nsamples = []\nwith open(path+'driving_log.csv') as csvfile:\n    reader = csv.reader(csvfile)\n    for line in reader:\n        samples.append(line)\n\n# split data into train and validation set\ntrain_samples, validation_samples = train_test_split(samples,
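# --- Editor's note: the hand-rolled propagation loop in ricolora() above can also be
# --- written as a queue-based flood fill; this standalone sketch over a grid of RGB
# --- tuples returns the connected region and its border, mirroring the area/perimeter
# --- counting, but it is not the graded implementation.
from collections import deque

def flood_region(img, x, y):
    h, w = len(img), len(img[0])
    color = img[x][y]
    region, queue = {(x, y)}, deque([(x, y)])
    while queue:
        i, j = queue.popleft()
        for k, l in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
            if 0 <= k < h and 0 <= l < w and (k, l) not in region and img[k][l] == color:
                region.add((k, l))
                queue.append((k, l))
    border = {(i, j) for i, j in region
              if any(not (0 <= k < h and 0 <= l < w) or (k, l) not in region
                     for k, l in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)))}
    return region, border

if __name__ == "__main__":
    img = [[(0, 0, 0)] * 4 for _ in range(4)]
    region, border = flood_region(img, 1, 1)
    print(len(region), len(border))  # 16 pixels in the region, 12 on its border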
test_size=0.2)\n\n# make all necessary imports for working with the data\nimport cv2\nimport numpy as np\nimport sklearn\nfrom scipy.misc import imresize\nimport random\n\n# define function to randomly adjust brightness\ndef random_brighness(img):\n image1 = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)\n image1 = np.array(image1, dtype = np.float64)\n random_bright = .5+np.random.uniform()\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1[:,:,2][image1[:,:,2]>255] = 255\n image1 = np.array(image1, dtype = np.uint8)\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1\n\n# define function to randomly shift the image and\n# adjust the steering angle for shifted pixels\ndef trans_image(image,steer,trans_range):\n # Translation\n tr_x = trans_range*np.random.uniform()-trans_range/2\n steer_ang = steer + tr_x/trans_range*2*.2\n tr_y = 40*np.random.uniform()-40/2\n #tr_y = 0\n Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])\n image_tr = cv2.warpAffine(image,Trans_M,(200,66))\n return image_tr,steer_ang\n\n# define funstion for random shaddows\ndef add_random_shadow(image):\n top_y = 200*np.random.uniform()\n top_x = 0\n bot_x = 66\n bot_y = 200*np.random.uniform()\n image_hls = cv2.cvtColor(image,cv2.COLOR_RGB2HLS)\n shadow_mask = 0*image_hls[:,:,1]\n X_m = np.mgrid[0:image.shape[0],0:image.shape[1]][0]\n Y_m = np.mgrid[0:image.shape[0],0:image.shape[1]][1]\n shadow_mask[((X_m-top_x)*(bot_y-top_y) -(bot_x - top_x)*(Y_m-top_y) >=0)]=1\n #random_bright = .25+.7*np.random.uniform()\n if np.random.randint(2)==1:\n random_bright = .5\n cond1 = shadow_mask==1\n cond0 = shadow_mask==0\n if np.random.randint(2)==1:\n image_hls[:,:,1][cond1] = image_hls[:,:,1][cond1]*random_bright\n else:\n image_hls[:,:,1][cond0] = image_hls[:,:,1][cond0]*random_bright \n image = cv2.cvtColor(image_hls,cv2.COLOR_HLS2RGB)\n return image\n\n\n# define a generator to augment and read data when needed\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n \n # read in all 3 images\n name = path+'IMG/'+batch_sample[0].split('\\\\')[-1]\n name_left = path+'IMG/'+batch_sample[1].split('\\\\')[-1]\n name_right = path+'IMG/'+batch_sample[2].split('\\\\')[-1]\n\n # read in steering angle\n center_angle = float(batch_sample[3])\n\n # make random number to omit certian samples with a probaility\n random_num = random.random()\n\n # correction parameter for images left and right\n correction = 0.25\n \n # if going left randomly omit 30% of the data (otherwise car stays too far left)\n if center_angle <=0 and random_num > 0.7:\n pass\n \n\t\t\t\t# if going close to straight (with steering between -0.15 and 0.15) then\n\t\t\t\t# only read in the left and right image data, not the centered one\n if center_angle < 0.15 and center_angle > -0.15 and random_num > 0.4:\n \n # create adjusted steering measurements for the side camera images\n steering_left = center_angle + correction\n steering_right = center_angle - correction\n\n left_image = cv2.imread(name_left)\n right_image = cv2.imread(name_right)\n\n # trim image to only see section with road and resize for nvidea model (66 x 200)\n left_image = left_image[68:136, 0:320]\n left_image = cv2.resize(left_image, (200, 66))\n\n right_image = right_image[68:136, 0:320]\n right_image = cv2.resize(right_image, 
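# --- Editor's note: a self-contained version of the HSV brightness jitter implemented
# --- by random_brighness() above, exercised on a synthetic frame so it runs without
# --- the simulator recordings; OpenCV (cv2) and numpy are assumed, as in the source.
import cv2
import numpy as np

def jitter_brightness(img, low=0.5, high=1.5):
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float64)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] * np.random.uniform(low, high), 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)

if __name__ == "__main__":
    frame = np.full((66, 200, 3), 128, dtype=np.uint8)
    print(jitter_brightness(frame).mean())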
(200, 66))\n\n images.extend([left_image, right_image])\n angles.extend([steering_left, steering_right])\n\n # flip images and add them (doubles the size of training data)\n image_flipped_l = np.fliplr(left_image)\n measurement_flipped_l = -steering_left\n\n image_flipped_r = np.fliplr(right_image)\n measurement_flipped_r = -steering_right\n\n images.extend([image_flipped_l, image_flipped_r])\n angles.extend([measurement_flipped_l, measurement_flipped_r])\n\n else:\n\n # create adjusted steering measurements for the side camera images\n steering_left = center_angle + correction\n steering_right = center_angle - correction\n \n center_image = cv2.imread(name)\n left_image = cv2.imread(name_left)\n right_image = cv2.imread(name_right)\n\n # trim image to only see section with road and resize for nvidea model (66 x 200)\n center_image = center_image[68:136, 0:320]\n center_image = cv2.resize(center_image, (200, 66))\n\n left_image = left_image[68:136, 0:320]\n left_image = cv2.resize(left_image, (200, 66))\n\n right_image = right_image[68:136, 0:320]\n right_image = cv2.resize(right_image, (200, 66))\n\n images.extend([center_image, left_image, right_image])\n angles.extend([center_angle, steering_left, steering_right])\n\n # flip images\n image_flipped_c = np.fliplr(center_image)\n measurement_flipped_c = -center_angle\n\n image_flipped_l = np.fliplr(left_image)\n measurement_flipped_l = -steering_left\n\n image_flipped_r = np.fliplr(right_image)\n measurement_flipped_r = -steering_right\n\n images.extend([image_flipped_c, image_flipped_l, image_flipped_r])\n angles.extend([measurement_flipped_c, measurement_flipped_l, measurement_flipped_r])\n\n #add augmented data with random brightness adjustions, shifts and shadow\n aug_images=[]\n aug_angles=[]\n for image, angle in zip(images, angles):\n for _ in range(1):\n aug_images.append(random_brighness(image))\n aug_angles.append(angle)\n\n t_im, t_st = trans_image(image,angle,25)\n aug_images.append(t_im)\n aug_angles.append(t_st)\n\n aug_images.append(add_random_shadow(image))\n aug_angles.append(angle)\n\n images.extend(aug_images)\n angles.extend(aug_angles)\n \n X_train = np.array(images) \n y_train = np.array(angles)\n \n yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\n# define needed variables\nch, row, col = 3, 66, 200 # Trimmed image format \n\n# make all imports for the keras model\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Activation, Conv2D, Dropout, Cropping2D\nfrom keras.layers.advanced_activations import ELU\n\n# define keras model based on the nvidea model architecture\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/127.5 - 1.,\n input_shape=(row, col, ch),\n output_shape=(row, col, ch)))\nmodel.add(Conv2D(24, (5,5), strides=(2,2), padding='valid',\n kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Conv2D(36, (5,5), strides=(2,2), padding='valid',\n kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Conv2D(48, (5,5), strides=(2,2), padding='valid',\n kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Conv2D(64, (3,3), strides=(1,1), padding='valid',\n kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Conv2D(64, (3,3), strides=(1,1), padding='valid',\n 
kernel_initializer='he_normal'))\nmodel.add(ELU())\n\n# add dropout\nmodel.add(Dropout(0.5))\n\n# flatten and add fully connected layers\nmodel.add(Flatten())\nmodel.add(Dense(1164, kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Dense(100, kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Dense(50, kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Dense(10, kernel_initializer='he_normal'))\nmodel.add(ELU())\nmodel.add(Dense(1, kernel_initializer='he_normal'))\n\n# comiple model\nmodel.compile(loss='mse', optimizer='adam')\n\n# load weights of the model before\nmodel.load_weights('model_weights.h5')\n\n# train model and create history object for visualization\nhistory_object = model.fit_generator(train_generator, steps_per_epoch= len(train_samples)/32,\n validation_data=validation_generator,\n validation_steps=len(validation_samples)/32, epochs=20)\n\n# save model and model weights\n'''\nThis script was run on the AWS server.\nI only used the model weights and then loaded these and saved the model\nusing load_and_save.py\n'''\nmodel.save('model.h5')\nmodel.save_weights('model_weights.h5')\n\nprint('Saved model as: model.h5')\nprint('Saved weights as: model_weights.h5')\n\n# make imports for visualizing the data\nimport matplotlib as mpl\nmpl.use('Agg') # for running it on the server\nimport matplotlib.pyplot as plt\n\n# print the keys contained in the history object\nprint(history_object.history.keys())\n\n# plot the training and validation loss for each epoch\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.plot(history_object.history['loss'])\nax.plot(history_object.history['val_loss'])\nax.set_title('model mean squared error loss')\nax.set_ylabel('mean squared error loss')\nax.set_xlabel('epoch')\nax.legend(['training set', 'validation set'], loc='upper right')\nfig.savefig('./history_model.png')\n\nprint('ALL DONE')\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"436171822","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\n# for image processing\nimport skimage.io\nimport skimage.exposure\nimport skimage.morphology\nimport skimage.filters\n\n\n# load image\nphase_im = skimage.io.imread('data/bsub_100x_phase.tif')\ncfp_im = skimage.io.imread('data/bsub_100x_cfp.tif')\n\n# show image\n# plt.imshow(phase_im)\n# cmap = color map\n# don't use jet cmap\n#plt.imshow(phase_im, cmap=plt.cm.viridis)\n\n# plot histogram data\nhist_phase, bins_phase = skimage.exposure.histogram(phase_im)\nplt.plot(bins_phase, hist_phase)\nplt.xlabel('pixel value')\nplt.ylabel('count')\n\n# apply threshold values (the peak from the histogram is the background)\n# choose the intensity below the peak to choose only bacteria\nthresh = 325\nim_phase_thresh = phase_im < thresh\nplt.close()\n\nwith sns.axes_style('dark'):\n plt.imshow(im_phase_thresh, cmap=plt.cm.Greys_r)\n\n\n# show the cfp image\nwith sns.axes_style('dark'):\n plt.imshow(cfp_im, cmap=plt.cm.viridis)\n\n# slice down the image where it is damaged\nplt.close()\nwith sns.axes_style('dark'):\n plt.imshow(cfp_im[150:250, 450:550]/cfp_im.max(), cmap=plt.cm.viridis)\n\n# use median filter\n# make the structuring element\n# selem = skimage.morphology.disk(1)\n# plt.imshow(selem)\n# plt.imshow(selem, interpolation='nearest')\n\nselem = skimage.morphology.square(3)\ncfp_filt = skimage.filters.median(cfp_im, selem)\nwith 
sns.axes_style('dark'):\n plt.imshow(cfp_filt, cmap=plt.cm.viridis)\n\n\n# plot histogram data\nplt.close()\nhist_cfp, bins_cfp = skimage.exposure.histogram(cfp_filt)\nplt.plot(bins_cfp, hist_cfp)\nplt.xlabel('pixel value')\nplt.ylabel('count')\n\nthresh_cfp = 120\nim_cfp_thresh = cfp_filt > thresh_cfp\nplt.close()\n\n# otsu's method\nwith sns.axes_style('dark'):\n plt.imshow(im_cfp_thresh, cmap=plt.cm.Greys_r)\n\nwith sns.axes_style('dark'):\n plt.imshow(im_phase_thresh, cmap=plt.cm.Greys_r)\n\n\n\nplt.show()\n","sub_path":"lesson38.py","file_name":"lesson38.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"469252447","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 2 14:12:07 2020\n\n@author: jacopo\n\"\"\"\n\n'''\nCheck heterogeneity in behavior\n'''\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA, FastICA\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport classes as cl\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n\ndirectory_data = '/home/jacopo/OneDrive/Dati_Jac_locali/stack/'\n#qa_name = 'apple/'\nqa_name = 'ell/'\n\ndta = pd.read_csv(directory_data + qa_name + 'individual_chars.csv')\n\n# transform data so to have only dummies (year of registration not used because not really a stretegic deicision)\nmedian = dta.loc[dta['lenAboutMeALL']>0,'lenAboutMeALL'].median()\ndta.loc[:,'sizeAboutMe'] = pd.cut(dta['lenAboutMeALL'], bins=[-1,0,median,dta['lenAboutMeALL'].max()], labels=False)\n#dm = pd.get_dummies(dta['sizeAboutMe'], prefix='sizeAboutMe')\n\ndta.loc[:,'sizelinksAboutMe'] = pd.cut(dta['numLinksAboutMe'], bins=[-1,0,3,dta['numLinksAboutMe'].max()], labels=False)\n#dm2 = pd.get_dummies(dta['sizelinksAboutMe'], prefix='sizelinksAboutMe')\n\n#dm3 = pd.get_dummies(dta['yearRegistration'], prefix='year')\n\ndta[['Id','has_fullname','has_website', 'has_location', \n 'has_linkedin','sizeAboutMe','sizelinksAboutMe',\n 'yearRegistration']].to_csv(directory_data + qa_name + 'individual_chars_dummies.csv', index=False)\n\n'''\nANALYSIS IN R --> individual_heterogeneity.R\n'''\n# import output from R\n\ndta = pd.read_csv(directory_data + qa_name + 'individual_chars_dummies_wgroups.csv', index_col=0)\n'''\n# save for stata\ndtastata = dta[['Id','user_types']]\ndtastata.rename(columns={'Id':'user'}, inplace=True)\ndtastata.loc[:,'user'] = dtastata['user'].astype(str)\ndtastata.to_stata(directory_data + qa_name + 'individual_wgroups.dta', write_index=False)\n'''\n# badges correlated with type\nbadges = pd.read_csv(directory_data + qa_name + 'badge_hist.csv')\nbadges = badges.groupby('UserId').sum()\nbadges.reset_index(inplace=True)\n\nbadges = pd.merge(badges, dta[['Id','user_types']], left_on='UserId', right_on='Id', how='inner', validate='1:1')\n\nbarplotdt = badges.groupby('user_types')[['Gold','Silver','Bronze']].agg(['mean','std','sem','count'])\n\nbar_width = 0.2\nfig, ax = plt.subplots()\nplt.grid(axis='y')\nbronze = ax.bar(barplotdt.index.values - bar_width, barplotdt[('Bronze','mean')].values,\n width=bar_width, yerr=barplotdt[('Bronze','sem')], label='Bronze badges')\nsilver = ax.bar(barplotdt.index.values, barplotdt[('Silver','mean')].values,\n width=bar_width, yerr=barplotdt[('Silver','sem')], label='Silver badges')\ngold = ax.bar(barplotdt.index.values + bar_width, barplotdt[('Gold','mean')].values,\n 
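# --- Editor's note: the thresholds above (325 for phase, 120 for CFP) were read off
# --- histograms by hand; scikit-image can pick one automatically with Otsu's method.
# --- The bimodal synthetic image keeps this sketch runnable without the microscopy files.
import numpy as np
import skimage.filters

rng = np.random.default_rng(0)
im = np.concatenate([rng.normal(100, 10, 5000), rng.normal(300, 10, 5000)]).reshape(100, 100)
thresh = skimage.filters.threshold_otsu(im)
mask = im < thresh
print(thresh, mask.mean())  # the threshold lands between the two intensity modes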
width=bar_width, yerr=barplotdt[('Gold','sem')], label='Gold badges')\nplt.xticks(barplotdt.index.values, labels=['Type 1','Type 2','Type 3'])\nplt.legend()\nplt.title('Average number of badges obtained by each user')\nplt.xlabel('User type')\nplt.tight_layout()\n# saved numbadgesXtype.png\n\n### other correlations with type\nuser = cl.Users(qa_name, directory_data, out_type='df').users()\nuser.loc[:,'CreationDate'] = user['CreationDate'].apply(cl.date)\ndesigndate = pd.Timestamp(2016,2,25)\nuser_beforeGrad = user.loc[user['CreationDate']=1000),'isEditor'] = 1\nhist.loc[(hist['day']>=designdate) & (hist['rep_cum']>=2000),'isEditor'] = 1\nhist.loc[:,'isEditor'].fillna(0, inplace=True)\n\n## TIME TO BECOME EDITOR\n# early users history before graduation (those ones looking at the 1000 points threhsolds AND before they get to the design date)\nEUhist = hist.loc[(hist['user'].isin(user_beforeGrad)) & ((hist['day']\n#\n# Author: Julien Danjou \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport socket\n\nfrom oslo.config import cfg\n\nfrom ceilometer.openstack.common import context\nfrom ceilometer.openstack.common import log\nfrom ceilometer.openstack.common import rpc\nfrom ceilometer.openstack.common.rpc import service as rpc_service\n\n\ncfg.CONF.register_opts([\n cfg.IntOpt('periodic_interval',\n default=600,\n help='seconds between running periodic tasks'),\n cfg.StrOpt('host',\n default=socket.gethostname(),\n help='Name of this node. This can be an opaque identifier. '\n 'It is not necessarily a hostname, FQDN, or IP address. 
'\n 'However, the node name must be valid within '\n 'an AMQP key, and if using ZeroMQ, a valid '\n 'hostname, FQDN, or IP address'),\n])\n\nCLI_OPTIONS = [\n cfg.StrOpt('os-username',\n default=os.environ.get('OS_USERNAME', 'ceilometer'),\n help='Username to use for openstack service access'),\n cfg.StrOpt('os-password',\n default=os.environ.get('OS_PASSWORD', 'admin'),\n help='Password to use for openstack service access'),\n cfg.StrOpt('os-tenant-id',\n default=os.environ.get('OS_TENANT_ID', ''),\n help='Tenant ID to use for openstack service access'),\n cfg.StrOpt('os-tenant-name',\n default=os.environ.get('OS_TENANT_NAME', 'admin'),\n help='Tenant name to use for openstack service access'),\n cfg.StrOpt('os-auth-url',\n default=os.environ.get('OS_AUTH_URL',\n 'http://localhost:5000/v2.0'),\n help='Auth URL to use for openstack service access'),\n cfg.StrOpt('os-endpoint-type',\n default=os.environ.get('OS_ENDPOINT_TYPE', 'publicURL'),\n help='Type of endpoint in Identity service catalog to use for '\n 'communication with OpenStack services.'),\n]\ncfg.CONF.register_cli_opts(CLI_OPTIONS)\n\n\nclass PeriodicService(rpc_service.Service):\n\n def start(self):\n super(PeriodicService, self).start()\n admin_context = context.RequestContext('admin', 'admin', is_admin=True)\n self.tg.add_timer(cfg.CONF.periodic_interval,\n self.manager.periodic_tasks,\n context=admin_context)\n\n\ndef _sanitize_cmd_line(argv):\n \"\"\"Remove non-nova CLI options from argv.\"\"\"\n cli_opt_names = ['--%s' % o.name for o in CLI_OPTIONS]\n return [a for a in argv if a in cli_opt_names]\n\n\ndef prepare_service(argv=[]):\n rpc.set_defaults(control_exchange='ceilometer')\n cfg.set_defaults(log.log_opts,\n default_log_levels=['amqplib=WARN',\n 'qpid.messaging=INFO',\n 'sqlalchemy=WARN',\n 'keystoneclient=INFO',\n 'stevedore=INFO',\n 'eventlet.wsgi.server=WARN'\n ])\n cfg.CONF(argv[1:], project='ceilometer')\n log.setup('ceilometer')\n","sub_path":"ceilometer/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"616858385","text":"import asyncio\nimport os, json, time, base64, hmac, hashlib \nfrom aiohttp import FormData, ClientSession\n\nqiniu_domain_name = os.environ[\"QINIU_DOMAIN\"]\nqiniu_bucket_name = os.environ[\"QINIU_BUCKET\"]\nqiniu_access_key = os.environ[\"QINIU_ACCESS\"]\nqiniu_secret_key = os.environ[\"QINIU_SECRET\"]\n\ndef generate_token(store_key):\n\n put_policy = {\n \"scope\": \"{}:{}\".format(qiniu_bucket_name,store_key),\n \"deadline\": int(time.time()) + 3600, \n \"returnBody\": json.dumps({\n \"key\":\"$(key)\",\n \"type\":\"$(mimeType)\",\n \"size\":\"$(fsize)\",\n },separators=(',',':'))\n }\n\n put_policy = json.dumps(put_policy,separators=(',',':'))\n encoded_put_policy = base64.urlsafe_b64encode(put_policy.encode(\"utf-8\"))\n sign = hmac.new(qiniu_secret_key.encode(\"utf-8\"), encoded_put_policy, hashlib.sha1)\n encoded_sign = base64.urlsafe_b64encode(sign.digest())\n upload_token = '{}:{}:{}'.format(qiniu_access_key,encoded_sign.decode(\"utf-8\"),encoded_put_policy.decode(\"utf-8\"))\n\n return upload_token\n\n\ndef dynamic_url(uid,md5,style=None):\n\n download_url = \"http://{domain_name}/{uid}/{md5}{style}?e={deadline}\".format(\n domain_name = qiniu_domain_name,\n uid = str(uid).zfill(8),\n md5 = md5,\n style = '/{}'.format(style) if style else '',\n deadline = int(time.time()) + 3600\n )\n\n sign = hmac.new(qiniu_secret_key.encode(\"utf-8\"), 
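# --- Editor's note: a stdlib sketch of the "default comes from the environment"
# --- pattern the oslo.config CLI_OPTIONS above rely on; argparse stands in for
# --- cfg.CONF so the example runs without the OpenStack libraries.
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--os-username",
                    default=os.environ.get("OS_USERNAME", "ceilometer"),
                    help="Username to use for openstack service access")
parser.add_argument("--os-auth-url",
                    default=os.environ.get("OS_AUTH_URL", "http://localhost:5000/v2.0"),
                    help="Auth URL to use for openstack service access")

if __name__ == "__main__":
    args = parser.parse_args([])
    print(args.os_username, args.os_auth_url)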
download_url.encode(\"utf-8\"), hashlib.sha1)\n encoded_sign = base64.urlsafe_b64encode(sign.digest())\n download_token = '{}:{}'.format(qiniu_access_key,encoded_sign.decode(\"utf-8\"))\n\n return '{}&token={}'.format(download_url,download_token)\n\n@asyncio.coroutine\ndef transmit(temp_path,store_key):\n\n if os.path.exists(temp_path):\n print('ok it saved')\n\n print(temp_path)\n print(store_key)\n\n upload_token = generate_token(store_key)\n \n data = FormData()\n data.add_field('key', store_key)\n data.add_field('token', upload_token)\n data.add_field('file',\n open(temp_path, 'rb'),\n filename = os.path.split(temp_path)[-1],\n content_type = 'application/octet-stream')\n\n session = ClientSession()\n response = yield from session.post('http://up-z2.qiniu.com',data = data)\n text = yield from response.text()\n yield from session.close()\n print(text)\n os.remove(temp_path)\n return","sub_path":"server/routes/oss.py","file_name":"oss.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552127115","text":"import pandas as pd\n\n\n# read data\nrawData = pd.read_csv(\"C:/users/gordi/Desktop/Code/Projects/StockMarketVisual/Data/CPSC-583-Project-Data.csv\")\nheaders = list(rawData.columns)\n\n# new table\nnewFormat = pd.DataFrame(columns=('Date', 'Ticker', 'Price'))\n\noffset = 0\nfor index, row in rawData.iterrows():\n # add 11 rows to new table\n idx = 0\n offset += 11\n while idx < len(headers)-1:\n newFormat.loc[offset + idx, 'Date'] = row[0]\n newFormat.loc[offset + idx, 'Ticker'] = headers[idx+1][0:3]\n newFormat.loc[offset + idx, 'Price'] = row[idx+1]\n idx += 1\n\nprint(newFormat.head(22))\nnewFormat.to_csv('C:/users/gordi/Desktop/Code/Projects/StockMarketVisual/Data/FormattedData1.csv', index=False)","sub_path":"FormatData.py","file_name":"FormatData.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"583496980","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\ndef find_subarr(input_lst, num):\n mas = []\n sum = 0\n for i in range(len(input_lst)):\n a = input_lst[i]\n if a == num:\n return (i, i)\n sum += a\n if sum - num not in mas:\n mas.append(sum - num)\n else:\n return (mas.index(sum - num), i)\n return []\n","sub_path":"homeworks/homework_01/hw1_subarr.py","file_name":"hw1_subarr.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"634561479","text":"from PyQt5.QtWidgets import (QLineEdit, QLabel, QComboBox)\nfrom PyQt5.QtCore import Qt\nimport multiprocessing as mp\n\nfrom ..app.error import show_error\nfrom ...dialogs.calcdialog import CalcDialog\n\n\n# Miscellaneous functions for reading, saving and initializing parameters\n# =====================================================================\ndef clear_layout(layout):\n \"\"\"Clear layout.\"\"\"\n for i in reversed(range(layout.count())):\n layout.itemAt(i).widget().setParent(None)\n\n\n# ---------------------------------------------------------------------\ndef _init_psd_parameters(self):\n \"\"\"Set the parameters in the parameters text slot\n \"\"\"\n clear_layout(self.ui.labels)\n clear_layout(self.ui.lines)\n self.ui.labels.addWidget(QLabel('Fmin (Hz)'))\n self.ui.labels.addWidget(QLabel('Fmax (Hz)'))\n self.ui.labels.addWidget(QLabel('Tmin (s)'))\n self.ui.labels.addWidget(QLabel('Tmax (s)'))\n self.fmin = QLineEdit()\n 
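# --- Editor's note: a stdlib-only sketch of the sign-then-urlsafe-base64 step shared
# --- by generate_token() and dynamic_url() above; the secret key and payload here are
# --- dummies, not real Qiniu credentials.
import base64
import hashlib
import hmac

def sign(secret_key, payload):
    digest = hmac.new(secret_key.encode("utf-8"), payload.encode("utf-8"), hashlib.sha1)
    return base64.urlsafe_b64encode(digest.digest()).decode("utf-8")

if __name__ == "__main__":
    print(sign("dummy-secret", "http://example.com/file?e=1700000000"))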
self.fmax = QLineEdit()\n self.tmin = QLineEdit()\n self.tmax = QLineEdit()\n self.ui.lines.addWidget(self.fmin)\n self.ui.lines.addWidget(self.fmax)\n self.ui.lines.addWidget(self.tmin)\n self.ui.lines.addWidget(self.tmax)\n\n if self.data.info['lowpass'] is not None:\n self.fmax.setText('{:2.1f}'.format(self.data.info['lowpass']))\n else:\n self.fmax.setText('{:2.1f}'.format(self.data.info['sfreq'] / 2))\n\n if self.data.info['highpass'] is not None:\n self.fmin.setText('{:2.1f}'.format(self.data.info['highpass']))\n else:\n self.fmin.setText('0')\n self.tmin.setText('{:2.1f}'.format(self.data.times[0]))\n self.tmax.setText('{:2.1f}'.format(self.data.times[-1]))\n\n if self.ui.psdMethod.currentText() == 'welch':\n self.ui.labels.addWidget(QLabel('FFT points'))\n self.ui.labels.addWidget(QLabel('Length of segments (points)'))\n self.ui.labels.addWidget(QLabel('Overlapping of segments (points)'))\n self.n_fft = QLineEdit()\n self.n_per_seg = QLineEdit()\n self.n_overlap = QLineEdit()\n self.ui.lines.addWidget(self.n_fft)\n self.ui.lines.addWidget(self.n_per_seg)\n self.ui.lines.addWidget(self.n_overlap)\n self.n_fft.setText(str(min(len(self.data.times), 2048)))\n self.n_per_seg.setText(str(int(int(self.n_fft.text()) / 2)))\n self.n_overlap.setText(str(int(int(self.n_fft.text()) / 4)))\n if self.ui.psdMethod.currentText() == 'multitaper':\n self.ui.labels.addWidget(QLabel('Bandwidth (Hz)'))\n self.bandwidth = QLineEdit()\n self.ui.lines.addWidget(self.bandwidth)\n self.bandwidth.setText('4')\n\n\n# ---------------------------------------------------------------------\ndef _init_tfr_parameters(self):\n \"\"\"Set the parameters in the parameters text slot\n \"\"\"\n clear_layout(self.ui.labels)\n clear_layout(self.ui.lines)\n self.ui.labels.addWidget(QLabel('Fmin (Hz)'))\n self.ui.labels.addWidget(QLabel('Fmax (Hz)'))\n self.fmin = QLineEdit()\n self.fmax = QLineEdit()\n self.ui.lines.addWidget(self.fmin)\n self.ui.lines.addWidget(self.fmax)\n\n if self.data.info['lowpass'] is not None:\n self.fmax.setText('{:2.1f}'.format(self.data.info['lowpass']))\n else:\n self.fmax.setText('{:2.1f}'.format(self.data.info['sfreq'] / 2))\n\n if self.data.info['highpass'] is not None:\n self.fmin.setText('{:2.1f}'.format(self.data.info['highpass']))\n else:\n self.fmin.setText('0')\n\n if self.ui.tfrMethodBox.currentText() == 'multitaper':\n self.ui.labels.addWidget(QLabel('Frequency Step (Hz)'))\n self.tfr_param = QComboBox()\n self.tfr_param.addItem('Time Window (s)')\n self.tfr_param.addItem('Number of cycles')\n self.ui.labels.addWidget(self.tfr_param)\n self.ui.labels.addWidget(QLabel('Time Bandwidth (s.Hz)'))\n self.fstep = QLineEdit()\n self.time_bandwidth = QLineEdit()\n self.cycles = QLineEdit()\n self.ui.lines.addWidget(self.fstep)\n self.ui.lines.addWidget(self.cycles)\n self.ui.lines.addWidget(self.time_bandwidth)\n self.fstep.setText('1')\n self.cycles.setText('0.5')\n self.time_bandwidth.setText('4')\n if self.ui.tfrMethodBox.currentText() == 'morlet':\n self.ui.labels.addWidget(QLabel('Frequency Step (Hz)'))\n self.tfr_param = QComboBox()\n self.tfr_param.addItem('Time Window (s)')\n self.tfr_param.addItem('Number of cycles')\n self.ui.labels.addWidget(self.tfr_param)\n self.fstep = QLineEdit()\n self.cycles = QLineEdit()\n self.ui.lines.addWidget(self.fstep)\n self.ui.lines.addWidget(self.cycles)\n self.fstep.setText('1')\n self.cycles.setText('0.5')\n if self.ui.tfrMethodBox.currentText() == 'stockwell':\n self.ui.labels.addWidget(QLabel('Width'))\n self.ui.labels.addWidget(QLabel('FFT 
points'))\n self.width = QLineEdit()\n self.n_fft = QLineEdit()\n self.ui.lines.addWidget(self.width)\n self.ui.lines.addWidget(self.n_fft)\n self.width.setText('1')\n self.n_fft.setText(str(min(len(self.data.times), 2048)))\n\n\n# ---------------------------------------------------------------------\ndef _save_matrix(self):\n \"\"\"Save the matrix containing the PSD\n \"\"\"\n n_files = len(self.filePaths)\n if n_files == 1:\n print('Saving one file ...', end='')\n if self.type == 'epochs':\n self.init_epochs_psd()\n if self.type == 'raw':\n self.init_raw_psd()\n self.psd.save_avg_matrix_sef(self.savepath)\n print('done !')\n\n else:\n from os.path import basename, splitext, join\n\n print('Batch Processing of {} files'\n .format(len(self.filePaths)))\n n = 0\n for path in self.filePaths:\n print('Saving file {} out of {} ...'\n .format(n+1, n_files), end='')\n file_name = splitext(basename(path))[0]\n self.ui.dataFilesBox.setCurrentIndex(0)\n if self.type == 'epochs':\n self.init_epochs_psd()\n if self.type == 'raw':\n self.init_raw_psd()\n\n savepath = join(self.savepath, file_name + '-PSD.sef')\n self.psd.save_avg_matrix_sef(savepath)\n print('done !')\n n += 1\n\n\n# ---------------------------------------------------------------------\ndef _read_psd_parameters(self):\n \"\"\"Read parameters from txt file and sets it up in params\"\"\"\n\n try:\n self.params = {}\n self.params['fmin'] = float(self.fmin.text())\n self.params['fmax'] = float(self.fmax.text())\n self.params['tmin'] = float(self.tmin.text())\n self.params['tmax'] = float(self.tmax.text())\n if self.ui.psdMethod.currentText() == 'multitaper':\n self.params['bandwidth'] = float(self.bandwidth.text())\n if self.ui.psdMethod.currentText() == 'welch':\n self.params['n_fft'] = int(self.n_fft.text())\n self.params['n_per_seg'] = int(self.n_per_seg.text())\n self.params['n_overlap'] = int(self.n_overlap.text())\n\n except Exception as e: # Print exception for parameters\n print(e)\n\n\n# ---------------------------------------------------------------------\ndef _read_tfr_parameters(self):\n \"\"\"Read parameters from txt file and sets it up in params\"\"\"\n\n try:\n self.params = {}\n self.params['fmin'] = float(self.fmin.text())\n self.params['fmax'] = float(self.fmax.text())\n if self.ui.tfrMethodBox.currentText() == 'multitaper':\n self.params['freq_step'] = float(self.fstep.text())\n if self.tfr_param.currentText == 'Time Window (s)':\n self.params['time_window'] = float(self.cycles.text())\n self.params['n_cycles'] = None\n else:\n self.params['n_cycles'] = float(self.cycles.text())\n self.params['time_window'] = None\n self.params['time_bandwidth'] = float(self.time_bandwidth.text())\n if self.ui.tfrMethodBox.currentText() == 'morlet':\n self.params['freq_step'] = float(self.fstep.text())\n if self.tfr_param.currentText == 'Time Window (s)':\n self.params['time_window'] = float(self.cycles.text())\n self.params['n_cycles'] = None\n else:\n self.params['n_cycles'] = float(self.cycles.text())\n self.params['time_window'] = None\n if self.ui.tfrMethodBox.currentText() == 'stockwell':\n self.params['width'] = float(self.width.text())\n self.params['n_fft'] = int(self.n_fft.text())\n self.params['time_window'] = None\n self.params['n_cycles'] = None\n\n except Exception as e: # Print exception for parameters\n print(e)\n\n\n# ---------------------------------------------------------------------\ndef _init_epochs_psd(self):\n \"\"\"Initialize the instance of EpochsPSD.\"\"\"\n from .epochs_psd import EpochsPSD\n\n if 
self.ui.psdMethod.currentText() == 'welch':\n n_fft = self.params['n_fft']\n kwds = dict(epochs=self.data,\n fmin=self.params['fmin'],\n fmax=self.params['fmax'],\n tmin=self.params['tmin'],\n tmax=self.params['tmax'],\n method='welch',\n n_fft=n_fft,\n n_per_seg=self.params.get('n_per_seg', n_fft),\n n_overlap=self.params.get('n_overlap', 0),\n type=self.ui.typeBox.currentText())\n\n if self.ui.psdMethod.currentText() == 'multitaper':\n kwds = dict(epochs=self.data,\n fmin=self.params['fmin'],\n fmax=self.params['fmax'],\n tmin=self.params['tmin'],\n tmax=self.params['tmax'],\n method='multitaper',\n bandwidth=self.params.get('bandwidth', 4),\n type=self.ui.typeBox.currentText())\n\n calc = CalcDialog(self, \"\",\n \"Computing Power Spectrum Density...\")\n calc.resize(300, 100)\n calc.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint)\n pool = mp.Pool(1)\n psd = EpochsPSD()\n res = pool.apply_async(func=psd.init,\n kwds=kwds,\n callback=lambda x: calc.accept())\n\n if not calc.exec_():\n pool.terminate()\n\n self.psd = res.get(timeout=1)\n\n\n# ---------------------------------------------------------------------\ndef _init_raw_psd(self):\n \"\"\"Initialize the instance of RawPSD.\"\"\"\n from .raw_psd import RawPSD\n\n if self.ui.psdMethod.currentText() == 'welch':\n kwds = dict(raw=self.data,\n fmin=self.params['fmin'],\n fmax=self.params['fmax'],\n tmin=self.params['tmin'],\n tmax=self.params['tmax'],\n method='welch',\n n_fft=self.params.get('n_fft', 2048),\n n_per_seg=self.params.get('n_per_seg', 2048),\n n_overlap=self.params.get('n_overlap', 0),\n type=self.ui.typeBox.currentText())\n\n if self.ui.psdMethod.currentText() == 'multitaper':\n kwds = dict(raw=self.data,\n fmin=self.params['fmin'],\n fmax=self.params['fmax'],\n tmin=self.params['tmin'],\n tmax=self.params['tmax'],\n method='multitaper',\n bandwidth=self.params.get('bandwidth', 4),\n type=self.ui.typeBox.currentText())\n\n calc = CalcDialog(self, \"\",\n \"Computing Power Spectrum Density...\")\n calc.resize(300, 100)\n calc.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint)\n pool = mp.Pool(1)\n psd = RawPSD()\n res = pool.apply_async(func=psd.init,\n kwds=kwds,\n callback=lambda x: calc.accept())\n\n if not calc.exec_():\n pool.terminate()\n\n self.psd = res.get(timeout=1)\n\n\n# ---------------------------------------------------------------------\ndef _open_epochs_psd_visualizer(self):\n \"\"\"Open PSD visualizer for epochs data\n \"\"\"\n from ..app.epochs_psd import EpochsPSDWindow\n\n _init_epochs_psd(self)\n psdVisualizer = EpochsPSDWindow(self.psd, parent=None)\n psdVisualizer.setWindowModality(Qt.WindowModal)\n psdVisualizer.setWindowTitle(self.windowTitle())\n psdVisualizer.exec()\n\n\n# ---------------------------------------------------------------------\ndef _open_raw_psd_visualizer(self):\n \"\"\"Open PSD Visualizer for raw type data\n \"\"\"\n from ..app.raw_psd import RawPSDWindow\n\n _init_raw_psd(self)\n\n psdVisualizer = RawPSDWindow(self.psd, parent=None)\n psdVisualizer.setWindowModality(Qt.WindowModal)\n psdVisualizer.setWindowTitle(self.windowTitle())\n psdVisualizer.exec()\n\n\n# ---------------------------------------------------------------------\ndef _init_avg_tfr(self):\n \"\"\"Init tfr from parameters\n \"\"\"\n from .avg_epochs_tfr import AvgEpochsTFR\n from .util import float_, int_\n from numpy import arange\n\n fmin = self.params['fmin']\n fmax = self.params['fmax']\n step = self.params.get('freq_step', 1)\n freqs = arange(fmin, fmax, step)\n if self.params['time_window'] is not None:\n 
n_cycles = self.params['time_window'] * freqs\n    else:\n        n_cycles = self.params['n_cycles']\n    n_fft = self.params.get('n_fft', None)\n\n    calc = CalcDialog(self, \"\",\n                      \"Computing Time-Frequency...\")\n    calc.resize(300, 100)\n    calc.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint)\n    pool = mp.Pool(1)\n\n    avgTFR = AvgEpochsTFR()\n    args = (self.data, freqs, n_cycles)\n    kwds = dict(method=self.ui.tfrMethodBox.currentText(),\n                time_bandwidth=self.params.get('time_bandwidth', 4),\n                width=self.params.get('width', 1),\n                n_fft=n_fft, type=self.ui.typeBox.currentText())\n\n    res = pool.apply_async(func=avgTFR.init,\n                           args=args,\n                           kwds=kwds,\n                           callback=lambda x: calc.accept())\n\n    if not calc.exec_():\n        pool.terminate()\n\n    self.avgTFR = res.get(timeout=1)\n\n\n# ---------------------------------------------------------------------\ndef _open_tfr_visualizer(self):\n    \"\"\"Open the TFR visualizer.\"\"\"\n    from ..app.avg_epochs_tfr import AvgTFRWindow\n    try:\n        _init_avg_tfr(self)\n        tfrVisualizer = AvgTFRWindow(self.avgTFR, parent=None)\n        tfrVisualizer.setWindowModality(Qt.WindowModal)\n        tfrVisualizer.setWindowTitle(self.windowTitle())\n        tfrVisualizer.exec()\n\n    except Exception as e:\n        print(e)\n","sub_path":"mnelab/tfr/backend/time_freq.py","file_name":"time_freq.py","file_ext":"py","file_size_in_byte":14746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"620944386","text":"# -*- coding: utf-8 -*-\nfrom functools import wraps\n\nfrom flask import render_template, request, abort, flash\nfrom flask_login import current_user\n\ndef templated(template=None):\n    \"\"\"Template decorator.\n    Ref: http://flask.pocoo.org/docs/patterns/viewdecorators/\n    \"\"\"\n    def decorator(f):\n        @wraps(f)\n        def decorated_function(*args, **kwargs):\n            template_name = template\n            if template_name is None:\n                template_name = request.endpoint.replace('.', '/') + '.html'\n            context = f(*args, **kwargs)\n            if context is None:\n                context = {}\n            elif not isinstance(context, dict):\n                return context\n            return render_template(template_name, **context)\n        return decorated_function\n    return decorator\n\n\ndef public_endpoint(function):\n    function.is_public = True\n    return function\n\n\ndef institute_and_case(store, institute_id, case_name=None):\n    \"\"\"Fetch institute and case objects.\"\"\"\n    institute_obj = store.institute(institute_id)\n    if institute_obj is None and institute_id != 'favicon.ico':\n        flash(\"Can't find institute: {}\".format(institute_id), 'warning')\n        return abort(404)\n\n    case_obj = None\n    if case_name:\n        case_obj = store.case(institute_id=institute_id, display_name=case_name)\n        if case_obj is None:\n            return abort(404)\n\n    # validate that the user has access to the institute\n    if not current_user.is_admin:\n        if institute_id not in current_user.institutes:\n            if not case_name or not any(inst_id in case_obj['collaborators'] for inst_id in\n                                        current_user.institutes):\n                # you don't have access!\n                flash(\"You don't have access to: {}\".format(institute_id), 'danger')\n                return abort(403)\n\n    # you have access!\n    if case_name:\n        return institute_obj, case_obj\n    else:\n        return institute_obj\n\n\ndef user_institutes(store, login_user):\n    \"\"\"Preprocess institute objects.\"\"\"\n    if login_user.is_admin:\n        institutes = store.institutes()\n    else:\n        institutes = [store.institute(inst_id) for inst_id in login_user.institutes]\n\n    return institutes\n","sub_path":"scout/server/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"13108068","text":"import numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom utils.utils import time_it\n\n\nclass BaseTrainer:\n    def __init__(self, test_env, log_dir):\n        self._test_env = test_env\n\n        self._log_dir = log_dir\n        self._writer = SummaryWriter(log_dir + 'tb_logs/')\n\n    @staticmethod\n    @time_it\n    def _act(\n            agent, observation, deterministic,\n            return_pi=False, require_grad=False,\n            to_numpy=True\n    ):\n        \"\"\"Method to get an action from the agent.\n\n        :param agent: an agent to sample actions from; expects observations of shape [Time, Batch, dim(obs)]\n        :param observation: np.array with a batch of observations, shape [Batch, dim(obs)]\n        :param deterministic: if True then the action is chosen as the policy mean\n        :param return_pi: defaults to False, if True then returns full policy parameters\n        :param require_grad: defaults to False, if True then the action will have gradients\n        :param to_numpy: defaults to True, if False then returns\n            action and log-prob as 'torch.tensor' instances\n        :return: action and log-prob of the action, or full policy parameters\n        \"\"\"\n\n        if require_grad:\n            action, log_prob = agent.act([observation], return_pi, deterministic)\n        else:\n            with torch.no_grad():\n                action, log_prob = agent.act([observation], return_pi, deterministic)\n        action, log_prob = action[0], log_prob[0]\n        if to_numpy:\n            action, log_prob = action.cpu().numpy(), log_prob.cpu().numpy()\n        return action, log_prob\n\n    @staticmethod\n    @time_it\n    def _env_step(env, action):\n        return env.step(action)\n\n    def _write_logs(self, tag, values, step):\n        for key, value in values.items():\n            self._writer.add_scalar(tag + key, value, step)\n\n    @time_it\n    def _test_agent_service(self, n_tests, agent, deterministic):\n        \"\"\"Tests an 'agent' by playing 'n_tests' episodes and returns the result.\n\n        :param n_tests: number of episodes to play\n        :param agent: the agent under evaluation\n        :return: dict with reward mean & std\n        \"\"\"\n        n_test_envs = self._test_env.num_envs\n        observation = self._test_env.reset()\n        env_reward = np.zeros(n_test_envs, dtype=np.float32)\n        episode_rewards = []\n\n        while len(episode_rewards) < n_tests:\n            # I do not care about time while testing\n            (action, _), _ = self._act(agent, observation, deterministic=deterministic)\n            env_step_result, _ = self._env_step(self._test_env, action)\n            observation, reward, done, _ = env_step_result\n            env_reward += reward\n            if np.any(done):\n                for i, d in enumerate(done):\n                    if d:\n                        episode_rewards.append(env_reward[i])\n                        env_reward[i] = 0.0\n\n        reward_mean = np.mean(episode_rewards)\n        reward_std = np.std(episode_rewards)\n        test_result = {\n            'reward_mean': reward_mean,\n            'reward_std': reward_std\n        }\n        return test_result\n\n    def _test_agent(self, step, n_tests, agent, deterministic=True):\n        # call the testing function and write logs\n        agent.eval()\n        test_result, test_time = self._test_agent_service(n_tests, agent, deterministic)\n        test_result['test_time'] = test_time\n        self._write_logs('test/', test_result, step)\n","sub_path":"trainers/base_trainer.py","file_name":"base_trainer.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"458582718","text":"import csv\n\nwith open(\"osoby.csv\", \"r+\", newline=\"\") as plik:\n    # create the reader\n    czytnik_csv = csv.reader(plik)\n\n    for line in czytnik_csv:\n        print(line)  # blank lines in the file come back as empty lists\n\ndane = [\"Adam\", \"Kowalski\", 33]\n\nwith open(\"osoby.csv\", \"a\", newline=\"\") as plik:\n    # create the writer; newline=\"\" keeps the writer from adding blank rows\n    zapisywacz_csv = csv.writer(plik)\n    zapisywacz_csv.writerow(dane)\n","sub_path":"a260917/csv_bulltins.py","file_name":"csv_bulltins.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"349252695","text":"\"\"\"\nmarkup library\n\"\"\"\nimport typing\n\nimport abjad\n\n### FACTORY FUNCTIONS ###\n\ndef instrument(\n    string: typing.Union[str, typing.List[str]],\n    hcenter_in: typing.Optional[abjad.Number] = 16,\n    column: bool = True,\n) -> abjad.Markup:\n    r\"\"\"\n    Makes instrument name markup.\n\n    .. container:: example\n\n        Makes instrument name markup in column:\n\n        >>> markup = mccartney.markups.instrument('Eng. horn')\n        >>> abjad.show(markup, strict=89) # doctest: +SKIP\n\n        .. docs::\n\n            >>> abjad.f(markup, strict=89)\n            \\markup {\n                \\hcenter-in\n                    #16\n                    \"Eng. horn\"\n                }\n\n    .. container:: example\n\n        Makes instrument name markup in line:\n\n        >>> markup = mccartney.markups.instrument(\n        ...     'Violin 1',\n        ...     column=False,\n        ...     )\n        >>> abjad.show(markup, strict=89) # doctest: +SKIP\n\n        .. docs::\n\n            >>> abjad.f(markup, strict=89)\n            \\markup {\n                \\hcenter-in\n                    #16\n                    \"Violin 1\"\n                }\n\n    Centers markup horizontally in 16 spaces.\n    \"\"\"\n    return make_instrument_name_markup(string, column=column, hcenter_in=hcenter_in)\n\ndef make_instrument_name_markup(string, *, column=True, hcenter_in=None):\n    \"\"\"Build the markup, stacking list parts in a column or joining them in a line.\"\"\"\n    if hcenter_in is not None:\n        assert isinstance(hcenter_in, (int, float)), repr(hcenter_in)\n    if isinstance(string, str):\n        parts = [string]\n    elif isinstance(string, list):\n        parts = string\n    else:\n        raise TypeError(string)\n    if len(parts) == 1:\n        markup = abjad.Markup(parts[0])\n    elif column:\n        markups = [abjad.Markup(_) for _ in parts]\n        markup = abjad.Markup.center_column(markups, direction=None)\n    else:\n        markups = [abjad.Markup(_) for _ in parts]\n        markups = abjad.MarkupList(markups)\n        markup = markups.line()\n    if hcenter_in is not None:\n        markup = markup.hcenter_in(hcenter_in)\n    return markup\n\ndef short_instrument(\n    string: str, hcenter_in: abjad.Number = 10, column: bool = True\n) -> abjad.Markup:\n    r\"\"\"\n    Makes short instrument name markup.\n\n    .. container:: example\n\n        Makes short instrument name markup in column:\n\n        >>> markup = mccartney.markups.short_instrument('Eng. hn.')\n        >>> abjad.show(markup, strict=89) # doctest: +SKIP\n\n        .. docs::\n\n            >>> abjad.f(markup, strict=89)\n            \\markup {\n                \\hcenter-in\n                    #10\n                    \"Eng. hn.\"\n                }\n\n    .. container:: example\n\n        Makes short instrument name markup in line:\n\n        >>> markup = mccartney.markups.short_instrument(\n        ...     'Vn. 1',\n        ...     column=False,\n        ...     )\n        >>> abjad.show(markup, strict=89) # doctest: +SKIP\n\n        .. docs::\n\n            >>> abjad.f(markup, strict=89)\n            \\markup {\n                \\hcenter-in\n                    #10\n                    \"Vn. 1\"\n                }\n\n    Centers markup horizontally in 10 spaces.\n    \"\"\"\n    return make_instrument_name_markup(string, column=column, hcenter_in=hcenter_in)\n","sub_path":"rill/materials/instruments/markups.py","file_name":"markups.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"629672594","text":"import asyncio, nest_asyncio\nfrom aiogram import Bot, dispatcher, executor\nfrom config import BOT_TOKEN, BOT_ADMINS\n\nbot = Bot(BOT_TOKEN)\ndp = dispatcher.Dispatcher(bot)\n\nasync def send_msg(dp):\n    \"\"\"\n    Notifies the admins about an API server error.\n    \"\"\"\n\n    for admin in BOT_ADMINS:\n        await bot.send_message(admin, 'Something is wrong with the API :(')\n\n    asyncio.get_event_loop().stop()\n\ndef notify():\n    \"\"\"\n    Starts the bot and immediately calls\n    the `send_msg` function to deliver the notification.\n    \"\"\"\n\n    nest_asyncio.apply()\n    executor.start_polling(dp, on_startup=send_msg)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"243510147","text":"#!/usr/bin/python3\nfrom PIL import Image, ImageFilter\nimport sys\n\n# Try loading image\ntry:\n    testImage = Image.open(\"test.png\")\n# Can't find image\nexcept IOError:\n    print(\"Can't load image\")\n    sys.exit(1)\n\n# Show loaded image\ntestImage.show()\n\n# Blur image\nblurred = testImage.filter(ImageFilter.BLUR)\nblurred.show()\nblurred.save(\"blurred.png\")\n\n# Grayscale image\ngrayed = testImage.convert('L')\ngrayed.show()\ngrayed.save(\"grayed.png\")\n\n# Rotate image\nrotated = testImage.rotate(180)\nrotated.show()\nrotated.save(\"rotated.png\")\n","sub_path":"python/lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"163563575","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .clf_base import Net as _Net\n\nclass Net(_Net):\n\n    def __init__(self, n_classes=40, d_key=-1, use_fixed_layer=False):\n        super().__init__(n_classes, 512, d_key, use_fixed_layer)\n\n        self.conv = nn.Sequential(\n            nn.Conv2d(1, 32, 5, 1),\n            nn.LeakyReLU(),\n            nn.MaxPool2d(3, 3),\n            nn.Conv2d(32, 64, 5, 1),\n            nn.LeakyReLU(),\n            nn.MaxPool2d(2, 2),\n            nn.Conv2d(64, 128, 5, 1),\n            nn.LeakyReLU(),\n            nn.MaxPool2d(2, 2)\n        )\n","sub_path":"src/networks/clf_olivetti.py","file_name":"clf_olivetti.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"285581900","text":"\"\"\" Written by: Riley Everett Student ID: 973838691 \"\"\"\n\n# import needed libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sn\nfrom datetime import datetime\nfrom scipy.special import expit\n\n\ndef derivative(val):\n    return val * (1.0 - val)\n\n\n# get start time\nstart = datetime.now()\n\n# set constant values for experiment\nlearning_rate = 0.1\nepochs = 50\nhidden_units = 100\noutput_units = 10\ninput_units = 784\nweight_gen_min = -0.05\nweight_gen_max = 0.05\ntraining_ex = 60000\nexp2 = True\nsub_div = 0.25\n\n\n# import training and validation data from file\nfile_data = np.loadtxt('mnist_train.csv', dtype=str, delimiter=',')\nvalidation_data = np.loadtxt('mnist_validation.csv', dtype=str, delimiter=',')\n\n# shuffle training data and break into labels and image 
data\nnp.random.shuffle(file_data)\nif not exp2:\n    training_labels = np.asarray(file_data[:, 0:1], dtype='float')\n    training_data = np.asarray(file_data[:, 1:], dtype='float')\nelse:\n    # experiment 2: build a class-balanced subset of the training data\n    num_rows = int(sub_div * training_ex)\n    train_temp = np.zeros((num_rows, 785))\n    row_count = 0\n    counter_arr = np.zeros(10)\n    limit = num_rows / 10\n    for i in range(training_ex):\n        if row_count == num_rows:\n            break\n        curr = file_data[i, :]\n        if counter_arr[int(curr[0])] != limit:\n            train_temp[row_count] = curr\n            counter_arr[int(curr[0])] += 1\n            row_count += 1\n\n    training_labels = np.asarray(train_temp[:, 0:1], dtype='float')\n    training_data = np.asarray(train_temp[:, 1:], dtype='float')\n\n# shuffle validation data and break into labels and image data\nnp.random.shuffle(validation_data)\nvalidation_labels = np.asarray(validation_data[:, 0:1], dtype='float')\nvalidation_data = np.asarray(validation_data[:, 1:], dtype='float')\n\n# get count of training examples\ntraining_count = len(training_data[:, 0])\nvalidation_count = len(validation_data[:, 0])\n\n# make an array to divide data by\ndiv_by = np.full((training_count, input_units), 255)\nval_div_by = np.full((validation_count, input_units), 255)\n\n# divide each data value in the data by 255 so they are between 0 - 1\ntraining_data = np.divide(training_data, div_by)\nvalidation_data = np.divide(validation_data, val_div_by)\n\n# generate random weights and bias for network\ninput_weights = np.random.uniform(low=weight_gen_min, high=weight_gen_max, size=(hidden_units, input_units))\noutput_weights = np.random.uniform(low=weight_gen_min, high=weight_gen_max, size=(output_units, hidden_units))\ninput_bias = np.random.uniform(low=weight_gen_min, high=weight_gen_max, size=(hidden_units, 1))\noutput_bias = np.random.uniform(low=weight_gen_min, high=weight_gen_max, size=(output_units, 1))\n\n# prediction accuracy lists\nprediction_accuracy = np.zeros(epochs)\nvalidation_accuracy = np.zeros(epochs)\n\n# loop through 50 epochs\nfor epoch in range(epochs):\n    # lists to store label targets and predictions for accuracy calculation\n    test_prediction_list = []\n    test_target_list = []\n    val_prediction_list = []\n    val_target_list = []\n\n    # values for tracking accuracy\n    test_correct = 0\n    val_correct = 0\n\n    # run through all training examples\n    for row in range(training_count):\n        # array for storing prediction values\n        cur_predictions = []\n        # list of input node activations to be sent to the hidden layer\n        activation_list = []\n        output_error = []\n        hidden_error = []\n        # create target and output matrix to compare prediction to actual label\n        target = int(training_labels[row])\n        test_target_list.append(target)\n\n        label_matrix = np.full(output_units, 0.1)\n        label_matrix[target] = 0.9\n        output_matrix = np.zeros(output_units)\n\n        # compute the hidden layer activations for this example\n        for unit in range(hidden_units):\n            unit_output = np.dot(training_data[row, :], input_weights[unit, :]) + (1 * input_bias[unit])\n            activation_list.append(float(expit(unit_output)))\n\n        for percep in range(output_units):\n            # calculate prediction using perceptron algorithm\n            percep_output = np.dot(activation_list, output_weights[percep, :]) + (1 * output_bias[percep])\n            output_matrix[percep] = expit(percep_output)\n\n            # collect prediction values\n            cur_predictions.append(output_matrix[percep])\n\n            # get error value for output layer\n            output_error.append((label_matrix[percep] - output_matrix[percep]) * derivative(output_matrix[percep]))\n\n        # add the largest prediction value to an array of predictions\n        prediction = np.argmax(np.array(cur_predictions))\n        test_prediction_list.append(prediction)\n\n        # update weights for output layer; each weight update is scaled by the\n        # corresponding hidden activation (the gradient w.r.t. that weight)\n        for o in range(output_units):\n            output_bias[o, 0] += (learning_rate * output_error[o])\n            output_weights[o, :] = output_weights[o, :] + (learning_rate * output_error[o] * np.array(activation_list))\n\n        # get error for hidden layer\n        for h in range(hidden_units):\n            error = np.dot(output_weights[:, h], output_error)\n            hidden_error.append(error * derivative(activation_list[h]))\n\n        # update weights for hidden layer\n        for h in range(hidden_units):\n            input_bias[h, 0] += (learning_rate * hidden_error[h])\n            input_weights[h, :] = input_weights[h, :] + (learning_rate * hidden_error[h] * training_data[row, :])\n\n    # check for correctness in training predictions\n    for index in range(training_count):\n        if test_prediction_list[index] == int(test_target_list[index]):\n            test_correct += 1\n\n    # run through all validation examples\n    for row in range(validation_count):\n        # array for storing prediction values\n        cur_val_predictions = []\n        # list of input node activations to be sent to the hidden layer\n        val_activation_list = []\n        # create target and output matrix to compare prediction to actual label\n        val_target = int(validation_labels[row])\n        val_target_list.append(val_target)\n\n        for unit in range(hidden_units):\n            unit_output = np.dot(validation_data[row, :], input_weights[unit, :]) + (1 * input_bias[unit])\n            val_activation_list.append(float(expit(unit_output)))\n\n        for percep in range(output_units):\n            # calculate prediction using perceptron algorithm\n            percep_output = np.dot(val_activation_list, output_weights[percep, :]) + (1 * output_bias[percep])\n\n            # collect prediction values\n            cur_val_predictions.append(expit(percep_output))\n\n        # add the largest prediction value to an array of predictions\n        val_prediction_list.append(np.argmax(np.array(cur_val_predictions)))\n\n    for index in range(validation_count):\n        if val_prediction_list[index] == int(val_target_list[index]):\n            val_correct += 1\n\n    # calculate and print accuracy\n    prediction_accuracy[epoch] = (test_correct / training_count) * 100\n    validation_accuracy[epoch] = (val_correct / validation_count) * 100\n    print('Epoch {}: '.format(epoch), end='')\n    print('training accuracy = {0:.2f}% '.format(prediction_accuracy[epoch]), end='')\n    print('correct is {} / {}\\n'.format(test_correct, training_count), end='')\n    print('validation accuracy = {0:.2f}% '.format(validation_accuracy[epoch]), end='')\n    print('correct is {} / {}\\n'.format(val_correct, validation_count))\n\n    if epoch == epochs - 1:\n        # create and plot confusion matrix\n        cm = np.zeros((10, 10))\n\n        for i in range(validation_count):\n            cm[val_target_list[i], val_prediction_list[i]] += 1\n        df_cm = pd.DataFrame(cm, index=[i for i in \"0123456789\"],\n                             columns=[i for i in \"0123456789\"])\n        plt.figure(figsize=(10, 7))\n        sn.heatmap(df_cm, annot=True)\n        plt.savefig('EX2.25_CM.png')\n\n# print run time\nprint('Program Runtime: {}'.format(datetime.now() - start))\n\n# plot and save accuracy graphs\ntest_accuracy = []\nval_accuracy = []\n\nfor i in range(epochs):\n    test_accuracy.append(prediction_accuracy[i])\n    val_accuracy.append(validation_accuracy[i])\n\nx = np.arange(epochs)\nacc_fig = plt.figure()\nplt.xlabel('Epochs', fontsize=12)\nplt.ylabel('Accuracy', fontsize=12)\nplt.plot(x, test_accuracy, linewidth=2, label='Test Accuracy')\nplt.plot(x, val_accuracy, linewidth=2, label='Validation Accuracy')\nplt.legend(loc='best')\nacc_fig.savefig('EX2.25_ACC.png')\n","sub_path":"Python Machine Learning Projects/NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"571479212","text":"import requests\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nfile_handler = logging.FileHandler('google-geo.log')\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\nGOOGLE_MAPS_API_URL = 'https://maps.googleapis.com/maps/api/geocode/json'\n\nclass Geohandler():\n    def __init__(self, API_KEY):\n        self.API_KEY = API_KEY\n\n    def getAddress(self, lat, lng):\n        params = {'latlng': str(lat) + ',' + str(lng), 'key': self.API_KEY}\n        # Do the request and get the response data\n        req = None  # make sure req exists even if the request raises\n        try:\n            req = requests.get(GOOGLE_MAPS_API_URL, params=params)\n\n        except Exception as e:\n            logger.exception(\"Problem getting address: \" + str(e))\n\n        return req.json() if req else None\n\n    def getPostCode(self, json_data):\n        try:\n            # Add safety check\n            if json_data['results']:\n                address_components = json_data['results'][0][\"address_components\"]\n\n                for element in address_components:\n                    # 'types' can contain more than one entry, so check membership\n                    if \"postal_code\" in element['types']:\n                        return element['short_name']\n        except Exception as e:\n            logger.exception(\"Problem getting post-code: \" + str(e))\n","sub_path":"geoHandler.py","file_name":"geoHandler.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"579595618","text":"\"\"\"empty message\n\nRevision ID: 4f543ebf633c\nRevises: ed3f1b106de4\nCreate Date: 2020-02-19 16:18:43.431987\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4f543ebf633c'\ndown_revision = 'ed3f1b106de4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('user_product_list')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('user_product_list',\n    sa.Column('Id', sa.INTEGER(), server_default=sa.text('nextval(\\'\"user_product_list_Id_seq\"\\'::regclass)'), autoincrement=True, nullable=False),\n    sa.Column('productId', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.Column('quantity', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.Column('orderId', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.Column('userId', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.ForeignKeyConstraint(['orderId'], ['order.id'], name='user_product_list_orderId_fkey'),\n    sa.ForeignKeyConstraint(['userId'], ['customers.id'], name='user_product_list_userId_fkey'),\n    sa.PrimaryKeyConstraint('Id', name='user_product_list_pkey')\n    )\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/4f543ebf633c_.py","file_name":"4f543ebf633c_.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"628192722","text":"from django.shortcuts import render, redirect\nfrom .models import Course\n\n# Create your views here.\ndef index(request):\n\tcontext = {\n\t\t'courses': Course.objects.all()\n\t}\n\treturn render(request, 'CoursesApp/index.html', context)\n\ndef courses(request):\n\tCourse.objects.create(name=request.POST['courseName'], description=request.POST['courseDescription'])\n\treturn redirect('/')\n\ndef destroy(request, id):\n\tif request.method == 'POST':\n\t\tCourse.objects.filter(id=id).delete()\n\t\treturn redirect('/')\n\n\tcontext = {\n\t\t'courses': Course.objects.filter(id=id)\n\t}\n\treturn render(request, 'CoursesApp/destroy.html', context)\n","sub_path":"RyanZambrano/CoursesDjango/apps/CoursesApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"349297966","text":"import os\nimport glob\nimport json\nimport csv\nimport pickle\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.text import text_to_word_sequence\n\n\ndef raw_data_parser(data_tsv):\n    # input: raw data\n    # output: corpus list[str]\n    corpus = []\n    with open(data_tsv, newline='') as tsvfile:\n        reader = csv.reader(tsvfile, delimiter='\\t', quotechar='|')\n        record = ''\n        for i, row in enumerate(reader):\n            if row[1].upper() == \"U\" or row[1].upper() == \"R\":\n                record += ('\\t' + row[2])\n            elif row[1].lower() == \"flag\":\n                corpus.append(row[2] + record)\n                record = ''\n            else:\n                continue\n    return corpus\n\ndef _raw_data_parser(data_tsv):\n    corpus = []\n    with open(data_tsv, newline='') as tsvfile:\n        reader = csv.reader(tsvfile, delimiter='\\t', quotechar='|')\n        record = ''\n        for i, row in enumerate(reader):\n            if i == 0:\n                continue\n            record += (row[4] + '\\t')\n            sentences = row[2].split('__EOS__')\n            context = \"\\t\".join(sentences)\n            record += (context + '\\t' + row[3])\n            corpus.append(record)\n            record = ''\n    return corpus\n\ndef dump_corpus_to_txt(corpus, data_tsv):\n    # input: corpus list[str]\n    # output: tab delimited txt file\n\n    filename = os.path.splitext(data_tsv)[0] + '.txt'\n    with open(filename, \"w\") as f:\n        for sentence in corpus:\n            print(sentence, file=f)\n\ndef _merge_multiple_txt(file_path):\n    filenames = []\n    for filename in glob.glob(os.path.join(file_path, '*.txt')):\n        filenames.append(filename)\n    with open('all_data.txt', 'w') as outfile:\n        for fname in filenames:\n            with open(fname) as infile:\n                for line in infile:\n                    outfile.write(line)\n\ndef read_txt_file(data_txt):\n    # input: tab delimited txt file\n    # output: corpus list[str]\n    corpus = []\n    with open(data_txt, \"r\") as f:\n        lines = f.readlines()\n        for line in lines:\n            corpus.append(line)\n    return corpus\n\ndef get_texts(corpus):\n    texts = []\n    for line in corpus:\n        for i, block in enumerate(line.split('\\t')):\n            if i == 0:\n                continue\n            texts.append(block)\n    return texts\n\ndef generate_word_dict(texts):\n    tokenizer = Tokenizer(lower=True)\n    tokenizer.fit_on_texts(texts)\n    return tokenizer.word_index\n\ndef dump_word_dict_to_json(word_dict):\n    with open('word_dict.json', 'w') as jsonFile:\n        json.dump(word_dict, jsonFile)\n\ndef _get_tokens(texts):\n    tokens = []\n    for line in texts:\n        tokens.append(text_to_word_sequence(line))\n    return tokens\n\ndef _word_to_sequence(tokens, word_dict):\n    for line in tokens:\n        for i, word in enumerate(line):\n            line[i] = word_dict[word]\n    return tokens\n\ndef get_sequence_tokens(corpus, word_dict):\n    sequence_tokens = {'y': [], 'c': [], 'r': []}\n    for line in corpus:\n        blocks = line.split('\\t')\n        context = []\n        for i, block in enumerate(blocks):\n            if i == 0:\n                sequence_tokens['y'].append(int(block))\n            else:\n                tokens = text_to_word_sequence(block)\n                for j, word in enumerate(tokens):\n                    tokens[j] = word_dict[word]\n                if i == len(blocks) - 1:\n                    sequence_tokens['r'].append(tokens)\n                else:\n                    context.extend(tokens)\n        sequence_tokens['c'].append(context)\n    return sequence_tokens\n\n\ndef get_sequence_tokens_with_turn(corpus, word_dict):\n    # this function generates a dataset with keys 'c', 'r', 'y'; the multiple turns\n    # are separated with token id 28270 (__EOS__) to align with reader.py\n    sequence_tokens = {'y': [], 'c': [], 'r': []}\n    for line in corpus:\n        blocks = line.split('\\t')\n        context = []\n        for i, block in enumerate(blocks):\n            if i == 0:\n                sequence_tokens['y'].append(int(block))\n            elif i == len(blocks) - 1:\n                context.pop(-1)\n                sequence_tokens['c'].append(context)\n                tokens = text_to_word_sequence(block)\n                for j, word in enumerate(tokens):\n                    tokens[j] = word_dict[word]\n                sequence_tokens['r'].append(tokens)\n            else:\n                tokens = text_to_word_sequence(block)\n                for j, word in enumerate(tokens):\n                    tokens[j] = word_dict[word]\n                context.extend(tokens)\n                context.append(28270)\n\n    return sequence_tokens\n\ndef generate_train_valid_test_data(sequence_tokens):\n    # input: sequence_tokens dictionary\n    # output: tuple of sequence_tokens dictionaries, namely training, validation and test set\n    pass\n\ndef dump_data_to_pkl(data, filename):\n    # input: tuple of ready data\n    # output: pkl format ready data\n    filename = filename + '.pickle'\n    with open(filename, 'wb') as handle:\n        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nif __name__ == '__main__':\n    corpus = read_txt_file(\"../data/original_data2.txt\")\n    texts = get_texts(corpus)\n    word_count = 0\n    dialog_count = 0\n    for line in texts:\n        dialog_count += 1\n        for text in line:\n            word_count += 1\n    print(word_count)\n    print(dialog_count)\n    # word_dict = generate_word_dict(texts)\n    # dump_word_dict_to_json(word_dict)\n    # sequence_tokens = get_sequence_tokens(corpus, word_dict)\n    # dump_data_to_pkl(sequence_tokens, 'all_data')\n    # with open('original_data.pickle', 'rb') as handle:\n    #     b = pickle.load(handle)\n    # print(b)\n\n\n    # for filename in glob.glob(os.path.join(file_path, '*.txt')):\n    #     corpus = raw_data_parser(filename)\n    #     dump_corpus_to_txt(corpus, 
filename)","sub_path":"preprocess/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"131328577","text":"from django.shortcuts import render, get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django import forms\nfrom django.views.generic.edit import CreateView\nfrom django.contrib.auth import login, logout as auth_logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\n\n#from django.models import UserManager\n\nfrom seatingchart.forms import CourseForm, StudentFormSet, UserCreationForm, CourseOnlyForm, StudentForm, CourseSelectForm\nfrom seatingchart.models import Course, Student\n\n# Create your views here.\n\ndef logout(request):\n    # use the aliased auth logout so this view does not recurse into itself\n    auth_logout(request)\n    # Redirect to a success page.\n    return HttpResponseRedirect(reverse('loggedout'))\n\ndef createaccount(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            new_user = User.objects.create_user(**form.cleaned_data)\n            login(request, new_user)\n            return HttpResponseRedirect(reverse('createaccount_success'))\n    else:\n        form = UserCreationForm(None)\n\n    context = {'form': form}\n\n    return TemplateResponse(request, 'registration/createaccount.html', context)\n\n@login_required\ndef createaccount_success(request):\n    context = {}\n    return TemplateResponse(request, 'registration/createaccount_success.html', context)\n\n@login_required\ndef loggedin(request):\n    context = {}\n    return TemplateResponse(request, 'registration/loggedin.html', context)\n\ndef loggedout(request):\n    context = {}\n    return TemplateResponse(request, 'registration/loggedout.html', context)\n\n\n@login_required\ndef create_course(request):\n    if request.method == 'POST':\n        form = CourseOnlyForm(request.POST)\n        if form.is_valid():\n            #new_course = blah\n            course = form.save(commit=False)\n            #print(\"Course in course_create_course is {}\".format(course))\n            #request.session['course'] = course.pk\n            # print(\"Course/course.pk in course_create_course is {}\".format(course.pk))\n            # request.session['course_id'] = course.id\n            #print(\"Course/course.id in course_create_course is {}\".format(course.id))\n            # request.session['class_size'] = course.class_size\n            course.owner = request.user\n            course.save()\n            return HttpResponseRedirect(reverse('select_course'))\n    else:\n        form = CourseOnlyForm(None)\n\n    context = {'form': form}\n    return TemplateResponse(request, 'create_course.html', context)\n\n@login_required\ndef create_course_success(request):\n    context = {}\n    return TemplateResponse(request, 'create_course_success.html', context)\n\n@login_required\ndef select_course(request):\n    allowed_courses = Course.objects.filter(owner=request.user)\n    form = CourseSelectForm(request.POST or None)\n    if request.method == 'POST' and form.is_valid():\n        course = form.save(commit=False)\n        # store only the pk; session values must be serializable\n        request.session['course_id'] = course.pk\n        return HttpResponseRedirect(reverse('create_student'))\n\n    context = {\n        'form': form,\n        'allowed_courses': allowed_courses\n    }\n    return TemplateResponse(request, 'select_course.html', context)\n\n\n@login_required\ndef create_student(request):\n\n    # course = request.POST.get('course', '')\n    #class_size = request.session[\"class_size\"]\n    course_id = request.session['course_id']\n    course = get_object_or_404(Course, pk=course_id)\n\n    student_form_set = forms.modelformset_factory(Student, form=StudentForm, extra=(course.class_size - 1))\n    if request.method == 'POST':\n        formset = student_form_set(request.POST)\n        if formset.is_valid():\n            print('Cleaned data is {}'.format(formset.cleaned_data))\n            formset.save()\n            return HttpResponseRedirect(reverse('create_student_success'))\n    else:\n        formset = student_form_set()\n\n    allowed_courses = Course.objects.filter(owner=request.user)\n    context = {\n        'course': course,\n        'formset': formset,\n        'allowed_courses': allowed_courses,\n        'class_size': course.class_size\n    }\n    return TemplateResponse(request, 'create_student.html', context)\n\n@login_required\ndef create_student_success(request):\n    context = {}\n    return TemplateResponse(request, 'create_student_success.html', context)\n\n\n@login_required\ndef chart(request):\n    context = {}  # selected course\n    return TemplateResponse(request, 'chart.html', context)\n\n\nclass CourseCreateView(CreateView):\n    model = Course\n    template_name = 'index.html'\n    form_class = CourseForm\n    object = None\n\n    def get(self, request, *args, **kwargs):\n        \"\"\"\n        Handles GET requests and instantiates blank versions of the form\n        and its inline formsets.\n        \"\"\"\n        self.object = None\n        form_class = self.get_form_class()\n        form = self.get_form(form_class)\n        student_form = StudentFormSet()\n        return self.render_to_response(\n            self.get_context_data(form=form,\n                                  student_form=student_form,\n                                  )\n        )\n\n    def post(self, request, *args, **kwargs):\n        \"\"\"\n        Handles POST requests, instantiating a form instance and its inline\n        formsets with the passed POST variables and then checking them for\n        validity.\n        \"\"\"\n        self.object = None\n        form_class = self.get_form_class()\n        form = self.get_form(form_class)\n        student_form = StudentFormSet(self.request.POST)\n        if form.is_valid() and student_form.is_valid():\n            return self.form_valid(form, student_form)\n        else:\n            return self.form_invalid(form, student_form)\n\n    def form_valid(self, form, student_form):\n        \"\"\"\n        Called if all forms are valid. Creates the Course instance along with the\n        associated Student instances, then redirects to the success url.\n\n        Args:\n            form: Course Form\n            student_form: Student Form\n\n        Returns: an HttpResponse to success url\n\n        \"\"\"\n        self.object = form.save()\n        # # pre-processing for Student here...\n        # self.object.save()\n\n        # saving Student instances\n        students = student_form.save(commit=False)\n        for s in students:\n            s.save()\n\n        return HttpResponseRedirect(self.get_success_url())\n\n    def form_invalid(self, form, student_form):\n        \"\"\"\n        Called if a form is invalid. Re-renders the context data with the\n        data-filled forms and errors.\n\n        Args:\n            form: Course Form\n            student_form: Student Form\n        \"\"\"\n        return self.render_to_response(\n            self.get_context_data(form=form,\n                                  student_form=student_form\n                                  )\n        )\n\n\n\n# # old\n# def index(request):\n#     form = CourseForm()\n#     if request == 'post':\n#         form = CourseForm(request.POST)\n#         if form.is_valid():\n#             new_course = form.save()\n#             new_course_mtm = form.save_mtm()\n#             return HttpResponseRedirect('Success')\n#         else:\n#             print(form.errors)\n#     else:\n#         form = CourseForm()\n#     context = {\n#         'form': form,\n#     }\n#     return render(request, 'index.html', context)\n","sub_path":"seatingchart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"576764729","text":"# Addons: \"HitLog\"\n# ktulho \n\nimport BigWorld\nimport ResMgr\nimport nations\nfrom Avatar import PlayerAvatar\nfrom DestructibleEntity import DestructibleEntity\nfrom Vehicle import Vehicle\nfrom VehicleEffects import DamageFromShotDecoder\nfrom constants import ATTACK_REASON\nfrom constants import ITEM_DEFS_PATH, ARENA_GUI_TYPE, VEHICLE_CLASSES\nfrom gui.battle_control import avatar_getter\nfrom helpers import dependency\nfrom items import _xml\nfrom skeletons.gui.battle_session import IBattleSessionProvider\nfrom vehicle_systems.tankStructure import TankPartIndexes\n\nimport xvm_battle.python.battle as battle\nimport xvm_main.python.config as config\nimport xvm_main.python.userprefs as userprefs\nfrom xfw.events import registerEvent\nfrom xfw_actionscript.python import *\nfrom xvm_main.python.logger import *\nfrom xvm_main.python.stats import _stat\nfrom xvm_main.python.xvm import l10n\n\nimport parser_addon\nfrom xvm.damageLog import HIT_EFFECT_CODES, keyLower, ATTACK_REASONS, RATINGS, VEHICLE_CLASSES_SHORT, ConfigCache\n\nBATTLE_TYPE = {ARENA_GUI_TYPE.UNKNOWN: \"unknown\",\n               ARENA_GUI_TYPE.RANDOM: \"regular\",\n               ARENA_GUI_TYPE.TRAINING: \"training\",\n               ARENA_GUI_TYPE.TUTORIAL: \"tutorial\",\n               ARENA_GUI_TYPE.CYBERSPORT: \"cybersport\",\n               ARENA_GUI_TYPE.EVENT_BATTLES: \"event_battles\",\n               ARENA_GUI_TYPE.RATED_SANDBOX: \"rated_sandbox\",\n               ARENA_GUI_TYPE.SANDBOX: \"sandbox\",\n               ARENA_GUI_TYPE.FALLOUT_CLASSIC: \"fallout_classic\",\n               ARENA_GUI_TYPE.FALLOUT_MULTITEAM: \"fallout_multiteam\",\n               ARENA_GUI_TYPE.SORTIE_2: \"sortie_2\",\n               ARENA_GUI_TYPE.FORT_BATTLE_2: \"fort_battle_2\",\n               ARENA_GUI_TYPE.RANKED: \"ranked\",\n               ARENA_GUI_TYPE.BOOTCAMP: \"bootcamp\",\n               ARENA_GUI_TYPE.EPIC_RANDOM: \"epic_random\",\n               ARENA_GUI_TYPE.EPIC_RANDOM_TRAINING: \"epic_random_training\",\n               ARENA_GUI_TYPE.EPIC_BATTLE: \"epic_battle\",\n               ARENA_GUI_TYPE.EPIC_TRAINING: \"epic_battle\"}\n\nHIT_LOG = 'hitLog/'\nFORMAT_HISTORY = 'formatHistory'\nGROUP_HITS_PLAYER = 'groupHitsByPlayer'\nSCROLL_LOG = 'scrollLog'\nADD_TO_END = 'addToEnd'\nLINES = 'lines'\nMOVE_IN_BATTLE = 'moveInBattle'\nHIT_LOG_ENABLED = HIT_LOG + 'enabled'\nSHOW_SELF_DAMAGE = HIT_LOG + 'showSelfDamage'\nSHOW_ALLY_DAMAGE = HIT_LOG + 'showAllyDamage'\nON_HIT_LOG = 'ON_HIT_LOG'\n\nPILLBOX = 'pillbox'\n\n\nclass HIT_LOG_SECTIONS(object):\n    LOG = HIT_LOG + 'log/'\n    ALT_LOG = HIT_LOG + 'logAlt/'\n    BACKGROUND = HIT_LOG + 'logBackground/'\n    ALT_BACKGROUND = HIT_LOG + 'logAltBackground/'\n    SECTIONS = (LOG, ALT_LOG, BACKGROUND, ALT_BACKGROUND)\n\n\n_config = ConfigCache()\n\n\ndef parser(notParsedStr, macros):\n    if notParsedStr and macros:\n        return 
parser_addon.parser_addon(notParsedStr, macros)\n return notParsedStr\n\n\ndef readColor(section, value, xvalue=None):\n\n def getColor(c, v):\n for i in c:\n if i['value'] > v:\n color = i['color']\n return '#' + color[2:] if color[:2] == '0x' else color\n return None\n\n colors = _config.get('colors/' + section)\n if value is not None and colors is not None:\n return getColor(colors, value)\n elif xvalue is not None:\n colors_x = _config.get('colors/x')\n return getColor(colors_x, xvalue)\n\n\nclass Macros(dict):\n\n def __init__(self, *a, **kw):\n dict.__init__(self, *a, **kw)\n self.chooseRating = ''\n\n def setChooseRating(self):\n scale = config.networkServicesSettings.scale\n name = config.networkServicesSettings.rating\n r = '{}_{}'.format(scale, name)\n if r in RATINGS:\n self.chooseRating = RATINGS[r]['name']\n else:\n self.chooseRating = 'xwgr' if scale == 'xvm' else 'wgr'\n\n def setCommonMacros(self):\n value = g_dataHitLog.data\n xwn8 = value.get('xwn8', None)\n xwtr = value.get('xwtr', None)\n xeff = value.get('xeff', None)\n xwgr = value.get('xwgr', None)\n self['vehicle'] = value['shortUserString']\n self['name'] = value['name']\n self['clannb'] = value['clanAbbrev']\n self['clan'] = ''.join(['[', value['clanAbbrev'], ']']) if value['clanAbbrev'] else ''\n self['level'] = value['level']\n self['clanicon'] = value['clanicon']\n self['squad-num'] = value['squadnum']\n self['alive'] = 'al' if value['isAlive'] else None\n self['splash-hit'] = 'splash' if value['splashHit'] else None\n self['critical-hit'] = 'crit' if value['criticalHit'] else None\n self['wn8'] = value.get('wn8', None)\n self['xwn8'] = value.get('xwn8', None)\n self['wtr'] = value.get('wtr', None)\n self['xwtr'] = value.get('xwtr', None)\n self['eff'] = value.get('eff', None),\n self['xeff'] = value.get('xeff', None)\n self['wgr'] = value.get('wgr', None)\n self['xwgr'] = value.get('xwgr', None)\n self['xte'] = value.get('xte', None)\n self['r'] = '{{%s}}' % self.chooseRating\n self['xr'] = '{{%s}}' % self.chooseRating if self.chooseRating[0] == 'x' else '{{x%s}}' % self.chooseRating\n self['c:r'] = '{{c:%s}}' % self.chooseRating\n self['c:xr'] = '{{c:%s}}' % self.chooseRating if self.chooseRating[0] == 'x' else '{{c:x%s}}' % self.chooseRating\n self['c:wn8'] = readColor('wn8', value.get('wn8', None), xwn8)\n self['c:xwn8'] = readColor('x', xwn8)\n self['c:wtr'] = readColor('wtr', value.get('wtr', None), xwtr)\n self['c:xwtr'] = readColor('x', xwtr)\n self['c:eff'] = readColor('eff', value.get('eff', None), xeff)\n self['c:xeff'] = readColor('x', xeff)\n self['c:wgr'] = readColor('wgr', value.get('wgr', None), xwgr)\n self['c:xwgr'] = readColor('x', xwgr)\n self['c:xte'] = readColor('x', value.get('xte', None))\n self['diff-masses'] = value.get('diff-masses', None)\n self['nation'] = value.get('nation', None)\n self['blownup'] = 'blownup' if value['blownup'] else None\n self['vehiclename'] = value.get('attackerVehicleName', None)\n self['battletype-key'] = value.get('battletype-key', ARENA_GUI_TYPE.UNKNOWN)\n self['dmg-deviation'] = value['damageDeviation'] * 100 if value['damageDeviation'] is not None else None\n\n\nclass DataHitLog(object):\n\n guiSessionProvider = dependency.descriptor(IBattleSessionProvider)\n\n def __init__(self):\n self.player = None\n self.shells = {}\n self.macros = Macros()\n self.reset()\n self.ammo = None\n\n def reset(self):\n self.shellType = None\n self.playerVehicleID = None\n self.vehHealth = {}\n self.vehDead = []\n self.shells.clear()\n self.macros.clear()\n self.totalDamage 
= 0\n self.old_totalDamage = 0\n self.isVehicle = True\n self.entityNumber = None\n self.vehicleID = None\n self.intCD = None\n self.splashHit = False\n self.criticalHit = False\n self.compName = 'unknown'\n self.battletypeKey = 'unknown'\n self.data = {\n 'damage': 0,\n 'dmgRatio': 0,\n 'attackReasonID': 0,\n 'blownup': False,\n # 'hitEffect': None,\n 'costShell': 'unknown',\n 'shellKind': None,\n 'splashHit': False,\n 'criticalHit': False,\n 'isAlive': True,\n 'compName': None,\n 'attackedVehicleType': 'not_vehicle',\n 'shortUserString': None,\n 'level': None,\n 'nation': None,\n 'diff-masses': 0,\n 'name': None,\n 'clanAbbrev': None,\n 'clanicon': None,\n 'squadnum': None,\n 'teamDmg': 'unknown',\n 'damageDeviation': None,\n 'attackerVehicleName': '',\n 'battletype-key': 'unknown'\n }\n\n def updateLabels(self):\n self.macros.setCommonMacros()\n g_hitLogs.output()\n self.splashHit = False\n\n def setRatings(self):\n if (_stat.resp is not None) and (self.data['name'] in _stat.resp['players']):\n stats = _stat.resp['players'][self.data['name']]\n self.data['wn8'] = stats.get('wn8', None)\n self.data['xwn8'] = stats.get('xwn8', None)\n self.data['wtr'] = stats.get('wtr', None)\n self.data['xwtr'] = stats.get('xwtr', None)\n self.data['eff'] = stats.get('e', None)\n self.data['xeff'] = stats.get('xeff', None)\n self.data['wgr'] = stats.get('wgr', None)\n self.data['xwgr'] = stats.get('xwgr', None)\n self.data['xte'] = stats.get('v').get('xte', None)\n\n def getTeamDmg(self, vInfo):\n if self.isVehicle:\n if vInfo.team != self.player.team:\n return 'enemy-dmg'\n return 'player' if vInfo.player.name == self.player.name else 'ally-dmg'\n return self.data['teamDmg']\n\n def resetData(self):\n self.data['attackedVehicleType'] = 'not_vehicle'\n self.data['shortUserString'] = ''\n self.data['attackerVehicleName'] = ''\n self.data['level'] = None\n self.data['nation'] = None\n self.data['diff-masses'] = None\n self.data['name'] = ''\n self.data['clanAbbrev'] = ''\n self.data['clanicon'] = None\n self.data['squadnum'] = None\n self.data['wn8'] = None\n self.data['xwn8'] = None\n self.data['wtr'] = None\n self.data['xwtr'] = None\n self.data['eff'] = None\n self.data['xeff'] = None\n self.data['wgr'] = None\n self.data['xwgr'] = None\n self.data['xte'] = None\n self.data['teamDmg'] = 'unknown'\n self.data['costShell'] = 'unknown'\n self.data['shellKind'] = 'not_shell'\n self.data['damageDeviation'] = None\n\n def updateData(self):\n maxHealth = self.vehHealth[self.vehicleID]['maxHealth'] if self.vehicleID in self.vehHealth else 0\n self.data['dmgRatio'] = self.data['damage'] * 100 // maxHealth if maxHealth != 0 else 0\n if self.vehicleID:\n attacked = self.player.arena.vehicles.get(self.vehicleID)\n if attacked is not None:\n vehicleType = attacked['vehicleType']\n self.data['name'] = attacked['name']\n self.data['clanAbbrev'] = attacked['clanAbbrev']\n if vehicleType:\n _type = vehicleType.type\n self.data['attackedVehicleType'] = list(_type.tags.intersection(VEHICLE_CLASSES))[0]\n self.data['attackerVehicleName'] = vehicleType.name.replace(':', '-', 1) if vehicleType.name else ''\n self.data['shortUserString'] = _type.shortUserString\n self.data['level'] = vehicleType.level\n self.data['nation'] = nations.NAMES[_type.customizationNationID]\n if self.data['attackReasonID'] == 2:\n self.data['diff-masses'] = (self.player.vehicleTypeDescriptor.physics['weight'] - vehicleType.physics['weight']) / 1000.0\n self.setRatings()\n elif not self.isVehicle:\n self.data['shortUserString'] = 
l10n(PILLBOX).format(self.entityNumber)\n self.compName = None\n self.criticalHit = None\n self.data['clanicon'] = _stat.getClanIcon(self.vehicleID)\n arenaDP = self.guiSessionProvider.getArenaDP()\n if arenaDP is not None:\n vInfo = arenaDP.getVehicleInfo(vID=self.vehicleID)\n self.data['squadnum'] = vInfo.squadIndex if vInfo.squadIndex != 0 else None\n self.data['teamDmg'] = self.getTeamDmg(vInfo)\n self.data['splashHit'] = self.splashHit\n self.data['criticalHit'] = self.criticalHit\n self.data['compName'] = self.compName\n self.data['battletype-key'] = self.battletypeKey\n self.updateLabels()\n\n def loaded(self):\n self.intCD = self.ammo.getCurrentShellCD()\n\n def setParametersShot(self):\n if self.intCD is not None:\n _shells = self.shells[self.intCD]\n self.data['shellKind'] = _shells['shellKind']\n self.data['costShell'] = _shells['costShell']\n\n def getDamageDeviation(self, newHealth):\n result = None\n if newHealth > 0 and self.intCD in self.shells:\n _shells = self.shells[self.intCD]\n result = (self.data['damage'] - _shells['shellDamage']) / float(_shells['shellDamage'])\n if (_shells['shellKind'] in ['high_explosive', 'armor_piercing_he']) and (result < -0.25):\n result = 0.0\n return result\n\n def onHealthChanged(self, vehicle, newHealth, attackerID, attackReasonID, isVehicle=True):\n self.resetData()\n self.isVehicle = isVehicle\n self.vehicleID = vehicle.id\n self.data['isAlive'] = vehicle.isAlive()\n if attackReasonID < 8:\n self.data['attackReasonID'] = attackReasonID\n elif attackReasonID in [9, 10, 13, 24]:\n self.data['attackReasonID'] = 24\n elif attackReasonID in [11, 14, 25]:\n self.data['attackReasonID'] = 25\n self.data['blownup'] = newHealth <= -5\n newHealth = max(0, newHealth)\n self.data['damage'] = (self.vehHealth[vehicle.id]['health'] - newHealth) if vehicle.id in self.vehHealth else (- newHealth)\n if self.data['attackReasonID'] != 0:\n self.criticalHit = False\n self.splashHit = False\n self.compName = None\n else:\n self.setParametersShot()\n self.data['damageDeviation'] = self.getDamageDeviation(newHealth)\n if not self.isVehicle:\n self.entityNumber = vehicle.destructibleEntityID\n self.data['teamDmg'] = 'ally-dmg' if vehicle.isPlayerTeam else 'enemy-dmg'\n self.data['shortUserString'] = l10n(PILLBOX).format(self.entityNumber)\n self.updateData()\n\n def showDamageFromShot(self, vehicle, attackerID, points, effectsIndex, damageFactor):\n maxComponentIdx = TankPartIndexes.ALL[-1]\n wheelsConfig = vehicle.appearance.typeDescriptor.chassis.generalWheelsAnimatorConfig\n if wheelsConfig:\n maxComponentIdx += wheelsConfig.getWheelsCount()\n maxHitEffectCode, decodedPoints, maxDamagedComponent = DamageFromShotDecoder.decodeHitPoints(points, vehicle.appearance.collisions, maxComponentIdx)\n if decodedPoints:\n compName = decodedPoints[0].componentName\n self.compName = compName if compName[0] != 'W' else 'wheel'\n else:\n self.compName = 'unknown'\n self.criticalHit = (maxHitEffectCode == 5)\n\n def onEnterWorld(self, vehicle):\n self.macros.setChooseRating()\n self.player = BigWorld.player()\n self.playerVehicleID = self.player.playerVehicleID\n self.ammo = self.guiSessionProvider.shared.ammo\n shots = vehicle.typeDescriptor.gun.shots\n nation = nations.NAMES[vehicle.typeDescriptor.type.id[0]]\n xmlPath = '%s%s%s%s' % (ITEM_DEFS_PATH, 'vehicles/', nation, '/components/shells.xml')\n xmlCtx_s = (((None, '{}/{}'.format(xmlPath, n)), s) for n, s in ResMgr.openSection(xmlPath).items() if (n != 'icons') and (n != 'xmlns:xmlref'))\n goldShells = 
[_xml.readInt(xmlCtx, s, 'id', 0, 65535) for xmlCtx, s in xmlCtx_s if s.readBool('improved', False)]\n for shot in shots:\n shell = shot.shell\n intCD = shell.compactDescr\n self.shells[intCD] = {}\n self.shells[intCD]['shellKind'] = shell.kind.lower()\n self.shells[intCD]['shellDamage'] = shell.damage[0]\n self.shells[intCD]['costShell'] = 'gold-shell' if shell.id[1] in goldShells else 'silver-shell'\n ResMgr.purge(xmlPath, True)\n arena = avatar_getter.getArena()\n self.battletypeKey = BATTLE_TYPE.get(arena.guiType, ARENA_GUI_TYPE.UNKNOWN)\n\n def updateVehInfo(self, vehicle):\n if vehicle.id not in self.vehHealth:\n self.vehHealth[vehicle.id] = {}\n self.vehHealth[vehicle.id]['health'] = int(vehicle.health)\n self.vehHealth[vehicle.id]['maxHealth'] = int(vehicle.maxHealth) if isinstance(vehicle, DestructibleEntity) else vehicle.typeDescriptor.maxHealth\n if not vehicle.isAlive() and vehicle.id not in self.vehDead:\n self.vehDead.append(vehicle.id)\n\n\ng_dataHitLog = DataHitLog()\n\n\nclass GroupHit(object):\n\n def __init__(self, section):\n self.section = section\n self._listLog = []\n self.numberTopLine = 0\n self.players = {}\n self.countLines = 0\n self.maxCountLines = None\n self.isAddToEnd = False\n self.S_LINES = section + LINES\n self.S_ADD_TO_END = section + ADD_TO_END\n self.S_FORMAT_HISTORY = section + FORMAT_HISTORY\n self.ATTACK_REASON_FIRE_ID = ATTACK_REASON.getIndex(ATTACK_REASON.FIRE)\n self.ATTACK_REASON_RAM_ID = ATTACK_REASON.getIndex(ATTACK_REASON.RAM)\n self.attackReasonID = 0\n self.damage = 0\n self.__damageRatio = 0\n self.vehID = 0\n self.__hitLogConfig = {}\n\n def mouse_wheel(self, isScrollUp):\n if isScrollUp:\n if self.numberTopLine < len(self._listLog):\n self.numberTopLine += 1\n return True\n else:\n if self.numberTopLine > 0:\n self.numberTopLine -= 1\n return True\n\n def removePlayer(self, vehID):\n if vehID in self.players:\n del self.players[vehID]\n\n def sumDmg(self):\n player = self.players[self.vehID]\n player['dmg-player'] += self.damage\n if self.attackReasonID not in player['dmg-kind-player']:\n player['dmg-kind-player'].append(self.attackReasonID)\n maxHealth = g_dataHitLog.vehHealth[self.vehID]['maxHealth'] if self.vehID in g_dataHitLog.vehHealth else 0\n player['dmg-ratio-player'] = (player['dmg-player'] * 100 // maxHealth) if maxHealth != 0 else 0\n player['dmg-ratio'] = (player['damage'] * 100 // maxHealth) if maxHealth != 0 else 0\n\n def readyConfig(self):\n if config.config_autoreload or not self.__hitLogConfig:\n self.__hitLogConfig = {\n 'vehicleClass': keyLower(_config.get(self.section + 'vtype')),\n 'c_shell': keyLower(_config.get(self.section + 'c:costShell')),\n 'costShell': keyLower(_config.get(self.section + 'costShell')),\n 'c_dmg-kind': keyLower(_config.get(self.section + 'c:dmg-kind')),\n 'c_vehicleClass': keyLower(_config.get(self.section + 'c:vtype')),\n 'dmg-kind': keyLower(_config.get(self.section + 'dmg-kind')),\n 'dmg-kind-player': keyLower(_config.get(self.section + 'dmg-kind-player')),\n 'c_teamDmg': keyLower(_config.get(self.section + 'c:team-dmg')),\n 'teamDmg': keyLower(_config.get(self.section + 'team-dmg')),\n 'compNames': keyLower(_config.get(self.section + 'comp-name')),\n 'typeShell': keyLower(_config.get(self.section + 'type-shell')),\n 'c_typeShell': keyLower(_config.get(self.section + 'c:type-shell'))\n }\n return self.__hitLogConfig\n\n def setParametrsHitLog(self):\n self.countLines = len(self._listLog)\n self.attackReasonID = g_dataHitLog.data['attackReasonID']\n self.damage = 
g_dataHitLog.data['damage']\n self.__damageRatio = g_dataHitLog.data['dmgRatio']\n self.vehID = g_dataHitLog.vehicleID\n try:\n macro = {'battletype-key': g_dataHitLog.battletypeKey}\n self.maxCountLines = int(parser(_config.get(self.S_LINES, 7), macro))\n except TypeError:\n self.maxCountLines = 7\n self.isAddToEnd = _config.get(self.S_ADD_TO_END, False)\n\n def udateMacros(self):\n data = g_dataHitLog.macros\n conf = self.readyConfig()\n player = self.players[self.vehID]\n value = g_dataHitLog.data\n\n data['c:team-dmg'] = conf['c_teamDmg'].get(value['teamDmg'], '#FFFFFF')\n data['team-dmg'] = conf['teamDmg'].get(value['teamDmg'], '')\n data['vtype'] = conf['vehicleClass'].get(VEHICLE_CLASSES_SHORT[value['attackedVehicleType']], '')\n data['c:costShell'] = conf['c_shell'].get(value['costShell'], None)\n data['costShell'] = conf['costShell'].get(value['costShell'], None)\n data['c:dmg-kind'] = conf['c_dmg-kind'][ATTACK_REASONS[value['attackReasonID']]]\n data['dmg-kind'] = conf['dmg-kind'].get(ATTACK_REASONS[value['attackReasonID']], 'reason: %s' % value['attackReasonID'])\n data['dmg-kind-player'] = ''.join([conf['dmg-kind-player'].get(ATTACK_REASONS[i], None) for i in player.get('dmg-kind-player', [])])\n data['c:vtype'] = conf['c_vehicleClass'].get(VEHICLE_CLASSES_SHORT[value['attackedVehicleType']], '#CCCCCC')\n data['comp-name'] = conf['compNames'].get(value['compName'], None)\n data['type-shell'] = conf['typeShell'].get(value['shellKind'], 'not_shell')\n data['type-shell-key'] = value['shellKind'] if value['shellKind'] is not None else 'not_shell'\n data['c:type-shell'] = conf['c_typeShell'].get(value['shellKind'], None)\n data['dmg'] = player['damage']\n data['dmg-ratio'] = player['dmg-ratio']\n data['n-player'] = player.get('n-player', 0)\n data['dmg-player'] = player.get('dmg-player', 0)\n data['dmg-ratio-player'] = player.get('dmg-ratio-player', 0)\n data['c:dmg-ratio-player'] = readColor('dmg_ratio_player', player.get('dmg-ratio-player', None))\n return data\n\n def reset(self):\n self.players.clear()\n self._listLog[:] = []\n self.numberTopLine = 0\n self.countLines = 0\n self.maxCountLines = None\n\n def addAttackReasonID(self):\n return {'damage': self.damage,\n 'time': BigWorld.time(),\n 'numberLine': self.countLines if self.isAddToEnd else -1}\n\n def addPlayer(self):\n return {'dmg-player': self.damage,\n 'dmg-ratio-player': self.__damageRatio,\n 'n-player': 1,\n 'damage': self.damage,\n 'dmg-ratio': self.__damageRatio,\n 'numberLine': 0,\n 'dmg-kind-player': [self.attackReasonID]}\n\n\nclass GroupHitByPlayer(GroupHit):\n APPEND = 0\n CHANGE = 1\n INSERT = 2\n\n def __init__(self, section):\n super(GroupHitByPlayer, self).__init__(section)\n self._listLogNumber = []\n self.prevLineNumber = 0\n\n def reset(self):\n super(GroupHitByPlayer, self).reset()\n self._listLogNumber[:] = []\n\n def updateList(self, mode, numberLine=0):\n macros = self.udateMacros()\n formattedString = parser(_config.get(self.S_FORMAT_HISTORY, ''), macros)\n if mode == self.APPEND:\n self._listLog.append(formattedString)\n self._listLogNumber.append('')\n elif mode == self.INSERT:\n self._listLog.insert(0, formattedString)\n self._listLogNumber.insert(0, '')\n else:\n self._listLog[numberLine] = formattedString\n\n def updateGroupFireRamming(self, vehicle):\n if self.attackReasonID in [1, 2]:\n if self.attackReasonID in vehicle and ((BigWorld.time() - vehicle[self.attackReasonID]['time']) < 1.0):\n vehicle[self.attackReasonID]['damage'] += self.damage\n vehicle[self.attackReasonID]['time'] = 
BigWorld.time()\n vehicle['damage'] = vehicle[self.attackReasonID]['damage']\n else:\n vehicle[self.attackReasonID] = self.addAttackReasonID()\n vehicle['n-player'] += 1\n vehicle['damage'] = self.damage\n else:\n vehicle['n-player'] += 1\n vehicle['damage'] = self.damage\n\n def updatePlayers(self):\n vehicle = self.players[self.vehID]\n self.prevLineNumber = vehicle['numberLine']\n self.updateGroupFireRamming(vehicle)\n self.sumDmg()\n if self.isAddToEnd:\n if vehicle['numberLine'] == self.countLines - 1:\n self.updateList(self.CHANGE, vehicle['numberLine'])\n else:\n self._listLog.pop(vehicle['numberLine'])\n self._listLogNumber.pop(vehicle['numberLine'])\n for v in self.players.itervalues():\n if v['numberLine'] > vehicle['numberLine']:\n v['numberLine'] -= 1\n vehicle['numberLine'] = self.countLines - 1\n self.updateList(self.APPEND)\n else:\n if vehicle['numberLine'] == 0:\n self.updateList(self.CHANGE)\n else:\n self._listLog.pop(vehicle['numberLine'])\n self._listLogNumber.pop(vehicle['numberLine'])\n for v in self.players.itervalues():\n if v['numberLine'] < vehicle['numberLine']:\n v['numberLine'] += 1\n vehicle['numberLine'] = 0\n self.updateList(self.INSERT)\n\n def addPlayers(self):\n self.players[self.vehID] = self.addPlayer()\n vehicle = self.players[self.vehID]\n if self.attackReasonID in [1, 2]:\n vehicle[self.attackReasonID] = self.addAttackReasonID()\n if self.isAddToEnd:\n if self.countLines >= self.maxCountLines:\n self.numberTopLine += 1\n vehicle['numberLine'] = self.countLines\n self.updateList(self.APPEND)\n else:\n for v in self.players.itervalues():\n v['numberLine'] += 1\n vehicle['numberLine'] = 0\n self.updateList(self.INSERT)\n self.prevLineNumber = vehicle['numberLine']\n\n def addLineNumber(self):\n newLineNumber = self.players[self.vehID]['numberLine']\n start, finish = (self.prevLineNumber, newLineNumber + 1) if self.prevLineNumber < newLineNumber else (newLineNumber, self.prevLineNumber + 1)\n length = len(self._listLog)\n for number in xrange(start, finish):\n _number = number + 1 if self.isAddToEnd else length - number\n self._listLogNumber[number] = parser(self._listLog[number], {'number': _number})\n\n def getListLog(self):\n self.setParametrsHitLog()\n if self.maxCountLines <= 0:\n return []\n if self.vehID in self.players:\n self.updatePlayers()\n else:\n self.addPlayers()\n self.addLineNumber()\n return self._listLogNumber\n\n\nclass GroupHitByFireRamming(GroupHit):\n\n DIRECTION_UP = -1\n DIRECTION_DOWN = 1\n\n def __init__(self, section):\n super(GroupHitByFireRamming, self).__init__(section)\n self.isGroup = False\n\n def shiftsLines(self, direction):\n for v in self.players.itervalues():\n if self.ATTACK_REASON_FIRE_ID in v:\n v[self.ATTACK_REASON_FIRE_ID]['numberLine'] += direction\n if self.ATTACK_REASON_RAM_ID in v:\n v[self.ATTACK_REASON_RAM_ID]['numberLine'] += direction\n\n def udateListLog(self):\n macros = self.udateMacros()\n if self.isGroup:\n player = self.players[self.vehID]\n lineNumber = player[self.attackReasonID]['numberLine']\n macros['number'] = lineNumber + 1 if self.isAddToEnd else len(self._listLog) - lineNumber\n formattedString = parser(_config.get(self.S_FORMAT_HISTORY, ''), macros)\n self._listLog[lineNumber] = formattedString\n elif self.isAddToEnd:\n if self.maxCountLines <= self.countLines:\n self.numberTopLine += 1\n macros['number'] = self.countLines + 1\n formattedString = parser(_config.get(self.S_FORMAT_HISTORY, ''), macros)\n self._listLog.append(formattedString)\n else:\n 
self.shiftsLines(self.DIRECTION_DOWN)\n macros['number'] = self.countLines + 1\n formattedString = parser(_config.get(self.S_FORMAT_HISTORY, ''), macros)\n self._listLog.insert(0, formattedString)\n\n def updateAttackReasonID(self):\n player = self.players[self.vehID]\n if self.attackReasonID in player and ((BigWorld.time() - player[self.attackReasonID].get('time', 0)) < 1.0):\n paramAttack = player[self.attackReasonID]\n self.isGroup = True\n paramAttack['damage'] += self.damage\n paramAttack['time'] = BigWorld.time()\n player['damage'] = paramAttack['damage']\n else:\n player[self.attackReasonID] = self.addAttackReasonID()\n\n def updatePlayer(self):\n self.isGroup = False\n if self.vehID in self.players:\n player = self.players[self.vehID]\n if self.attackReasonID in [1, 2]:\n self.updateAttackReasonID()\n if not self.isGroup:\n player['n-player'] += 1\n player['damage'] = self.damage\n self.sumDmg()\n else:\n self.players[self.vehID] = self.addPlayer()\n if self.attackReasonID in [1, 2]:\n self.players[self.vehID][self.attackReasonID] = self.addAttackReasonID()\n\n def getListLog(self):\n self.setParametrsHitLog()\n if self.maxCountLines <= 0:\n return []\n self.updatePlayer()\n self.udateListLog()\n return self._listLog\n\n\nclass HitLog(object):\n\n def __init__(self, section):\n self.section = section\n self.listLog = []\n self.groupHitByPlayer = GroupHitByPlayer(section)\n self.groupHitByFireRamming = GroupHitByFireRamming(section)\n self.S_GROUP_HITS_PLAYER = section + GROUP_HITS_PLAYER\n self.S_SCROLL_LOG = section + SCROLL_LOG\n self.S_MOVE_IN_BATTLE = HIT_LOG_SECTIONS.LOG + MOVE_IN_BATTLE\n self.DEFAULT_X = 320\n self.DEFAULT_Y = 0\n self.S_X = HIT_LOG_SECTIONS.LOG + 'x'\n self.S_Y = HIT_LOG_SECTIONS.LOG + 'y'\n self.x = 0\n self.y = 0\n\n def setPosition(self, battleType):\n self._data = None\n positon = {'x': _config.get(self.S_X, self.DEFAULT_X), 'y': _config.get(self.S_Y, self.DEFAULT_Y)}\n if _config.get(self.S_MOVE_IN_BATTLE, False):\n _data = userprefs.get(HIT_LOG_SECTIONS.LOG + '{}'.format(battleType), positon)\n as_callback(\"hitLog_mouseDown\", self.mouse_down)\n as_callback(\"hitLog_mouseUp\", self.mouse_up)\n as_callback(\"hitLog_mouseMove\", self.mouse_move)\n else:\n _data = positon\n self.x = _data['x']\n self.y = _data['y']\n\n def savePosition(self, battleType):\n if (None not in [self.x, self.y]) and _config.get(self.S_MOVE_IN_BATTLE, False):\n userprefs.set(HIT_LOG_SECTIONS.LOG + '{}'.format(battleType), {'x': self.x, 'y': self.y})\n\n def reset(self):\n self.listLog[:] = []\n self.groupHitByPlayer.reset()\n self.groupHitByFireRamming.reset()\n\n def mouse_wheel(self, isScrollUp):\n if not _config.get(self.S_SCROLL_LOG, True):\n return False\n if _config.get(self.S_GROUP_HITS_PLAYER, True):\n return self.groupHitByPlayer.mouse_wheel(isScrollUp)\n else:\n return self.groupHitByFireRamming.mouse_wheel(isScrollUp)\n\n def getLog(self):\n\n if _config.get(self.S_GROUP_HITS_PLAYER, True):\n numberTopLine = self.groupHitByPlayer.numberTopLine\n maxCountLines = self.groupHitByPlayer.maxCountLines\n else:\n numberTopLine = self.groupHitByFireRamming.numberTopLine\n maxCountLines = self.groupHitByFireRamming.maxCountLines\n return [] if maxCountLines is None else self.listLog[numberTopLine:maxCountLines + numberTopLine]\n\n def mouse_down(self, _data):\n if _data['buttonIdx'] == 0:\n self._data = _data\n\n def mouse_up(self, _data):\n if _data['buttonIdx'] == 0:\n self._data = None\n\n def mouse_move(self, _data):\n if self._data:\n self.x += (_data['x'] - 
self._data['x'])\n self.y += (_data['y'] - self._data['y'])\n as_event(ON_HIT_LOG)\n\n def updatePosition(self):\n if (self.section == HIT_LOG_SECTIONS.LOG) or (self.section == HIT_LOG_SECTIONS.ALT_LOG):\n if not _config.get(self.S_MOVE_IN_BATTLE, False):\n self.x = parser(_config.get(self.S_X, self.DEFAULT_X), g_dataHitLog.macros)\n self.y = parser(_config.get(self.S_Y, self.DEFAULT_Y), g_dataHitLog.macros)\n\n def removePlayer(self, vehID):\n self.groupHitByPlayer.removePlayer(vehID)\n self.groupHitByFireRamming.removePlayer(vehID)\n\n def output(self):\n if _config.get(self.S_GROUP_HITS_PLAYER, True):\n self.listLog = self.groupHitByPlayer.getListLog()\n else:\n self.listLog = self.groupHitByFireRamming.getListLog()\n self.updatePosition()\n if self.callEvent:\n as_event(ON_HIT_LOG)\n\n\nclass HitLogs(object):\n\n def __init__(self):\n self.log = HitLog(HIT_LOG_SECTIONS.LOG)\n self.logAlt = HitLog(HIT_LOG_SECTIONS.ALT_LOG)\n self.logBg = HitLog(HIT_LOG_SECTIONS.BACKGROUND)\n self.logAltBg = HitLog(HIT_LOG_SECTIONS.ALT_BACKGROUND)\n self.logs = [self.log, self.logAlt, self.logBg, self.logAltBg]\n self.isDownAlt = False\n as_callback(\"hitLog_mouseWheel\", self.mouse_wheel)\n\n def mouse_wheel(self, _data):\n isRefresh = False\n isScrollUp = _data['delta'] < 0\n for log in self.logs:\n isRefresh = log.mouse_wheel(isScrollUp) or isRefresh\n if isRefresh:\n as_event(ON_HIT_LOG)\n\n def setPosition(self, battleType):\n self.log.setPosition(battleType)\n\n def savePosition(self, battleType):\n self.log.savePosition(battleType)\n\n def removePlayerFromLogs(self, vehicleID):\n for log in self.logs:\n log.removePlayer(vehicleID)\n\n def reset(self):\n for log in self.logs:\n log.reset()\n\n def output(self):\n self.log.callEvent = self.logBg.callEvent = not self.isDownAlt\n self.logAlt.callEvent = self.logAltBg.callEvent = self.isDownAlt\n for log in self.logs:\n log.output()\n if not g_dataHitLog.data['isAlive']:\n log.removePlayer(g_dataHitLog.vehicleID)\n\n def getListLog(self):\n if self.isDownAlt:\n listLog = self.logAlt.getLog()\n else:\n listLog = self.log.getLog()\n return '\\n'.join(listLog) if listLog else None\n\n def getListLogBg(self):\n if self.isDownAlt:\n listLog = self.logAltBg.getLog()\n else:\n listLog = self.logBg.getLog()\n return '\\n'.join(listLog) if listLog else None\n\n\ng_hitLogs = HitLogs()\n\n\n@registerEvent(PlayerAvatar, '_PlayerAvatar__processVehicleAmmo')\ndef PlayerAvatar__processVehicleAmmo(self, vehicleID, compactDescr, quantity, quantityInClip, _, __):\n if battle.isBattleTypeSupported and _config.get(HIT_LOG_ENABLED, True):\n g_dataHitLog.loaded()\n\n\n@registerEvent(DestructibleEntity, 'onEnterWorld')\ndef DestructibleEntity_onEnterWorld(self, prereqs):\n if self.isAlive():\n g_dataHitLog.updateVehInfo(self)\n\n\n@registerEvent(DestructibleEntity, 'onHealthChanged')\ndef DestructibleEntity_onHealthChanged(self, newHealth, attackerID, attackReasonID, hitFlags):\n destructibleEntityComponent = BigWorld.player().arena.componentSystem.destructibleEntityComponent\n if _config.get(HIT_LOG_ENABLED, True) and battle.isBattleTypeSupported and (destructibleEntityComponent is not None):\n if (g_dataHitLog.playerVehicleID == attackerID) and (self.id not in g_dataHitLog.vehDead):\n if not self.isPlayerTeam or _config.get(SHOW_ALLY_DAMAGE, True):\n g_dataHitLog.onHealthChanged(self, newHealth, attackerID, attackReasonID, False)\n g_dataHitLog.updateVehInfo(self)\n\n\n@registerEvent(Vehicle, 'showDamageFromShot')\ndef _Vehicle_showDamageFromShot(self, attackerID, points, 
effectsIndex, damageFactor):\n if battle.isBattleTypeSupported and (g_dataHitLog.playerVehicleID == attackerID) and self.isAlive() and _config.get(HIT_LOG_ENABLED, True):\n g_dataHitLog.showDamageFromShot(self, attackerID, points, effectsIndex, damageFactor)\n\n\n@registerEvent(Vehicle, 'showDamageFromExplosion')\ndef _Vehicle_showDamageFromExplosion(self, attackerID, center, effectsIndex, damageFactor):\n if battle.isBattleTypeSupported and (g_dataHitLog.playerVehicleID == attackerID) and self.isAlive() and _config.get(HIT_LOG_ENABLED, True):\n g_dataHitLog.splashHit = True\n g_dataHitLog.criticalHit = False\n\n\n@registerEvent(PlayerAvatar, '_PlayerAvatar__onArenaVehicleKilled')\ndef __onArenaVehicleKilled(self, targetID, attackerID, equipmentID, reason):\n if self.playerVehicleID != attackerID:\n g_hitLogs.removePlayerFromLogs(targetID)\n\n\n@registerEvent(Vehicle, 'onEnterWorld')\ndef _Vehicle_onEnterWorld(self, prereqs):\n if _config.get(HIT_LOG_ENABLED, True) and battle.isBattleTypeSupported:\n if self.id in g_dataHitLog.vehDead:\n g_dataHitLog.vehDead.remove(self.id)\n if self.isPlayerVehicle:\n g_dataHitLog.onEnterWorld(self)\n g_hitLogs.setPosition(g_dataHitLog.battletypeKey)\n\n\n@registerEvent(Vehicle, 'startVisual')\ndef _Vehicle_startVisual(self):\n if _config.get(HIT_LOG_ENABLED, True) and battle.isBattleTypeSupported:\n g_dataHitLog.updateVehInfo(self)\n\n\n@registerEvent(Vehicle, 'onHealthChanged')\ndef _Vehicle_onHealthChanged(self, newHealth, attackerID, attackReasonID):\n if _config.get(HIT_LOG_ENABLED, True) and battle.isBattleTypeSupported:\n if (g_dataHitLog.playerVehicleID == attackerID) and (self.id not in g_dataHitLog.vehDead or newHealth <= -5):\n attacked = g_dataHitLog.player.arena.vehicles.get(self.id)\n if (g_dataHitLog.player.team != attacked['team']) or _config.get(SHOW_ALLY_DAMAGE, True):\n if (self.id != attackerID) or _config.get(SHOW_SELF_DAMAGE, True):\n g_dataHitLog.onHealthChanged(self, newHealth, attackerID, attackReasonID)\n else:\n if (self.id == attackerID) and _config.get(SHOW_SELF_DAMAGE, True):\n g_dataHitLog.onHealthChanged(self, newHealth, attackerID, attackReasonID)\n g_dataHitLog.updateVehInfo(self)\n\n\n@registerEvent(Vehicle, 'set_isCrewActive')\ndef set_isCrewActive(self, prev):\n g_dataHitLog.updateVehInfo(self)\n\n\n@registerEvent(PlayerAvatar, '_PlayerAvatar__destroyGUI')\ndef PlayerAvatar__destroyGUI(self):\n if _config.get(HIT_LOG_ENABLED, True) and battle.isBattleTypeSupported:\n g_hitLogs.savePosition(g_dataHitLog.battletypeKey)\n g_hitLogs.reset()\n g_dataHitLog.reset()\n\n\n@registerEvent(PlayerAvatar, 'handleKey')\ndef PlayerAvatar_handleKey(self, isDown, key, mods):\n if _config.get(HIT_LOG_ENABLED, True) and battle.isBattleTypeSupported:\n hotkey = _config.get('hotkeys/hitLogAltMode')\n if hotkey['enabled'] and (key == hotkey['keyCode']):\n if isDown:\n if hotkey['onHold']:\n if not g_hitLogs.isDownAlt:\n g_hitLogs.isDownAlt = True\n as_event(ON_HIT_LOG)\n else:\n g_hitLogs.isDownAlt = not g_hitLogs.isDownAlt\n as_event(ON_HIT_LOG)\n else:\n if hotkey['onHold']:\n if g_hitLogs.isDownAlt:\n g_hitLogs.isDownAlt = False\n as_event(ON_HIT_LOG)\n\n\ndef hLog():\n return g_hitLogs.getListLog()\n\n\ndef hLog_bg():\n return g_hitLogs.getListLogBg()\n\n\ndef hLog_x():\n return g_hitLogs.log.x\n\n\ndef hLog_y():\n return 
g_hitLogs.log.y\n\n","sub_path":"content/Mods/XVM/XVM_Base/res_mods_content/configs/xvm/py_macro/xvm/hitLog.py","file_name":"hitLog.py","file_ext":"py","file_size_in_byte":39872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"368649815","text":"# -*- coding: utf-8 -*-\n\n# ====== SBS ======== sequential backward propagation =====================\nfrom sklearn.base import clone\nfrom itertools import combinations\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nclass SBS():\n def __init__(self,estimator,k_features,scoring=accuracy_score,test_size=0.25,random_state=1):\n self.scoring = scoring\n self.estimator = clone(estimator)\n self.f_features = k_features\n self.test_size = test_size\n self.random_state = random_state\n \n def fit(self,X,y):\n X_train,X_test,y_train,y_test = \\\n train_test_split(X,y,test_size=self.test_size,random_state = self.random_state)\n \n dim = X_train.shape[0]\n self.indices_ = tuple(range(dim))\n self.subsets_ = [self.indices_]\n score = self._cal_score(X_train,y_train,X_test,y_test,self.indices_)\n \n self.scores_ = [score]\n \n while dim > self.k_features:\n scores =[]\n subsets = []\n \n for p in combinations(self.indices_,r=dim-1):\n score = self._calc_score(X_train,y_train,X_test,y_test,p)\n scores.append(score)\n subsets.append(p)\n \n best = np.argmax(scores)\n self.indices_ = subsets[best]\n self.subsets_.append(self.indices_)\n dim -=1\n self.score_append(scores[best])\n self.k_score_ = self.scores_[-1]\n \n return self\n \n def transform(self,X):\n return X[:,self.indices_]\n \n def _cal_score(self,X_train,y_train,X_test,y_test,indices):\n self.estimator.fit(X_train[:,indices],y_train)\n y_pred =self.estimator.predict(X_test[:,indices])\n score = self.scoring(y_test,y_pred)\n return score\n \n \n#==================== Varfying using KNN classifier\n \n# -*- coding: utf-8 -*-\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nimport matplotlib.pyplot as plt\nX = np.random.random((2000))\ny = np.random.random((2000))\nknn = KNeighborsClassifier(n_neighbors=2)\nsbs = SBS(knn, k_features=1)\nsbs.fit(X,y)\n\nk_feat = [len(k) for k in sbs.subsets_]\nplt.plot(k_feat, sbs._cal_scores_, marker='o')\nplt.ylim([0.7,1.1])\n\nplt.ylabel('Accuracy')\nplt.xlabel('number of features')\nplt.grid()\nplt.show()\n\n\nX.shape[1]\n \n \n ","sub_path":"Machine_Learning/Part 9 - Dimensionality Reduction/misslaneous/SBS.py","file_name":"SBS.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"531589000","text":"#!usr/bin/python\n# coding: UTF-8\nimport numpy as np\nimport math\nimport gzip\nimport os.path\n\nclass Mnist:\n\n key_file = {\n 'train_img':'train-images-idx3-ubyte.gz',\n 'train_label':'train-labels-idx1-ubyte.gz',\n 'test_img':'t10k-images-idx3-ubyte.gz',\n 'test_label':'t10k-labels-idx1-ubyte.gz'\n }\n dataset_dir = os.path.dirname(os.path.abspath(__file__))\n\n def __init__(self):\n self.train_num = 60000\n self.test_num = 10000\n self.img_dim = (1, 28, 28)\n self.img_size = 784\n datas = self.load_mnist()\n self.train_data = datas[0]\n self.train_labels = datas[1]\n self.test_data = datas[2]\n self.test_labels = datas[3]\n\n def load_img(self, filename):\n file_path = Mnist.dataset_dir + '/' + filename\n with gzip.open(file_path, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n data.flags.writeable = True\n return 
data.reshape(-1, self.img_size)\n\n def load_label(self, filename):\n file_path = Mnist.dataset_dir + '/' + filename\n with gzip.open(file_path, 'rb') as f:\n labels = np.frombuffer(f.read(), np.uint8, offset=8)\n return labels\n \n def oneOfK(self, labels):\n res = np.zeros((len(labels), 10))\n for i in range(len(labels)):\n res[i][labels[i]] = 1\n return res\n \n def load_mnist(self):\n train_data = self.load_img(Mnist.key_file['train_img'])\n train_labels = self.load_label(Mnist.key_file['train_label'])\n test_data = self.load_img(Mnist.key_file['test_img'])\n test_labels = self.load_label(Mnist.key_file['test_label'])\n train_data = train_data / 255.0\n test_data = test_data / 255.0\n train_labels = self.oneOfK(train_labels)\n test_labels = self.oneOfK(test_labels)\n return [train_data, train_labels, test_data, test_labels]\n","sub_path":"task5/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"129484783","text":"# -*- coding:utf-8 -*-\nfrom django.conf.urls import url\nfrom . import views\n\napp_name = 'blog'\n\nurlpatterns = [\n\turl(r'^$',view=views.IndexViews.as_view(),name='index'),\n\turl(r'^page/(?P\\d+)/$',view=views.IndexViews.as_view(),name='articel_index_paginated'),\n\turl(r'^post/(?P[0-9]+)/$',view=views.PostDetailView.as_view(),name='detail'),\n\t# url(r'^categories/$',view=views.CategoriesViews.as_view(),name='categories'),\n\turl(r'^category/(?P\\d+)/$',view=views.CategoryView.as_view(),name='category'),\n # url(r'^tags/$',view=views.TagsViews.as_view(),name='tags'),\n url(r'^tag/(?P\\d+)/$',view=views.TagsViews.as_view(),name='tag'),\n\turl(r'^archives/$',view=views.ArchivesViews.as_view(),name='archives'),\n url(r'^archives/(?P[0-9]{4})/(?P[0-9]{1,2})/$',\n view=views.ArchiveView.as_view(),name='archive'),\n url(r'^about/$',view=views.about,name='about'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"5238907","text":"class Solution(object):\n def nextGreaterElement(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n ret = {}\n sorted_n1 = sorted(nums1)[::-1]\n\n for n2 in nums2:\n while sorted_n1 and n2 > sorted_n1[-1]:\n n1 = sorted_n1.pop()\n ret.setdefault(n1, n2)\n\n return [ret.get(i, -1) for i in nums1]\n\n\ns = Solution()\nprint(s.nextGreaterElement([2, 4], [1, 2, 3, 4]))\n","sub_path":"leetcode/algorithm/Next Greater Element I.py","file_name":"Next Greater Element I.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"590752110","text":"def issorted(lst):\n \"\"\"Check if a list is sorted \n \n >>> issorted([])\n True\n \n >>> issorted([3])\n True\n\n >>> issorted([3, 2])\n False\n\n >>> issorted([2, 3])\n True\n \n >>> issorted([7, 9, 13])\n True\n \n >>> issorted([7, 22, 13])\n False\n\n \"\"\"\n if len(lst) <= 1:\n return True\n \n last_seen, *rest = lst\n for i in rest:\n if last_seen <= i:\n last_seen = i\n else:\n return False\n return True\n\n\n# this helps us test the code, is only run if we\n# run the module:\nif __name__ == '__main__':\n import doctest\n if doctest.testmod().failed == 0:\n print(\"Ser godt 
ud\")\n","sub_path":"W40/issorted.py","file_name":"issorted.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475832041","text":"import torch\nfrom torch import nn\n\nclass Linear2(nn.Module):\n def __init__(self, in_features, out_features):\n super(Linear2, self).__init__()\n self.in_features, self.out_features = in_features, out_features\n self.layer_1, self.layer_2 = nn.Linear(self.in_features, self.out_features), nn.Linear(self.in_features, self.out_features) \n def forward(self, x):\n x = x.view(-1, self.in_features)\n return [self.layer_1(x), self.layer_2(x)]\n \nclass MLP(nn.Module):\n def __init__(self, in_features, hidden_features, out_features, out_layer = nn.Linear, activation = nn.ReLU):\n super(MLP, self).__init__()\n self.in_features = in_features\n self.hidden_features = hidden_features\n self.out_features = out_features\n self.activation = activation\n self.out_layer = out_layer(hidden_features, out_features)\n \n self.model = nn.Sequential(\n nn.Linear(in_features, hidden_features),\n self.activation(),\n self.out_layer)\n \n def forward(self, x):\n x = x.view(-1, self.in_features)\n return self.model(x)","sub_path":"unsup/models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"15304844","text":"from __future__ import print_function\n\nimport crc16\n\nSYN1 = chr(0x47).encode(\"utf-8\")\nSYN2 = chr(0x55).encode(\"utf-8\")\nNAK = chr(0x0F).encode(\"utf-8\")\nACK = chr(0x06).encode(\"utf-8\")\nLEN9 = (chr(0x00) + chr(0x00) + chr(0x00) + chr(0x09)).encode(\"utf-8\")\nLEN44 = (chr(0x00) + chr(0x00) + chr(0x00) + chr(0x2C)).encode(\"utf-8\")\n\n\nclass GlobalStarResponse():\n def __init__(self):\n self.ack = 1\n self.nak = 2\n self.error = 3\n\n def from_bytes(self, data, big_endian=False):\n if isinstance(data, str):\n data = bytearray(data)\n if big_endian:\n data = reversed(data)\n num = 0\n for offset, byte in enumerate(data):\n num += byte << (offset * 8)\n return num\n\n def timeElapsedDisplay(self, secs):\n d = secs // 86400\n secs = secs - (d * 86400)\n h = secs // 3600\n secs = secs - (h * 3600)\n m = secs // 60\n secs = secs - (m * 60)\n return \"%03d:%02d:%02d:%02d\" % (d, h, m, secs)\n\n def oldparse(self, buffer):\n assert type(buffer) is bytes\n if len(buffer) != 3:\n return self.error\n if buffer[0] == 0x47 and buffer[1] == 0x55:\n if buffer[2] == 0x06:\n return self.ack\n if buffer[2] == 0x0F:\n return self.nak\n else:\n return self.error\n\n def parseACK(self, incoming, poll):\n POLL_ACK = SYN1 + SYN2 + LEN9 + ACK + poll.encode('UTF-8')\n POLL_NAK = SYN1 + SYN2 + LEN9 + NAK + poll.encode('UTF-8')\n if len(incoming) >= 11:\n # print \"***Parsing ack/nak string***\"\n # print(\"recieved string: \"+ incoming[:10])\n if incoming[:11] == POLL_ACK:\n print(\"ACK received!\")\n return self.ack\n elif incoming[:11] == POLL_NAK:\n print(\"NAK received!\")\n return self.nak\n else:\n print('Malformed response from GlobalStar radio')\n print('Recieved string: ', incoming[0:11])\n print('Expected string: ', POLL_ACK)\n return self.error\n else:\n print(\"recieved insufficient bytes for poll ACK!\")\n print(\"recieved only \" + len(incoming) + \" bytes\")\n return self.error\n\n def parseSMSPoll(self, incoming):\n # CANNOT HARD CODE POLL HEADER BECAUSE LENGTH WILL BE DETERMINED BY SIZE OF SMS MESSAGE\n SYN_HDR = SYN1 + SYN2\n MSG_HDR = 
\"R111\".encode('UTF-8')\n if (len(incoming) >= 11):\n print(\"***Parsing poll response header string***\")\n # print(\"recieved string: \"+ buffer[:11])\n # if incoming[:2] == SYN_HDR and incoming[6:11] == MSG_HDR:\n if len(incoming) > 11:\n print(\"\\n\")\n print(\"Response:\", incoming)\n print(\"ESN: \" + incoming[11:19].decode(\"utf8\"))\n fileNameSize = incoming[19:22].decode(\"utf8\")\n print(\"File Name Size: \" + fileNameSize)\n payloadSize = incoming[22:28].decode(\"utf8\")\n print(\"Payload Size (max 34): \" + payloadSize)\n print(\"File name (internal use only)\", incoming[28:28 + int(fileNameSize)])\n print(\n \"Payload: \" + incoming[28 + int(fileNameSize):28 + int(fileNameSize) + int(payloadSize)].decode(\n \"utf8\"))\n print(\"CRC Integer: \", int(str(incoming[-2:]).encode('hex'), 16))\n\n print(\"\\n\")\n else:\n print('Malformed response from GlobalStar radio')\n print('Recieved string: ', incoming)\n return self.error\n # else:\n # print('Malformed response from GlobalStar radio')\n # print 'Recieved string: ', incoming[0:11]\n # print 'Expected string: ', MSG_HDR\n # return self.error\n else:\n print(\"recieved insufficient bytes for poll RESPONSE!\")\n print(\"recieved only \" + len(incoming) + \" bytes\")\n print('Recieved string: ', incoming)\n return self.error\n\n def parseHealthPoll(self, incoming):\n POLL_HDR = SYN1 + SYN2 + LEN44 + \"RC401\".encode('UTF-8')\n if (len(incoming) >= 11):\n # print \"***Parsing poll response header string***\"\n # print(\"recieved string: \"+ buffer[:11])\n if incoming[:11] == POLL_HDR:\n if len(incoming) >= 46:\n # print \"successfull Health Poll header recieved\"\n print(\"***Parsing Health Status string***\")\n print(\"Epoch: \" + str(self.from_bytes(incoming[11:15], True)))\n print(\"Elapsed time: \" + self.timeElapsedDisplay(self.from_bytes(incoming[15:19], True)))\n print(\"RSSI: \" + str(self.from_bytes(incoming[19:20], True)))\n print(\"Connected: \" + str(self.from_bytes(incoming[20:21], True)))\n print(\"Gateway: \" + str(self.from_bytes(incoming[21:22], True)))\n print(\"Last Contact: \" + self.timeElapsedDisplay(self.from_bytes(incoming[22:26], True)))\n print(\"Last Attempt: \" + self.timeElapsedDisplay(self.from_bytes(incoming[26:30], True)))\n print(\"Number of Call Attempts: \" + str(self.from_bytes(incoming[30:34], True)))\n print(\"Number of Successful Connects: \" + str(self.from_bytes(incoming[34:38], True)))\n print(\"Average Connection Duration: \" + str(self.from_bytes(incoming[38:42], True)))\n print(\"Average Connection Duration SD: \" + str(self.from_bytes(incoming[42:46], True)))\n print(\"Recieved string: \", (incoming))\n else:\n print('Malformed response from GlobalStar radio')\n print('Recieved string: ', incoming)\n else:\n print('Malformed response from GlobalStar radio')\n print('Recieved string: ', incoming[0:11])\n print('Expected string: ', POLL_HDR)\n else:\n print(\"recieved insufficient bytes for poll RESPONSE!\")\n print(\"recieved only \" + len(incoming) + \" bytes\")\n print('Recieved string: ', incoming)\n\n def parseQueueLenPoll(self, incoming, poll):\n POLL_HDR = SYN1 + SYN2 + LEN9 + ('R' + poll).encode('UTF-8')\n if (len(incoming) >= 11):\n # print \"***Parsing poll response header string***\"\n # print(\"recieved string: \"+ buffer[:11])\n # if incoming[:11] == POLL_HDR:\n if len(incoming) >= 15:\n print(\"Messages Queue: \" + str(self.from_bytes(incoming[11:], True)))\n # print 'recieved string: ', incoming\n else:\n print('Malformed response from GlobalStar radio')\n print('Recieved 
string: ', incoming)\n # else:\n # print('Malformed response from GlobalStar radio')\n # print 'Recieved string: ', incoming[0:11]\n # print 'Expected string: ', POLL_HDR\n else:\n print(\"recieved insufficient bytes for poll RESPONSE!\")\n print(\"recieved only \" + len(incoming) + \" bytes\")\n print('Recieved string: ', incoming)\n","sub_path":"MULE/globalstar_response.py","file_name":"globalstar_response.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"281251757","text":"import time\nimport torch\n\nfrom torch.autograd import Variable\n\nuse_gpu = torch.cuda.is_available()\n\ndef evaluate(model, criterion, test_loader):\n since = time.time()\n model.train(False)\n running_loss = 0.0\n epoch_loss = 0.0\n for data in test_loader:\n inputs, labels = data\n\n if use_gpu:\n inputs = Variable(inputs.cuda())\n labels = Variable(labels.cuda())\n else:\n inputs, labels = Variable(inputs), Variable(labels)\n\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n running_loss += loss.data[0] * inputs.size(0)\n\n epoch_loss = float(running_loss) / float(len(test_loader.dataset))\n\n time_elapsed = time.time() - since\n\n print('Evaluating complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('epoch_loss: {:4f}'.format(epoch_loss))\n#End_ evaluate##################################################################################","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"386034293","text":"#\n# This is Seisflows\n#\n# See LICENCE file\n#\n# Functions to write signals to files (using Obspy)\n#\n# SeisFlows uses obspy stream objects for holding and processing seismic data.\n# In some cases, obspy.read doesn't provide the desired behavior, so we\n# introduce an additonal level of indirection\n#\n# used by the PREPROCESS class and specified by the WRITER parameter\n#\n###############################################################################\n\n# Import system module\nimport sys\n\n# Import numpy\nimport numpy as np\n\ntry:\n PAR = sys.modules['seisflows_parameters']\nexcept:\n print(\"Check parameters and paths.\")\n\ndef su(stream, path, filename):\n \"\"\" Write Seismic Unix files.\n Function writeBigSuFile is a hack to write a .su file when the number\n of samples per trace is two big.\n In the su format only 2 bytes per trace are dedicated to encoding for\n the number of samples (as signed int, see:\n http://lists.swapbytes.de/archives/obspy-users/2017-March/002359.html).\n Even if it's an old format it's still extremely stupid.\n This proove the lack of vision the designer of this format had at that\n time. They could have chosen 8 bytes or 16 bytes it was no big deal...\n They've cost me a day's work.\n But let us forget about the past. 
This limits the size of the\n traces in the header to maximum 32768.\n We use Obspy to write the file with dummy values there instead of the\n real number of sample (that we now anyway : it is PAR.NT).\n We thus rewrote a quick version of this function from Obspy replacing\n the number of point by PAR.NT\n\n \"\"\"\n for t in stream:\n # work around obspy data type conversion\n t.data = t.data.astype(np.float32)\n\n max_npts = 32767\n max_delta = 0.065535\n dummy_delta = max_delta\n\n if stream[0].stats.delta > max_delta:\n for t in stream:\n t.stats.delta = dummy_delta\n\n # write data to file\n # if PAR.NT < max_npts:\n if False:\n stream.write(path+'/'+filename, format='SU')\n else:\n writeBigSuFile(stream, path+'/'+filename)\n\n\ndef ascii(stream, path, filenames):\n \"\"\" Write ascii signal file\n \"\"\"\n for ir, tr in enumerate(stream):\n nt = tr.stats.npts\n t1 = float(tr.stats.starttime)\n t2 = t1 + tr.stats.npts*tr.stats.sampling_rate\n print(nt, t1, t2)\n\n t = np.linspace(t1, t2, nt)\n w = tr.data\n\n print(path + '/' + tr.stats.filename)\n print(times.shape, tr.data.shape)\n np.savetxt(path + '/' + tr.stats.filename,\n np.column_stack((t, w)))\n\n\ndef writeBigSuFile(stream, path, byteorder='<'):\n \"\"\" This function is a hack to write a .su file when the number\n of samples per trace is two big.\n In the su format only 2 bytes per trace are dedicated to encoding for\n the number of samples (as signed int, see:\n http://lists.swapbytes.de/archives/obspy-users/2017-March/002359.html).\n Even if it's an old format it's still extremely stupid.\n This proove the lack of vision the designer of this format had at that\n time. They could have chosen 8 bytes or 16 bytes it was no big deal...\n They've cost me a day's work.\n But let us forget about the past. This limits the size of the\n traces in the header to maximum 32768.\n We use Obspy to write the file with dummy values there instead of the\n real number of sample (that we now anyway : it is PAR.NT).\n We thus rewrote a quick version of this function from Obspy replacing\n the number of point by PAR.NT\n This is mostly copy-pastes from Obspy source code\n \"\"\"\n\n from obspy.core.utcdatetime import UTCDateTime\n from obspy.core.util import AttribDict\n from obspy.io.segy.core import SUFile\n from obspy.io.segy.segy import SEGYWritingError\n from obspy.io.segy.segy import SEGYTrace\n from obspy.io.segy.segy import SEGYTraceHeader\n from obspy.io.segy.header import TRACE_HEADER_FORMAT\n from obspy.io.segy.header import DATA_SAMPLE_FORMAT_PACK_FUNCTIONS\n \n\n dummy_npts = 9999\n su_file = SUFile()\n\n # Add all traces\n for trace in stream:\n new_trace = SEGYTrace()\n new_trace.data = trace.data\n # Use header saved in stats if one exists.\n if hasattr(trace.stats, 'segy') and \\\n hasattr(trace.stats.segy, 'trace_header'):\n this_trace_header = trace.stats.segy.trace_header\n else:\n this_trace_header = SEGYTraceHeader()\n new_trace_header = new_trace.header\n # Again loop over all field of the trace header and if they exists, set\n # them. Ignore all additional attributes.\n for _, item, _, _ in TRACE_HEADER_FORMAT:\n if hasattr(this_trace_header, item):\n setattr(new_trace_header, item,\n getattr(this_trace_header, item))\n starttime = trace.stats.starttime\n # Set some special attributes, e.g. 
the sample count and other stuff.\n new_trace_header.number_of_samples_in_this_trace = trace.stats.npts\n new_trace_header.sample_interval_in_ms_for_this_trace = \\\n int(round((trace.stats.delta * 1E6)))\n # Set the date of the Trace if it is not UTCDateTime(0).\n if starttime == UTCDateTime(0):\n new_trace.header.year_data_recorded = 0\n new_trace.header.day_of_year = 0\n new_trace.header.hour_of_day = 0\n new_trace.header.minute_of_hour = 0\n new_trace.header.second_of_minute = 0\n else:\n new_trace.header.year_data_recorded = starttime.year\n new_trace.header.day_of_year = starttime.julday\n new_trace.header.hour_of_day = starttime.hour\n new_trace.header.minute_of_hour = starttime.minute\n new_trace.header.second_of_minute = starttime.second\n # Set the data encoding and the endianness.\n new_trace.endian = byteorder\n # Add the trace to the SEGYFile object.\n su_file.traces.append(new_trace)\n\n # Write the file\n file = open(path, 'wb')\n for trace in su_file.traces:\n trace.header.number_of_samples_in_this_trace = dummy_npts\n endian = trace.endian\n data_encoding = 5\n # Write the header.\n trace.header.write(file, endian=endian)\n # Write the data.\n if trace.data is None:\n msg = \"No data in the SEGYTrace.\"\n raise SEGYWritingError(msg)\n DATA_SAMPLE_FORMAT_PACK_FUNCTIONS[data_encoding](file, trace.data,\n endian=endian)\n file.close()\n","sub_path":"seisflows/plugins/writers.py","file_name":"writers.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"232070301","text":"\"\"\" @Property Implementation\"\"\"\nclass Student:\n def __init__(self, name, marks):\n self.name = name\n self.marks = marks\n\n @property\n def got_marks(self):\n print('PROPERTY')\n return self.name +' Obtained '+self.marks+' Marks '\n\n @got_marks.setter\n def got_marks(self, sentence):\n name, rand, marks = sentence.split(' ')\n print('SETUP')\n self.name = name\n self.marks = marks\n\n @got_marks.deleter\n def got_marks(self):\n self.name = None\n self.marks = None\n\ndef call_student():\n st = Student('Subramanyam.V','866')\n # print(st.name)\n # print(st.marks)\n # print(st.got_marks)\n # st.name = 'Vegi Subramanyam'\n # print(st.got_marks)\n st.got_marks = 'Subbu got 507'\n print(st.name)\n print(st.marks)\n print(st.got_marks)\n del st.got_marks\n print(st.name)\n print(st.marks)\n\n# call_student()\n\n\n\"\"\" Closures Implementation \"\"\"\ndef closure_fun(up):\n val = 0\n def nested_fun(arg):\n print(up,'\\t',arg)\n nonlocal val\n for i in range(up+1):\n val += i\n val *= arg\n print('Total is : {}'.format(val))\n return nested_fun\nsum_value = closure_fun(10)\nsum_value(5)\nsum_value(25)\n# del closure_fun\n# sum_value()","sub_path":"more_advanced_py/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"549433305","text":"# -*- coding:utf-8 -*-\n#\nimport tkinter\nimport tkinter.messagebox\ndef cmd():\n\tglobal n\n\tglobal buttontext\n\tn = n + 1\n\tif n == 1:\n\t\ttkinter.messagebox.askokcancel('Python tkinter','取消')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 使用askokcancel函数\n\t\tbuttontext.set('浪潮软件')\n\telif n == 2:\n\t\ttkinter.messagebox.a浪潮软件('Python tkinter','浪潮软件')\t\n\t\tbuttontext.set('AAAA')\n\telif n == 3:\n\t\ttkinter.messagebox.askyesno('Python tkinter','否')\n\t\tbuttontext.set('showerror')\n\telif n == 4:\n\t\ttkinter.messagebox.showerror('Python 
tkinter','错误')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 使用showerror函数\n\t\tbuttontext.set('showinfo')\n\telif n == 5:\n\t\ttkinter.messagebox.showinfo('Python tkinter','详情')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 使用showinfo函数\n\t\tbuttontext.set('显示警告')\n\telse :\n\t\tn = 0\n\t\ttkinter.messagebox.showwarning('Python tkinter','警告')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 使用showwarning函数\n\t\tbuttontext.set('AAAAl')\nn = 0\nroot = tkinter.Tk()\nbuttontext = tkinter.StringVar()\nbuttontext.set('AAAAl')\nbutton = tkinter.Button(root,\n\t\ttextvariable = buttontext,\n\t\tcommand = cmd)\nbutton.pack()\nroot.mainloop()\n","sub_path":"daima/16/16-10/duihua.py","file_name":"duihua.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"517838381","text":"#\n# This file is part of Python Module for Cube Builder.\n# Copyright (C) 2019-2020 INPE.\n#\n# Cube Builder is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n#\n\n\"\"\"Define Brazil Data Cube Cube Builder routes.\"\"\"\n\n# 3rdparty\nfrom flask import Blueprint, request, jsonify\n\n# Cube Builder\nfrom .version import __version__\nfrom .controller import CubeController\nfrom .forms import GridRefSysForm, DataCubeForm, DataCubeProcessForm, PeriodForm, \\\n CubeStatusForm, CubeItemsForm, DataCubeMetadataForm\n\n\nbp = Blueprint('cubes', import_name=__name__)\n\n@bp.route('/', methods=['GET'])\ndef status():\n return dict(\n message = 'Running',\n description = 'Cube Builder',\n version = __version__\n ), 200\n\n\n\n@bp.route('/cube-status', methods=('GET', ))\ndef cube_status():\n form = CubeStatusForm()\n\n args = request.args.to_dict()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n return jsonify(CubeController.get_cube_status(**args))\n\n\n@bp.route('/cubes', defaults=dict(cube_id=None), methods=['GET'])\n@bp.route('/cubes/', methods=['GET'])\ndef list_cubes(cube_id):\n if cube_id is not None:\n message, status_code = CubeController.get_cube(cube_id)\n\n else:\n message, status_code = CubeController.list_cubes()\n\n return jsonify(message), status_code\n\n\n@bp.route('/cubes', methods=['POST'])\ndef create_cube():\n \"\"\"Define POST handler for datacube creation.\n\n Expects a JSON that matches with ``DataCubeForm``.\n \"\"\"\n form = DataCubeForm()\n\n args = request.get_json()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n data = form.load(args)\n\n cubes, status = CubeController.create(data)\n\n return jsonify(cubes), status\n\n@bp.route('/cubes/', methods=['PUT'])\ndef update_cube_matadata(cube_id):\n \"\"\"Define PUT handler for datacube Updation.\n\n Expects a JSON that matches with ``DataCubeMetadataForm``.\n \"\"\"\n form = DataCubeMetadataForm()\n\n args = request.get_json()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n data = form.load(args)\n\n message, status = CubeController.update(cube_id, data)\n\n return jsonify(message), status\n\n\n@bp.route('/cubes//tiles', methods=['GET'])\ndef list_tiles(cube_id):\n message, status_code = CubeController.list_tiles_cube(cube_id, only_ids=True)\n\n return jsonify(message), status_code\n\n\n@bp.route('/cubes//tiles/geom', methods=['GET'])\ndef list_tiles_as_features(cube_id):\n message, status_code = CubeController.list_tiles_cube(cube_id)\n\n return jsonify(message), status_code\n\n\n@bp.route('/cubes//items', methods=['GET'])\ndef list_cube_items(cube_id):\n 
form = CubeItemsForm()\n\n args = request.args.to_dict()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n message, status_code = CubeController.list_cube_items(cube_id, **args)\n\n return jsonify(message), status_code\n\n\n@bp.route('/cubes//meta', methods=['GET'])\ndef get_cube_meta(cube_id):\n \"\"\"Retrieve the meta information of a data cube such STAC provider used, collection, etc.\"\"\"\n message, status_code = CubeController.cube_meta(cube_id)\n\n return jsonify(message), status_code\n\n\n@bp.route('/start', methods=['POST'])\ndef start_cube():\n \"\"\"Define POST handler for datacube execution.\n\n Expects a JSON that matches with ``DataCubeProcessForm``.\n \"\"\"\n args = request.get_json()\n\n form = DataCubeProcessForm()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n data = form.load(args)\n\n proc = CubeController.maestro(**data)\n\n return proc\n\n\n@bp.route('/list-merges', methods=['GET'])\ndef list_merges():\n \"\"\"Define POST handler for datacube execution.\n\n Expects a JSON that matches with ``DataCubeProcessForm``.\n \"\"\"\n args = request.args\n\n res = CubeController.check_for_invalid_merges(**args)\n\n return res\n\n\n@bp.route('/create-temporal-schema', methods=['POST'])\ndef temporal_schema():\n \"\"\"Create the temporal composite schema using HTTP Post method.\n\n Expects a JSON that matches with ``TemporalSchemaParser``.\n \"\"\"\n\n args = request.get_json()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n cubes, status = CubeController.create_temporal_composition(args)\n\n return cubes, status\n\n\n\n@bp.route('/grids', defaults=dict(grs_id=None), methods=['GET'])\n@bp.route('/grids/', methods=['GET'])\ndef list_grs_schemas(grs_id):\n if grs_id is not None:\n result, status_code = CubeController.get_grs_schema(grs_id)\n else:\n result, status_code = CubeController.list_grs_schemas()\n\n return jsonify(result), status_code\n\n@bp.route('/create-grids', methods=['POST'])\ndef create_grs():\n \"\"\"Create the grid reference system using HTTP Post method.\"\"\"\n form = GridRefSysForm()\n\n args = request.get_json()\n\n errors = form.validate(args)\n\n if errors:\n return errors, 400\n\n cubes, status = CubeController.create_grs_schema(**args)\n\n return cubes, status\n\n\n@bp.route('/list-periods', methods=['POST'])\ndef list_periods():\n \"\"\"List data cube periods.\n\n The user must provide the following query-string parameters:\n - schema: Temporal Schema\n - step: Temporal Step\n - start_date: Start offset\n - last_date: End date offset\n \"\"\"\n parser = PeriodForm()\n\n args = request.get_json()\n\n errors = parser.validate(args)\n\n if errors:\n return errors, 400\n\n return CubeController.generate_periods(**args)\n\n\n@bp.route('/composite-functions', methods=['GET'])\ndef list_composite_functions():\n message, status_code = CubeController.list_composite_functions()\n\n return jsonify(message), status_code","sub_path":"cube_builder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463785660","text":"import os, pefile\r\ncurrent = \"./\"\r\nfiles = os.listdir(current)\r\ncount = 0\r\nfor file in files:\r\n\tif (file[3] == \"u\"):\r\n\t\ttry:\r\n\t\t\tppath = os.path.join(current, file)\r\n\t\t\tpe = pefile.PE(ppath)\r\n\t\texcept:\r\n\t\t\tos.remove(file)\r\n\t\tcount = count + 
1\r\n\t\tprint(count)","sub_path":"pefilter.py","file_name":"pefilter.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"462451307","text":"'''\nCreated on May 16, 2014\n\n@author: alex\n'''\n\n\nimport random\nimport unittest\n\nfrom des.util import Ordered_queue\nfrom des_test.des_test_abc import DesTestABC\n\n\nQUEUE_LENGTH = 1000\n\nclass Test_ordered_queue(DesTestABC):\n def setUp(self):\n super(Test_ordered_queue, self).setUp()\n self.queue = Ordered_queue()\n\n def tearDown(self):\n super(Test_ordered_queue, self).tearDown()\n\n def test_insert_pop(self):\n for i in range(1, QUEUE_LENGTH + 1):\n self.queue.insert(i * random.random())\n\n last_obj = self.queue.pop()\n\n i = QUEUE_LENGTH - 2\n while i > 0:\n obj = self.queue.pop()\n\n self.assertTrue(obj >= last_obj, \"The most recent obj from pop() \\\n is smaller than the one from the previus pop()\")\n self.assertEqual(i,\n len(self.queue),\n \"Queue does not have the expected length\")\n\n i -= 1\n\nif __name__ == \"__main__\":\n # import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n\n\ndef suite():\n return unittest.makeSuite(Test_ordered_queue)\n","sub_path":"src/des_test/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"113435961","text":"from data import database\n\n\n# ---\n# Module for inserting into the database\n# ---\n\n\n# Save account if it's not already in the database\ndef insert(screenname):\n print(\"Saving account for listening: \" + screenname)\n account = database.accounts.find_one({\"screenname\": screenname})\n if account is None:\n database.accounts.save({\"screenname\": screenname, \"priority\": 1})\n","sub_path":"data/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"547318802","text":"import re\n\ndata=[]\nlabel=[]\n\nf=open('collection','r')\ncontent=f.read()\ncontent=content.split('\\n')\nfor i in range(0,len(content)-1):\n\tline=content[i].split(',')\n\tline = [float(j) for j in line]\n\tdata.append(line[1:-1])\n\tlabel.append(line[-1])\n\nfrom sklearn.preprocessing import scale\ndata = scale(data,copy = 'False')\n\nfrom sklearn import tree\nfrom sklearn.model_selection import cross_val_score\n\n\ndef entro(x,y):\n\tclf = tree.DecisionTreeClassifier(random_state = 0, criterion = \"entropy\",max_depth = x,min_weight_fraction_leaf = y) \n\tclf = clf.fit(data,label)\n\tout = cross_val_score(clf,data,label,cv = 10,scoring = \"accuracy\")\n\tprint(\"max_depth = \",x,\"min weight frac = \",y,\"accu : \",out.mean())\ndef gini(x,y):\n\tclf = tree.DecisionTreeClassifier(random_state = 0, criterion = \"gini\",max_depth = x,min_weight_fraction_leaf = y) \n\tclf = clf.fit(data,label)\n\tout = cross_val_score(clf,data,label,cv = 10,scoring = \"accuracy\")\n\tprint(\"max_depth = \",x,\"min weight frac = \",y,\"accu : \",out.mean())\n\nprint(\"1 entropy:\")\nclf = tree.DecisionTreeClassifier(random_state = 0, criterion = \"entropy\") \nclf = clf.fit(data,label)\nout = cross_val_score(clf,data,label,cv = 10,scoring = \"accuracy\")\nprint(out.mean())\nlist1 = [5,10,20]\nlist2 = [0.1,0.15,0.2,0.3]\nfor i in list1:\n\tfor j in list2:\n\t\tentro(i,j)\n\nprint(\"2 gini:\")\nclf = tree.DecisionTreeClassifier(random_state = 0, criterion = \"entropy\") \nclf 
= clf.fit(data,label)\nout = cross_val_score(clf,data,label,cv = 10,scoring = \"accuracy\")\nprint(out.mean())\nfor i in list1:\n\tfor j in list2:\n\t\tgini(i,j)\n","sub_path":"dicision_tree.py","file_name":"dicision_tree.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"403899200","text":"import os\nimport sys\nimport glob\nimport argparse\nimport re\nimport shutil\n\ndirCalled = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(dirCalled))\nfrom open import FileOpener\n\n\nclass LatexCompiler(object):\n\n def __init__(self, tex=None,\n batch=False, shell=False, twice=False, fully=False, keep_aux=False, clear=False,\n view=False, compile=True, bibtex=False, luatex=False,\n index=False, language='korean', komkindex=False, index_style='kotex.ist',\n bookmark_index=False, bookmark_python=False, \n final=False, draft=False, python=False, asy=False):\n \n self.tex = tex\n self.batch_bool = batch\n self.shell_bool = shell\n self.twice_bool = twice\n self.fully_bool = fully\n self.keep_aux_bool = keep_aux\n self.clear_bool = clear\n self.view_bool = view\n self.compile_bool = compile\n self.bibtex_bool = bibtex\n self.luatex_bool = luatex\n self.index_bool = index\n self.lang = language\n self.komkindex_bool = komkindex\n self.index_style = index_style\n self.bm_index_bool = bookmark_index\n self.bm_python_bool = bookmark_python\n self.final_bool = final\n self.draft_bool = draft\n self.python_bool = python\n self.asy_bool = asy\n\n\n def get_ready(self):\n\n if self.luatex_bool:\n self.compiler = 'lualatex.exe'\n else:\n self.compiler = 'xelatex.exe'\n\n # Compile mode\n if self.batch_bool or self.fully_bool:\n self.compile_mode = '-interaction=batchmode '\n else:\n self.compile_mode = '-synctex=1 '\n if self.shell_bool:\n self.compile_mode += '-shell-escape'\n\n # language by which to sort index\n index_modules = {\n 'eng': 'lang/english/utf8-lang ',\n 'fre': 'lang/french/utf8-lang ',\n 'ger': 'lang/german/din5007-utf8-lang ',\n 'ita': 'lang/italian/utf8-lang ',\n 'kor': 'lang/korean/utf8-lang ',\n 'rus': 'lang/russian/utf8-lang ',\n 'spa': 'lang/spanish/modern-utf8-lang '\n }\n if os.path.splitext(self.index_style)[1] == '.xdy':\n self.xindy = self.index_style\n else:\n try:\n self.xindy = index_modules[self.lang[:3].lower()]\n except:\n self.xindy = index_modules['kor']\n\n if self.tex is not None: \n basename = os.path.basename(self.tex)\n filename = os.path.splitext(basename)[0]\n self.tex = filename + '.tex'\n self.aux = filename + '.aux'\n self.idx = filename + '.idx'\n self.ind = filename + '.ind'\n self.pdf = filename + '.pdf'\n self.py = filename + '.pytxcode'\n self.asy = filename + '-*.asy'\n if not os.path.exists(self.tex): \n print('%s is not found.' %(self.tex))\n self.tex = None\n\n\n def parse_args(self, argv=None):\n\n example = '''examples:\n ltx.py -b -s foo.xxx\n Any filename extension is ignored.\n foo.tex is compiled in batch mode and shell commands are allowed during compilation.\n ltx.py -l foo\n lualatex is used instead of xelatex. \n ltx.py -w -i foo\n foo.tex is compiled twice and index entries (foo.idx) are sorted by texindy in between.\n ltx.py -i -L french -n foo\n foo.idx is sorted by french without compilation.\n ltx.py -k -I foo.ist foo\n foo.idx is sorted by komkindex instead of texindy with foo.ist after a compilation.\n ltx.py -i -m foo\n foo.ind is altered so that index entries are added as bookmarks. 
\n Use \"-p\" to bookmark ones from python docstrings.\n ltx.py -f -a foo\n If foo.idx exists, foo.tex is compiled four times and foo.idx is sorted in between.\n Otherwise, it is compiled three times. \n Without \"-a\", every auxiliary file is deleted after compilation is completed. \n ltx.py -B foo\n Bibtex runs after a compilation.\n ltx.py -P foo\n Pythontex runs after a compilation.\n ltx.py -c\n Auxiliary files are cleared.\n ltx.py -F/-D foo\n These options are available only with the hzguide latex class.\n See https://hoze.tistory.com/1598\n '''\n\n parser = argparse.ArgumentParser(\n epilog = example,\n formatter_class = argparse.RawDescriptionHelpFormatter,\n description = 'Convert a TeX file to PDF using XeLaTeX or LuaLaTeX.'\n )\n parser.add_argument(\n 'tex',\n type = str,\n nargs = '?',\n help = 'Specify a TeX file.'\n )\n parser.add_argument(\n '-b',\n dest = 'batch',\n action = 'store_true',\n default = False,\n help = 'Do not halt even with syntax errors. (batch-mode)'\n )\n parser.add_argument(\n '-s',\n dest = 'shell',\n action = 'store_true',\n default = False,\n help = 'Allow an external program to run during a XeLaTeX run. (shell-escape)'\n )\n parser.add_argument(\n '-w',\n dest = 'twice',\n action = 'store_true',\n default = False,\n help = 'Compile twice.'\n )\n parser.add_argument(\n '-f',\n dest = 'fully',\n action = 'store_true',\n default = False,\n help = 'Compile fully.'\n )\n parser.add_argument(\n '-v',\n dest = 'view',\n action = 'store_true',\n default = False,\n help = 'Open the resulting PDF file to view.'\n )\n parser.add_argument(\n '-n',\n dest = 'compile',\n action = 'store_false',\n default = True,\n help = 'Pass over compilation but do other processes such as index sorting.'\n )\n parser.add_argument(\n '-i',\n dest = 'index',\n action = 'store_true',\n default = False,\n help = 'Sort index using TeXindy.'\n )\n parser.add_argument(\n '-l',\n dest = 'luatex',\n action = 'store_true',\n default = False,\n help = 'Use LuaLaTeX instead of XeLaTeX.'\n )\n parser.add_argument(\n '-L',\n dest = 'language',\n default = 'korean',\n help = 'Specify a language to sort index entries. For example, \\\"german\\\" or \\\"ger\\\" for German. The default is \\\"korean\\\".'\n )\n parser.add_argument(\n '-k',\n dest = 'komkindex',\n action = 'store_true',\n default = False,\n help = 'Use komkindex instead of TeXindy.'\n )\n parser.add_argument(\n '-I',\n dest = 'index_style',\n default = 'kotex.ist',\n help = 'Specify an index style for komkindex or texindy. The dafault is kotex.ist.'\n )\n parser.add_argument(\n '-a',\n dest = 'keep_aux',\n action = 'store_true',\n default = False,\n help = 'Keep auxiliary files after a full compilation. Without this option, they are altogether deleted.' \n )\n parser.add_argument(\n '-m',\n dest = 'bookmark_index',\n action = 'store_true',\n default = False,\n help = 'Bookmark index entries. This option is available only with -f or -i options. This feature does not support komkindex.'\n )\n parser.add_argument(\n '-p',\n dest = 'bookmark_python',\n action = 'store_true',\n default = False,\n help = 'Bookmark index entries which are python functions extracted from docstrings. 
This option is available only with -f or -i options.'\n )\n parser.add_argument(\n '-c',\n dest = 'clear',\n action = 'store_true',\n default = False,\n help = 'Remove auxiliary files after compilation.'\n )\n parser.add_argument(\n '-B',\n dest = 'bibtex',\n action = 'store_true',\n default = False,\n help = 'Run bibtex.'\n )\n parser.add_argument(\n '-F',\n dest = 'final',\n action = 'store_true',\n default = False,\n help = 'Find \\\\FinalizerOff to replace it with \\\\FinalizerOn in the tex file.' \n )\n parser.add_argument(\n '-D',\n dest = 'draft',\n action = 'store_true',\n default = False,\n help = 'Find \\\\FinalizerON to replace it with \\\\FinalizerOff in the tex file.' \n )\n parser.add_argument(\n '-P',\n dest = 'python',\n action = 'store_true',\n default = False,\n help = 'Run pythontex.exe.' \n )\n parser.add_argument(\n '-A',\n dest = 'asy',\n action = 'store_true',\n default = False,\n help = 'Run asy.'\n )\n\n args = parser.parse_args(argv)\n\n if args.tex is not None:\n self.tex = args.tex\n self.batch_bool = args.batch\n self.shell_bool = args.shell\n self.twice_bool = args.twice\n self.fully_bool = args.fully\n self.keep_aux_bool = args.keep_aux\n self.clear_bool = args.clear\n self.view_bool = args.view\n self.compile_bool = args.compile\n self.bibtex_bool = args.bibtex\n self.luatex_bool = args.luatex\n self.index_bool = args.index\n self.lang = args.language\n self.komkindex_bool = args.komkindex\n self.index_style = args.index_style\n self.bm_index_bool = args.bookmark_index\n self.bm_python_bool = args.bookmark_python\n self.final_bool = args.final\n self.draft_bool = args.draft\n self.python_bool = args.python\n self.asy_bool = args.asy\n\n\n def compile_once(self, cmd_tex):\n\n os.system(cmd_tex)\n if self.bibtex_bool:\n self.run_bibtex()\n if self.index_bool:\n self.sort_index()\n if self.python_bool:\n self.pythontex() \n if self.asy_bool:\n self.asymptote()\n\n\n def compile_twice(self, cmd_tex):\n\n os.system(cmd_tex)\n if self.bibtex_bool:\n self.run_bibtex()\n if self.index_bool:\n self.sort_index()\n if self.python_bool:\n self.pythontex() \n if self.asy_bool:\n self.asymptote()\n os.system(cmd_tex) \n\n\n def compile_fully(self, cmd_tex):\n\n os.system(cmd_tex)\n if self.bibtex_bool:\n self.run_bibtex()\n if self.python_bool:\n self.pythontex()\n if self.asy_bool:\n self.asymptote()\n os.system(cmd_tex)\n self.sort_index() \n if os.path.exists(self.ind):\n os.system(cmd_tex)\n os.system(cmd_tex)\n if not self.keep_aux_bool:\n self.clear_aux()\n\n\n def run_bibtex(self): \n\n os.system('bibtex.exe %s' %(self.aux))\n\n\n def sort_index(self):\n\n if not os.path.exists(self.idx):\n print('%s is not found' % (self.idx))\n return\n if self.komkindex_bool:\n cmd = 'komkindex.exe -s %s %s' %(self.index_style, self.idx)\n else:\n cmd = 'texindy.exe --module %s %s' %(self.xindy, self.idx) \n os.system(cmd) \n if self.bm_index_bool or self.bm_python_bool:\n self.bookmark_index()\n\n\n def bookmark_index(self):\n\n tmp = 't@mp.ind'\n if os.path.exists(tmp):\n os.remove(tmp)\n with open(tmp, mode = 'w', encoding = 'utf-8') as new_file, open(self.ind, mode = 'r', encoding = 'utf-8') as old_file:\n if self.bm_python_bool:\n for line in old_file.readlines():\n new_file.write(self.bookmark_item(line, r'\\\\item (.+?)\\(\\)'))\n else:\n for line in old_file.readlines():\n new_file.write(self.bookmark_item(line, r'\\\\item (.+?),')) \n os.remove(self.ind)\n os.rename(tmp, self.ind)\n\n\n def bookmark_item(self, line, pattern):\n\n entry = re.search(pattern, line)\n if 
entry: \n entry = entry.group(1).replace('\\\\spxentry{', '')\n page = re.findall(r'\\\\hyperpage\\{(\\d+)\\}', line)\n append = ''\n for i in range(len(page)):\n append += '\\t\\\\bookmark[level=2, page=%s]{%s}\\n' %(page[i], entry)\n line += append\n return line\n\n\n def clear_aux(self):\n\n extensions = (\"aux\", \"bbl\", \"blg\", \"idx\", \"ilg\", \"ind\", \"loe\", \"lof\", \"log\", \"lop\", \"loq\", \"lot\", \"minted*\", \"mw\", \"nav\", \"out\", \"pre\", \"pyg.lst\", \"pyg.sty\", \"pytxcode\", \"synctex*\", \"snm\", \"toc\", \"tmp\", \"upa\", \"upb\", \"vrb\")\n for ext in extensions:\n fnpattern = '*.' + ext\n for afile in glob.glob(fnpattern):\n os.remove(afile) \n for dir in glob.glob('pythontex-files-*'): \n shutil.rmtree(dir)\n\n\n def finalizer_on(self):\n\n with open(self.tex, mode = 'r', encoding = 'utf-8') as f:\n content = f.read()\n content = re.sub(\"\\\\\\\\FinalizerOff\", \"\\\\\\\\FinalizerOn\", content)\n with open(self.tex, mode = 'w', encoding = 'utf-8') as f:\n f.write(content)\n\n\n def finalizer_off(self):\n\n with open(self.tex, mode = 'r', encoding = 'utf-8') as f:\n content = f.read()\n content = re.sub(\"\\\\\\\\FinalizerOn\", \"\\\\\\\\FinalizerOff\", content)\n with open(self.tex, mode = 'w', encoding = 'utf-8') as f:\n f.write(content)\n\n\n def pythontex(self):\n\n os.system('pythontex.exe --runall=true %s' %(self.py))\n\n\n def asymptote(self):\n\n print('asy.exe %s' %(self.asy))\n os.system('asy.exe %s' %(self.asy))\n\n def compile(self):\n\n self.get_ready()\n if self.tex:\n if self.final_bool:\n self.finalizer_on()\n if self.draft_bool:\n self.finalizer_off()\n\n if not self.compile_bool:\n if self.tex:\n if self.index_bool or self.komkindex_bool:\n self.sort_index()\n if self.bibtex_bool:\n self.run_bibtex() \n else:\n if self.tex:\n cmd_tex = '%s %s \"%s\"' %(self.compiler, self.compile_mode, self.tex)\n if self.fully_bool:\n self.compile_fully(cmd_tex)\n elif self.twice_bool:\n self.compile_twice(cmd_tex)\n else:\n self.compile_once(cmd_tex) \n\n if self.clear_bool:\n self.clear_aux() \n\n if self.view_bool:\n if os.path.exists(self.pdf): \n opener = FileOpener()\n opener.OpenPDF(self.pdf)\n\n\nif __name__ == \"__main__\":\n texer = LatexCompiler()\n texer.parse_args()\n texer.compile()","sub_path":"ltx.py","file_name":"ltx.py","file_ext":"py","file_size_in_byte":15243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"213050486","text":"from django.shortcuts import redirect,render,get_object_or_404\r\nfrom django.http import HttpResponse\r\nfrom xml.dom import minidom\r\nimport urllib2, urllib, json\r\nimport unicodedata\r\n\r\ndef dashboard(request):\r\n\r\n\tbaseurl = \"https://query.yahooapis.com/v1/public/yql?\"\r\n\tyql_query = \"select item.forecast from weather.forecast where woeid=2295279 AND u='c'\"\r\n\tyql_url = baseurl + urllib.urlencode({'q':yql_query}) + \"&format=json\"\r\n\tresult = urllib2.urlopen(yql_url).read()\r\n\tdata = json.loads(result)\r\n\r\n\tforecast = data['query']['results']\r\n\r\n\r\n\tforecasts = []\r\n\tfor i in range(9):\r\n\t\tL = [1,2,3,4]\r\n\t\ta = forecast[u'channel'][i][u'item'][u'forecast'][u'date']\r\n\t\tb = forecast[u'channel'][i][u'item'][u'forecast'][u'low']\r\n\t\tc = forecast[u'channel'][i][u'item'][u'forecast'][u'high']\r\n\t\td = forecast[u'channel'][i][u'item'][u'forecast'][u'text']\r\n\t\tL[0] = unicodedata.normalize('NFKD', a).encode('ascii','ignore')\r\n\t\tL[1] = unicodedata.normalize('NFKD', b).encode('ascii','ignore')\r\n\t\tL[2] = 
unicodedata.normalize('NFKD', c).encode('ascii','ignore')\r\n\t\tL[3] = unicodedata.normalize('NFKD', d).encode('ascii','ignore')\r\n\t\tforecasts.append(L)\r\n\r\n\treturn render(request, 'pms/Dashboard_mist/pages/base.html', { 'forecasts' : forecasts })\r\n","sub_path":"pms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"324766961","text":"#encoding=utf8\nimport os\nimport sys\nimport random\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom pdnlp.toolkit.placeholder import Placeholder\n\n\ndef repeat(reader):\n \"\"\"Repeat a generator forever\"\"\"\n generator = reader()\n while True:\n try:\n yield next(generator)\n except StopIteration:\n generator = reader()\n yield next(generator)\n\n\ndef create_joint_generator(input_shape, generators, is_multi_task=True):\n\n def empty_output(input_shape, batch_size=1):\n results = []\n for i in range(len(input_shape)):\n if input_shape[i][1] == 'int32':\n dtype = np.int32\n if input_shape[i][1] == 'int64':\n dtype = np.int64\n if input_shape[i][1] == 'float32':\n dtype = np.float32\n if input_shape[i][1] == 'float64':\n dtype = np.float64\n shape = input_shape[i][0]\n shape[0] = batch_size\n pad_tensor = np.zeros(shape=shape, dtype=dtype)\n results.append(pad_tensor)\n return results\n\n def wrapper(): \n \"\"\"wrapper data\"\"\"\n generators_inst = [repeat(gen[0]) for gen in generators]\n\n generators_ratio = [gen[1] for gen in generators]\n weights = [ratio/sum(generators_ratio) for ratio in generators_ratio]\n run_task_id = range(len(generators))\n while True:\n idx = np.random.choice(run_task_id, p=weights)\n gen_results = next(generators_inst[idx])\n if not gen_results:\n break\n batch_size = gen_results[0].shape[0]\n results = empty_output(input_shape, batch_size)\n\n task_id_tensor = np.array([[idx]]).astype(\"int64\")\n results[0] = task_id_tensor\n for i in range(4):\n results[i+1] = gen_results[i]\n if idx == 0:\n # mrc batch\n results[5] = gen_results[4]\n results[6] = gen_results[5]\n elif idx == 1:\n # mlm batch\n results[7] = gen_results[4]\n results[8] = gen_results[5]\n elif idx == 2:\n # MNLI batch\n results[9] = gen_results[4]\n else:\n raise RuntimeError('Invalid task ID - {}'.format(idx))\n # idx stands for the task index\n yield results\n\n return wrapper\n\n\ndef create_reader(reader_name, input_shape, is_multi_task, *gens):\n \"\"\"\n build reader for multi_task_learning\n \"\"\"\n placeholder = Placeholder(input_shape)\n pyreader, model_inputs = placeholder.build(capacity=100, reader_name=reader_name)\n joint_generator = create_joint_generator(input_shape, gens[0], is_multi_task=is_multi_task)\n\n return joint_generator, pyreader, model_inputs\n\n","sub_path":"PaddleNLP/Research/MRQA2019-D-NET/server/bert_server/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"607299035","text":"import asyncio\nimport db\nfrom elasticsearch import Elasticsearch, exceptions\nimport pdb\n\ntreads = 40\n\n\nasync def essayindex(input_q, db_client):\n elastic = Elasticsearch([{\"host\": \"localhost\", \"port\": 9200}])\n while input_q.qsize():\n _id = await input_q.get()\n # Get data from database and put into elastic\n try:\n async with db_client.acquire() as conn:\n essays = await conn.execute(db.essay.select().where(db.essay.c.id == _id))\n async for essay in 
essays:\n data = {'name': essay.name, 'alias': essay.alias, 'text': essay.text}\n elastic.create(index=\"essays\", doc_type=\"essay\", id=_id, body=data)\n print('[ok]', essay.id)\n except Exception as e:\n print(type(e), e, '[database Exception]', _id)\n\n\nasync def crawler(input_q):\n async with db.create_engine(maxsize=treads, timeout=600, **db.connection) as db_client:\n jobs = [asyncio.ensure_future(essayindex(input_q, db_client)) for _ in range(treads)]\n await asyncio.gather(*jobs)\n\n\nasync def add_input_queue(input_q):\n elastic = Elasticsearch([{\"host\": \"localhost\", \"port\": 9200}])\n elastic.indices.delete(index='essays', ignore=[400, 404])\n ids = await db.manual(\"select id from essays;\")\n print(len(ids))\n for i in ids:\n # try:\n # elastic.get(index=\"essays\", doc_type=\"essay\", id=i[0], stored_fields='_id')\n # except exceptions.NotFoundError:\n await input_q.put(int(i[0]))\n print(input_q.qsize())\n\n\ndef del_from_elastic():\n with open('bad_ids.txt', 'r', encoding='utf-8') as file:\n bad_ids = [int(x.strip()) for x in file]\n elastic = Elasticsearch([{\"host\": \"localhost\", \"port\": 9200}])\n pdb.set_trace()\n for i in bad_ids:\n elastic.delete(index='essays', doc_type=\"essay\", id=i)\n print(i)\n\n\nif __name__ == '__main__':\n # loop = asyncio.get_event_loop()\n # input_q = asyncio.Queue()\n # loop.run_until_complete(add_input_queue(input_q))\n # loop.run_until_complete(crawler(input_q))\n del_from_elastic()\n","sub_path":"dbessays.com/core/elastic_create_index.py","file_name":"elastic_create_index.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"166282952","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 22 14:27:07 2020\n\n@author: abiral\n\"\"\"\nimport time\nimport webbrowser, os\nimport tkinter as tk\nfrom tkinter import simpledialog\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.preprocessing import StandardScaler\nimport math\nfrom sklearn.neural_network import MLPClassifier\nimport numpy as np\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.metrics.cluster import adjusted_rand_score\n#from sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import f1_score\nfrom geopy.geocoders import Nominatim\nfrom datetime import datetime\n\nwhile 1:\n ROOT = tk.Tk()\n \n ROOT.withdraw()\n # the input dialog\n USER_INP = simpledialog.askstring(title=\"Mortgage Application\",\n prompt=\"Please enter the location of desired mortgage property:\")\n \n # check it out\n \n \n \n print(\"Please enter address\")\n address=USER_INP\n geolocator = Nominatim(user_agent=\"specify_your_app_name_here\")\n location = geolocator.geocode(address)\n print((location.latitude, location.longitude))\n \n #%% read dataset\n df = pd.read_csv(\"crime_ds.csv\")\n \n \n #%% Label Encoder\n lb_make = LabelEncoder()\n df['Weapon'].fillna('OTHER', inplace = True)\n df['Location 1'].fillna('(39.3460800000, -76.6806500000)', inplace = True)\n df['Neighborhood'].fillna('Arlington', inplace = True)\n df['Weapon_code'] = lb_make.fit_transform(df['Weapon'])\n #df['Location 1'] = lb_make.fit_transform(df['Location 1'])\n #df['Neighborhood'] = lb_make.fit_transform(df['Neighborhood'])\n \n nodes = df[\"Location 1\"]\n node=(location.latitude, location.longitude)\n nodes_t 
=tuple(zip(nodes,nodes.index))\n dist_arr = []\n MAX = 100000\n firstmin = MAX\n secmin = MAX\n thirdmin = MAX\n \n for i in range(2000):\n temp = eval(nodes_t[i][0])\n dist = math.sqrt((temp[0]-node[0])**2+(temp[1]-node[1])**2)\n #print(dist)\n dist_arr.append(dist)\n \n for i in range(2000): \n if dist_arr[i] < firstmin: \n thirdmin = secmin \n secmin = firstmin \n firstmin = dist_arr[i] \n \n elif dist_arr[i] < secmin: \n thirdmin = secmin \n secmin = dist_arr[i] \n \n elif dist_arr[i] < thirdmin: \n thirdmin = dist_arr[i] \n #print(dist_arr)\n print(min(dist_arr), firstmin, secmin, thirdmin)\n index_min = dist_arr.index(firstmin)\n index_secmin = dist_arr.index(secmin)\n index_thirdmin =dist_arr.index(thirdmin)\n \n min_lat = eval(nodes_t[index_min][0])[0]\n min_lon = eval(nodes_t[index_min][0])[1]\n Dict = {'OTHER': 1, 'HANDS': 2, 'KNIFE': 4, 'FIREARM':6}\n index_attack_name = df['Weapon'][index_min] \n \n multiplier_1 = Dict.get(index_attack_name)\n multiplier_2 = df['Total Incidents'][index_min]\n \n point_1 = multiplier_1 * multiplier_2\n print(point_1)\n \n scaler = StandardScaler() \n a = np.loadtxt('Book2.csv',delimiter=',')\n X = a[0:15,:-1]\n scaler.fit(X)\n X = scaler.transform(X)\n y = a[0:15,-1]\n \n clf = MLPClassifier(solver='lbfgs', alpha=1e-5,\n hidden_layer_sizes=(150, ), random_state=1)\n clf.fit(X, y)\n \n test = a[12:13,:-1]\n year = 2020\n a[12:13,:-1]=[min_lat, min_lon, year]\n test = scaler.transform(a[12:13, :-1])\n ans_1=clf.predict(test)\n \n a[12:13,:-1]=[min_lat, min_lon, year-1]\n test = scaler.transform(a[12:13, :-1])\n ans_2=clf.predict(test)\n \n a[12:13,:-1]=[min_lat, min_lon, year+5]\n test = scaler.transform(a[12:13, :-1])\n ans_3=clf.predict(test)\n print(ans_1,ans_2,ans_3)\n if ans_1 > ans_2:\n point_2 = -5\n elif ans_1 == ans_2:\n point_2 = 0\n elif ans_1 < ans_2:\n point_2 = 5\n \n print(point_1+point_2)\n \n first_min_addr=nodes_t[index_min][0]\n second_min_addr=nodes_t[index_secmin][0]\n third_min_addr=nodes_t[index_thirdmin][0]\n \n print(first_min_addr, second_min_addr, third_min_addr)\n \n first_min_addr=first_min_addr.split(',')\n first_min_addr[0]=float(first_min_addr[0][1:])\n first_min_addr[1]=float(first_min_addr[1][:-1])\n first_min_addr=tuple(first_min_addr)\n \n second_min_addr=second_min_addr.split(',')\n second_min_addr[0]=float(second_min_addr[0][1:])\n second_min_addr[1]=float(second_min_addr[1][:-1])\n second_min_addr=tuple(second_min_addr)\n \n third_min_addr=third_min_addr.split(',')\n third_min_addr[0]=float(third_min_addr[0][1:])\n third_min_addr[1]=float(third_min_addr[1][:-1])\n third_min_addr=tuple(third_min_addr)\n \n import gmplot\n import webbrowser, os\n \n #Set different latitude and longitude points\n latitude1, longitude1 = zip(*[\n first_min_addr])\n #declare the center of the map, and how much we want the map zoomed in\n \n gmap3 = gmplot.GoogleMapPlotter(latitude1[0], longitude1[0], 13)\n \n \n index_attack_name_first = df['Weapon'][index_min] \n \n multiplier_1_first = Dict.get(index_attack_name_first)\n multiplier_2_first = df['Total Incidents'][index_min]\n \n point_1_first = multiplier_1_first * multiplier_2_first\n print(point_1_first)\n \n \n index_attack_name_second = df['Weapon'][index_secmin] \n \n multiplier_1_second = Dict.get(index_attack_name_second)\n multiplier_2_second = df['Total Incidents'][index_secmin]\n \n point_1_second = multiplier_1_second * multiplier_2_second\n print(point_1_second)\n \n \n index_attack_name_third = df['Weapon'][index_thirdmin] \n \n multiplier_1_third = 
Dict.get(index_attack_name_third)\n    multiplier_2_third = df['Total Incidents'][index_thirdmin]\n    \n    point_1_third = multiplier_1_third * multiplier_2_third\n    print(point_1_third)\n    \n    color1='#FF0000'; color2='#FF0000';color3='#FF0000';\n    if point_1_first > point_1_second and point_1_first > point_1_third:\n        color1='#008000'\n    elif point_1_second > point_1_first and point_1_second > point_1_third:\n        color2='#008000'\n    elif point_1_third > point_1_second and point_1_third > point_1_first:\n        color3='#008000'\n    if point_1_first==point_1_second==point_1_third:\n        color1='#008000'; color2='#008000'; color3='#008000';\n    \n    # Scatter map\n    gmap3.scatter( latitude1, longitude1, color1,size = 200, marker = False ) \n    \n    latitude1, longitude1 = zip(*[\n    second_min_addr]) \n    #gmap3 = gmplot.GoogleMapPlotter(latitude1[0], longitude1[0], 13)\n    # Scatter map\n    gmap3.scatter( latitude1, longitude1, color2,size = 200, marker = False )\n    \n    latitude1, longitude1 = zip(*[\n    third_min_addr]) \n    \n    # Scatter map\n    gmap3.scatter( latitude1, longitude1, color3,size = 200, marker = False )\n    # Plot method Draw a line in between given coordinates\n    \n    \n    #Your Google_API_Key\n    gmap3.apikey = 'AIzaSyDmllc9JkG8RRgTiriuzx-mwIziEyrzB7c'\n    # save it to html\n    gmap3.draw(\"index.html\")\n    webbrowser.open(\"index.html\")\n    time.sleep(5)\n    \n    ROOT1 = tk.Tk()\n    \n    ROOT1.withdraw()\n    # the input dialog\n    USER_INP = simpledialog.askstring(title=\"Streetview\",\n                                      prompt=\"Do you want to view streetview of the best location for you?\")\n    \n    # check it out\n    if USER_INP=='yes':\n        \n        if point_1_first > point_1_second and point_1_first > point_1_third:\n            link=first_min_addr\n        elif point_1_second > point_1_first and point_1_second > point_1_third:\n            link=second_min_addr\n        elif point_1_third > point_1_second and point_1_third > point_1_first:\n            link=third_min_addr\n        if point_1_first==point_1_second==point_1_third:\n            link=first_min_addr\n        website='https://www.google.com/maps?q&layer=c&cbll={},{}'.format(link[0],link[1])\n        webbrowser.open(website)\n        # Download the helper library from https://www.twilio.com/docs/python/install\n        \n        from twilio.rest import Client\n        \n        # Your Account Sid and Auth Token from twilio.com/console\n        \n        # DANGER! This is insecure. See http://twil.io/secure\n        \n        account_sid = 'AC6228a8017bc18b85006e8db2bb751402'\n        \n        auth_token = '528cc84fbf17018c8ebf8f0c29bcce43'\n        \n        client = Client(account_sid, auth_token)\n        \n        \n        message = client.messages.create(\n        \n        body=website,\n        \n        from_='+16237772033',\n        to='+14193221625'\n        )\n\n        print(message.sid)\n        \n    if USER_INP=='no':\n        break\n","sub_path":"classifyCrime.py","file_name":"classifyCrime.py","file_ext":"py","file_size_in_byte":8801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"263126211","text":"# -*- coding: utf-8 -*-\n# pyweb-ko\n# bbcode.py\n# Copyright (C) 2011 mimu\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
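The crime script above keeps the three smallest distances with hand-rolled firstmin/secmin/thirdmin bookkeeping. For reference, heapq.nsmallest performs the same selection in one call; a minimal sketch under that assumption (the list here is illustrative, not the script's data):

```python
import heapq

# Stand-in for the script's dist_arr; any list of distances works.
dist_arr = [4.2, 0.9, 3.1, 0.4, 2.7]

# Pairing each distance with its index also recovers the positions that the
# script later uses to look up the matching rows of the crime dataset.
three_nearest = heapq.nsmallest(3, enumerate(dist_arr), key=lambda pair: pair[1])
print(three_nearest)  # [(3, 0.4), (1, 0.9), (4, 2.7)]
```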
If not, see <http://www.gnu.org/licenses/>.\nfrom google.appengine.ext import webapp \nimport re\nregister = webapp.template.create_template_register()\n\ndef addUrlPrefix(matchobj): \n    if matchobj.group(1).find(\"http\") != 0:\n        url = \"http://\" + matchobj.group(1)\n    else:\n        url = matchobj.group(1)\n    \n    return '<a href=\"%s\">%s</a>' % ( url, matchobj.group(2) )\n\ndef addImagePrefix(matchobj): \n    if matchobj.group(1).find(\"http\") != 0:\n        url = \"http://\" + matchobj.group(1)\n    else:\n        url = matchobj.group(1)\n    \n    return '<img src=\"%s\" />' % ( url )\n\ndef bbcode(value):\n    single_tags = {'p':'p','br':'br /','i':'em','strong':'strong','b':'strong','blockquote':'blockquote','h3':'h3','h4':'h4','h5':'h5','h6':'h6'}\n    \n    value = value.replace(\"<\", \"&lt;\")\n    value = value.replace(\">\", \"&gt;\")\n    \n    value = value.replace(\"\\n\", \"<br />\")\n    \n    p = re.compile(\"\\[code.*?\\](.*?)\\[\\/code\\]+\", re.S)\n    result = p.findall( value )\n    for c in result:\n        nc = c.replace(\"<br />\", \"\\n\")\n        value = value.replace( c, nc )\n    \n    for bbcode, html in single_tags.items(): \n        value = value.replace(\"[%s]\"%bbcode, \"<%s>\"%html)\n        value = value.replace(\"[/%s]\"%bbcode, \"</%s>\"%html)\n    \n    value = re.sub( '\\[img.*?\\](.*?)\\[\\/img\\]', addImagePrefix, value )\n    \n    value = re.sub( '\\[url=(.*?)\\](.*?)\\[\\/url\\]', addUrlPrefix, value )\n    \n    p = re.compile( '\\[code.*?class=(.*?)\\](.*?)\\[\\/code\\]', re.DOTALL )\n    value = p.sub( '<pre><code class=\\1>\\2</code></pre>', value )\n    \n    return value\n\nregister.filter(bbcode)\n","sub_path":"filters/bbcode.py","file_name":"bbcode.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"612178181","text":"\n\nfrom xai.brain.wordbase.verbs._keyboard import _KEYBOARD\n\n#class header\nclass _KEYBOARDS(_KEYBOARD, ):\n\tdef __init__(self,): \n\t\t_KEYBOARD.__init__(self)\n\t\tself.name = \"KEYBOARDS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"keyboard\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_keyboards.py","file_name":"_keyboards.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"394221018","text":"\"\"\" test_retorrent.test_relist \"\"\"\n\nimport unittest\n\nfrom retorrent.relist import lowercase_non_checksums, replace_singleitem, replace_doubleitem\n\nclass TestLowerCasing(unittest.TestCase): # pylint: disable=too-many-public-methods\n\n    def test_lowercasing(self):\n        self.assertEqual(lowercase_non_checksums(['foo', 'bar', 'baz']), ['foo', 'bar', 'baz'])\n        self.assertEqual(lowercase_non_checksums(['FOO', 'BAR', 'BAZ']), ['foo', 'bar', 'baz'])\n        self.assertEqual(lowercase_non_checksums(['foo', 'BAR', 'baz']), ['foo', 'bar', 'baz'])\n\n        self.assertEqual(lowercase_non_checksums(['foo', 'AAAAAAAA', 'baz']),\n                         ['foo', 'AAAAAAAA', 'baz'])\n\n        self.assertEqual(lowercase_non_checksums(['foo', '[AAAAAAAA]', 'baz']),\n                         ['foo', '[AAAAAAAA]', 'baz'])\n\nclass TestReplacement(unittest.TestCase): # pylint: disable=too-many-public-methods\n    _input = [ str(i) for i in range(6) ]\n\n    def test_replace_singleitem(self):\n        expected = ['0', '1', 'fox', '3', '4', '5']\n        self.assertEqual(replace_singleitem(self._input, 2, 'fox'), expected)\n\n    def test_replace_doubleitem(self):\n        expected = ['0', '1', 'fox', '4', '5']\n        self.assertEqual(replace_doubleitem(self._input, 2, 'fox'), expected)\n\n    def test_single_item_list(self):\n        expected = ['fox']\n        self.assertEqual(replace_singleitem(['asdf'], 0, 'fox'), expected)\n\n    def test_multireplace_near_bounds(self):\n        expected = ['0', '1', '2', '3', '4', 'fox' ]\n        self.assertEqual(replace_doubleitem(self._input, 5, 'fox'), expected)\n\n        expected = ['fox', '2', '3', '4', '5' ]\n        self.assertEqual(replace_doubleitem(self._input, 0, 'fox'), expected)\n","sub_path":"tests/test_retorrent/test_relist.py","file_name":"test_relist.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189811379","text":"from rest_framework import generics\nfrom rest_framework.filters import OrderingFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom .models import Card, Dish\nfrom .serializers import CardsSerializer, DishSerializer\n\n\nclass CardList(generics.ListAPIView):\n    \"\"\"\n    List of all cards\n    - pagination defined in settings.py (4)\n    - displays only Cards which contain at least one dish\n    - ordered by card id (default); other options are:\n        - name\n        - number of associated dishes\n    \"\"\"\n    queryset = Card.objects.filter(dishes_count__gt=0)\n    serializer_class = CardsSerializer\n    filter_backends = (DjangoFilterBackend, OrderingFilter)\n    ordering_fields = ('name', 'dishes_count')\n    ordering = ('id',)\n\n\nclass CardDetails(generics.RetrieveAPIView):\n    \"\"\"\n    Details of chosen Card\n    \"\"\"\n    queryset = Card.objects.all()\n    serializer_class = CardsSerializer\n\n\nclass 
DishDetails(generics.RetrieveAPIView):\n \"\"\"\n Details of dish\n \"\"\"\n queryset = Dish.objects.all()\n serializer_class = DishSerializer\n","sub_path":"src/menu/views_api.py","file_name":"views_api.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"394055572","text":"\nclass Solution(object):\n def alphabetBoardPath(self, target: str) -> str :\n m = {c : [i // 5, i % 5] for i, c in enumerate(\"abcdefghijklmnopqrstuvwxyz\")}\n print(m)\n x0, y0 = 0, 0\n res = []\n for c in target:\n x, y = m[c]\n if y < y0:\n res.append('L' * (y0-y))\n if x < x0:\n res.append('U' * (x0 - x))\n if x > x0:\n res.append('D' * (x - x0))\n if y > y0:\n res.append('R' * (y - y0))\n res.append('!')\n x0, y0 = x, y\n return \"\".join(res)\n\n\ntarget = \"leet\"\nres = Solution().alphabetBoardPath(target)\nprint(res)","sub_path":"string/1138_alphabet_board_path/1138_alphabet_board_path.py","file_name":"1138_alphabet_board_path.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"377166265","text":"import os\nfrom flask import Flask\nfrom webstore import default_settings as settings\n\napp = Flask(__name__)\napp.config.from_object(settings)\napp.config.from_envvar('WEBSTORE_SETTINGS', silent=True)\n# parent directory\nhere = os.path.dirname(os.path.abspath( __file__ ))\nconfig_path = os.path.join(os.path.dirname(here), 'settings_local.py')\nif os.path.exists(config_path):\n app.config.from_pyfile(config_path)\n\n","sub_path":"webstore/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67045553","text":"from graph import Vertex, Graph, VertexExists\nimport pytest\n\n\ndef test_add_vertex(build_graph):\n graph = build_graph\n key_value = []\n\n for key, item in graph.graph_dict.items():\n key_value.append((key, item.value))\n key_value.sort()\n\n assert key_value == [(\"vertex_1\", \"vertex_1\"), (\"vertex_2\", \"vertex_2\"),\n (\"vertex_3\", \"vertex_3\"), (\"vertex_4\", \"vertex_4\")]\n\n\ndef test_error_when_try_add_vertex_with_existing_value():\n graph = Graph()\n graph.add_vertex(\"test\")\n with pytest.raises(VertexExists):\n graph.add_vertex(\"test\")\n\n\ndef test_is_directed(build_graph):\n graph = build_graph\n\n if graph.directed is False:\n cases_for_directed = [(\"vertex_1\", 0), (\"vertex_2\", 1)]\n else:\n cases_for_directed = [(\"vertex_2\", 1)]\n\n vertex = graph.get_vertex(\"vertex_3\")\n edges = []\n\n for key, item in vertex.edges.items():\n edges.append((key.value, item))\n\n assert edges == cases_for_directed\n\n\ndef test_get_vertex(build_graph):\n graph = build_graph\n vertex = graph.get_vertex(\"vertex_1\")\n\n assert vertex.value == \"vertex_1\"\n\n\ndef test_vertex():\n vertex_1 = Vertex(1)\n vertex_2 = Vertex(2)\n\n assert vertex_1.value == 1\n\n assert vertex_1.get_edges() == []\n vertex_1.add_edge(vertex_2)\n assert vertex_1.edges == {vertex_2: 0}\n assert vertex_1.get_edges() == [vertex_2]\n\n vertex_1.edges = {}\n vertex_1.add_edge(vertex_2, 1)\n assert vertex_1.edges == {vertex_2: 1}\n\n\ndef test_print(build_graph, capsys):\n graph = build_graph\n graph.print()\n captured = capsys.readouterr()\n assert \"vertex_1\" in 
captured.out\n\n\n","sub_path":"test/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"241741498","text":"import turtle\nimport math\nimport random\nimport platform\nimport os\n# Determine platform type\n\nplt_list = [\"Linux\", \"Windows\", \"Darwin\"]\nplt = platform.system()\nif plt == \"Windows\":\n    import winsound\n\n\n# Set up the screen\nwn = turtle.Screen()\nwn.bgcolor(\"black\")\nwn.title(\"Space Monsters\")\nwn.bgpic(\"space_monsters_background.gif\")\n\n# Register the shapes\nturtle.register_shape(\"monster.gif\")\nturtle.register_shape(\"player.gif\")\n\nhighest = open(\"highscore.txt\", \"r\")\nlast_score = highest.readline()\nhighest.close()\n# Draw border\nborder_pen = turtle.Turtle()\nborder_pen.speed(0)\nborder_pen.color(\"white\")\nborder_pen.penup()\nborder_pen.setposition(-300, -300)\nborder_pen.pendown()\nborder_pen.pensize(3)\nfor side in range(4):\n    border_pen.fd(600)\n    border_pen.lt(90)\nborder_pen.hideturtle()\n\n# Set the score to 0\nscore = 0\n\n# Draw the score\nscore_pen = turtle.Turtle()\nscore_pen.speed(0)\nscore_pen.color(\"white\")\nscore_pen.penup()\nscore_pen.setposition(-290, 300)\nscorestring = \"Score: %s\" % score\nscore_pen.write(scorestring, False, align=\"left\", font=(\"Arial\", 14, \"normal\"))\n\n# Credits and gameover pen\ngameover_pen = turtle.Turtle()\ngameover_pen.speed(0)\ngameover_pen.color(\"orange\")\ngameover_pen.penup()\ngameover_pen.setposition(75, -320)\ncreditstring = \"Developed by Rebahozkoc\"\ngameover_pen.write(creditstring, False, align=\"left\", font=(\"Arial\", 10, \"bold\"))\ngameover_pen.hideturtle()\n\n\n# Create the player turtle\nplayer = turtle.Turtle()\nplayer.color(\"blue\")\nplayer.shape(\"player.gif\")\nplayer.penup()\nplayer.speed(0)\nplayer.setposition(0, -250)\nplayer.setheading(90)\nplayerspeed = 15\n\n\n# Create the player's bullet\nbullet = turtle.Turtle()\nbullet.color(\"yellow\")\nbullet.shape(\"triangle\")\nbullet.penup()\nbullet.speed(0)\nbullet.setheading(90)\nbullet.shapesize(0.5, 0.5)\nbullet.hideturtle()\nbulletspeed = 20\n\n# Define bullet state\n# ready - ready to fire\n# fire - bullet is firing\nbulletstate = \"ready\"\n\n\n# Create enemy\n\n\nnumber_of_enemies = 5\nenemies = []\nfor i in range(number_of_enemies):\n    enemies.append(turtle.Turtle())\n\nfor enemy in enemies:\n    enemy.color(\"red\")\n    enemy.shape(\"monster.gif\")\n    enemy.penup()\n    enemy.speed(0)\n    x = random.randint(-200, 200)\n    y = random.randint(100, 250)\n    enemy.setposition(x, y)\n\nenemyspeed = 2\n# Move the player to the left and right\n\n\ndef move_left():\n    x_c = player.xcor()\n    x_c = x_c - playerspeed\n    if x_c < -285:\n        x_c = -285\n    player.setx(x_c)\n\n\ndef move_right():\n    x_c = player.xcor()\n    x_c = x_c + playerspeed\n    if x_c > +285:\n        x_c = +285\n    player.setx(x_c)\n\n\n# Declare bulletstate as a global if it needs to be changed\ndef fire_bullet():\n    global bulletstate\n    # Move the bullet to just above the player\n    if bulletstate == \"ready\":\n        bulletstate = \"fire\"\n        x_c = player.xcor()\n        y_c = player.ycor() + 10\n        bullet.setposition(x_c, y_c)\n        bullet.showturtle()\n        if plt == plt_list[0]:\n            os.system(\"aplay laser.wav&\")\n        if plt == plt_list[1]:\n            winsound.PlaySound(\"laser.wav\", winsound.SND_ASYNC)\n        if plt == plt_list[2]:\n            os.system(\"afplay laser.wav&\")\n\n\ndef isCollision(t1, t2):\n    distance = math.sqrt(math.pow(t1.xcor()-t2.xcor(), 2)+math.pow(t1.ycor()-t2.ycor(), 2))\n    if distance < 15:\n        return True\n    else:\n        return False\n\n\nwn.onkey(lambda: move_left(), \"Left\")\nwn.onkey(lambda: move_right(), \"Right\")\nwn.onkey(lambda: fire_bullet(), \"space\")\n\nwn.listen()\n\nendcheck = 1\nbordercheck = 0\nspeedchanger1 = 1\nspeedchanger2 = 1\n# main game loop\nwhile endcheck:\n\n    if score == 3000 and speedchanger1:\n        enemyspeed = 3\n        speedchanger1 = 0\n    if score == 6000 and not speedchanger1:\n        enemyspeed = 4\n        speedchanger1 = 1\n    if score == 20000 and speedchanger2:\n        enemyspeed = 5\n        speedchanger2 = 0\n    if score == 25000 and not speedchanger2:\n        enemyspeed = 7\n        speedchanger2 = 1\n    for enemy in enemies:\n        x = enemy.xcor()\n        x += enemyspeed\n        enemy.setx(x)\n        if enemy.xcor() > 285:\n            enemyspeed *= -1\n            for i in enemies:\n                y = i.ycor()\n                y -= 40\n                i.sety(y)\n        if enemy.xcor() < -285:\n            enemyspeed *= -1\n            for j in enemies:\n                y = j.ycor()\n                y -= 40\n                j.sety(y)\n        if enemy.ycor() < -270:\n            bordercheck = 1\n\n        # Move the enemy back and down\n        # Check for collision between the bullet and the enemy.\n        if isCollision(bullet, enemy):\n            bullet.hideturtle()\n            bulletstate = \"ready\"\n            bullet.setposition(0, -400)\n            x = random.randint(-200, 200)\n            y = random.randint(200, 250)\n            enemy.setposition(x, y)\n            if plt == plt_list[0]:\n                os.system(\"aplay explosion.wav&\")\n            if plt == plt_list[1]:\n                winsound.PlaySound(\"explosion.wav\", winsound.SND_ASYNC)\n            if plt == plt_list[2]:\n                os.system(\"afplay explosion.wav&\")\n            # Update the score\n            score += 1000\n            scorestring = \"Score: %s\" % score\n            score_pen.clear()\n            score_pen.write(scorestring, False, align=\"left\", font=(\"Arial\", 14, \"normal\"))\n\n        if isCollision(enemy, player) or bordercheck:\n            if plt == plt_list[0]:\n                os.system(\"aplay game_over.wav\")\n            if plt == plt_list[1]:\n                winsound.PlaySound(\"game_over.wav\", winsound.SND_ASYNC)\n            if plt == plt_list[2]:\n                os.system(\"afplay game_over.wav\")\n\n            if score > int(last_score):\n                new_score = open(\"highscore.txt\", \"w\")\n                new_score.write(str(score))\n                new_score.close()\n            player.hideturtle()\n            bullet.hideturtle()\n            for k in enemies:\n                k.hideturtle()\n            score_pen.clear()\n            score_pen.penup()\n            gameover_pen.clear()\n            gameover_pen.penup()\n            gameover_pen.setposition(0, 0)\n            creditstring = \" GAME OVER\\n Your Score:%s\\n Last High Score:%s\" % (score, last_score)\n            gameover_pen.write(creditstring, False, align=\"center\", font=(\"Arial\", 30, \"bold\"))\n            gameover_pen.hideturtle()\n            endcheck = 0\n            break\n\n    # Move the bullet\n    if bulletstate == \"fire\":\n        by = bullet.ycor()\n        by += bulletspeed\n        bullet.sety(by)\n        # Check to see if the bullet has gone to the top\n        if bullet.ycor() > 285:\n            bullet.hideturtle()\n            bulletstate = \"ready\"\n\n\nwn.mainloop()\n","sub_path":"SPACE_MONSTERS.py","file_name":"SPACE_MONSTERS.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"351731480","text":"import os\nimport time\nfrom datetime import datetime, timedelta\n\nfrom sqlalchemy import Column, Boolean, DateTime, String, Integer, func\nfrom sqlalchemy import create_engine\n\nfrom sqlalchemy.sql import text\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nBase = declarative_base()\n\n\nclass User(Base):\n    \"\"\" Represents the User table \"\"\"\n    __tablename__ = 'users'\n\n    username = Column(String(32), primary_key=True)\n    full_name = Column(String(32), nullable=False, index=True)\n    create_time = Column(DateTime, 
default=datetime.utcnow)\n    update_time = Column(DateTime, default=datetime.utcnow, index=True)\n\nclass Book(Base):\n    \"\"\" Represents the Books table \"\"\"\n    __tablename__ = 'books'\n\n    name = Column(String(32), primary_key=True, nullable=False)\n    author = Column(String(32), primary_key=True, nullable=False)\n    create_time = Column(DateTime, default=datetime.utcnow)\n\nclass Order(Base):\n    \"\"\" Represents the Orders table \"\"\"\n    __tablename__ = 'orders'\n\n    username = Column(String(128), primary_key=True, nullable=False)\n    bookname = Column(String(32), primary_key=True, nullable=False)\n    order_time = Column(DateTime, default=datetime.utcnow)\n    expiry_period_in_days = Column(Integer, default=30)\n\nclass SQLiteBackend(object):\n    \"\"\" The SQLite backend that manages database connections, sessions and bootstrapping. \"\"\"\n    def __init__(self, db_string):\n        self.engine = None\n        self.Session = sessionmaker(\n            autocommit=False,\n            expire_on_commit=False\n        )\n        self.setup_session(db_string)\n\n    def setup_session(self, db_string=None):\n        \"\"\" Setup the engine and session. If the engine is already setup, then return. \"\"\"\n        if self.engine:\n            return\n        self.engine = create_engine(db_string, echo=False, pool_recycle=3600)\n        self.Session.configure(bind=self.engine)\n\n    def reset(self):\n        \"\"\" Reset the entire database \"\"\"\n        Base.metadata.drop_all(bind=self.engine)\n        Base.metadata.create_all(bind=self.engine)\n\n    def ping(self):\n        \"\"\" Check to see if a connection to the database is possible \"\"\"\n        session = self.Session()\n        pong = session.execute('select 1').fetchall()\n        session.close()\n        return pong\n\n    def bootstrap(self):\n        \"\"\" Does bootstrapping i.e. creates database and tables.\n        Assumes no databases have been setup. Retries until connection is established. \"\"\"\n        connection = None\n        for i in range(10): # retries\n            try:\n                connection = self.engine.connect()\n                break\n            except:\n                print(\"DBServer is probably not up yet, Retrying ...\")\n                time.sleep(i * 5)\n                continue\n        if not connection:\n            raise Exception(\"Couldn't connect to DBServer even after retries!\")\n\n        Base.metadata.create_all(bind=self.engine)\n        connection.close()\n\nclass BookAccountingSystem(SQLiteBackend):\n    \"\"\" This class keeps account of all users and books and the orders made for book requests. \"\"\"\n    def __init__(self, db_url):\n        super(BookAccountingSystem, self).__init__(db_url)\n\n    def reset(self):\n        super(BookAccountingSystem, self).reset()\n\n    def create_user(self, username, full_name):\n        \"\"\" Inserts a new user into the database. \"\"\"\n        user = User(username=username, full_name=full_name)\n        session = self.Session()\n        session.add(user)\n        try:\n            session.commit()\n            return user\n        except IntegrityError:\n            session.rollback()\n            raise Exception(\"User exists\")\n        finally:\n            session.expunge_all()\n            session.close()\n\n    def get_user(self, username):\n        session = self.Session()\n        try:\n            user = session.query(User).filter_by(username=username).first()\n        except Exception as ex:\n            print(\"Error getting user, error={}\".format(str(ex)))\n        finally:\n            session.close()\n        return user\n\n    def create_author(self, name, author):\n        \"\"\" Inserts a new book and author into the database. 
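create_user above follows a session-per-operation shape: open a session, add, commit, roll back on IntegrityError, and always expunge and close. A stripped-down sketch of that pattern against an in-memory SQLite engine (the Tag model and names are illustrative, not part of the record):

```python
from sqlalchemy import Column, String, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Tag(Base):
    __tablename__ = 'tags'
    name = Column(String(32), primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine, expire_on_commit=False)

def create_tag(name):
    session = Session()
    session.add(Tag(name=name))
    try:
        session.commit()       # persist the row, or...
    except IntegrityError:
        session.rollback()     # ...undo the pending insert on a duplicate key
        raise
    finally:
        session.close()        # always return the connection to the pool

create_tag('python')
```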
\"\"\"\n        author = Book(name=name, author=author)\n        session = self.Session()\n        session.add(author)\n        try:\n            session.commit()\n            return author\n        except IntegrityError:\n            session.rollback()\n            raise Exception(\"Author exists\")\n        finally:\n            session.expunge_all()\n            session.close()\n\n    def get_books_of_author(self, author):\n        session = self.Session()\n        try:\n            books = session.query(Book).filter_by(author=author).first()\n        except Exception as ex:\n            print(\"Error getting book, error={}\".format(str(ex)))\n        finally:\n            session.close()\n        return books \n\n    def place_order(self, username, bookname):\n        order = Order(username=username, bookname=bookname)\n        session = self.Session()\n        session.add(order)\n        try:\n            session.commit()\n            return order\n        except IntegrityError:\n            session.rollback()\n            raise Exception(\"Order already placed\")\n        finally:\n            session.expunge_all()\n            session.close()\n\n    def get_orders_of_user(self, username):\n        session = self.Session()\n        try:\n            orders = session.query(Order, User, Book).filter(Order.username == User.username).filter(Order.bookname == Book.name).filter(User.username == username)\n            return orders\n        except Exception as ex:\n            print(\"Error getting order, error={}\".format(str(ex)))\n        finally:\n            session.close()\n\n    def get_orders_of_book(self, bookname):\n        session = self.Session()\n        try:\n            orders = session.query(Order, User, Book).filter(Order.username == User.username).filter(Order.bookname == Book.name).filter(Book.name == bookname)\n            return orders\n        except Exception as ex:\n            print(\"Error getting order, error={}\".format(str(ex)))\n        finally:\n            session.close()\n\n    def get_top_n_most_recent_orders(self, n):\n        session = self.Session()\n        try:\n            recent_orders = session.query(Order).order_by(Order.order_time.desc()).limit(n).all()\n            return recent_orders\n        except Exception as ex:\n            print(\"Error getting order, error={}\".format(str(ex)))\n        finally:\n            session.close()\n    \n    def get_top_n_most_oldest_orders(self, n):\n        session = self.Session()\n        try:\n            recent_orders = session.query(Order).order_by(Order.order_time.asc()).limit(n).all()\n            return recent_orders\n        except Exception as ex:\n            print(\"Error getting order, error={}\".format(str(ex)))\n        finally:\n            session.close()\n\n    def get_orders_expired(self):\n        session = self.Session()\n        conn = session.connection()\n        try:\n            t = text(\"SELECT username, o.order_time, o.bookname, datetime(o.order_time,'+'||o.expiry_period_in_days||' days') AS ex FROM orders o WHERE ex < CURRENT_DATE;\")\n            result = conn.execute(t).fetchall()\n            return result\n        except Exception as ex:\n            print(\"Error getting order, error={}\".format(str(ex)))\n        finally:\n            session.close()\n\n    def delete_book(self, book):\n        session = self.Session()\n        session.query(Book).filter_by(name=book).delete()\n        try:\n            session.commit()\n        except IntegrityError:\n            session.rollback()\n            raise Exception(\"Delete not done\")\n        finally:\n            session.expunge_all()\n            session.close()\n\n    def update_book_author(self, book, updated_book, updated_author):\n        session = self.Session()\n        update = session.query(Book).filter_by(name=book).first()\n        update.name = updated_book\n        update.author = updated_author\n        try:\n            session.commit()\n            return update\n        except IntegrityError:\n            session.rollback()\n            raise Exception(\"Update not done\")\n        finally:\n            session.expunge_all()\n            session.close()\n\n### The Controller Program\ndef main():\n    \"\"\" The user interface which users will use to perform actions on the Book Accounting System. 
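get_orders_expired above drops to raw SQL through sqlalchemy.sql.text because the expiry date is computed with SQLite's datetime() function. When such a query ever takes user input, text() also accepts named bind parameters, which keeps the value out of the SQL string; a hedged sketch (column names mirror the record, the 30 is illustrative):

```python
from sqlalchemy.sql import text

# :days is bound by the driver rather than interpolated into the SQL string.
stmt = text(
    "SELECT username, bookname FROM orders "
    "WHERE datetime(order_time, '+' || :days || ' days') < CURRENT_DATE"
)
# result = conn.execute(stmt, days=30).fetchall()  # conn as in the record above
```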
\"\"\"\n    print(\"Main program starting ...\")\n    # db_url = os.environ['DB_URL'] # getting the DB url \\ $Env:DB_URL\"sqlite:///new.db\"\n    db_url = \"sqlite:///new.db\" # Hard code - DB Name\n    print(\"Connecting to DB={}\".format(db_url)) \n\n    bas = BookAccountingSystem(db_url) # setting up the engine and session\n    bas.bootstrap()\n    if not bas.ping():\n        raise Exception(\"Unable to ping the database!\")\n    print(\"Connected.\")\n\n    # Wait endlessly for input to perform actions\n    welcome_message = \"\"\"\n    Welcome to the Book Accounting System. To interact with the system\n    please press the following options:\n\n    0. Quit\n    1. Create user\n    2. Get user\n    3. Create author\n    4. Books of author\n    5. Place an Order\n    6. Get orders of a user\n    7. Get orders of a book\n    8. Get top 3 recent orders\n    9. Get top 3 oldest orders\n    10. Get orders expired\n    11. Delete book\n    12. Update book\n\n    Your Option:\\t\n    \"\"\"\n\n    option = int(input(welcome_message))\n    print(\"\\n\\n\")\n    while option != 0:\n\n        if option == 1:\n            username = input(\"Enter username: \")\n            full_name = input(\"Enter Full name: \")\n            u = bas.create_user(username, full_name)\n            print(\"\\nUser has been created \\nUser name: {} \\nFull name: {}\".format(u.username, u.full_name))\n\n        elif option == 2:\n            username = input(\"Enter username: \")\n            u = bas.get_user(username)\n            if u:\n                print(\"\\nUser name: {} \\nFull name: {}\".format(u.username, u.full_name))\n            else:\n                print(\"\\nUser with username={} does not exist\".format(username))\n        \n        elif option == 3:\n            name = input(\"Enter book name: \")\n            author = input(\"Enter author name: \")\n            a = bas.create_author(name, author)\n            print(\"\\nBook has been entered into the database \\nBook name: {} \\nAuthor name: {}\".format(a.name, a.author))\n\n        elif option == 4:\n            author = input(\"Enter author's name: \")\n            b = bas.get_books_of_author(author)\n            if b:\n                print(\"\\nAuthor's name: {} \\nBook's name: {}\".format(b.author, b.name))\n            else:\n                print(\"\\nAuthor {} does not exist\".format(author))\n\n        elif option == 5:\n            username = input(\"Enter username: \")\n            bookname = input(\"Enter book name: \")\n            o = bas.place_order(username, bookname)\n            print(\"\\nUser name: {} \\nBook ordered: {}\".format(o.username, o.bookname))\n        \n        elif option == 6:\n            username = input(\"Enter username: \")\n            order = bas.get_orders_of_user(username)\n            for o, u, b in order:\n                print(\"Full name: {} \\nBook's author: {} \\nOrdered time: {}\".format(u.full_name, b.author, o.order_time))\n\n        elif option == 7:\n            bookname = input(\"Enter book name: \")\n            order = bas.get_orders_of_book(bookname)\n            for o, u, b in order:\n                print(\"Full name: {} \\nBook ordered: {} \\nDate & Time of Order: {}\".format(u.full_name, b.name, o.order_time))\n\n        elif option == 8:\n            display_orders = int(input(\"Display number of recent orders: \"))\n            recent_order = bas.get_top_n_most_recent_orders(display_orders)\n            for o in recent_order:\n                print(\"\\nMost recent orders: \\nUser name: {} \\nBook name: {} \\nOrder time: {}\".format(o.username, o.bookname, o.order_time))\n\n        elif option == 9:\n            display_orders = int(input(\"Display number of oldest orders: \"))\n            recent_order = bas.get_top_n_most_oldest_orders(display_orders)\n            for o in recent_order:\n                print(\"\\nOldest orders: \\nUser name: {} \\nBook name: {} \\nOrder time: {}\".format(o.username, o.bookname, o.order_time))\n\n        elif option == 10:\n            exp = bas.get_orders_expired()\n            if exp:\n                for ex in exp:\n                    print(\"\\nUser name: {} \\nOrdered date & time: {} \\n Book name: {} \\nExpired date & 
time: {}\".format(ex.username, ex.order_time, ex.bookname, ex.ex))\n            else:\n                print(\"No orders are expired yet\")\n\n        elif option == 11:\n            name = input(\"Delete book name: \")\n            d = bas.delete_book(name)\n            print(\"\\nBook deleted: {} \".format(name))\n\n        elif option == 12:\n            name = input(\"Enter book name: \")\n            update_name = input(\"Update book name: \")\n            update_author = input(\"Update author name: \")\n            u = bas.update_book_author(name, update_name, update_author)\n            print(\"\\nBook name={} and author={} have been updated\".format(u.name, u.author))\n\n        option = int(input(\"\\nYour option: \"))\n\n### Start the main program\nmain()","sub_path":"BookAccountingSystem.py","file_name":"BookAccountingSystem.py","file_ext":"py","file_size_in_byte":13209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"90571085","text":"import time\r\nimport random\r\n\r\n#these can be changed to \"0\" to easily test game\r\nshort_pause = 1\r\nlong_pause = 2\r\n\r\n#pause any text for a given time\r\ndef text_pause(message, pause):\r\n    print(message)\r\n    time.sleep(pause)\r\n\r\n#introduction at start of game\r\ndef intro():\r\n    text_pause(\"As the clouds roll away, you awake to find yourself in the mountains.\", long_pause)\r\n    text_pause(\"You look around and notice two prominent places on the horizon.\", long_pause)\r\n    text_pause(\"The first place you see is a fortress on top of the highest mountain.\", long_pause)\r\n    text_pause(\"The second place is an apparently abandoned mineshaft.\", long_pause)\r\n\r\n#give player choice to fight villain or run 
away\r\ndef fight_or_flee(villain):\r\n    text_pause(f'{villain} stands up and says, \"You will be crushed!\"', long_pause)\r\n    fight_or_flight = get_valid_input(\"Will you fight? Or flee?\\n\", [\"fight\", \"flee\"])\r\n    return fight_or_flight\r\n\r\n#after defeat or victory asks player to play again\r\ndef play_again(): \r\n    play_choice = get_valid_input(\"Would you like to play again? y/n\\n\", [\"y\", \"n\"])\r\n    if play_choice == \"y\":\r\n        play_game()\r\n    elif play_choice == \"n\":\r\n        print(\"Thanks for playing!\")\r\n\r\n#asks any given question and has player choose from the given options\r\ndef get_valid_input(question, options):\r\n    response = input(question).lower()\r\n    while response not in options:\r\n        text_pause(\"Invalid input\", short_pause)\r\n        response = input(question)\r\n    return response\r\n\r\n#the place where the player encounters the villain and is the only place the player can win or lose\r\ndef fortress(items, villain, weapon):\r\n    text_pause(\"You enter the dark fortress.\", long_pause)\r\n    text_pause(f\"And you see {villain} himself.\", long_pause)\r\n    if weapon in items:\r\n        text_pause(f\"Drawing your {weapon}, you strut up to him, blasting him repeatedly.\", long_pause)\r\n        text_pause(f\"As his feeble mind tries to grasp the improbability of a {weapon} in this century, \"\r\n                   \"he collapses without a sound.\", long_pause)\r\n        text_pause(\"Victory! At the loss of the time-space continuum...\", long_pause)\r\n        play_again()\r\n    elif \"Sword\" in items and \"Shield\" in items:\r\n        fight_choice = fight_or_flee(villain)\r\n        if \"fight\" in fight_choice:\r\n            text_pause(f\"{villain} slowly approaches you.\", long_pause)\r\n            text_pause(\"He suddenly lunges forward!\", short_pause)\r\n            text_pause(\"You block with your shield!\", short_pause)\r\n            text_pause(\"You slash back with your sword, catching him in the neck!\", short_pause)\r\n            text_pause(f\"Choking on his own blood, {villain} falls.\", long_pause)\r\n            text_pause(\"Victory! You have saved the mountains!\", long_pause)\r\n            play_again()\r\n        elif \"flee\" in fight_choice:\r\n            text_pause(\"You quickly run back to the safety of the mountains.\", long_pause)\r\n            choices(items, villain, weapon)\r\n    elif \"Sword\" in items:\r\n        fight_choice = fight_or_flee(villain)\r\n        if \"fight\" in fight_choice:\r\n            text_pause(f\"{villain} slowly approaches you.\", long_pause)\r\n            text_pause(\"He suddenly lunges forward!\", short_pause)\r\n            text_pause(\"You try to parry with your sword but are too slow!\", short_pause)\r\n            text_pause(\"You feel his weapon sink through your ribs and shred your heart.\", short_pause)\r\n            text_pause(\"You drop to the ground as your blood leaves your body.\", long_pause)\r\n            text_pause(f\"Defeat! {villain}'s reign continues!\", long_pause)\r\n            play_again()\r\n        elif \"flee\" in fight_choice:\r\n            text_pause(\"You quickly run back to the safety of the mountains.\", long_pause)\r\n            choices(items, villain, weapon)\r\n    elif \"Shield\" in items:\r\n        fight_choice = fight_or_flee(villain)\r\n        if \"fight\" in fight_choice:\r\n            text_pause(f\"{villain} slowly approaches you.\", long_pause)\r\n            text_pause(\"He suddenly lunges forward!\", short_pause)\r\n            text_pause(\"You block with your shield!\", short_pause)\r\n            text_pause(\"You try to fight back, but with nothing in your hand you can only defend.\", short_pause)\r\n            text_pause(f\"{villain} bellows with laughter as he notices your lack of weapon.\", long_pause)\r\n            fight_again = input(\"Will you fight? 
Or flee?\\n\").lower()\r\n            if \"fight\" in fight_again:\r\n                text_pause(\"You continue to block his attacks, but grow weary over time.\", short_pause)\r\n                text_pause(\"Eventually he overwhelms you and you fall tasting the steel of his weapon.\", long_pause)\r\n                text_pause(f\"Defeat! {villain}'s reign continues!\", long_pause)\r\n                play_again()\r\n            elif \"flee\" in fight_again:\r\n                text_pause(\"You quickly run back to the safety of the mountains.\", long_pause)\r\n                choices(items, villain, weapon)\r\n        elif \"flee\" in fight_choice:\r\n            text_pause(\"You quickly run back to the safety of the mountains.\", long_pause)\r\n            choices(items, villain, weapon)\r\n    else:\r\n        text_pause(\"As you take in your surroundings, you notice your empty hands \"\r\n                   \"and realize you might be horribly out of your depth.\", long_pause)\r\n        fight_choice = fight_or_flee(villain)\r\n        if \"fight\" in fight_choice:\r\n            text_pause(f\"{villain} slowly approaches you.\", long_pause)\r\n            text_pause(\"He suddenly lunges forward!\", short_pause)\r\n            text_pause(\"You quickly dodge roll to your right.\", short_pause)\r\n            text_pause(\"He follows with a sweeping blow from his weapon.\", short_pause)\r\n            text_pause(\"You wildly throw your hands up in a feeble attempt to block.\", short_pause)\r\n            text_pause(\"Your bones crack and limbs shred as his \"\r\n                       \"weapon makes contact with your bare arms.\", short_pause)\r\n            text_pause(\"Mercilessly he wails away on your dying body.\", short_pause)\r\n            text_pause(f\"Defeat! {villain}'s reign continues!\", long_pause)\r\n            play_again()\r\n        elif \"flee\" in fight_choice:\r\n            text_pause(\"You quickly return to the safety of the mountains.\", long_pause)\r\n            choices(items, villain, weapon)\r\n\r\n\r\n\r\n\r\n#the place where the player can collect items\r\ndef mineshaft(items, villain, weapon):\r\n    if \"Sword\" in items or \"Shield\" in items or weapon in items: #checks to see if player has done anything in the mineshaft yet\r\n        text_pause(\"You re-enter the mineshaft intersection.\", long_pause)\r\n    if \"Sword\" in items and \"Shield\" in items and weapon in items: #checks to see what items player has. 
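The elif ladder just below prints a different trio of tunnel descriptions for every combination of collected items; the same mapping can be table-driven. A condensed sketch for only the right-hand tunnel (the phrasing is abbreviated, not the game's full text):

```python
# The right tunnel's light dims once the Ancient Sword has been taken.
RIGHT_TUNNEL = {
    True: 'a dark tunnel with a faint light at the end.',
    False: 'a dark tunnel with a glimmering light at the end.',
}

def describe_right_tunnel(items):
    return 'On your right is ' + RIGHT_TUNNEL['Sword' in items]

print(describe_right_tunnel(['Sword']))  # faint light
print(describe_right_tunnel([]))         # glimmering light
```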
Tunnels are different depending on what the player has\r\n        text_pause(\"On your right is a dark tunnel with a faint light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black.\", long_pause)\r\n    elif \"Sword\" in items and \"Shield\" in items:\r\n        text_pause(\"On your right is a dark tunnel with a faint light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel with things moving in the fog.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black.\", long_pause)\r\n    elif \"Sword\" in items and weapon in items:\r\n        text_pause(\"On your right is a dark tunnel with a faint light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black with faint scuffling sounds.\", long_pause)\r\n    elif \"Shield\" in items and weapon in items:\r\n        text_pause(\"On your right is a dark tunnel with a glimmering light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black.\", long_pause)\r\n    elif \"Sword\" in items:\r\n        text_pause(\"On your right is a dark tunnel with a faint light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel with things moving in the fog.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black with faint scuffling sounds.\", long_pause)\r\n    elif \"Shield\" in items:\r\n        text_pause(\"On your right is a dark tunnel with a glimmering light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel with things moving in the fog.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black.\", long_pause)\r\n    elif weapon in items:\r\n        text_pause(\"On your right is a dark tunnel with a glimmering light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black with faint scuffling sounds.\", long_pause)\r\n    else:\r\n        text_pause(\"As you crawl through the crumbling entrance, \"\r\n                   \"you enter a mineshaft with three directions\", long_pause)\r\n        text_pause(\"On your right is a dark tunnel with a glimmering light at the end.\", long_pause)\r\n        text_pause(\"On your left is a foggy tunnel with things moving in the fog.\", long_pause)\r\n        text_pause(\"Straight ahead is pitch black with faint scuffling sounds.\", long_pause)\r\n    text_pause(\"Which way will you go?\", long_pause)\r\n    direction = get_valid_input(\"Straight, right, left, or back?\\n\", [\"straight\", \"right\", \"left\", \"back\"])\r\n    if \"straight\" in direction:\r\n        if \"Shield\" in items:\r\n            text_pause(\"You search the area where you found the shield, but to no avail.\", long_pause)\r\n            text_pause(\"You quickly exit the tunnel.\", long_pause)\r\n            mineshaft(items, villain, weapon)\r\n        else:\r\n            text_pause(\"Your senses slowly adjust to the utter darkness.\", long_pause)\r\n            text_pause(\"You hear pitter-pattering on metal, and move towards the sound.\", long_pause)\r\n            text_pause(\"As you approach you can feel rats scurrying away \"\r\n                       \"when your hand brushes a metal disk.\", long_pause)\r\n            text_pause(\"You collected the Ancient Shield!\", long_pause)\r\n            items.append(\"Shield\")\r\n            text_pause(\"You exit the dark tunnel.\", long_pause)\r\n            mineshaft(items, villain, weapon)\r\n    elif \"right\" in direction:\r\n        if \"Sword\" in items:\r\n            text_pause(\"You look around the beam of light, but find nothing.\", long_pause)\r\n            text_pause(\"It 
seems the Ancient Sword was the only thing over here.\", long_pause)\r\n            text_pause(\"You leave the glowing tunnel.\", long_pause)\r\n            mineshaft(items, villain, weapon)\r\n        else:\r\n            text_pause(\"You head towards the light at the tunnels end.\", long_pause)\r\n            text_pause(\"As you approach you see there is light coming from a hole in the roof.\", long_pause)\r\n            text_pause(\"It is reflecting off something shiny....and sharp!\", long_pause)\r\n            text_pause(\"You collected Ancient Sword!\", long_pause)\r\n            items.append(\"Sword\")\r\n            text_pause(\"You leave the way you came.\", long_pause)\r\n            mineshaft(items, villain, weapon)\r\n    elif \"left\" in direction:\r\n        if weapon in items:\r\n            text_pause(f\"You rummage around the skeleton where you found the {weapon} \"\r\n                       \"but come up empty.\", long_pause)\r\n            text_pause(\"You leave the foggy tunnel.\", long_pause)\r\n            mineshaft(items, villain, weapon)\r\n        else:\r\n            text_pause(\"You hesitantly make your way through the fog.\", long_pause)\r\n            text_pause(\"Pushing through the fog, \"\r\n                       \"you realize what's moving is small electrical charges.\", long_pause)\r\n            text_pause('You see shattered pieces of blue wood, '\r\n                       'some with a strange word on them, \"Police\".', long_pause)\r\n            text_pause(\"Among the blue pieces of wood, you see a skeleton with strange clothes.\", long_pause)\r\n            text_pause(\"Grasped in its hands is a strange weapon.\", long_pause)\r\n            text_pause(f\"You received {weapon}!\", long_pause)\r\n            items.append(weapon)\r\n            text_pause(\"Confused, you leave the foggy tunnel, noticing the charges are gone.\", long_pause) \r\n            mineshaft(items, villain, weapon)\r\n    elif \"back\" in direction:\r\n        text_pause(\"You head back to the mountains the way you came.\", long_pause)\r\n        choices(items, villain, weapon)\r\n\r\n#area where player chooses where to go\r\ndef choices(items, villain, weapon):\r\n    text_pause(\"Enter 1 to approach the fortress.\", long_pause)\r\n    text_pause(\"Enter 2 to go into the mineshaft.\", long_pause)\r\n    text_pause(\"What would you like to do?\", long_pause)\r\n    choice = get_valid_input(\"(Please enter 1 or 2)\\n\", [\"1\", \"2\"])\r\n    if choice == \"1\":\r\n        fortress(items, villain, weapon)\r\n    elif choice == \"2\":\r\n        mineshaft(items, villain, weapon)\r\n\r\n\r\n\r\n#main function. Plays game and resets randoms every time it's played\r\ndef play_game():\r\n    items = []\r\n    villain = random.choice([\"The Dark Lord\", \"The Evil Warlord\", \"The Tyrannical King\"])\r\n    weapon = random.choice([\"Deagal\", \"Ray Gun\", \"AK-47\"])\r\n    intro()\r\n    choices(items, villain, weapon)\r\n\r\n\r\nplay_game()\r\n\r\n","sub_path":"save_the_mountains.py","file_name":"save_the_mountains.py","file_ext":"py","file_size_in_byte":13427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"274052824","text":"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n'''\r\nCopyright (c) 2021 Inspur.com, Inc. 
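The schedule module that follows derives (weekday, class-instance) pairs from image filenames such as "1-7.png": take the basename, strip the extension, split on the dash, and shift the weekday to 0-based. A tiny standalone sketch of that parsing step (the path is made up):

```python
import os

def parse_class_image(filename):
    # '1-7.png' -> weekday 0 (the filename counts Monday as 1), 7th class slot
    week_day_str, instance_str = os.path.splitext(os.path.basename(filename))[0].split('-')
    return int(week_day_str) - 1, int(instance_str)

print(parse_class_image('photos/1-7.png'))  # (0, 7)
```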
All Rights Reserved\r\n\r\ndescription\r\n\r\nAuthor: Jeff Li \r\nDate: 2021/03/26 13:42:31\r\n'''\r\n\r\n\r\nimport collections\r\nimport datetime\r\nimport os\r\n\r\n\r\nSchedule = collections.namedtuple(\"Schedule\", [\"category\", \"week_days\", \"instance\", \"starttime\", \"stoptime\", \"minutes\"])\r\n\r\n\r\nBEFORE_CLASS_SCHEDULE = [\r\n    Schedule(\"早读\", [0, 1, 2, 3, 4], 1, \"06:50:00\", \"08:00:00\", None),\r\n    Schedule(\"课间操\", [0, 1, 2, 3, 4], 2, \"08:40:00\", None, 30),\r\n    Schedule(\"课间\", [0, 1, 2, 3, 4], 3, \"09:50:00\", None, 10),\r\n    Schedule(\"课间\", [0, 1, 2, 3, 4], 4, \"10:45:00\", None, 10),\r\n    Schedule(\"午休\", [0, 1, 2, 3, ], 5, \"11:35:00\", \"13:30:00\", None),\r\n    Schedule(\"午休\", [           4], 5, \"11:35:00\", \"13:00:00\", None),\r\n    Schedule(\"课间\", [0, 1, 2, 3, ], 6, \"14:10:00\", None, 10),\r\n    Schedule(\"课间\", [           4], 6, \"13:40:00\", None, 10),\r\n    Schedule(\"课间\", [0, 1, 2, 3, ], 7, \"15:05:00\", None, 10),\r\n    Schedule(\"课间\", [0, 1, 2, 3, ], 8, \"15:55:00\", None, 10),\r\n]\r\n\r\nELECTIVE_CLASS_SCHEDULE = Schedule(\"课程\", [ ], 7, \"15:15:00\", None, 90)\r\n\r\nCLASS_SCHEDULES = [\r\n    Schedule(\"课程\", [0, 1, 2, 3, 4], 1, \"08:00:00\", None, 40),\r\n    Schedule(\"课程\", [0, 1, 2, 3, 4], 2, \"09:10:00\", None, 40),\r\n    Schedule(\"课程\", [0, 1, 2, 3, 4], 3, \"10:00:00\", None, 45),\r\n    Schedule(\"课程\", [0, 1, 2, 3, 4], 4, \"10:55:00\", None, 40),\r\n    Schedule(\"课程\", [0, 1, 2, 3 ], 5, \"13:30:00\", None, 40),\r\n    Schedule(\"课程\", [           4], 5, \"13:00:00\", None, 40),\r\n    Schedule(\"课程\", [0, 1, 2, 3 ], 6, \"14:20:00\", None, 45),\r\n    Schedule(\"课程\", [           4], 6, \"13:50:00\", None, 45),\r\n    Schedule(\"课程\", [0, 1, 2, 3 ], 7, \"15:15:00\", None, 40),\r\n    Schedule(\"课程\", [0, 1, 2, 3 ], 8, \"16:05:00\", None, 40),\r\n]\r\n\r\n\r\ndef get_schedule_before_class(week_day, instance):\r\n    for schedule in BEFORE_CLASS_SCHEDULE:\r\n        if week_day in schedule.week_days and instance == schedule.instance:\r\n            category, _, instance, starttime, stoptime, minutes = schedule\r\n            return Schedule(category, [week_day], instance, starttime, stoptime, minutes)\r\n    raise ValueError(\"No suitable schedule found before 星期{}第{}节\".format(week_day + 1, instance))\r\n\r\n\r\ndef get_class_schedule(week_day, instance):\r\n    for schedule in CLASS_SCHEDULES:\r\n        if week_day in schedule.week_days and instance == schedule.instance:\r\n            category, _, instance, starttime, stoptime, minutes = schedule\r\n            return Schedule(category, [week_day], instance, starttime, stoptime, minutes)\r\n    raise ValueError(\"No suitable schedule found for 星期{}第{}节\".format(week_day + 1, instance))\r\n\r\n\r\ndef calc_class_schedule_from_images(filenames):\r\n    basename_list = [os.path.basename(filename) for filename in filenames]\r\n    classes = set()\r\n    for basename in basename_list:\r\n        week_day_str, instance_str = os.path.splitext(basename)[0].split(\"-\")\r\n        classes.add((int(week_day_str) - 1, int(instance_str)))\r\n\r\n    schedules = []\r\n    for week_day, instance in sorted(classes):\r\n        schedules.append(get_schedule_before_class(week_day, instance))\r\n        schedules.append(get_class_schedule(week_day, instance))\r\n\r\n    # Electives: Mon-Thu days without an 8th period get an elective block instead\r\n    eighth_classes = [(week_day, instance) for week_day, instance in classes if instance == 8]\r\n    if len(eighth_classes) > 0:\r\n        week_days = [week_day for week_day, _ in eighth_classes]\r\n        week_days_has_elective_class = set(range(4)) - set(week_days)\r\n        for week_day in week_days_has_elective_class:\r\n            replace_with_elective_class(schedules, week_day)\r\n\r\n    # Dismissal\r\n    add_schedule_after_class(schedules)\r\n\r\n    return schedules\r\n\r\n\r\n
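# Illustrative usage (file names follow the \"<weekday>-<period>\" pattern built\r\n# in __main__ below): calc_class_schedule_from_images([\"1-7.JPG\", \"1-8.JPG\"])\r\n# yields Monday's 7th/8th-period entries plus elective and dismissal schedules;\r\n# the test below pins down the exact expected output.\r\n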
def test_calc_class_schedule_from_images():\r\n    filenames = [\"1-7\", \"1-8\"]\r\n    schedules = calc_class_schedule_from_images(filenames)\r\n    expected_schedules = [\r\n        Schedule(category='课间', week_days=[0], instance=7, starttime='15:05:00', stoptime=None, minutes=10),\r\n        Schedule(category='课程', week_days=[0], instance=7, starttime='15:15:00', stoptime=None, minutes=40),\r\n        Schedule(category='课间', week_days=[0], instance=8, starttime='15:55:00', stoptime=None, minutes=10),\r\n        Schedule(category='课程', week_days=[0], instance=8, starttime='16:05:00', stoptime=None, minutes=40),\r\n        Schedule(category='课程', week_days=[1], instance=7, starttime='15:15:00', stoptime=None, minutes=90),\r\n        Schedule(category='课程', week_days=[2], instance=7, starttime='15:15:00', stoptime=None, minutes=90),\r\n        Schedule(category='课程', week_days=[3], instance=7, starttime='15:15:00', stoptime=None, minutes=90),\r\n        Schedule(category='放学', week_days=[0], instance=None, starttime='16:45:00', stoptime=None, minutes=30),\r\n        Schedule(category='放学', week_days=[1], instance=None, starttime='16:45:00', stoptime=None, minutes=30),\r\n        Schedule(category='放学', week_days=[2], instance=None, starttime='16:45:00', stoptime=None, minutes=30),\r\n        Schedule(category='放学', week_days=[3], instance=None, starttime='16:45:00', stoptime=None, minutes=30),\r\n    ]\r\n    assert schedules == expected_schedules\r\n\r\n\r\ndef replace_with_elective_class(schedules, week_day):\r\n    unneeded_schedules = []\r\n    for schedule in schedules:\r\n        if week_day in schedule.week_days:\r\n            if (schedule.category == \"课程\" and schedule.instance in [7, 8]) or (schedule.category == \"课间\" and schedule.instance in [8]):\r\n                unneeded_schedules.append(schedule)\r\n    # The original guard was `>= 0`, which is always true; remove the matches\r\n    # (if any), then always append the elective block.\r\n    if len(unneeded_schedules) > 0:\r\n        for schedule in unneeded_schedules:\r\n            schedules.remove(schedule)\r\n    category, _, instance, starttime, stoptime, minutes = ELECTIVE_CLASS_SCHEDULE\r\n    schedules.append(Schedule(category, [week_day], instance, starttime, stoptime, minutes))\r\n    return schedules\r\n\r\n\r\ndef test_replace_with_elective_class():\r\n    schedules = [\r\n        Schedule(\"课间\", [0, ], 7, \"15:05:00\", None, 40),\r\n        Schedule(\"课程\", [0, ], 7, \"15:15:00\", None, 40),\r\n        Schedule(\"课间\", [0, ], 8, \"15:55:00\", None, 10),\r\n        Schedule(\"课程\", [0, ], 8, \"16:05:00\", None, 40),\r\n    ]\r\n    replace_with_elective_class(schedules, 0)\r\n    expected_schedules = [\r\n        Schedule(\"课间\", [0, ], 7, \"15:05:00\", None, 40),\r\n        Schedule(\"课程\", [0, ], 7, \"15:15:00\", None, 90),\r\n    ]\r\n    assert schedules == expected_schedules\r\n\r\n\r\ndef add_schedule_after_class(schedules):\r\n    fmt = \"%H:%M:%S\"\r\n    last_stoptimes = {}\r\n    for schedule in schedules:\r\n        category, week_days, instance, starttime, stoptime, minutes = schedule\r\n        if stoptime is None:\r\n            last_stop = datetime.datetime.strptime(starttime, fmt) + datetime.timedelta(minutes=minutes)\r\n        else:\r\n            last_stop = datetime.datetime.strptime(stoptime, fmt)\r\n        week_day = week_days[0]\r\n        if week_day not in last_stoptimes or last_stop > last_stoptimes[week_day]:\r\n            last_stoptimes[week_day] = last_stop\r\n\r\n    for week_day, last_stop in last_stoptimes.items():\r\n        # 30-minute dismissal block (this was 45, which contradicted both tests)\r\n        schedule = Schedule(\"放学\", [week_day], None, last_stop.strftime(fmt), None, 30)\r\n        schedules.append(schedule)\r\n\r\n\r\ndef test_add_schedule_after_class():\r\n    schedules = [\r\n        Schedule(\"课程\", [           4], 6, \"13:50:00\", None, 45),\r\n    ]\r\n    add_schedule_after_class(schedules)\r\n    expected_schedules = [\r\n        Schedule(\"课程\", [           4], 6, \"13:50:00\", None, 45),\r\n        
Schedule(\"放学\", [ 4], None, \"14:35:00\", None, 30),\r\n ]\r\n assert schedules == expected_schedules\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import itertools\r\n a = itertools.product(range(1, 6), range(1,9))\r\n b = [\"{}-{}.JPG\".format(d,i) for d,i in a]\r\n b.remove('1-8.JPG')\r\n b.remove('5-7.JPG')\r\n b.remove('5-8.JPG')\r\n schedules = calc_class_schedule_from_images(b)\r\n for schedule in schedules:\r\n print(schedule)\r\n","sub_path":"src/ppt2ad/sched.py","file_name":"sched.py","file_ext":"py","file_size_in_byte":8429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38764099","text":"# -*- coding: utf-8 -*-\nfrom anndata import AnnData\nfrom typing import Optional\nfrom scanpy import logging as logg\n\nfrom cellrank.tools._markov_chain import MarkovChain\nfrom cellrank.tools._constants import RcKey\nfrom cellrank.tools._transition_matrix import transition_matrix\nfrom cellrank.utils._docs import inject_docs\n\n\n_find_docs = \"\"\"\\\nComputes {cells} cells based on RNA velocity, see [Manno18]_. The tool models dynamic cellular\nprocesses as a Markov chain, where the transition matrix is computed based on the velocity vectors of each\nindividual cell. The spectrum of the transition matrix can be used to query approximate recurrent classes of the\nMarkov chain, which represent groups of {cells} cells.\n\nCells are filtered into transient/recurrent cells using the left eigenvectors of the transition matrix and clustered\ninto distinct groups of {cells} cells using the right eigenvectors of the transition matrix of the Markov chain.\n\nParams\n------\nadata : :class:`adata.AnnData`\n Annotated data object.\ncluster_key\n The tool can match computed {direction}points against pre-computed clusters to annotate the {direction}points.\n For this, provide a key from :paramref:`adata` `.obs` where cluster labels have been computed.\nweight_connectivities\n Weight given to a transition matrix computed on the basis of the KNN connectivities. Should be in `[0, 1]`. This\n can help in situations where we have noisy velocities and want to give some weight to transcriptomic similarity.\npercentile\n When making a distinction between transient and recurrent cells, a percentile is used for filtering. Choose\n this value according to the percentage of transient cells you expect to see in your data.\n E.g. :paramref:`percentile` `=98` means you are expecting 98% of your cells to be transient\n and 2% to be recurrent {direction}points.\nn_matches_min\n Parameter used to remove some noise. 
If `n_matches_min = L`, required that at least L of the nearest neighbors of\n cells *i* belong to the same {direction}point, otherwise, *i* is not considered a {direction}point itself.\nn_start_end\n If you know how many {direction}points you are expecting, you can provide this number.\n Otherwise, an eigen-gap heuristic is used.\nshow_plots\n Whether to show plots of the spectrum and eigenvectors in the embedding.\ncopy\n Whether to update the existing :paramref:`adata` object or to return a copy.\n\nReturns\n-------\n:class:`anndata.AnnData` or :class:`NoneType`\n Depending on :paramref:`copy`, either updates the existing :paramref:`adata` object or returns a copy.\n Marked cells can be found in :paramref:`adata` `.obs` under `{key_added!r}`.\n\"\"\"\n\n\ndef _root_final(\n adata: AnnData,\n final: bool = True,\n cluster_key: Optional[str] = None,\n weight_connectivities: float = None,\n percentile: int = 98,\n n_matches_min: Optional[int] = 1,\n n_start_end: Optional[int] = None,\n show_plots: bool = False,\n copy: bool = False,\n) -> Optional[AnnData]:\n\n key = RcKey.FORWARD if final else RcKey.BACKWARD\n logg.info(f\"Computing `{key}`\")\n adata = adata.copy() if copy else adata\n\n # compute kernel object\n kernel = transition_matrix(\n adata, backward=not final, weight_connectivities=weight_connectivities\n )\n\n # create MarkovChain object\n mc = MarkovChain(kernel)\n\n # run the computation\n mc.compute_eig()\n mc.compute_approx_rcs(\n percentile=percentile,\n n_matches_min=n_matches_min,\n use=n_start_end,\n n_clusters_kmeans=n_start_end,\n cluster_key=cluster_key,\n )\n\n if show_plots:\n mc.plot_real_spectrum()\n mc.plot_eig_embedding(abs_value=True, perc=[0, 98], use=n_start_end)\n mc.plot_eig_embedding(left=False, use=n_start_end)\n\n return adata if copy else None\n\n\n@inject_docs(\n root=_find_docs.format(cells=\"root\", direction=\"start\", key_added=\"root_cells\")\n)\ndef find_root(\n adata: AnnData,\n cluster_key: Optional[str] = None,\n weight_connectivities: float = None,\n percentile: int = 98,\n n_start_end: Optional[int] = None,\n show_plots: bool = False,\n copy: bool = False,\n) -> Optional[AnnData]:\n \"\"\"\n Root cells of a dynamic process in single cells.\n\n {root}\n \"\"\"\n\n return _root_final(\n adata,\n final=False,\n cluster_key=cluster_key,\n weight_connectivities=weight_connectivities,\n percentile=percentile,\n n_start_end=n_start_end,\n show_plots=show_plots,\n copy=copy,\n )\n\n\n@inject_docs(\n final=_find_docs.format(cells=\"final\", direction=\"end\", key_added=\"final_cells\")\n)\ndef find_final(\n adata: AnnData,\n cluster_key: Optional[str] = None,\n weight_connectivities: float = None,\n percentile: int = 98,\n n_start_end: Optional[int] = None,\n show_plots: bool = False,\n copy: bool = False,\n) -> Optional[AnnData]:\n \"\"\"\n Final cells of a dynamic process in single cells.\n\n {final}\n \"\"\"\n\n return _root_final(\n adata,\n final=True,\n cluster_key=cluster_key,\n weight_connectivities=weight_connectivities,\n percentile=percentile,\n n_start_end=n_start_end,\n show_plots=show_plots,\n copy=copy,\n )\n","sub_path":"cellrank/tools/_root_final.py","file_name":"_root_final.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"157825084","text":"# -*- coding: utf-8 -*-\n\n\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nfrom datetime import datetime, timedelta\nfrom argparse import ArgumentParser\n\nfrom flask import 
Flask, request, abort\nfrom linebot import (\n    LineBotApi, WebhookParser\n)\nfrom linebot.exceptions import (\n    InvalidSignatureError\n)\nfrom linebot.models import (\n    MessageEvent, TextMessage, TextSendMessage,\n    TemplateSendMessage, ButtonsTemplate, DatetimePickerTemplateAction,\n    ImageSendMessage\n)\n\nimport threading\nimport sleepchecker\nimport requests\n\nclass Alarm:\n    def __init__(self):\n        self.active = False\n\n        # ---------- test settings ----------\n        #self.snooze_sec = 600  # default: 10 minutes (600 s)\n        self.snooze_sec = 100  # test value: 1 min 40 s (100)\n\n        self.set_count = 0   # how many times set() was called\n        self.ring_count = 0  # how many times the alarm rang\n\n    def set(self, sec):\n        # Start the alarm on a timer thread\n        alarm_thread = threading.Timer(sec, self.ring)\n        alarm_thread.start()\n\n        # Fetch the latest sleep data one minute before the alarm rings\n        fitbit_thread = threading.Timer(sec - 60, self.check_sleep_fitbit)\n        fitbit_thread.start()\n\n        if self.set_count == 0:\n            print(\"Alarm: on\")\n            self.active = True\n            self.set_count += 1\n        else:\n            print(\"Alarm: snooze set, After: {}sec, Set_count: {}\".format(sec, self.set_count))\n            self.set_count += 1\n\n    def reset(self):\n        self.active = False\n        self.set_count = 0\n        self.ring_count = 0\n\n    def ring(self):\n        if self.active == True:\n            self.ring_count += 1\n            r = requests.get(\"http://192.168.3.2:5000/play/JR.mp3\")\n            message = \"Alarm rang, count: {}\".format(self.ring_count)\n            push_message(message)\n            print(\"Alarm: ring, Ring_count: {}\".format(self.ring_count))\n\n    def check_sleep_fitbit(self):\n        if self.active == True:\n            checker = sleepchecker.Checker()\n            result = checker.check_sleep()\n            if result == None:\n                # No sleep data available\n                message = \"No sleep data found. Please check that your tracker has synced!\"\n                push_message(message)\n                self.set(self.snooze_sec)\n            elif result == False:\n                # The user was already awake\n                message = \"You are awake, so the alarm has been turned off!\"\n                push_message(message)\n                make_alarm_off()\n            else:\n                # Sleep data exists, i.e. the user is still asleep\n                self.set(self.snooze_sec)\n\n\n
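# Minimal usage sketch (illustrative; the Flask handlers below drive this):\n#   alm = Alarm()\n#   alm.set(120)  # ring in two minutes; sleep is re-checked 60 s before ringing\n#   alm.reset()   # cancel: ring() and check_sleep_fitbit() become no-ops\n\n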
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)\nchannel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)\nline_user_id = os.getenv('LINE_USER_ID', None)\nif channel_secret is None:\n    print('Specify LINE_CHANNEL_SECRET as environment variable.')\n    sys.exit(1)\nif channel_access_token is None:\n    print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')\n    sys.exit(1)\n\nif line_user_id is None:  # needed for push notifications\n    print('Specify LINE_USER_ID as environment variable.')\n    sys.exit(1)\n\nline_bot_api = LineBotApi(channel_access_token)\nparser = WebhookParser(channel_secret)\n\napp = Flask(__name__)\nalm = Alarm()\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n    signature = request.headers['X-Line-Signature']\n\n    body = request.get_data(as_text=True)\n    #print(\"Request body: \" + body)\n\n    # parse webhook body\n    try:\n        events = parser.parse(body, signature)\n    except InvalidSignatureError:\n        abort(400)\n\n    # if event is MessageEvent and message is TextMessage, then echo text\n    for event in events:\n        if event.type == \"postback\":\n            # Set the alarm for the time chosen in the LINE datetime template\n            if event.postback.data == \"datetime_picker\":\n                dt = event.postback.params[\"datetime\"]\n                now = datetime.now()\n                alarm_time = datetime.strptime(dt, '%Y-%m-%dT%H:%M')\n                dif = alarm_time - now\n                alm.set(dif.seconds)\n                print(dif.seconds)\n                # TODO: confirm this is the right place for this message\n                message = 'Alarm: on, at ' + \" \".join(dt.split(\"T\"))\n                push_message(message)\n\n        if not isinstance(event, MessageEvent):\n            continue\n        if not isinstance(event.message, TextMessage):\n            continue\n\n        message = event.message.text\n\n        if message == \"set\":\n            # Send the LINE template for setting the alarm\n            make_set_alarm_event(event.reply_token)\n        elif message == \"off\":\n            make_alarm_off()\n        elif message == \"status\":\n            check_alarm_status()\n        elif message == \"graph\":\n            send_heart_graph()  # note: not defined in this file\n        else:\n            line_bot_api.reply_message(\n                event.reply_token,\n                TextSendMessage(text=message)\n            )\n    return \"ok\"\n\ndef make_set_alarm_event(token):\n    # Send the LINE template for setting the alarm\n    # TODO: make old templates unusable\n    now = datetime.now()\n    two_min_later = now + timedelta(minutes=2)  # alarms may only be set 2+ minutes ahead\n    date_picker = TemplateSendMessage(\n        alt_text='datetime picker',\n        template=ButtonsTemplate(\n            title='Set an alarm?',\n            text='Please choose a date and time',\n            actions=[\n                DatetimePickerTemplateAction(\n                    label='Set',\n                    data='datetime_picker',\n                    mode='datetime',\n                    initial=two_min_later.strftime(\"%Y-%m-%dT%H:%M\"),\n                    min=two_min_later.strftime(\"%Y-%m-%dT%H:%M\"),\n                    max='2099-12-31T23:59'\n                )\n            ]\n        )\n    )\n    line_bot_api.reply_message(token, date_picker)\n\ndef make_alarm_off():\n    if alm.active is True:\n        alm.reset()\n        message = 'Alarm: off'\n        push_message(message)\n        print(\"Alarm: off\")\n    else:\n        message = 'Alarm is not active'\n        push_message(message)\n        print(message)\n\ndef check_alarm_status():\n    message = \"Alarm active: {}\\nSnooze interval: {} s\\nTimes set: {}\\\n    \\nTimes rung: {}\".format(alm.active, alm.snooze_sec, alm.set_count, alm.ring_count)\n    push_message(message)\n\ndef push_message(message):\n    line_bot_api.push_message(line_user_id, TextSendMessage(text=message))\n\n\nif __name__ == \"__main__\":\n    arg_parser = ArgumentParser(\n        usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'\n    )\n    arg_parser.add_argument('-p', '--port', type=int, default=3000, help='port')\n    arg_parser.add_argument('-d', '--debug', default=True, help='debug')\n    options = arg_parser.parse_args()\n\n    app.run(debug=options.debug, port=options.port)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"139121476","text":"#! python3\n# year.py - stores the Year class, which holds Month objects in a dictionary.\n\nfrom yourFinance import month\n\n\nclass Year:\n    def __init__(self, number=''):\n        # Months live in a dict keyed by month name - looking up a given\n        # month is easier than iterating through a list would be.\n        self.monthDict = {}\n        if number == '':\n            self.set_number()\n        else:\n            assert isinstance(number, int), \"Wrong data type of year number.\"\n            self.number = number\n\n    def set_number(self):\n        \"\"\"Sets year number if it was not given while being created.\"\"\"\n        while True:\n            number = input('Which year: ')\n            try:\n                self.number = int(number)\n                break\n            except ValueError:\n                print('That is not a valid year number.')\n                continue\n\n    def add_month(self, monthObj):\n        \"\"\"Adds a month to the dictionary, asking whether to override.\"\"\"\n        assert isinstance(monthObj, month.Month),\\\n               'Wrong argument type (should be Month object).'\n        if monthObj.name in self.monthDict:\n            answer = input('Month exists. Override? 
(y/n) ')\n            if answer.lower().startswith('n'):\n                return False\n        # Keys are month names, which makes a given month object easy to find.\n        self.monthDict[monthObj.name] = monthObj\n\n\n    def remove_month(self, monthObj):\n        \"\"\"Removes a month from the dictionary; raises an error if it is absent.\"\"\"\n        assert isinstance(monthObj, month.Month),\\\n               'Wrong argument type (should be Month object).'\n        # Raises an error if no month object with this name is in the dict\n        try:\n            del self.monthDict[monthObj.name]\n        except KeyError:\n            raise Exception('No object with this name in months dictionary.')\n\n    def show_year(self):\n        \"\"\"Returns string with year name and months info.\"\"\"\n        yearString = str(self.number) + '\\n\\n'\n        for monthName in month.Month.MONTHS_NAMES:\n            if monthName in self.monthDict.keys():\n                yearString += self.monthDict[monthName].show_month()\n        return yearString\n","sub_path":"yourFinance/year.py","file_name":"year.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"366674480","text":"import logging\nfrom flask import json\nfrom spyne.protocol.json import JsonDocument\nfrom spyne.protocol.http import HttpRpc\nfrom spyne import Application, srpc, ServiceBase, Decimal, Unicode\nfrom spyne.server.wsgi import WsgiApplication\nimport requests\nfrom model import *\n\n\n\nclass CheckCrimeService(ServiceBase):\n    @srpc(Decimal, Decimal, Decimal, _returns=Unicode)\n    def checkcrime(lat, lon, radius):\n        # Get the information from the CrimeReport API\n        url = 'https://api.spotcrime.com/crimes.json'\n        params = {'lat': lat, 'lon': lon, 'radius': radius, 'key': '.'}\n        input_data = requests.get(url=url, params=params)\n        input_json = json.loads(input_data.text)  # Convert the response to JSON\n        output_data = CrimeReport(input_json)  # CrimeReport processes the input data\n\n        # Create the output_json\n        output_json = {\n            \"total_crime\" : output_data.total_crime,\n            \"the_most_dangerous_streets\" : output_data.the_most_dangerous_streets,\n            \"crime_type_count\" : output_data.crime_type_count,\n            \"event_time_count\" : output_data.event_time_count\n        }\n\n        return output_json\n\n\nif __name__ == '__main__':\n    from wsgiref.simple_server import make_server\n    # Keep track of events using a log system\n    logging.basicConfig(level=logging.DEBUG)\n\n    # Creates the application using HttpRpc as input protocol and JsonDocument as output protocol\n    application = Application([CheckCrimeService], 'checkcrime',\n        in_protocol=HttpRpc(validator='soft'), out_protocol=JsonDocument(ignore_wrappers=True))\n\n    # Use wsgi instead of flask\n    wsgi_app = WsgiApplication(application)\n    server = make_server('0.0.0.0', 8000, wsgi_app)\n    server.serve_forever()","sub_path":"lab2/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"101131083","text":"import time\nimport requests\nimport logging\n\nfrom db import Session\nfrom models import Article\nfrom article_fetcher import ArticleFetcher\nfrom comment_fetcher import CommentFetcher\n\nTOP_ARTICLES_URL = 'https://hacker-news.firebaseio.com/v0/topstories/.json'\nITEM_URL = 'https://hacker-news.firebaseio.com/v0/item/%s.json'\nLIMIT_TOP_RESULTS = 300\nREFRESH_INTERVAL = 300\n\n\nclass Refresher:\n    def __init__(self, session, article_fetcher, comment_fetcher):\n        # Use the injected session; previously a fresh Session() was created\n        # here, silently ignoring the parameter.\n        self.ses = session if session is not None else Session()\n        self.article_fetcher = article_fetcher\n        self.comment_fetcher = comment_fetcher\n\n    
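# One refresh pass: pull the top-story IDs, fetch any missing articles,\n    # then update ranks, scores and titles in place.\n    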
def refresh(self):\n        top_article_ids = requests.get(TOP_ARTICLES_URL).json()[:LIMIT_TOP_RESULTS]\n\n        self.article_fetcher.fetch_list(top_article_ids)\n        logging.info('Fetched: %s' % (len(top_article_ids)))\n\n        self.update_rank(top_article_ids)\n        self.update_scores_and_titles(top_article_ids)\n\n    def update_rank(self, top_article_ids):\n        logging.info('Updating ranks...')\n        # Mark all articles as unranked first\n        self.ses.query(Article).update({Article.rank: None})\n\n        # If a page is rendered between now and the time the ranks have been updated,\n        # it could result in a blank page. Low risk, timing based issue.\n\n        logging.info('New order:')\n        for i, article_id in enumerate(top_article_ids):\n            arty = self.query_article(article_id)\n            if arty:\n                arty.rank = i\n                logging.info(\"%s: %s\" % (i, article_id))\n\n                submitter = arty.submitter\n                # if a user on the front page isn't tagged yet, prioritise them\n                submitter.priority = i\n            else:\n                logging.info('Skipping %s: %s, does not exist' % (i, article_id))\n\n    def collect_comments(self, article_dict):\n        logging.info('Collecting child comments...')\n        for key in article_dict.keys():\n            self.article_fetcher.fetch_list(article_dict[key])\n\n    def update_scores_and_titles(self, top_article_ids):\n        logging.info('Updating scores')\n        for article_id in top_article_ids:\n            article_info = requests.get(ITEM_URL % article_id).json()\n            article = self.query_article(article_id)\n\n            if article:\n                article.score = article_info.get(\"score\")\n                article.number_of_comments = article_info.get(\"descendants\")\n                if article_info.get(\"title\") != article.title:\n                    logging.debug(\"Title changed for article %s, from '%s' to '%s'\" % (article_id, article.title, article_info.get(\"title\")))\n                    article.title = article_info.get(\"title\")\n            else:\n                logging.debug(\"Article not found to update, hn_id: %s\" % article_id)\n\n    def query_article(self, article_id):\n        article = self.ses.query(Article).filter(Article.hn_id == article_id).first()\n        return article\n\n\ndef refresh_top():\n    # TODO Loop this or reschedule the job?\n    print(\"Starting refresh loop\")\n    while True:\n        start_time = time.time()\n        ses = Session()  # created here so the except branch can actually roll it back\n        try:\n            arty = ArticleFetcher()\n            commy = CommentFetcher()\n            refresher = Refresher(ses, arty, commy)\n            refresher.refresh()\n        except Exception as e:\n            logging.error(\"Refresh failed, %s\" % e)\n            if ses:\n                ses.rollback()\n\n        duration = time.time() - start_time\n        logging.info('Loop duration %s' % duration)\n\n        if duration < REFRESH_INTERVAL:\n            sleep_time = REFRESH_INTERVAL - duration\n            logging.debug(\"Sleeping for %s seconds\" % sleep_time)\n            time.sleep(sleep_time)\n\n\nif __name__ == \"__main__\":\n    refresh_top()\n","sub_path":"docker/refresh/refresh/refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"70936897","text":"\"\"\"This module contains a set of functions to use video_to_ascii from the CLI\"\"\"\n\nfrom . 
import video_engine as ve\n\ndef play(filename, **kwargs):\n \"\"\"\n Play a video from a file by default using ascii chars in terminal\n \"\"\"\n engine = ve.VideoEngine()\n if \"strategy\" in kwargs:\n engine.set_strategy(kwargs[\"strategy\"])\n engine.load_video_from_file(filename)\n engine.play()\n","sub_path":"video_to_ascii/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"624303852","text":"from django.conf.urls import patterns, url\r\n\r\nfrom finances.models import Article, Good, Ticket\r\nfrom django.views.generic import DetailView, ListView\r\nfrom django_tables2 import RequestConfig, SingleTableView, A\r\nfrom django_filter import FilteredSingleTableView, F\r\n\r\nimport django_tables2 as tables\r\n\r\n\r\nclass TicketTable(tables.Table):\r\n id = tables.LinkColumn('filter_detail', args=[A('pk')])\r\n user = tables.Column()\r\n total = tables.Column()\r\n# tz = tables.Column(verbose_name='time zone')\r\n good = tables.Column()\r\n comment = tables.Column()\r\n# summary = tables.Column(order_by=(\"good\", \"total\"))\r\n\r\n class Meta:\r\n model = Ticket\r\n attrs = {'class': 'paleblue'}\r\n order_by = \"-dt\"\r\n\r\n filters = (\r\n F('total', 'Filter name', values_list=(('True', '1'), ('False', '0'))),\r\n F('good__label', 'Filter name', values_list=[(str( x), x.label) for x in Good.objects.all()]),\r\n F('good__article__label', 'Filter name', values_list=[(str(x), x.label) for x in Article.objects.all()]),)\r\n\r\nfrom views import DetailTicket, CreateTicket, DeleteTicket\r\nurlpatterns = patterns('',\r\n url(r'^test/$',\r\n ListView.as_view(\r\n queryset=Ticket.objects.select_related(),\r\n # model = Ticket,\r\n context_object_name='tickets',\r\n paginate_by=10,\r\n template_name='finances/index.html')),\r\n\r\n url(r'^class-based/$',\r\n SingleTableView.as_view(\r\n table_class=TicketTable,\r\n #table_class = TicketTable,\r\n queryset=Ticket.objects.select_related(),\r\n template_name=\"finances/index2.html\")),\r\n\r\n url(r'^filter/$',\r\n FilteredSingleTableView.as_view(\r\n table_class=TicketTable,\r\n queryset=Ticket.objects.select_related(),\r\n template_name=\"finances/index2.html\")),\r\n\r\n# url(r'^filter/(?P\\d+)/$',\r\n# DetailView.as_view(\r\n # model=Ticket,\r\n# template_name='finances/detail.html'), name='filter_detail'),\r\n url(r'^filter/(?P\\d+)/$',\r\n DetailTicket.as_view(),\r\n# model=Ticket,\r\n# template_name='finances/detail.html'),\r\n name='filter_detail'),\r\n url(r'^filter/(?P\\d+)/delete$',\r\n DeleteTicket.as_view(),\r\n name='filter_delete'),\r\n url(r'^filter/new$',\r\n CreateTicket.as_view(),\r\n name='filter_new'),\r\n# (r'^admin/finances/stats/$', StatsListView.as_view(\r\n# queryset=Ticket.objects.values('good__article').annotate(sumtotal=Sum('total')).values('sumtotal','good__article__label'),\r\n# context_object_name=\"data\",\r\n# template_name = 'admin/finances/stats.html',\r\n# filters = ('test2'),\r\n# )),\r\n\r\n )\r\n","sub_path":"finances/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471659349","text":"import mysql.connector\nfrom mysql.connector import MySQLConnection, Error\nimport requests\n\ndef insert_company():\n \n query = \"INSERT INTO financials.Indicators (ID,Name) VALUES(%s,%s)\"\n mydb = mysql.connector.connect(host=\"localhost\",user=\"root\")\n cursor = 
mydb.cursor()\n \n companies = requests.get(\"https://api.usfundamentals.com/v1/companies/xbrl?&format=json&token=moHx0ZmsuO49zm0R3GI1nA\")\n companiesJson = companies.json()\n\n for company in companiesJson:\n args = (company['company_id'], company['name_latest'])\n try:\n cursor.execute(query, args)\n except Error as error:\n print(error)\n \n cursor.close()\n mydb.commit()\n mydb.close()\n\ndef update_indicator(year,indicator):\n \n query = \"UPDATE financials.Indicators\" + year + \" set \" + indicator + \" = %s where ID = %s\"\n mydb = mysql.connector.connect(host=\"localhost\",user=\"root\")\n cursor = mydb.cursor()\n \n apiString = \"https://api.usfundamentals.com/v1/indicators/xbrl?indicators=\" + indicator + \"&periods=\" + year + \"&token=moHx0ZmsuO49zm0R3GI1nA\"\n data = requests.get(apiString)\n indicatorData = data.text.splitlines()\n\n for i in range (1, len(indicatorData)):\n fields = indicatorData[i].split(\",\")\n args = (fields[2],fields[0])\n try:\n cursor.execute(query,args)\n except Error as error:\n print(error)\n \n cursor.close()\n mydb.commit()\n mydb.close()\n\ndef main():\n# insert_company()\n indicators = [\"NetIncomeLoss\", \"AssetsCurrent\", \"LiabilitiesCurrent\", \"PropertyPlantAndEquipmentNet\", \"PropertyPlantAndEquipmentGross\", \"LongTermDebtCurrent\", \"LongTermDebtNoncurrent\", \"Liabilities\", \"StockholdersEquity\", \"RetainedEarningsAccumulatedDeficit\", \"GrossProfit\", \"SalesRevenueNet\", \"Revenues\", \"SellingGeneralAndAdministrativeExpense\", \"ResearchAndDevelopmentExpense\", \"DepreciationDepletionAndAmortization\", \"InterestExpense\", \"InterestExpenseDebt\", \"InterestPaid\", \"IncomeTaxExpenseBenefit\", \"IncomeTaxesPaidNet\", \"EarningsPerShareBasic\", \"EarningsPerShareDiluted\", \"Assets\", \"CashAndCashEquivalentsAtCarryingValue\", \"NetCashProvidedByUsedInFinancingActivities\", \"NetCashProvidedByUsedInInvestingActivities\", \"NetCashProvidedByUsedInOperatingActivities\", \"OperatingIncomeLoss\", \"Goodwill\"]\n years = [\"2010\",\"2011\",\"2012\",\"2013\",\"2014\",\"2015\",\"2016\",\"2017\"]\n for indicator in indicators:\n for year in years:\n update_indicator(year,indicator)\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n","sub_path":"scripts/getFinancials.py","file_name":"getFinancials.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"566491237","text":"#Author: Daniel Reuter\r\n#Github: https://github.com/rojter-tech\r\n\r\ndef solution(X, A):\r\n N = len(A)\r\n reqpos = X*[0]\r\n appliedpos = 0\r\n for i in range(N):\r\n if reqpos[A[i] - 1] == 0:\r\n reqpos[A[i] - 1] = A[i]\r\n appliedpos = appliedpos + 1\r\n if appliedpos == X:\r\n return i\r\n return -1\r\n\r\n\r\nX = 5\r\nA = [1,5,3,4,2,3,4,5]\r\nprint(solution(X,A))","sub_path":"Python/Lesson04/Lesson[4-2]Four.py","file_name":"Lesson[4-2]Four.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67802964","text":"\"\"\"\nThis is example how to write mapper and reducer methods of MapReduce class for\nWMArchive/Tools/myspark.py tool. User should perform all necessary actions with\ngiven set of records and return back desired results. Here our mapper process\nrecords from avro files and collect results into a single dictionary. 
The\nreducer will collect results from all mappers and return back aggregated\ninformation.\n\"\"\"\n\nimport re\n\ndef parse_spec(spec):\n \"Simple spec parser, it converts strings to patterns so far\"\n ospec = {}\n for key, val in spec.items():\n if isinstance(val, basestring):\n ospec[key] = re.compile(val)\n else:\n ospec[key] = val\n return ospec\n\ndef match_value(keyval, value):\n \"helper function to match value from spec with keyval\"\n if hasattr(value, 'pattern'): # it is re.compile pattern\n if value.match(keyval):\n return True\n else:\n if keyval == value:\n return True\n return False\n\ndef match(rec, spec):\n \"Find if record match given spec\"\n for key, val in spec.items():\n if key == 'lfn':\n for lfn in rec['LFNArray']:\n if match_value(lfn, val):\n return True\n elif key in rec:\n return match_value(rec[key], val)\n return False\n\nclass MapReduce(object):\n def __init__(self, ispec=None):\n self.fields = []\n if ispec:\n if 'spec' in ispec:\n spec = ispec['spec']\n if 'fields' in ispec:\n self.fields = ispec['fields']\n if 'timerange' in ispec:\n del ispec['timerange'] # this is not used for record search\n self.spec = parse_spec(ispec)\n else:\n self.spec = {}\n\n def mapper(self, records):\n \"\"\"\n Function to find a record for a given spec during spark\n collect process. It will be called by RDD.map() object within spark.\n The spec of the class is a JSON query which we'll apply to records.\n \"\"\"\n for rec in records:\n if not rec:\n continue\n if match(rec, self.spec):\n return rec\n return {}\n\n def reducer(self, records, init=0):\n \"Simpler reducer which collects all results from RDD.collect() records\"\n out = []\n nrec = 0\n for rec in records:\n if rec:\n nrec += 1\n if self.fields:\n fields = [rec[f] for f in self.fields]\n out.append(fields)\n return {\"nrecords\":nrec, \"result\":out}\n","sub_path":"src/python/WMArchive/PySpark/RecordFinder.py","file_name":"RecordFinder.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"525009936","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n h = { }\n for ix, num in enumerate(nums):\n # print(ix,num)\n result = target - num\n # print(result)\n if result not in h:\n h[num] = ix\n else:\n return [h[result],ix]\n\nif __name__ == '__main__':\n nums = [2, 7, 11, 15]\n target = 17\n s1 = Solution()\n print(s1.twoSum(nums, target))\n\n\n","sub_path":"LeetCode/1.两个数之和.py","file_name":"1.两个数之和.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"10264890","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Post,Category\nfrom django.contrib.auth.models import User\nfrom inquiry.models import Contact\nfrom core.models import Team\nfrom django.core.mail import send_mail\nfrom django.urls import reverse_lazy, reverse\nfrom django.http import HttpResponseRedirect\nfrom taggit.models import Tag\nfrom django.db.models import Count\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .modelchoices import location_choices\nfrom .forms import EmailPostForm\nfrom django.db.models import Q\n\n# Create your views here.\n\ndef 
search(request):\n    query = request.GET.get('query', '')\n    listings = Post.published.filter(Q(address__icontains=query) | Q(body__icontains=query))\n\n    return render(request, 'listings/search.html', {'listings': listings, 'query': query})\n\nclass PostDashboardView(LoginRequiredMixin, ListView):\n    model = Contact\n    context_object_name = 'post_list_dashboard'\n    query = Contact.objects.all()\n\n    def get_context_data(self, **kwargs):\n        context = super(PostDashboardView, self).get_context_data(**kwargs)\n        context['choice_list'] = location_choices\n        return context\n\n    template_name = 'listings/post_dashboard.html'\n\n    def get_queryset(self):\n        return Contact.objects.filter(user_id=self.request.user.id)\n\ndef blog_category(request, category_slug):\n    teams = Team.objects.all()\n    categories = Category.objects.all()\n    posts = Post.published.filter(status='published')\n    category = None  # guard: keeps the context valid even without a slug\n    if category_slug:\n        category = get_object_or_404(Category, slug=category_slug)\n        posts = Post.published.filter(category=category)\n\n    context = {\n        \"categories\": categories,\n        \"posts\": posts,\n        \"category\": category,\n        \"teams\": teams,\n    }\n\n    return render(request, \"listings/categories.html\", context)\n\ndef listings(request, tag_slug=None):\n    teams = Team.objects.all()\n    posts = Post.published.all()\n    tag = None\n\n    if tag_slug:\n        tag = get_object_or_404(Tag, slug=tag_slug)\n        posts = posts.filter(tags__in=[tag])\n\n    paginator = Paginator(posts, 3)  # 3 posts per page\n    page = request.GET.get('page')\n    try:\n        posts = paginator.page(page)\n    except PageNotAnInteger:\n        posts = paginator.page(1)\n    except EmptyPage:\n        posts = paginator.page(paginator.num_pages)\n\n    template = 'listings/listings.html'\n\n    context = {\n        'page': page,\n        'posts': posts,\n        'teams': teams,\n        'tag': tag,\n    }\n\n    return render(request, template, context)\n\n\n\ndef listing(request, listing_id):\n    teams = Team.objects.all()\n    post = get_object_or_404(Post, pk=listing_id, status='published')\n\n    template = 'listings/listing.html'\n\n    data = {\n        'post': post,\n        'teams' : teams,\n        #'comments': comments,\n        #'comment_form': comment_form,\n        #'post_tags_ids': post_tags_ids,\n        #'similar_posts': similar_posts,\n    }\n\n    return render(request, template, data)\n\ndef post_share(request, post_id):\n    teams = Team.objects.all()\n    post = get_object_or_404(Post, id=post_id, status='published')\n    sent = False\n\n    if request.method == 'POST':\n        form = EmailPostForm(request.POST)\n        if form.is_valid():\n            cd = form.cleaned_data\n            # ... send data\n            post_url = request.build_absolute_uri(post.get_absolute_url())\n            subject = f\"{cd['name']} recommends you read \" f\"{post.title}\"\n            message = f\"Read {post.title} at {post_url}\\n\\n\" f\"{cd['name']}\\'s comments: {cd['comments']}\"\n            send_mail(subject, message, 'sewemallonline@gmail.com', [cd['to']])\n            sent = True\n    else:\n        form = EmailPostForm()\n\n    template = 'listings/share.html'\n\n    context = {\n        'post' : post,\n        'form' : form,\n        'sent' : sent,\n        'teams': teams,\n    }\n\n    return render(request, template, context)\n\n","sub_path":"listings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"588748589","text":"#-*- coding: utf-8 -*-\nimport numpy as np\nimport itertools\nimport copy\nfrom math import factorial as fact\n\n\ndef bose(m, n):\n    '''Return the basis for Bose statistics. m is the number of sites, n the number of particles.'''\n    R = fact(n + m - 1)//fact(n)//fact(m - 1)\n    b = np.zeros((R, m), dtype=np.int8)\n    b[0, m-1] = n\n    for i in range(R-1):\n        j = m - 1\n        while j > 0:\n            if b[i, j] in range(2, n+1):\n                b[i+1, :] = b[i, :]\n                b[i+1, j] = 0\n                b[i+1, j-1] = b[i+1, j-1] + 1\n                b[i+1, m-1] = b[i, j] - 1\n                break\n            elif b[i, j] > 0:\n                b[i+1, :] = b[i, :]\n                b[i+1, j-1] = b[i+1, j-1] + 1\n                b[i+1, j] = b[i, j] - 1\n                break\n            j -= 1\n    return b\n\n
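# Example (illustrative): bose(2, 2) enumerates the C(n+m-1, n) = 3 states\n# [[0, 2], [1, 1], [2, 0]] -- every way to place 2 bosons on 2 sites.\n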
def limit_basis(m, n, n_max):\n    '''Return the Bose basis with a cap on the number of particles per site.\n    m is the number of sites, n the number of particles, n_max the maximum occupation per site.'''\n    # Basis dimension\n    R = fact(n + m - 1)//fact(n)//fact(m - 1)\n    b = bose(m, n)\n    f = np.zeros((R, m), dtype=np.int8)\n    j = 0\n    # Discard states in which any site holds more than n_max particles\n    for i in range(b.shape[0]):\n        if any(b[i] > n_max):\n            continue\n        else:\n            f[j] = b[i]\n            j += 1\n    return f[:j]\n\ndef bose_unsave(m, n):\n    '''Return the Bose basis with a non-conserved particle number.\n    m is the number of sites, n the maximum number of particles per site.\n    '''\n    return np.array(list(map(list, itertools.product(range(n+1), repeat=m))))\n\ndef fermi(m, n_up, n_down):\n    '''Return the basis for Fermi statistics, spin included.'''\n    R = (fact(m)//fact(n_up)//fact(m-n_up))*(fact(m)//fact(n_down)//fact(m-n_down))\n    fs = np.zeros((R, 2*m), dtype=np.int8)\n    part_1 = limit_basis(m, n_up, 1)\n    if n_up == n_down:\n        part_2 = copy.copy(part_1)\n    else:\n        part_2 = limit_basis(m, n_down, 1)\n    size_1, size_2 = part_1.shape[0], part_2.shape[0]\n    for i in range(size_1):\n        for j in range(size_2):\n            fs[i*size_2+j] = np.concatenate((part_1[i], part_2[j]), axis=0)\n    return fs\n\ndef full_basis_save(m_d, m_c, m_b, n_down, n_up, n_max):\n    '''\n    Return the basis with a conserved particle number.\n    ----------------------------------\n    m_d - number of sites in the cluster,\n    m_c - number of sites in the Fermi bath,\n    m_b - number of sites in the Bose bath,\n    n_up - number of spin-up particles, n_down - number of spin-down particles,\n    n_max - maximum number of particles per site in the Bose bath\n    '''\n    mtx_1 = fermi(m_d+m_c, n_up, n_down)\n    mtx_2 = bose_unsave(m_b, n_max)\n    size_1, size_2 = mtx_1.shape[0], mtx_2.shape[0]\n    fb = np.zeros((size_1*size_2, mtx_1.shape[1]+m_b), dtype=np.int8)\n    for i in range(size_1):\n        for j in range(size_2):\n            fb[i*size_2+j] = np.concatenate((mtx_1[i], mtx_2[j]), axis=0)\n    return fb","sub_path":"ED_holstein/basis.py","file_name":"basis.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"420096608","text":"import unittest\nfrom question1 import Array\n\nclass TestArray(unittest.TestCase):\n    # Question a -- length() should return the number of elements\n    def testLength(self):\n        data = [2, 7, 9, 10, 5]\n        forTest = Array.length(data)\n        self.assertEqual(forTest, 5)\n\n    # Question b -- index() should not return a non-numeric value\n    def testIndex(self):\n        data = [2, 7, 9, 10, 5]\n        forTest = Array.index(data)\n        self.assertNotEqual(forTest, \"dog\")\n\n    # Question c -- replace() should leave the correct value in place\n    
def testReplace(self):\n        data = [2, 7, 9, 10, 5]\n        forTest = Array.replace(data)\n        self.assertNotEqual(forTest, 23)\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"testquestion1.py","file_name":"testquestion1.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"197193914","text":"import math\nfrom typing import List\n\n\nclass Solution:\n    def findMinDifference(self, timePoints: List[str]) -> int:\n        result = []\n        min_diff = math.inf\n        for time in timePoints:\n            h, m = time.split(\":\")\n            min_ = int(h)*60 + int(m)\n            result.append(min_)\n\n        result.sort()\n        result.append(result[0])\n\n        for i in range(1, len(result)):\n            diff = abs(result[i] - result[i-1])\n            min_diff = min(diff, min_diff, 24*60-diff)\n\n        return min_diff\n","sub_path":"mintimedifference.py","file_name":"mintimedifference.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"107987834","text":"import pygame\nfrom settings import Settings\nimport functions as fs\nfrom characters import Troll\n\ndef run_game():\n    pygame.init()\n\n    settings = Settings()\n    screen = pygame.display.set_mode((\n        settings.screen_width, settings.screen_height\n    ))\n\n    pygame.display.set_caption('Trollz')\n    troll = Troll(screen)\n    while True:\n        fs.check_events()\n        fs.update_screen(settings, screen, troll)\n\nrun_game()","sub_path":"Chapter 12 - Pygame/Chapter 12 - Exercises/12-2 Game character/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"534342934","text":"import math\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom nmt.model.common import clones\r\n\r\ndef attention(query, key, value, mask=None, dropout=None):\r\n    \"\"\"\r\n    Compute 'Scaled Dot Product Attention'\r\n    :param query:\r\n    :param key:\r\n    :param value:\r\n    :param mask:\r\n    :param dropout:\r\n    :return:\r\n    \"\"\"\r\n    d_k = query.size(-1)\r\n    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\r\n    if mask is not None:\r\n        scores = scores.masked_fill(mask == 0, -1e9)\r\n    p_attn = F.softmax(scores, dim=-1)\r\n    if dropout is not None:\r\n        p_attn = dropout(p_attn)\r\n    return torch.matmul(p_attn, value), p_attn\r\n\r\nclass MultiHeadAttentionWithMetrics(nn.Module):\r\n\r\n    def __init__(self, ctx, heads_count, d_model, dropout_prob=0.1, mode='self-attention'):\r\n        super(MultiHeadAttentionWithMetrics, self).__init__()\r\n        assert d_model % heads_count == 0\r\n        assert mode in ('self-attention', 'memory-attention')\r\n        self.context = ctx\r\n        self.d_head = d_model // heads_count\r\n        self.heads_count = heads_count\r\n        self.mode = mode\r\n        self.query_projection = nn.Linear(d_model, heads_count * self.d_head)\r\n        self.key_projection = nn.Linear(d_model, heads_count * self.d_head)\r\n        self.value_projection = nn.Linear(d_model, heads_count * self.d_head)\r\n        self.final_projection = nn.Linear(d_model, heads_count * self.d_head)\r\n        self.dropout = nn.Dropout(dropout_prob)\r\n        self.softmax = nn.Softmax(dim=3)\r\n\r\n        self.attention = None\r\n        # For cache\r\n        self.key_projected = None\r\n        self.value_projected = None\r\n\r\n    def forward(self, query, key, value, mask=None, layer_cache=None):\r\n        # self.context.logger.debug('[%s] attention mask %s', self.__class__.__name__, mask)\r\n        batch_size, query_len, d_model = query.size()\r\n\r\n        d_head = 
d_model // self.heads_count\r\n\r\n query_projected = self.query_projection(query)\r\n # self.context.logger.debug('[%s] query_projected %s', self.__class__.__name__, query_projected.shape)\r\n\r\n if layer_cache is None or layer_cache[self.mode] is None: # Don't use cache\r\n key_projected = self.key_projection(key)\r\n value_projected = self.value_projection(value)\r\n else: # Use cache\r\n if self.mode == 'self-attention':\r\n key_projected = self.key_projection(key)\r\n value_projected = self.value_projection(value)\r\n\r\n key_projected = torch.cat([key_projected, layer_cache[self.mode]['key_projected']], dim=1)\r\n value_projected = torch.cat([value_projected, layer_cache[self.mode]['value_projected']], dim=1)\r\n elif self.mode == 'memory-attention':\r\n key_projected = layer_cache[self.mode]['key_projected']\r\n value_projected = layer_cache[self.mode]['value_projected']\r\n\r\n # For cache\r\n self.key_projected = key_projected\r\n self.value_projected = value_projected\r\n\r\n batch_size, key_len, d_model = key_projected.size()\r\n batch_size, value_len, d_model = value_projected.size()\r\n\r\n # (batch_size, heads_count, query_len, d_head)\r\n query_heads = query_projected.view(batch_size, query_len, self.heads_count, d_head).transpose(1, 2)\r\n # print('query_heads', query_heads.shape)\r\n # print(batch_size, key_len, self.heads_count, d_head)\r\n # print(key_projected.shape)\r\n # (batch_size, heads_count, key_len, d_head)\r\n key_heads = key_projected.view(batch_size, key_len, self.heads_count, d_head).transpose(1, 2)\r\n\r\n # (batch_size, heads_count, value_len, d_head)\r\n value_heads = value_projected.view(batch_size, value_len, self.heads_count, d_head).transpose(1, 2)\r\n\r\n # (batch_size, heads_count, query_len, key_len)\r\n attention_weights = self.scaled_dot_product(query_heads, key_heads)\r\n\r\n if mask is not None:\r\n # print('mode', self.mode)\r\n # print('mask', mask.shape)\r\n # print('attention_weights', attention_weights.shape)\r\n mask_expanded = mask.unsqueeze(1).expand_as(attention_weights)\r\n attention_weights = attention_weights.masked_fill(mask_expanded, -1e18)\r\n\r\n self.attention = self.softmax(attention_weights) # Save attention to the object\r\n # print('attention_weights', attention_weights.shape)\r\n attention_dropped = self.dropout(self.attention)\r\n context_heads = torch.matmul(attention_dropped, value_heads) # (batch_size, heads_count, query_len, d_head)\r\n # print('context_heads', context_heads.shape)\r\n context_sequence = context_heads.transpose(1, 2).contiguous() # (batch_size, query_len, heads_count, d_head)\r\n context = context_sequence.view(batch_size, query_len, d_model) # (batch_size, query_len, d_model)\r\n final_output = self.final_projection(context)\r\n # print('final_output', final_output.shape)\r\n # self.context.logger.debug(\"[%s] The query %s, key %s, value %s, final_output %s dimension\",\r\n # self.__class__.__name__, query.size(), key.size(), value.size(), final_output.size())\r\n return final_output\r\n\r\n def scaled_dot_product(self, query_heads, key_heads):\r\n \"\"\"\r\n\r\n Args:\r\n query_heads: (batch_size, heads_count, query_len, d_head)\r\n key_heads: (batch_size, heads_count, key_len, d_head)\r\n \"\"\"\r\n key_heads_transposed = key_heads.transpose(2, 3)\r\n dot_product = torch.matmul(query_heads, key_heads_transposed) # (batch_size, heads_count, query_len, key_len)\r\n attention_weights = dot_product / np.sqrt(self.d_head)\r\n return attention_weights\r\n\r\nclass MultiHeadedAttention(nn.Module):\r\n 
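\"\"\"Classic multi-head attention (as in the Annotated Transformer): h parallel\r\n    heads of the scaled dot-product attention() defined above, plus a final linear.\r\n    \"\"\"\r\n    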
def __init__(self, ctx, h, d_model, dropout=0.1):\r\n \"\"\"Take in model size and number of heads.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert d_model % h == 0\r\n self.context = ctx\r\n # We assume d_v always equals d_k\r\n self.d_k = d_model // h\r\n self.h = h\r\n self.linears = clones(nn.Linear(d_model, d_model), 4)\r\n self.attention = None\r\n self.dropout = nn.Dropout(p=dropout)\r\n\r\n def forward(self, query, key, value, mask=None):\r\n if mask is not None:\r\n # Same mask applied to all h heads.\r\n mask = mask.unsqueeze(1)\r\n nbatches = query.size(0)\r\n\r\n # 1) Do all the linear projections in batch from d_model => h x d_k\r\n query, key, value = [ln(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\r\n for ln, x in zip(self.linears, (query, key, value))]\r\n\r\n # 2) Apply attention on all the projected vectors in batch.\r\n x, self.attention = attention(query, key, value, mask=mask, dropout=self.dropout)\r\n\r\n # 3) \"Concat\" using a view and apply a final linear.\r\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\r\n # [MultiHeadedAttention] The query torch.Size([10, 8, 34, 16]),\r\n # key torch.Size([10, 8, 34, 16]),\r\n # value torch.Size([10, 8, 34, 16]),\r\n # output torch.Size([10, 34, 128])\r\n # self.context.logger.debug(\"[%s] The query %s, key %s, value %s, output %s\", self.__class__.__name__,\r\n # query.size(), key.size(), value.size(), x.size())\r\n\r\n return self.linears[-1](x)\r\n","sub_path":"nmt/model/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":7575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"163943157","text":"#!/usr/bin/env python\n\n'''\nThis script should return a set of functions corresponding to a tensorized\nGLL basis on a 2D quad. Order can be specified. 
The numbering goes like this \n\n\t _______________________\n\t|\t\t\t\t\t\t|\n\t|\tnEta\t2*nEta\t\t|\n\t|\t\t\t\t\t\t|\n\t|\t.\t\t .\t\t\t|\n\t|\t.\t\t .\t\t..\t|\n\t|\t\t\t\t\t\t|\n\t|\t2\t\tnEta+2\t\t|\n\t|\t\t\t\t\t\t|\n\t|\t1\t\tnEta+1\t\t|\t\n\t|_______________________|\n\n\t(eta)\n\t^\n\t|\n\t|\n\t|________> (epsilon)\n\n'''\n\nimport argparse\nimport sympy as sym\n\nfrom sympy.physics.quantum import TensorProduct\nfrom sympy.utilities.codegen import CCodeGen, Argument\n\ndef _genOrdering(nPtsPvtx, nPtsPedg, nPtsPfac):\n\n\t# Current numbering\n\tvtxs = [0, nPtsPedg+1, (nPtsPedg+1)*(nPtsPedg+2), (nPtsPedg+1)*(nPtsPedg+3)]\n\tedgs = [range(1,nPtsPedg+1)[::-1], \n\t\t\trange(nPtsPedg+2, (nPtsPedg+1)*(nPtsPedg+2), nPtsPedg+2),\n\t\t\trange(2*(nPtsPedg+2)-1, 4*nPtsPvtx+4*nPtsPedg+nPtsPfac-1, nPtsPedg+2)[::-1],\n\t\t\trange((nPtsPedg+1)*(nPtsPedg+2)+1, 4*nPtsPvtx+4*nPtsPedg+nPtsPfac-1)]\n\tflat_edgs = [x for sublist in edgs for x in sublist]\n\tfacs = [i for i in range(4*nPtsPvtx+4*nPtsPedg+nPtsPfac) if i not in flat_edgs and i not in vtxs]\n\n\t# Reorder\n\tedgs = [edgs[3], edgs[2], edgs[0], edgs[1]]\n\tflat_edgs = [x for sublist in edgs for x in sublist]\n\tvtxs = [vtxs[2], vtxs[3], vtxs[1], vtxs[0]]\n\n\treturn facs, vtxs, flat_edgs\n\ndef getMap(nPtsPvtx, nPtsPedg, nPtsPfac):\n\n\tfacs, vtxs, flat_edgs = _genOrdering(nPtsPvtx, nPtsPedg, nPtsPfac)\n\tm = facs\n\tm.extend(flat_edgs)\n\tm.extend(vtxs)\n\n\treturn m\n\ndef renumber(v, nPtsPvtx, nPtsPedg, nPtsPfac):\n\n\tfacs, vtxs, flat_edgs = _genOrdering(nPtsPvtx, nPtsPedg, nPtsPfac)\n\n\tvOrder = [v[i] for i in facs]\n\tvOrder.extend(v[i] for i in vtxs)\n\tvOrder.extend(v[i] for i in flat_edgs)\n\n\treturn sym.Matrix(vOrder)\n\ndef legendre_polynomials(order, x_all, direction):\n '''\n Returns a list of the N+1 Nth order lagrange polynomials.\n '''\n # Symbols\n x, Phi, d_Phi = sym.symbols('%s Phi dPhi' % (direction))\n\n # Equation A.19.\n Phi = 1\n for c in x_all:\n Phi *= (x - c)\n\n # Take derivative and substitute xi.\n d_Phi = sym.diff(Phi, x)\n\n l = []\n for xi in x_all:\n\n # Equation A.20\n d_Phi_sub = d_Phi.subs(x, xi)\n ll = (1 / (x - xi)) * (Phi / d_Phi_sub)\n l.append(ll)\n\n return l\n\ndef parseArguments():\n\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"Write out c functions for Salvus.\")\n\tparser.add_argument(\n\t\t\"--order\", help=\"polynomial order\", metavar='', required=True,\n\t\ttype=int)\n\tparser.add_argument(\n\t\t\"--dimension\", help=\"problem dimension\", metavar='', required=True,\n\t\ttype=int)\n\treturn parser.parse_args()\n\nif __name__ == '__main__':\n\n\targs = parseArguments()\n\tnPtsPdim = (args.order + 1)\n\tnPtsPelm = (args.order + 1)**args.dimension\n\n\t# For re-mapping tensor product to Petsc numbering\n\tnPtsPvtx = 1\n\tnPtsPedg = (args.order - 1)\n\tnPtsPfac = (args.order - 1)**args.dimension\n\n\t# Setup symbolic variables\n\txAll = sym.symbols(\"x_0:%d\" % (nPtsPdim))\n\tzAll = sym.symbols(\"z_0:%d\" % (nPtsPdim))\n\tepsAll = sym.symbols(\"epsilon_0:%d\" % (nPtsPdim))\n\tetaAll = sym.symbols(\"eta_0:%d\" % (nPtsPdim))\n\twEpsAll = sym.symbols(\"epsilon_w0:%d\" % (nPtsPdim))\n\twEtaAll = sym.symbols(\"eta_w0:%d\" % (nPtsPdim))\n\tepsilon, eta = sym.symbols('epsilon, eta')\n \t\n\t# Setup elastic properties in voigt notation\n\tc11, c13, c15, c33, c35, c55, rho = sym.symbols(\n\t\t'c_11, c_13, c_15, c_33, c_35, c_55, rho')\n\tC = sym.Matrix([[c11, c13, c15],\n\t\t\t\t\t[c13, c33, c35],\n\t\t\t\t\t[c15, c35, c55]])\n\n\t# Take tensor product of basis functions (pack into row 
vector)\n\tnEps = sym.Matrix(legendre_polynomials(args.order, epsAll, 'epsilon'))\n\tnEta = sym.Matrix(legendre_polynomials(args.order, etaAll, 'eta'))\n\tn = TensorProduct(nEps, nEta).T\n\n\t# Get gradient of basis functions\n\tgn = sym.Matrix(([sym.diff(i, eta) for i in n])).T\n\tsym.pprint(gn)\n\n\t# Mass matrix\n\tmm = (rho * n.T * n)\n\tmm = sym.Matrix([mm[i,i] for i in range(nPtsPelm)])\n\n\t# Get mapping\n\tm = sym.Matrix(getMap(nPtsPvtx, nPtsPedg, nPtsPfac))\n\n\t# Write code\n\troutines = []\n\tsem_c = CCodeGen()\n\troutines.append(sem_c.routine(\n\t\t'interpolateOrder%dDim%d' % (args.order, args.dimension), n,\n\t\targument_sequence=None))\n\troutines.append(sem_c.routine(\n\t\t'massMatrixOrder%dDim%d' % (args.order, args.dimension), mm,\n\t\targument_sequence=None))\n\troutines.append(sem_c.routine(\n\t\t'gradientOperatorOrder%dDim%d' % (args.order, args.dimension), gn,\n\t\targument_sequence=None))\n\troutines.append(sem_c.routine(\n\t\t'mappingOrder%dDim%d' % (args.order, args.dimension), m,\n\t\targument_sequence=None))\n\tsem_c.write(\n\t\troutines, 'auto/order%dDim%d' % (args.order, args.dimension),\n\t\tto_files=True)\n","sub_path":"pybind/functionSpace.py","file_name":"functionSpace.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"632677606","text":"import pytest\nfrom fastapi.testclient import TestClient\n\nfrom server.api.dependencies import get_db\nfrom server.main import app\n\nfrom .settings.database import get_test_db, mongo_client\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef override_test_db():\n    app.dependency_overrides[get_db] = get_test_db\n    yield\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef reset_db():\n    yield\n\n    test_db = mongo_client.get_database()\n\n    test_db.drop_collection('users')\n    test_db.drop_collection('stocks')\n\n\n@pytest.fixture(scope='session')\ndef client() -> TestClient:\n    test_client = TestClient(app=app)\n    return test_client\n\n\n@pytest.fixture(scope='module')\ndef user_data() -> dict:\n    return {\n        'username': 'test_user',\n        'password': 'password123',\n        'email': 'test@email.com',\n    }\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"509147249","text":"import socket\n\nfrom scapy.all import *\n\nhost='www.dvwa.co.uk'\n\nip = socket.gethostbyname(host)\n\nopenp=[]\nfilterdp=[]\n\ncommon_ports={21,22,23,25,53,69,80,88,109,110,123,137,138,139,143,156,161,389,443,445,500,546,\n              547,587,660,995,993,2086,2087,2082,2083,3306,8443,10000}\n\ndef is_up(ip):\n    icmp = IP(dst=ip)/ICMP()\n    resp = sr1(icmp, timeout=10)\n    # sr1 returns None when no reply arrives (the original compared against\n    # the string 'None', which was never true)\n    if resp is None:\n        return False\n    else:\n        return True\n\n\ndef probe_port(ip, port, result=1):\n    src_port = RandShort()\n    try:\n        p = IP(dst=ip)/TCP(sport=src_port, dport=port, flags='F')\n        resp = sr1(p, timeout=2)\n        if resp is None:\n            # No reply to the FIN probe: open or filtered\n            result = 1\n        elif resp.haslayer(TCP):\n            if resp.getlayer(TCP).flags == 0x14:\n                # RST/ACK: the port is closed\n                result = 0\n        elif resp.haslayer(ICMP):\n            if (int(resp.getlayer(ICMP).type) == 3 and\n                    int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]):\n                # ICMP unreachable: the port is filtered\n                result = 2\n    except Exception:\n        pass\n    return result\n\n\n
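# FIN-probe result convention: 1 = open|filtered (no reply), 0 = closed\n# (RST/ACK), 2 = filtered (ICMP unreachable).\n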
print (filterdp)\n if (len(openp)==0) and (len(filterdp)==0):\n print (\"sorry no open ports found\")\nelse:\n print(\"Host is down\")\n","sub_path":"syn-scanner.py","file_name":"syn-scanner.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"147384123","text":"# -*- coding: utf-8 -*-\n#############################################################################\n# reading output file\n#############################################################################\n\nfrom __future__ import print_function #Python 2.7 compatibility\nimport sys\nimport os\nfrom scipy.interpolate import griddata\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter\n\n# open file: FILIN = '../plotxy_scatter.dat' \nFILIN = '../plotxy_scatter.dat'#sys.argv[1]\n\n\n\n\n#### \nleft, width = 0.1, 0.65\nbottom, height = 0.1, 0.65\nbottom_h = left_h = left + width + 0.02\n\nrect_scatter = [left, bottom, width, height]\nrect_histx = [left, bottom_h, width, 0.2]\nrect_histy = [left_h, bottom, 0.2, height]\n\ndiam=30 # width of the uniform function\n\n\n#### back to xph\n\nf = open(FILIN, 'r')\n \nheader = {}\n#for i in range(0, 100):\nflag = 1\ni = 0\nX = [] # scatter X coordinate\nY = [] # scatter Y coordinate\nW = [] # scatter weight \nwhile True:\n linea = f.readline().strip()\n if linea == '':\n break\n elif linea[0] == '#':\n print('skip header ...')\n else:\n X.append(float(linea.split()[0]))\n Y.append(float(linea.split()[1]))\n W.append(float(linea.split()[2]))\n\nf.close()\n\nprint(len(X))\n############################################################################fig, ax = plt.subplots(nrows=1, ncols=1)\n############################################################################cpf = ax.scatter(X,Y,s=W,alpha=0.5) \n\n\n\n# Reversed Greys colourmap for filled contours\n#fig, ax = plt.subplots(nrows=1, ncols=1)\n#cpf = ax.contourf(X,Y,W, 40, cmap=cm.hot)\n#plt.colorbar(cpf)\n\n#################################################################################plt.show()\n\n# https://scipython.com/book/chapter-7-matplotlib/examples/simple-surface-plots/\n# fig, ax = plt.subplots(nrows=1, ncols=1, subplot_kw={'projection': '3d'})\n# ax.plot_surface(X,Y,A, rstride=20, cstride=20, cmap=cm.hot)\n\n\n#if 1 == 0:\n# Set the colours of the contours and labels so they're white where the\n# contour fill is dark (Z < 0) and black where it's light (Z >= 0)\n# colours = ['w' if level<0 else 'k' for level in cpf.levels]\n# cp = ax.contour(X, Y, Aresc, 5, colors=colours)\n\n# ax.clabel(cp, fontsize=10, colors=colours)\n\n# start with a rectangular Figure\n\nNrays = len(X)\nW = 0.04 # FWHM in cm\nt0x = Nrays * binwidth / W\n\naveX = np.average(X)\nstdX = np.std(X)\naveY = np.average(Y)\nstdY = np.std(Y)\n\nx=X\ny=Y\nnullfmt = NullFormatter() # no labels\nplt.figure(1, figsize=(8, 8))\n\naxScatter = plt.axes(rect_scatter)\naxHistx = plt.axes(rect_histx)\naxHisty = plt.axes(rect_histy)\nprint(axHistx)\n# no labels\naxHistx.xaxis.set_major_formatter(nullfmt)\naxHisty.yaxis.set_major_formatter(nullfmt)\n\n# the scatter plot:\naxScatter.scatter(x, y,s=W,alpha=0.5)\n\n# now determine nice limits by hand:\nbinwidth = 0.0008\nxymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])\nlim = (int(xymax/binwidth) + 1) * binwidth\nprint(lim)\naxScatter.set_xlim((-lim, lim))\naxScatter.set_ylim((-lim, lim))\n\nbins = np.arange(-lim, lim 
+ binwidth, binwidth)\nprint(len(bins))\naxHistx.hist(x, bins=bins)\naxHisty.hist(y, bins=bins, orientation='horizontal')\nprint(x)\naxHistx.set_xlim(axScatter.get_xlim())\naxHisty.set_ylim(axScatter.get_ylim())\n\nplt.show()\n\n\n\n\n\n","sub_path":"e2s_SHADOW/ANALYSIS/ana_intensity_fb2.py","file_name":"ana_intensity_fb2.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"341362773","text":"from django.test import TestCase\nfrom django.urls import reverse\n\nfrom hordak.forms.accounts import AccountForm\nfrom hordak.models import Account\nfrom hordak.tests.utils import DataProvider\n\n\nclass AccountListViewTestCase(DataProvider, TestCase):\n def setUp(self):\n self.view_url = reverse(\"hordak:accounts_list\")\n self.login()\n\n self.bank_account = self.account(is_bank_account=True, type=Account.TYPES.asset)\n self.income_account = self.account(is_bank_account=False, type=Account.TYPES.income)\n\n def test_get(self):\n response = self.client.get(self.view_url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context[\"accounts\"].count(), 2)\n\n\nclass AccountCreateViewTestCase(DataProvider, TestCase):\n def setUp(self):\n self.view_url = reverse(\"hordak:accounts_create\")\n self.login()\n\n def test_get(self):\n response = self.client.get(self.view_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"form\", response.context)\n self.assertEqual(response.context[\"form\"][\"code\"].initial, \"01\")\n\n def test_post(self):\n response = self.client.post(\n self.view_url,\n data=dict(\n name=\"Test Account\", code=\"01\", type=\"IN\", is_bank_account=\"\", currencies=\"EUR, GBP\"\n ),\n )\n if response.context:\n self.assertFalse(response.context[\"form\"].errors)\n account = Account.objects.get()\n self.assertEqual(account.name, \"Test Account\")\n self.assertEqual(account.code, \"01\")\n self.assertEqual(account.type, Account.TYPES.income)\n self.assertEqual(account.is_bank_account, False)\n self.assertEqual(account.currencies, [\"EUR\", \"GBP\"])\n\n def test_bank_account_not_asset_account(self):\n \"\"\"Bank accounts must be asset accounts\"\"\"\n form = AccountForm(\n data=dict(\n name=\"Test Account\", code=\"01\", type=\"IN\", is_bank_account=\"yes\", currencies=\"GBP\"\n )\n )\n self.assertFalse(form.is_valid())\n error = form.errors[\"__all__\"][0].lower()\n self.assertIn(\"bank account\", error)\n self.assertIn(\"asset\", error)\n\n def test_bank_account_single_currency(self):\n \"\"\"Bank accounts may only have one currency\"\"\"\n form = AccountForm(\n data=dict(\n name=\"Test Account\",\n code=\"01\",\n type=\"AS\",\n is_bank_account=\"yes\",\n currencies=\"EUR, GBP\",\n )\n )\n self.assertFalse(form.is_valid())\n error = form.errors[\"__all__\"][0].lower()\n self.assertIn(\"bank account\", error)\n self.assertIn(\"currency\", error)\n\n def test_post_no_code(self):\n response = self.client.post(\n self.view_url,\n data=dict(\n name=\"Test Account\", code=\"\", type=\"IN\", is_bank_account=\"\", currencies=\"EUR, GBP\"\n ),\n )\n if response.context:\n self.assertFalse(response.context[\"form\"].errors)\n account = Account.objects.get()\n self.assertEqual(account.code, None)\n self.assertEqual(account.full_code, None)\n\n\nclass AccountUpdateViewTestCase(DataProvider, TestCase):\n def setUp(self):\n self.account1 = self.account(\n code=\"01\", currencies=[\"USD\"], type=Account.TYPES.expense, is_bank_account=False\n )\n self.view_url = 
reverse(\"hordak:accounts_update\", args=[self.account1.uuid])\n self.login()\n\n def test_get(self):\n response = self.client.get(self.view_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"form\", response.context)\n self.assertEqual(response.context[\"form\"][\"code\"].value(), \"01\")\n\n def test_post(self):\n response = self.client.post(\n self.view_url,\n data=dict(\n name=\"My Account\",\n code=\"04\",\n type=\"LI\",\n is_bank_account=\"yes\",\n currencies=\"EUR, GBP\",\n ),\n )\n if response.context:\n self.assertFalse(response.context[\"form\"].errors)\n\n self.account1.refresh_from_db()\n self.assertEqual(self.account1.name, \"My Account\")\n self.assertEqual(self.account1.code, \"04\")\n self.assertEqual(self.account1.type, Account.TYPES.expense) # Not editable, so unchanged\n self.assertEqual(self.account1.is_bank_account, False) # Not editable, so unchanged\n self.assertEqual(self.account1.currencies, [\"USD\"]) # Not editable, so unchanged\n\n def test_post_no_code(self):\n response = self.client.post(\n self.view_url,\n data=dict(\n name=\"My Account\", code=\"\", type=\"LI\", is_bank_account=\"yes\", currencies=\"EUR, GBP\"\n ),\n )\n if response.context:\n self.assertFalse(response.context[\"form\"].errors)\n\n self.account1.refresh_from_db()\n self.assertEqual(self.account1.code, None)\n self.assertEqual(self.account1.full_code, None)\n","sub_path":"hordak/tests/views/test_accounts.py","file_name":"test_accounts.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"201327641","text":"\"\"\"Ingestion of FAO data to Walden & Catalog.\n\nExample usage:\n\n```\npoetry run python -m ingests.faostat\n```\n\n\"\"\"\n\n\nimport datetime as dt\nimport tempfile\n\nimport requests\nimport click\n\nfrom owid.walden import files, add_to_catalog\n\n\nINCLUDED_DATASETS = [\n \"Food Security and Nutrition: Suite of Food Security Indicators\", # FS\n \"Production: Crops and livestock products\", # QCL\n]\n\n\nclass FAODataset:\n namespace: str = \"faostat\"\n url: str = \"http://www.fao.org/faostat/en/#data\"\n source_name: str = \"Food and Agriculture Organization of the United Nations\"\n _extra_metadata = {}\n\n def __init__(self, dataset_metadata: dict):\n \"\"\"[summary]\n\n Args:\n dataset_metadata (dict): Dataset raw metadata.\n catalog_dir (str): walden project local directory (clone project from https://github.com/owid/walden).\n \"\"\"\n self._dataset_metadata = dataset_metadata\n\n @property\n def publication_year(self):\n return dt.datetime.strptime(\n self._dataset_metadata[\"DateUpdate\"], \"%Y-%m-%d\"\n ).strftime(\"%Y\")\n\n @property\n def short_name(self):\n return f\"{self.namespace}_{self._dataset_metadata['DatasetCode']}\"\n\n @property\n def source_data_url(self):\n return self._dataset_metadata[\"FileLocation\"]\n\n @property\n def metadata(self):\n \"\"\"\n Walden-compatible view of this dataset's metadata.\n\n Required by the dataset index catalog (more info at https://github.com/owid/walden).\n \"\"\"\n return {\n \"namespace\": self.namespace,\n \"short_name\": f\"{self.namespace}_{self._dataset_metadata['DatasetCode']}\",\n \"name\": f\"{self._dataset_metadata['DatasetName']} - FAO ({self.publication_year})\",\n \"description\": self._dataset_metadata[\"DatasetDescription\"],\n \"source_name\": \"Food and Agriculture Organization of the United Nations\",\n \"publication_year\": int(self.publication_year),\n \"publication_date\": 
self._dataset_metadata[\"DateUpdate\"],\n \"date_accessed\": str(dt.date.today()),\n \"url\": self.url,\n \"source_data_url\": self.source_data_url,\n \"file_extension\": \"zip\",\n }\n\n def to_walden(self):\n \"\"\"\n Run faostat -> walden pipeline.\n\n Downloads the dataset from source, uploads it to Walden (DO/S3), creates the corresponding metadata file and\n places it in the walden local project repository.\n \"\"\"\n with tempfile.NamedTemporaryFile() as f:\n # fetch the file locally\n files.download(self.source_data_url, f.name)\n\n # add it to walden, both locally, and to our remote file cache\n add_to_catalog(self.metadata, f.name, upload=True)\n\n\ndef load_faostat_catalog():\n url_datasets = (\n \"http://fenixservices.fao.org/faostat/static/bulkdownloads/datasets_E.json\"\n )\n datasets = requests.get(url_datasets).json()[\"Datasets\"][\"Dataset\"]\n return datasets\n\n\n@click.command()\ndef main():\n faostat_catalog = load_faostat_catalog()\n for description in faostat_catalog:\n # Build FAODataset instance\n if description[\"DatasetName\"] in INCLUDED_DATASETS:\n faostat_dataset = FAODataset(description)\n # Run download pipeline\n faostat_dataset.to_walden()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ingests/faostat.py","file_name":"faostat.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"245871813","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\n\nfrom include.likertScalePlot import compute_percentage\n\n\ndef get_answer(file_answer):\n \"\"\"\n \"\"\"\n with open(file_answer, 'r') as f:\n return [x[:-1] for x in f.readlines()]\n\n\ndef count_choice(df, colnames, rename_columns=True,\n dropna=False, normalize=False,\n multiple_choice=False, sort_values=False):\n \"\"\"\n Count the values of different columns and transpose the count\n :params:\n :df pd.df(): dataframe containing the data\n :colnames list(): list of strings corresponding to the column header to select the right column\n :return:\n :result_df pd.df(): dataframe with the count of each answer for each columns\n \"\"\"\n df_sub = df[colnames]\n\n if rename_columns is True and multiple_choice is True:\n df_sub.columns = [s.split('[')[2][:-1] for s in colnames]\n\n if multiple_choice is True:\n df_sub = df_sub.fillna(value='No')\n\n df_sub = df_sub.apply(pd.Series.value_counts, dropna=dropna, normalize=normalize)\n\n if multiple_choice is True:\n df_sub.fillna(value=0, inplace=True)\n df_sub = df_sub.astype(int)\n df_sub = df_sub.ix['Yes']\n df_sub = df_sub.to_frame()\n df_sub.columns = ['Count']\n\n # Sorting with nan at the end, the in-built function is not working do not know why\n df_sub.sort_values(by=df_sub.columns[0], axis=0, ascending=False, inplace=True, na_position='last')\n # So implemented this dirty hack. 
If someone wants to fix, please do\n index_wo_nan = list()\n nan_value = False\n for x in df_sub.index:\n if pd.isnull(x):\n nan_value = True\n else:\n index_wo_nan.append(x)\n if nan_value:\n index_wo_nan.append(np.nan)\n\n df_sub = df_sub.reindex(index=index_wo_nan)\n return df_sub\n\n\ndef count_yn(df, colnames, multiple=False, normalize=False, dropna=False, sort_values=False):\n \"\"\"\n \"\"\"\n if multiple is True:\n df_sub = df[colnames]\n else:\n df_sub = df[colnames].to_frame(name=colnames)\n\n df_sub = df_sub.apply(pd.Series.value_counts,\n dropna=dropna,\n normalize=normalize)\n if sort_values is True:\n df_sub.sort_values(ascending=True, inplace=True, na_position='last')\n\n # Transpose the column to row to be able to plot a stacked bar chart\n df_sub = df_sub.transpose()\n if dropna is True:\n df_sub = df_sub[['Yes', 'No']]\n else:\n try:\n df_sub = df_sub[['Yes', 'No', np.nan]]\n except KeyError:\n df_sub[np.nan] = 0\n df_sub = df_sub[['Yes', 'No']]\n return df_sub\n\n\ndef count_likert(df, colnames, likert_answer, rename_columns=True, dropna=True, normalize=False):\n \"\"\"\n Count the values of different columns and transpose the count\n :params:\n :df pd.df(): dataframe containing the data\n :colnames list(): list of strings corresponding to the column header to select the right column\n :return:\n :result_df pd.df(): dataframe with the count of each answer for each columns\n \"\"\"\n # Subset the columns\n df_sub = df[colnames]\n\n if rename_columns is True:\n df_sub.columns = [s.split('[')[2][:-1] for s in colnames]\n\n # Calculate the counts for them\n df_sub = df_sub.apply(pd.Series.value_counts, dropna=dropna, normalize=normalize)\n if likert_answer:\n likert_answer = [x for x in likert_answer if x in df_sub.index]\n df_sub = df_sub.reindex(index=likert_answer)\n # Transpose the column to row to be able to plot a stacked bar chart\n return df_sub.transpose()\n\n\ndef get_percentage(df):\n \"\"\"\n Normalise results to be plotted\n \"\"\"\n if len(df.columns) > 1 and len(df.index) > 1:\n value = compute_percentage(df, by_row=True, by_col=False)\n else:\n value = compute_percentage(df, by_row=True, by_col=True)\n\n # Add [Percent] to the end of the column name to distinc the two datasets\n index_df = df.index\n name_df = df.columns\n if len(name_df) == 1:\n name_df = [\"{} [PERCENTAGE]\".format(x) for x in df.columns]\n if len(index_df) == 1:\n index_df = [\"{} [PERCENTAGE]\".format(x) for x in df.index]\n percent = pd.DataFrame(value, columns=name_df)\n percent.index = index_df\n return percent\n\n\ndef get_count(df, questions, type_question, file_answer):\n \"\"\"\n Choose which type of counting needs to be done\n\n :params:\n df dataframe(): dataframe containing all the data\n questions list(): list of the question strings to\n type_questions str(): type of questions that list_questions represent\n\n :return:\n \"\"\"\n if type_question.lower() == 'y/n/na':\n if len(questions) == 1:\n questions = questions[0]\n multiple = False\n else:\n multiple = True\n count = count_yn(df, questions, multiple=multiple, dropna=False)\n return count\n\n elif type_question.lower() == 'one choice':\n return count_choice(df, questions, multiple_choice=False)\n\n elif type_question.lower() == 'multiple choices':\n return count_choice(df, questions, multiple_choice=True)\n\n elif type_question.lower() == 'likert':\n likert_answer = get_answer(file_answer)\n if len(questions) == 1:\n rename_columns = False\n else:\n rename_columns = True\n return count_likert(df, questions, likert_answer, 
rename_columns)\n\n elif type_question.lower() == 'ranking':\n pass\n\n elif type_question.lower() == 'freetext':\n pass\n\n elif type_question.lower() == 'freenumeric':\n pass\n\n elif type_question.lower() == 'datetime':\n pass\n\n else:\n pass\n","sub_path":"analysis/include/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"87513995","text":"#!pyhton3\n#isValid.py\n#william Abbot\n\nfrom . import stringToLocation as stl\nimport re\n\n\n#This Function checks to see if the given candidate(digit) at the\n# entered location is legal given the rules of Sudoku. If there \n# are 1 or more occurences of the candidate in it's block, row, \n# or column, or row, then it returs false\ndef isValid(digits_list, location, candidate):\n l_string = location #location string\n digits = digits_list #list of all digits in board, has size of 81\n number = stl.getNumber(l_string) #location number\n letter = stl.getLetter(l_string) #location letter\n candidate = int(candidate) #ensure integerness\n \n current_block = getBlock(digits, number, letter)\n current_row = getRow(digits, number, letter)\n current_column = getColumn(digits, number, letter)\n if (candidate in current_block or candidate in current_row or candidate in current_column):\n return False\n return True\n\n\n#returns the values of all squares within the same block that\n# the current square is in\ndef getBlock(digits, number, letter):\n block = [0]*9\n counter = 0\n arr = [1, 4, 7]\n n = len(arr)\n letterInt = (ord(letter)-64)\n \n #round the number and letter to the beginning of the current block\n rounded_number = floorSearch(arr, 0, n-1, number)\n rounded_letter = floorSearch(arr, 0, n-1, letterInt)\n \n for x in range(rounded_number, rounded_number + 3):\n for y in range(rounded_letter, rounded_letter + 3):\n lst = [chr(y+64), str(x)]\n l_string = ''.join(lst)\n index = stl.stringToLocation(l_string)\n block[counter] = digits[index]#get the value at (x, y) and put it in block[]\n counter += 1\n return block\n\n#returns the values of all squares (cells) in the same row as\n# the letter input, as a list\ndef getRow(digits, number, letter):\n row = [0]*9\n for i in range(1, 10):\n number = i\n lst = [letter, str(number)]\n index = stl.stringToLocation(\"\".join(lst))\n row[i-1] = digits[index]\n return row\n\n#returns the values of all squares (cells) in the same column as\n# the letter input, as a list\ndef getColumn(digits, number, letter):\n column = [0]*9\n letter_num = (ord(letter)-65)\n for i in range(0, 9):\n letter_num = i\n lst = [chr(letter_num+65), str(number)]\n index = stl.stringToLocation(\"\".join(lst))\n column[i] = digits[index]\n return column\n\n\n# Function to get index of floo\n# of x in arr[low..high]\ndef floorSearch(arr, low, high, x): \n # If low and high cross each other \n if (low > high): \n return -1\n \n # If last element is smaller than x \n if (x >= arr[high]): \n return arr[high]\n \n # Find the middle point \n mid = int((low + high) / 2) \n \n # If middle point is floor. \n if (arr[mid] == x):\n return arr[mid]\n \n # If x lies between mid-1 and mid \n if (mid > 0 and arr[mid-1] <= x \n and x < arr[mid]): \n return arr[mid - 1]\n \n # If x is smaller than mid, \n # floor must be in left half. 
\n    if (x < arr[mid]): \n        return floorSearch(arr, low, mid-1, x) \n \n    # If mid-1 is not floor and x is greater than \n    # arr[mid], \n    return floorSearch(arr, mid+1, high, x) \n","sub_path":"Sudoku/functions/isValid.py","file_name":"isValid.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"594721791","text":"__author__ = 'Nick'\n\n\nfrom math import sqrt\n\n\ndef is_prime(num):\n\tfor i in range(2, int(sqrt(num))+1):\n\t\tif num % i == 0:\n\t\t\treturn False\n\treturn True\n\n\ndef find_primes(limit):\n\tprimes = []\n\tfor i in range(2, limit+1):\n\t\tif is_prime(i):\n\t\t\tprimes.append(i)\n\treturn primes\n\n\ndef main():\n\tprint(sum(find_primes(2000000)))\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Project Euler/src/problem10/problem10.py","file_name":"problem10.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"483531272","text":"import numpy as np \nimport matplotlib.pyplot as plt \nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\nnp.random.seed(73)\n\ndef f_x(xx1,xx2):\n\n    # Doing it via matrix multiplication (slower)\n\n    # m = np.array([[0, 0.5, -0.5],[0, 0.5, -0.5]])\n\n    # a = np.exp(-0.5*np.matmul(xx-m[:,0],xx-m[:,0]))\n    # b = np.exp(-0.5*np.matmul(xx-m[:,1],xx-m[:,1]))\n    # c = np.exp(-0.5*np.matmul(xx-m[:,2],xx-m[:,2]))\n\n    # return ((1/(2*np.pi))*np.diagonal(np.exp(-0.5*np.dot(xx-m[:,0], (xx-m[:,0]).T ))+\n    #         np.exp(-0.5*np.dot(xx-m[:,1], (xx-m[:,1]).T ))+\n    #         np.exp(-0.5*np.dot(xx-m[:,2], (xx-m[:,2]).T ))))\n\n\n    m = np.array([0, 0.5, -0.5])\n\n    return (1/(2*np.pi))*(np.exp(-0.5*((xx1 - m[0])**2 + (xx2 - m[0])**2)) + \n            np.exp(-0.5*((xx1 - m[1])**2 + (xx2 - m[1])**2)) + \n            np.exp(-0.5*((xx1 - m[2])**2 + (xx2 - m[2])**2)))\n\n\n    \ndef generate_array(size=100):\n\n    n = np.linspace(-10,10, size)\n    x1, x2 = np.meshgrid(n,n)\n\n    X1 = np.ravel(x1)\n    X2 = np.ravel(x2)\n    Z = f_x(X1,X2)\n\n    dt = np.c_[X1,X2]\n    dt = np.c_[dt, Z]\n    np.random.shuffle(dt)\n    return dt\n\n\n# size=100\n# dataframe = generate_array(size)\n\n# fig = plt.figure()\n# ax = fig.gca(projection='3d')\n# print(len(dataframe))\n\n# ax.contourf(dataframe[:,0].reshape(size,-1), dataframe[:,1].reshape(size,-1), dataframe[:,2].reshape(size,-1))\n# surf = ax.plot_trisurf(dataframe[:,0], dataframe[:,1], dataframe[:,2],cmap=cm.coolwarm)\n# ax.set_xlim3d(-1,1)\n# ax.set_xlim3d(-1,1)\n# surf = ax.plot_surface(dataframe[:,0].reshape(size,-1), dataframe[:,1].reshape(size,-1), dataframe[:,2].reshape(size,-1),\n# cmap=cm.coolwarm)\n# fig.colorbar(surf, shrink=0.5, aspect=5)\n\n# ax.scatter(dataframe[:,0].reshape(size,-1), dataframe[:,1].reshape(size,-1), dataframe[:,2].reshape(size,-1),)\n\n# plt.show()","sub_path":"lista01/Q7/generateDataSet_7c.py","file_name":"generateDataSet_7c.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"564164121","text":"# Implementation of boruta for feature selection\n# Boruta makes explicit use of a random forest to eliminate features\n# Boruta follows an all-relevant feature selection method where it captures all features\n# which are in some circumstances relevant to the outcome variable\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom boruta import BorutaPy\nfrom sklearn.base import 
TransformerMixin\nfrom sklearn.preprocessing import LabelEncoder\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef boruta(dsname, target):\n na = [\"\", \" \", \"-\", \"?\", \"N/A\"]\n data = pd.read_csv(dsname, na_values=na)\n data[target] = data[target].astype(object)\n columns = data.columns\n target_type = data[target].dtype\n print(target_type)\n\n class DataFrameImputer(TransformerMixin):\n def __init__(self):\n \"\"\"Impute missing values.\n\n Columns of dtype object are imputed with the most frequent value\n in column.\n\n Columns of other types are imputed with mean of column.\n\n \"\"\"\n\n def fit(self, X, y=None):\n self.fill = pd.Series([X[c].value_counts().index[0]\n if X[c].dtype == np.dtype('O') else X[c].mean() for c in X],\n index=X.columns)\n\n return self\n\n def transform(self, X, y=None):\n return X.fillna(self.fill)\n\n cat_data = data.select_dtypes(include=[\"object\"])\n num_data = data.select_dtypes(include=[\"int64\", \"float64\"])\n\n num_data = num_data.fillna(num_data.mean())\n cat_data = DataFrameImputer().fit_transform(cat_data)\n\n # Label encoding the values - Converting into Numerical\n \"\"\" Some of the algorithms has hard constraint that it cannot process categorical data . For such algorithms,\n User has to convert categorical features into Numeric. We have few techniques for this operation and \n we are implementing the famous and good-result methods here as \n Label - Encoding \n \"\"\"\n cat_data = cat_data.apply(LabelEncoder().fit_transform)\n data = pd.concat([num_data, cat_data], axis=1)\n data = data[columns]\n\n y = data[target]\n x = data.drop([target], axis=1)\n x = x.as_matrix()\n y = y.as_matrix()\n if target_type == 'object':\n\n rf = RandomForestClassifier(n_estimators=500, class_weight='balanced', max_depth=20)\n feature_selection = BorutaPy(rf, n_estimators='auto', verbose=2)\n feature_selection.fit(x, y)\n\n # number of selected features\n print('\\n Number of selected features:')\n print(feature_selection.n_features_)\n\n # check ranking of features\n print('\\n Feature ranking:')\n print(feature_selection.ranking_)\n\n print('\\n Initial features: ', data.columns.tolist())\n\n data = data.drop([target], axis=1)\n feature_df = pd.DataFrame(data.columns.tolist())\n feature_df['rank'] = feature_selection.ranking_\n feature_df = feature_df.sort_values('rank', ascending=True).reset_index(drop=True)\n print('\\n Top %d features:' % feature_selection.n_features_)\n feature_df.columns = ['feature_name', 'Rank']\n print(feature_df.head(feature_selection.n_features_))\n\n elif (target_type == 'int') | (target_type == 'float'):\n rf = RandomForestRegressor(n_estimators=500, max_depth=20)\n feature_selection = BorutaPy(rf, n_estimators='auto', verbose=2)\n print(feature_selection.fit(x, y))\n\n # number of selected features\n print('\\n Number of selected features:')\n print(feature_selection.n_features_)\n\n # check ranking of features\n print('\\n Feature ranking:')\n print(feature_selection.ranking_)\n\n print('\\n Initial features: ', data.columns.tolist())\n\n data = data.drop([target], axis=1)\n feature_df = pd.DataFrame(data.columns.tolist())\n feature_df['rank'] = feature_selection.ranking_\n feature_df = feature_df.sort_values('rank', ascending=True).reset_index(drop=True)\n print(feature_df)\n print('\\n Top %d features:' % feature_selection.n_features_)\n feature_df.columns = ['feature_name', 'Rank']\n 
print(feature_df.head(feature_selection.n_features_))\n","sub_path":"FEATURE_SELECTION_BORUTA.py","file_name":"FEATURE_SELECTION_BORUTA.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"205858736","text":"import time\nfrom typing import Iterable, Optional\n\nimport attr\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.metrics import classification_report\n\nfrom .ImageTrainingCycle import ImageTrainingCycle\nfrom .ImageLearner import ImageLearner\nfrom .DataContainer import DataContainer\n\n\n@attr.s(auto_attribs=True)\nclass ImageTrainer:\n learner: ImageLearner\n data_container: DataContainer\n\n def train(\n self,\n cycles: Iterable[ImageTrainingCycle],\n template: str = (\n \"Name: {} Train Time: {:.1f} min. \"\n \"Eval Time: {:.2f}s Loss: {:.4f} Accuracy: {:.2%}\"\n ),\n ) -> None:\n start_time = time.time()\n for cycle in cycles:\n self.learner.freeze() if cycle.freeze else self.learner.unfreeze()\n self.learner.compile(optimizer=cycle.optimizer, lr=cycle.lr)\n self.learner.fit(\n cycle.n_epochs,\n cycle.data,\n cycle.steps,\n self.data_container.validation,\n self.data_container.validation_steps,\n )\n end_time = time.time()\n\n eval_start_time = time.time()\n evaluation_results = self.evaluate(\n self.data_container.validation,\n self.data_container.validation_steps,\n verbose=0,\n )\n eval_end_time = time.time()\n\n print(\"-\".center(80, \"-\"))\n print(\n template.format(\n self.learner.base_model.name,\n (end_time - start_time) / 60,\n (eval_end_time - eval_start_time),\n *evaluation_results,\n )\n )\n print(\"-\".center(80, \"-\"))\n\n def evaluate(\n self, dataset: tf.data.Dataset, steps: Optional[int] = None, verbose: int = 1\n ) -> np.ndarray:\n return self.learner.model.evaluate(dataset, steps=steps, verbose=verbose)\n\n def predict(\n self, dataset: tf.data.Dataset, steps: Optional[int] = None, verbose: int = 0\n ):\n return self.learner.model.predict(dataset, steps=steps, verbose=verbose)\n\n def report(\n self, dataset: tf.data.Dataset, steps: Optional[int] = None, verbose: int = 0\n ):\n return classification_report(\n [label.numpy() for _, label in dataset.take(steps).unbatch()],\n self.learner.model.predict(dataset, steps=steps).argmax(axis=1),\n )\n\n def analyse(\n self, dataset: tf.data.Dataset, steps: Optional[int] = None, verbose: int = 0\n ):\n reverse_label_map = {\n value: key for key, value in self.data_container.label_map.items()\n }\n images = []\n label_codes = []\n for image, label_code in dataset.take(steps).unbatch():\n label_codes.append(label_code.numpy())\n images.append(image.numpy())\n labels = [reverse_label_map[label_code] for label_code in label_codes]\n probs = self.learner.model.predict(dataset, steps=steps)\n pred_codes = probs.argmax(axis=1)\n preds = [reverse_label_map[pred_code] for pred_code in pred_codes]\n return pd.DataFrame.from_dict(\n {\n \"image\": images,\n \"label\": labels,\n \"label_code\": label_codes,\n \"pred\": preds,\n \"pred_code\": pred_codes,\n \"label_probs\": probs[:, label_codes][np.eye(len(labels), dtype=bool)],\n \"pred_probs\": probs[:, pred_codes][np.eye(len(pred_codes), dtype=bool)],\n }\n )\n\n def show_predictions(\n self,\n dataset: tf.data.Dataset,\n steps: int,\n correct: bool = False,\n ascending: bool = True,\n rows: int = 4,\n cols: int = 4,\n ):\n df = self.analyse(dataset=dataset, steps=steps)\n df = df[(df.label == df.pred) if correct else 
(df.label != df.pred)]\n        df.sort_values(by=[\"label_probs\"], ascending=ascending, inplace=True)\n        _, ax = plt.subplots(rows, cols, figsize=(4 * cols, 5 * rows))\n        for i, row in enumerate(df.head(cols * rows).itertuples()):\n            idx = (i // cols, i % cols) if rows > 1 else i % cols\n            ax[idx].axis(\"off\")\n            ax[idx].imshow(row.image)\n            ax[idx].set_title(\n                f\"{row.label}\\n{row.pred}\\n{row.label_probs:.4f}\\n{row.pred_probs:.4f}\"\n            )\n","sub_path":"toai/tensorflow/ImageTrainer.py","file_name":"ImageTrainer.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"310739781","text":"'''\ncoding language: Python 3.7.0\n\nwritten by: Jonah Merrell\ndate written: March 22 2019\nwritten for: Homework5 Task8\ncourse: Math 5610\n\npurpose: This method tests the QR factorization method on various hilbert matrices.\n'''\n\nimport sys, os\nsys.path.append(os.path.abspath('../../mylibrary'))\nfrom _mymodules import matrix_QR_factorization, matrix_mult, matrix_transpose\n\ndef hilbert_matrix_QR_test():\n\n\n    hilbert_matrix = [0]*11\n    Q_matrix = [0]*11\n\n    # Create the hilbert matrices and their corresponding Q factorization matrix.\n    for n in range(1,11):\n        hilbert_matrix[n] = [[1/(1+i+j) for i in range(n)] for j in range(n)] # Hilbert Matrix generator\n        Q_matrix[n] = matrix_QR_factorization(hilbert_matrix[n])[0]\n\n    # Print the Q-factorized hilbert matrix, as well as Q^t*Q\n    for i in range(4,11,2): # 4, 6, 8, 10\n        print(\"Given a \" + str(i) + \"-sized Hilbert matrix, its Q matrix is:\")\n        for j in range(i):\n            print(Q_matrix[i][j])\n\n        print(\"Q^t*Q should result in the identity matrix:\")\n        temp_matrix = matrix_mult(matrix_transpose(Q_matrix[i]),Q_matrix[i])\n        for j in range(i):\n            print(temp_matrix[j])\n\n        print() # Print a new line for organization purposes.\n\n#hilbert_matrix_QR_test()","sub_path":"mylibrary/hilbert_matrix_QR_test.py","file_name":"hilbert_matrix_QR_test.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"611646710","text":"#import tkinter\nfrom tkinter import *\n\n#Create window\nroot = Tk()\n\n#window title\nroot.title(\"Ad-hoc Networks Attendance System\")\n\n#Create topframe with top side\ntopframe=Frame(root)\ntopframe.pack(side=TOP)\n\n#Create bottomframe with bottom side\nbottomframe=Frame(root)\nbottomframe.pack(side=BOTTOM)\n\n#Create leftframe in bottomframe with left side\nleftframe=Frame(bottomframe,bg='black')\nleftframe.pack(side=LEFT)\n\n#Create rightframe in bottomframe with right side\nrightframe=Frame(bottomframe,padx=50)\nrightframe.pack(side=RIGHT)\n#Set window geometry width 1210 and height 750 and open position on screen left to 150 and top to 150\nroot.geometry(\"1210x750+150+150\")\n#keep the main window running indefinitely\nroot.mainloop()\n\n\n","sub_path":"auto_attendance.py","file_name":"auto_attendance.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"528445899","text":"from flask import Flask,jsonify, render_template, request, Response, json, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask.helpers import flash\nimport os\nimport pymysql\npymysql.install_as_MySQLdb()\nfrom google.cloud import spanner, firestore_v1\n# from google.cloud import firestore\nfrom ddtrace import tracer,config\n\n\n#GCP Cloud Spanner\n# Imports the Google Cloud Client Library.\n#from google.cloud import 
spanner\n\n# Initialize DogStatsD and set the host.\n#initialize(statsd_host = 'dd-agent')\n\napp = Flask(__name__)\napp.config['SESSION_TYPE'] = 'filesystem'\napp.config['SECRET_KEY'] = os.urandom(24)\n\ndbuser=str(os.environ.get('dbuser'))\ndbpass=str(os.environ.get('dbpass'))\nprint (dbuser)\n\n#AWS RDS\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://' + dbuser + ':' + dbpass + '@jacktestdb.c3bw7kcbozbg.ap-northeast-1.rds.amazonaws.com/testdb'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nclass test(db.Model):\n id = db.Column('id', db.Integer, primary_key = True)\n name = db.Column(db.String(50))\n def __init__(self, name):\n self.name = name\n\nclass people():\n def __init__(self,id,name):\n self.id=id\n self.name = name\n\n@app.route(\"/samplejson\",methods=['GET','POST'])\n#@tracer.wrap()\ndef getSampleJson():\n retVal={\n 'key1': 1,\n 'key2': \"Value2\",\n 'key3': [{\n 'key31':\"Value31\",\n 'key32':\"Value32\"\n },{\n 'key31':\"Value311\",\n 'key32':\"value322\"\n }]\n }\n return retVal\n\n\n@app.route(\"/firestore\",methods=['GET','POST'])\n#@tracer.wrap()\ndef firestore():\n config.grpc[\"service\"]=\"Google Firestore\"\n #GCP firestore\n # firestore_client = firestore.Client(project='datadog-sandbox')\n firestore_client=firestore_v1.Client()\n doc_ref = firestore_client.collection(u'users').document(u'namelist1')\n doc_ref.set({\n u'first': u'Jack',\n u'last': u'Wang', \n u'born': 1982\n })\n doc_ref = firestore_client.collection(u'users').document(u'namelist2')\n doc_ref.set({\n u'first': u'Zhizheng',\n u'middle': u'Jack',\n u'last': u'Wang',\n u'born': 1982\n })\n users_ref = firestore_client.collection(u'users')\n docs = users_ref.stream()\n peoples=[]\n for doc in docs:\n i=1\n print(f'{doc.id} => {doc.to_dict()}')\n peoples.append(people(i,doc.to_dict()['first']))\n i=i+1\n\n return render_template('show_all.html',peoples = peoples)\n \n\n@app.route(\"/gsp\",methods=['GET','POST'])\n# @tracer.wrap(\"gsp\",service=\"Google Spanner\")\ndef gsp():\n #GCP Cloud Spanner\n # Instantiate a client.\n config.grpc[\"service\"]=\"Google Spanner\"\n spanner_client = spanner.Client(project='datadog-sandbox')\n\n # Your Cloud Spanner instance ID.\n instance_id = 'jacktest'\n\n # Get a Cloud Spanner instance by ID.\n instance = spanner_client.instance(instance_id)\n\n # Your Cloud Spanner database ID.\n database_id = 'testdb'\n\n # Get a Cloud Spanner database by ID.\n database = instance.database(database_id)\n\n # Execute a simple SQL statement.\n #pan = tracer.trace('spanner.sql')\n with database.snapshot() as snapshot:\n sql=\"SELECT * from testtb\"\n span = tracer.current_span()\n span.set_tag(\"sql\",sql)\n # span.finish()\n results = snapshot.execute_sql(sql)\n peoples=[]\n for result in results:\n peoples.append(people(result[0],result[1]))\n # for people in results:\n # peoples.list.addpend(people.name)\n #span.finish()\n return render_template('show_all.html',peoples = peoples)\n\n\n\n@app.route('/')\ndef show_all():\n #Increment a Datadog counter.\n #statsd.increment('my_webapp.page.views')\n #db=SQLAlchemy(app)\n return render_template('show_all.html', peoples = test.query.all())\n@app.route('/jstime')\ndef jstime():\n return render_template(\"jstime.html\")\n \n@app.route('/test')\ndef testpage():\n headers=request.headers\n resp=Response(\"Request headers:\\n\" + str(headers))\n #resp = Response(\"test page response\")\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n@app.route('/longsql')\ndef longsql():\n 
db=SQLAlchemy(app)\n db.session.execute('select sleep(10)')\n db.session.commit\n all=db.session.execute('select * from test').fetchall()\n resp=Response(all[0][1])\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n@app.route('/new', methods = ['GET', 'POST'])\ndef new():\n if request.method == 'POST':\n if not request.form['name']:\n flash('Please enter all the fields', 'error')\n else:\n #people = test(request.form['name'], request.form['city'],request.form['addr'], request.form['pin'])\n people=test(request.form['name'])\n print(people)\n db.session.add(people)\n db.session.commit()\n flash('Record was successfully added')\n return redirect(url_for('show_all'))\n return render_template('new.html')\n\n\nif __name__ == \"__main__\":\n port = int(os.getenv(\"PORT\", 8080))\n app.run(host=\"0.0.0.0\",port=port)\n","sub_path":"flask-app.py","file_name":"flask-app.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"466982033","text":"###########################\n###########################\n###Load Modules###\nimport importlib,sys\nif sys.modules.has_key('PyInstaller'):\n\t sys.modules['PyInstaller'].importModulesFromFolderNames(FolderNames=[\"Setter\"]);\nelse:\n\tsys.exit(\"ERROR @ PyGetter : Need to import PyInstaller Module First!\");\n\n###########################\n###########################\n###PyGetterClass Definition\n\n#set the BaseFolderNames\ndef getBaseFolderNames():\n\treturn [\"CustomDict\"];\n\n#Class definition\nclass PyGetterClass():\n\n\t####__init__\n\t#\n\tdef __init__(self,**Kwargs):\n\t\t#init Bases\n\t\tsys.modules['PyInstaller'].initBases(**{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'BaseFolderNames':getBaseFolderNames(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'Instance':self,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'InitDictKwargs':Kwargs\n\t\t\t\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\n\t###Special DictInit Method\n\t#\n\tdef initPyGetterDict(self,**Kwargs):\n\t\tpass;\n\t\t\t\t\n\t#\"smart\" method to find something in the mess depending on priorities for searching...\n\tdef __getitem__(self,Key):\n\t\tif type(Key)==int:\n\t\t\tif self.__dict__.has_key('Contents'):\n\t\t\t\tif type(self['Contents'])==list:\n\t\t\t\t\tif Key>>>>>>>>>\")\n dtest_data=write_file(disease,test_disease)\n\n dtest=xgb.DMatrix(dtest_data)\n dtest = xgb.DMatrix(\"performance/test.txt\")\n dtest_group = load_group_file(\"performance/test_group.txt\")\n\n\n dtest.set_group(dtest_group)\n pred_result=model.predict(dtest)\n gene_performance=dict()\n for gene_name,performance in zip(gene_list,pred_result):\n gene_performance[gene_name]=float(performance)\n\n result=sorted(gene_performance.items(), key=lambda x: x[1], reverse=True)\n\n\n genes=test_disease[disease]\n\n rank=0\n for data in result:\n rank+=1\n gene=data[0]\n if gene in genes:\n if gene in non_train_gene:\n non_train_gene_rank.append(rank)\n print(rank)\n average_rank.append(rank)\n\n final_result=np.mean(average_rank)\n\n print(\"the number of non train gene rank\",np.mean(non_train_gene_rank))\n print(top_k(average_rank,10))\n print(top_k(average_rank,30))\n print(top_k(average_rank,50))\n print(top_k(average_rank,100))\n print(top_k(average_rank,500))\n\n return final_result\n\n\ndef top_k(result,k):\n top_k_result=[]\n for data in result:\n if data = test_result[index]):\n rank = rankdata(test_result, method=\"average\")\n rank_list.append(len(gene_list) - rank[index])\n break\n\n if (rank_list==[]):\n print(valid_result)\n\n\n return 
rank_list\n\n\ndef compute_performance(original_rank, predicted_rank):\n print(\"the length of original_rank: \",len(original_rank))\n print(\"the length of predicted rank: \",len(predicted_rank))\n ranks = []\n another_index = 1\n predicted_value = []\n ranked_list = []\n print(\"the num of data examples\",len(original_rank)/len(gene_list))\n for index in range(len(original_rank)):\n if another_index % len(gene_list) == 0:\n ranked_list.append(float(predicted_rank[index]))\n if original_rank[index] == \"1\":\n predicted_value.append(float(predicted_rank[index]))\n\n average_rank = compare_list(predicted_value, ranked_list)\n\n # print(len(predicted_value),len(ranked_list))\n # print(average_rank)\n for r in average_rank:\n ranks.append(r)\n another_index = 1\n predicted_value = []\n ranked_list = []\n else:\n ranked_list.append(float(predicted_rank[index]))\n if original_rank[index] == \"1\":\n predicted_value.append(float(predicted_rank[index]))\n\n another_index += 1\n\n clean_rank=[]\n for data in ranks:\n if data<100000:\n clean_rank.append(data)\n else:\n print(data)\n print(\"something wrong with algorithm\")\n\n return np.mean(clean_rank)\n\n\ndef check_overfitting(top_rank, predicted_rank):\n top_array = []\n for index, value in enumerate(predicted_rank):\n if len(top_array) < top_rank:\n top_array.append(index)\n else:\n min_index = -100\n min_value = -1000\n for ind in range(len(top_array)):\n if predicted_rank[top_array[ind]] > min_value:\n min_index = ind\n min_value = predicted_rank[top_array[ind]]\n\n if min_index != -100:\n top_array[min_index] = index\n return sorted(top_array)\n\n\ndef check_ovf(original_rank, predicted_rank):\n another_index = 1\n predicted_value = []\n ranked_list = []\n for index in range(len(original_rank)):\n if another_index % len(gene_list) == 0:\n ranked_list.append(float(predicted_rank[index]))\n # average_rank=compare_list(predicted_value,ranked_list)\n print(check_overfitting(20, ranked_list))\n # print(len(predicted_value),len(ranked_list))\n # print(average_rank)\n\n another_index = 1\n predicted_value = []\n ranked_list = []\n else:\n ranked_list.append(float(predicted_rank[index]))\n if original_rank[index] == \"1\":\n predicted_value.append(float(predicted_rank[index]))\n\n another_index += 1\n\n","sub_path":"phenoinfer/boosting/predict_bidirectional_sampling_full_gene.py","file_name":"predict_bidirectional_sampling_full_gene.py","file_ext":"py","file_size_in_byte":10399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"49817544","text":"import csv\n\ndef count(filehandler, delimiter=','):\n reader = csv.reader(filehandler, delimiter=delimiter)\n store = {}\n for i, row in enumerate(reader):\n store[row[0]] = store.get(row[0],0) + 1\n return store\n\n\ndef spliiter(filehandler, store, delimiter=','):\n reader = csv.reader(filehandler, delimiter=delimiter)\n split_1 = csv.writer(open('split_1.csv', 'w'))\n split_2 = csv.writer(open('split_2.csv', 'w'))\n header = next(reader)\n split_1.writerow(header)\n split_2.writerow(header)\n cur_id = ''\n num = 0\n for i, row in enumerate(reader):\n if row[0] == cur_id:\n if num < store[row[0]]//2:\n split_1.writerow(row)\n else:\n split_2.writerow(row)\n num += 1\n else:\n cur_id = row[0]\n num = 1\n split_1.writerow(row)\n\n\nstore = count(open('output_2.csv','r'))\nspliiter(open('output_2.csv', 'r'), 
store)","sub_path":"util/downsample.py","file_name":"downsample.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"134013581","text":"# this program is supposed to allow the user to compare function speeds across other programs\r\n\r\n# Import files here, note you don't need to add ending '.py'\r\n## Of course they don't have to be strictly Python files, anything Python can improt and run should work\r\n## Also note that the main contents of the file will be run on import\r\nimport Euler17\r\n\r\n# add all the function names that you want to time here\r\n## make sure to include the filename, eg. 'myfile.myfunc'\r\n## also make sure to not add trailing parenthesis\r\nfunctions = [Euler17.wordbuilder, Euler17.wordAccumulator]\r\n\r\n\r\n# set args flag to 1 for unique arguments, 0 for arguments, and -1 for no arguments\r\nargs = 0\r\n\r\n# add arguments to this list\r\n## for unique arguments, enter each set of new arguments as a seperate list, eg. arguments = [['first', 'set'], ['second', 'set'], ['and', 'so', 'on']]\r\n## make sure the unique arguments match their functions, eg. function[0] matches to argument[0] and function[1] to argument[1], etc.\r\narguments = []\r\n\r\n# put the number of runs you would like to do here\r\n## note that more runs increases accuracy and consistency but will also take longer to finish\r\nruns = 15\r\n\r\n\r\n\r\n\r\n# You shouldn't need to edit anything beyond this, but you can if you want to\r\ndef timeTest3(funcs, args, runs):\r\n\timport time\r\n\r\n\texTimes = [0] * len(funcs)\t\t\t\t\t\t\t# make a list with an entry for each function\r\n\r\n\tfor i in range(runs):\t\t\t\t\t\t\t\t# do timing process for **run** times\r\n\t\tfor j in range(len(funcs)):\t\t\t\t\t\t# do every function\r\n\t\t\tfor k in range(runs):\t\t\t\t\t\t# and do it for **run** number of times;; note that this means runtime is runs^2 * funcs\r\n\t\t\t\tstart = time.time()\t\t\t\t\t\t# mark start time\r\n\t\t\t\tfuncs[j](args)\t\t\t\t\t\t\t# run function in list, passing arguments;; functions must accept one argument as a list\r\n\t\t\t\texTimes[j] += time.time() - start\t\t# add run time to function entry\r\n\r\n\tfor i in range(len(exTimes)):\r\n\t\texTimes[i] = exTimes[i] / (runs * runs)\t\t\t# average run times, did runs^2 so divide by runs^2\r\n\treturn exTimes\r\n\r\ndef timeTest3_noArg(funcs, runs):\r\n\timport time\r\n\r\n\texTimes = [0] * len(funcs)\t\t\t\t\t\t\t# make a list with an entry for each function\r\n\r\n\tfor i in range(runs):\t\t\t\t\t\t\t\t# do timing process for **run** times\r\n\t\tfor j in range(len(funcs)):\t\t\t\t\t\t# do every function\r\n\t\t\tfor k in range(runs):\t\t\t\t\t\t# and do it for **run** number of times;; note that this means runtime is runs^2 * funcs\r\n\t\t\t\tstart = time.time()\t\t\t\t\t\t# mark start time\r\n\t\t\t\tfuncs[j]()\t\t\t\t\t\t\t\t# run function in list\r\n\t\t\t\texTimes[j] += time.time() - start\t\t# add run time to function entry\r\n\r\n\tfor i in range(len(exTimes)):\r\n\t\texTimes[i] = exTimes[i] / (runs * runs)\t\t\t# average run times, did runs^2 so divide by runs^2\r\n\treturn exTimes\r\n\r\ndef timeTest3_uniqueArgs(funcs, args, runs):\r\n\timport time\r\n\r\n\texTimes = [0] * len(funcs)\t\t\t\t\t\t\t# make a list with an entry for each function\r\n\r\n\tfor i in range(runs):\t\t\t\t\t\t\t\t# do timing process for **run** times\r\n\t\tfor j in range(len(funcs)):\t\t\t\t\t\t# do every function\r\n\t\t\tfor k in 
range(runs):\t\t\t\t\t\t# and do it for **run** number of times;; note that this means runtime is runs^2 * funcs\r\n\t\t\t\tstart = time.time()\t\t\t\t\t\t# mark start time\r\n\t\t\t\tfuncs[j](args[j])\t\t\t\t\t\t\t# run function in list, passing arguments;; functions must accept one argument as a list\r\n\t\t\t\texTimes[j] += time.time() - start\t\t# add run time to function entry\r\n\r\n\tfor i in range(len(exTimes)):\r\n\t\texTimes[i] = exTimes[i] / (runs * runs)\t\t\t# average run times, did runs^2 so divide by runs^2\r\n\treturn exTimes\r\n\r\n\r\ndef displayTimes(funcs, args, runs, timeTester):\r\n\ttimeList = timeTester(funcs, args, runs)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get a list of average times for the functions\r\n\tfor i in range(len(timeList)):\r\n\t\tif i == 0:\r\n\t\t\tprint(\"Function 1:\", timeList[i], \"s\")\r\n\t\telse:\r\n\t\t\tprint(\"Function \", i + 1, \": \", timeList[i], \" s; \", ((timeList[0] / timeList[i]) * 100) - 100, \" % change from \", 1, sep=\"\")\t\t# print out times, compares each one's time to first\r\n\r\n\r\nif args == 0:\r\n\tdisplayTimes(functions, arguments, runs, timeTest3)\r\nelif args == -1:\r\n\tdisplayTimes(functions, arguments, runs, timeTest3_noArg)\r\nelif args == 1:\r\n\tdisplayTimes(functions, arguments, runs, timeTest3_uniqueArgs)\r\nelse:\r\n\tprint(\"It looks like the args flag is set to a number that is not allowed\")\r\n\r\n# Ways to improve this:\r\n## DONE: accomodate functions that don't take arguments\r\n## DONE: accomodate functions that take different arguments\r\n## accomodate functions that don't take a list as an argument\r\n## choose reference function\r\n## dynamically set reference function\r\n## add command line interface\r\n## add error checking","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"404070913","text":"from matplotlib import pyplot as plt\nimport code\nfrom scipy import polyfit\nimport scipy.stats\nimport operator\nimport numpy as np\nimport glob\n\ndata = []\n\nfor f in glob.glob('edf*st.data'):\n n = int(f.split('edf')[1].split('st')[0])\n data.append((n, [int(x) for x in open(f, 'r').read().split()]))\n\ndata.sort(key=operator.itemgetter(0))\n\nxs, ys = zip(*data)\n\nfor x,y in data:\n print(f\"{x}: {len(y)} {min(y)}\")\n\n\nmeans = [np.mean(y) for y in ys]\n\nar, br, r_value, p_value, std_err = scipy.stats.linregress(np.log(xs), means)\n\nfig, ax = plt.subplots(figsize=(9,6))\n\nax.set_yscale('log', basey=10)\nax.set_xscale('log', basex=10)\n\nax.set_xticks([])\nax.set_xticks([], True)\n\ndata.sort(key=operator.itemgetter(0))\n\nbx_artists = ax.boxplot([x[1] for x in data], positions=[x[0] for x in data], widths=[x[0]/10 for x in data], sym='+', showmeans=True, manage_ticks=True, flierprops={'markeredgewidth': 0.5})\n\nyticks = [5, 10, 100]\nyticksmin = [20,30,40,50,60,70,80,90,120,140]\nax.set_ylim(5)\nax.set_yticks(yticks)\nax.set_yticks(yticksmin,True)\nax.set_yticklabels(yticks)\nax.set_yticklabels(yticksmin,minor=True)\n\nreg_xs = np.linspace(min(xs)*0.95, max(xs)*1.05, 4000)\n\ndef f(reg_xs):\n return ar * np.log(reg_xs) + br\n\nline = ax.plot(reg_xs, f(reg_xs), linestyle='dotted')\n\nax.legend([bx_artists['means'][0], bx_artists['medians'][0], line[0]], ['Mean', 'Median', f'Logarithmic fit to mean ($r^2$ = {r_value**2:.4f})'], loc='upper left')\n\nax.set_xlabel('Number of priorities')\nax.set_ylabel('Task switch time 
(μs)')\n\nfig.savefig('edfboxplot.eps')\nplt.show()\n","sub_path":"data/edf_swtboxplot.py","file_name":"edf_swtboxplot.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"571055431","text":"import socket, common, datetime, pickle, select\nimport sys\nfrom common import *\nclientID = \"P\"\n\n# The user has not subscribed to any location yet\nSUBSCRIBED = False\n\npublicClientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\npublicClientSocket.connect((HOST, PORT))\nnotificationSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nnotificationSocket.connect((HOST, PORT))\n\n\nprint(\"Menu:\\n\"\n      \"1 - Get locations of a pollutant;\\n\"\n      \"2 - Get last sample from a location:\\n\"\n      \"3 - Publish-Subscribe\\n\")\n\nwhile True:\n\n    # so it is possible to choose which socket is communicating\n    socks,_,_ = select.select([notificationSocket, publicClientSocket, sys.stdin], [], [])\n\n    for sock in socks:\n        # In case there are new readings\n        if sock == notificationSocket:\n            received = notificationSocket.recv(RECV_BUFFER)\n            sample = decodeData(received)\n            print(\"NOTIFICATION:\\n\")\n            print(f\"Last Sample: {sample.id} || {str(sample.date)} || {str(sample.value)} || {sample.unit}\\n\")\n        elif sock == sys.stdin:\n            choice = input()\n            \n            # Get the locations where a pollutant exists\n            if choice == \"1\":\n                poluent = input(\"Choose a pollutant: \")\n                publicClientSocket.send(pickle.dumps((clientID, choice, poluent, \"\")))\n                \n                data = publicClientSocket.recv(RECV_BUFFER) \n                decodedList = decodeData(data)\n\n                if len(decodedList) == 0:\n                    print(\"The chosen pollutant does not exist in any place.\\n\")\n                else:\n                    for locat in decodedList:\n                        print(locat)\n                    print(\"\\n\")\n            \n            # Get the last reading from location X\n            elif choice == \"2\":\n                location = input(\"Choose a location to get last sample:\")\n                publicClientSocket.send(pickle.dumps((clientID, choice, location, \"\")))\n\n                data = publicClientSocket.recv(RECV_BUFFER) \n                chosenLocation = decodeData(data)\n\n                if chosenLocation != 0:\n                    print(f\"SELECTED LOCATION:{chosenLocation.sample.id} || {str(chosenLocation.sample.date)} || {str(chosenLocation.sample.value)} || {chosenLocation.sample.unit}\\n\")\n                else:\n                    print(\"Selected sensor does not exist.\\n\")\n\n            # Publish Subscribe\n            elif choice == \"3\":\n                if not SUBSCRIBED:\n                    location = input(\"Choose a location to get subscribed:\")\n                    publicClientSocket.send(pickle.dumps((clientID, choice, location, notificationSocket.getsockname())))\n\n                    data = publicClientSocket.recv(RECV_BUFFER)\n                    answer = decodeData(data)\n\n                    if answer == 1:\n                        SUBSCRIBED = True\n                    else:\n                        print(\"That location does not have any sensor\\n\")\n                else:\n                    break \n\npublicClientSocket.close()\n","sub_path":"BSc/Redes de Computadores/src/publicClient.py","file_name":"publicClient.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"638644386","text":"import logging\nimport os\nimport time\n\nimport telegram\nfrom telegram.ext import (\n    Updater,\n    run_async,\n    CommandHandler,\n    RegexHandler,\n    CallbackQueryHandler,\n)\nfrom telegram import (\n    Update,\n    Bot,\n    ReplyKeyboardMarkup,\n    InlineKeyboardMarkup,\n    InlineKeyboardButton,\n)\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nimport strings, keyboards, database\n\nSession: scoped_session = None\n\n\nlogging.basicConfig(\n    format=\"%(asctime)s - %(name)s - 
%(levelname)s - %(message)s\",\n level=logging.INFO,\n)\n__log__ = logging.getLogger(__name__)\n\n\n@run_async\ndef start(bot: Bot, update: Update):\n chat_id = update.message.chat_id\n if chat_id == int(os.environ[\"ADMIN_ID\"]):\n bot.send_message(chat_id, strings.IS_ADMIN)\n return\n session = Session()\n user = session.query(database.User).filter_by(user_id=chat_id).first()\n if user is None:\n user = database.User(\n user_id=chat_id,\n first_name=update.message.from_user.first_name,\n last_name=update.message.from_user.last_name,\n )\n session.add(user)\n session.commit()\n user.bot_state = \"start\"\n Session.remove()\n bot.send_message(chat_id, strings.HELLO_1)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(4)\n bot.send_message(chat_id, strings.HELLO_2)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(4)\n bot.send_message(\n chat_id,\n strings.HELLO_3,\n reply_markup=ReplyKeyboardMarkup(\n keyboards.HELLO, resize_keyboard=True\n ),\n )\n\n\n@run_async\ndef start_low(bot: Bot, update: Update):\n chat_id = update.message.chat_id\n session = Session()\n session.query(database.User).filter_by(user_id=chat_id).update(\n {\"bot_state\": \"start_low\"}\n )\n session.commit()\n Session.remove()\n bot.send_message(chat_id, strings.INTERESTING_1)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(1)\n bot.send_message(chat_id, strings.INTERESTING_2)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(10)\n bot.send_message(\n chat_id,\n strings.INTERESTING_3,\n reply_markup=ReplyKeyboardMarkup(\n keyboards.INTERESTING, resize_keyboard=True\n ),\n )\n\n\n@run_async\ndef interesting(bot: Bot, update: Update):\n chat_id = update.message.chat_id\n session = Session()\n session.query(database.User).filter_by(user_id=chat_id).update(\n {\"bot_state\": \"interesting\"}\n )\n session.commit()\n Session.remove()\n bot.send_message(chat_id, strings.INFO_1)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(10)\n bot.send_message(chat_id, strings.INFO_2)\n time.sleep(10)\n bot.send_message(chat_id, strings.INFO_3)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(10)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(10)\n bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)\n time.sleep(5)\n bot.send_message(\n chat_id,\n strings.INFO_4,\n reply_markup=ReplyKeyboardMarkup(\n keyboards.CALLBACK, resize_keyboard=True\n ),\n )\n\n\ndef contact_handler(bot: Bot, update: Update):\n from_chat_id = update.message.chat_id\n message_id = update.message.message_id\n msg = bot.forward_message(os.environ[\"ADMIN_ID\"], from_chat_id, message_id)\n bot.send_message(\n os.environ[\"ADMIN_ID\"],\n strings.NEW_CONTACT,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n strings.CHECK,\n callback_data=\"delete_%d_%d\"\n % (msg.message_id, from_chat_id),\n )\n ]\n ]\n ),\n )\n bot.send_message(from_chat_id, strings.CONTACT)\n\n\ndef check_handler(bot: Bot, update: Update):\n data = update.callback_query.data\n message_id, user_id = data.split(\"_\")[1:]\n session = Session()\n user = session.query(database.User).filter_by(user_id=user_id).first()\n user.is_contacted = True\n session.commit()\n Session.remove()\n 
bot.delete_message(os.environ[\"ADMIN_ID\"], message_id)\n bot.delete_message(\n os.environ[\"ADMIN_ID\"], update.callback_query.message.message_id\n )\n\n\ndef error(_, update, exc):\n \"\"\"Log Errors caused by Updates.\"\"\"\n __log__.warning('Update \"%s\" caused error \"%s\"', update, exc)\n\n\ndef main():\n global Session\n engine = create_engine(os.environ[\"DB_URI\"])\n Session = scoped_session(sessionmaker(engine))\n updater = Updater(os.environ[\"TOKEN\"])\n updater.dispatcher.add_handler(CommandHandler(\"start\", start))\n updater.dispatcher.add_handler(\n RegexHandler(keyboards.HELLO[0][0], start_low)\n )\n updater.dispatcher.add_handler(\n RegexHandler(keyboards.INTERESTING[0][0], interesting)\n )\n updater.dispatcher.add_handler(\n RegexHandler(keyboards.CALLBACK[0][0], contact_handler)\n )\n updater.dispatcher.add_handler(\n CallbackQueryHandler(check_handler, pattern=r\"^delete_.*\")\n )\n updater.dispatcher.add_error_handler(error)\n updater.start_polling()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"111311264","text":"# -*- coding: utf-8 -*-\nimport tkinter as tk\n\nclass Menu:\n \"\"\" Menu widget for the main GUI window \"\"\"\n def __init__(self, master, config, shortcuts, functions):\n \"\"\" Initialize the Menu \"\"\"\n self.__config = config # obtain link on config file\n self.__shortcuts = shortcuts # obtain link on keyboard shortcuts\n self.__functs = functions # obtain link on dictionary of functions\n self.menubar = tk.Menu(master) # create main menu bar, public for the main GUI\n self.empty_menu = tk.Menu(master) # empty menu to hide the real menubar in fullscreen mode\n # Enable / disable these menu labels\n self.__label_recent = 'Open recent'\n self.__label_close = 'Close image'\n self.__label_tools = 'Tools'\n self.__label_rect = 'Get Rectangles'\n self.__label_open = 'Open ROI'\n self.__label_save = 'Save ROI'\n # Create menu for the image\n self.__file = tk.Menu(self.menubar, tearoff=False, postcommand=self.__list_recent)\n self.__file.add_command(label='Open image',\n command=self.__shortcuts[0][2],\n accelerator=self.__shortcuts[0][0])\n self.__recent_images = tk.Menu(self.__file, tearoff=False)\n self.__file.add_cascade(label=self.__label_recent, menu=self.__recent_images)\n self.__file.add_command(label=self.__label_close,\n command=self.__shortcuts[1][2],\n accelerator=self.__shortcuts[1][0],\n state='disabled')\n self.__file.add_separator()\n self.__file.add_command(label='Exit',\n command=self.__functs['destroy'],\n accelerator=u'Alt+F4')\n self.menubar.add_cascade(label='File', menu=self.__file)\n # Create menu for the tools: cut rectangular images with the rolling window, etc.\n self.__tools = tk.Menu(self.menubar, tearoff=False, postcommand=self.__check_figures)\n self.__tools.add_command(label=self.__label_rect,\n command=self.__shortcuts[2][2],\n accelerator=self.__shortcuts[2][0],\n state='disabled')\n self.__tools.add_separator()\n self.__tools.add_command(label=self.__label_open,\n command=self.__shortcuts[3][2],\n accelerator=self.__shortcuts[3][0])\n self.__tools.add_command(label=self.__label_save,\n command=self.__shortcuts[4][2],\n accelerator=self.__shortcuts[4][0])\n self.menubar.add_cascade(label=self.__label_tools, menu=self.__tools, state='disabled')\n # Create menu for the view: fullscreen, default size, etc.\n self.__view = 
tk.Menu(self.menubar, tearoff=False)\n self.__view.add_command(label='Fullscreen',\n command=self.__functs['toggle_fullscreen'],\n accelerator='F11')\n self.__view.add_command(label='Default size',\n command=self.__functs['default_geometry'],\n accelerator='F5')\n self.menubar.add_cascade(label='View', menu=self.__view)\n\n def __list_recent(self):\n \"\"\" List of the recent images \"\"\"\n self.__recent_images.delete(0, 'end') # empty previous list\n lst = self.__config.get_recent_list() # get list of recently opened images\n for path in lst: # get list of recent image paths\n self.__recent_images.add_command(label=path,\n command=lambda x=path: self.__functs['set_image'](x))\n # Disable recent list menu if it is empty.\n if self.__recent_images.index('end') is None:\n self.__file.entryconfigure(self.__label_recent, state='disabled')\n else:\n self.__file.entryconfigure(self.__label_recent, state='normal')\n\n def __check_figures(self):\n \"\"\" Check if there are figures on the image and enable/disable menu 'Rolling Window' \"\"\"\n if self.__functs['check_roi'](): # there are regions of interest on the image\n self.__tools.entryconfigure(self.__label_rect, state='normal') # enable menu\n else: # if there are no figures\n self.__tools.entryconfigure(self.__label_rect, state='disabled') # disable menu\n\n def set_state(self, state):\n \"\"\" Enable / disable some menus \"\"\"\n self.menubar.entryconfigure(self.__label_tools, state=state)\n self.__file.entryconfigure(self.__label_close, state=state)\n self.__tools.entryconfigure(self.__label_rect, state=state)\n","sub_path":"graphics_editor/editor/gui_menu.py","file_name":"gui_menu.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"640008731","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.selector import Selector\nfrom book.items import BookItem\nfrom snownlp import SnowNLP\nimport re\n\nurl = 'https://book.douban.com/subject/34995224/comments/'\n\nclass BookflaskSpider(scrapy.Spider):\n name = 'bookflask'\n allowed_domains = ['book.douban.com']\n start_urls = ['http://book.douban.com/']\n\n \n\n def start_requests(self):\n yield scrapy.Request(url=url ,callback=self.parse)\n \n def parse(self, response):\n comment_list = Selector(response=response).xpath('//*[@id=\"comments\"]/ul/li')\n # print(comment_list)\n\n for comment in comment_list:\n star = comment.xpath('.//span[@class=\"comment-info\"]/span/@title').extract_first().strip()\n # print(star)\n shorts = comment.xpath('.//p[@class=\"comment-content\"]/span/text()').extract_first().strip()\n # print(shorts)\n short = re.sub('\\n', \"\", shorts)\n # print(short)\n sentiment = SnowNLP(short).sentiments\n # print(sentiment)\n\n item = BookItem()\n item['star'] = star\n item['short'] = short\n item['sentiment'] = sentiment\n\n print(item)\n yield item\n\n ","sub_path":"Week_06/G20190389010004/week06_0004/book/book/spiders/bookflask.py","file_name":"bookflask.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"506900080","text":"# Given the following Person class:\n\nclass Person:\n\n \n\n def __init__(self, name, email, phone):\n self.name = name\n self.email = email\n self.phone = phone\n self.friends = []\n self.greetingcount = 0\n\n\n def greet(self, other_person):\n print('Hello {}, I am {}!'.format(other_person.name, self.name))\n self.greetingcount = self.greetingcount + 
1\n\n# Create a new method for class Person that returns the person's phone number and email so that we can call it whenever we need the person's information.\n    def description(self):\n        return f'{self.name} phone number is {self.phone} and his email is {self.email}.'\n\n# Add a method to print out the contact info for an object instance of Person\n    def print_contact_info(self):\n        print(f\"{self.name}'s email: {self.email}\")\n        print(f\"{self.name}'s phone number: {self.phone}\")\n\n# Add a friends instance variable (attribute) to the Person class. You will initialize it to an empty list within the constructor __init__. Once you've done this you should be able to add a friend to a person using list's append method:\n\n    def add_friend(self, other_friend):\n        self.friends.append(other_friend)\n        for friend in self.friends:\n            print(friend.name)\n\n    def num_friends(self):\n        return len(self.friends)\n\n    def greeting_count(self):\n        return self.greetingcount\n\n\n\n# Instantiate instance objects of the class Person with their name, email, and phone number\nsonny = Person('Sonny', 'sonny@hotmail.com', '483-485-4948')\njordan = Person('Jordan', 'jordan@aol.com', '495-586-3456')\n\n# Have sonny greet jordan using the greet method\nsonny.greet(jordan)\n\n# Have jordan greet sonny using the greet method\njordan.greet(sonny)\n\n# Write a print statement to print the contact info (email and phone) of Sonny\nprint(sonny.description())\n\n# Write a print statement to print the contact info (email and phone) of Jordan\nprint(jordan.description())\n\nsonny.print_contact_info()\n\n\njordan.add_friend(sonny)\nsonny.add_friend(jordan)\n\nsonny.num_friends()\njordan.num_friends()\n\n\nsonny.greet(jordan)\nsonny.greet(jordan)\nsonny.greet(jordan)\nsonny.greet(jordan)\n\nprint(sonny.greeting_count())\n\nprint(f'Sonny has {sonny.num_friends()} friends')\n","sub_path":"sonny.py","file_name":"sonny.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"330960127","text":"CollatzCountSaveF = 0\nCollatzMaxSave = 0\nCollatzCountArray = []\n\n\nfor i in range(2):\n    CollatzInput = int(input())\n    CollatzCountArray.append(CollatzInput)\n\nCollatzCountArrayLength = len(CollatzCountArray)\n\nfor i in range(CollatzCountArrayLength):\n    CollatzCalculate = CollatzCountArray[i]\n    # reset the per-number counters so each input is measured on its own\n    CollatzCountSaveF = 0\n    CollatzMaxSave = CollatzCalculate\n\n    while CollatzCalculate != 1:\n        if CollatzCalculate % 2 == 0:\n            CollatzCalculate = CollatzCalculate // 2\n        else:\n            CollatzCalculate = (3 * CollatzCalculate) + 1\n        CollatzCountSaveF += 1\n\n        if CollatzMaxSave <= CollatzCalculate:\n            CollatzMaxSave = CollatzCalculate\n\n    # count the final 1 as part of the sequence length\n    CollatzCountSaveF += 1\n    print(\"CollatzLength :\", CollatzCountSaveF, \"CollatzMax :\", CollatzMaxSave)\n\n","sub_path":"CodeUpCode/CollatzConjecture.py","file_name":"CollatzConjecture.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"260646142","text":"# Constructors\n\n# Constructors are not methods, but they are handled in much the same way.\n# In Python, they are methods named __init__. They are executed automatically\n# when an object is created.\n\n# In the \"Instantiating an object\" section we said that to create an object, we used\n# the following syntax (for an object of type Voiture): une_voiture = Voiture()\n\n# Until now, we have used constructors without parameters. However, like any other\n# method or function, we can pass parameters to the constructor. This makes it possible\n# to initialize the object with specific values at creation time rather than using default values.\n\n# For the Voiture class, we could then have the following constructor:\n\nclass Voiture:\n    def __init__(self, no_serie, marque, modele, annee, km, prix):\n        self.no_serie = no_serie\n        self.marque = marque\n        self.modele = modele\n        self.annee = annee\n        self.km = km\n        self.prix = prix\n\n    def imprimer_voiture(self):\n        print(\"{} \\t {} \\t {} \\t {} \\t {} \\t {}\".format(\n            self.no_serie,\n            self.marque,\n            self.modele,\n            self.annee,\n            self.km,\n            self.prix\n        ))\n\n\n# This way, I can create my Voiture objects by passing them their own initial values\n# as parameters, which is much more efficient than creating a default object\n# and then assigning its attributes…\n\n# To use the constructor, we use the following syntax:\n# une_voiture = Voiture(\"1\", \"Honda\", \"Civic\", 1999, 42000, 17000)\n\n# An example:\n\nif __name__ == \"__main__\":\n    une_voiture = Voiture(\"1\", \"Honda\", \"Civic\", 1999, 42000, 17000)\n    une_voiture.imprimer_voiture()\n\n    une_voiture.km += 5000\n\n    une_voiture.imprimer_voiture()\n\n# Continued in concessionnairev1.py\n\n\n","sub_path":"TD5/voiturev3.py","file_name":"voiturev3.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"298342760","text":"inFile = open('HEK293-Trans-Integration.blasted.filtered.rawseq')\nouFile = open('HEK293-Trans-Integration.blasted.filtered.rawseq-num', 'w')\nD = {}\nTrans = ['pFRT-lacZeo', 'pcDNA6-TR']\nwhile True:\n    line1 = inFile.readline().strip()\n    line2 = inFile.readline().strip()\n    if line1:\n        fields1 = line1.split('\\t')\n        fields2 = line2.split('\\t')\n        ch1 = fields1[3]\n        ch2 = fields2[3]\n        if ch1 in Trans:\n            D.setdefault(ch1, {})\n            D[ch1].setdefault(ch2, 0)\n            D[ch1][ch2] += 1\n        elif ch2 in Trans:\n            D.setdefault(ch2, {})\n            D[ch2].setdefault(ch1, 0)\n            D[ch2][ch1] += 1\n    else:\n        break\ninFile.close()\nfor k in D:\n    for x in D[k]:\n        ouFile.write(k + '\\t' + x + '\\t' + str(D[k][x]) + '\\n')\nouFile.close()\n","sub_path":"IntegrationSite/Blast/9-ch.py","file_name":"9-ch.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"461299806","text":"from mgipython.dao.user_dao import UserDAO\nfrom mgipython.dao.vocterm_dao import VocTermDAO\nfrom mgipython.model import MGIUser\nfrom mgipython.domain.mgi_domains import UserDomain\nfrom mgipython.domain.gxd_domains import *\nfrom flask_login import current_user\nfrom mgipython.error import NotFoundError\nfrom mgipython.modelconfig import cache\nfrom mgipython.domain import convert_models\nfrom .vocterm_service import VocTermService\n\nclass UserService():\n    \n    user_dao = UserDAO()\n    vocterm_dao = VocTermDAO()\n    vocterm_service = VocTermService()\n    \n    def get_by_key(self, _user_key):\n        user = self.user_dao.get_by_key(_user_key)\n        if not user:\n            raise NotFoundError(\"No MGIUser for _user_key=%d\" % _user_key)\n        return 
convert_models(user, UserDomain)\n \n def search(self, search_query):\n \"\"\"\n Search using a SearchQuery\n \"\"\"\n search_result = self.user_dao.search(search_query)\n \n # convert results to domain objects\n users = search_result.items\n search_result.items = convert_models(users, UserDomain)\n \n return search_result\n \n def create(self, args):\n \"\"\"\n Create user with an argument object\n \"\"\"\n user = MGIUser()\n # get the next primary key\n user._user_key = self.user_dao.get_next_key()\n # set MGIUser values\n user.login = args['login']\n user.name = args['name']\n user._usertype_key = args['_usertype_key']\n user._userstatus_key = args['_userstatus_key']\n \n #user._createdby_key = current_user._user_key\n #user._modifiedby_key = current_user._modifiedby_key\n self.user_dao.create(user)\n return convert_models(user, UserDomain)\n \n \n def update(self, key, args):\n \"\"\"\n update user with and argument object\n \"\"\"\n user = self.user_dao.get_by_key(key)\n if not user:\n raise NotFoundError(\"No MGIUser for _user_key=%d\" % key)\n user.login = args['login']\n user.name = args['name']\n user._usertype_key = args['_usertype_key']\n user._userstatus_key = args['_userstatus_key']\n #user._modifiedby_key = current_user._modifiedby_key\n self.user_dao.update(user)\n return convert_models(user, UserDomain)\n \n \n def delete(self, _user_key):\n \"\"\"\n Delete MGIUser object\n \"\"\"\n user = self.user_dao.get_by_key(_user_key)\n if not user:\n raise NotFoundError(\"No MGIUser for _user_key=%d\" % _user_key)\n self.user_dao.delete(user)\n return convert_models(user, UserDomain)\n\n def current_user(self):\n return convert_models(current_user, UserDomain)\n","sub_path":"mgipython/service/user_service.py","file_name":"user_service.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"342908243","text":"import torch\nfrom utils import Counter\nfrom torch.utils.data import TensorDataset, DataLoader\nimport math\n\n\nclass AttackWrapper():\n def __init__(self, attacker, model_wrapper):\n self.attacker = attacker\n self.model = model_wrapper.model\n self.device = next(self.model.parameters()).device\n self.stat = Counter()\n self.epsilon = []\n\n def attack(self, x, y):\n ori_outputs = self.model(x)\n imgs = self.attacker(x, y)\n self.epsilon.append(imgs-x)\n label = torch.ones_like(y)\n ae_outputs = self.model(imgs)\n _, ori_predicted = torch.max(ori_outputs.data, 1)\n _, ad_predicted = torch.max(ae_outputs.data, 1)\n self.stat.count(ad_predicted, ori_predicted)\n return imgs, label\n\n def transform(self, dataloader, num_batch, shuffle=True):\n total_iteration = math.ceil(min(num_batch, len(dataloader.dataset)) / dataloader.batch_size)\n x = []\n y = []\n for i, (images, labels) in enumerate(dataloader):\n if i >= total_iteration:\n break\n images = images.to(self.device)\n labels = labels.to(self.device)\n ae_images, _ = self.attack(images, labels)\n x.append(ae_images)\n y.append(labels)\n i = len(y)\n print(\"finish {}/{} batches\".format(str(i), str(total_iteration)))\n\n x = torch.cat(x, dim=0).cpu()\n y = torch.cat(y, 0).cpu()\n dataset = TensorDataset(x.detach(), y.detach())\n return DataLoader(dataset, batch_size=dataloader.batch_size, shuffle=shuffle)\n","sub_path":"attacks/attack_wrapper.py","file_name":"attack_wrapper.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"371266844","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport hashlib\nimport shutil\nimport datetime\n# update git init\n\n\ndef get_argument():\n parser = argparse.ArgumentParser()\n parser.add_argument('command', nargs='+', help='init/add/commit/snapshots/'\n 'index/config/status')\n parser.add_argument('--author', action='store')\n parser.add_argument('-m', action='store')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = get_argument()\n content_index = []\n command = args.command[0]\n author = os.environ['LOGNAME']\n path_dir = os.getcwd()\n init = 0\n flag = 0\n while path_dir != '/' and flag == 0:\n for root, dirnames, filenames in os.walk(path_dir):\n if flag == 0:\n for name in dirnames:\n if name == '.lgit':\n init = 1\n path_lgit = path_dir\n flag = 1\n break\n path_dir = os.path.dirname(path_dir)\n if command == 'init':\n create_dir()\n init = 1\n elif init == 0:\n print('fatal: not a git repository (or any of the parent'\n ' directories)')\n elif init == 1:\n if command == 'add':\n argument = args.command[1:]\n flag = 1\n for item in argument:\n if not os.path.exists(item):\n print(\"fatal: pathspec '\" + item +\n \"' did not match any files\")\n flag = 0\n break\n if flag == 1:\n for item in argument:\n list_index = lgit_add(path_lgit, item)\n content_index = add_list(list_index, content_index)\n write_index_content(path_lgit, content_index)\n elif command == 'rm':\n argument = args.command[1:]\n for item in argument:\n if not os.path.exists(item):\n print(\"fatal: pathspec '\" + item +\n \"' did not match any files\")\n break\n else:\n update_index = remove_index(path_lgit, item)\n if update_index == 0:\n print(\"fatal: pathspec '\" + item +\n \"' did not match any files\")\n else:\n write_index(path_lgit, '\\n'.join(update_index) + '\\n')\n remove_file(item)\n elif command == 'config':\n author = args.author\n config(path_lgit, args.author)\n elif command == 'commit':\n # not do fatal error of commit: not init\n with open(path_lgit + '/.lgit/config', 'r') as file:\n author = file.readline().strip()\n lgit_commit(path_lgit, args.m, author)\n elif command == 'ls-files':\n print_ls_files(path_lgit)\n elif command == 'status':\n status_list, commit = get_status(path_lgit)\n print_status(status_list, commit)\n elif command == 'log':\n lgit_log(path_lgit)\n\n\ndef lgit_log(path_lgit):\n list_files = []\n path = path_lgit + '/.lgit/commits'\n for dirname, dirnames, filenames in os.walk(path):\n for name in filenames:\n list_files.append(name)\n list_files.sort(reverse=True)\n for i in list_files:\n with open(path + \"/\" + i, \"r\") as file:\n lines = file.readlines()\n print('commit', i)\n print('Author:', lines[0].strip())\n time = datetime.datetime.strptime(lines[1].strip(), '%Y%m%d%H%M%S')\n print('Date:', time.strftime('%a %b %d %H:%M:%S %Y'))\n print()\n print('\\t' + lines[3] + '\\n')\n\n\n# -------------------------------LGIT STATUS-----------------------------------\ndef get_status(path_lgit):\n status_list = [[], [], []] # [to be committed][not staged][untracked]\n if len(os.listdir(path_lgit + '/.lgit/commits')) == 0:\n commit = 0\n else:\n commit = 1\n with open(path_lgit + '/.lgit/index', 'r') as file:\n line = file.readlines()\n files = []\n for x in range(len(line)):\n line[x] = line[x][:-1].split(' ')\n line[x][1] = caculate_sha1_file(path_lgit+'/'+line[x][-1])\n if line[x][2] != line[x][3]:\n status_list[0].append(line[x][-1])\n if line[x][1] != line[x][2]:\n status_list[1].append(line[x][-1])\n 
files.append(line[x][-1])\n with open(path_lgit + '/.lgit/index', 'w') as file:\n for x in line:\n file.write(' '.join(x) + '\\n')\n list_file = []\n pre_path = os.getcwd().split(path_lgit)[-1][1:]\n if len(pre_path) > 0:\n pre_path += '/'\n for dirname, dirnames, filenames in os.walk('./'):\n for filename in filenames:\n path = os.path.join(dirname, filename)\n if '.lgit' not in path and '.git' not in path:\n list_file.append(pre_path + path[2:])\n for file in list_file:\n if file not in files:\n status_list[2].append(file)\n return status_list, commit\n\n\ndef print_status(status_list, commit):\n print('On branch master')\n if commit == 0:\n print('\\nNo commits yet\\n')\n if status_list[0]:\n print('Changes to be committed:')\n print(' (use \"./lgit.py reset HEAD ...\" to unstage)\\n')\n for x in status_list[0]:\n print('\\t modified: ' + x)\n print()\n if status_list[1]:\n print('Changes not staged for commit:')\n print(' (use \"./lgit.py add ...\" to update what will be committed)')\n print(' (use \"./lgit.py checkout -- ...\" to discard changes in'\n ' working directory)\\n')\n for x in status_list[1]:\n print('\\t modified: ' + x)\n print()\n if status_list[2]:\n print('Untracked files:')\n print(' (use \"./lgit.py add ...\" to include in what will'\n ' be committed)\\n')\n for x in status_list[2]:\n print('\\t' + x)\n print('\\nnothing added to commit but untracked files'\n ' present (use \"./lgit.py add\" to track)\\n')\n\n\n# -------------------------------LGIT LS-FILES---------------------------------\ndef print_ls_files(path_lgit):\n path = os.getcwd()\n list_file = []\n list_result = []\n with open(path_lgit + \"/.lgit/index\", \"r\") as f_index:\n lines = f_index.readlines()\n for dirname, dirnames, filenames in os.walk(path):\n for filename in filenames:\n list_file.append(os.path.join(dirname, filename))\n for line in lines:\n path_index = (line.split(' ')[-1]).strip()\n for path1 in list_file:\n if path_index in path1:\n index = path1.split(path)[-1][1:]\n if index not in list_result:\n list_result.append(index)\n list_result = sorted(list_result)\n print('\\n'.join(list_result))\n\n\ndef check_directory(path): # ?????\n path_dir = path\n flag = 0\n while flag != 1:\n for root, dirnames, filenames in os.walk(path_dir):\n for name in dirnames:\n if name == '.lgit':\n return path_dir\n path_dir = os.path.dirname(path_dir)\n\n\n# -------------------------------LGIT RM-------------------------------------\ndef remove_file(filename):\n path_list = filename\n basename = os.path.basename(filename)\n if os.path.exists(filename):\n os.remove(filename)\n\n\ndef remove_index(path_lgit, filename):\n # find pathname_deleted in index and rm file # (2)\n update_index = []\n flag = 0\n with open(path_lgit + \"/.lgit/index\", \"r\") as f_index:\n lines = f_index.readlines()\n for line in lines:\n path = (line.split(' ')[-1]).strip()\n if filename == path:\n flag = 1\n if filename != path:\n update_index.append(line.strip())\n if flag == 0:\n return flag # if not have turn 0\n else:\n return update_index # if have turn list are deleted file index\n\n\ndef write_index_content(path_lgit, content):\n with open(path_lgit + '/.lgit/index', 'r+') as file:\n lines = file.readlines()\n for line_content in content:\n path_name = line_content.split(' ')[-1]\n flag = 0\n for i in range(len(lines)):\n lines[i] = lines[i].strip()\n path_index = lines[i].split(' ')[-1]\n if path_name == path_index:\n lines[i] = lines[i].split(' ')\n lines[i][1] = lines[i][2] = line_content.split(' ')[1]\n lines[i][0] = 
line_content.split(' ')[0]\n lines[i] = ' '.join(lines[i])\n flag = 1\n if flag == 0:\n lines.append(line_content)\n write_index(path_lgit, '\\n'.join(lines) + '\\n')\n\n\n# -------------------------------LGIT COMMIT----------------------------------\ndef lgit_commit(path_lgit, mess, author):\n check = 0\n with open(path_lgit + '/.lgit/index', 'r+') as file:\n lines = file.readlines()\n for x in range(len(lines)):\n subline = []\n for y in lines[x].split(' '):\n if y != '':\n subline.append(y)\n if len(subline) == 4:\n subline.insert(3, subline[2])\n check = 1\n elif subline[3] != subline[2]:\n subline[3] = subline[2]\n check = 1\n lines[x] = ' '.join(subline)\n if check == 1:\n with open(path_lgit + '/.lgit/index', 'w+') as file:\n file.write(''.join(lines))\n time = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S.%f\")\n with open(path_lgit + '/.lgit/commits/' + time, 'w+') as file:\n file.write(author + '\\n')\n file.write(time.split('.')[0] + '\\n\\n')\n file.write(mess + '\\n')\n with open(path_lgit + '/.lgit/snapshots/' + time, 'w+') as f:\n for line in lines:\n f.write(line.split(' ')[3] + ' ' + line.split(' ')[4])\n elif check == 0:\n print('On branch master')\n print(\"Your branch is up-to-date with 'origin/master'.\")\n print('nothing to commit, working directory clean')\n\n\n# -------------------------------LGIT CONFIG--AUTHOR---------------------------\ndef config(path_lgit, author):\n file = path_lgit + '/.lgit/config'\n with open(file, 'w+') as f:\n f.write(author + '\\n')\n\n\ndef write_index(path_lgit, content): # write file index # (1)\n with open(path_lgit + \"/.lgit/index\", 'w') as f_index:\n f_index.write(content)\n f_index.close()\n\n\n# -------------------------------LGIT ADD----------------------------------\ndef lgit_add(path_lgit, file_name):\n list_index = []\n if os.path.isdir(file_name):\n files = directory_tree_list(file_name)\n for file in files:\n index = create_file_objects(path_lgit, file)\n list_index.append(index)\n if os.path.isfile(file_name):\n index = create_file_objects(path_lgit, file_name)\n list_index.append(index)\n return list_index\n\n\ndef directory_tree_list(path):\n list_file = []\n for dirname, dirnames, filenames in os.walk(path):\n for filename in filenames:\n list_file.append(os.path.join(dirname, filename))\n return list_file\n\n\ndef create_file_objects(path_lgit, filename):\n file_content = open(filename, 'r').read()\n path_objects = path_lgit + '/.lgit/objects'\n hash_sha1 = caculate_sha1_file(filename)\n file_name = hash_sha1[2:]\n dir_name = hash_sha1[:2]\n if not os.path.exists(path_objects + \"/\" + dir_name):\n os.mkdir(path_objects + \"/\" + dir_name)\n file = open(path_objects + \"/\" + dir_name + \"/\" + file_name, 'w+')\n file.write(file_content)\n file.close()\n hash_sha2 = caculate_sha1_file(path_objects + \"/\" + dir_name + \"/\"\n + file_name)\n index = create_structure_index(path_lgit, filename, hash_sha1, hash_sha2)\n return(index)\n\n\ndef add_list(list, list_add): # (3)\n for i in list:\n if '.lgit' not in i and '.git' not in i:\n list_add.append(i)\n return list_add\n\n\ndef caculate_sha1_file(filename):\n with open(filename, 'rb') as file:\n text = b''.join(file.readlines())\n hash_object = hashlib.sha1(text)\n hex_dig = hash_object.hexdigest()\n return hex_dig\n\n\ndef create_structure_index(path_lgit, filename, hash1, hash2):\n file_index = []\n timestamp = str(get_timestamp(filename))\n file_index.append(timestamp)\n file_index.append(hash1)\n file_index.append(hash2)\n # SHA1 of the file content after 
commit\n    file_index.append(' ' * 40)\n    path = os.getcwd().split(path_lgit)[-1][1:]\n    if len(path) > 0:\n        path += '/'\n    if filename[:2] == './':\n        file_index.append(path + filename[2:])\n    else:\n        file_index.append(path + filename)\n    return ' '.join(file_index)\n\n\ndef get_timestamp(filename):\n    t = os.path.getmtime(filename)\n    time = str(datetime.datetime.fromtimestamp(t))\n    list1 = time.split('.')\n    time = list1[0]\n    list_time = list(time)\n    timestamp = []\n    for i in list_time:\n        if i != '-' and i != ':' and i != ' ':\n            timestamp.append(i)\n    return ''.join(timestamp)\n\n\n# --------------------------------INIT---------------------------------\ndef create_dir():\n    path = os.getcwd() + '/.lgit'\n    if os.path.exists(path):\n        print('Git repository already initialized.')\n    else:\n        os.mkdir(path)\n        os.mkdir(path + '/commits')\n        os.mkdir(path + '/objects')\n        os.mkdir(path + '/snapshots')\n        filename_index = os.path.join(path, 'index')\n        file = open(filename_index, 'w+')\n        file.close()\n        filename_config = os.path.join(path, 'config')\n        file = open(filename_config, 'w+')\n        file.write(os.environ['LOGNAME'])\n        file.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"lgit.py","file_name":"lgit.py","file_ext":"py","file_size_in_byte":13893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"446178008","text":"from boltiot import Sms, Bolt\nimport json, time\n# NOTE: 'conf' is the local config module (assumed to sit next to this script)\n# that provides the Bolt and Twilio credentials used below.\nimport conf\n\nmybolt = Bolt(conf.API_KEY, conf.DEVICE_ID)\nsms = Sms(conf.SID, conf.AUTH_TOKEN, conf.TO_NUMBER, conf.FROM_NUMBER)\ngarbage_limit = 10\nresponse = mybolt.serialRead('13')\nprint(response)\n\n\nwhile True:\n    print(\"Reading sensor value\")\n    response = mybolt.serialRead('13')\n    data = json.loads(response)\n    print(\"Garbage value is: \" + str(data['value']))\n    try:\n        garbage_value = int(data['value'].rstrip())\n        if garbage_value < garbage_limit or garbage_value == 357:\n            print(\"Making request to Twilio to send a SMS\")\n            response = sms.send_sms(\"Hello I'm full, please clean me\")\n    except Exception as e:\n        print(\"Error occurred: Below are the details\")\n        print(e)\n    time.sleep(20)\n","sub_path":"dustbin_full_alert.py","file_name":"dustbin_full_alert.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"211623168","text":"#!/usr/bin/python\n#\n# Copyright (C) 2015 NTT Innovation Institute, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\n\ndef read_file(path):\n    result = []\n    f = open(path)\n    for line in f:\n        result.append(line)\n    f.close()\n    return result\n\n\ndef write_file(path, lines):\n    f = open(path, 'w')\n    f.writelines(lines)\n    f.close()\n\n\ndef startswith(content_a, content_b):\n    l = len(content_a)\n    if len(content_b) < l:\n        return False\n    for i in range(l):\n        if content_a[i] != content_b[i]:\n            return False\n    return True\n\n\ndef find_files(directory, extension, ignore_dir):\n    for root, 
dirs, files in os.walk(directory):\n if root.find(ignore_dir) > 0:\n continue\n for f in files:\n if f.endswith(extension):\n yield os.path.join(root, f)\n\n\nlicense = read_file(\"./tools/go_license_header\")\n\n# process go files\nfor go_file in find_files(\".\", \".go\", \"Godeps\"):\n go_code = read_file(go_file)\n if not startswith(license, go_code):\n print(\"%s has no license header. Adding..\" % go_file)\n new_source = []\n new_source.extend(license)\n new_source.extend(['\\n', '\\n'])\n new_source.extend(go_code)\n write_file(go_file, new_source)\n\n# process go files\nfor js_file in find_files(\".\", \".js\", \"webui\"):\n js_code = read_file(js_file)\n if not startswith(license, js_code):\n print(\"%s has no license header. Adding..\" % js_file)\n new_source = []\n new_source.extend(license)\n new_source.extend(['\\n', '\\n'])\n new_source.extend(js_code)\n write_file(js_file, new_source)\n","sub_path":"tools/license.py","file_name":"license.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"196356416","text":"\nimport requests\nfrom dateutil import parser\nfrom datetime import datetime\n\ngraph_url = 'https://graph.facebook.com'\n\ndef order_feed(all_feeds):\n\n\tfor post in all_feeds:\n\t#created_time key\n\t\t#twitter-time\n\t\tif 'created_at' in post:\n\t\t\tpost['created_time'] = post.pop('created_at')\n\n\t\t\t\n\n\t\t#message key\n\t\tif 'text' in post:\t\n\t\t\tpost['message'] = post.pop('text')\n\t\t#twitter picture to link key\n\t\tif 'entities' in post:\n\t\t\tif 'media' in post['entities']:\n\t\t\t\tpost['link']=post['entities']['media'][0]['media_url']\t\t\n\n\t\t#assign datetime.datetime\n\t\t#post['created_time'] = parser.parse(post['created_time'])\n\t\t#twitter profile pic\n\t\tif 'user' in post:\n\t\t\tpost['profile_pic'] = post['user']['profile_image_url']\n\t\t\tpost['name']=post['user']['name']\n\t\t\tpost['platform']='twitter'\n\t\t\t\t\n\t\t\tpost['created_time'] = parser.parse(post['created_time'])\n\t\t\tpost[\"id\"]=str(post[\"id\"])\n\t\t#facebook profile pic from graph call in views.py\n\t\tif 'from' in post:\n\t\t\tposter_id=post['from']['id']\n\t\t\tendpoint=graph_url+'/'+poster_id+'/picture'\n\t\t\tparams={'redirect':'false'}\n\t\t\tr=requests.get(endpoint,params=params)\n\t\t\tdata=r.json()\n\t\t\tprofile_pic=data['data']['url']\n\t\t\tpost['profile_pic']=profile_pic\n\n\t\t\tpost['name']=post['from']['name']\n\t\t\tpost['platform']='facebook'\n\t\t\t\n\t\t\tpost['created_time'] = parser.parse(post['created_time'])\n\n\t\t\tif 'picture' in post:\n\t\t\t\tndx_of_s = post['picture'].rfind('s')\n\t\t\t\tndx_of_n = post['picture'].rfind('n')\n\t\t\t\tif ndx_of_s > 50:\n\t\t\t\t\tnew_link = post['picture'][:ndx_of_s]+'n'+post['picture'][ndx_of_s+1:]\n\t\t\t\telse:\n\t\t\t\t\tnew_link=post['picture']\n\t\t\t\tpost['picture']=new_link\n\n\n\t\t\tif 'source' in post:\n\t\t\t\tlink=post['source']\n\t\t\t\tndx=link.find('p')\n\t\t\t\tlink=link[:ndx+1]+'s'+link[ndx+1:]\n\t\t\t\tlink=link.strip(\"&autoplay=1\")\n\t\t\t\tpost[\"source\"]=link\n\n\t\t#linkedin-time\n\t\tif 'timestamp' in post:\n\t\t\ttimestamp = post['timestamp']\n\t\t\tstringify = str(timestamp)\n\t\t\tin_sec = stringify[0:-3]\n\t\t\tfloated = float(in_sec)\n\t\t\tobj = datetime.fromtimestamp(floated)\n\t\t\tstrftime = datetime.strftime(obj,'%Y %d %b %H:%M:%S +0000')\n\t\t\t\n\t\t\tpost['created_time']=parser.parse(strftime)\n\t\t\tpost['platform']='linkedin'\n\t\t\t\n\treturn 
all_feeds\n","sub_path":"home_feed.py","file_name":"home_feed.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"56103439","text":"\nclass ArrayExample:\n\n def __init__(self):\n print(\"----------------------------ARRAY PROGRAM---------------------------------\")\n\n def reversearray(self):\n element = input(\"Enter Number of Element : \")\n array = []\n for n in range(int(element)):\n number = int(input(\"Enter the value : \"))\n array.append(number)\n print(array)\n array.reverse()\n print(array)\n","sub_path":"arrayproblems/Program3.py","file_name":"Program3.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"474440516","text":"# This program finds certain substrings within a string.\n\n# From https://open.kattis.com/problems/whatdoesthefoxsay\n\n\"\"\"\nTitle: What does the fox say?\nDetermined to discover the ancient mystery—the sound that the fox makes—you went into the forest, armed with a very good digital audio recorder. The forest is, however, full of animals’ voices, and on your recording, many different sounds can be heard. But you are well prepared for your task: you know exactly all the sounds which other animals make. Therefore the rest of the recording—all the unidentified noises—must have been made by the fox.\n\nInput\nThe first line of input contains the number of test cases T. The descriptions of the test cases follow:\n\nThe first line of each test case contains the recording—words over lower case English alphabet, separated by spaces. Each contains at most 100 letters and there are no more than 100 words. The next few lines are your pre-gathered information about other animals, in the format goes . There are no more than 100 animals, their names are not longer than 100 letters each and are actual names of animals in English. There is no fox goes ... among these lines.\n\nThe last line of the test case is exactly the question you are supposed to answer: what does the fox say?\n\nOutput\nFor each test case, output one line containing the sounds made by the fox, in the order from the recording. 
You may assume that the fox was not silent (contrary to popular belief, foxes do not communicate by Morse code).\n\nSample Input 1\n1\ntoot woof wa ow ow ow pa blub blub pa toot pa blub pa pa ow pow toot\ndog goes woof\nfish goes blub\nelephant goes toot\nseal goes ow\nwhat does the fox say?\n\nSample Output 1\nwa pa pa pa pa pa pow\n\"\"\"\n\nclass Case:\n    \"\"\"This is a test case.\"\"\"\n    def __init__(self, line, animals):\n        self.line = line\n        self.animals = animals\n\nclass Animal:\n    \"\"\"This is an animal.\"\"\"\n    def __init__(self, animal, sound):\n        self.animal = animal\n        self.sound = sound\n\n# eliminate known sounds\ndef foxSays(case):\n    \"\"\"This function returns what the fox says.\"\"\"\n    line = case.line\n    animalQuantity = len(case.animals)\n    for i in range(animalQuantity):\n        # find the sounds\n        sound = case.animals[i].sound\n\n        # Delete sounds in the middle of the line.\n        location = line.find(f\" {sound} \")\n        while location != -1:\n            line = line.replace(f\" {sound} \", ' ')\n            location = line.find(f\" {sound} \")\n\n        # Delete the sound at the beginning of the line.\n        if line[0:len(sound)] == sound:\n            line = line[len(sound):]\n\n        # Delete sounds at the end of the line.\n        if line[len(line) - len(sound):] == sound:\n            line = line[:len(line) - len(sound)]\n\n    return line\n\n# get input\ncaseQuantity = int(input())\ncases = []\n\n# Collect test cases from input.\nfor i in range(caseQuantity):\n    animals = []\n    line = input()\n    nextLine = input()\n    while nextLine != \"what does the fox say?\":\n        words = nextLine.split()\n        animal = words[0]\n        sound = words[2:][0]\n        newAnimal = Animal(animal, sound)\n        animals.append(newAnimal)\n        nextLine = input()\n    case = Case(line, animals)\n    cases.append(case)\n\nresults = []\n\n# Process test cases.\nfor case in cases:\n    results.append(foxSays(case))\n\n# Print results.\nfor result in results:\n    print(result)\n","sub_path":"kattis/whatdoesthefoxsay/whatdoesthefoxsay.py","file_name":"whatdoesthefoxsay.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"598871222","text":"import json\n\nclass MakeGraph:\n    def __init__(self):\n        self.parent = []\n\n    def testCruskal(self, sorted_edge, nodes):\n        print(\"sorted\", sorted_edge)\n        node_size = len(nodes)\n        edge_size = len(sorted_edge) - 1\n        self.parent = [-1 for _ in range(node_size)]\n        treeEdges = list()\n        while node_size != 0:\n            print(node_size, edge_size)\n            a = sorted_edge[edge_size]['to']\n            b = sorted_edge[edge_size]['from']\n            if self.is_cycle(a, b):\n                print(\"cycle!\")\n            else:\n                print(a, b)\n                treeEdges.append(sorted_edge[edge_size])\n                node_size -= 1\n                if node_size == 0:\n                    break\n            edge_size -= 1\n        out = dict()\n        out['nodes'] = nodes\n        out['edges'] = treeEdges\n\n\n        source = json.dumps(out, indent=4)\n        #print(source)\n        # file write : ./con\n        return source\n\n    def py2json(self, concept, conceptRelation, All_degree):\n        out = dict()\n        nodes = list()\n        edges = list()\n        resistDistance = 10\n\n        for i, ci in enumerate(concept):\n            node = dict()\n            node['id'] = i\n            node['label'] = ci\n            nodes.append(node)\n            for j in range(i + 1, len(concept)):\n                # print(\"i:\", i, \"j:\", j)\n                # print(\"conceptRelation[i]:\", conceptRelation[i])\n                # print(\"conceptRelation[j]\", conceptRelation[j])\n                if conceptRelation[i][j] == conceptRelation[j][i]:\n                    # define this case with a different feature\n                    print(\"when the two directions are equal\")\n                    if conceptRelation[i][j] > conceptRelation[j][i]:\n                        if conceptRelation[i][j] < resistDistance:\n                            source = i\n                            target = j\n                            v = conceptRelation[j][i]\n                        else:\n                            continue\n                    else:\n                        if conceptRelation[j][i] < resistDistance:\n                            source = j\n                            target = i\n                            v = conceptRelation[i][j]\n                        else:\n                            continue\n\n                elif conceptRelation[i][j] > conceptRelation[j][i]:\n                    if conceptRelation[i][j] < resistDistance:\n                        source = i\n                        target = j\n                        v = conceptRelation[j][i]\n                    else:\n                        continue\n                else:\n                    if conceptRelation[j][i] < resistDistance:\n                        source = j\n                        target = i\n                        v = conceptRelation[i][j]\n                    else:\n                        continue\n                edge = dict()\n                edge['to'] = target\n                edge['from'] = source\n                edge['option'] = 'direct'\n                edge['value'] = v\n                edges.append(edge)\n        #tree\n        sorted_edge = sorted(edges, key=lambda k: k['value'], reverse=True)\n        print(\"sorted\", sorted_edge)\n        node_size = len(nodes)\n        edge_size = len(sorted_edge) - 1\n\n        self.parent = [-1 for _ in range(node_size)]\n\n        treeEdges = list()\n        while node_size != 0:\n            print(node_size, edge_size)\n            a = sorted_edge[edge_size]['to']\n            b = sorted_edge[edge_size]['from']\n            if self.is_cycle(a, b):\n                print(\"cycle!\")\n            else:\n                print(a, b)\n                treeEdges.append(sorted_edge[edge_size])\n                node_size -= 1\n                if node_size == 0:\n                    break\n            edge_size -= 1\n            if edge_size == 0:\n                break\n        out['nodes'] = nodes\n        out['edges'] = treeEdges\n        \n        source = json.dumps(out, indent=4)\n        #print(source)\n        # file write : ./con\n        return source\n\n    def find(self, i):\n        if self.parent[i] == -1:\n            return i\n        return self.find(self.parent[i])\n\n    def union(self, x, y):\n        xx = self.find(x)\n        yy = self.find(y)\n        if xx == yy:  # a cycle may exist\n            return False\n        else:\n            self.parent[xx] = yy\n            return True\n\n    def is_cycle(self, a, b):\n        # union() returns True when the merge succeeds (no cycle), so negate it\n        return not self.union(a, b)\n\n    '''\n    example\n    #conceptRelation -> dict()\n\n\n    conceptRelation = {\n        'nodes' : [{'id' :0, 'caption':'asdf'}, ...]\n        'edges' : [{'source' : 0, 'target':1]}\n    }\n\n    ->\n    {\n        \"comment\": \"test\",\n        \"nodes\": [\n            {\n                \"id\": 0,\n                \"caption\": \"Synapse\"\n            },\n            {\n            }\n        ],\n        \"edges\": [\n            {\n                \"source\": 0,\n                \"target\": 1\n            }\n        ]\n    }\n    '''","sub_path":"RelationExtraction/MakeGraph.py","file_name":"MakeGraph.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"288059390","text":"# No Copyright (-) 2010 The Ampify Authors. 
This file is under the\n# Public Domain license that can be found in the root LICENSE file.\n\n\"\"\"App configuration for the Tent App.\"\"\"\n\nimport os\n\nfrom datetime import timedelta\nfrom time import time\n\ntry:\n from updated import APPLICATION_TIMESTAMP\nexcept:\n APPLICATION_TIMESTAMP = time()\n\n__all__ = [\n 'APPLICATION_TIMESTAMP', 'COOKIE_DOMAIN_HTTP', 'COOKIE_DOMAIN_HTTPS',\n 'DEBUG', 'LIVE_HOST', 'REMOTE_KEY', 'SITE_ADMINS', 'STATIC_HTTP_HOSTS',\n 'STATIC_HTTPS_HOSTS', 'STATIC_PATH',\n 'TAMPER_PROOF_DEFAULT_DURATION', 'TAMPER_PROOF_KEY', 'TENT_HTTP_HOST',\n 'TENT_HTTPS_HOST'\n ]\n\n# ------------------------------------------------------------------------------\n# Core Settings\n# ------------------------------------------------------------------------------\n\nCOOKIE_DOMAIN_HTTP = '.espra.com'\nCOOKIE_DOMAIN_HTTPS = 'espra.appspot.com'\n\nDEBUG = False\n#DEBUG = 1\n\nLIVE_HOST = 'https://tentlive.espra.com'\n\nSTATIC_HTTP_HOSTS = [\n 'http://static1.espra.com',\n 'http://static2.espra.com',\n 'http://static3.espra.com'\n ]\n\nSTATIC_HTTPS_HOSTS = [\n 'https://static1.espra.appspot.com',\n 'https://static2.espra.appspot.com',\n 'https://static3.espra.appspot.com'\n ]\n\nSTATIC_PATH = '/static/'\n\nTENT_HTTP_HOST = 'http://tent.espra.com'\nTENT_HTTPS_HOST = 'https://espra.appspot.com'\n\n# ------------------------------------------------------------------------------\n# Secret Settings\n# ------------------------------------------------------------------------------\n\nSITE_ADMINS = frozenset([\n 'admin@googlemail.com'\n ])\n\nPASSWORD_KEY = \"secret key\"\n\nTAMPER_PROOF_KEY = \"secret key\"\n\nTAMPER_PROOF_DEFAULT_DURATION = timedelta(minutes=20)\n\n#CLEANUP_BATCH_SIZE = 100\n#EXPIRATION_WINDOW = timedelta(seconds=60*60*1) # 1 hour\n\nREMOTE_KEY = \"secret\"\n\nfrom secret import *\n","sub_path":"src/zero/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"418387544","text":"# imports\nfrom author_rank.score import top_authors\nimport json\n\n\n# read in sample json\nwith open(\"../data/author_network.json\", 'r') as f:\n data = json.load(f)\n\n# get the top authors for a set of documents\ntop = top_authors(documents=data['documents'], normalize_scores=True)\n\n# print the results\nfor i, j in zip(top[0], top[1]):\n print(i, j)\n\n","sub_path":"examples/top_n_authors.py","file_name":"top_n_authors.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"248102495","text":"__author__ = 'PyBeaner'\n\nimport threading, time\n\n\nclass PeriodicTimer:\n def __init__(self, interval):\n self._interval = interval\n self._flag = 0\n # A condition variable allows one or more threads to wait until they are\n # notified by another thread.\n self._condition = threading.Condition()\n\n def start(self):\n t = threading.Thread(target=self.run)\n t.daemon = True\n t.start()\n\n def run(self):\n \"\"\"\n Run the timer and notify waiting threads after each interval\n \"\"\"\n while True:\n time.sleep(self._interval)\n with self._condition:\n self._flag ^= 1 # toggle waiting or notifying\n # Wake up all threads waiting on this condition.\n self._condition.notify_all()\n\n def wait_for_tick(self):\n \"\"\"\n Wait for the next tick of the timer\n \"\"\"\n with self._condition:\n last_flag = self._flag\n while last_flag == self._flag: # not notified;still waiting\n 
self._condition.wait() # (calling thread)Wait until notified or until a timeout occurs.\n\n# Example use of the PeriodicTimer\nptimer = PeriodicTimer(1)\nptimer.start()\n\n\n# Two threads that synchronize on the same timer\ndef countdown(n):\n while n > 0:\n ptimer.wait_for_tick()\n print(\"T-minus\", n)\n n -= 1\n\n\ndef countup(up_to):\n n = 0\n while n < up_to:\n ptimer.wait_for_tick() # the same timer as countdown\n print(\"Counting up\", n)\n n += 1\n\n\nthreading.Thread(target=countdown, args=(5,)).start()\nthreading.Thread(target=countup, args=(10,)).start()\n","sub_path":"Chapter 12.Concurrency/Determining If a Thread Has Started/thread_condition.py","file_name":"thread_condition.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"105109315","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUsage: clop-cutechess-cli.py CPU_ID SEED [PARAM_NAME PARAM_VALUE]...\nRun cutechess-cli with CLOP_PARAM(s).\n\n CPU_ID Symbolic name of the CPU or machine that should run the game\n SEED Running number for the game to be played\n PARAM_NAME Name of a parameter that's being optimized\n PARAM_VALUE Integer value for parameter PARAM_NAME\n\nCLOP is a black-box parameter tuning tool designed and written by Rémi Coulom.\nMore information about CLOP can be found at the CLOP website:\nhttp://remi.coulom.free.fr/CLOP/\n\nThis script works between CLOP and cutechess-cli. The path to this script,\nwithout any parameters, should be on the \"Script\" line of the .clop file.\n'Replications' in the .clop file should be set to 2 so that this script can\nalternate the engine's playing side correctly.\n\nIn this script the variables 'cutechess_cli_path', 'engine', 'engine_param_cmd',\n'opponents' and 'options' must be modified to fit the test environment and\nconditions. The default values are just examples.\n\nWhen the game is completed the script writes the game outcome to its\nstandard output:\n W = win\n L = loss\n D = draw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\nimport sys\n#import exceptions\n\n\n# Path to the cutechess-cli executable.\n# On Windows this should point to cutechess-cli.exe\ncutechess_cli_path = './cutechess-cli/cutechess-cli.sh'\n\n# The engine whose parameters will be optimized\nengine = 'cmd=stockfish proto=uci option.Threads=1'\n\n# Format for the commands that are sent to the engine to\n# set the parameter values. When the command is sent,\n# {name} will be replaced with the parameter name and {value}\n# with the parameter value.\nengine_param_cmd = 'setoption name {name} value {value}'\n\n# A pool of opponents for the engine. The opponent will be\n# chosen based on the seed sent by CLOP.\nopponents = [\n 'cmd=stockfish proto=uci option.Threads=1 name=base',\n]\n\n# Additional cutechess-cli options, eg. 
time control and opening book\n#options = '-each tc=40/2+0.01 -draw 80 1 -resign 5 500'\n#options = '-resign 3 500 -draw 20 5 -each tc=2+0.05 book=varied.bin'\noptions = '-resign 3 500 -draw 20 5 -each st=5000 nodes=8000 book=varied.bin'\n\ndef main(argv = None):\n    if argv is None:\n        argv = sys.argv[1:]\n\n    if len(argv) == 0 or argv[0] == '--help':\n        print(__doc__)\n        return 0\n\n    argv = argv[1:]\n    if len(argv) < 3 or len(argv) % 2 == 0:\n        print('Too few arguments')\n        return 2\n\n    clop_seed = 0\n    try:\n        clop_seed = int(argv[0])\n    except ValueError:\n        print('Invalid seed value: %s' % argv[0])\n        return 2\n\n    fcp = engine\n    scp = opponents[(clop_seed >> 1) % len(opponents)]\n\n    # Parse the parameters that should be optimized\n    for i in range(1, len(argv), 2):\n        # Make sure the parameter value is numeric\n        try:\n            float(argv[i + 1])\n        except ValueError:\n            print('Invalid value for parameter %s: %s' % (argv[i], argv[i + 1]))\n            return 2\n        # Pass CLOP's parameters to the engine by using\n        # cutechess-cli's initialization string feature\n        initstr = engine_param_cmd.format(name = argv[i], value = argv[i + 1])\n        fcp += ' initstr=\"%s\"' % initstr\n\n    # Choose the engine's playing side (color) based on CLOP's seed\n    if clop_seed % 2 != 0:\n        fcp, scp = scp, fcp\n\n    cutechess_args = '-srand %d -engine %s -engine %s %s' % (clop_seed >> 1, fcp, scp, options)\n    command = '%s %s' % (cutechess_cli_path, cutechess_args)\n\n    # Run cutechess-cli and wait for it to finish\n    process = Popen(command, shell = True, stdout = PIPE)\n    # decode the captured bytes so the string comparisons below work on Python 3\n    output = process.communicate()[0].decode()\n    if process.returncode != 0:\n        print('Could not execute command: %s' % command)\n        return 2\n\n    # Convert Cutechess-cli's result into W/L/D\n    # Note that only one game should be played\n    result = -1\n    for line in output.splitlines():\n        if line.startswith('Finished game'):\n            if line.find(\": 1-0\") != -1:\n                result = clop_seed % 2\n            elif line.find(\": 0-1\") != -1:\n                result = (clop_seed % 2) ^ 1\n            elif line.find(\": 1/2-1/2\") != -1:\n                result = 2\n            else:\n                print('The game did not terminate properly')\n                return 2\n            break\n\n    if result == 0:\n        print('W')\n    elif result == 1:\n        print('L')\n    elif result == 2:\n        print('D')\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","sub_path":"dockerized-gists/3763682/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"25747070","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport glob\nimport gzip\nimport os\nimport re\nimport shutil\nimport signal as sig\nimport stat\nimport tempfile\nimport time\nimport uuid\n\nfrom oslo_log import log as logging\nimport signal\nimport six\nimport yaml\n\nfrom fuel_agent import errors\nfrom fuel_agent.utils import fs as fu\nfrom fuel_agent.utils import hardware as hu\nfrom fuel_agent.utils import utils\n\nLOG = logging.getLogger(__name__)\n\nGRUB2_DMRAID_SETTINGS = 'etc/default/grub.d/dmraid2mdadm.cfg'\nDEFAULT_APT_PATH = {\n 'sources_file': 'etc/apt/sources.list',\n 'sources_dir': 'etc/apt/sources.list.d',\n 'preferences_file': 'etc/apt/preferences',\n 'preferences_dir': 'etc/apt/preferences.d',\n 'conf_dir': 'etc/apt/apt.conf.d',\n}\n# protocol : conf_file_name\n# FIXME(azvyagintsev): Move to oslo_config\n# Bug: https://bugs.launchpad.net/fuel/+bug/1514772\nPROXY_PROTOCOLS = {\n 'ftp': '01fuel_agent-use-proxy-ftp',\n 'http': '01fuel_agent-use-proxy-http',\n 'https': '01fuel_agent-use-proxy-https'\n}\nADDITIONAL_DEBOOTSTRAP_PACKAGES = ['ca-certificates',\n 'apt-transport-https']\n\n\ndef run_debootstrap(uri, suite, chroot, arch='amd64', eatmydata=False,\n attempts=10, proxies=None, direct_repo_addr=None):\n \"\"\"Builds initial base system.\n\n debootstrap builds initial base system which is capable to run apt-get.\n debootstrap is well known for its glithcy resolving of package dependecies,\n so the rest of packages will be installed later by run_apt_get.\n \"\"\"\n env_vars = copy.deepcopy(os.environ)\n for proto in six.iterkeys(PROXY_PROTOCOLS):\n if proto in (proxies or {}):\n LOG.debug('Using {0} proxy {1} for debootstrap'.format(\n proto, proxies[proto]))\n env_vars['{0}_proxy'.format(proto)] = proxies[proto]\n\n if direct_repo_addr:\n env_vars['no_proxy'] = ','.join(direct_repo_addr)\n LOG.debug('Setting no_proxy for: {0}'.format(env_vars['no_proxy']))\n\n cmds = ['debootstrap',\n '--include={0}'.format(\",\".join(ADDITIONAL_DEBOOTSTRAP_PACKAGES)),\n '--verbose', '--no-check-gpg',\n '--arch={0}'.format(arch)]\n if eatmydata:\n cmds.extend(['--include=eatmydata'])\n cmds.extend([suite, chroot, uri])\n stdout, stderr = utils.execute(*cmds, attempts=attempts,\n env_variables=env_vars)\n LOG.debug('Running deboostrap completed.\\nstdout: %s\\nstderr: %s', stdout,\n stderr)\n\n\ndef set_apt_get_env():\n # NOTE(agordeev): disable any confirmations/questions from apt-get side\n os.environ['DEBIAN_FRONTEND'] = 'noninteractive'\n os.environ['DEBCONF_NONINTERACTIVE_SEEN'] = 'true'\n os.environ['LC_ALL'] = os.environ['LANG'] = os.environ['LANGUAGE'] = 'C'\n\n\ndef run_apt_get(chroot, packages, eatmydata=False, attempts=10):\n \"\"\"Runs apt-get install .\n\n Unlike debootstrap, apt-get has a perfect package dependecies resolver\n under the hood.\n eatmydata could be used to totally ignore the storm of sync() calls from\n dpkg/apt-get tools. 
It's dangerous, but could decrease package install\n time in X times.\n \"\"\"\n for action in ('update', 'dist-upgrade'):\n cmds = ['chroot', chroot, 'apt-get', '-y', action]\n stdout, stderr = utils.execute(*cmds, attempts=attempts)\n LOG.debug('Running apt-get %s completed.\\nstdout: %s\\nstderr: %s',\n action, stdout, stderr)\n cmds = ['chroot', chroot, 'apt-get', '-y', 'install', ' '.join(packages)]\n if eatmydata:\n cmds.insert(2, 'eatmydata')\n stdout, stderr = utils.execute(*cmds, attempts=attempts)\n LOG.debug('Running apt-get install completed.\\nstdout: %s\\nstderr: %s',\n stdout, stderr)\n\n\ndef suppress_services_start(chroot):\n \"\"\"Suppresses services start.\n\n Prevents start of any service such as udev/ssh/etc in chrooted environment\n while image is being built.\n \"\"\"\n path = os.path.join(chroot, 'usr/sbin')\n if not os.path.exists(path):\n os.makedirs(path)\n with open(os.path.join(path, 'policy-rc.d'), 'w') as f:\n f.write('#!/bin/sh\\n'\n '# prevent any service from being started\\n'\n 'exit 101\\n')\n os.fchmod(f.fileno(), 0o755)\n\n\ndef clean_dirs(chroot, dirs, delete=False):\n \"\"\"Removes dirs and recreates them\n\n :param chroot: Root directory where to look for subdirectories\n :param dirs: List of directories to clean/remove (Relative to chroot)\n :param delete: (Boolean) If True, directories will be removed\n (Default: False)\n \"\"\"\n for d in dirs:\n path = os.path.join(chroot, d)\n if os.path.isdir(path):\n LOG.debug('Removing dir: %s', path)\n shutil.rmtree(path)\n if not delete:\n LOG.debug('Creating empty dir: %s', path)\n os.makedirs(path)\n\n\ndef remove_files(chroot, files):\n for f in files:\n path = os.path.join(chroot, f)\n if os.path.exists(path):\n os.remove(path)\n LOG.debug('Removed file: %s', path)\n\n\ndef clean_apt_settings(chroot, allow_unsigned_file='allow_unsigned_packages',\n force_ipv4_file='force_ipv4',\n pipeline_depth_file='pipeline_depth',\n install_rule_file='install_rule'):\n \"\"\"Cleans apt settings such as package sources and repo pinning.\"\"\"\n files = [DEFAULT_APT_PATH['sources_file'],\n DEFAULT_APT_PATH['preferences_file'],\n os.path.join(DEFAULT_APT_PATH['conf_dir'], force_ipv4_file),\n os.path.join(DEFAULT_APT_PATH['conf_dir'], allow_unsigned_file),\n os.path.join(DEFAULT_APT_PATH['conf_dir'], pipeline_depth_file),\n os.path.join(DEFAULT_APT_PATH['conf_dir'], install_rule_file)]\n # also remove proxies\n for p_file in six.itervalues(PROXY_PROTOCOLS):\n files.append(os.path.join(DEFAULT_APT_PATH['conf_dir'], p_file))\n remove_files(chroot, files)\n dirs = [DEFAULT_APT_PATH['preferences_dir'],\n DEFAULT_APT_PATH['sources_dir']]\n clean_dirs(chroot, dirs)\n\n\ndef fix_cloud_init_config(config_path):\n # NOTE(mzhnichkov): change an order of executing cloud-init modules\n # this change is suitable for cloud-init packages from trust/xenial\n with open(config_path, 'r') as cloud_conf:\n config = yaml.safe_load(cloud_conf)\n if 'write-files' in config['cloud_init_modules']:\n config['cloud_init_modules'].remove('write-files')\n config['cloud_config_modules'].append('write-files')\n with open(config_path, 'w') as cloud_conf:\n yaml.safe_dump(config,\n cloud_conf, encoding='utf-8', default_flow_style=False)\n\n\ndef do_post_inst(chroot, hashed_root_password,\n allow_unsigned_file='allow_unsigned_packages',\n force_ipv4_file='force_ipv4',\n pipeline_depth_file='pipeline_depth',\n install_rule_file='install_rule'):\n # NOTE(agordeev): set up password for root\n utils.execute('sed', '-i',\n 's%root:[\\*,\\!]%root:' + 
hashed_root_password + '%',\n os.path.join(chroot, 'etc/shadow'))\n # NOTE(agordeev): backport from bash-script:\n # in order to prevent the later puppet workflow outage, puppet service\n # should be disabled on a node startup.\n # Being enabled by default, sometimes it leads to puppet service hanging\n # and recognizing the deployment as failed.\n # TODO(agordeev): take care of puppet service for other distros, once\n # fuel-agent will be capable of building images for them too.\n if os.path.exists(os.path.join(chroot, 'etc/init.d/puppet')):\n utils.execute('chroot', chroot, 'update-rc.d', 'puppet', 'disable')\n # NOTE(agordeev): disable mcollective to be automatically started on boot\n # to prevent confusing messages in its log (regarding connection errors).\n with open(os.path.join(chroot, 'etc/init/mcollective.override'), 'w') as f:\n f.write(\"manual\\n\")\n service_link = os.path.join(\n chroot,\n 'etc/systemd/system/multi-user.target.wants/mcollective.service')\n if os.path.exists(service_link):\n os.unlink(service_link)\n cloud_cfg = os.path.join(chroot, 'etc/cloud/cloud.cfg.d/')\n utils.makedirs_if_not_exists(os.path.dirname(cloud_cfg))\n with open(os.path.join(\n chroot,\n 'etc/cloud/cloud.cfg.d/99-disable-network-config.cfg'), 'w') as cf:\n yaml.safe_dump({'network': {'config': 'disabled'}}, cf,\n encoding='utf-8',\n default_flow_style=False)\n cloud_init_conf = os.path.join(chroot, 'etc/cloud/cloud.cfg')\n if os.path.exists(cloud_init_conf):\n fix_cloud_init_config(cloud_init_conf)\n # NOTE(agordeev): remove custom policy-rc.d which is needed to disable\n # execution of post/pre-install package hooks and start of services\n remove_files(chroot, ['usr/sbin/policy-rc.d'])\n # enable mdadm (remove nomdadmddf nomdadmism options from cmdline)\n utils.execute('chroot', chroot, 'dpkg-divert', '--local', '--add',\n os.path.join('/', GRUB2_DMRAID_SETTINGS))\n remove_files(chroot, [GRUB2_DMRAID_SETTINGS])\n # remove cached apt files\n utils.execute('chroot', chroot, 'apt-get', 'clean')\n clean_apt_settings(chroot, allow_unsigned_file=allow_unsigned_file,\n force_ipv4_file=force_ipv4_file,\n pipeline_depth_file=pipeline_depth_file,\n install_rule_file=install_rule_file)\n\n\ndef stop_chrooted_processes(chroot, signal=sig.SIGTERM,\n attempts=10, attempts_delay=2):\n \"\"\"Sends signal to all processes, which are running inside chroot.\n\n It tries several times until all processes die. If at some point there\n are no running processes found, it returns True.\n\n :param chroot: Process root directory.\n :param signal: Which signal to send to processes. It must be either\n SIGTERM or SIGKILL. 
(Default: SIGTERM)\n :param attempts: Number of attempts (Default: 10)\n :param attempts_delay: Delay between attempts (Default: 2)\n \"\"\"\n\n if signal not in (sig.SIGTERM, sig.SIGKILL):\n raise ValueError('Signal must be either SIGTERM or SIGKILL')\n\n def get_running_processes():\n # fuser shows *some* (mount point, swap file) accesses by\n # the kernel using the string 'kernel' as a pid, ignore these\n out, _ = utils.execute('fuser', '-v', chroot, check_exit_code=False)\n return [pid for pid in out.split() if pid != 'kernel']\n\n for i in six.moves.range(attempts):\n running_processes = get_running_processes()\n if not running_processes:\n LOG.debug('There are no running processes in %s ', chroot)\n return True\n for p in running_processes:\n try:\n pid = int(p)\n if os.readlink('/proc/%s/root' % pid) == chroot:\n LOG.debug('Sending %s to chrooted process %s', signal, pid)\n os.kill(pid, signal)\n except (OSError, ValueError) as e:\n cmdline = ''\n pid = p\n try:\n with open('/proc/%s/cmdline' % pid) as f:\n cmdline = f.read()\n except Exception:\n LOG.debug('Can not read cmdline for pid=%s', pid)\n LOG.warning('Exception while sending signal: '\n 'pid: %s cmdline: %s message: %s. Skipping it.',\n pid, cmdline, e)\n\n # First of all, signal delivery is asynchronous.\n # Just because the signal has been sent doesn't\n # mean the kernel will deliver it instantly\n # (the target process might be uninterruptible at the moment).\n # Secondly, exiting might take a while (the process might have\n # some data to fsync, etc)\n LOG.debug('Attempt %s. Waiting for %s seconds', i + 1, attempts_delay)\n time.sleep(attempts_delay)\n\n running_processes = get_running_processes()\n if running_processes:\n for pid in running_processes:\n cmdline = ''\n try:\n with open('/proc/%s/cmdline' % pid) as f:\n cmdline = f.read()\n except Exception:\n LOG.debug('Can not read cmdline for pid=%s', pid)\n LOG.warning('Process is still running: pid=%s cmdline: %s',\n pid, cmdline)\n return False\n return True\n\n\ndef get_free_loop_device(loop_device_major_number=7,\n max_loop_devices_count=255):\n \"\"\"Returns the name of a free loop device.\n\n It should return the name of a free loop device or raise an exception.\n Unfortunately, a free loop device can't be reserved for later use,\n so we must start using it as soon as we can.\n If there's no free loop device, it will try to create a new one and ask\n the system for a free loop device again.\n \"\"\"\n for minor in range(0, max_loop_devices_count):\n cur_loop = \"/dev/loop%s\" % minor\n if not os.path.exists(cur_loop):\n os.mknod(cur_loop, 0o660 | stat.S_IFBLK,\n os.makedev(loop_device_major_number, minor))\n try:\n return utils.execute('losetup', '--find')[0].split()[0]\n except (IndexError, errors.ProcessExecutionError):\n LOG.debug(\"Couldn't find free loop device, trying again\")\n raise errors.NoFreeLoopDevices('Free loop device not found')\n\n\ndef populate_basic_dev(chroot):\n \"\"\"Populates /dev with basic files, links, device nodes.\"\"\"\n # prevent failures related with /dev/fd/62\n utils.execute('chroot', chroot, 'rm', '-fr', '/dev/fd')\n utils.execute('chroot', chroot,\n 'ln', '-s', '/proc/self/fd', '/dev/fd')\n\n\ndef create_sparse_tmp_file(dir, suffix, size=8192):\n \"\"\"Creates a sparse file.\n\n Creates a file which consumes disk space more efficiently when the file\n itself is mostly empty.\n \"\"\"\n tf = tempfile.NamedTemporaryFile(dir=dir, suffix=suffix, delete=False)\n utils.execute('truncate', '-s', '%sM' % size, tf.name)\n return tf.name\n
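# Editor's note: illustrative-only sketch of why `truncate -s` (as used by
# create_sparse_tmp_file above) yields a sparse file: the apparent size
# grows immediately, while the number of blocks actually backed by disk
# stays near zero until data is written. Figures in the comments are
# approximate; the helper below is not part of the original module.
import os

def sparse_usage(path):
    st = os.stat(path)
    apparent = st.st_size            # size the file claims to have
    allocated = st.st_blocks * 512   # bytes really allocated on disk (POSIX)
    return apparent, allocated

# e.g. right after create_sparse_tmp_file('/tmp', '.img', size=8192):
#   apparent  ~ 8192 MiB
#   allocated ~ 0 bytes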
\ndef attach_file_to_loop(filename, loop):\n utils.execute('losetup', loop, filename)\n\n\ndef deattach_loop(loop, check_exit_code=[0]):\n LOG.debug('Trying to figure out if loop device %s is attached', loop)\n output = utils.execute('losetup', '-a')[0]\n for line in output.split('\\n'):\n # output lines are assumed to have the following format\n # /dev/loop0: [fd03]:130820 (/dev/loop0)\n if loop == line.split(':')[0]:\n LOG.debug('Loop device %s seems to be attached. '\n 'Trying to detach.', loop)\n utils.execute('losetup', '-d', loop,\n check_exit_code=check_exit_code)\n\n\ndef shrink_sparse_file(filename):\n \"\"\"Shrinks the file to the size of its actual data. Only ext fs are supported.\"\"\"\n utils.execute('e2fsck', '-y', '-f', filename)\n utils.execute('resize2fs', '-M', filename)\n data = hu.parse_simple_kv('dumpe2fs', filename)\n block_count = int(data['block count'])\n block_size = int(data['block size'])\n with open(filename, 'rb+') as f:\n f.truncate(block_count * block_size)\n\n\ndef strip_filename(name):\n \"\"\"Strips a filename for apt settings.\n\n The result may only contain alphanumeric, hyphen (-), underscore (_) and\n period (.) characters.\n \"\"\"\n return re.sub(r\"[^a-zA-Z0-9-_.]*\", \"\", name)\n\n\ndef get_release_file(uri, suite, section, proxies=None,\n direct_repo_addrs=None):\n \"\"\"Download a repo's Release file\n\n Returns the raw Release file content for the specified repo; it is\n parsed later by parse_release_file.\n\n :param proxies: Dict protocol:uri format\n :param direct_repo_addrs: List of addresses which should be bypassed\n by proxy\n :returns: the Release file content as a string\n \"\"\"\n if section:\n # We can't use urljoin here because it works pretty badly in\n # cases when 'uri' doesn't have a trailing slash.\n download_uri = os.path.join(uri, 'dists', suite, 'Release')\n else:\n # Well, we have a flat repo case, so we should download the Release\n # file from a different place. Please note, we have to strip\n # a leading slash from suite because otherwise the download\n # link will be wrong.\n download_uri = os.path.join(uri, suite.lstrip('/'), 'Release')\n\n return utils.init_http_request(download_uri, proxies=proxies,\n noproxy_addrs=direct_repo_addrs).text\n
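# Editor's note: tiny illustrative helper (not in the original module)
# expanding the two Release-file URL shapes built by get_release_file above;
# the mirror name and suites are invented, and os.path.join is assumed to
# run on POSIX, where it simply joins with '/'. The lstrip('/') matters:
# joining an absolute component would otherwise discard the base URI.
import os

def release_urls_demo():
    uri = 'http://mirror.example/ubuntu'   # hypothetical mirror
    sectioned = os.path.join(uri, 'dists', 'trusty', 'Release')
    # -> 'http://mirror.example/ubuntu/dists/trusty/Release'
    flat = os.path.join(uri, '/mos-repos'.lstrip('/'), 'Release')
    # -> 'http://mirror.example/ubuntu/mos-repos/Release'
    return sectioned, flat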
\ndef parse_release_file(content):\n \"\"\"Parse Debian repo's Release file content.\n\n :param content: a Debian repo's Release file content\n :returns: a dict with the repo's attributes\n \"\"\"\n _multivalued_fields = {\n 'SHA1': ['sha1', 'size', 'name'],\n 'SHA256': ['sha256', 'size', 'name'],\n 'SHA512': ['sha512', 'size', 'name'],\n 'MD5Sum': ['md5sum', 'size', 'name'],\n }\n\n # debian data format is very similar to yaml, except for the\n # multivalued fields, so we can parse it just like yaml\n # and then perform an additional transformation for those\n # fields (we know which ones are multivalued).\n data = yaml.load(content)\n\n for attr, columns in six.iteritems(_multivalued_fields):\n if attr not in data:\n continue\n\n values = data[attr].split()\n data[attr] = []\n\n for group in utils.grouper(values, len(columns)):\n data[attr].append(dict(zip(columns, group)))\n\n return data\n\n\ndef add_apt_source(name, uri, suite, section, chroot):\n # NOTE(agordeev): The files have either no extension or \"list\" as their\n # filename extension\n filename = 'fuel-image-{name}.list'.format(name=strip_filename(name))\n if section:\n entry = 'deb {uri} {suite} {section}\\n'.format(uri=uri, suite=suite,\n section=section)\n else:\n entry = 'deb {uri} {suite}\\n'.format(uri=uri, suite=suite)\n with open(os.path.join(chroot, DEFAULT_APT_PATH['sources_dir'], filename),\n 'w') as f:\n f.write(entry)\n
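# Editor's note: an illustrative run of parse_release_file above on made-up
# content. YAML parsing leaves 'MD5Sum' as one flat string; utils.grouper
# then regroups its words into per-file dicts (note all values stay strings):
sample_release = ('Origin: Ubuntu\n'
                  'Suite: trusty\n'
                  'MD5Sum: d41d8cd98f00b204e9800998ecf8427e 0 main/Packages\n')
# parse_release_file(sample_release) ->
# {'Origin': 'Ubuntu',
#  'Suite': 'trusty',
#  'MD5Sum': [{'md5sum': 'd41d8cd98f00b204e9800998ecf8427e',
#              'size': '0',
#              'name': 'main/Packages'}]}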
\"\n \"This may lead both to trouble with packages \"\n \"and broken OS\".format(name, six.text_type(exc))\n )\n return\n\n conditions = set()\n for field, condition in six.iteritems(_transformations):\n if field in deb_release:\n conditions.add(\n '{0}={1}'.format(condition, deb_release[field])\n )\n\n with open(os.path.join(chroot, DEFAULT_APT_PATH['preferences_dir'],\n filename), 'w') as f:\n f.write('Package: *\\n')\n f.write('Pin: release ')\n f.write(', '.join(conditions) + \"\\n\")\n f.write('Pin-Priority: {priority}\\n'.format(priority=priority))\n\n\ndef set_apt_proxy(chroot, proxies, direct_repo_addr=None):\n \"\"\"Configure proxy for apt-config\n\n direct_repo_addr:: direct apt address:\n access to it bypass proxies.\n \"\"\"\n\n for protocol in six.iterkeys(proxies):\n with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],\n PROXY_PROTOCOLS[protocol]), 'w') as f:\n f.write('Acquire::{0}::proxy \"{1}\";\\n'\n ''.format(protocol, proxies[protocol]))\n LOG.debug('Apply apt-proxy: \\nprotocol: {0}\\nurl: {1}'\n ''.format(protocol, proxies[protocol]))\n if direct_repo_addr:\n for addr in direct_repo_addr:\n f.write('Acquire::{0}::proxy::{1} \"DIRECT\";\\n'\n ''.format(protocol, addr))\n LOG.debug('Set DIRECT repo: \\nprotocol:'\n ' {0}\\nurl: {1}'.format(protocol, addr))\n\n\ndef pre_apt_get(chroot, allow_unsigned_file='allow_unsigned_packages',\n force_ipv4_file='force_ipv4',\n pipeline_depth_file='pipeline_depth',\n install_rule_file='install_rule',\n proxies=None, direct_repo_addr=None):\n \"\"\"It must be called prior run_apt_get.\"\"\"\n clean_apt_settings(chroot, allow_unsigned_file=allow_unsigned_file,\n force_ipv4_file=force_ipv4_file,\n pipeline_depth_file=pipeline_depth_file,\n install_rule_file=install_rule_file)\n # NOTE(agordeev): allow to install packages without gpg digest\n with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],\n allow_unsigned_file), 'w') as f:\n f.write('APT::Get::AllowUnauthenticated 1;\\n')\n with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],\n force_ipv4_file), 'w') as f:\n f.write('Acquire::ForceIPv4 \"true\";\\n')\n with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],\n pipeline_depth_file), 'w') as f:\n f.write('Acquire::http::Pipeline-Depth 0;\\n')\n with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],\n install_rule_file), 'w') as f:\n f.write('APT::Install-Recommends \"false\";\\n')\n with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],\n install_rule_file), 'a') as f:\n f.write('APT::Install-Suggests \"false\";\\n')\n if proxies:\n set_apt_proxy(chroot, proxies, direct_repo_addr)\n\n\ndef containerize(filename, container, chunk_size=1048576):\n if container == 'gzip':\n output_file = filename + '.gz'\n with open(filename, 'rb') as f:\n # NOTE(agordeev): gzip in python2.6 doesn't have context manager\n # support\n g = gzip.open(output_file, 'wb')\n for chunk in iter(lambda: f.read(chunk_size), ''):\n g.write(chunk)\n g.close()\n os.remove(filename)\n return output_file\n raise errors.WrongImageDataError(\n 'Error while image initialization: '\n 'unsupported image container: {container}'.format(container=container))\n\n\ndef attach_file_to_free_loop_device(filename, max_loop_devices_count=255,\n loop_device_major_number=7,\n max_attempts=1):\n \"\"\"Find free loop device and try to attach `filename` to it.\n\n If attaching fails then retry again. Max allowed attempts is\n `max_attempts`.\n\n Returns loop device to which file is attached. 
\ndef containerize(filename, container, chunk_size=1048576):\n if container == 'gzip':\n output_file = filename + '.gz'\n with open(filename, 'rb') as f:\n # NOTE(agordeev): gzip in python2.6 doesn't have context manager\n # support\n g = gzip.open(output_file, 'wb')\n for chunk in iter(lambda: f.read(chunk_size), ''):\n g.write(chunk)\n g.close()\n os.remove(filename)\n return output_file\n raise errors.WrongImageDataError(\n 'Error while image initialization: '\n 'unsupported image container: {container}'.format(container=container))\n\n\ndef attach_file_to_free_loop_device(filename, max_loop_devices_count=255,\n loop_device_major_number=7,\n max_attempts=1):\n \"\"\"Find a free loop device and try to attach `filename` to it.\n\n If attaching fails, it is retried; at most `max_attempts` attempts\n are made.\n\n Returns the loop device to which the file is attached. Otherwise, raises\n errors.NoFreeLoopDevices.\n \"\"\"\n loop_device = None\n for i in range(0, max_attempts):\n try:\n LOG.debug('Looking for a free loop device')\n loop_device = get_free_loop_device(\n loop_device_major_number=loop_device_major_number,\n max_loop_devices_count=max_loop_devices_count)\n\n log_msg = \"Attaching image file '{0}' to free loop device '{1}'\"\n LOG.debug(log_msg.format(filename, loop_device))\n attach_file_to_loop(filename, loop_device)\n break\n except errors.ProcessExecutionError:\n log_msg = \"Couldn't attach image file '{0}' to loop device '{1}'.\"\n LOG.debug(log_msg.format(filename, loop_device))\n\n if i == max_attempts - 1:\n log_msg = (\"Maximum allowed attempts ({0}) to attach image \"\n \"file '{1}' to loop device '{2}' has been exceeded.\")\n LOG.debug(log_msg.format(max_attempts, filename, loop_device))\n raise errors.NoFreeLoopDevices('Free loop device not found.')\n else:\n log_msg = (\"Trying again to attach image file '{0}' \"\n \"to free loop device '{1}'. \"\n \"Attempt #{2} out of {3}\")\n LOG.debug(log_msg.format(filename, loop_device,\n i + 1, max_attempts))\n\n return loop_device\n\n\ndef make_targz(source_dir, output_name=None):\n \"\"\"Archive the given directory\n\n :param source_dir: directory to archive\n :param output_name: output file name, might be a relative\n or an absolute path\n \"\"\"\n if not output_name:\n output_name = six.text_type(uuid.uuid4()) + '.tar.gz'\n utils.makedirs_if_not_exists(os.path.dirname(output_name))\n\n LOG.info('Creating archive: %s', output_name)\n utils.execute('tar', '-czf', output_name, '--directory',\n os.path.normcase(source_dir), '.', logged=True)\n return output_name\n\n\ndef run_script_in_chroot(chroot, script):\n \"\"\"Run a script inside the chroot\n\n 1) Copy the script file into the chroot\n 2) Make it executable\n 3) Run it with bash\n \"\"\"\n LOG.info('Copy user-script {0} into chroot:{1}'.format(script, chroot))\n if not os.path.isdir(chroot):\n raise errors.IncorrectChroot(\n \"Can't run script in incorrect chroot %s\", chroot)\n chrooted_file = os.path.join(chroot, os.path.basename(script))\n shutil.copy(script, chrooted_file)\n LOG.info('Make user-script {0} executable:'.format(chrooted_file))\n os.chmod(chrooted_file, 0o755)\n utils.execute(\n 'chroot', chroot, '/bin/bash', '-c', os.path.join(\n '/', os.path.basename(script)), logged=True)\n LOG.debug('User-script completed')\n\n\ndef recompress_initramfs(chroot, compress='xz', initrd_mask='initrd*'):\n \"\"\"Remove the old initrd images and rebuild them\n\n :param chroot: chroot to work in\n :param compress: compression type for the initrd\n :param initrd_mask: Unix-style pathname pattern used to find the\n initrd files\n \"\"\"\n env_vars = copy.deepcopy(os.environ)\n add_env_vars = {'TMPDIR': '/tmp',\n 'TMP': '/tmp'}\n\n LOG.debug('Changing initramfs compression type to: %s', compress)\n utils.execute(\n 'sed', '-i', 's/^COMPRESS=.*/COMPRESS={0}/'.format(compress),\n os.path.join(chroot, 'etc/initramfs-tools/initramfs.conf'))\n\n boot_dir = os.path.join(chroot, 'boot')\n initrds = glob.glob(os.path.join(boot_dir, initrd_mask))\n LOG.debug('Removing initrd images: %s', initrds)\n remove_files('/', initrds)\n\n env_vars.update(add_env_vars)\n LOG.info('Building initramfs')\n cmds = ['chroot', chroot, 'update-initramfs -v -c -k all']\n utils.execute(*cmds,\n env_variables=env_vars, logged=True)\n LOG.debug('Running \"update-initramfs\" completed')\n
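# Editor's note: hypothetical end-to-end sketch (not called anywhere in this
# module) showing how the helpers above fit together for an image build:
# sparse file -> loop device -> filesystem -> shrink -> gzip container.
# The fu.make_fs signature is assumed for illustration, not taken from
# this file; the mount/debootstrap middle step is deliberately elided.
def build_image_sketch(tmp_dir):
    img = create_sparse_tmp_file(tmp_dir, suffix='.img', size=2048)
    loop = attach_file_to_free_loop_device(img)
    try:
        fu.make_fs('ext4', '', '', loop)  # assumed fs-utils helper
        # ... mount the loop device, debootstrap/run_apt_get into it,
        # then unmount ...
    finally:
        deattach_loop(loop)
    shrink_sparse_file(img)
    return containerize(img, 'gzip')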
\ndef propagate_host_resolv_conf(chroot):\n \"\"\"Copy DNS settings from the host system into the chroot.\n\n Makes a backup of the original /etc/resolv.conf and /etc/hosts first,\n in case the user has custom rules in them.\n Opposite of restore_resolv_conf.\n \"\"\"\n c_etc = os.path.join(chroot, 'etc/')\n utils.makedirs_if_not_exists(c_etc)\n for conf_name in ('resolv.conf', 'hosts'):\n dst_conf_name = os.path.join(c_etc, conf_name)\n src_conf_name = os.path.join('/etc/', conf_name)\n files_to_copy = [(dst_conf_name, dst_conf_name + '.bak'),\n (src_conf_name, dst_conf_name)]\n for src, dst in files_to_copy:\n if os.path.isfile(src):\n shutil.copy(src, dst)\n\n\ndef restore_resolv_conf(chroot):\n \"\"\"Restore the hosts/resolv files in the chroot\n\n Opposite of propagate_host_resolv_conf.\n \"\"\"\n c_etc = os.path.join(chroot, 'etc/')\n utils.makedirs_if_not_exists(c_etc)\n for conf_name in ('resolv.conf', 'hosts'):\n dst_conf_name = os.path.join(c_etc, conf_name)\n if os.path.isfile(dst_conf_name + '.bak'):\n LOG.debug('Restoring default %s inside chroot', conf_name)\n shutil.move(dst_conf_name + '.bak', dst_conf_name)\n\n\ndef mkdtemp_smart(root_dir, suffix):\n \"\"\"Create a unique temporary directory in root_dir\n\n Automatically creates root_dir if it does not exist.\n Otherwise same as tempfile.mkdtemp\n \"\"\"\n\n LOG.debug('Creating temporary chroot directory')\n utils.makedirs_if_not_exists(root_dir)\n chroot = tempfile.mkdtemp(\n dir=root_dir, suffix=suffix)\n LOG.debug('Temporary chroot dir: %s', chroot)\n return chroot\n\n\ndef copy_kernel_initramfs(chroot, dstdir, clean=False):\n \"\"\"Copy the vmlinuz and initrd images out of the chroot\n\n :param chroot: chroot to copy from\n :param dstdir: folder to copy to\n :param clean: remove all vmlinuz/initrd files from the chroot when done\n \"\"\"\n # TODO(azvyagintsev) fetch from uri driver\n # module* : result filename\n files = {'vmlinuz': 'vmlinuz',\n 'initrd': 'initrd.img'\n }\n utils.makedirs_if_not_exists(dstdir)\n boot_dir = os.path.join(chroot, 'boot')\n for module in six.iterkeys(files):\n mask = os.path.join(boot_dir, module + '*')\n all_files = glob.glob(mask)\n if len(all_files) > 1:\n raise errors.TooManyKernels(\n \"Too many %s detected :%s\", module, all_files)\n file_to_copy = all_files[0]\n copy_to = os.path.join(dstdir, files[module])\n LOG.debug('Copying file: %s to: %s', file_to_copy, copy_to)\n shutil.copy(file_to_copy, copy_to)\n if clean:\n files_to_remove = glob.glob(mask)\n remove_files('/', files_to_remove)\n\n\ndef run_mksquashfs(chroot, output_name=None, compression_algorithm='xz'):\n \"\"\"Pack the target system as squashfs using mksquashfs\n\n :param chroot: chroot system, to be squashfs'd\n :param output_name: output file name, might be a relative\n or an absolute path\n\n The kernel squashfs driver has to match the user-space squashfs tools,\n so use the mksquashfs installed in the target system: the distro\n maintainers typically ship the matching version.\n\n 1) Mount tmpfs under chroot/mnt\n 2) Run mksquashfs inside the chroot\n 3) Move the result to dstdir\n \"\"\"\n if not output_name:\n output_name = 'root.squashfs' + six.text_type(uuid.uuid4())\n utils.makedirs_if_not_exists(os.path.dirname(output_name))\n dstdir = os.path.dirname(output_name)\n temp = '.mksquashfs.tmp.' 
+ six.text_type(uuid.uuid4())\n s_dst = os.path.join(chroot, 'mnt/dst')\n s_src = os.path.join(chroot, 'mnt/src')\n try:\n fu.mount_fs(\n 'tmpfs', 'mnt_{0}'.format(temp),\n (os.path.join(chroot, 'mnt')),\n 'rw,nodev,nosuid,noatime,mode=0755,size=4M')\n utils.makedirs_if_not_exists(s_src)\n utils.makedirs_if_not_exists(s_dst)\n # Bind mount the chroot to avoid including various temporary/virtual\n # files (/proc, /sys, /dev, and so on) into the image\n fu.mount_fs(None, chroot, s_src, opts='bind')\n fu.mount_fs(None, None, s_src, 'remount,bind,ro')\n fu.mount_fs(None, dstdir, s_dst, opts='bind')\n # run mksquashfs\n chroot_squash = os.path.join('/mnt/dst/' + temp)\n long_squash = os.path.join(chroot, 'mnt/dst/{0}'.format(temp))\n LOG.info('Building squashfs')\n utils.execute(\n 'chroot', chroot, 'mksquashfs', '/mnt/src',\n chroot_squash,\n '-comp', compression_algorithm,\n '-no-progress', '-noappend', logged=True)\n # move to result name\n LOG.debug('Moving file: %s to: %s', long_squash, output_name)\n shutil.move(long_squash, output_name)\n except Exception as exc:\n LOG.error('squashfs_image build failed: %s', exc)\n raise\n finally:\n LOG.info('squashfs_image clean-up')\n stop_chrooted_processes(chroot, signal=signal.SIGTERM)\n fu.umount_fs(os.path.join(chroot, 'mnt/dst'))\n fu.umount_fs(os.path.join(chroot, 'mnt/src'))\n fu.umount_fs(os.path.join(chroot, 'mnt'))\n\n\ndef get_installed_packages(chroot):\n \"\"\"The packages installed in chroot along with their versions\"\"\"\n\n out, err = utils.execute('chroot', chroot, 'dpkg-query', '-W',\n '-f=\"${Package} ${Version};;\"')\n pkglist = filter(None, out.split(';;'))\n return dict([pkgver.split() for pkgver in pkglist])\n\n\ndef rsync_inject(src, dst):\n \"\"\"Recursively copy the src to dst using full source paths\n\n Example: suppose the source directory looks like\n src/etc/myconfig\n src/usr/bin/myscript\n\n rsync_inject('src', '/tmp/chroot')\n\n copies src/etc/myconfig to /tmp/chroot/etc/myconfig,\n and src/usr/bin/myscript to /tmp/chroot/usr/bin/myscript,\n respectively\n\n \"\"\"\n utils.makedirs_if_not_exists(os.path.dirname(dst))\n LOG.debug('Rsync files from %s to: %s', src, dst)\n utils.execute('rsync', '-rlptDKv', src + '/',\n dst + '/', logged=True)\n\n\ndef copy_update_certs(certs, chroot):\n \"\"\"Try to copy and update CA certificates in chroot\"\"\"\n for cert in certs:\n rsync_inject(cert, chroot)\n utils.execute('chroot', chroot, 'update-ca-certificates',\n check_exit_code=False, logged=True)\n\n\ndef dump_runtime_uuid(uuid, config):\n \"\"\"Save runtime_uuid into yaml file\n\n Simple uuid variable to identify bootstrap.\n Variable will be hard-coded into config yaml file, in build-time\n :param uuid:\n :param config: yaml file\n :return:\n \"\"\"\n data = {}\n utils.makedirs_if_not_exists(os.path.dirname(config))\n if os.path.isfile(config):\n with open(config, 'r') as f:\n data = yaml.load(f)\n data['runtime_uuid'] = uuid\n LOG.debug('Save runtime_uuid:%s to file: %s', uuid, config)\n with open(config, 'wt') as f:\n yaml.safe_dump(data, stream=f, encoding='utf-8')\n\n\ndef save_bs_container(output, input_dir, format=\"tar.gz\"):\n \"\"\"Copy files from dir to archive or another directory\n\n :param output:\n :param input_dir:\n :param format:\n :return:\n \"\"\"\n\n if format == 'directory':\n utils.makedirs_if_not_exists(output)\n bs_files = os.listdir(input_dir)\n LOG.debug(\"Output folder: %s\\ntry to copy bootstrap files: %s\",\n output, bs_files)\n for bs_file in bs_files:\n abs_bs_file = os.path.join(input_dir, 
bs_file)\n if (os.path.isfile(abs_bs_file)):\n if os.path.isfile(os.path.join(output, bs_file)):\n raise errors.BootstrapFileAlreadyExists(\n \"File: {0} already exists in: {1}\"\n .format(bs_file, output))\n shutil.copy(abs_bs_file, output)\n os.chmod(os.path.join(output, bs_file), 0o755)\n return output\n elif format == 'tar.gz':\n LOG.debug(\"Try to make output archive file: %s\", output)\n output = make_targz(input_dir, output_name=output)\n return output\n else:\n raise errors.WrongOutputContainer(\n \"Unsupported bootstrap container format {0}.\"\n .format(format))\n\n\n# NOTE(sslypushenko) Modern lvm supports lvmlocal.conf for selectively\n# overriding a set of configuration options. So, this functionality for\n# patching the lvm configuration should be removed after the lvm upgrade in\n# the Ubuntu repositories and replaced with a proper lvmlocal.conf file\ndef get_lvm_config_value(chroot, section, name):\n \"\"\"Get an option value from the current lvm configuration.\n\n If the option is not present in lvm.conf, None is returned\n \"\"\"\n raw_value = utils.execute('chroot', chroot, 'lvm dumpconfig',\n '/'.join((section, name)),\n check_exit_code=[0, 5])[0]\n if '=' not in raw_value:\n return\n\n raw_value = raw_value.split('=')[1].strip()\n\n re_str = '\"[^\"]*\"'\n re_float = '\\\\d*\\\\.\\\\d*'\n re_int = '\\\\d+'\n tokens = re.findall('|'.join((re_str, re_float, re_int)), raw_value)\n\n values = []\n for token in tokens:\n if re.match(re_str, token):\n values.append(token.strip('\"'))\n elif re.match(re_float, token):\n values.append(float(token))\n elif re.match(re_int, token):\n values.append(int(token))\n\n if not values:\n return\n elif len(values) == 1:\n return values[0]\n else:\n return values\n\n\ndef _update_option_in_lvm_raw_config(section, name, value, raw_config):\n \"\"\"Update an option in the dumped LVM configuration.\n\n :param raw_config: should be a string with the dumped LVM configuration.\n\n If the section and the key are present in the config, the option is\n overwritten. If the section is present but the key is not, the option is\n appended to the end of the section. If neither is present, a new section\n is appended to the end of the config.\n \"\"\"\n def dump_value(value):\n if isinstance(value, int):\n return str(value)\n elif isinstance(value, float):\n return '{:.10f}'.format(value).rstrip('0')\n elif isinstance(value, str):\n return '\"{}\"'.format(value)\n elif isinstance(value, (list, tuple)):\n return '[{}]'.format(', '.join(dump_value(v) for v in value))\n\n lines = raw_config.splitlines()\n section_start = next((n for n, line in enumerate(lines)\n if line.strip().startswith('{} '.format(section))),\n None)\n if section_start is None:\n raw_section = '{} {{\\n\\t{}={}\\n}}'.format(section, name,\n dump_value(value))\n lines.append(raw_section)\n return '\\n'.join(lines)\n\n line_no = section_start\n while not lines[line_no].strip().endswith('}'):\n if lines[line_no].strip().startswith(name):\n lines[line_no] = '\\t{}={}'.format(name, dump_value(value))\n return '\\n'.join(lines)\n line_no += 1\n\n lines[line_no] = '\\t{}={}\\n}}'.format(name, dump_value(value))\n return '\\n'.join(lines)\n
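# Editor's note: illustrative call of _update_option_in_lvm_raw_config above
# (the two-line input is made up, not real `lvm dumpconfig` output):
raw_demo = 'devices {\n\tfilter="a/.*/"\n}'
updated_demo = _update_option_in_lvm_raw_config(
    'devices', 'filter', ['a/.*/', 'r|sd.*|'], raw_demo)
# updated_demo ->
# devices {
#       filter=["a/.*/", "r|sd.*|"]
# }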
def override_lvm_config_value(chroot, section, name, value, lvm_conf_file):\n \"\"\"Override an option in the LVM configuration.\n\n If the option is not valid, errors.ProcessExecutionError will be raised\n and the lvm configuration will remain unchanged\n \"\"\"\n lvm_conf_file = os.path.join(chroot, lvm_conf_file.lstrip('/'))\n updated_config = _update_option_in_lvm_raw_config(\n section, name, value,\n utils.execute('chroot', chroot, 'lvm dumpconfig')[0])\n lvm_conf_file_bak = '{}.bak.{}'.format(lvm_conf_file,\n time.strftime(\"%Y_%m_%d_%H_%M_%S\"))\n shutil.copy(lvm_conf_file, lvm_conf_file_bak)\n LOG.debug('Backup of the original LVM configuration file: {}'\n ''.format(lvm_conf_file_bak))\n with open(lvm_conf_file, mode='w') as lvm_conf:\n lvm_conf.write(updated_config)\n\n # NOTE(sslypushenko) An extra cycle of dumping/saving lvm.conf is required\n # to be sure that the updated configuration is valid and to adjust it to\n # the general lvm.conf formatting\n try:\n current_config = utils.execute('chroot', chroot, 'lvm dumpconfig')[0]\n with open(lvm_conf_file, mode='w') as lvm_conf:\n lvm_conf.write(current_config)\n LOG.info('LVM configuration {} updated. '\n 'Option {}/{} gets new value: {}'\n ''.format(lvm_conf_file, section, name, value))\n except errors.ProcessExecutionError as exc:\n shutil.move(lvm_conf_file_bak, lvm_conf_file)\n LOG.debug('Option {}/{} cannot be updated with value {}. '\n 'Configuration restored'.format(section, name, value))\n raise exc\n\n\ndef override_lvm_config(chroot, config, lvm_conf_path='/etc/lvm/lvm.conf',\n update_initramfs=False):\n \"\"\"Override custom values in the LVM configuration\n\n :param config: should be a dict with the part of the LVM configuration\n to override\n Example:\n {'devices': {'filter': ['a/.*/'],\n 'preferred_names': '^/dev/mapper/'}}\n \"\"\"\n\n for section in config:\n for name in config[section]:\n override_lvm_config_value(chroot, section, name,\n config[section][name],\n lvm_conf_path)\n if update_initramfs:\n # NOTE(sslypushenko) We need to update the initramfs to push the\n # LVM configuration into it.\n LOG.info('Updating target initramfs')\n utils.execute('chroot', chroot, 'update-initramfs -v -u -k all')\n LOG.debug('Running \"update-initramfs\" completed')\n","sub_path":"fuel_agent/utils/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":41936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"100860828","text":"\n# 01\n\nimport sys\nfrom PyQt5.QtWidgets import *\nimport Ui_book\nimport create\nimport change\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n\npersonList = {}\n\n\nclass My_window(QMainWindow, Ui_book.Ui_MainWindow):\n def __init__(self):\n super(My_window, self).__init__()\n self.setupUi(self)\n\n # disable in-place editing of table cells\n self.tabletable.setEditTriggers(QAbstractItemView.NoEditTriggers)\n # allow selecting only one row at a time\n self.tabletable.setSelectionMode(QAbstractItemView.SingleSelection)\n # select whole rows rather than single cells\n self.tabletable.setSelectionBehavior(QAbstractItemView.SelectRows)\n\n # hide the vertical (row-number) header\n self.tabletable.verticalHeader().setVisible(False)\n self.display()\n # connect signals and slots\n self.pushButton_create.clicked.connect(self.save)\n self.pushButton_del.clicked.connect(self.delete)\n self.pushButton_change.clicked.connect(self.change)\n self.pushButton_search.clicked.connect(self.search)\n\n def display(self):\n personList = create.example.get_data()\n\n if personList:\n\n r = len(personList)\n\n self.tabletable.setRowCount(r)\n i = 0\n num = 1\n for v in personList.values():\n numItem = QTableWidgetItem(str(num))\n self.tabletable.setItem(i, 0, numItem)\n\n nameItem = QTableWidgetItem(v.name)\n self.tabletable.setItem(i, 1, nameItem)\n\n telItem = QTableWidgetItem(v.number)\n self.tabletable.setItem(i, 2, telItem)\n\n emailItem = QTableWidgetItem(v.email)\n self.tabletable.setItem(i, 3, emailItem)\n i += 1\n num += 1\n else:\n self.tabletable.setRowCount(0)\n QMessageBox.warning(self, '提示', 
'没有读取到联系人,请确认数据文件位置或新建联系人。')\n\n def save(self):\n dialog = create.My_Dialog(self)\n dialog.Signal_save.connect(self.display)\n dialog.show()\n\n def delete(self):\n r = self.tabletable.currentRow()\n if r >= 0:\n name = self.tabletable.selectedItems()[1].text()\n r = self.tabletable.selectedItems()[1].row()\n\n button = QMessageBox.question(self, '确认删除', '确认删除联系人{}吗?'.format(\n name), QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n if button == QMessageBox.Yes:\n create.example.delete_person(name)\n else:\n pass\n self.display()\n else:\n QMessageBox.critical(self, 'ERROR', '未选中联系人!')\n\n def change(self):\n dialog = change.Change_Dialog(self)\n r = self.tabletable.currentRow()\n if r >= 0:\n name = self.tabletable.selectedItems()[1].text()\n tel = self.tabletable.selectedItems()[2].text()\n email = self.tabletable.selectedItems()[3].text()\n dialog.lineEdit_name.setText(name)\n dialog.lineEdit_tel.setText(tel)\n dialog.lineEdit_email.setText(email)\n\n dialog.Signal_change.connect(self.display)\n dialog.show()\n else:\n QMessageBox.critical(self, 'ERROR', '未选中联系人!')\n\n def search(self):\n data = create.example.get_data()\n if data:\n search_text = self.lineEdit.text()\n if search_text:\n items = self.tabletable.findItems(search_text, Qt.MatchExactly)\n if items:\n item = items[0]\n item.setSelected(True)\n row = item.row()\n self.tabletable.verticalScrollBar().setSliderPosition(row)\n else:\n QMessageBox.critical(self, '未找到对应信息', '未找到对应信息')\n else:\n QMessageBox.critical(self, 'ERROR', '请输入要查询信息')\n else:\n QMessageBox.critical(self, 'ERROR', '没有数据,请添加联系人')\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = My_window()\n\n window.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"649477416","text":"import numpy as np\nimport matplotlib.pyplot as plt\n \n \nax = plt.subplot(111)\n \nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n \nx=np.linspace(-np.pi,np.pi,256,endpoint=True)\nC,S=np.cos(x),np.sin(x)\n \nplt.plot(x,C,color='red',linewidth=2.5,linestyle='-',label=r'$cos(t)$')\nplt.plot(x,S,color='blue',linewidth=2.5,linestyle='-',label=r'$sin(t)$')\n \nplt.xlim(x.min()*1.1, x.max()*1.1)\nplt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi],\n [r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$+\\pi/2$', r'$+\\pi$'])\n \nplt.ylim(C.min()*1.1,C.max()*1.1)\nplt.yticks([-1, 0, +1],\n [r'$-1$', r'$0$', r'$+1$'])\nplt.show()","sub_path":"drawCosSin.py","file_name":"drawCosSin.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"158132071","text":"r\"\"\" # r ignores special characters and prints them as the raw data.\nThis script creates an empty file.\n\"\"\"\n\nimport datetime\n\nfilename = datetime.datetime.now()\n# filename = \"sample1.txt\"\ncontent = datetime.datetime.now()\n\n# create an empty file\ndef create_file():\n \"\"\"This function creates an empty file\"\"\"\n with open(filename.strftime(\"%d %b %Y_%H:%M:%S\") + '.txt', 'a') as file:\n file.write(str(content) + '\\n') # writes the datetime to file\n\ncreate_file()\n\n\"\"\"\nrun this in Python...\n>>> import dates_and_times\n>>> dates_and_times.__doc__\n'\\nThis 
script creates an empty file.\\n'\n# this has printed out in Python the 'docstring' that was written in line 2\n\n>>> dates_and_times.create_file()\n# calls the function and creates the file\n\nhttp://strftime.org/\nInfo on formatting characters to use with .strftime()\n\"\"\"\n","sub_path":"dates_and_times.py","file_name":"dates_and_times.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"424244315","text":"# -*- coding: utf-8 -*-\n\nimport urllib.request\n# import datetime\n# import json\n# import isbnmcu\n\nfrom clibooks import isbnmcu\n\n\ndef _isbn_from_openlibrary(isbn13):\n\n OPEN_LIBRARY_API = (\"https://openlibrary.org/api/books?bibkeys=ISBN:{}\" +\n \"&jscmd=data&format=json\")\n\n r = urllib.request.urlopen(OPEN_LIBRARY_API.format(isbn13))\n encoding = r.info().get_content_charset('utf-8')\n data = r.read()\n return data.decode(encoding)\n\n\ndef _isbn_from_mcu(isbn13):\n return isbnmcu.isbn_from_mcu(isbn13)\n\n\ndef is_isbn10(isbn10):\n if isbn10 is None:\n return False\n\n if len(isbn10) != 10:\n return False\n sumatory = 0\n for i in range(1, len(isbn10) + 1):\n d = isbn10[i - 1]\n if d.upper() == 'X':\n d = 10\n sumatory += int(d) * i\n return (sumatory % 11) == 0\n\n\ndef is_isbn13(isbn13):\n if isbn13 is None:\n return False\n\n if len(isbn13) != 13:\n return False\n\n sumatory = 0\n for i in range(0, 13):\n if i % 2 == 0:\n sumatory += int(isbn13[i])\n else:\n sumatory += int(isbn13[i]) * 3\n return (sumatory % 10) == 0\n\n\ndef to_isbn13(isbn10):\n if isbn10 is None:\n return None\n\n if len(isbn10) != 10:\n return None\n isbn12 = '978' + isbn10[0:9]\n\n sumatory = 0\n for i in range(0, 12):\n if i % 2 == 0:\n sumatory += int(isbn12[i])\n else:\n sumatory += int(isbn12[i]) * 3\n\n r = (10 - sumatory) % 10\n\n return isbn12 + str(r)\n # if r < 10:\n # return isbn12 + str(r)\n # else:\n # return isbn12 + '0'\n\n\ndef isbn_OL_format(isbn13):\n\n if isbn13.startswith('97884'):\n return _isbn_from_mcu(isbn13)\n else:\n return _isbn_from_openlibrary(isbn13)\n","sub_path":"clibooks/isbn.py","file_name":"isbn.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"253502473","text":"\"\"\"\n58. 
Tuple extraction\nBased on the collapsed-dependencies output of the Stanford Core NLP\ndependency parse, print every \"subject predicate object\" triple in\ntab-separated format, using these definitions:\npredicate: a word that has children in both an nsubj and a dobj relation\nsubject: the child (dependent) of the predicate via the nsubj relation\nobject: the child (dependent) of the predicate via the dobj relation\n\nNotes (Akiyama):\nOnce we have the dependency info of the subject hanging off the same\ngovernor as the object and predicate, the triple can be assembled.\nLearning ElementTree from the API manual alone is hard going, so I studied\nfrom around problem 52 using the \"NLP 100 knocks for amateurs\" write-up as\na reference.\nPulling the structure out with regular expressions also works, but it gets\npainful from around problem 56.\nThe reference site uses convenient XPath location paths, but that would not\nbe much of an exercise, so this was reworked to avoid them.\nA sentence range can also be selected, for debugging and analysis.\n\n\"\"\"\n# coding: utf-8\n#from collections import defaultdict\n\nimport re\nimport xml.etree.ElementTree as ET\nimport pydot\n\nxml_root = ET.parse('nlp.txt.xml')\n\n\n\nbeg = 1\nend = 100\n\n\n\nfor sentence in xml_root.iterfind('./document/sentences/sentence'):\n # get sentence id\n sent_no = int(sentence.get('id'))\n if (sent_no >= beg) and (sent_no <= end):\n\n dict_pred = dict()\n dict_nsubj = dict()\n dict_dobj = dict()\n\n #for dep in sentence.iterfind(\\\n #'./dependencies[@type=\"collapsed-dependencies\"]/dep'):\n\n for depe in sentence.findall('./dependencies'):\n #print(depe)\n if depe.get('type') == 'collapsed-dependencies':\n #print('yes')\n for dep in depe.findall('dep'):\n\n dep_type = dep.get('type')\n if dep_type == 'nsubj' or dep_type == 'dobj':\n\n gover = dep.find('./governor')\n idx = gover.get('idx')\n # idx of the governor\n dict_pred[idx] = gover.text\n # word of the governor\n if dep_type == 'nsubj':\n dict_nsubj[idx] = dep.find('./dependent').text\n else:\n dict_dobj[idx] = dep.find('./dependent').text\n\n for idx, pred in sorted(dict_pred.items(),key=lambda x: x[0]):\n nsubj = dict_nsubj.get(idx)\n dobj = dict_dobj.get(idx)\n if (nsubj is not None) and (dobj is not None):\n print('{}\\t{}\\t{}'.format(nsubj, pred, dobj))\n\n\n\n\n\n'''\nfor token in xml_root.iterfind(\\\n'./document/sentences/sentence/tokens/token[NER=\"PERSON\"]'\\\n):\n print(token.findtext('word'))\n'''\n","sub_path":"kohei4/chapter06/knock58.py","file_name":"knock58.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"370005090","text":"from __future__ import print_function\nimport sys\nimport os\nimport os.path\nfrom thread import start_new_thread\n\n# TODO Nicer way to import bleeding edge libraries?\nsys.path.append(\"/home/apsync/dronekit-python/\")\nprint(sys.path)\n\nimport time\nfrom dronekit import connect as drone_connect, mavutil, VehicleMode\n\nclass Drone(object):\n\n    def __init__(self, connection_string, release_altitude=10):\n self.connection_string = connection_string\n self.closed_pwm = 1200\n self.open_pwm = 1850\n self.release_servo_number = 9 # aux 1\n self.test_servo_number = 11\n self.release_altitude = release_altitude\n self.current_test_servo_pwm = self.closed_pwm\n\n def set_servo(self, servo_number, pwm_value):\n pwm_value_int = int(pwm_value)\n msg = self.connection.message_factory.command_long_encode(\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_CMD_DO_SET_SERVO,\n 0,\n servo_number,\n pwm_value_int,\n 0, 0, 0, 0, 0\n )\n self.connection.send_mavlink(msg)\n\n # Moves servo 9 (aux 1) to release payload\n def release_payload(self):\n print(\"releasing payload\")\n start_new_thread(self.set_servo, (self.release_servo_number, self.open_pwm,))\n\n # Moves servo 9 (aux 1) to closed to hold payload\n def lock_payload(self):\n print(\"locking payload\")\n start_new_thread(self.set_servo, (self.release_servo_number, self.closed_pwm,))\n\n # Toggles the test servo (channel 11) between open and closed\n def move_test_servos(self):\n 
print(\"moving test servos\")\n start_new_thread(self.set_servo, (self.test_servo_number, self.current_test_servo_pwm,))\n if self.current_test_servo_pwm != self.closed_pwm:\n self.current_test_servo_pwm = self.closed_pwm\n else:\n self.current_test_servo_pwm = self.open_pwm\n\n def connect(self):\n self.connection = drone_connect(self.connection_string, wait_ready=True)\n return self.connection\n\n def report(self):\n print(\" GPS: %s\" % self.connection.gps_0)\n print(\" Battery: %s\" % self.connection.battery)\n print(\" Last Heartbeat: %s\" % self.connection.last_heartbeat)\n print(\" Is Armable?: %s\" % self.connection.is_armable)\n print(\" System status: %s\" % self.connection.system_status.state)\n print(\" Mode: %s\" % self.connection.mode.name)\n\n def autopilot(self):\n # Make sure the payload release is in the closed position\n self.lock_payload()\n\n # Calculate relative altitude only for low level tests.\n # WARNING This can't be used for long duration flights in-case the script restarts\n startalt = None\n while not startalt:\n time.sleep(1)\n startalt = self.connection.location.global_frame.alt\n\n stop = False\n\n while not stop:\n alt = self.connection.location.global_frame.alt - startalt\n print ( \"Height %s\" % alt )\n if alt >= self.release_altitude:\n self.release_payload()\n time.sleep(2)\n self.move_test_servos()\n time.sleep(3)\n if os.path.isfile(\"/home/apsync/stopmission\"):\n stop = True\n\n # Other commands e.g. set flight mode\n\n self.connection.close()\n\ndef start_flight(connection_string):\n drone = Drone(connection_string)\n print(\"Connecting to plane on %s\" % (drone.connection_string,))\n connection = drone.connect()\n\n @connection.on_message('SYSTEM_TIME')\n def listener(self, name, message):\n thetime = int(message.time_unix_usec)/1000000\n if sys.platform in ['linux', 'linux2', 'darwin']:\n os.system(\"sudo date +%s -s @%s\" % ('%s', thetime))\n\n drone.report()\n drone.autopilot()\n\nif __name__ == '__main__':\n start_flight('0.0.0.0:9000')","sub_path":"scripts/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160400495","text":"import io, os, sys, types, ast\nimport nbformat\n\nfrom IPython import get_ipython\nfrom IPython.core.interactiveshell import InteractiveShell\n\ndef find_notebook(fullname, path=None):\n \"\"\"find a notebook, given its fully qualified name and an optional path\n \n This turns \"foo.bar\" into \"foo/bar.ipynb\"\n and tries turning \"Foo_Bar\" into \"Foo Bar\" if Foo_Bar\n does not exist.\n \"\"\"\n name = fullname.rsplit('.', 1)[-1]\n if not path:\n path = ['']\n for d in path:\n nb_path = os.path.join(d, name + \".ipynb\")\n if os.path.isfile(nb_path):\n return nb_path\n # let import Notebook_Name find \"Notebook Name.ipynb\"\n nb_path = nb_path.replace(\"_\", \" \")\n if os.path.isfile(nb_path):\n return nb_path\n\nclass CellTransformer(ast.NodeTransformer):\n \"\"\" Removes all nodes from an AST tree which are not suitable for export out \n of a notebook. \"\"\"\n def visit(self, node):\n \"\"\" Visit a node. 
\"\"\"\n if node.__class__.__name__ in ['Module', 'FunctionDef', 'ClassDef', \n 'Import', 'ImportFrom']:\n return node\n return None\n\nclass NotebookLoader(object):\n \"\"\"Module Loader for IPython Notebooks\"\"\"\n def __init__(self, path=None):\n self.shell = InteractiveShell.instance()\n self.path = path\n \n def load_module(self, fullname):\n \"\"\"import a notebook as a module\"\"\"\n path = find_notebook(fullname, self.path)\n \n print (\"importing notebook from %s\" % path)\n \n # load the notebook object\n nb = nbformat.read(path, as_version=4)\n \n \n # create the module and add it to sys.modules\n # if name in sys.modules:\n # return sys.modules[name]\n mod = types.ModuleType(fullname)\n mod.__file__ = path\n mod.__loader__ = self\n mod.__dict__['get_ipython'] = get_ipython\n sys.modules[fullname] = mod\n \n # extra work to ensure that magics that would affect the user_ns\n # actually affect the notebook module's ns\n save_user_ns = self.shell.user_ns\n self.shell.user_ns = mod.__dict__\n \n try:\n deleter = CellTransformer()\n for cell in filter(lambda c: c.cell_type == 'code', nb.cells):\n # transform the input to executable Python\n code = self.shell.input_transformer_manager.transform_cell(cell.source)\n # Remove anything that isn't a def or a class\n tree = deleter.generic_visit(ast.parse(code))\n # run the code in themodule\n codeobj = compile(tree, filename=path, mode='exec')\n exec(codeobj, mod.__dict__)\n finally:\n self.shell.user_ns = save_user_ns\n return mod\n\nclass NotebookFinder(object):\n \"\"\"Module finder that locates IPython Notebooks\"\"\"\n def __init__(self):\n self.loaders = {}\n \n def find_module(self, fullname, path=None):\n nb_path = find_notebook(fullname, path)\n if not nb_path:\n return\n \n key = path\n if path:\n # lists aren't hashable\n key = os.path.sep.join(path)\n \n if key not in self.loaders:\n self.loaders[key] = NotebookLoader(path)\n return self.loaders[key]\n\n\n# Import stuff\nsys.meta_path.append(NotebookFinder())\n","sub_path":"nbimporter.py","file_name":"nbimporter.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"25769275","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('EVTSELECT')\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.StandardSequences.GeometryDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.RawToDigi_Data_cff')\nprocess.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.load('Configuration.EventContent.EventContentHeavyIons_cff')\n\nprocess.load(\"HeavyIonsAnalysis.Configuration.collisionEventSelection_cff\")\n\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\nprocess.options = cms.untracked.PSet(\n)\n\n################################################################################\n# Input Variable Parsing\n#\n\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\nivars = VarParsing.VarParsing('python')\n\nivars.inputFiles = 
'file:///gpfs22/grid/store/hidata/HIRun2010/HICorePhysics/RAW/v1/000/151/153/F2AA5770-E1F0-DF11-BFEB-001D09F2527B.root'\n\nivars.outputFile = 'selected_events.root'\n\nivars.parseArguments()\n\n\n# Input source\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring( ivars.inputFiles )\n)\n\n# Output definition\nprocess.output = cms.OutputModule(\"PoolOutputModule\",\n splitLevel = cms.untracked.int32(0),\n outputCommands = process.FEVTEventContent.outputCommands,\n SelectEvents = cms.untracked.PSet(\n SelectEvents = cms.vstring('reco_step')\n ),\n fileName = cms.untracked.string( ivars.outputFile )\n)\n\nimport HLTrigger.HLTfilters.hltHighLevel_cfi\nprocess.hltMinBiasHF = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()\nprocess.hltMinBiasHF.HLTPaths = [\"HLT_HIMinBiasHfOrBSC_Core\"]\n\n# Other statements\nprocess.GlobalTag.globaltag = 'GR10_P_V12::All'\n\n# Path and EndPath definitions\n\nprocess.reco_step = cms.Path( \n process.RawToDigi *\n process.reconstructionHeavyIons *\n process.hltMinBiasHF * \n process.collisionEventSelection \n)\n\nprocess.endjob_step = cms.Path(process.endOfProcess)\nprocess.out_step = cms.EndPath(process.output)\n\nprocess.schedule = cms.Schedule(\n process.reco_step,\n process.endjob_step,\n process.out_step\n)\n\n","sub_path":"MergedTrackCorrections/dataMix/step1_eventSelection_cfg.py","file_name":"step1_eventSelection_cfg.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"485370768","text":"\"\"\"Compute optimal and average improvement for different parameters\"\"\"\n\nimport csv\nfrom math import inf, nan\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom library.array_to_results import two_col_array_to_results\nfrom library.compare_old_new import compute_improvement\nfrom library.mc_enum import MCEnum\nfrom library.mc_enum_to_dist import mc_enum_to_dist\nfrom library.monte_carlo_dist import MonteCarloDist\nfrom library.perform_parameter import PerformParameter\nfrom nc_operations.perform_enum import PerformEnum\nfrom nc_processes.arrival_enum import ArrivalEnum\nfrom nc_processes.constant_rate_server import ConstantRate\nfrom nc_processes.ebb import EBB\nfrom nc_processes.markov_modulated import MMOOFluid\nfrom nc_processes.qt import DM1, MD1\nfrom nc_processes.regulated_arrivals import LeakyBucketMassOne\nfrom optimization.opt_method import OptMethod\nfrom single_server.single_server_perform import SingleServerPerform\n\n\ndef csv_single_param_power(\n arrival_enum: ArrivalEnum, perform_param: PerformParameter,\n opt_method: OptMethod, mc_dist: MonteCarloDist) -> dict:\n \"\"\"Chooses parameters by Monte Carlo type random choice\"\"\"\n total_iterations = 10**4\n valid_iterations = total_iterations\n metric = \"relative\"\n\n size_array = [\n total_iterations,\n arrival_enum.number_parameters() + 1 # const_rate has 1 parameter\n ]\n # [rows, columns]\n\n param_array = mc_enum_to_dist(mc_dist=mc_dist, size=size_array)\n\n res_array = np.empty([total_iterations, 2])\n\n for i in tqdm(range(total_iterations), total=total_iterations):\n if arrival_enum == ArrivalEnum.DM1:\n arrival = DM1(lamb=param_array[i, 0])\n\n elif arrival_enum == ArrivalEnum.MD1:\n arrival = MD1(\n lamb=param_array[i, 0],\n mu=1 / (param_array[i, arrival_enum.number_parameters()]))\n # TODO: double check\n\n elif arrival_enum == ArrivalEnum.MMOO:\n arrival = MMOOFluid(\n mu=param_array[i, 0],\n lamb=param_array[i, 1],\n burst=param_array[i, 2])\n\n 
elif arrival_enum == ArrivalEnum.EBB:\n arrival = EBB(\n factor_m=param_array[i, 0],\n decay=param_array[i, 1],\n rho_single=param_array[i, 2])\n\n elif arrival_enum == ArrivalEnum.MassOne:\n arrival = LeakyBucketMassOne(\n sigma_single=param_array[i, 0],\n rho_single=param_array[i, 1],\n n=20)\n # TODO: note that n is fixed\n\n else:\n raise NameError(\"Arrival parameter {0} is infeasible\".format(\n arrival_enum.name))\n\n if arrival_enum == ArrivalEnum.MD1 or arrival_enum == ArrivalEnum.MM1:\n setting = SingleServerPerform(\n arr=arrival,\n const_rate=ConstantRate(rate=1.0),\n perform_param=perform_param)\n else:\n setting = SingleServerPerform(\n arr=arrival,\n const_rate=ConstantRate(\n rate=param_array[i, arrival_enum.number_parameters()]),\n perform_param=perform_param)\n\n # standard_bound, new_bound = compute_improvement()\n res_array[i, 0], res_array[i, 1] = compute_improvement(\n setting=setting, opt_method=opt_method)\n\n if perform_param.perform_metric == PerformEnum.DELAY_PROB:\n if res_array[i, 1] > 1.0:\n res_array[i, ] = nan\n\n if (res_array[i, 0] == inf or res_array[i, 1] == inf\n or np.isnan(res_array[i, 0]) or np.isnan(res_array[i, 1])):\n res_array[i, ] = nan\n valid_iterations -= 1\n\n res_dict = two_col_array_to_results(\n arrival_enum=arrival_enum,\n metric=metric,\n param_array=param_array,\n res_array=res_array,\n number_servers=1,\n valid_iterations=valid_iterations)\n\n res_dict.update({\n \"iterations\": total_iterations,\n \"delta_time\": perform_param.value,\n \"optimization\": opt_method.name,\n \"metric\": metric,\n \"MCDistribution\": mc_dist.to_name(),\n \"MCParam\": mc_dist.param_to_string()\n })\n\n with open(\n \"single_{0}_{1}_results_MC{2}_{3}_{4}.csv\".format(\n perform_param.to_name(), arrival_enum.name, mc_dist.to_name(),\n opt_method.name, metric), 'w') as csv_file:\n writer = csv.writer(csv_file)\n for key, value in res_dict.items():\n writer.writerow([key, value])\n\n return res_dict\n\n\ndef grid_param_single_dm1(perform_param: PerformParameter,\n opt_method: OptMethod, metric: str, lamb1_range,\n rate1_range) -> dict:\n \"\"\"Choose parameters along a grid\"\"\"\n\n total_iterations = len(lamb1_range) * len(rate1_range)\n valid_iterations = total_iterations\n\n param_array = np.empty([total_iterations, 2])\n res_array = np.empty([total_iterations, 2])\n\n i = 0\n for lamb1 in tqdm(lamb1_range):\n for rate1 in rate1_range:\n setting = SingleServerPerform(\n arr=DM1(lamb=lamb1),\n const_rate=ConstantRate(rate=rate1),\n perform_param=perform_param)\n param_array[i, 0] = lamb1\n param_array[i, 1] = rate1\n\n res_array[i, 0], res_array[i, 1] = compute_improvement(\n setting=setting, opt_method=opt_method, number_l=1)\n if res_array[i, 1] == inf:\n res_array[i, ] = nan\n valid_iterations -= 1\n\n i += 1\n\n return two_col_array_to_results(\n arrival_enum=ArrivalEnum.DM1,\n metric=metric,\n param_array=param_array,\n res_array=res_array,\n number_servers=1,\n valid_iterations=valid_iterations)\n\n\nif __name__ == '__main__':\n # OUTPUT4 = PerformParameter(perform_metric=PerformEnum.OUTPUT, value=4)\n\n DELAY10 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB, value=10)\n\n COMMON_OPTIMIZATION = OptMethod.GRID_SEARCH\n\n MC_UNIF20 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[20.0])\n MC_EXP1 = MonteCarloDist(mc_enum=MCEnum.EXPONENTIAL, param_list=[1.0])\n\n ARRIVAL_PROCESS = ArrivalEnum.MD1\n\n print(\n csv_single_param_power(\n arrival_enum=ARRIVAL_PROCESS,\n perform_param=DELAY10,\n opt_method=COMMON_OPTIMIZATION,\n 
mc_dist=MC_UNIF20))\n\n print(\n csv_single_param_power(\n arrival_enum=ARRIVAL_PROCESS,\n perform_param=DELAY10,\n opt_method=COMMON_OPTIMIZATION,\n mc_dist=MC_EXP1))\n\n # print(\n # grid_param_single_dm1(\n # perform_param=DELAY10,\n # opt_method=OptMethod.GRID_SEARCH,\n # lamb1_range=[0.1, 0.3, 0.5, 0.7, 1, 2, 4, 8, 12],\n # rate1_range=[0.1, 0.3, 0.5, 0.7, 1, 2, 4, 8, 12]))\n","sub_path":"src/archive/csv_single_param_power.py","file_name":"csv_single_param_power.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"637144154","text":"from collections import OrderedDict\nfrom typing import List, Tuple, Type\n\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.db import transaction\nfrom django.db.models.query_utils import Q\nfrom django.forms import Form\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import DetailView, TemplateView, UpdateView\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django_hosts import reverse\nfrom formtools.wizard.views import SessionWizardView\n\nimport config.hosts\nfrom transit_odp.bods.interfaces.plugins import get_notifications\nfrom transit_odp.common.forms import ConfirmationForm\nfrom transit_odp.common.view_mixins import BODSBaseView\nfrom transit_odp.organisation.constants import DatasetType, FeedStatus\nfrom transit_odp.organisation.models import Dataset, DatasetRevision\nfrom transit_odp.publish.forms import (\n FeedDescriptionForm,\n FeedPublishCancelForm,\n FeedUploadForm,\n RevisionPublishForm,\n)\nfrom transit_odp.users.models import AgentUserInvite\nfrom transit_odp.users.views.mixins import OrgUserViewMixin\n\nExpiredStatus = FeedStatus.expired.value\n\n\nclass BaseTemplateView(BODSBaseView, TemplateView):\n pass\n\n\nclass BaseDetailView(BODSBaseView, DetailView):\n pass\n\n\nclass BaseUpdateView(BODSBaseView, UpdateView):\n pass\n\n\nclass PublishFeedDetailViewBase(BaseDetailView):\n \"\"\"Baseclass to use for all child routes of the /feed//\n\n Filters Feed queryset to those 'owned by organisation' to lookup feed pk.\n \"\"\"\n\n model = Dataset\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n organisation_id=self.organisation.id,\n )\n .add_live_data()\n .select_related(\"live_revision\")\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update({\"pk1\": self.kwargs[\"pk\"]})\n return context\n\n\nclass FeedWizardBaseView(BODSBaseView, OrgUserViewMixin, SessionWizardView):\n DESCRIPTION_STEP = \"description\"\n PUBLISH_CANCEL_STEP = \"cancel\"\n UPLOAD_STEP = \"upload\"\n\n form_list: List[Tuple[str, Type[Form]]] = [\n (DESCRIPTION_STEP, FeedDescriptionForm),\n (PUBLISH_CANCEL_STEP, FeedPublishCancelForm),\n (UPLOAD_STEP, FeedUploadForm),\n ]\n\n step_context = {\n DESCRIPTION_STEP: {\"step_title\": _(\"Describe your data set\")},\n PUBLISH_CANCEL_STEP: {\"step_title\": _(\"Cancel step for publish\")},\n UPLOAD_STEP: {\"step_title\": _(\"Choose how you want to publish your data\")},\n }\n\n file_storage = FileSystemStorage(location=settings.MEDIA_ROOT + \"/tmp\")\n\n def get_context_data(self, form, **kwargs):\n kwargs = super().get_context_data(form, **kwargs)\n kwargs.update(self.step_context[self.steps.current])\n 
kwargs.update({\"form_list\": list(self.form_list.items())[0:3]})\n kwargs.update({\"current_step\": self.steps.current, \"pk1\": self.kwargs[\"pk1\"]})\n return kwargs\n\n def get_form_initial(self, step):\n kwargs = super().get_form_initial(step)\n # Initialise form with stored step data\n stored = self.storage.get_step_data(step) or {}\n kwargs.update(**stored)\n return kwargs\n\n def render_done(self, form, **kwargs):\n final_forms = OrderedDict()\n # walk through the form list and try to validate the data again.\n for form_key in self.get_form_list():\n form_obj = self.get_form(\n step=form_key,\n data=self.storage.get_step_data(form_key),\n files=self.storage.get_step_files(form_key),\n )\n if not form_obj.is_valid() and form_key != self.PUBLISH_CANCEL_STEP:\n return self.render_revalidation_failure(form_key, form_obj, **kwargs)\n final_forms[form_key] = form_obj\n\n done_response = self.done(final_forms.values(), form_dict=final_forms, **kwargs)\n self.storage.reset()\n return done_response\n\n\nclass ReviewBaseView(OrgUserViewMixin, BaseUpdateView):\n \"\"\"The base view of all review pages\"\"\"\n\n model = DatasetRevision\n form_class = RevisionPublishForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"pk1\"] = self.kwargs[\"pk1\"]\n return context\n\n def get_dataset_queryset(self):\n \"\"\"Returns a DatasetQuerySet for Datasets owned by the user's organisation\"\"\"\n return Dataset.objects.filter(organisation_id=self.organisation.id)\n\n def get_dataset(self):\n \"\"\"Get Dataset using URL parameter\"\"\"\n try:\n pk = self.kwargs.get(self.pk_url_kwarg)\n queryset = self.get_dataset_queryset()\n return queryset.get(id=pk)\n except Dataset.DoesNotExist:\n raise Http404(\n _(\"No %(verbose_name)s found matching the query\")\n % {\"verbose_name\": Dataset._meta.verbose_name}\n )\n\n def get_queryset(self):\n \"\"\"Returns a QuerySet of DatasetRevisions\"\"\"\n dataset = self.get_dataset()\n queryset = dataset.revisions.select_related(\"dataset__organisation\")\n return queryset\n\n def get_object(self, queryset=None):\n queryset = self.get_queryset()\n try:\n self.object = queryset.get(is_published=False)\n return self.object\n except DatasetRevision.DoesNotExist:\n raise Http404(\n _(\"No %(verbose_name)s found matching the query\")\n % {\"verbose_name\": DatasetRevision._meta.verbose_name}\n )\n\n def form_valid(self, form):\n revision = self.get_object()\n if not revision.is_published:\n revision.publish(self.request.user)\n return HttpResponseRedirect(self.get_success_url())\n\n def is_loading(self):\n revision = self.object\n status = revision.status\n return status == \"indexing\" or status == \"processing\" or status == \"pending\"\n\n\nclass DeleteRevisionBaseView(OrgUserViewMixin, BaseUpdateView):\n model = Dataset\n form_class = ConfirmationForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n revision = self.object.revisions.order_by(\"-created\").first()\n context.update({\"revision_name\": revision.name, \"pk1\": self.kwargs[\"pk1\"]})\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n feed_id = self.object.id\n kwargs.update({\"label\": \"Delete\", \"cancel_url\": self.get_cancel_url(feed_id)})\n kwargs.pop(\"instance\", None)\n return kwargs\n\n def get_cancel_url(self, feed_id):\n pass\n\n def form_valid(self, form):\n client = get_notifications()\n dataset = self.get_object()\n revision = 
dataset.revisions.order_by(\"-created\").first()\n\n # Delete revision\n if not revision.is_published or revision.status == ExpiredStatus:\n\n try:\n DatasetRevision.objects.get(id=revision.id).delete()\n\n except DatasetRevision.DoesNotExist:\n # This shouldnt happen but we dont want to break the site if it does\n pass\n\n else:\n client.send_data_endpoint_deleted_deleter_notification(\n dataset_id=dataset.id,\n dataset_name=revision.name,\n contact_email=self.request.user.email,\n )\n if dataset.contact != self.request.user:\n client.send_data_endpoint_deleted_updater_notification(\n dataset_id=dataset.id,\n contact_email=dataset.contact.email,\n dataset_name=revision.name,\n last_updated=revision.modified,\n )\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass FeedArchiveBaseView(OrgUserViewMixin, BaseUpdateView):\n form_class = ConfirmationForm\n model = Dataset\n app_name = None\n dataset_type = None\n\n def get_back_url(self):\n return reverse(\n f\"{self.viewname_prefix}feed-detail\",\n host=self.request.host.name,\n kwargs={\"pk\": self.object.id, \"pk1\": self.object.organisation_id},\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update({\"pk1\": self.kwargs[\"pk1\"], \"backlink\": self.get_back_url()})\n return context\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n organisation_id=self.organisation.id,\n dataset_type=self.dataset_type,\n )\n .get_active()\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n feed_id = self.object.id\n\n kwargs.update(\n {\n \"cancel_url\": reverse(\n f\"{self.viewname_prefix}feed-detail\",\n host=self.request.host.name,\n kwargs={\"pk\": feed_id, \"pk1\": self.object.organisation_id},\n )\n }\n )\n kwargs.pop(\"instance\", None)\n return kwargs\n\n def get_success_url(self):\n feed_id = self.object.id\n\n return reverse(\n f\"{self.viewname_prefix}feed-archive-success\",\n kwargs={\"pk\": feed_id, \"pk1\": self.kwargs[\"pk1\"]},\n host=config.hosts.PUBLISH_HOST,\n )\n\n def form_valid(self, form):\n client = get_notifications()\n dataset = self.get_object()\n dataset_revision = dataset.live_revision\n user = self.request.user\n\n # set published revision to 'inactive'\n dataset_revision.to_inactive()\n dataset_revision.last_modified_user = user\n dataset_revision.save()\n now = timezone.now()\n\n # delete draft revisions of the dataset\n draft_revisions = dataset.revisions.exclude(id=dataset_revision.id).filter(\n is_published=False\n )\n draft_revisions.delete()\n\n if not dataset.contact.is_agent_user:\n # If the normal user flow is respected (ie adding through invites)\n # then we can assume that if an organisations user is an agent\n # then they must be an agent for that organisation. 
Mixed accounts\n # are not supported.\n client.send_data_endpoint_deactivated_notification(\n dataset_id=dataset.id,\n dataset_name=dataset.live_revision.name,\n short_description=dataset.live_revision.short_description,\n contact_email=dataset.contact.email,\n published_at=dataset.live_revision.published_at,\n expired_at=now,\n )\n\n for agent in dataset.organisation.agentuserinvite_set.filter(\n status=AgentUserInvite.ACCEPTED\n ):\n client.send_agent_data_endpoint_deactivated_notification(\n dataset_id=dataset.id,\n dataset_name=dataset.live_revision.name,\n contact_email=agent.email,\n operator_name=dataset.organisation.name,\n short_description=dataset.live_revision.short_description,\n published_at=dataset.live_revision.published_at,\n expired_at=now,\n )\n\n for developer in dataset.subscribers.exclude(\n settings__mute_all_dataset_notifications=True\n ).order_by(\"id\"):\n client.send_developer_data_endpoint_expired_notification(\n dataset_id=dataset.id,\n dataset_name=dataset.live_revision.name,\n short_description=dataset.live_revision.short_description,\n contact_email=developer.email,\n published_at=dataset.live_revision.published_at,\n expired_at=now,\n )\n\n return HttpResponseRedirect(self.get_success_url())\n\n @property\n def viewname_prefix(self):\n return \"\" if self.app_name is None else f\"{self.app_name}:\"\n\n\nclass FeedArchiveSuccessBaseView(OrgUserViewMixin, PublishFeedDetailViewBase):\n template_name = \"publish/feed_archive_success.html\"\n\n def back_to_data_sets_url(self):\n viewname_prefix = \"\" if self.app_name is None else f\"{self.app_name}:\"\n return reverse(\n f\"{viewname_prefix}feed-list\",\n kwargs={\"pk1\": self.kwargs[\"pk1\"]},\n host=config.hosts.PUBLISH_HOST,\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"back_to_data_sets\"] = self.back_to_data_sets_url()\n return context\n\n\nclass EditDescriptionBaseView(OrgUserViewMixin, BaseUpdateView):\n model = Dataset\n dataset_type: DatasetType\n object: DatasetRevision\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n organisation_id=self.organisation.id,\n dataset_type=self.dataset_type,\n )\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n revision_name = self.object.name\n if len(revision_name) > 20:\n revision_name = revision_name[:19] + \"...\"\n context.update({\"pk1\": self.kwargs[\"pk1\"], \"revision_name\": revision_name})\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"instance\": self.object})\n return kwargs\n\n def post(self, request, *args, **kwargs):\n if self.request.POST.get(\"cancel\", None) == \"cancel\":\n return redirect(self.get_cancel_url())\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n self.object.description = form.cleaned_data[\"description\"]\n self.object.short_description = form.cleaned_data[\"short_description\"]\n self.object.save()\n return redirect(self.get_success_url())\n\n def get_dataset_url(self):\n pass\n\n def get_success_url(self):\n return self.get_dataset_url()\n\n def get_cancel_url(self):\n return self.get_dataset_url()\n\n\nclass BaseFeedUploadWizard(FeedWizardBaseView):\n def get_template_names(self):\n if self.request.POST.get(\"cancel\", None) == self.PUBLISH_CANCEL_STEP:\n return \"publish/feed_publish_cancel.html\"\n return \"publish/feed_form.html\"\n\n def get_step_data(self, step):\n step_data = 
self.storage.get_step_data(step)\n if not step_data:\n return None\n if step == self.DESCRIPTION_STEP:\n return step_data.get(\"description-description\", None)\n elif step == self.UPLOAD_STEP:\n return step_data.get(\"upload-upload\", None)\n\n return None\n\n def get_context_data(self, form, **kwargs):\n kwargs = super().get_context_data(form, **kwargs)\n kwargs.update(\n {\n \"title_tag_text\": f\"Publish new data set: {kwargs.get('current_step')}\",\n \"pk1\": self.kwargs[\"pk1\"],\n }\n )\n\n if self.request.POST.get(\"cancel\", None) == self.PUBLISH_CANCEL_STEP:\n kwargs[\"previous_step\"] = self.request.POST.get(\n \"feed_upload_wizard-current_step\", None\n )\n return kwargs\n\n def get_next_step(self, step=None):\n if self.steps.current == self.DESCRIPTION_STEP:\n return self.UPLOAD_STEP\n\n return None\n\n def step_was_modified(self, step):\n if step == self.UPLOAD_STEP:\n return True\n cleaned_data = self.storage.get_step_data(step)\n return cleaned_data is not None\n\n def post(self, *args, **kwargs):\n wizard_goto_step = self.request.POST.get(\"wizard_goto_step\", None)\n\n if (\n self.request.POST.get(\"cancel\", None) == self.PUBLISH_CANCEL_STEP\n ) and not wizard_goto_step:\n self.storage.current_step = self.PUBLISH_CANCEL_STEP\n return self.render_goto_step(self.storage.current_step)\n\n return super().post(*args, **kwargs)\n\n def get_dataset(self):\n return Dataset.objects.create(\n contact=self.request.user, organisation=self.organisation\n )\n\n @transaction.atomic\n def done(self, form_list, **kwargs):\n all_data = self.get_all_cleaned_data()\n dataset = self.get_dataset()\n all_data.update(\n {\"last_modified_user\": self.request.user, \"comment\": \"First publication\"}\n )\n\n revision = DatasetRevision.objects.filter(\n Q(dataset=dataset) & Q(is_published=False)\n ).update_or_create(dataset=dataset, is_published=False, defaults=all_data)[0]\n\n # trigger ETL job to run\n revision.start_etl()\n\n return HttpResponseRedirect(\n reverse(\n \"revision-publish\",\n kwargs={\"pk\": dataset.id, \"pk1\": dataset.organisation_id},\n host=config.hosts.PUBLISH_HOST,\n )\n )\n\n\nclass BaseDatasetUploadModify(SingleObjectMixin, BaseFeedUploadWizard):\n dataset = None\n PUBLISH_CANCEL_STEP = \"cancel\"\n UPLOAD_STEP = \"upload\"\n\n form_list: List[Tuple[str, Type[Form]]] = [\n (PUBLISH_CANCEL_STEP, FeedPublishCancelForm),\n (UPLOAD_STEP, FeedUploadForm),\n ]\n\n step_context = {\n PUBLISH_CANCEL_STEP: {\"step_title\": _(\"Cancel step for publish\")},\n UPLOAD_STEP: {\"step_title\": _(\"Choose how you want to publish your data\")},\n }\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n # self.dataset = self.get_dataset()\n # reset the current step to the first step.\n self.storage.current_step = self.UPLOAD_STEP\n return self.render(self.get_form())\n\n def get_dataset(self):\n # Filter by dataset\n pk = self.kwargs.get(self.pk_url_kwarg)\n try:\n # Get the single item from the filtered queryset\n dataset = Dataset.objects.get(id=pk)\n except self.queryset.model.DoesNotExist:\n raise Http404(\n _(\"No %(verbose_name)s found matching the query\")\n % {\"verbose_name\": self.queryset.model._meta.verbose_name}\n )\n return dataset\n\n def get_form_instance(self, step):\n return self.object\n\n def get_queryset(self):\n return DatasetRevision.objects.filter(\n dataset__organisation_id=self.organisation.id\n )\n\n def get_object(self, queryset=None):\n revision = None\n queryset = Dataset.objects.filter(organisation_id=self.organisation.id)\n # Get the 
single item from the filtered queryset\n self.dataset = self.get_dataset()\n\n try:\n revision = self.dataset.revisions.get(is_published=False)\n except queryset.model.DoesNotExist:\n raise Http404(\n _(\"No %(verbose_name)s found matching the query\")\n % {\"verbose_name\": DatasetRevision._meta.verbose_name}\n )\n if revision is not None:\n return revision\n\n def get_context_data(self, **kwargs):\n kwargs = super().get_context_data(**kwargs)\n kwargs.update(\n {\n \"title_tag_text\": f\"Provide data: {kwargs.get('current_step')}\",\n \"is_revision_modify\": True,\n }\n )\n\n if self.request.POST.get(\"cancel\", None) == self.PUBLISH_CANCEL_STEP:\n kwargs[\"previous_step\"] = self.request.POST.get(\n f\"{self.get_prefix(self.request)}-current_step\", None\n )\n\n return kwargs\n\n def post(self, *args, **kwargs):\n # Get feed object to update\n self.object = self.get_object()\n return super().post(*args, **kwargs)\n","sub_path":"transit_odp/publish/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":20022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"53320362","text":"\r\nfrom googleapiclient.discovery import build\r\nfrom google.auth.transport.requests import Request\r\nfrom google.oauth2.credentials import Credentials\r\nfrom google.oauth2 import service_account\r\n\r\n\r\n\r\n\r\nSERVICE_ACCOUNT_FILE = 'keys.json'\r\n# If modifying these scopes, delete the file token.json.\r\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\r\ncreds = None\r\ncreds = service_account.Credentials.from_service_account_file(\r\n SERVICE_ACCOUNT_FILE, scopes=SCOPES)\r\n\r\n\r\n\r\n\r\n\r\n\r\n# The ID and range of a sample spreadsheet.\r\nSAMPLE_SPREADSHEET_ID = '1r9uRW8T74x-hrURdwiNH-gqvZZRZyosei0o4jj4B-Os'\r\n\r\n\r\n\r\n\r\nservice = build('sheets', 'v4', credentials=creds)\r\n\r\n# Call the Sheets API\r\nsheet = service.spreadsheets()\r\nresult = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\r\n range=\"A1:G13\").execute()\r\nvalues = result.get('values', [])\r\naoa = [{\"Cost\":39}]\r\nrequest = service.spreadsheets().values().append(spreadsheetId=SAMPLE_SPREADSHEET_ID, range=\"A1\", valueInputOption=\"USER_ENTERED\",body={\"values\":aoa})\r\nresponse = request.execute()\r\nif not values:\r\n print('No data found.')\r\nelse:\r\n print(result)\r\n\r\n\r\n","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"292912330","text":"import numpy as np\n\n\n\ndef NormalMode(k, m):\n matrix = np.array([[2, -1, 0], [-1, 2, 1], [0, -1, 2]]) ;matrix = matrix * (k/m)\n eigenvalues, eigenvectors = np.linalg.eig(matrix)\n \n return print(eigenvalues), print(eigenvectors)\n \n\n\nNormalMode(1, 1)","sub_path":"DAY 8.py","file_name":"DAY 8.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"257261608","text":"from flask import url_for\nfrom sqlalchemy.schema import Table\n\nfrom app import db\n\n\nclass PaginatedAPIMixin:\n @staticmethod\n def to_collection_dict(query, page, per_page, endpoint, **kwargs):\n resources = query.paginate(page, per_page, False)\n data = {\n 'items': [item.to_dict() for item in resources.items],\n '_meta': {\n 'page': page,\n 'per_page': per_page,\n 'total_pages': resources.pages,\n 'total_items': resources.total,\n },\n '_links': {\n 'self': url_for(endpoint, 
page=page, per_page=per_page,\n                                **kwargs),\n                'next': url_for(endpoint, page=page + 1, per_page=per_page,\n                                **kwargs) if resources.has_next else None,\n                'prev': url_for(endpoint, page=page - 1, per_page=per_page,\n                                **kwargs) if resources.has_prev else None\n            }\n        }\n        return data\n\n\ndb.reflect()\n\n\nclass Platform(db.Model):\n    __tablename__ = 'gamecatalog_platform'\n\n\nclass Genre(db.Model):\n    __tablename__ = 'gamecatalog_genre'\n\n\nclass Keyword(db.Model):\n    __tablename__ = 'gamecatalog_keyword'\n\n\nclass Screenshot(db.Model):\n    __tablename__ = 'gamecatalog_screenshot'\n\n\nclass Game(PaginatedAPIMixin, db.Model):\n    __tablename__ = 'gamecatalog_game'\n\n    def to_dict(self):\n        return {\n            'id': self.id,\n            'name': self.name,\n            'cover_url': self.cover_url,\n            'summary': self.summary,\n            'release_date': self.release_date,\n            'rating': self.rating,\n            'rating_count': self.rating_count,\n            'aggregated_rating': self.aggregated_rating,\n            'aggregated_rating_count': self.aggregated_rating_count,\n            'screenshots': [screenshot.image_url for screenshot in self.screenshots],\n            'platforms': [platform.name for platform in self.platforms],\n            'genres': [genre.slug for genre in self.genres],\n            'keywords': [keyword.slug for keyword in self.keywords],\n        }\n\n\nclass User(db.Model):\n    __tablename__ = 'gamecatalog_user'\n\n\nclass Favourite(PaginatedAPIMixin, db.Model):\n    __tablename__ = 'gamecatalog_favourite'\n\n    def to_dict(self):\n        return {\n            'game': self.game.to_dict(),\n            'is_deleted': self.is_deleted,\n        }\n\n\ngame_platforms_association = db.Model.metadata.tables['gamecatalog_game_platforms']\ngame_genres_association = db.Model.metadata.tables['gamecatalog_game_genres']\ngame_keywords_association = db.Model.metadata.tables['gamecatalog_game_keywords']\n\nGame.platforms = db.relationship('Platform', secondary=game_platforms_association, backref='games')\nGame.genres = db.relationship('Genre', secondary=game_genres_association, backref='games')\nGame.keywords = db.relationship('Keyword', secondary=game_keywords_association, backref='games')\n\nGame.screenshots = db.relationship('Screenshot', backref='game', lazy='dynamic')\n\nFavourite.game = db.relationship('Game', backref='favourites')\nFavourite.user = db.relationship('User', backref='favourites')\n","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"370794923","text":"#!/usr/bin/env python\nimport re\nfrom glob import glob\n\nfiles = glob('*.eml')\n\nRX_SSN = r'\\b\\d{3}[^\\d\\w\\s]?\\d{2}[^\\d\\w\\s]?\\d{4}\\b'\n\nfor file_name in files:\n    with open(file_name, 'r') as file_in:\n        content = file_in.read()\n    for m in re.finditer(RX_SSN, content):\n        print(file_name, m.start(0), m.group(0))\n","sub_path":"find_ssn.py","file_name":"find_ssn.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"463332402","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nurl = \"http://www.vasanthamrecharge.com/aboutus.php\"\r\nr = requests.get(url)\r\nsoup = BeautifulSoup(r.text, 'html.parser')\r\nopenfile = open(\"webpage.txt\",'w')\r\nfor paragraph in soup.find_all('p'):\r\n    if paragraph.a:\r\n        openfile.write(paragraph.a.text.replace(\"\\n\",\" \").strip())\r\n    else:\r\n        # NavigableString subclasses str, so .replace() works on it directly.\r\n        openfile.write(paragraph.contents[0].replace(\"\\n\",\" \").strip())\r\nopenfile.close()\r\n \r\n","sub_path":"write into a 
file.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"416267734","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[234]:\n\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom pandas.tseries.offsets import MonthEnd\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\nimport matplotlib.pyplot as plt\nimport pickle\n\nfrom tqdm import tqdm\n\nimport plotly\nimport plotly.offline\nimport cufflinks as cf\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nfrom IPython.display import HTML\nfrom IPython.core.display import display, HTML\nimport copy\n\nfrom sklearn.metrics import median_absolute_error, r2_score\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import TransformedTargetRegressor, make_column_transformer\nfrom sklearn.linear_model import ElasticNet, Lasso, LassoLarsIC, LinearRegression, Ridge\nfrom sklearn.model_selection import GridSearchCV, TimeSeriesSplit,train_test_split, cross_validate, cross_val_score, validation_curve\n\nimport plotly.express as px\nimport seaborn as sns\n\nimport statsmodels.api as sm\nimport scipy.stats as stats\n\nimport logging\nimport logging.config\n\nfrom utils.misc import LogWrapper, BlockingTimeSeriesSplit, SMWrapper, StandardScalerClipper, get_start_end_dates, get_nonexistant_path\n\nimport chart_studio.plotly as py\nimport plotly.graph_objs as go\n\nfrom utils.plotlyhelper import plotly_fig2json, plotly_multi_shades\n\n\n# In[2]:\n\n\n# Plotly settings\ncf.go_offline()\ncf.set_config_file(offline=False, world_readable=True)\n\n\n# In[3]:\n\n\ninit_notebook_mode()\n\n\n# In[4]:\n\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('autoreload', '2')\n\n\n# # 1. Load datasets\n\n# In[5]:\n\n\nbf_filename = '../../data/processed/base_assets_M.pkl'\nbf_w_filename = '../../data/processed/base_assets_W.pkl'\n\n\n# In[6]:\n\n\n_freq = 'W'\n\n\n# #### Set a start date and end date\n\n# In[7]:\n\n\nds_bf = pd.read_pickle(bf_filename) if _freq == 'M' else pd.read_pickle(bf_w_filename)\n\n\n# In[8]:\n\n\ninputs = pd.read_csv('../../data/raw/inputs.log', index_col='Date', parse_dates=True)\ninputs.index = inputs.index.to_period('M') if _freq == 'M' else inputs.index.to_period('W-FRI')\nphase = inputs.Phase\nphase.name = 'phase'\nstart_dt = min(phase.index)\nend_dt = max(phase.index)\n\n\n# In[9]:\n\n\nphase_filename = '../../data/processed/phase_' + _freq\ninputs.Phase.to_pickle(phase_filename + '.pkl')\n\n\n# In[10]:\n\n\nds_mf = inputs.loc[:, inputs.columns != 'Phase']\n\n\n# In[11]:\n\n\nds_mf\n\n\n# In[12]:\n\n\nphase.value_counts()\n\n\n# In[13]:\n\n\nds_bf\n\n\n# # 2. 
Preprocessing\n\n# ### 2-1) Settings\n# #### From EDAV, we know that `best_lookback` is 12 weeks for the weekly model.\n\n# In[14]:\n\n\nbest_lookback = 3 if _freq == 'M' else 12\n\n\n# #### Split the dataset\n# - `8:2` split: fixed past data\n\n# In[15]:\n\n\ntest_size = 0.3\n\n\n# ### Some calculations\n\n# #### Calculate `m`-length rolling returns.\n\n# In[16]:\n\n\nbf = pd.DataFrame(columns=ds_bf.columns)\nlb_range = range(1, 25) if _freq == 'M' else [1] + list(range(4, 108, 4))\n\nfor m in tqdm(lb_range):\n rolling_bf = ds_bf.rolling(window=m).sum()\n rolling_bf['lookback'] = int(m)\n bf = pd.concat((bf, rolling_bf), axis=0)[rolling_bf.columns]\n\n# Add a phase column.\nbf = pd.merge(bf, phase, how='right', left_index=True, right_index=True)\n\n\n# In[17]:\n\n\nX = pd.merge(ds_mf.loc[start_dt:end_dt].copy(), phase, how='inner', left_index=True, right_index=True)\ny_train_dic = {}\ny_test_dic = {}\n\nfor b in tqdm(ds_bf.columns):\n y_train_dic[b] = {}\n y_test_dic[b] = {}\n \n for m in lb_range:\n y = bf.loc[bf.lookback == m, b].copy()\n X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n shuffle=False,\n test_size=test_size)\n y_train_dic[b][m] = y_train\n y_test_dic[b][m] = y_test\n\n\n# - `Walk forward split`\n# - Sliding window size `sliding_window`: 10 years\n\n# In[18]:\n\n\nsliding_window = 12*10 if _freq == 'M' else 52*10\n\n\n# In[19]:\n\n\nX_train_wf_dic = {}\nX_test_wf_dic = {}\ny_train_wf_dic = {}\ny_test_wf_dic = {}\n\n# for b in tqdm(ds_bf.columns):\n# y_train_wf_dic[b] = {}\n# y_test_wf_dic[b] = {}\nfor t, idx in tqdm(enumerate(range(sliding_window, X.shape[0]))):\n y = bf.loc[bf.lookback == best_lookback, :].drop(['lookback'], axis=1).copy()\n X_train_wf_dic[t], X_test_wf_dic[t], y_train_wf_dic[t], y_test_wf_dic[t] = X.iloc[t:t+sliding_window], X.iloc[t+sliding_window:t+sliding_window+1], y.iloc[t:t+sliding_window], y.iloc[t+sliding_window:t+sliding_window+1]\n\n\n# In[20]:\n\n\ny_test_wf_dic[0]\n\n\n# # 3. Model training\n\n# ## `ML-basic` model.\n# - Step 1: Apply a variant of Two-Stage Least-Squares Instrumental Variables estimation approach.\n# - Stage 1: Supervised PCAing base assets to get fitted macro factors so that we can reduce measurement errors within the macroeconomic variables.\n# - Stage 2: Run OLS post-t Lasso on the fitted macro factors over each base asset as *y* to get macro-factor loadings $\\mathbf{B}$.\n# - Step 2: Run multivariate OLS where $X$ is macro factors and $y$ is a base asset by replacing the OLS betas with the factor loadings $\\mathbf{B}$. 
Then, we have FMP weights $\\mathbf{W_K}$.\n# - $\\mathbf{W_K=\\Omega^{-1}B(B^T\\Omega^{-1}B)^{-1}B_K=B(B^TB)^{-1}}$\n# - $\\mathbf{\\Omega}=\\sigma\\mathbf{I_N}$ (Uncorrrelated assets with constant variance)\n# - $\\mathbf{B_K=I_K}$ (identity matrix)\n\n# #### Integrate `phase` into a dataset through one-hot encoding\n\n# In[21]:\n\n\ncategorical_columns = ['phase']\nonehot_encoding = make_column_transformer(\n (OneHotEncoder(drop='if_binary'), categorical_columns),\n remainder='passthrough')\n\n\n# ## Step 1: Get macro-factor loadings $\\mathbf{B}$\n\n# ### We do supervised PCA and then OLS post-t Lasso.\n\n# In[22]:\n\n\ninput_sz = len(y_train_wf_dic.keys())\n\n\n# In[23]:\n\n\nX_train_wf_dic[t]\n\n\n# In[24]:\n\n\nB_df = {}\n\nfor t in tqdm(range(input_sz)):\n bf_train_trial = y_train_wf_dic[t]\n \n # Apply PCA to base asset returns\n pca_pipe = Pipeline([('onehot_encoding', onehot_encoding), ('pca', PCA())])\n bf_train = bf_train_trial.copy()\n y_pca = pca_pipe.fit_transform(bf_train)\n\n model_lasso = {}\n fitted_mf_train = {} # Fitted macro factors (in the training set)\n\n for f in ds_mf.columns:\n # This selection comes from GridSearchCV at an EDAV stage. No more used because macro factors have changed since Oct 2020.\n opt_criterion = 'aic' if f == 'Growth' else 'bic'\n\n scaler_x = StandardScaler()\n scaler_y = StandardScalerClipper(zmin=-3, zmax=3)\n\n # We project each macro factor on a space spanned by principal components of base assets.\n # -> X_train, named as `X_pc_train`, should be `y_pca[m]`.\n X_pc_train = y_pca\n\n # We want to find a fitted macro factor.\n # -> y_train should be X_train[GRTH|INFL|UNCR]\n y_train = X_train_wf_dic[t][f]\n\n model_lasso[f] = Pipeline([\n ('standardizing_X', scaler_x),\n ('estimator',\n TransformedTargetRegressor(regressor=LassoLarsIC(criterion=opt_criterion),\n transformer=scaler_y, check_inverse=False)) # scaler_y clips z-values outside [-3, 3] to either -3 or 3, so no inversable.\n ])\n\n model_lasso[f].fit(X_pc_train, y_train)\n fitted_mf_train[f] = model_lasso[f].predict(X_pc_train)\n\n # Save the fitted macro factors as DataFrame.\n fitted_mf_train = pd.DataFrame().from_dict(fitted_mf_train)\n\n # Get fitted macro factors using trained models.\n bf_test_trial = pd.DataFrame(y_test_wf_dic[t])\n\n # We use the same pca instances trained on the training set to prevent any look-ahead bias.\n y_pca_test = pca_pipe.transform(bf_test_trial)\n\n # Predict fitted macro factors in the test set.\n fitted_mf_test = {}\n for f in ds_mf.columns:\n # As we did in the training process above, we take principal components extracted from trained-PCA instances, y_pca_test[m], as our spanned-space, X_pc_test.\n X_pc_test = y_pca_test\n\n # `fitted_mf_test[f][m]` contains fitted factor returns of 'f' factor\n fitted_mf_test[f] = model_lasso[f].predict(X_pc_test)\n\n fitted_mf_test = pd.DataFrame().from_dict(fitted_mf_test)\n\n X_fit_train = fitted_mf_train.to_numpy()\n X_fit_test = fitted_mf_test.to_numpy()\n \n # Stage 2: Apply OLS Post-t Lasso to each of the selected model, `model_lasso[f][m]`\n ## the threshold to drop a coefficient set to be 0.05.\n model_sqrt_lasso = {}\n coef_sqrt_lasso = {}\n \n for b in ds_bf.columns:\n scaler_x = StandardScalerClipper(zmin=-3, zmax=3)\n scaler_y = StandardScaler()\n\n y_train = bf_train_trial[b].to_numpy(dtype=np.float)\n\n model_sqrt_lasso[b] = Pipeline([\n ('standardizing_X', scaler_x),\n ('estimator', TransformedTargetRegressor(regressor=SMWrapper(model_class=sm.OLS, lasso_t=0.05, fit_intercept=True, 
refit=True),\n                                                 transformer=scaler_y))\n        ])\n\n        try:\n            model_sqrt_lasso[b].fit(X_fit_train, y_train)\n            coef_sqrt_lasso[b] = model_sqrt_lasso[b].named_steps['estimator'].regressor_.results_.params[1:] # We don't use the intercept, which is the 0th element.\n            num_params = coef_sqrt_lasso[b].shape[0]\n            if num_params == 6:\n                print('Only six params found. We need seven. X_fit_train is:', X_fit_train)\n        except Exception as e:\n            print('You might want to increase the size of your test set.\\n')\n            print('An exception occurs:', e)\n    \n    # X_test_wf_dic[t].index[0] is an index of the following week.\n    # e.g.: We train on a training set of pre-June 2020 data and\n    # save the result in B_df[1st week of July 2020].\n    B_df[X_test_wf_dic[t].index[0]] = pd.DataFrame().from_dict(coef_sqrt_lasso, orient='index', columns=ds_mf.columns)\n\n\n# #### Now we have $\\mathbf{B}$ as follows.\n\n# - New inputs on Nov 26, 2020.\n\n# In[25]:\n\n\nB_df[max(B_df.keys())].style.format('{:.2f}')\n\n\n# In[26]:\n\n\nB_df[max(B_df.keys())].style.format('{:.2f}')\n\n\n# In[27]:\n\n\nprint('Step 1: macro factor loadings B are calculated.')\n\n\n# ## Step 2: Compute an FMP weight vector $\\mathbf{W_K}$\n\n# #### Finally we can calculate an FMP weight vector $\\mathbf{W_K}$:\n# - $\\mathbf{W_K} = \\mathbf{\\Omega^{-1}B(B^T \\Omega^{-1}B)^{-1}}$ and this can be further simplified depending on a choice of covariance matrix of base assets $\\mathbf{\\Omega}$:\n#     - 1) $\\mathbf{\\Omega}=\\sigma\\mathbf{I_N}$: base assets are `uncorrelated` with `constant variance` over time.\n#     - 2) $\\mathbf{\\Omega}=Diag(\\sigma^2)$: base assets are `uncorrelated`.\n#     - 3) `Unconstrained` $\\mathbf{\\Omega}$.\n\n# #### Calculate weight vector $\\mathbf{W}$ in `W_df` and FMP returns in `fmp_rt`\n\n# - e.g.:\n#     - `fmp_rt['Growth']` contains Growth FMP returns.\n#     - `W_df[list(W_df.keys())[-1]]['Inflation']` returns FMP weights for a macro factor, **Inflation**.\n\n# In[28]:\n\n\nstart_test_dt = bf_test_trial.index[0]\nW_df = {}\nfmp_wt = pd.DataFrame()\nfmp_rt = {}\nfor k in tqdm(B_df.keys()):\n    B = B_df[k].to_numpy()\n#     W = B@np.linalg.inv(B.T@B)\n    W = B@np.linalg.pinv(B.T@B) # np.linalg.pinv() uses the SVD, so it stays stable when B.T@B is nearly singular.\n    W_df[k] = pd.DataFrame(W, index=ds_bf.columns, columns=ds_mf.columns)\n    temp_W = W_df[k].T.reset_index()\n    temp_W['Date'] = k.end_time.strftime('%Y-%m-%d')\n    fmp_wt = pd.concat((fmp_wt, temp_W))\n    fmp_rt[k] = ds_bf.loc[k-1]@W_df[k]\n\n\n# In[29]:\n\n\nprint('Step 2: FMP weights are calculated.')\n\n\n# - `fmp_wt` is a big matrix that includes all FMP weights *over time*.\n\n# In[30]:\n\n\nfmp_wt = fmp_wt.reset_index(drop=True)\nfmp_wt.rename(columns={'index':'Factor'}, inplace=True)\nfmp_wt = fmp_wt[['Date'] + list(fmp_wt.columns[:-1])]\n\n\n# - Save works.\n\n# In[31]:\n\n\nfmp_wt.to_csv('../../data/processed/fmp_wt.csv')\nfmp_wt.to_pickle('../../data/processed/fmp_wt.pkl')\n\n\n# In[32]:\n\n\nfmp_wt.tail(10)\n\n\n# # 4. Show model results.\n\n# #### Calculate a macro factor return matrix `mf_rt`.\n\n# - `mf_rt` is just values of macro factors as they are defined in `inputs.log`. 
We call them macro factors or macro factor returns, although they may not be returns in the strict sense.\n# - `fmp_cum[n]` is **n**-week cumulative returns of FMPs.\n\n# In[33]:\n\n\nfmp_rt = pd.DataFrame().from_dict(fmp_rt, orient='index')\nscaler = StandardScaler()\nmf_rt = X.loc[fmp_rt.index[0]:fmp_rt.index[-1], ds_mf.columns].copy()\nfmp_rt.set_index(fmp_rt.index.to_timestamp(how='E').strftime('%Y-%m-%d'), inplace=True)\nmf_rt.set_index(mf_rt.index.to_timestamp(how='E').strftime('%Y-%m-%d'), inplace=True)\nlb_range = [1] + list(range(4,56,4))\nfmp_cum = {cum:fmp_rt.rolling(window=cum).sum() for cum in lb_range}\n\n\n# In[34]:\n\n\nfmp_rt.tail()\n\n\n# #### Compute correlation matrices between various-length FMP returns `cum` and factor returns.\n# - Lookback-window for correlations is `window_sz`. e.g. Corr(52-week FMP returns, macro factor) when `window_sz`=52. \n\n# In[35]:\n\n\nwindow_sz = [26, 52, 104]\n\n\n# In[36]:\n\n\nmf_len = len(ds_mf.columns)\nr_corr = {}\ncross_corr_rolling = None\nprint('Calculating correlations between FMP returns and macro factor returns...')\n\nfor cum in tqdm(fmp_cum.keys()):\n    r_corr[cum] = {}\n    for w in window_sz:\n        r_corr[cum][w] = {}\n        for r_idx in range(len(mf_rt.index)-w):\n            c = pd.merge(fmp_cum[cum][r_idx:r_idx+w], mf_rt[r_idx:r_idx+w], how='inner', left_index=True, right_index=True, suffixes=('_fmp', '_mf')).corr()\n            \n            # We get a tuple of correlations(FMP returns, Macro factor returns)\n            r_corr[cum][w][mf_rt.index[r_idx+w]] = tuple([c.iloc[i, i+mf_len] for i in range(mf_len)]) \n            \n        corr_fmp_mf = pd.DataFrame().from_dict(r_corr[cum][w], orient='index', columns=ds_mf.columns)\n        if cross_corr_rolling is None:\n            cross_corr_rolling = corr_fmp_mf.copy()\n            cross_corr_rolling['lookback'] = cum\n            cross_corr_rolling['window'] = w\n        else:\n            corr_fmp_mf['lookback'] = cum\n            corr_fmp_mf['window'] = w\n            cross_corr_rolling = pd.concat((cross_corr_rolling, corr_fmp_mf.copy()))\n\n# Pull out the 'date' column from the index.\ncross_corr_rolling.index.name='date'\ncross_corr_rolling = cross_corr_rolling.reset_index()\n\n\n# In[37]:\n\n\ndef make_corr_fig(n_week):\n    mf_len = ds_mf.columns.shape[0]\n    fig = plotly.subplots.make_subplots(rows=int(mf_len/2)+1, cols=2, horizontal_spacing = 0.05, vertical_spacing = 0.08,\n                                        subplot_titles=['Correlation: ' + c for c in ds_mf.columns])\n    \n    for row, col_no in enumerate(range(0, len(ds_mf.columns),2)):\n        fig.add_trace(\n            go.Heatmap(\n                name='{}'.format(ds_mf.columns[col_no]),\n                z=cross_corr_rolling.loc[cross_corr_rolling.window==n_week, ds_mf.columns[col_no]],\n                x=cross_corr_rolling.loc[cross_corr_rolling.window==n_week, 'date'],\n                y=cross_corr_rolling.loc[cross_corr_rolling.window==n_week, 'lookback'],\n                colorscale='RdBu', zmin=-1, zmax=1\n            ), row+1, 1)\n        \n        if col_no+1 < len(ds_mf.columns):\n            fig.add_trace(\n                go.Heatmap(\n                    name='Corr. 
({})'.format(ds_mf.columns[col_no+1]),\n z=cross_corr_rolling.loc[cross_corr_rolling.window==n_week, ds_mf.columns[col_no+1]],\n x=cross_corr_rolling.loc[cross_corr_rolling.window==n_week, 'date'],\n y=cross_corr_rolling.loc[cross_corr_rolling.window==n_week, 'lookback'],\n colorscale='RdBu', zmin=-1, zmax=1\n ), row+1, 2)\n\n fig['layout'].update(height=800, width=1200, title=str(n_week) + '-week correlations over time (y-week cumulative FMP return, macro factor)', template='plotly_white',\n yaxis=dict(title='y-week'), autosize=False)\n\n return fig\n\n\n# In[38]:\n\n\nprint('Making correlation figures...')\n\n\n# ### `1) Rolling correlations`\n\n# In[39]:\n\n\nmake_corr_fig(26).show()\n\n\n# In[40]:\n\n\nmake_corr_fig(52).show()\n\n\n# In[41]:\n\n\nmake_corr_fig(104).show()\n\n\n# ### `2) FMP weights` (Spot)\n\n# In[42]:\n\n\nlatest_dt = max(fmp_wt.Date)\n\n\n# In[43]:\n\n\nfig_fmp_wt = fmp_wt[fmp_wt.Date==latest_dt].drop(\n ['Date'],\n axis=1).set_index('Factor').iplot(asFigure=True,\n kind='barh',\n bargap=.2,\n colorscale='plotly',\n theme='white',\n title='매크로 팩터를 추종하는 포트폴리오의 자산구성 (예시)',\n yTitle='매크로 팩터',\n xTitle='비중',\n barmode='stack')\nfig_fmp_wt['layout'].update(xaxis=dict(tickformat='.1%'),\n yaxis=dict(categoryorder='category descending'),\n legend_orientation='h')\nfig_fmp_wt.layout.legend.title = '투자자산'\n\n\n# In[44]:\n\n\niplot(fig_fmp_wt)\n\n\n# In[45]:\n\n\nfmp_wt_kor = fmp_wt.rename(\n columns={\n 'DMEQ': '선진 주식',\n 'UST': '미국 국채',\n 'CRE': '투자등급 채권',\n 'ILB': '미국 물가지수연동채권',\n 'DXY': '달러지수',\n 'FXCS': '자원부국 통화-안전자산 통화',\n 'GOLD': '금',\n 'ENGY': '에너지',\n 'REIT': '리츠'\n }, inplace=False)\n\n\n# In[46]:\n\n\nfig_fmp_wt_kor = fmp_wt_kor[fmp_wt_kor.Date==latest_dt].drop(\n ['Date'],\n axis=1).set_index('Factor').iplot(asFigure=True,\n kind='barh',\n bargap=.2,\n colorscale='plotly',\n theme='white',\n title='매크로 팩터를 추종하는 포트폴리오의 자산구성 (예시)',\n yTitle='매크로 팩터',\n xTitle='비중',\n barmode='stack')\nfig_fmp_wt_kor['layout'].update(xaxis=dict(tickformat='.1%'),\n yaxis=dict(categoryorder='category descending'),\n legend_orientation='h')\nfig_fmp_wt_kor.layout.legend.title = '투자자산'\n\n\n# In[47]:\n\n\niplot(fig_fmp_wt_kor)\n\n\n# ### `3) FMP weights` (Time-series)\n\n# #### Set `start_dt` and `end_dt` to plot.\n# - Set *None* if you want to plot all available dates.\n# - Format: yyyy-mm-dd\n\n# In[212]:\n\n\nstart_dt = '2013-01-01'\nend_dt = None\n\n\n# In[213]:\n\n\nmin_dt = min(fmp_wt.Date) if start_dt is None else max(start_dt, min(fmp_wt.Date))\nmax_dt = max(fmp_wt.Date) if end_dt is None else min(end_dt, max(fmp_wt.Date))\n\n\n# In[199]:\n\n\nprint('From {} to {}.'.format(min_dt, max_dt))\n\n\n# - Set `shade_nm` to be phase names. 
Their corresponding dates will be shaded in the following FMP weights chart.\n\n# In[200]:\n\n\nshade_nm = ['Recovery', 'Expansion']\nshade_colors=['gray', 'green']\n\n\n# In[201]:\n\n\n\n\n\n# In[202]:\n\n\nlegend_name = [' (Equities)', ' (UST 10yr)', ' (BBB-AAA)', ' (TIPS)', ' (Dollar Index)', ' (FX:Commidity-Safe)', '', ' (Energy)', '']\n\n\n# #### Set `plot_name` to be a phase name to be plotted.\n\n# In[251]:\n\n\nprint('Macro factors are: {}'.format(ds_mf.columns))\n\n\n# In[204]:\n\n\nplot_name = 'Growth'\n\n\n# #### Set `visible_base_assets` to be base asset names visible, which may be turned on and off later.\n\n# In[192]:\n\n\nvisible_base_assets = ['DMEQ', 'CRE', 'GOLD']\n\n\n# In[214]:\n\n\ndef make_fmp_wt_fig(plot_name='Growth',\n shade_nm=['Recovery', 'Expansion'],\n shade_colors=['gray', 'green'],\n start_dt=None,\n end_dt=None,\n visible_base_assets=['DMEQ', 'CRE', 'GOLD']):\n\n # Set start date and end date to plot.\n min_dt = min(fmp_wt.Date) if start_dt is None else max(start_dt, min(fmp_wt.Date))\n max_dt = max(fmp_wt.Date) if end_dt is None else min(end_dt, max(fmp_wt.Date))\n \n # Set dates for shading.\n shade1_dt = pd.Series(phase[phase==shade_nm[0]].index.to_timestamp(), name='date')\n shade2_dt = pd.Series(phase[phase==shade_nm[1]].index.to_timestamp(), name='date')\n shade1_start, shade1_end = get_start_end_dates(shade1_dt, 'W')\n shade2_start, shade2_end = get_start_end_dates(shade2_dt, 'W')\n \n # Set a figure.\n fig_fmp_wt_ts = fmp_wt[fmp_wt.Factor==plot_name].loc[np.logical_and(fmp_wt.Date>=min_dt, fmp_wt.Date<=max_dt), ~fmp_wt.columns.isin(['Factor'])].iplot(\n asFigure=True,\n kind='bar',\n x='Date',\n colorscale='plotly',\n barmode='stack',\n theme='white',\n world_readable=True,\n title='Changes in {} Factor-Mimicking-Portfolio weights over time'.format(plot_name),\n xTitle='Date',\n yTitle='Weights'\n )\n\n # Draw shades.\n fig_fmp_wt_ts = plotly_multi_shades(fig_fmp_wt_ts, x0=[shade1_start, shade2_start], x1=[shade1_end, shade2_end], colors=shade_colors, alpha=0.2)\n fig_fmp_wt_ts['layout'].update(yaxis=dict(tickformat='%'))\n fig_fmp_wt_ts.layout.legend.title = 'Base assets'\n fig_fmp_wt_ts = fig_fmp_wt_ts.update_xaxes(range=[min_dt, max_dt])\n\n # Set visible base assets and make legends with more explanation as needed.\n for i, fdata in enumerate(fig_fmp_wt_ts.data):\n fdata.visible = 'legendonly' if fig_fmp_wt_ts.data[i].name not in visible_base_assets else None\n fdata.name = fig_fmp_wt_ts.data[i].name + legend_name[i]\n\n fig_fmp_wt_ts.update_layout(\n annotations=[\n dict(\n x=0,\n y=0,\n xref='paper',\n yref='paper',\n text=\"Gray: {} phase, Green: {} phase\".format(shade_nm[0], shade_nm[1]),\n showarrow=False\n )\n ]\n );\n \n return fig_fmp_wt_ts\n\n\n# In[211]:\n\n\nfmp_wt[fmp_wt.Factor==plot_name].loc[np.logical_and(fmp_wt.Date>=min_dt, fmp_wt.Date<=max_dt), ~fmp_wt.columns.isin(['Factor'])]\n\n\n# In[195]:\n\n\nfig_fmp_wt_ts = fmp_wt[fmp_wt.Factor==plot_name].loc[np.logical_and(fmp_wt.Date>=min_dt, fmp_wt.Date<=max_dt), ~fmp_wt.columns.isin(['Factor'])].iplot(\n asFigure=True,\n kind='bar',\n x='Date',\n colorscale='plotly',\n barmode='stack',\n theme='white',\n world_readable=True,\n title='Changes in {} Factor-Mimicking-Portfolio weights over time'.format(plot_name),\n xTitle='Date',\n yTitle='Weights'\n)\nfig_fmp_wt_ts = plotly_multi_shades(fig_fmp_wt_ts, x0=[shade1_start, shade2_start], x1=[shade1_end, shade2_end], colors=['gray', 'green'], 
alpha=0.2)\nfig_fmp_wt_ts['layout'].update(yaxis=dict(tickformat='%'))\nfig_fmp_wt_ts.layout.legend.title = 'Base assets'\nfig_fmp_wt_ts = fig_fmp_wt_ts.update_xaxes(range=[min_dt, max_dt])\n\nfor i, fdata in enumerate(fig_fmp_wt_ts.data):\n fdata.visible = 'legendonly' if fig_fmp_wt_ts.data[i].name not in visible_base_assets else None\n fdata.name = fig_fmp_wt_ts.data[i].name + legend_name[i]\n\nfig_fmp_wt_ts.update_layout(\n annotations=[\n dict(\n x=0,\n y=0,\n xref='paper',\n yref='paper',\n text=\"Gray: {} phase, Green: {} phase\".format(shade_nm[0], shade_nm[1]),\n showarrow=False\n )\n ]\n);\n\n\n# In[256]:\n\n\nfig = make_fmp_wt_fig(plot_name=plot_name,\n shade_nm=shade_nm,\n shade_colors=shade_colors,\n start_dt=None,\n end_dt=None,\n visible_base_assets=visible_base_assets)\n\n\n# In[257]:\n\n\niplot(fig)\n\n\n# # 5. Save works\n\n# #### 1. Tables (csv files)\n# - `results_to_save` is a list of instances to be saved in .pkl and .csv, where their file names are specified in `filenames` in order.\n# - `correlations`: Cross correlations between `n`-week cumulative FMP returns and macro factors.\n# - `fmp_rt`: FMP returns. Return frequency is `_freq`.\n# - `macrofactor_rt`: Macro factor (returns).\n# - `fmp_wt`: Macro factor weights. **FILAL RESULTS of this model.**\n# \n# #### 2. Charts (png, pdf, json files)\n# - `results_to_save` is a list of instances to be saved in .png, .pdf and .json, where their file names are specified in `filenames` in order.\n# - `corr_w##`: Rolling correlation charts, where their lookback window size is ##.\n\n# In[254]:\n\n\ndef save_works(what='all'):\n '''\n Save results.\n \n Parameters:\n what: {'all'|'tables'|'figures'}\n '''\n \n today = date.today().strftime('%Y-%m-%d')\n \n # Save tables\n if (what == 'all') or (what == 'tables'):\n results_to_save = [cross_corr_rolling, fmp_rt, mf_rt, fmp_wt]\n filenames = ['/correlations', '/fmp_rt', '/macrofactor_rt', '/fmp_wt']\n assert len(results_to_save) == len(filenames), 'The number of elements in both lists, results_to_save and filenames, must be equal.'\n \n table_path = '../../reports/tables/'\n paths = [table_path + today + filename for filename in filenames]\n\n # Make a folder if needed.\n if not os.path.exists(table_path+today):\n os.mkdir(table_path+today)\n\n # Get sequential file names.\n filename_seq_pkl = [get_nonexistant_path(path+'.pkl') for path in paths]\n filename_seq_csv = [get_nonexistant_path(path+'.csv') for path in paths]\n \n # We save the same results in tables in different formats: csv and pickle.\n for idx in range(len(paths)):\n results_to_save[idx].to_pickle(filename_seq_pkl[idx])\n results_to_save[idx].to_csv(filename_seq_csv[idx])\n \n print('Tables are saved in {}.'.format(table_path))\n \n # Save charts.\n if (what == 'all') or (what == 'figures'):\n results_to_save = [make_corr_fig(26), make_corr_fig(52), make_corr_fig(104), fig_fmp_wt, fig_fmp_wt_kor] + [make_fmp_wt_fig(plot_name=mf) for mf in ds_mf.columns]\n filenames = ['/corr_w26', '/corr_w52', '/corr_w104', '/fmp_wt', '/fmp_wt_kor'] + ['/fmp_wt_ts_' + mf for mf in ds_mf.columns]\n assert len(results_to_save) == len(filenames), 'The number of elements in both lists, results_to_save and filenames, must be equal.'\n\n figure_path = '../../reports/figures/'\n paths = [figure_path + today + filename for filename in filenames]\n\n # Make a folder if needed.\n if not os.path.exists(figure_path+today):\n os.mkdir(figure_path+today)\n\n # Get sequential file names.\n filename_seq_png = [get_nonexistant_path(path+'.png') for 
path in paths]\n filename_seq_pdf = [get_nonexistant_path(path+'.pdf') for path in paths]\n filename_seq_json = [get_nonexistant_path(path+'.json') for path in paths]\n \n # We save the same charts in different formats: png, svg and json.\n for idx in range(len(paths)):\n results_to_save[idx].write_image(filename_seq_png[idx])\n results_to_save[idx].write_image(filename_seq_pdf[idx])\n plotly_fig2json(results_to_save[idx], filename_seq_json[idx])\n \n print('Charts are saved in {}.'.format(figure_path))\n\n\n# In[255]:\n\n\nsave_works()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"macrofactor/src/models/trash/train_model-dist.py","file_name":"train_model-dist.py","file_ext":"py","file_size_in_byte":27314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"387419816","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\n\nimport os\nimport subprocess\nimport json\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import dates\nfrom matplotlib.dates import date2num\nfrom matplotlib.dates import AutoDateLocator\nfrom matplotlib.dates import AutoDateFormatter\nimport datetime\nimport time\n\n\nfile = open('./calBmi/doc_BMI19/file_kg.json')\ndata = json.load(file)\n#file.close\n\nfor (key, value) in data.items():\n print(\"Key: \" + key)\n print(\"Valeur: \" + str(value))\n print(\"\\nTo represent the data_get:\\n\")\n print(data.get(\"data\"))\n print(\"\\n\")\n print(\"Valeur: \" + str(value[0]))\n print(\"Valeur: \" + str(value[1]))\n print(\"\\n\")\n print(\"Date: \" + str(value[0][\"Date\"]))\n print(\"Kg: \" + str(value[0][\"Kg\"]))\n print(\"\\n\")\n print(\"Date: \" + str(value[1][\"Date\"]))\n print(\"Kg: \" + str(value[1][\"Kg\"]))\n \ndata_list1 = []\nfor value in zip(value):\n data_list1.append(value[0]['Date'])\n\nfor (key, value) in data.items():\n print(key, value)\n print(\"\\n\")\n\nprint(\"\\nList of weight\\n\")\n\ndata_list2 = []\nfor value in zip(value):\n data_list2.append(value[0]['Kg'])\n\ndicolist = {}\n\nfor data_list1, data_list2 in zip(data_list1, data_list2):\n dicolist[data_list1] = data_list2\n\nprint(\"\\nDisplay dictionary :\")\nprint(\"---------------------------\")\nprint(dicolist)\n\nlist1 = []\nlist2 = []\n\nfor key, value in dicolist.items():\n list1.append(key)\n list2.append(value)\n \nprint(\"\\nList of dates :\")\nprint(\"----------------------------------\")\nprint(list1)\n\nprint(\"\\nList of weight :\")\nprint(\"------------------------\")\nprint(list2)\n\nlist2 = list(map(float, list2))\nlist1 = list(map(str, list1))\n\nconverted_dates = list(map(datetime.datetime.strptime, list1, len(list1)*['%d-%m-%Y']))\nx_axis = converted_dates\nformatter = dates.DateFormatter('%d-%m-%Y')\ny_axis = list2\n\nshow_grid = True\nwith plt.style.context('dark_background'):\n figure, axes = plt.subplots()\n # apply autoformatter for displaying of dates \n locator = AutoDateLocator()\n axes.xaxis.set_major_locator(locator)\n ax = plt.gcf().axes[0]\n ax.xaxis.set_major_formatter(formatter)\n #axes.xaxis.set_major_formatter(AutoDateFormatter(locator))\n min_date = date2num(datetime.datetime.strptime('01-01-2020', \"%d-%m-%Y\"))\n max_date = date2num(datetime.datetime.strptime('31-12-2020', \"%d-%m-%Y\"))\n axes.set_xlim([min_date, max_date])\n #figure.autofmt_xdate()\n\n plt.plot(x_axis, y_axis, 'o-', color='cyan')\n plt.ylabel('Kg')\n plt.xlabel('Dates')\n plt.title('Kg for one year')\n #plt.xticks(rotation=45)\n plt.legend(['kg/date'])\n plt.grid(show_grid)\n 
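    # gcf().autofmt_xdate(rotation=45) rotates the shared x-axis date labels
    # and right-aligns them so the dd-mm-YYYY ticks do not overlap.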
plt.gcf().autofmt_xdate(rotation=45)\n plt.show()\n\n# to verify if file exist.\ntry:\n if os.path.getsize('./calBmi/doc_BMI19/customBmi.py'):\n subprocess.run('./calBmi/doc_BMI19/customBmi.py', check=True)\nexcept FileNotFoundError as callfile1:\n print(\"+ File customBmi.py doesn't exist !\", callfile1)\n\n# to read into file the dates entered.\ntry:\n with open('./calBmi/doc_BMI19/custom_kg.txt', 'r') as namefile:\n line_1=namefile.readline()\n print(line_1)\n line_2=namefile.readline()\n print(line_2)\nexcept FileNotFoundError as callfile2:\n print(\"+ File custom_kg.txt doesn't exist !\", callfile2)\n\n# to delete '\\n' at the end of line_1\nprintmonth=len(line_1)\nconvert_line=line_1[0:-1]\nprint(convert_line)\n\n# or seaborn-darkgrid\nshow_grid = True\nwith plt.style.context('seaborn-darkgrid'):\n figure, axes = plt.subplots()\n\n locator = AutoDateLocator()\n axes.xaxis.set_major_locator(locator)\n ax = plt.gcf().axes[0]\n ax.xaxis.set_major_formatter(formatter)\n min_date = date2num(datetime.datetime.strptime(convert_line, \"%d-%m-%Y\"))\n max_date = date2num(datetime.datetime.strptime(line_2, \"%d-%m-%Y\"))\n axes.set_xlim([min_date, max_date])\n\n plt.plot(x_axis, y_axis, 'o-', color='purple')\n plt.ylabel('Kg')\n plt.xlabel('Dates')\n plt.title('Kg/Date customised')\n #plt.xticks(rotation=45)\n plt.legend(['kg/date'])\n plt.grid(show_grid)\n plt.gcf().autofmt_xdate(rotation=45)\n plt.show()","sub_path":"calBmi/doc_BMI19/convert_kg.py","file_name":"convert_kg.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"607010905","text":"# import sys\n# sys.stdin = open('input.txt')\n\nT = int(input())\n# 모든 경우의 수에 대해 완전탐색\n# 각 색깔에 해당하는 행의 개수가 몇 개인지 입력\ndef color_sum(w,b,r):\n idx = 0\n cnt = 0\n #각 개수만큼 색을 변환하며 바꾸는데 필요한 색의 개수를 저장\n while idx < w:\n cnt += len([x for x in arr[idx] if x != 'W'])\n idx+=1\n while idx < w+b:\n cnt += len([x for x in arr[idx] if x != 'B'])\n idx += 1\n while idx < n:\n cnt += len([x for x in arr[idx] if x != 'R'])\n idx += 1\n return cnt\n\nfor tc in range(1, T+1):\n n, m = map(int, input().split())\n arr = [list(input()) for _ in range(n)]\n result = 0\n min_v = float('inf')\n # 각 색깔의 개수에 대한 모든 경우의 수를 탐색\n for i in range(1, n-1):\n for j in range(1, n-i):\n result = color_sum(i, j, n-j-i)\n if result < min_v:\n min_v = result\n print('#{} {}'.format(tc, min_v))\n\n","sub_path":"Algorithm/swea/[4613] 러시아 국기 같은 깃발.py","file_name":"[4613] 러시아 국기 같은 깃발.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"79121122","text":"\"\"\"Command-line parsing library\n\nThis module is an extension of argparse parsing library that utilizes a\nclass-oriented approach of declaring command-line interfaces.\n\nThe following is a simple usage example that sums integers from the\ncommand-line and prints the result.\n\n class Sum(flagparse.Command):\n\n name = \"sum\"\n description = \"sum the integers at the command line\"\n\n arguments = [\n ([\"integers\"],\n dict(metavar=\"INT\",\n type=int,\n nargs=\"+\",\n help=\"integers to be summed\")),\n ]\n\n def handle(self, args: flagparse.Namespace) -> None:\n if len(args.integers) < 2:\n raise flagparse.ExitError(\"at least 2 integers expected\")\n print(sum(args.integers))\n\n Sum().parse()\n\n\nThe module contains the following public classes:\n\n - Command -- The main entry point for parsing arguments of the program. 
It\n can specify either a self-sufficient command or define a\n hierarchy of sub-commands.\n\n - SubCommand -- The sub-command for building complex nested command-line\n interfaces.\n\n - ExitError -- The exception to specify what error should be returned\n by the program. It is a placeholder for exit code and\n optional message that will be shown before the exit.\n\"\"\"\n\n__version__ = \"0.0.3\"\n\n\nimport argparse\nimport io\nimport sys\nimport traceback\n\nfrom typing import Any, Sequence, Optional, Union\n\n\nclass Namespace(argparse.Namespace):\n \"\"\"Namespace to hold parsed options as object attributes.\"\"\"\n pass\n\n\nclass ExitError(Exception):\n \"\"\"Exception used to specify flag handling error.\n\n Attributes:\n code -- process exit code\n message -- optional message printed before exit\n \"\"\"\n\n def __init__(self, code: int = 1, message: str = \"\"):\n self.code = code\n self.message = message\n\n super().__init__(f\"{self.code}, message={self.message}\")\n\n\nclass _Formatter(argparse.ArgumentDefaultsHelpFormatter):\n \"\"\"Formatter to format arguments with default values for wide screens.\n\n Print the usage of commands to 140 character-wide terminals.\n \"\"\"\n\n def __init__(self,\n prog,\n indent_increment=2,\n max_help_position=48,\n width=140):\n super().__init__(prog, indent_increment, max_help_position, width)\n\n\nclass _Attr:\n\n def __init__(self, name: str, required: Union[bool] = False):\n self.name = name\n self.required = bool(required)\n\n\nclass _Meta(Namespace):\n \"\"\"Holds meta information about command or sub-command.\"\"\"\n\n def __init__(self, obj: Any, attributes: Sequence[_Attr]):\n attrs = {}\n for attr in attributes:\n attr_val = getattr(obj, attr.name, None)\n if attr.required and attr_val is None:\n raise ValueError(f\"required attribute {attr.name} is missing\")\n attrs[attr.name] = attr_val\n super().__init__(**attrs)\n\n\nclass SubCommand:\n \"\"\"Parser for sub-commands of the parent command.\n\n Attributes:\n subparsers -- a list of argument sub-parsers\n \"\"\"\n\n __attributes__ = [\n _Attr(\"name\", required=True),\n _Attr(\"aliases\"),\n _Attr(\"arguments\"),\n _Attr(\"help\"),\n _Attr(\"description\"),\n _Attr(\"subcommands\"),\n ]\n\n def __init__(self, subparsers):\n # Copy all meta parameters of the command into the __meta__\n # dictionary, so it can be accessible during the setup of commands.\n self.__meta__ = _Meta(self, self.__attributes__)\n self.subcommands = []\n\n self.subparser = subparsers.add_parser(\n name=self.__meta__.name,\n help=self.__meta__.help,\n aliases=(self.__meta__.aliases or []),\n description=self.__meta__.description,\n formatter_class=_Formatter)\n\n for args, kwargs in self.__meta__.arguments or []:\n self.subparser.add_argument(*args, **kwargs)\n\n # At least print the help message for the command.\n self.subparser.set_defaults(func=self.handle)\n\n if self.__meta__.subcommands:\n subparsers = self.subparser.add_subparsers(dest=self.__meta__.name)\n self.subcommands = [c(subparsers) for c in\n self.__meta__.subcommands]\n\n def handle(self, args: Namespace):\n self.subparser.print_help()\n\n\nclass Command:\n \"\"\"Parser for parsing command line strings and handling commands.\n\n Arguments:\n subcommands -- optional list of sub-commands\n \"\"\"\n\n __attributes__ = [\n _Attr(\"name\", required=True),\n _Attr(\"arguments\"),\n _Attr(\"description\"),\n ]\n\n def __init__(self,\n subcommands: Optional[Sequence[SubCommand]] = None):\n self.__meta__ = _Meta(self, self.__attributes__)\n\n 
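        # Build the top-level argparse parser from the validated class-level
        # attributes gathered in self.__meta__; any sub-commands attach their
        # own sub-parsers to this parser below.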
self.parser = argparse.ArgumentParser(\n            formatter_class=_Formatter,\n            prog=self.__meta__.name,\n            description=self.__meta__.description,\n        )\n\n        self.parser.set_defaults(func=self.handle)\n\n        for args, kwargs in self.__meta__.arguments or []:\n            self.parser.add_argument(*args, **kwargs)\n\n        if subcommands:\n            subparsers = self.parser.add_subparsers()\n            self.subcommands = [c(subparsers) for c in subcommands]\n\n    def handle(self, args: Namespace) -> None:\n        self.parser.print_help()\n\n    def parse(self,\n              args: Optional[Sequence[str]] = None,\n              trace: bool = False,\n              errlog: io.TextIOBase = sys.stderr):\n        try:\n            try:\n                args = self.parser.parse_args(args)\n                args.func(args=Namespace(**args.__dict__))\n            except Exception as e:\n                if trace:\n                    traceback.print_exc()\n                raise e\n        except ExitError as e:\n            if e.message:\n                errlog.write(f\"{e.message}\\n\")\n                errlog.flush()\n            sys.exit(e.code)\n        except Exception as e:\n            errlog.write(f\"{e}\\n\")\n            errlog.flush()\n            sys.exit(1)\n        # Exit 0 only when the handler returns normally; a sys.exit(0) placed\n        # in a finally block would replace the non-zero exit codes raised above.\n        sys.exit(0)\n","sub_path":"flagparse.py","file_name":"flagparse.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"86199063","text":"from typing import List\n\n\nclass Solution:\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n\n        # Sort each string and use the sorted tuple as a dict key to group anagrams.\n        from collections import defaultdict\n        dict_ = defaultdict(list)\n\n        for s in strs:\n            dict_[tuple(sorted(s))].append(s)\n        return list(dict_.values())","sub_path":"Week_02/G20200343030412/LeetCode_49_412.py","file_name":"LeetCode_49_412.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"586255632","text":"UNEXCEPTABLE_PART_OF_SPEECH = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'LS', 'MD', \\\n    'PRP', 'PRP$', 'TO', 'WDT', 'WP', 'WP$', 'WRB']\n\nVERB_SUFFIX = ['ing', 'ed', 's']\nVERB_TAGS = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\n\nIRREGULAR_COMPARATIVE_ADJECTIVES = {\n    'worse' : 'bad',\n    'farther': 'far',\n    'better': 'good',\n    'later': 'late',\n    'less': 'little',\n    'more': 'many'\n}\nIRREGULAR_SUPERLATIVE_ADJECTIVES = {\n    'worst' : 'bad',\n    'farthest': 'far',\n    'best': 'good',\n    'last': 'late',\n    'latest': 'late',\n    'least': 'little',\n    'most': 'many'\n}\n\nSTANDARD_DEVIATION_MULTIPLIER = 22","sub_path":"core/constants/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"266494154","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 8 10:10:27 2021\n\n@author: brian\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport pdb\ndef groupby_concat_mean(df,gbcol,datcol):\n    '''\n    Take grouped mean of column of arrays in pandas dataframe.\n    \n    I.e. 
if a column has many array objects of the same length, use df.groupby()\n and apply np.mean(axis=0) to np.vstack() of grouped rows\n\n Parameters\n ----------\n df : pandas.DataFrame\n must contain gbcol and datcol columns\n gbcol : String\n Column of df to groupby\n\n datcol : String\n Column of arrays of same length to be concatenated vertically:\n \n\n \n \n Returns\n -------\n pandas.DataFrame\n Grouped rows w/ concatenated means of arrays.\n \n for example: df, gbcol = 'anid', datcol = 'speed'\n df=\n 'anid' 'speed'\n 'AB1' [0,1,1]\n 'AB1' [0,2,1]\n \n returns:\n df = 'anid' 'speed'\n 'AB1' [0, 1.5, 1]\n\n '''\n temp=df.groupby(by=gbcol).apply(lambda x: np.mean(np.vstack(x.loc[:,datcol]),axis=0))\n temp0=df.groupby(by=gbcol).mean()\n temp0.loc[:,datcol]=temp.values\n return temp0.reset_index()\n\ndef consolidate_columns_to_labels(df,label_columns,\n value_column_name='value',\n label_column_name='label'):\n ''' Take a dataframe with several columns and convert those column names\n to label values, and put values in same row of value column.\n Update: this seems to be the same as pd.melt() !!!!\n \n e.g. if df = 'a' 'b' 'c' 'id'\n 1 2 3 'apple'\n 3 4 5 'dog'\n \n return 'label' 'value' 'id'\n a 1 'apple'\n b 2 'apple'\n c 3 'apple'\n a 3 'dog'\n b 4 'dog'\n c 5 'dog'\n \n '''\n \n #Initialize the new dataframe in format desired:\n keep_columns = [col for col in df.columns if (col not in label_columns)]\n output_df={value_column_name : [],\n label_column_name : [],\n }\n for col in keep_columns:\n output_df[col]=[]\n \n \n #Turn dataframe into a column of labels and values, w/ chunks ID'd by index:\n unwrapped =df.stack().reset_index(level=1, name='val')\n ind = np.unique(df.index)\n \n for i in ind:\n #For each chunk of rows in unwrapped data frame \n chunk = unwrapped.loc[i].reset_index(drop=True)\n labels = chunk.loc[:,'level_1']\n \n #Keep label/value pairs as is, but add in\n for ii,lab in enumerate(labels):\n if lab in label_columns:\n output_df[value_column_name].append(chunk.loc[ii,'val'])\n output_df[label_column_name].append(lab)\n else:\n for j in range(0,len(label_columns)):\n output_df[lab].append(chunk.iloc[ii,1])\n return pd.DataFrame(output_df)\n\n\ndef df_melt_stack(dfs,df_conds,label_columns, var_name, value_name, \n static_columns, sort_column=None):\n ''' Take 2 dataframes of different conditions, melt columns and concatenate.\n Input: \n dfs = list of pandas.DataFrames with identical column variables,\n df_conds = list of string labels describing condition of data frames ['control','experimental']\n label_columns = list of column names to unpivot (pd.melt(value_vars))\n static_columns = list of column names to fill when unpivotted (pd.melt(id_vars))\n sort_column (optional) = string. name of column to use for sorting first if desired.\n \n Output:\n df_out= pandas dataframe with melted,stacked data.\n \n e.g. 
\n    \n    if dfs[0] = 'a' 'b' 'c' 'id'\n                 1   2   3  'apple'\n                 3   4   5  'dog'\n    \n       dfs[1] = 'a' 'b' 'c' 'id'\n                 0   0   0  'apple'\n                 1   3   2  'dog'\n    \n    with df_conds=['saline','cno']\n         static_columns=['id']\n    \n    return:\n        'var_name' 'value_name' 'id'     'cond'\n         a          1           'apple'  'saline'\n         b          2           'apple'  'saline'\n         c          3           'apple'  'saline'\n         a          3           'dog'    'saline'\n         b          4           'dog'    'saline'\n         c          5           'dog'    'saline'\n         a          0           'apple'  'cno'\n         b          0           'apple'  'cno'\n         c          0           'apple'  'cno'\n         a          1           'dog'    'cno'\n         b          3           'dog'    'cno'\n         c          2           'dog'    'cno'\n    \n    \n    '''\n    static_columns += ['cond']\n    df_out = pd.DataFrame()\n    for cond_label,df in zip(df_conds,dfs):\n        df['cond']=cond_label\n        # pdb.set_trace()\n        df_out= pd.concat((df_out,\n                   pd.melt(df,\n                           value_vars = label_columns,\n                           id_vars = static_columns, \n                           value_name=value_name,\n                           var_name=var_name)\n                   ))\n    return df_out","sub_path":"table_wrappers.py","file_name":"table_wrappers.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"325932243","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \n\ndataset = pd.read_csv(\"data.csv\", index_col = 0)\nx = np.array(dataset['x'])\ny = np.array(dataset['y'])\n\ndef costFunction(m, t0, t1, x, y):\n\treturn 1/(2*m) * sum([(t0 + t1* np.asarray([x[i]]) - y[i])**2 for i in range(m)])\n\n\ndef gd(lr, x, y, iterations):\n\t#initialize theta\n\ttheeta0 = 0\n\ttheeta1 = 0\n\n\t#number of examples\n\tm = x.shape[0]\n\tdecay_rate = 0.99\n\t#total error\n\tJ = costFunction(m, theeta0, theeta1, x, y)\n\tcache0, cache1, eps = 0,0,0.000001\n\tm0,m1,v0,v1 = 0,0,0,0 \n\tbeta1 = 0.99\n\tbeta2 = 0.9999\n\tloss = np.empty(iterations)\n\tcount = [i for i in range(1, iterations+1)]\n\n\tfor it in range(1, iterations+1):\n\t\tgrad0 = 1/m * sum([(theeta0 + theeta1*np.asarray([x[i]]) - y[i]) for i in range(m)]) \n\t\tgrad1 = 1/m * sum([(theeta0 + theeta1*np.asarray([x[i]]) - y[i])*np.asarray([x[i]]) for i in range(m)])\n\t\t\n\t\tm0 = beta1*m0 + (1-beta1)*grad0\n\t\tmtheeta0 = m0 / (1-beta1**it)\n\t\tv0 = beta2*v0 + (1-beta2)*(grad0**2)\n\t\tvtheeta0 = v0 / (1-beta2**it)\n\t\t\n\t\tm1 = beta1*m1 + (1-beta1)*grad1\n\t\tmtheeta1 = m1 / (1-beta1**it)\n\t\tv1 = beta2*v1 + (1-beta2)*(grad1**2)\n\t\tvtheeta1 = v1 / (1-beta2**it)\n\n\t\ttheeta0 = theeta0 - (lr * mtheeta0)/(np.sqrt(vtheeta0 + eps))\n\t\ttheeta1 = theeta1 - (lr * mtheeta1)/(np.sqrt(vtheeta1 + eps))\n\t\tloss[it-1] = costFunction(m, theeta0, theeta1, x, y)\n\n\treturn count, loss, theeta0, theeta1\n\nmax_iter = 10000\nalpha = 1\ncount, loss, theta0, theta1 = gd(alpha, x, y, max_iter)\n\nprint('theta0 = ' + str(theta0))\nprint('theta1 = ' + str(theta1))\n\nplt.figure(0)\nplt.scatter(x, y, c = 'red')\nplt.plot(x, theta0 + theta1 * x)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Dataset')\nplt.savefig('output.png')\nplt.show()\n\nplt.figure(1)\nplt.plot(count, loss)\nplt.xlabel('iteration')\nplt.ylabel('Loss')\nplt.title('Loss History')\nplt.savefig('loss.png')\nplt.show()\n
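\n# Sanity check (hedged note): for this single-variable linear fit, the Adam\n# estimate can be compared against the closed-form least-squares solution:\n#   t1 = ((x - x.mean()) * (y - y.mean())).sum() / ((x - x.mean()) ** 2).sum()\n#   t0 = y.mean() - t1 * x.mean()\n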
","sub_path":"adam.py","file_name":"adam.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"229883278","text":"import os\nimport discord\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n    print('Connected to endpoint')\n    print('Logged in as {0.user}'.format(client))\n    print('Hello smyhk. How about a nice game of chess?')\n\n\n@client.event\nasync def on_message(message):\n\n    # we do not want the bot to reply to itself\n    if message.author == client.user:\n        return\n    elif message.content == \"cookie\":\n        await message.channel.send(\":cookie:\")\n    elif message.content.upper().startswith(\"!PING\"):\n        msg = \"{0.author.mention} Pong!\".format(message)\n        await message.channel.send(msg)\n    elif message.content.upper().startswith(\"!SAY\"):\n        # only the user with this ID can execute the command\n        if message.author.id == 351094569309306891:\n            args = message.content.split(\" \")\n            msg = \" \".join(args[1:]).format(message)\n            await message.channel.send(msg)\n        else:\n            await message.channel.send(\"Ah ah ah, you didn't say the magic word!\")\n    elif message.content.upper().startswith(\"!AMIADMIN\"):\n        # only users with this role ID can execute the command\n        if 405241489292263424 in [role.id for role in message.author.roles]:\n            await message.channel.send(\"You are an admin\")\n        else:\n            await message.channel.send(\"You are not an admin\")\n\n\n# Load the bot token from an environment variable\nclient.run(os.environ.get('BOT_TOKEN'))\n","sub_path":"pycord.py","file_name":"pycord.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"402513009","text":"\n# Name:\n#\n# UID:\n#\n# People I interacted with:\n#\n# Resources I used:\n#\n\n\nimport math\n\nfrom itertools import izip, repeat\n\n# PROBLEM 1\n\n# parse the file named fname into a dictionary of the form\n# {'width': int, 'height' : int, 'max' : int, 'pixels' : (int * int * int) list}\ndef parsePPM(fname):\n    with open(fname, \"rb\") as ppm:\n        ppm.readline()\n        width, height = map(int, ppm.readline().strip().split(' '))\n        maxcolor = int(ppm.readline().strip())\n        return {'width': width,\n                'max': maxcolor,\n                'pixels': list(izip(*[iter(map(ord, ppm.read()))]*3)),\n                'height': height}\n\n# a test to make sure you have the right format for your dictionaries\ndef testParsePPM():\n    return parsePPM(\"example.ppm\") == {'width': 2, 'max': 255, 'pixels': [(10, 23, 52), (82, 3, 215), (30, 181, 101), (33, 45, 205), (40, 68, 92), (111, 76, 1)], 'height': 3}\n\n# write the given ppm dictionary as a PPM image file named fname\n# the function should not return anything\ndef unparsePPM(ppm, fname):\n    with open(fname, \"wb\") as out:\n        out.writelines(['P6\\n',\n                        '%s %s\\n' % (ppm['width'], ppm['height']),\n                        '%s\\n' % ppm['max']])\n        [out.write('%s%s%s' % (chr(r),chr(g),chr(b))) for (r,g,b) in ppm['pixels']]
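\n\n\n# NOTE (hedged): parsePPM/unparsePPM above assume the simplest binary P6\n# layout -- a 'P6' magic line, a '<width> <height>' line, a '<max>' line,\n# then width*height raw RGB byte triples -- and do not handle the '#'\n# comment lines that the full PPM spec allows.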
\n\n\n# PROBLEM 2\ndef negate(ppm):\n    return {'width': ppm['width'],\n            'max': ppm['max'],\n            'pixels': [(ppm['max']-r, ppm['max']-g, ppm['max']-b) for (r,g,b) in ppm['pixels']],\n            'height': ppm['height']}\n\n\n# PROBLEM 3\ndef mirrorImage(ppm):\n    return {'width': ppm['width'],\n            'max': ppm['max'],\n            'pixels': sum([t[::-1] for t in izip(*[iter(ppm['pixels'])]*ppm['width'])], ()),\n            'height': ppm['height']}\n\n\n# PROBLEM 4\n\n# produce a greyscale version of the given ppm dictionary.\n# the resulting dictionary should have the same format,\n# except it will only have a single value for each pixel,\n# rather than an RGB triple.\ndef greyscale(ppm):\n    return {'width': ppm['width'],\n            'max': ppm['max'],\n            'pixels': [int(round(0.299 * r + 0.587 * g + 0.114 * b)) for (r,g,b) in ppm['pixels']],\n            'height': ppm['height']}\n\n\n# take a dictionary produced by the greyscale function and write it as a PGM image file named fname\n# the function should not return anything\ndef unparsePGM(pgm, fname):\n    with open(fname, \"wb\") as out:\n        out.writelines(['P5\\n',\n                        '%s %s\\n' % (pgm['width'], pgm['height']),\n                        '%s\\n' % pgm['max']])\n        out.write(''.join(map(chr, pgm['pixels'])))\n\n\n# PROBLEM 5\n\n# gaussian blur code adapted from:\n# http://stackoverflow.com/questions/8204645/implementing-gaussian-blur-how-to-calculate-convolution-matrix-kernel\ndef gaussian(x, mu, sigma):\n    return math.exp( -(((x-mu)/(sigma))**2)/2.0 )\n\ndef gaussianFilter(radius, sigma):\n    # compute the actual kernel elements\n    hkernel = [gaussian(x, radius, sigma) for x in range(2*radius+1)]\n    vkernel = [x for x in hkernel]\n    kernel2d = [[xh*xv for xh in hkernel] for xv in vkernel]\n\n    # normalize the kernel elements\n    kernelsum = sum([sum(row) for row in kernel2d])\n    kernel2d = [[x/kernelsum for x in row] for row in kernel2d]\n    return kernel2d\n\ndef gaussianApply(filt, wind):\n    return tuple(map(sum, zip(*[tuple(map(sum, zip(*[(gauss*r, gauss*g, gauss*b) for (gauss,(r,g,b)) in zip(r1,r2)])))\n                                for (r1,r2) in zip(filt, wind)])))\n\n# blur a given ppm dictionary, returning a new dictionary\n# the blurring uses a gaussian filter produced by the above function\ndef gaussianBlur(ppm, radius, sigma):\n    # obtain the filter\n    gfilter = gaussianFilter(radius, sigma)\n\n    # pad the grid by `radius` rows/columns on each side so that every\n    # (2*radius+1) window is complete and stays centered on its pixel\n    grid = [list(t) for t in list(izip(*[iter(ppm['pixels'])]*ppm['width']))]\n    xgrid = list(repeat(grid[0], radius)) + grid + list(repeat(grid[-1], radius))\n    xgrid = map(list, zip(*xgrid))\n    xgrid = list(repeat(xgrid[0], radius)) + xgrid + list(repeat(xgrid[-1], radius))\n    xgrid = map(list, zip(*xgrid))\n\n    return {'width': ppm['width'],\n            'max': ppm['max'],\n            'pixels': [(int(round(r)), int(round(g)), int(round(b))) for (r,g,b)\n                       in sum([[gaussianApply(gfilter, [row[j:j+(2*radius)+1] for row in xgrid[i:i+(2*radius)+1]])\n                                for (j,_) in enumerate(r)]\n                               for i,r in enumerate(grid)], [])],\n            'height': ppm['height']}\n","sub_path":"HW Solutions/HW 4 - Nov 13/hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"553996363","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport shutil\nimport logging\nimport importlib\n\nimport django\nfrom django.core.management import call_command, find_commands, load_command_class\nfrom django.test import TestCase\nfrom django.utils.six import StringIO, PY3\n\nfrom django_extensions.management.modelviz import use_model, generate_graph_data\nfrom . import force_color_support\n\n\nclass MockLoggingHandler(logging.Handler):\n    \"\"\" Mock logging handler to check for expected logs. 
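\n\n    Usage sketch (hedged; `some_logger` stands for any logging.Logger):\n\n        handler = MockLoggingHandler()\n        some_logger.addHandler(handler)\n        ...  # exercise code that logs\n        assert handler.messages['error'] == ['expected message']\n    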
\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.reset()\n logging.Handler.__init__(self, *args, **kwargs)\n\n def emit(self, record):\n self.messages[record.levelname.lower()].append(record.getMessage())\n\n def reset(self):\n self.messages = {\n 'debug': [],\n 'info': [],\n 'warning': [],\n 'error': [],\n 'critical': [],\n }\n\n\nclass CommandTest(TestCase):\n def test_error_logging(self):\n # Ensure command errors are properly logged and reraised\n from django_extensions.management.base import logger\n logger.addHandler(MockLoggingHandler())\n module_path = \"tests.management.commands.error_raising_command\"\n module = importlib.import_module(module_path)\n error_raising_command = module.Command()\n self.assertRaises(Exception, error_raising_command.execute)\n handler = logger.handlers[0]\n self.assertEqual(len(handler.messages['error']), 1)\n\n\nclass ShowTemplateTagsTests(TestCase):\n def test_some_output(self):\n out = StringIO()\n call_command('show_template_tags', stdout=out)\n output = out.getvalue()\n # Once django_extension is installed during tests it should appear with\n # its templatetags\n self.assertIn('django_extensions', output)\n # let's check at least one\n self.assertIn('truncate_letters', output)\n\n\nclass CreateAppTests(TestCase):\n def test_command(self):\n if django.VERSION[:2] >= (1, 10):\n return\n\n tmpname = \"testapptest\"\n # TODO better temp dir handling\n tmpdir = \"/tmp\"\n tmppath = os.path.join(tmpdir, tmpname)\n self.assertFalse(os.path.isdir(tmppath))\n\n out = StringIO()\n try:\n call_command('create_app', tmpname, parent_path=tmpdir, stdout=out)\n finally:\n if os.path.isdir(tmppath):\n shutil.rmtree(tmppath)\n\n output = out.getvalue()\n self.assertIn(\"Application '%s' created.\" % tmpname, output)\n\n\nclass AdminGeneratorTests(TestCase):\n def test_command(self):\n out = StringIO()\n call_command('admin_generator', 'django_extensions', stdout=out)\n output = out.getvalue()\n self.assertIn(\"class SecretAdmin(admin.ModelAdmin):\", output)\n if PY3:\n self.assertIn(\"list_display = ('id', 'name', 'text')\", output)\n self.assertIn(\"search_fields = ('name',)\", output)\n else:\n self.assertIn(\"list_display = (u'id', u'name', u'text')\", output)\n self.assertIn(\"search_fields = (u'name',)\", output)\n\n\nclass DescribeFormTests(TestCase):\n def test_command(self):\n out = StringIO()\n call_command('describe_form', 'django_extensions.Secret', stdout=out)\n output = out.getvalue()\n self.assertIn(\"class SecretForm(forms.Form):\", output)\n self.assertRegexpMatches(output, \"name = forms.CharField\\(.*max_length=255\")\n self.assertRegexpMatches(output, \"name = forms.CharField\\(.*required=False\")\n self.assertRegexpMatches(output, \"name = forms.CharField\\(.*label=u?'Name'\")\n self.assertRegexpMatches(output, \"text = forms.CharField\\(.*required=False\")\n self.assertRegexpMatches(output, \"text = forms.CharField\\(.*label=u?'Text'\")\n\n\nclass UpdatePermissionsTests(TestCase):\n def test_works(self):\n from django.db import models\n\n class PermModel(models.Model):\n class Meta:\n app_label = 'django_extensions'\n permissions = (('test_permission', 'test_permission'),)\n\n original_stdout = sys.stdout\n out = sys.stdout = StringIO()\n call_command('update_permissions', stdout=out, verbosity=3)\n sys.stdout = original_stdout\n self.assertIn(\"Can change perm model\", out.getvalue())\n\n\nclass CommandSignalTests(TestCase):\n pre = None\n post = None\n\n def test_works(self):\n from django_extensions.management.signals import 
post_command, \\\n pre_command\n from django_extensions.management.commands.show_template_tags import \\\n Command\n\n def pre(sender, **kwargs):\n CommandSignalTests.pre = dict(**kwargs)\n\n def post(sender, **kwargs):\n CommandSignalTests.post = dict(**kwargs)\n\n pre_command.connect(pre, Command)\n post_command.connect(post, Command)\n\n out = StringIO()\n call_command('show_template_tags', stdout=out)\n\n self.assertIn('args', CommandSignalTests.pre)\n self.assertIn('kwargs', CommandSignalTests.pre)\n\n self.assertIn('args', CommandSignalTests.post)\n self.assertIn('kwargs', CommandSignalTests.post)\n self.assertIn('outcome', CommandSignalTests.post)\n\n\nclass CommandClassTests(TestCase):\n def setUp(self):\n management_dir = os.path.join('django_extensions', 'management')\n self.commands = find_commands(management_dir)\n\n def test_load_commands(self):\n \"\"\"Try to load every management command to catch exceptions.\"\"\"\n try:\n for command in self.commands:\n load_command_class('django_extensions', command)\n except Exception as e:\n self.fail(\"Can't load command class of {0}\\n{1}\".format(command, e))\n\n\nclass GraphModelsTests(TestCase):\n \"\"\"\n Tests for the `graph_models` management command.\n \"\"\"\n def test_use_model(self):\n include_models = [\n 'NoWildcardInclude',\n 'Wildcard*InsideInclude',\n '*WildcardPrefixInclude',\n 'WildcardSuffixInclude*',\n '*WildcardBothInclude*'\n ]\n exclude_models = [\n 'NoWildcardExclude',\n 'Wildcard*InsideExclude',\n '*WildcardPrefixExclude',\n 'WildcardSuffixExclude*',\n '*WildcardBothExclude*'\n ]\n # Any model name should be used if neither include or exclude\n # are defined.\n self.assertTrue(use_model(\n 'SomeModel',\n None,\n None\n ))\n # Any model name should be allowed if `*` is in `include_models`.\n self.assertTrue(use_model(\n 'SomeModel',\n ['OtherModel', '*', 'Wildcard*Model'],\n None\n ))\n # No model name should be allowed if `*` is in `exclude_models`.\n self.assertFalse(use_model(\n 'SomeModel',\n None,\n ['OtherModel', '*', 'Wildcard*Model']\n ))\n # Some tests with the `include_models` defined above.\n self.assertFalse(use_model(\n 'SomeModel',\n include_models,\n None\n ))\n self.assertTrue(use_model(\n 'NoWildcardInclude',\n include_models,\n None\n ))\n self.assertTrue(use_model(\n 'WildcardSomewhereInsideInclude',\n include_models,\n None\n ))\n self.assertTrue(use_model(\n 'MyWildcardPrefixInclude',\n include_models,\n None\n ))\n self.assertTrue(use_model(\n 'WildcardSuffixIncludeModel',\n include_models,\n None\n ))\n self.assertTrue(use_model(\n 'MyWildcardBothIncludeModel',\n include_models,\n None\n ))\n # Some tests with the `exclude_models` defined above.\n self.assertTrue(use_model(\n 'SomeModel',\n None,\n exclude_models\n ))\n self.assertFalse(use_model(\n 'NoWildcardExclude',\n None,\n exclude_models\n ))\n self.assertFalse(use_model(\n 'WildcardSomewhereInsideExclude',\n None,\n exclude_models\n ))\n self.assertFalse(use_model(\n 'MyWildcardPrefixExclude',\n None,\n exclude_models\n ))\n self.assertFalse(use_model(\n 'WildcardSuffixExcludeModel',\n None,\n exclude_models\n ))\n self.assertFalse(use_model(\n 'MyWildcardBothExcludeModel',\n None,\n exclude_models\n ))\n\n def test_no_models_dot_py(self):\n data = generate_graph_data(['testapp_with_no_models_file'])\n self.assertEqual(len(data['graphs']), 1)\n\n model_name = data['graphs'][0]['models'][0]['name']\n self.assertEqual(model_name, 'TeslaCar')\n\n\nclass ShowUrlsTests(TestCase):\n \"\"\"\n Tests for the `show_urls` management command.\n 
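\n    Both cases run under force_color_support and assert on the presence\n    (or absence, with --no-color) of ANSI escape sequences in the output.\n    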
\"\"\"\n    def test_color(self):\n        with force_color_support:\n            out = StringIO()\n            call_command('show_urls', stdout=out)\n            self.output = out.getvalue()\n            self.assertIn('\\x1b', self.output)\n\n    def test_no_color(self):\n        with force_color_support:\n            out = StringIO()\n            call_command('show_urls', '--no-color', stdout=out)\n            self.output = out.getvalue()\n            self.assertNotIn('\\x1b', self.output)\n","sub_path":"tests/test_management_command.py","file_name":"test_management_command.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"89403856","text":"from __future__ import print_function, division\nimport os, sys\nproject_rootdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nsys.path.insert(0, project_rootdir)\nsys.path.append('core')\n\nimport argparse\nimport os\nimport cv2\nimport time\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom torch.utils.data import DataLoader\nfrom exp_pose_mdepth_kitti_eigen.dataset_kitti_eigen import KITTI_eigen\nfrom mDnet import MDepthNet\n\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.utils.data as data\nfrom PIL import Image, ImageDraw\nfrom core.utils.flow_viz import flow_to_image\nfrom core.utils.utils import InputPadder, forward_interpolate, tensor2disp, tensor2rgb, vls_ins\nfrom posenet import Posenet\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nfrom torch.autograd import Variable\n\nfrom tqdm import tqdm\n\ntry:\n    from torch.cuda.amp import GradScaler\nexcept:\n    # dummy GradScaler for PyTorch < 1.6\n    class GradScaler:\n        def __init__(self):\n            pass\n\n        def scale(self, loss):\n            return loss\n\n        def unscale_(self, optimizer):\n            pass\n\n        def step(self, optimizer):\n            optimizer.step()\n\n        def update(self):\n            pass\n\n# exclude extremely large displacements\nMAX_FLOW = 400\nSUM_FREQ = 100\nVAL_FREQ = 5000\n\nclass SSIM(nn.Module):\n    \"\"\"Layer to compute the SSIM loss between a pair of images\n    \"\"\"\n    def __init__(self):\n        super(SSIM, self).__init__()\n        self.mu_x_pool = nn.AvgPool2d(3, 1)\n        self.mu_y_pool = nn.AvgPool2d(3, 1)\n        self.sig_x_pool = nn.AvgPool2d(3, 1)\n        self.sig_y_pool = nn.AvgPool2d(3, 1)\n        self.sig_xy_pool = nn.AvgPool2d(3, 1)\n\n        self.refl = nn.ReflectionPad2d(1)\n\n        self.C1 = 0.01 ** 2\n        self.C2 = 0.03 ** 2\n\n    def forward(self, x, y):\n        x = self.refl(x)\n        y = self.refl(y)\n\n        mu_x = self.mu_x_pool(x)\n        mu_y = self.mu_y_pool(y)\n\n        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2\n        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2\n        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y\n\n        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)\n        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)\n\n        return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)\n\ndef count_parameters(model):\n    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef fetch_optimizer(args, model):\n    \"\"\" Create the optimizer and learning rate scheduler \"\"\"\n    optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)\n\n    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps + 100,\n                                              pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')\n\n    return optimizer, 
scheduler\n\nclass Logger:\n def __init__(self, logpath):\n self.logpath = logpath\n self.writer = None\n\n def create_summarywriter(self):\n if self.writer is None:\n self.writer = SummaryWriter(self.logpath)\n\n def write_vls(self, data_blob, outputs, flowselector, reprojselector, step):\n img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)\n img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)\n insmap = data_blob['insmap'][0].squeeze().numpy()\n\n figmask_flow = tensor2disp(flowselector, vmax=1, viewind=0)\n figmask_reprojection = tensor2disp(reprojselector, vmax=1, viewind=0)\n insvls = vls_ins(img1, insmap)\n\n depthpredvls = tensor2disp(1 / outputs[('mDepth', 0)], vmax=0.15, viewind=0)\n depthgtvls = tensor2disp(1 / data_blob['depthmap'], vmax=0.15, viewind=0)\n flowvls = flow_to_image(outputs[('flowpred', 0)][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=10)\n imgrecon = tensor2rgb(outputs[('reconImg', 0)], ind=0)\n\n img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)\n img_val_mid1 = np.concatenate([np.array(figmask_flow), np.array(figmask_reprojection)], axis=1)\n img_val_mid2 = np.concatenate([np.array(depthpredvls), np.array(depthgtvls)], axis=1)\n img_val_mid3 = np.concatenate([np.array(imgrecon), np.array(flowvls)], axis=1)\n img_val = np.concatenate([np.array(img_val_up), np.array(img_val_mid1), np.array(img_val_mid2), np.array(img_val_mid3)], axis=0)\n self.writer.add_image('predvls', (torch.from_numpy(img_val).float() / 255).permute([2, 0, 1]), step)\n\n X = self.vls_sampling(img1, img2, data_blob['depthvls'], outputs)\n self.writer.add_image('X', (torch.from_numpy(X).float() / 255).permute([2, 0, 1]), step)\n\n def vls_sampling(self, img1, img2, depthgt, outputs):\n depthgtnp = depthgt[0].squeeze().cpu().numpy()\n\n h, w, _ = img1.shape\n xx, yy = np.meshgrid(range(w), range(h), indexing='xy')\n selector = (depthgtnp > 0)\n\n flowx = outputs[('flowpred', 0)][0, 0].detach().cpu().numpy()\n flowy = outputs[('flowpred', 0)][0, 1].detach().cpu().numpy()\n flowxf = flowx[selector]\n flowyf = flowy[selector]\n\n xxf = xx[selector]\n yyf = yy[selector]\n df = depthgtnp[selector]\n\n cm = plt.get_cmap('magma')\n rndcolor = cm(1 / df / 0.15)[:, 0:3]\n\n fig = plt.figure(figsize=(16, 9))\n canvas = FigureCanvasAgg(fig)\n fig.add_subplot(2, 1, 1)\n plt.scatter(xxf, yyf, 3, rndcolor)\n plt.imshow(img1)\n\n fig.add_subplot(2, 1, 2)\n plt.scatter(xxf + flowxf, yyf + flowyf, 3, rndcolor)\n plt.imshow(img2)\n\n fig.tight_layout() # Or equivalently, \"plt.tight_layout()\"\n canvas.draw()\n buf = canvas.buffer_rgba()\n plt.close()\n X = np.asarray(buf)\n return X\n\n def write_vls_eval(self, data_blob, outputs, tagname, step):\n img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)\n img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)\n insmap = data_blob['insmap'][0].squeeze().numpy()\n insvls = vls_ins(img1, insmap)\n\n depthpredvls = tensor2disp(1 / outputs[('mDepth', 0)], vmax=0.15, viewind=0)\n flowvls = flow_to_image(outputs[('flowpred', 0)][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=10)\n imgrecon = tensor2rgb(outputs[('reconImg', 0)], ind=0)\n\n img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)\n img_val_mid2 = np.concatenate([np.array(depthpredvls), np.array(flowvls)], axis=1)\n img_val_mid3 = np.concatenate([np.array(img1), np.array(imgrecon)], axis=1)\n img_val = np.concatenate([np.array(img_val_up), np.array(img_val_mid2), 
np.array(img_val_mid3)], axis=0)\n        self.writer.add_image('{}_predvls'.format(tagname), (torch.from_numpy(img_val).float() / 255).permute([2, 0, 1]), step)\n\n        X = self.vls_sampling(img1, img2, data_blob['depthvls'], outputs)\n        self.writer.add_image('{}_X'.format(tagname), (torch.from_numpy(X).float() / 255).permute([2, 0, 1]), step)\n\n    def write_dict(self, results, step):\n        for key in results:\n            self.writer.add_scalar(key, results[key], step)\n\n    def close(self):\n        self.writer.close()\n\n@torch.no_grad()\ndef validate_kitti(model, args, eval_loader, logger, group, total_steps):\n    \"\"\" Perform validation using the KITTI-2015 (train) split \"\"\"\n    model.eval()\n    gpu = args.gpu\n    eval_epe = torch.zeros(2).cuda(device=gpu)\n    eval_out = torch.zeros(2).cuda(device=gpu)\n\n    for val_id, data_blob in enumerate(tqdm(eval_loader)):\n        image1 = data_blob['img1'].cuda(gpu) / 255.0\n        image2 = data_blob['img2'].cuda(gpu) / 255.0\n        intrinsic = data_blob['intrinsic'].cuda(gpu)\n        insmap = data_blob['insmap'].cuda(gpu)\n        flowgt = data_blob['flowmap'].cuda(gpu)\n\n        outputs = model(image1, image2, intrinsic, insmap)\n\n        flow_pr = outputs[('flowpred', 0)]\n        selector = (((flowgt[:, 0] == 0) * (flowgt[:, 1] == 0)) == 0).float().unsqueeze(1)\n\n        epe = torch.sum((flow_pr - flowgt)**2, dim=1, keepdim=True).sqrt()\n        mag = (torch.sum(flowgt**2, dim=1, keepdim=True) + 1e-10).sqrt()\n\n        out = ((epe > 3.0) * ((epe / (mag + 1e-10)) > 0.05) * selector).float()\n\n        eval_out[0] += torch.sum(out)\n        eval_out[1] += torch.sum(selector)\n\n        eval_epe[0] += torch.sum(torch.sum(epe * selector, dim=[1, 2, 3]) / torch.sum(selector, dim=[1, 2, 3]))\n        eval_epe[1] += image1.shape[0]\n\n        if not(logger is None) and np.mod(val_id, 20) == 0:\n            seq, frmidx = data_blob['tag'][0].split(' ')\n            tag = \"{}_{}\".format(seq.split('/')[-1], frmidx)\n            logger.write_vls_eval(data_blob, outputs, tag, total_steps)\n\n    if args.distributed:\n        dist.all_reduce(tensor=eval_out, op=dist.ReduceOp.SUM, group=group)\n        dist.all_reduce(tensor=eval_epe, op=dist.ReduceOp.SUM, group=group)\n\n    if args.gpu == 0:\n        eval_out[0] = eval_out[0] / eval_out[1]\n        eval_epe[0] = eval_epe[0] / eval_epe[1]\n\n        print(\"in {} eval samples: Out: {:7.3f}, Epe: {:7.3f}\".format(eval_epe[1].item(), eval_out[0].item(), eval_epe[0].item()))\n        return {'out': float(eval_out[0].item()), 'epe': float(eval_epe[0].item())}\n    else:\n        return None\n\ndef read_splits():\n    split_root = os.path.join(project_rootdir, 'exp_pose_mdepth_kitti_eigen/splits')\n    train_entries = [x.rstrip('\\n') for x in open(os.path.join(split_root, 'train_files.txt'), 'r')]\n    evaluation_entries = [x.rstrip('\\n') for x in open(os.path.join(split_root, 'test_files.txt'), 'r')]\n    return train_entries, evaluation_entries\n
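\n# NOTE (hedged): validate_kitti above mirrors the standard KITTI flow\n# metrics -- 'epe' is the mean endpoint error over valid pixels, and 'out'\n# flags a pixel as an outlier when its EPE exceeds both 3px and 5% of the\n# ground-truth flow magnitude.\n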
\nclass PoseMDNet(nn.Module):\n    def __init__(self, args):\n        super(PoseMDNet, self).__init__()\n        self.args = args\n        self.deptmodel = MDepthNet(num_layers=args.num_layers, args=args)\n        self.posemodel = Posenet(num_layers=args.num_layers, args=args)\n        self.pts2ddict = dict()\n\n    def forward(self, img1, img2, intrinsic, insmap):\n        bz, _, h, w = img1.shape\n\n        outputs = dict()\n        outputs.update(self.deptmodel(img1))\n\n        self_ang, self_tdir, self_tscale, obj_pose = self.posemodel(img1, img2)\n        selfRT = self.posemodel.get_selfpose(selfang=self_ang, selftdir=self_tdir, selfscale=self_tscale)\n        outputs['selfpose'] = selfRT\n\n        maxinsnum = insmap.max().item() + 1\n        insnum = self.posemodel.eppcompress(insmap, (insmap > -1).float().squeeze(1).unsqueeze(-1).unsqueeze(-1), maxinsnum)\n        intrinsic_ex = intrinsic.unsqueeze(1).expand([-1, maxinsnum, -1, -1])\n\n        # cache the homogeneous pixel grid per (batch, height, width)\n        infkey = \"{}_{}_{}\".format(bz, h, w)\n        if infkey not in self.pts2ddict.keys():\n            xx, yy = np.meshgrid(range(w), range(h), indexing='xy')\n            xx = torch.from_numpy(xx).float().unsqueeze(0).expand([bz, -1, -1]).cuda(img1.device)\n            yy = torch.from_numpy(yy).float().unsqueeze(0).expand([bz, -1, -1]).cuda(img1.device)\n            ones = torch.ones_like(xx)\n            self.pts2ddict[infkey] = (xx, yy, ones)\n        xx, yy, ones = self.pts2ddict[infkey]\n\n        # per decoder scale: average the predicted motion per instance, lift\n        # pixels with the predicted depth, reproject into the second frame,\n        # and derive both optical flow and a warped reconstruction of img1\n        for k in range(4):\n            obj_scalep = F.interpolate(obj_pose[('obj_scale', k)], [h, w], mode='bilinear', align_corners=False)\n            obj_angp = F.interpolate(obj_pose[('obj_angle', k)], [h, w], mode='bilinear', align_corners=False)\n\n            obj_scalep_cps = self.posemodel.eppcompress(insmap, obj_scalep.squeeze(1).unsqueeze(-1).unsqueeze(-1), maxinsnum)\n            obj_scalep_cps = obj_scalep_cps / (insnum + 1e-10)\n\n            obj_angp_cps = self.posemodel.eppcompress(insmap, obj_angp.squeeze(1).unsqueeze(-1).unsqueeze(-1), maxinsnum)\n            obj_angp_cps = obj_angp_cps / (insnum + 1e-10)\n\n            predposes = self.posemodel.mvinfo2objpose(obj_angp_cps, obj_scalep_cps, selfRT)\n            pM = intrinsic_ex @ predposes @ torch.inverse(intrinsic_ex)\n            pMImg = self.posemodel.eppinflate(insmap, pM)\n\n            mDepth = F.interpolate(outputs['mDepth', k], [h, w], align_corners=False, mode='bilinear').squeeze(1)\n            pts3d = torch.stack([xx * mDepth, yy * mDepth, mDepth, ones], dim=-1).unsqueeze(-1)\n            pts2dp = pMImg @ pts3d\n\n            pxx, pyy, pzz, _ = torch.split(pts2dp, 1, dim=3)\n\n            # keep the sign while clamping the projective depth away from zero\n            sign = pzz.sign()\n            sign[sign == 0] = 1\n            pzz = torch.clamp(torch.abs(pzz), min=1e-20) * sign\n\n            pxx = (pxx / pzz).squeeze(-1).squeeze(-1)\n            pyy = (pyy / pzz).squeeze(-1).squeeze(-1)\n\n            flowx = pxx - xx\n            flowy = pyy - yy\n            outputs[('flowpred', k)] = torch.stack([flowx, flowy], dim=1)\n\n            pxx = (pxx / w - 0.5) * 2\n            pyy = (pyy / h - 0.5) * 2\n            ptssample = torch.stack([pxx, pyy], dim=-1)\n            reconstructedimg = F.grid_sample(img2, ptssample, mode='bilinear', align_corners=False, padding_mode='zeros')\n            outputs[('reconImg', k)] = reconstructedimg\n\n        return outputs\n\ndef get_depth_loss(depthgt, outputs):\n    _, _, h, w = depthgt.shape\n    selector = (depthgt > 0).float()\n    depthloss = 0\n    for k in range(4):\n        mDepthpred = F.interpolate(outputs[('mDepth', k)], [h, w], mode='bilinear', align_corners=False)\n        depthloss += torch.sum(torch.abs(mDepthpred - depthgt) * selector) / (torch.sum(selector) + 1)\n\n    depthloss = depthloss / 4\n    return depthloss\n\ndef get_pose_loss(selfposegt, outputs):\n    poseloss = torch.mean(torch.abs(outputs['selfpose'] - selfposegt))\n    return poseloss\n\ndef get_flow_loss(flowgt, outputs):\n    selector = (((flowgt[:, 0] == 0) * (flowgt[:, 1] == 0)) == 0).float().unsqueeze(1)\n    flowloss = 0\n    for k in range(4):\n        flowloss += torch.sum(torch.sum(torch.abs(outputs[('flowpred', k)] - flowgt), dim=1, keepdim=True) * selector) / (torch.sum(selector) + 1)\n    flowloss = flowloss / 4\n    return flowloss, selector\n\ndef get_reprojection_loss(img1, outputs, ssim):\n    reprojloss = 0\n    selector = (outputs[('reconImg', 0)].sum(dim=1, keepdim=True) != 0).float()\n    for k in range(4):\n        ssimloss = ssim(outputs[('reconImg', k)], img1).mean(dim=1, keepdim=True)\n        l1_loss = torch.abs(outputs[('reconImg', k)] - img1).mean(dim=1, keepdim=True)\n        reprojectionloss = 0.85 * ssimloss + 0.15 * l1_loss\n        reprojloss += (reprojectionloss * selector).sum() / (selector.sum() + 1)\n    reprojloss = reprojloss / 4\n    return reprojloss, selector\n\ndef 
train(gpu, ngpus_per_node, args):\n print(\"Using GPU %d for training\" % gpu)\n args.gpu = gpu\n\n if args.distributed:\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=ngpus_per_node, rank=args.gpu)\n\n model = PoseMDNet(args=args)\n if args.distributed:\n torch.cuda.set_device(args.gpu)\n args.batch_size = int(args.batch_size / ngpus_per_node)\n model = nn.SyncBatchNorm.convert_sync_batchnorm(module=model)\n model = model.to(f'cuda:{args.gpu}')\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True, output_device=args.gpu)\n else:\n model = torch.nn.DataParallel(model)\n model.cuda()\n\n ssim = SSIM()\n\n logroot = os.path.join(args.logroot, args.name)\n print(\"Parameter Count: %d, saving location: %s\" % (count_parameters(model), logroot))\n\n if args.restore_ckpt is not None:\n print(\"=> loading checkpoint '{}'\".format(args.restore_ckpt))\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.restore_ckpt, map_location=loc)\n model.load_state_dict(checkpoint, strict=False)\n\n model.train()\n\n train_entries, evaluation_entries = read_splits()\n\n train_dataset = KITTI_eigen(root=args.dataset_root, inheight=args.inheight, inwidth=args.inwidth, entries=train_entries,\n depth_root=args.depth_root, depthvls_root=args.depthvlsgt_root, ins_root=args.ins_root, istrain=True, muteaug=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True, num_workers=int(args.num_workers / ngpus_per_node), drop_last=True, sampler=train_sampler)\n\n eval_dataset = KITTI_eigen(root=args.dataset_root, inheight=args.evalheight, inwidth=args.evalwidth, entries=evaluation_entries, depth_root=args.depth_root, depthvls_root=args.depthvlsgt_root, ins_root=args.ins_root, istrain=False)\n eval_sampler = torch.utils.data.distributed.DistributedSampler(eval_dataset) if args.distributed else None\n eval_loader = data.DataLoader(eval_dataset, batch_size=args.batch_size, pin_memory=True, num_workers=3, drop_last=True, sampler=eval_sampler)\n\n print(\"Training splits contain %d images while test splits contain %d images\" % (train_dataset.__len__(), eval_dataset.__len__()))\n\n if args.distributed:\n group = dist.new_group([i for i in range(ngpus_per_node)])\n\n optimizer, scheduler = fetch_optimizer(args, model)\n\n total_steps = 0\n\n if args.gpu == 0:\n logger = Logger(logroot)\n logger_evaluation = Logger(os.path.join(args.logroot, 'evaluation_eigen_background', args.name))\n logger.create_summarywriter()\n logger_evaluation.create_summarywriter()\n\n VAL_FREQ = 5000\n epoch = 0\n maxout = 100\n\n st = time.time()\n should_keep_training = True\n while should_keep_training:\n train_sampler.set_epoch(epoch)\n for i_batch, data_blob in enumerate(train_loader):\n optimizer.zero_grad()\n\n image1 = data_blob['img1'].cuda(gpu) / 255.0\n image2 = data_blob['img2'].cuda(gpu) / 255.0\n rel_pose = data_blob['rel_pose'].cuda(gpu)\n intrinsic = data_blob['intrinsic'].cuda(gpu)\n insmap = data_blob['insmap'].cuda(gpu)\n depthgt = data_blob['depthmap'].cuda(gpu)\n flowgt = data_blob['flowmap'].cuda(gpu)\n\n outputs = model(image1, image2, intrinsic, insmap)\n depthloss = get_depth_loss(depthgt, outputs)\n poseloss = get_pose_loss(rel_pose, outputs)\n flowloss, flowselector = get_flow_loss(flowgt, outputs)\n ssimloss, reprojselector = get_reprojection_loss(image1, outputs, 
ssim)\n\n metrics = dict()\n metrics['depthloss'] = depthloss.item()\n metrics['poseloss'] = poseloss.item()\n metrics['flowloss'] = flowloss.item()\n metrics['ssimloss'] = ssimloss.item()\n\n loss = depthloss + poseloss + flowloss + ssimloss\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n\n optimizer.step()\n scheduler.step()\n\n if args.gpu == 0:\n logger.write_dict(metrics, step=total_steps)\n if total_steps % SUM_FREQ == 0:\n dr = time.time() - st\n resths = (args.num_steps - total_steps) * dr / (total_steps + 1) / 60 / 60\n print(\"Step: %d, rest hour: %f, depthloss: %f, poseloss: %f, flowloss: %f, ssimloss: %f\" % (total_steps, resths, depthloss.item(), poseloss.item(), flowloss.item(), ssimloss.item()))\n logger.write_vls(data_blob, outputs, flowselector, reprojselector, total_steps)\n\n if total_steps % VAL_FREQ == 1:\n if args.gpu == 0:\n results = validate_kitti(model.module, args, eval_loader, logger, group, total_steps)\n else:\n results = validate_kitti(model.module, args, eval_loader, None, group, None)\n\n if args.gpu == 0:\n logger_evaluation.write_dict(results, total_steps)\n if results['out'] < maxout:\n maxout = results['out']\n PATH = os.path.join(logroot, 'minout.pth')\n torch.save(model.state_dict(), PATH)\n print(\"model saved to %s\" % PATH)\n\n model.train()\n\n total_steps += 1\n\n if total_steps > args.num_steps:\n should_keep_training = False\n break\n epoch = epoch + 1\n\n if args.gpu == 0:\n logger.close()\n PATH = os.path.join(logroot, 'final.pth')\n torch.save(model.state_dict(), PATH)\n\n return PATH\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', default='raft', help=\"name your experiment\")\n parser.add_argument('--stage', help=\"determines which dataset to use for training\")\n parser.add_argument('--restore_ckpt', help=\"restore checkpoint\")\n\n parser.add_argument('--lr', type=float, default=0.00002)\n parser.add_argument('--num_steps', type=int, default=100000)\n parser.add_argument('--batch_size', type=int, default=6)\n parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])\n parser.add_argument('--inheight', type=int, default=288)\n parser.add_argument('--inwidth', type=int, default=960)\n parser.add_argument('--evalheight', type=int, default=288)\n parser.add_argument('--evalwidth', type=int, default=1216)\n parser.add_argument('--maxinsnum', type=int, default=20)\n parser.add_argument('--min_depth_pred', type=float, default=1)\n parser.add_argument('--max_depth_pred', type=float, default=85)\n parser.add_argument('--min_depth_eval', type=float, default=1e-3)\n parser.add_argument('--max_depth_eval', type=float, default=80)\n\n parser.add_argument('--tscale_range', type=float, default=3)\n parser.add_argument('--objtscale_range', type=float, default=10)\n parser.add_argument('--angx_range', type=float, default=0.03)\n parser.add_argument('--angy_range', type=float, default=0.06)\n parser.add_argument('--angz_range', type=float, default=0.01)\n parser.add_argument('--num_layers', type=int, default=50)\n\n parser.add_argument('--wdecay', type=float, default=.00005)\n parser.add_argument('--epsilon', type=float, default=1e-8)\n parser.add_argument('--clip', type=float, default=1.0)\n parser.add_argument('--dropout', type=float, default=0.0)\n parser.add_argument('--add_noise', action='store_true')\n parser.add_argument('--dataset_root', type=str)\n parser.add_argument('--semantics_root', type=str)\n parser.add_argument('--depth_root', 
type=str)\n parser.add_argument('--depthvlsgt_root', type=str)\n parser.add_argument('--ins_root', type=str)\n parser.add_argument('--logroot', type=str)\n parser.add_argument('--num_workers', type=int, default=12)\n\n parser.add_argument('--distributed', default=True, type=bool)\n parser.add_argument('--dist_url', type=str, help='url used to set up distributed training', default='tcp://127.0.0.1:1234')\n parser.add_argument('--dist_backend', type=str, help='distributed backend', default='nccl')\n\n args = parser.parse_args()\n\n torch.manual_seed(1234)\n np.random.seed(1234)\n\n if not os.path.isdir(os.path.join(args.logroot, args.name)):\n os.makedirs(os.path.join(args.logroot, args.name), exist_ok=True)\n os.makedirs(os.path.join(args.logroot, 'evaluation', args.name), exist_ok=True)\n\n torch.cuda.empty_cache()\n\n ngpus_per_node = torch.cuda.device_count()\n\n if args.distributed:\n args.world_size = ngpus_per_node\n mp.spawn(train, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n train(args.gpu, ngpus_per_node, args)","sub_path":"exp_pose_mdepth_kitti_eigen/train/train_pose_mdepth_kitti_eigen.py","file_name":"train_pose_mdepth_kitti_eigen.py","file_ext":"py","file_size_in_byte":23798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471214678","text":"import pytest\nimport pytest_subprocess\nimport subprocess\n\nimport connaisseur.sigstore_validator as sigstore_validator\nfrom connaisseur.exceptions import (\n NotFoundException,\n ValidationError,\n CosignError,\n CosignTimeout,\n UnexpectedCosignData,\n)\n\ncosign_payload = '{\"Critical\":{\"Identity\":{\"docker-reference\":\"\"},\"Image\":{\"Docker-manifest-digest\":\"sha256:c5327b291d702719a26c6cf8cc93f72e7902df46547106a9930feda2c002a4a7\"},\"Type\":\"cosign container signature\"},\"Optional\":null}'\ncosign_multiline_payload = \"\"\"\n{\"Critical\":{\"Identity\":{\"docker-reference\":\"\"},\"Image\":{\"Docker-manifest-digest\":\"sha256:2f6d89c49ad745bfd5d997f9b2d253329323da4c500c7fe343e068c0382b8df4\"},\"Type\":\"cosign container signature\"},\"Optional\":null}\n{\"Critical\":{\"Identity\":{\"docker-reference\":\"\"},\"Image\":{\"Docker-manifest-digest\":\"sha256:2f6d89c49ad745bfd5d997f9b2d253329323da4c500c7fe343e068c0382b8df4\"},\"Type\":\"cosign container signature\"},\"Optional\":{\"foo\":\"bar\"}}\n\"\"\"\ncosign_payload_unexpected_json_format = '{\"Important\":{\"Identity\":{\"docker-reference\":\"\"},\"Image\":{\"Docker-manifest-digest\":\"sha256:c5327b291d702719a26c6cf8cc93f72e7902df46547106a9930feda2c002a4a7\"},\"Type\":\"cosign container signature\"},\"Optional\":null}'\ncosign_payload_unexpected_digest_pattern = '{\"Critical\":{\"Identity\":{\"docker-reference\":\"\"},\"Image\":{\"Docker-manifest-digest\":\"sha512:c5327b291d702719a26c6cf8cc93f72e7902df46547106a9930feda2c002a4a7\"},\"Type\":\"cosign container signature\"},\"Optional\":null}'\n\ncosign_nonjson_payload = \"This is not json.\"\ncosign_combined_payload = \"{}\\n{}\".format(cosign_payload, cosign_nonjson_payload)\n\nexample_pubkey = \"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE6uuXbZhEfTYb4Mnb/LdrtXKTIIbzNBp8mwriocbaxXxzquvbZpv4QtOTPoIw+0192MW9dWlSVaQPJd7IaiZIIQ==\"\n\nwith open(\"tests/data/cosign_error_wrong_key.txt\", \"r\") as readfile:\n cosign_error_message_wrong_pubkey = readfile.read()\n\nwith open(\"tests/data/cosign_error_no_data.txt\", \"r\") as readfile:\n cosign_error_message_no_cosign_signature = readfile.read()\n\ncosign_stderr_at_success = \"\"\"\nThe following checks were performed 
on each of these signatures:\n - The cosign claims were validated\n - The signatures were verified against the specified public key\n - Any certificates were verified against the Fulcio roots.\n\"\"\"\n\n\n@pytest.fixture()\ndef mock_add_kill_fake_process(monkeypatch):\n def mock_kill(self):\n return\n\n pytest_subprocess.core.FakePopen.kill = mock_kill\n\n\n@pytest.fixture()\ndef mock_invoke_cosign(mocker, status_code, stdout, stderr):\n mocker.patch(\n \"connaisseur.sigstore_validator.invoke_cosign\",\n return_value=(status_code, stdout, stderr),\n )\n\n\n@pytest.mark.parametrize(\n \"status_code, stdout, stderr, image, output\",\n [\n (\n 0,\n cosign_payload,\n cosign_stderr_at_success,\n \"testimage:v1\",\n [\"c5327b291d702719a26c6cf8cc93f72e7902df46547106a9930feda2c002a4a7\"],\n ),\n (\n 0,\n cosign_combined_payload,\n cosign_stderr_at_success,\n \"testimage:v1\",\n [\"c5327b291d702719a26c6cf8cc93f72e7902df46547106a9930feda2c002a4a7\"],\n ),\n (\n 0,\n cosign_multiline_payload,\n cosign_stderr_at_success,\n \"\",\n [\n \"2f6d89c49ad745bfd5d997f9b2d253329323da4c500c7fe343e068c0382b8df4\",\n \"2f6d89c49ad745bfd5d997f9b2d253329323da4c500c7fe343e068c0382b8df4\",\n ],\n ),\n ],\n)\ndef test_get_cosign_validated_digests(\n mock_invoke_cosign, mocker, status_code, stdout, stderr, image, output\n):\n mock_info_log = mocker.patch(\"logging.info\")\n digests = sigstore_validator.get_cosign_validated_digests(image, \"sth\")\n mock_info_log.assert_has_calls(\n [\n mocker.call(\n \"COSIGN output for image: %s; RETURNCODE: %s; STDOUT: %s; STDERR: %s\",\n image,\n status_code,\n stdout,\n stderr,\n )\n ]\n )\n if stdout == (cosign_nonjson_payload or cosign_combined_payload):\n mock_info_log.assert_has_calls(\n [\n mocker.call(\n \"non-json signature data from cosign: %s\", cosign_nonjson_payload\n )\n ]\n )\n assert digests == output\n\n\n@pytest.mark.parametrize(\n \"status_code, stdout, stderr, image\",\n [\n (1, \"\", cosign_error_message_wrong_pubkey, \"testimage:v1\"),\n ],\n)\ndef test_get_cosign_validated_digests_validation_error(\n mock_invoke_cosign, status_code, stdout, stderr, image\n):\n with pytest.raises(ValidationError) as err:\n sigstore_validator.get_cosign_validated_digests(image, \"sth\")\n assert \"failed to verify signature of trust data.\" in str(err.value)\n\n\n@pytest.mark.parametrize(\n \"status_code, stdout, stderr, image, error_message\",\n [\n (\n 0,\n cosign_payload_unexpected_json_format,\n cosign_stderr_at_success,\n \"testimage:v1\",\n \"could not retrieve valid and unambiguous digest from data received by cosign: KeyError: 'Critical'\",\n ),\n (\n 0,\n cosign_payload_unexpected_digest_pattern,\n cosign_stderr_at_success,\n \"testimage:v1\",\n \"could not retrieve valid and unambiguous digest from data received by cosign: \"\n \"Exception: digest 'sha512:c5327b291d702719a26c6cf8cc93f72e7902df46547106a9930feda2c002a4a7' \"\n \"does not match expected digest pattern.\",\n ),\n (\n 0,\n cosign_nonjson_payload,\n cosign_stderr_at_success,\n \"testimage:v1\",\n \"could not extract any digest from data received by cosign \"\n \"despite successful image verification.\",\n ),\n ],\n)\ndef test_get_cosign_validated_digests_unexpected_cosign_data_error(\n mock_invoke_cosign, mocker, status_code, stdout, stderr, image, error_message\n):\n with pytest.raises(UnexpectedCosignData) as err:\n sigstore_validator.get_cosign_validated_digests(image, \"sth\")\n assert error_message in str(err.value)\n\n\n@pytest.mark.parametrize(\n \"status_code, stdout, stderr, image\",\n [\n (1, \"\", 
cosign_error_message_no_cosign_signature, \"testimage:v1\"),\n    ],\n)\ndef test_get_cosign_validated_digests_not_found_exception(\n    mock_invoke_cosign, status_code, stdout, stderr, image\n):\n    with pytest.raises(NotFoundException) as err:\n        sigstore_validator.get_cosign_validated_digests(image, \"sth\")\n    assert 'no trust data for image \"testimage:v1\"' in str(err.value)\n\n\n@pytest.mark.parametrize(\n    \"status_code, stdout, stderr, image\",\n    [\n        (1, \"\", \"Hm. Something weird happened.\", \"testimage:v1\"),\n    ],\n)\ndef test_get_cosign_validated_digests_cosign_error(\n    mock_invoke_cosign, status_code, stdout, stderr, image\n):\n    with pytest.raises(CosignError) as err:\n        sigstore_validator.get_cosign_validated_digests(image, \"sth\")\n    assert (\n        'unexpected cosign exception for image \"testimage:v1\": Hm. Something weird happened.'\n        in str(err.value)\n    )\n\n\n@pytest.mark.parametrize(\n    \"image, process_input\",\n    [\n        (\n            \"testimage:v1\",\n            \"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE6uuXbZhEfTYb4Mnb/LdrtXKTIIbzNBp8mwriocbaxXxzquvbZpv4QtOTPoIw+0192MW9dWlSVaQPJd7IaiZIIQ==\\n-----END PUBLIC KEY-----\",\n        )\n    ],\n)\ndef test_invoke_cosign(fake_process, image, process_input):\n    def stdin_function(input):\n        return {\"stderr\": input.decode(), \"stdout\": input}\n\n    # as we are mocking the subprocess, the output doesn't change with the input. To check that the\n    # .communicate() method is invoked with the correct input, we append it to stderr as explained in the docs\n    # https://pytest-subprocess.readthedocs.io/en/latest/usage.html#passing-input\n    # There seems to be a bug where appending the input to one data stream (e.g. stderr)\n    # eats the other data stream (stdout in that case), so we simply append it to both.\n    fake_process.register_subprocess(\n        [\"/app/cosign/cosign\", \"verify\", \"-key\", \"/dev/stdin\", image],\n        stderr=cosign_stderr_at_success,\n        stdout=bytes(cosign_payload, \"utf-8\"),\n        stdin_callable=stdin_function,\n    )\n    returncode, stdout, stderr = sigstore_validator.invoke_cosign(\n        \"testimage:v1\", example_pubkey\n    )\n    assert [\n        \"/app/cosign/cosign\",\n        \"verify\",\n        \"-key\",\n        \"/dev/stdin\",\n        image,\n    ] in fake_process.calls\n    assert (returncode, stdout, stderr) == (\n        0,\n        \"{}{}\".format(cosign_payload, process_input),\n        \"{}{}\".format(cosign_stderr_at_success, process_input),\n    )\n\n\n@pytest.mark.parametrize(\n    \"image\",\n    [\n        \"testimage:v1\",\n    ],\n)\ndef test_invoke_cosign_timeout_expired(\n    mocker, mock_add_kill_fake_process, fake_process, image\n):\n    def callback_function(input):\n        fake_process.register_subprocess([\"test\"], wait=0.5)\n        fake_process_raising_timeout = subprocess.Popen([\"test\"])\n        fake_process_raising_timeout.wait(timeout=0.1)\n\n    fake_process.register_subprocess(\n        [\"/app/cosign/cosign\", \"verify\", \"-key\", \"/dev/stdin\", image],\n        stdin_callable=callback_function,\n    )\n\n    mock_kill = mocker.patch(\"pytest_subprocess.core.FakePopen.kill\")\n\n    with pytest.raises(CosignTimeout) as err:\n        sigstore_validator.invoke_cosign(image, example_pubkey)\n\n    mock_kill.assert_has_calls([mocker.call()])\n    assert \"cosign timed out.\" in str(err.value)\n","sub_path":"connaisseur/tests/test_sigstore_validator.py","file_name":"test_sigstore_validator.py","file_ext":"py","file_size_in_byte":9601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"391607462","text":"# Define a class\nclass Calculator: # class names are capitalized; the colon is required\n    name = 'Good Calculator' # this line is a class attribute\n    price = 18\n    def add(self,x,y):\n        print(self.name)\n        result = x + y\n        print(result)\n    def minus(self,x,y):\n        result=x-y\n        print(result)\n    def times(self,x,y):\n        print(x*y)\n    def divide(self,x,y):\n        print(x/y)\n\n\ncal = Calculator()\nprint(cal.name)\nprint(cal.price)\ncal.add(10,20)\ncal.minus(10,20)\ncal.times(10,20)\ncal.divide(10, 20)\n\n# __init__ can be read as initializing the class's attributes; the name comes from the English word 'initial'. It lets initial values be assigned when the object is created.\nclass Calculator:\n    name='good calculator'\n    price=18\n    def __init__(self,name,price,height,width,weight=10): # note the double underscores on both sides; parameters may take default values\n        self.name=name\n        self.price=price\n        self.h=height\n        self.wi=width\n        self.we=weight\nc=Calculator('bad calculator',18,17,16,15)\nprint(c.name,c.price,c.h,c.wi,c.we)
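\n\n# Expected output of the final print (hedged note):\n#   bad calculator 18 17 16 15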
","sub_path":"8.class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"125861942","text":"import numpy as np\n\nimport brainscore\nfrom brainscore.benchmarks._neural_common import NeuralBenchmark, average_repetition\nfrom brainscore.metrics.ceiling import InternalConsistency\nfrom brainscore.metrics.regression import CrossRegressedCorrelation, pls_regression, pearsonr_correlation\nfrom brainscore.utils import LazyLoad\n\n\nVISUAL_DEGREES = 5\nNUMBER_OF_TRIALS = 50\nBIBTEX = \"\"\"\"\"\"\n\n\ndef _DicarloSanghaviMurty2020Region(region, identifier_metric_suffix, similarity_metric, ceiler):\n    assembly_repetition = LazyLoad(lambda region=region: load_assembly(average_repetitions=False, region=region))\n    assembly = LazyLoad(lambda region=region: load_assembly(average_repetitions=True, region=region))\n    return NeuralBenchmark(identifier=f'dicarlo.SanghaviMurty2020.{region}-{identifier_metric_suffix}', version=1,\n                           assembly=assembly, similarity_metric=similarity_metric,\n                           visual_degrees=VISUAL_DEGREES, number_of_trials=NUMBER_OF_TRIALS,\n                           ceiling_func=lambda: ceiler(assembly_repetition),\n                           parent=region,\n                           bibtex=BIBTEX)\n\n\ndef DicarloSanghaviMurty2020V4PLS():\n    return _DicarloSanghaviMurty2020Region('V4', identifier_metric_suffix='pls',\n                                           similarity_metric=CrossRegressedCorrelation(\n                                               regression=pls_regression(), correlation=pearsonr_correlation(),\n                                               crossvalidation_kwargs=dict(stratification_coord=None)),\n                                           ceiler=InternalConsistency())\n\n\ndef DicarloSanghaviMurty2020ITPLS():\n    return _DicarloSanghaviMurty2020Region('IT', identifier_metric_suffix='pls',\n                                           similarity_metric=CrossRegressedCorrelation(\n                                               regression=pls_regression(), correlation=pearsonr_correlation(),\n                                               crossvalidation_kwargs=dict(stratification_coord=None)),\n                                           ceiler=InternalConsistency())\n\n\ndef load_assembly(average_repetitions, region):\n    assembly = brainscore.get_assembly(name=f'dicarlo.SanghaviMurty2020')\n    assembly = assembly.sel(region=region)\n    assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])\n    assembly.load()\n    assembly = assembly.sel(time_bin_id=0)  # 70-170ms\n    assembly = assembly.squeeze('time_bin')\n    assert NUMBER_OF_TRIALS == len(np.unique(assembly.coords['repetition']))\n    assert VISUAL_DEGREES == assembly.attrs['image_size_degree']\n    if average_repetitions:\n        assembly = average_repetition(assembly)\n    return assembly\n","sub_path":"brainscore/benchmarks/sanghavimurty2020.py","file_name":"sanghavimurty2020.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"65026131","text":"import asyncio\nimport websockets\n\nURI = \"ws://localhost:3000\"\n\nasync def client():\n    async with 
websockets.connect(URI) as websocket:\n await websocket.send(\"MEssage from client\")\n message_back = await websocket.recv()\n print(message_back)\n\nasyncio.get_event_loop().run_until_complete(client())\n","sub_path":"snakegame/server/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"124690964","text":"import asyncio\nimport datetime\nimport math\n\nimport pytest\n\nfrom hat import aio\nfrom hat import util\nfrom hat.drivers import iec104\n\n\npytestmark = pytest.mark.asyncio\n\n\n@pytest.fixture\ndef addr():\n return iec104.Address('127.0.0.1', util.get_unused_tcp_port())\n\n\nasync def test_server_without_connections(addr):\n srv = await iec104.listen(connection_cb=lambda _: None,\n addr=addr)\n assert not srv.is_closed\n assert srv.addresses == [addr]\n\n await srv.async_close()\n assert srv.is_closed\n\n\nasync def test_connect(addr):\n conn_queue = aio.Queue()\n srv = await iec104.listen(conn_queue.put_nowait, addr)\n conn = await iec104.connect(addr)\n\n srv_conn = await asyncio.wait_for(conn_queue.get(), 0.1)\n\n assert conn.info.local_addr == srv_conn.info.remote_addr\n assert conn.info.remote_addr == srv_conn.info.local_addr\n assert conn.info.local_addr != conn.info.remote_addr\n assert conn.info.remote_addr == srv.addresses[0]\n\n assert not srv.is_closed\n assert not conn.is_closed\n assert not srv_conn.is_closed\n\n await srv.async_close()\n await asyncio.wait_for(srv_conn.wait_closed(), 0.1)\n await asyncio.wait_for(conn.wait_closed(), 0.1)\n\n\n@pytest.mark.parametrize(\"conn_count\", [1, 2, 10])\nasync def test_multiple_connections(addr, conn_count):\n conn_queue = aio.Queue()\n srv = await iec104.listen(conn_queue.put_nowait, addr)\n\n conns = []\n for _ in range(conn_count):\n conn = await iec104.connect(addr)\n conns.append(conn)\n\n srv_conns = []\n for _ in range(conn_count):\n srv_conn = await asyncio.wait_for(conn_queue.get(), 0.1)\n srv_conns.append(srv_conn)\n\n for conn in conns:\n await conn.async_close()\n for srv_conn in srv_conns:\n await srv_conn.wait_closed()\n await srv.async_close()\n\n\nasync def test_interogate(addr):\n conn_queue = aio.Queue()\n interrogate_queue = aio.Queue()\n\n async def on_interrogate(conn, asdu):\n f = asyncio.Future()\n interrogate_queue.put_nowait((asdu, f))\n result = await f\n return result\n\n srv = await iec104.listen(conn_queue.put_nowait, addr,\n interrogate_cb=on_interrogate)\n conn = await iec104.connect(addr)\n srv_conn = await conn_queue.get()\n\n conn_f = asyncio.ensure_future(conn.interrogate(123))\n asdu, srv_conn_f = await asyncio.wait_for(interrogate_queue.get(), 0.1)\n assert asdu == 123\n data = iec104.Data(\n value=iec104.SingleValue.ON,\n quality=iec104.Quality(\n invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=False),\n time=None,\n asdu_address=asdu,\n io_address=1,\n cause=iec104.Cause.INTERROGATED_STATION,\n is_test=False)\n srv_conn_f.set_result([data])\n result = await conn_f\n assert result == [data]\n\n await srv.async_close()\n await conn.async_close()\n await srv_conn.async_close()\n\n\nasync def test_counter_interogate(addr):\n conn_queue = aio.Queue()\n counter_interrogate_queue = aio.Queue()\n\n async def on_counter_interrogate(conn, asdu, freeze):\n f = asyncio.Future()\n counter_interrogate_queue.put_nowait((asdu, freeze, f))\n result = await f\n return result\n\n srv = await iec104.listen(conn_queue.put_nowait, addr,\n 
counter_interrogate_cb=on_counter_interrogate)\n conn = await iec104.connect(addr)\n srv_conn = await conn_queue.get()\n\n conn_f = asyncio.ensure_future(conn.counter_interrogate(123))\n asdu, freeze, srv_conn_f = await asyncio.wait_for(\n counter_interrogate_queue.get(), 0.1)\n assert asdu == 123\n assert freeze == iec104.FreezeCode.READ\n data = iec104.Data(\n value=iec104.BinaryCounterValue(\n value=321,\n sequence=1,\n overflow=False,\n adjusted=False,\n invalid=False),\n quality=None,\n time=None,\n asdu_address=asdu,\n io_address=1,\n cause=iec104.Cause.INTERROGATED_COUNTER,\n is_test=False)\n srv_conn_f.set_result([data])\n result = await conn_f\n assert result == [data]\n\n await srv.async_close()\n await conn.async_close()\n await srv_conn.async_close()\n\n\n@pytest.mark.parametrize(\"data\", [\n iec104.Data(value=iec104.SingleValue.ON,\n quality=iec104.Quality(invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=False),\n time=None,\n asdu_address=123,\n io_address=321,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False),\n iec104.Data(value=iec104.DoubleValue.FAULT,\n quality=iec104.Quality(invalid=True,\n not_topical=False,\n substituted=True,\n blocked=False,\n overflow=False),\n time=iec104.time_from_datetime(datetime.datetime.now()),\n asdu_address=1,\n io_address=2,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=True),\n iec104.Data(value=iec104.StepPositionValue(value=32,\n transient=False),\n quality=iec104.Quality(invalid=False,\n not_topical=True,\n substituted=False,\n blocked=True,\n overflow=False),\n time=iec104.time_from_datetime(datetime.datetime.now(), False),\n asdu_address=1,\n io_address=2,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False),\n iec104.Data(value=iec104.BitstringValue(value=b'1234'),\n quality=iec104.Quality(invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=False),\n time=None,\n asdu_address=1,\n io_address=2,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False),\n iec104.Data(value=iec104.NormalizedValue(value=0.123),\n quality=iec104.Quality(invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=False),\n time=None,\n asdu_address=1,\n io_address=2,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False),\n iec104.Data(value=iec104.ScaledValue(value=-123),\n quality=iec104.Quality(invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=True),\n time=None,\n asdu_address=1,\n io_address=2,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False),\n iec104.Data(value=iec104.FloatingValue(value=123.456),\n quality=iec104.Quality(invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=False),\n time=None,\n asdu_address=1,\n io_address=2,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False)\n])\nasync def test_receive(addr, data):\n conn_queue = aio.Queue()\n srv = await iec104.listen(conn_queue.put_nowait, addr)\n conn = await iec104.connect(addr)\n srv_conn = await conn_queue.get()\n\n srv_conn.notify_data_change([data])\n result = await conn.receive()\n if (isinstance(data.value, iec104.NormalizedValue) or\n isinstance(data.value, iec104.FloatingValue)):\n assert math.isclose(data.value.value, result[0].value.value,\n rel_tol=1e-3)\n data = data._replace(\n value=data.value._replace(\n value=result[0].value.value))\n assert result == [data]\n\n await srv.async_close()\n await conn.async_close()\n await srv_conn.async_close()\n\n\n@pytest.mark.parametrize(\"command\", [\n 
iec104.Command(action=iec104.Action.EXECUTE,\n value=iec104.SingleValue.OFF,\n asdu_address=1,\n io_address=2,\n time=None,\n qualifier=1),\n iec104.Command(action=iec104.Action.CANCEL,\n value=iec104.DoubleValue.ON,\n asdu_address=2,\n io_address=1,\n time=iec104.time_from_datetime(datetime.datetime.now()),\n qualifier=2),\n iec104.Command(action=iec104.Action.SELECT,\n value=iec104.RegulatingValue.HIGHER,\n asdu_address=1,\n io_address=2,\n time=None,\n qualifier=3),\n iec104.Command(action=iec104.Action.CANCEL,\n value=iec104.NormalizedValue(value=0.321),\n asdu_address=1,\n io_address=2,\n time=None,\n qualifier=4),\n iec104.Command(action=iec104.Action.EXECUTE,\n value=iec104.ScaledValue(value=123),\n asdu_address=1,\n io_address=2,\n time=None,\n qualifier=5),\n iec104.Command(action=iec104.Action.EXECUTE,\n value=iec104.FloatingValue(value=-123.456),\n asdu_address=1,\n io_address=2,\n time=None,\n qualifier=6)\n])\n@pytest.mark.parametrize(\"success\", [True, False])\nasync def test_send_command(addr, command, success):\n conn_queue = aio.Queue()\n\n async def on_command(conn, commands):\n nonlocal command\n if (isinstance(command.value, iec104.NormalizedValue) or\n isinstance(command.value, iec104.FloatingValue)):\n assert math.isclose(command.value.value, commands[0].value.value,\n rel_tol=1e-3)\n command = command._replace(\n value=command.value._replace(\n value=commands[0].value.value))\n assert commands == [command]\n return success\n\n srv = await iec104.listen(conn_queue.put_nowait, addr,\n command_cb=on_command)\n conn = await iec104.connect(addr)\n srv_conn = await conn_queue.get()\n\n result = await conn.send_command(command)\n assert result == success\n\n await srv.async_close()\n await conn.async_close()\n await srv_conn.async_close()\n\n\nasync def test_interrogate_negative_response(addr):\n conn_queue = aio.Queue()\n srv = await iec104.listen(conn_queue.put_nowait, addr,\n interrogate_cb=lambda _, __: None)\n conn = await iec104.connect(addr)\n srv_conn = await conn_queue.get()\n\n result = await conn.interrogate(123)\n assert result == []\n\n await conn.async_close()\n await srv_conn.async_close()\n await srv.async_close()\n\n\nasync def test_example_docs():\n addr = iec104.Address('127.0.0.1', util.get_unused_tcp_port())\n conn2_future = asyncio.Future()\n srv = await iec104.listen(conn2_future.set_result, addr)\n conn1 = await iec104.connect(addr)\n conn2 = await conn2_future\n\n data = iec104.Data(value=iec104.SingleValue.ON,\n quality=iec104.Quality(invalid=False,\n not_topical=False,\n substituted=False,\n blocked=False,\n overflow=False),\n time=None,\n asdu_address=123,\n io_address=321,\n cause=iec104.Cause.SPONTANEOUS,\n is_test=False)\n\n # send data from conn1 to conn2\n conn1.notify_data_change([data])\n result = await conn2.receive()\n assert result == [data]\n\n # send data from conn2 to conn1\n conn2.notify_data_change([data])\n result = await conn1.receive()\n assert result == [data]\n\n await conn1.async_close()\n await conn2.async_close()\n await srv.async_close()\n","sub_path":"test_pytest/test_iec104.py","file_name":"test_iec104.py","file_ext":"py","file_size_in_byte":12931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"633423975","text":"#!/usr/bin/env python\n\n# import statements\nimport sys,os\n\n# variable declarations\nnumclient=8\n\n\n# function definitions\ndef start(cmdline):\n\tos.system(cmdline)\n\n# main program block\nargs=' '.join(sys.argv[1:])\nfor i in range(numclient):\n\tstart('python 
echo_client.py %s'% args)\n","sub_path":"python/programingPython/internetProgram/networkScript/socketProgramming/socketBasics/testecho.py","file_name":"testecho.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"605533398","text":"#!/usr/bin/env python2.7\nimport logging\n\nclass CustomFormatter(logging.Formatter):\n\n \"\"\"\n This class is for Custom log formatter\n \"\"\"\n\n dbg_fmt = '%(msg)s'\n info_fmt = '%(msg)s'\n warn_fmt = '[%(levelname)-7s] %(asctime)s %(msg)s'\n err_fmt = '*************************************[ %(levelname)s ]*************************************\\n' + \\\n '%(asctime)s (%(filename)s,%(lineno)d) %(msg)s\\n' + \\\n '*************************************[ %(levelname)s ]*************************************'\n\n\n def __init__(self, fmt=\"%(levelno)s: %(msg)s\"):\n logging.Formatter.__init__(self, fmt)\n\n def format(self, record):\n # Preparation Custom Formatter\n format_org = self._fmt\n if record.levelno == logging.DEBUG:\n self._fmt = CustomFormatter.dbg_fmt\n\n elif record.levelno == logging.INFO:\n self._fmt = CustomFormatter.info_fmt\n\n elif record.levelno == logging.WARN:\n self._fmt = CustomFormatter.warn_fmt\n\n elif record.levelno == logging.ERROR:\n self._fmt = CustomFormatter.err_fmt\n result = logging.Formatter.format(self, record)\n self._fmt = format_org\n return result\n","sub_path":"modules/base/custom_formatter.py","file_name":"custom_formatter.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"191866718","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\n\nimport tensorflow as tf \nimport csv\n\nfrom tensorflow import TensorShape\n\nfrom DatasetLoader import *\nfrom models.vae_model import *\nimport copy\n\nloader = True\n\ndataloader = DatasetLoader()\n\nlosses={\"alpha\":512,\"beta\":15,\"delta\":5,\"theta\":1, \"kappa\":0, \"ce_eq\":1}\nconfig = {}\nconfig.setdefault(\"losses\", losses)\n#config[\"training_steps\"] = data.training_steps\nconfig.setdefault(\"test_on_batch\", False)\nconfig.setdefault(\"clustering_mode\", True)\nconfig.setdefault(\"binary_detection\", False)\nconfig.setdefault(\"meta_learning_data\", False)\nconfig.setdefault(\"use_latentloss\", False)\nconfig.setdefault(\"only_positives\", False)\n\nconfig.setdefault(\"epochs\", 10)\nconfig.setdefault(\"batch_size\", 256)\n\n\nconfig.setdefault(\"max_clusters\", dataloader.max_clusters)\n\nconfig.setdefault(\"load_pretrained\", loader)\nconfig.setdefault(\"fine_tune_detector\", loader)\nconfig.setdefault(\"freeze_ae\", False)\nconfig.setdefault(\"pretrained_model_path\", \"trained_models\")\nconfig.setdefault(\"global_detector_dropout\", 0.5)\nconfig.setdefault(\"global_ae_dropout\", 0.5)\nconfig.setdefault(\"initial_clustering_weight\", 0.001)\n\ntrain_data, train_labels = dataloader.get_numpy(dataloader.test_dataset, return_in_batches=False)\n\n#train_data = np.reshape(train_data, ((-1,)+tuple(train_data.shape[2:])))\n#train_labels = np.reshape(train_labels, (-1, train_labels.shape[-1]))\n\nclusters = train_labels.shape[-1]\nint_labels = np.argmax(train_labels, axis=1)\n\nmodel = VAE_sorter(config_file=\"model_config.xml\", config = config)\nmodel.build(load_weights = config[\"load_pretrained\"], freeze_ae=config[\"freeze_ae\"], 
use_clustering=config[\"clustering_mode\"])\n\ndef cluster_labels(prod=1):\n\tlatent = model.encoder.predict([train_data, train_labels])\n\t#reconstruction = model.decoder.predict(latent)\n\treconstruction = copy.deepcopy(train_data)\n\t# At this point we should have a one hot encoded label array so we have to reverse it to int\n\treturn reconstruction, latent\n\n\tclustering_imgs = []\n\tclustering_latents = []\n\n\tfor i in range(clusters):\n\t\t\n\t\tif len(train_data[int_labels == i]) > 0:\t\t\n\t\t\t#latent[int_labels==i] = latent[int_labels==i] - np.nan_to_num(prod*config[\"initial_clustering_weight\"]*(latent[int_labels==i] - np.nanmean(latent[int_labels==i], axis=0)))\n\t\t\treconstruction[int_labels == i] = train_data[int_labels == i] - np.nan_to_num(prod*config[\"initial_clustering_weight\"]*(train_data[int_labels == i] - np.nanmean(train_data[int_labels == i], axis=0)))\n\t\t\n\n\tlatent = np.nan_to_num(latent)\n\treconstruction = np.nan_to_num(reconstruction)\n\treturn reconstruction, latent\n\n\nif config[\"only_positives\"]:\n\ttrain_data = train_data[int_labels > 0]\n\ttrain_labels = train_labels[int_labels > 0]\n\tint_labels = int_labels[int_labels > 0]\n\n\ndef batch_generator(data, labels, latent, batch_size = 32):\n indices = np.arange(len(data)) \n batch=[]\n while True:\n np.random.shuffle(indices) \n for i in indices:\n batch.append(i)\n if len(batch)==batch_size:\n \tyield [data[batch], labels[batch]], [data[batch], labels[batch],labels[batch],latent[batch]]\n \tbatch=[]\n\nuse_generator = False\n\nfor i in range(0,20):\n\treconstruction, latent = cluster_labels(i+1)\n\tif use_generator:\n\t\ttrain_generator = batch_generator(train_data, train_labels, latent, batch_size=config[\"batch_size\"])\t\n\t\tmodel.vae.fit(train_generator, steps_per_epoch=len(train_data)//config[\"batch_size\"], epochs=config[\"epochs\"])\n\telse:\n\t\tmodel.vae.fit([train_data, train_labels], [reconstruction, train_labels, train_labels, latent], batch_size=config[\"batch_size\"], epochs=config[\"epochs\"], validation_split=0.2)\n\n\tmodel.save_weights_separate()\n\n\n\n\t#val_data, val_labels = dataloader.get_numpy(dataloader.val_dataset, return_in_batches=False)\n\t#model.vae.eva\n\t#exit()\n\t\n\t","sub_path":"train_for_clustering.py","file_name":"train_for_clustering.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"121350140","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport subprocess\nimport requests\nimport sys\nimport glob\nimport shutil\nimport ast\nfrom gigablast import GigablastAPI, GigablastInstances\nfrom junit_xml import TestSuite, TestCase\nfrom urllib.parse import parse_qs\n\n\nclass TestRunner:\n def __init__(self, testdir, testcase, gb_instances, gb_host, webserver, ws_scheme, ws_domain, ws_port):\n self.testcase = testcase\n self.testcasedir = os.path.join(testdir, testcase)\n self.testcaseconfigdir = os.path.join(self.testcasedir, 'testcase')\n testcasedescpath = os.path.join(self.testcasedir, 'README')\n if os.path.exists(testcasedescpath):\n self.testcasedesc = self.read_file(testcasedescpath)[0].replace('.', '')\n else:\n self.testcasedesc = self.testcase\n\n self.gb_instances = gb_instances\n\n self.gb_path = gb_instances.get_instance_path(0)\n self.gb_starttime = 0\n\n self.spider_apis = []\n if self.gb_instances.num_instances == self.gb_instances.num_shards:\n host_offset = 0\n else:\n host_offset = 
self.gb_instances.num_shards\n\n for i in range(self.gb_instances.num_shards):\n self.spider_apis.append(GigablastAPI(gb_host, self.gb_instances.get_instance_port(host_offset + i)))\n\n self.api = self.spider_apis[0]\n\n self.webserver = webserver\n self.ws_scheme = ws_scheme\n self.ws_domain = ws_domain\n self.ws_port = ws_port\n\n self.testcases = []\n\n def run_test(self):\n # verify we have testcase to run\n if os.path.exists(self.testcaseconfigdir):\n # verify gb has started\n if self.start_gb():\n if not self.run_instructions():\n self.run_testcase()\n\n # stop & cleanup\n self.stop_gb()\n\n return self.get_testsuite()\n\n @staticmethod\n def read_file(filename):\n if os.path.exists(filename):\n with open(filename, 'r') as file:\n return file.read().splitlines()\n\n return []\n\n def format_url(self, url):\n return url.format(SCHEME=self.ws_scheme, DOMAIN=self.ws_domain, PORT=self.ws_port)\n\n def start_gb(self):\n print('Cleaning old data')\n subprocess.call(['./gb', 'dsh2', 'make cleantest'], cwd=self.gb_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n print('Copy config files')\n for filename in glob.glob(os.path.join(self.testcaseconfigdir, '*.txt')):\n destfile = shutil.copy(filename, self.gb_path)\n lines = self.read_file(destfile)\n with open(destfile, 'w') as file:\n for line in lines:\n file.write(self.format_url(line) + '\\n')\n\n subprocess.call(['./gb', 'installfile', os.path.basename(filename)], cwd=self.gb_path)\n\n print('Starting gigablast')\n start_time = time.perf_counter()\n\n subprocess.call(['./gb', 'start'], cwd=self.gb_path, stdout=subprocess.DEVNULL)\n\n # wait until started\n result = True\n while result:\n try:\n # wait until gb is initialized\n self.wait_processup()\n\n self.update_processuptime()\n\n # set some default/custom config\n self.config_gb()\n\n # put some delay after start\n time.sleep(1)\n break\n except requests.exceptions.ConnectionError as e:\n # wait for a max of 300 seconds\n if time.perf_counter() - start_time > 300:\n result = False\n break\n time.sleep(0.5)\n\n self.add_testcase('pre', 'start', start_time, not result)\n return result\n\n def save_gb(self):\n print('Saving gigablast')\n subprocess.call(['./gb', 'save'], cwd=self.gb_path, stderr=subprocess.DEVNULL)\n\n # wait for gb mode to be updated\n time.sleep(0.5)\n\n def stop_gb(self):\n print('Stopping gigablast')\n subprocess.call(['./gb', 'stop'], cwd=self.gb_path, stderr=subprocess.DEVNULL)\n\n def config_gb(self):\n self.api.config_crawldelay(0, 0)\n self.api.config_dns('127.0.0.1')\n\n # enable debug/trace logs\n self.api.config_log({'ldq': '1'})\n self.api.config_log({'ldspid': '1'})\n self.api.config_log({'ltrc_sp': '1'})\n self.api.config_log({'ltrc_msgfour': '1'})\n self.api.config_log({'ltrc_xmldoc': '1'})\n\n # apply custom config\n self.custom_config()\n\n def run_instructions(self):\n # check instruction file\n filenames = sorted(glob.glob(os.path.join(self.testcaseconfigdir, 'instructions*')))\n for filename in filenames:\n print('Processing', os.path.basename(filename))\n instructions = self.read_file(filename)\n\n for instruction in instructions:\n # skip comment\n if len(instruction) == 0 or instruction.startswith('#'):\n continue\n\n tokens = instruction.split()\n token = tokens.pop(0)\n func = getattr(self, token, None)\n if func is not None:\n func(*tokens)\n else:\n print('Unknown instruction -', token)\n\n return len(filenames)\n\n def run_testcase(self):\n # seed gb\n self.seed()\n\n # verify gb has done spidering (only run other test if 
spidering is successful)\n if self.wait_spider_done():\n # verify query language\n self.verify_query_language()\n\n # verify query terms\n self.verify_query_terms()\n\n # search\n self.just_search()\n\n # verify indexed\n self.verify_indexed()\n\n # verify not indexed\n self.verify_not_indexed()\n\n # verify search result url\n self.verify_search_result_url()\n\n # verify search result title & summary\n self.verify_search_result_titlesummary()\n\n # verify spidered\n self.verify_spidered()\n\n # verify only spidered\n self.verify_only_spidered()\n\n # verify not spidered\n self.verify_not_spidered()\n\n # verify spider response\n self.verify_spider_response()\n\n @staticmethod\n def convert_config_log(tokens):\n it = iter(tokens)\n return dict(zip(it, it))\n\n def custom_config(self, *args):\n print('Applying custom config')\n file_name = 'custom_config'\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, file_name)\n items = self.read_file(filename)\n\n for item in items:\n tokens = item.split()\n token = tokens.pop(0)\n\n convert_func = getattr(self, 'convert_' + token, None)\n func = getattr(self.api, token, None)\n if func is not None:\n if convert_func is not None:\n func(convert_func(tokens))\n else:\n func(*tokens)\n else:\n print('Unknown instruction -', token)\n\n def seed(self, *args):\n print('Adding seed for spidering')\n\n if len(args):\n if len(args[0]):\n seedstr = self.format_url(args[0]) + '\\n'\n else:\n filename = os.path.join(self.testcaseconfigdir, 'seeds')\n items = self.read_file(filename)\n seedstr = \"\"\n if len(items):\n for item in items:\n seedstr += self.format_url(item) + '\\n'\n\n if len(seedstr) == 0:\n # default seed\n for entry in os.scandir(self.testcasedir):\n if entry.is_dir() and entry.name != 'testcase':\n seedstr += \"{}://{}.{}.{}:{}/\\n\".format(self.ws_scheme, entry.name, self.testcase,\n self.ws_domain, self.ws_port)\n\n seedstr = seedstr.rstrip('\\n')\n print(seedstr)\n\n self.api.config_sitelist(seedstr)\n\n def wait_spider_done(self, *args):\n print('Waiting for spidering to complete')\n\n # wait until\n # - spider is in progress\n # - waitingTree spider time is more than an hour\n # - no pending doleIP\n # - nothing is being spidered\n for spider_api in self.spider_apis:\n start_time = time.perf_counter()\n\n result = True\n while result:\n try:\n response = spider_api.get_spiderqueue()['response']\n print(response)\n except:\n result = False\n break\n\n if response['statusCode'] == 7 and response['doleIPCount'] == 0 and response['spiderCount'] == 0:\n if response['waitingTreeCount'] > 0:\n has_pending_spider = False\n for waiting_tree in response['waitingTrees']:\n if waiting_tree['spiderTime'] < ((time.time() + 3600) * 1000):\n has_pending_spider = True\n\n if not has_pending_spider:\n break\n else:\n # wait for 5 seconds\n if time.perf_counter() - start_time > 5:\n break\n\n if response['statusCode'] == 0:\n # we only wait for 5 seconds if it's initializing\n if time.perf_counter() - start_time > 5:\n break\n\n # wait for a max of 180 seconds\n if time.perf_counter() - start_time > 180:\n print(response)\n result = False\n break\n\n time.sleep(1.0)\n\n if result:\n self.save_gb()\n\n self.add_testcase('pre', 'spider', start_time, not result)\n\n served_urls = self.webserver.get_served_urls()\n for served_url in served_urls:\n print('Spidered ', served_url)\n\n return result\n\n def add_testcase(self, test_type, test_item, start_time, failed=False):\n test_name = test_type 
+ ' - ' + test_item\n testcase = TestCase(test_name,\n classname='systemtest.' + str(self.gb_instances.offset) + '.' + self.testcasedesc,\n elapsed_sec=(time.perf_counter() - start_time))\n if failed:\n testcase.add_failure_info(test_name + ' - failed')\n\n if not self.validate_processuptime():\n testcase.add_failure_info(test_name + ' - gb restarted')\n self.update_processuptime()\n\n self.testcases.append(testcase)\n\n def get_testsuite(self):\n return TestSuite(self.testcase, test_cases=self.testcases, package='systemtest')\n\n def wait_processup(self):\n for spider_api in self.spider_apis:\n start_time = time.perf_counter()\n\n while True:\n response = spider_api.status()\n if response['response']['statusCode'] == 0 or response['response']['statusCode'] == 7:\n # SP_INITIALIZING / SP_INPROGRESS\n break\n\n # wait for a max of 60 seconds\n if time.perf_counter() - start_time > 60:\n print(response)\n break\n\n time.sleep(0.5)\n\n def validate_processuptime(self):\n return self.api.status_processstarttime() == self.gb_starttime\n\n def update_processuptime(self):\n self.gb_starttime = self.api.status_processstarttime()\n\n def dump(self, *args):\n start_time = time.perf_counter()\n self.api.dump()\n self.add_testcase('dump', '', start_time)\n\n def just_search(self, *args):\n test_type = 'just_search'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n try:\n response = self.api.search(item)\n self.add_testcase(test_type, item, start_time)\n except:\n self.add_testcase(test_type, item, start_time, True)\n\n def verify_indexed(self, *args):\n test_type = 'verify_indexed'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n try:\n response = self.api.search(item)\n\n failed = (len(response['results']) == 0)\n if failed:\n print(test_type + ' - ' + item)\n print(response)\n\n self.add_testcase(test_type, item, start_time, failed)\n except:\n self.add_testcase(test_type, item, start_time, True)\n\n def verify_not_indexed(self, *args):\n test_type = 'verify_not_indexed'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n try:\n response = self.api.search(item)\n\n failed = (len(response['results']) != 0)\n if failed:\n print(test_type + ' - ' + item)\n print(response)\n\n self.add_testcase(test_type, item, start_time, failed)\n except:\n self.add_testcase(test_type, item, start_time, True)\n\n def verify_query_language(self, *args):\n test_type = 'verify_query_language'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n\n tokens = item.split('|')\n if len(tokens) != 3:\n print('Invalid format ', item)\n self.add_testcase(test_type, item, start_time, True)\n return\n\n query = tokens[0]\n query_param = tokens[1]\n language = tokens[2]\n\n try:\n response = 
self.api.search(query, parse_qs(query_param))\n failed = (not response['queryInfo']['queryLanguageAbbr'] == language)\n\n if failed:\n print(test_type + ' - ' + query + ' - ' + query_param)\n print(response)\n\n self.add_testcase(test_type, query + ' - ' + query_param, start_time, failed)\n except:\n self.add_testcase(test_type, query + ' - ' + query_param, start_time, True)\n\n def verify_query_terms(self, *args):\n test_type = 'verify_query_terms'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n\n tokens = item.split('|')\n\n query = tokens.pop(0)\n if len(tokens) == 0:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n query_param = tokens.pop(0)\n if len(tokens) == 0:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n num_terms = int(tokens.pop(0))\n if len(tokens) != num_terms:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n try:\n response = self.api.search(query, parse_qs(query_param))\n\n failed = (not response['queryInfo']['queryNumTermsTotal'] == num_terms)\n if not failed:\n for index, token in enumerate(tokens):\n term = response['queryInfo']['terms'][index]['termStr']\n\n if token != term:\n failed = True\n break\n\n if failed:\n print(test_type + ' - ' + item)\n print(response)\n\n self.add_testcase(test_type, query, start_time, failed)\n except:\n self.add_testcase(test_type, query, start_time, True)\n\n def verify_search_result_url(self, *args):\n test_type = 'verify_search_result_url'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n\n tokens = item.split('|')\n\n query = self.format_url(tokens.pop(0))\n if len(tokens) == 0:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n query_param = tokens.pop(0)\n if len(tokens) == 0:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n num_results = int(tokens.pop(0))\n if len(tokens) != num_results:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n results = []\n for token in tokens:\n results.append(self.format_url(token))\n\n try:\n response = self.api.search(query, parse_qs(query_param))\n\n failed = (not len(response['results']) == num_results)\n if not failed:\n for index, result in enumerate(results):\n url = response['results'][index]['url']\n\n # gb doesn't return url with scheme when it's http\n if self.ws_scheme == 'http':\n url = 'http://' + url\n\n if result != url:\n failed = True\n break\n\n if failed:\n print(test_type + ' - ' + query + ' - ' + query_param)\n print(response)\n\n self.add_testcase(test_type, query, start_time, failed)\n except:\n self.add_testcase(test_type, query, start_time, True)\n\n def verify_search_result_titlesummary(self, *args):\n test_type = 'verify_search_result_titlesummary'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = 
self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n\n tokens = item.split('|')\n\n query = self.format_url(tokens.pop(0))\n if len(tokens) == 0:\n print('Invalid format ', item)\n self.add_testcase(test_type, query, start_time, True)\n return\n\n query_param = tokens.pop(0)\n if len(tokens) == 0:\n print('Invalid format ', item)\n self.add_testcase(test_type, query + ' - ' + query_param, start_time, True)\n return\n\n num_results = int(tokens.pop(0))\n if len(tokens) != num_results * 2:\n print('Invalid format ', item)\n self.add_testcase(test_type, query + ' - ' + query_param, start_time, True)\n return\n\n it = iter(tokens)\n results = zip(it, it)\n\n try:\n response = self.api.search(query, parse_qs(query_param))\n\n failed = (not len(response['results']) == num_results)\n if not failed:\n for index, (title, summary) in enumerate(results):\n r_title = response['results'][index]['title']\n r_summary = response['results'][index]['sum']\n\n if title != r_title or summary != r_summary:\n failed = True\n break\n\n if failed:\n print(test_type + ' - ' + query + ' - ' + query_param)\n print(response)\n\n self.add_testcase(test_type, query + ' - ' + query_param, start_time, failed)\n except Exception as e:\n print(e)\n self.add_testcase(test_type, query + ' - ' + query_param, start_time, True)\n\n def verify_spidered(self, *args):\n test_type = 'verify_spidered'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(args[0])\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n served_urls = self.webserver.get_served_urls()\n for item in items:\n start_time = time.perf_counter()\n try:\n url = self.format_url(item)\n failed = (url not in served_urls)\n\n if failed:\n print(test_type + ' - ' + url)\n\n self.add_testcase(test_type, item, start_time, failed)\n except:\n self.add_testcase(test_type, item, start_time, True)\n\n def verify_only_spidered(self, *args):\n test_type = 'verify_only_spidered'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(args[0])\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n if len(items):\n served_urls = self.webserver.get_served_urls()\n\n start_time = time.perf_counter()\n\n formated_items = []\n for item in items:\n formated_items.append(self.format_url(item))\n\n for url in formated_items:\n self.add_testcase(test_type, url, start_time, (url not in served_urls))\n\n for url in served_urls:\n if url not in formated_items:\n self.add_testcase(test_type, url, start_time, True)\n\n def verify_not_spidered(self, *args):\n test_type = 'verify_not_spidered'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(args[0])\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = self.read_file(filename)\n\n served_urls = self.webserver.get_served_urls()\n for index, item in enumerate(items):\n start_time = time.perf_counter()\n try:\n url = self.format_url(item)\n failed = (url in served_urls)\n\n if failed:\n print(test_type + ' - ' + url)\n\n self.add_testcase(test_type, item, start_time, failed)\n except:\n self.add_testcase(test_type, item, start_time, True)\n\n def verify_spider_response(self, *args):\n test_type = 'verify_spider_response'\n print('Running test -', test_type)\n\n items = []\n if len(args):\n items.append(' '.join(args))\n else:\n filename = os.path.join(self.testcaseconfigdir, test_type)\n items = 
self.read_file(filename)\n\n for item in items:\n start_time = time.perf_counter()\n\n tokens = item.split('|')\n if len(tokens) != 2:\n print('Invalid format ', item)\n self.add_testcase(test_type, item, start_time, True)\n return\n\n url = self.format_url(tokens.pop(0))\n\n result = ast.literal_eval(tokens.pop(0))\n if type(result) is not dict:\n print('Invalid format ', item)\n self.add_testcase(test_type, item, start_time, True)\n return\n\n try:\n response = self.api.lookup_spiderdb(url)\n\n failed = ('spiderReply' not in response)\n if not failed:\n for key, value in result.items():\n if response['spiderReply'][key] != value:\n failed = True\n break\n\n if failed:\n print(test_type + ' - ' + url + ' - ' + str(result))\n print(response)\n\n self.add_testcase(test_type, url + ' - ' + str(result), start_time, failed)\n except Exception as e:\n print(e)\n self.add_testcase(test_type, url + ' - ' + str(result), start_time, True)\n\n\ndef main(testdir, testcase, gb_instances, gb_host, webserver, ws_scheme, ws_domain, ws_port):\n test_runner = TestRunner(testdir, testcase, gb_instances, gb_host, webserver, ws_scheme, ws_domain, ws_port)\n result = test_runner.run_test()\n print(TestSuite.to_xml_string([result]))\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('testcase', help='Test case to run')\n parser.add_argument('--testdir', dest='testdir', default='tests', action='store',\n help='Directory containing test cases')\n\n parser.add_argument('--offset', dest='gb_offset', type=int, default=0, action='store',\n help='Gigablast offset for running multiple gb at the same time (default: 0)')\n default_gbpath = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '../open-source-search-engine'))\n parser.add_argument('--path', dest='gb_path', default=default_gbpath, action='store',\n help='Directory containing gigablast binary (default: {})'.format(default_gbpath))\n parser.add_argument('--num-instances', dest=\"gb_num_instances\", type=int, default=1, action='store',\n help='Number of gigablast instances (default: 1)')\n parser.add_argument('--num-shards', dest=\"gb_num_shards\", type=int, default=1, action='store',\n help='Number of gigablast shards (default: 1)')\n parser.add_argument('--host', dest='gb_host', default='127.0.0.1', action='store',\n help='Gigablast host (default: 127.0.0.1)')\n parser.add_argument('--port', dest='gb_port', type=int, default=28000, action='store',\n help='Gigablast port (default: 28000')\n\n parser.add_argument('--dest-scheme', dest='ws_scheme', default='http', action='store',\n help='Destination host scheme (default: 127.0.0.1)')\n parser.add_argument('--dest-domain', dest='ws_domain', default='privacore.test', action='store',\n help='Destination host domain (default: privacore.test)')\n parser.add_argument('--dest-port', dest='ws_port', type=int, default=28080, action='store',\n help='Destination host port (default: 28080')\n\n pargs = parser.parse_args()\n\n from webserver import TestWebServer\n\n # start webserver\n test_webserver = TestWebServer(pargs.ws_port)\n\n gb_instances = GigablastInstances(pargs.gb_offset, pargs.gb_path, pargs.gb_num_instances, pargs.gb_num_shards, pargs.gb_port)\n main(pargs.testdir, pargs.testcase, gb_instances, pargs.gb_host, test_webserver, pargs.ws_scheme, pargs.ws_domain, pargs.ws_port)\n\n # stop webserver\n 
test_webserver.stop()\n","sub_path":"testrunner.py","file_name":"testrunner.py","file_ext":"py","file_size_in_byte":28673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"73766876","text":"#encoding: UTF-8\n\n# Author: DiegoArmandoPerezGonzalez, A01374794\n# Description: calculates your BMI from the weight and height given by the user\n\ndef iMC(pesoKg, alturaM):\n iMC = ((pesoKg)/(alturaM**2))\n if iMC < 18.5:\n return (\"Underweight\", iMC)\n elif iMC < 25:\n return (\"Normal weight\", iMC)\n else:\n return (\"Overweight\", iMC)\n\n\ndef main():\n pesoKg = float(input(\"Enter your weight in kg: \"))\n alturaM = float(input(\"Enter your height in m: \"))\n print(\"Your weight is: \", pesoKg, \"kg\")\n print(\"Your height is: \", alturaM, \"m\")\n if pesoKg <= 0 or alturaM <= 0:\n print(\"Error\")\n return\n print(iMC(pesoKg, alturaM))\n\nmain()","sub_path":"iMC.py","file_name":"iMC.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"497095237","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('data', '0019_alldata_auction'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='datastate_auction',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('state', models.IntegerField(default=0)),\n ('detail', models.TextField(null=True)),\n ('dataid', models.ForeignKey(to='data.alldata')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n ]\n","sub_path":"data/migrations/0020_datastate_auction.py","file_name":"0020_datastate_auction.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"338706380","text":"from intersect import intersect\n\n\n__author__ = \"davidmurphy\"\n\n\ndata_dir = \"/Users/davidmurphy/GoogleDrive/linked_selection/data/\"\n\n\ndef gerp_exons(chromosome, anno):\n \"\"\"\n Intersect conserved elements from GERP++ with annotations from UCSC genes\n :param chromosome: human chromosome\n :param anno: annotation like exon, nonexonic, etc.\n :return: chrN_gerp_annoSegments.bed\n \"\"\"\n\n # load filename strings\n gerp_file = \"{}hg19.GERP_elements/hg19_chr{}_elems.txt\".format(data_dir, chromosome)\n anno_file = \"{0}coords/chr{1}/chr{1}_{2}.bed\".format(data_dir, chromosome, anno)\n out_file = \"{0}coords/chr{1}/chr{1}_gerp_{2}.bed\".format(data_dir, chromosome, anno)\n\n # load data into lists of segment tuples\n with open(gerp_file, \"r\") as f:\n d = [x.strip().split(\"\\t\") for x in f]\n gerp_segments = [(int(x[0]), int(x[1])) for x in d]\n\n with open(anno_file, \"r\") as f:\n d = [x.strip().split(\"\\t\") for x in f if not x.startswith(\"#\")]\n anno_segments = [(int(x[1]), int(x[2])) for x in d]\n\n # create an intersect generator from the two segment sets\n intersection = intersect(gerp_segments, anno_segments)\n\n # write the intersection to a new bed file\n with open(out_file, \"w\") as o:\n o.write(\"# GERP++ conserved elements intersected with {} segments\\n\".format(anno))\n o.write(\"\\n\".join(\"chr{}\\t{}\\t{}\".format(chromosome, start, end) 
for start, end in intersection) + \"\\n\")\n\n return None\n\n\n# debug\nfor c in range(1, 23):\n gerp_exons(c, \"knownGene_merged_nonexonicSegments\")\n\n","sub_path":"GoogleDrive/linked_selection/myprograms/cluster_code/selected_features/gerp_exons.py","file_name":"gerp_exons.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"63883308","text":"from p2p import bondora, mintos, viainvest\nimport configparser\n\nconfig = configparser.ConfigParser()\n\n\ndef setup():\n print(\"======================================\")\n print(\"\\t\\tCONFIG\")\n print(\"1) Bondora\")\n print(\"2) Mintos\")\n print(\"Please select one\")\n choice = int(input())\n if (choice == 1):\n config.add_section('BONDORA')\n print(\"Input mail \")\n config.set('BONDORA', 'MAIL', input())\n print(\"Input password \")\n config.set('BONDORA', 'PW', input())\n if (choice == 2):\n config.add_section('MINTOS')\n print(\"Input mail \")\n config.set('MINTOS', 'MAIL', input())\n print(\"Input password \")\n config.set('MINTOS', 'PW', input())\n\n with open('conf.ini', 'w') as configfile: # save\n config.write(configfile)\n\n\nif (not config.read('conf.ini')):\n setup()\nprint(\"Do you want to set up? (y/n)\")\nchoice = input()\nif (choice == 'y'):\n setup()\n\nmint = None\nbond = None\nvia = None\n\n'''\nif 'MINTOS' in config:\n mintos_mail = config['MINTOS']['MAIL']\n mintos_pw = config['MINTOS']['PW']\n mint = mintos.Mintos(mintos_mail, mintos_pw)\n if mint.s is None:\n mint = None\n'''\n\nif 'BONDORA' in config:\n bondora_mail = config['BONDORA']['MAIL']\n bondora_pw = config['BONDORA']['PW']\n bond = bondora.Bondora(bondora_mail, bondora_pw)\n if bond.s is None:\n bond = None\n\nif 'VIAINVEST' in config:\n via = viainvest.Viainvest(config['VIAINVEST']['MAIL'], config['VIAINVEST']['PW'])\n if via.s is None:\n via = None\n\nif bond is not None:\n print('===================================================')\n print('\\t\\t\\tBONDORA')\n print('{:<30} {:<8.2f}'.format('Current Value', bond.CurrentValue))\n print('{:<30} {:<8.2f}'.format('Available Funds', bond.AvailableFunds))\n print('{:<30} {:<8.2f}'.format('Lifetime Net Profit', bond.LifetimeNetProfit))\n print('{:<30} {:<8.2f}'.format('Lifetime Portfolio Value', bond.LifetimePortfolioValue))\n print('{:<30} {:<8.2f}%'.format('Net Annual Return', bond.YieldToMaturity))\n print('{:<30} {:<8.2f}%'.format('Overdue Rate', bond.OverdueRate))\n\nif via is not None:\n print('===================================================')\n print('\\t\\t\\tVIAINVEST')\n print('{:<30} {:<8.2f}'.format('Current Value', via.CurrentValue))\n print('{:<30} {:<8.2f}'.format('Available Funds', via.AvailableFunds))\n \"\"\"\n print('{:<30} {:<8.2f}'.format('Available Funds', bond.AvailableFunds))\n print('{:<30} {:<8.2f}'.format('Lifetime Net Profit', bond.LifetimeNetProfit))\n print('{:<30} {:<8.2f}'.format('Lifetime Portfolio Value', bond.LifetimePortfolioValue))\n print('{:<30} {:<8.2f}%'.format('Net Annual Return', bond.YieldToMaturity))\n print('{:<30} {:<8.2f}%'.format('Overdue Rate', bond.OverdueRate))\n \"\"\"\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"170159925","text":"n = int(input())\narr = ''.join(input().split())\n \nmid = n//2 - 1\nres = ''\n \nfor i in range(mid, -1, -1):\n if arr[i] == arr[n-1-i]:\n continue\n if arr[i] > 
arr[n-1-i]:\n res += arr[:i+1] + arr[i+1:mid+1]\n break\n else:\n res += arr[:i] + str(int(arr[i]) + 1) + arr[i+1:mid+1]\n break\n \nif res == '':\n res = arr[:mid]\n res += str(int(arr[mid])+1)\nif n%2 == 0:\n res += res[::-1]\nelse:\n res += arr[mid+1]\n res += res[-2::-1]\nprint(res)\n","sub_path":"polash smallest possible palindrome.py","file_name":"polash smallest possible palindrome.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"596779057","text":"ans = list('BFTLC')\ninp = input()\nfull = True;\nfor x in range(0, len(ans)):\n\tif ans[x] not in inp:\n\t\tprint(ans[x])\n\t\tfull = False\n\nif full:\n\tprint(\"NO MISSING PARTS\")\n","sub_path":"Python/Ship.py","file_name":"Ship.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"224379365","text":"\"\"\"\nFile: randomwalk.py\n\nA turtle takes a random walk.\n\"\"\"\n\nimport random\nfrom turtle import Turtle\n\n\ndef randomWalk(t, turns, distance=20):\n for x in range(turns):\n if x % 2 == 0:\n t.left(random.randint(0, 270))\n else:\n t.right(random.randint(0, 270))\n t.forward(distance)\n\n\ndef main():\n t = Turtle()\n t.shape(\"turtle\")\n randomWalk(t, 40, 30)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Ch_07_Student_Files/randomwalk.py","file_name":"randomwalk.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"622659802","text":"\"\"\"Day 11: 2D arrays\"\"\"\n\n\ndef read_input():\n \"\"\"Returns a nested list from input in the following form:\n [ [x,y,z], [x,y,z], [x,y,z], [x,y,z], [x,y,z], [x,y,z] ]\"\"\"\n return [[int(x) for x in input().split()] for _ in range(6)]\n\n\ndef list_hourglasses(array):\n \"\"\"Receives a 6 × 6 2D array and returns all 16 hourglasses contained in\n it.\"\"\"\n hourglasses = []\n for index_x in range(4):\n for index_y in range(4):\n hourglass = []\n hourglass.extend([\n array[index_x][index_y], array[index_x][index_y + 1],\n array[index_x][index_y + 2], array[index_x + 1][index_y + 1],\n array[index_x + 2][index_y], array[index_x + 2][index_y + 1],\n array[index_x + 2][index_y + 2]\n ])\n hourglasses.append(hourglass)\n return hourglasses\n\n\ndef max_sum(array):\n \"\"\"Returns hourglass with the greatest sum.\"\"\"\n sum_list = []\n for hourglass in array:\n hour_sum = sum(hourglass)\n sum_list.append(hour_sum)\n return max(sum_list)\n\n\nif __name__ == '__main__':\n ARRAY = read_input()\n HOURGLASSES = list_hourglasses(ARRAY)\n GREATEST_SUM = max_sum(HOURGLASSES)\n print(GREATEST_SUM)\n","sub_path":"websites_programming_challenges/hackerrank/30-days-of-code/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"276193954","text":"#!/usr/bin/python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport netCDF4\nimport sys\nimport time\nfrom datetime import datetime\n\nsite = sys.argv[1]\nclimurl = sys.argv[2]\nobsurl = sys.argv[3]\nmodurl = sys.argv[4]\nlati = float(sys.argv[5])\nloni = float(sys.argv[6])\nyeari = float(sys.argv[7])\n\nmodel = modurl.rsplit('/')[-3]\nobs = obsurl.rsplit('/')[-3]\n\n\n'''\nsite = 'AltaFloresta'\nobsurl = '/lustre/storeB/project/aerocom/aerocom1/AEROCOM_OBSDATA/Export/AERONETSun2.0/od550aer_daily_AltaFloresta.txt'\nmodurl = 
'/lustre/storeB/project/aerocom/aerocom1/ECMWF_OSUITE_NRT.old/renamed/aerocom.ECMWF_OSUITE_NRT.daily.alertaer.2015.nc'\nlati = 68.35\nloni = 18.817\nyeari = 2014\n'''\n\n\n#clim reading (NetCDF)\nvarclim='od550aerclim'\nnc = netCDF4.Dataset(climurl)\nnc.variables\nlat = nc.variables['lat'][:]\nlon = nc.variables['lon'][:]\ntimes = nc.variables['time']\n#conversion 0|360 to -180|180\nfor il,l in enumerate(lon):\n if l>180:\n lon[il]=lon[il]-360\n\n\n# function to find index to nearest point\ndef near(array,value):\n idx=(np.abs(array-value)).argmin()\n return idx\n\n# find nearest point to desired location\nix = near(lon, loni)\niy = near(lat, lati)\n\n# get all time records of variable [vname] at indices [iy,ix]\n#var = nc.variables[varalert]\n#modalert = var[:,iy,ix]\nvar = nc.variables[varclim]\naodclim = var[:,iy,ix]\n\n#observation reading\n#model reading (NetCDF)\nvarod550='od550aer'\nnc = netCDF4.Dataset(obsurl)\nnc.variables\nlat = nc.variables['lat'][:]\nlon = nc.variables['lon'][:]\n#times = nc.variables['time']\n#conversion 0|360 to -180|180\nfor il,l in enumerate(lon):\n if l>180:\n lon[il]=lon[il]-360\n\n\n# find nearest point to desired location\nix = near(lon, loni)\niy = near(lat, lati)\n\n# get all time records of variable [vname] at indices [iy,ix]\nvar = nc.variables[varod550]\nobsaod = var[:,iy,ix]\nfrom datetime import date\n#index \nidx=np.arange(1,len(obsaod)+1)\nobsm = []\n#TO DO: CHECK FOR LEAP YEARS\nfor i in idx:\n obsm.append(date.fromordinal(i).month)\n\nif len(obsaod)>0:\n #observation alert process\n obsalert = [0] * len(aodclim)\n jd=[]\n for i in range(len(aodclim)):\t\n if obsaod[i]>0.5 and obsaod[i]/aodclim[i]>2:\n obsalert[i]=1\n if obsaod[i]>0.5 and obsaod[i]/aodclim[i]>3:\n obsalert[i]=2\n if obsaod[i]>0.5 and obsaod[i]/aodclim[i]>5:\n obsalert[i]=3\n jd.append(i+1)\n\n #not nan\n notnan=[]\n for i in range(len(aodclim)):\n if not np.isnan(obsaod[i]):\n notnan.append(i)\n\n\n #model reading (NetCDF)\n varod550='od550aer'\n nc = netCDF4.Dataset(modurl)\n nc.variables\n lat = nc.variables['lat'][:]\n lon = nc.variables['lon'][:]\n #times = nc.variables['time']\n\n # find nearest point to desired location\n ix = near(lon, loni)\n iy = near(lat, lati)\n\n # get all time records of variable [vname] at indices [iy,ix]\n var = nc.variables[varod550]\n modaod = var[:,iy,ix]\n\n #model alert process\n modalert = [0] * len(aodclim)\n jd=[]\n for i in range(len(aodclim)):\t\n if modaod[i]>0.5 and modaod[i]/aodclim[i]>2:\n modalert[i]=1\n if modaod[i]>0.5 and modaod[i]/aodclim[i]>3:\n modalert[i]=2\n if modaod[i]>0.5 and modaod[i]/aodclim[i]>5:\n modalert[i]=3\n jd.append(i+1)\n\n\n for i in notnan:\n print(jd[i],obsm[i],modalert[i],aodclim[i],obsaod[i],obsalert[i])\n\n\n def stat(obsalert, modalert, ilist):\n #statistics S[AlertObservation][AlertModel]\n S00, S01, S02, S03 = 0, 0, 0, 0\n S10, S11, S12, S13 = 0, 0, 0, 0\n S20, S21, S22, S23 = 0, 0, 0, 0\n S30, S31, S32, S33 = 0, 0, 0, 0\n\n for i in ilist:\t\n if obsalert[i]==0 and modalert[i]==0:\n S00=S00+1\n if obsalert[i]==0 and modalert[i]==1:\n S01=S01+1\n if obsalert[i]==0 and modalert[i]==2:\n S02=S02+1\n if obsalert[i]==0 and modalert[i]==3:\n S03=S03+1\n if obsalert[i]==1 and modalert[i]==0:\n S10=S10+1\n if obsalert[i]==1 and modalert[i]==1:\n S11=S11+1\n if obsalert[i]==1 and modalert[i]==2:\n S12=S12+1\n if obsalert[i]==1 and modalert[i]==3:\n S13=S13+1\n if obsalert[i]==2 and modalert[i]==0:\n S20=S20+1\n if obsalert[i]==2 and modalert[i]==1:\n S21=S21+1\n if obsalert[i]==2 and modalert[i]==2:\n 
S22=S22+1\n if obsalert[i]==2 and modalert[i]==3:\n S23=S23+1\n if obsalert[i]==3 and modalert[i]==0:\n S30=S30+1\n if obsalert[i]==3 and modalert[i]==1:\n S31=S31+1\n if obsalert[i]==3 and modalert[i]==2:\n S32=S32+1\n if obsalert[i]==3 and modalert[i]==3:\n S33=S33+1\n \n return(S00,S01,S02,S03,S10,S11,S12,S13,S20,S21,S22,S23,S30,S31,S32,S33)\n\n\n periods=['JFM','AMJ','JAS','OND','Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\n #write in ascii file\n import os\n directory='out/' + model + '-' + obs + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n f = open(directory + site + '-alert-'+str(int(yeari))+'.txt', 'w')\n\n #header\n f.write('site:'+'\\t'+site+'\\n')\n f.write('lat:'+'\\t'+str(lati)+'\\n')\n f.write('long:'+'\\t'+str(loni)+'\\n')\n\n towrt=['tim','tot','S00','S01','S02','S03','S10','S11','S12','S13','S20','S21','S22','S23','S30','S31','S32','S33','HSS']\n for i in range(len(towrt)):\n f.write(str(towrt[i])+'\\t')\n f.write('\\n')\n\n #stat per season\n for s in range(len(periods)):\n iperiod=[]\n print(periods[s])\n if s==0:\n mis=[1,2,3]\n if s==1:\n mis=[4,5,6]\n if s==2:\n mis=[7,8,9]\n if s==3:\n mis=[10,11,12]\n if s==4:\n mis=[1]\n if s==5:\n mis=[2]\n if s==6:\n mis=[3]\n if s==7:\n mis=[4]\n if s==8:\n mis=[5]\n if s==9:\n mis=[6]\n if s==10:\n mis=[7]\n if s==11:\n mis=[8]\n if s==12:\n mis=[9]\n if s==13:\n mis=[10]\n if s==14:\n mis=[11]\n if s==15:\n mis=[12]\n\n for i in notnan:\n if obsm[i] in mis:\n iperiod.append(i)\n \n if len(iperiod)>0:\n [S00,S01,S02,S03,S10,S11,S12,S13,S20,S21,S22,S23,S30,S31,S32,S33]=stat(obsalert,modalert,iperiod)\n else:\n [S00,S01,S02,S03,S10,S11,S12,S13,S20,S21,S22,S23,S30,S31,S32,S33]=[np.nan]*16\n print(periods[s],len(iperiod),S00,S01,S02,S03,S10,S11,S12,S13,S20,S21,S22,S23,S30,S31,S32,S33)\n \n #calculation of Heidke Skill Score\n a = S11 + S22 + S33 + S12 + S13 + S21 + S23 + S31 +S32\n b = S10 + S20 + S30\n c = S01 + S02 + S03\n d = S00\n\n\n #makes sense if measurements\n if len(iperiod)>0:\n num = a*d - b*c\n denum = (a+c)*(c+d) + (a+b)*(b+d)\n if denum != 0:\n HSS = 2 * (a*d - b*c) / ( (a+c)*(c+d) + (a+b)*(b+d) )\n else:\n HSS = np.nan\n else:\n HSS=np.nan\n\n towrt=[periods[s],len(iperiod),S00,S01,S02,S03,S10,S11,S12,S13,S20,S21,S22,S23,S30,S31,S32,S33,HSS]\n \n #stats writing\n for i in range(len(towrt)):\n f.write(str(towrt[i])+'\\t')\n f.write('\\n')\n #f.write('\\t'+curst_stat+'\\n')\n\n f.close()\n\n","sub_path":"alert-modmod.py","file_name":"alert-modmod.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"152135131","text":"# Using NetworkX package and conllu package\r\n# This is baseline conditions module; more conditions can be added as functions\r\nimport os\r\nfrom io import open\r\nimport networkx as nx\r\nfrom operator import itemgetter\r\nimport random\r\nfrom Measures_rand import *\r\nfrom Measures import *\r\nimport treegen as gen\r\nimport depgraph as dep\r\n\r\nclass Random_base(object):\r\n def __init__(self, tree): # tree has an abstract node=0 and real nodes =1,2,... \r\n self.tree=tree # tree encodes the nodes and edges content in dictionary format. It uses directed graph (DiGraph) feature of networkX package. 
For example, nodes are encoded like this - tree.nodes={1:{form:'this',POS:'PRN'},2:{...}} \r\n self.ls_rand=[]\r\n\r\n## def crossings_in(self,tree):\r\n## for edge in tree.edges():\r\n## n1, n2 = sorted(edge)\r\n## for edge_ in tree.edges():\r\n## n1_, n2_ = sorted(edge_)\r\n## if not (n2_ <= n1 or n2 <= n1_ or (n1 <= n1_ and n2_ <= n2) or (n1_ <= n1 and n2 <= n2_)):\r\n## yield frozenset({edge, edge_})\r\n\r\n def num_cross_rand(self,randtree,abs_root): # requires random tree and its abstract root=10000\r\n comput=Compute_measures_rand(randtree,abs_root)\r\n ncross_random=0\r\n for edgex in randtree.edges:\r\n if not edgex[0]==abs_root: \r\n if comput.is_projective(edgex): # checks if edge is projective or not\r\n ncross_random += 0\r\n else:\r\n ncross_random += 1\r\n return ncross_random # returns number of crossings in the random tree \r\n\r\n def is_equal_num_crossings(self,randtree,abs_root,num_cross_real): # requires random tree, its abstract root and numCrossings in real tree \r\n flag=False\r\n num_cross_random=self.num_cross_rand(randtree,abs_root) \r\n if num_cross_random==num_cross_real: # checks if the number of crossings is equal in the real and random tree\r\n flag=True\r\n return flag\r\n\r\n def is_same_tree(self,randtree,abs_root):\r\n rand_tree=nx.DiGraph()\r\n for edgex in randtree.edges:\r\n if not edgex[0]==abs_root:\r\n rand_tree.add_edge(edgex[0],edgex[1]) # regenerates a dummy random tree by removing its abstract root\r\n\r\n real_tree = nx.DiGraph()\r\n for edgez in self.tree.edges:\r\n if not edgez[0]==0:\r\n real_tree.add_edge(edgez[0],edgez[1]) # regenerates a dummy real tree by removing its abstract root\r\n mapping_real=dict(zip(real_tree.nodes(),range(1,len(real_tree.nodes)+1)))\r\n mapping_rand=dict(zip(rand_tree.nodes(),range(1,len(rand_tree.nodes)+1)))\r\n REC_real=nx.relabel_nodes(real_tree,mapping_real)\r\n REC_rand=nx.relabel_nodes(rand_tree,mapping_rand)\r\n return REC_real.edges==REC_rand.edges\r\n\r\n\r\n def is_similar_DD_distribution(self,randtree,abs_root):\r\n find=Compute_measures_rand(randtree,abs_root)\r\n rand_tree=nx.DiGraph()\r\n for edgex in randtree.edges:\r\n if not edgex[0]==abs_root:\r\n rand_tree.add_edge(edgex[0],edgex[1]) # regenerates a dummy random tree by removing its abstract root\r\n \r\n random_dd_sample=[]\r\n for edgey in rand_tree.edges:\r\n random_dd_sample.append(find.dependency_distance(edgey))\r\n random_dd_sample.sort()\r\n \r\n get=Compute_measures(self.tree)\r\n real_tree = nx.DiGraph()\r\n for edgez in self.tree.edges:\r\n if not edgez[0]==0:\r\n real_tree.add_edge(edgez[0],edgez[1]) # regenerates a dummy real tree by removing its abstract root\r\n real_dd_sample=[]\r\n for edgev in real_tree.edges:\r\n real_dd_sample.append(get.dependency_distance(edgev))\r\n real_dd_sample.sort()\r\n return random_dd_sample==real_dd_sample\r\n \r\n def rand_tree(self,num_cross_real): # requires number of crossings from the real tree\r\n real_tree = nx.DiGraph()\r\n for edgez in self.tree.edges:\r\n if not edgez[0]==0:\r\n real_tree.add_edge(edgez[0],edgez[1]) # regenerates a dummy real tree by removing its abstract root\r\n edge_list=list(real_tree.edges())\r\n node_list=list(real_tree.nodes()) \r\n random.shuffle(edge_list)\r\n random.shuffle(node_list) # shuffles the ordering of the nodes and edges of the dummy real tree \r\n\r\n treex=nx.DiGraph() # generates an empty random tree \r\n treex.add_nodes_from(node_list)\r\n\r\n for nodex in treex.nodes:\r\n if self.tree.has_node(self.tree.nodes[nodex]['head']): # to handle disjoint trees\r\n if not 
self.tree.nodes[nodex]['head']==0:\r\n treex.add_edge(self.tree.nodes[nodex]['head'],nodex) # adds edges as relation between nodes\r\n \r\n mapping=dict(zip(treex.nodes(),range(1,len(treex.nodes)+1)))\r\n treey=nx.relabel_nodes(treex,mapping) \r\n\r\n abstract_root=1000\r\n real_root=next(nx.topological_sort(treey))\r\n treey.add_edge(abstract_root,real_root) # adds an abstract root to the random tree \r\n for edgex in treey.edges:\r\n treey.nodes[edgex[1]]['head']=edgex[0]\r\n\r\n if self.is_equal_num_crossings(treey,abstract_root,num_cross_real): # matches the no. of crossings in the real and random tree\r\n if not self.is_same_tree(treey,abstract_root):\r\n self.ls_rand.append(treey) # adds the random tree to the list\r\n\r\n def gen_random(self,num_cross_real): # requires number of crossings from the real tree\r\n n = len(self.tree.edges)\r\n rand_out=[]\r\n if n<30:\r\n x=0\r\n while len(self.ls_rand)==0 and x<60000: # checks if the list of random trees is empty and limits the generating attempts \r\n x=x+1\r\n self.rand_tree(num_cross_real)\r\n rand_out=self.ls_rand\r\n return rand_out # returns the list of random trees\r\n","sub_path":"baseline_conditions_Order.py","file_name":"baseline_conditions_Order.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"562137854","text":"import sys, pyperclip\n\ndef add_prefix(text, prefix):\n \n lines = text.split(\"\\n\")\n\n for i in range(len(lines)):\n lines[i] = prefix + lines[i]\n\n text = '\\n'.join(lines)\n\n return text\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('usage: python add_prefix.py <prefix> - add a prefix to each line of the text in the clipboard')\n sys.exit(1)\n\n prefix = sys.argv[1]\n text = pyperclip.paste()\n print(\"text from clipboard:\\n{}\\n{}\\n\".format('-'*20,text))\n text = add_prefix(text, prefix)\n pyperclip.copy(text)  # write the prefixed text back to the clipboard\n print(\"text copied back to clipboard:\\n{}\\n{}\\n\".format('-'*20,text))\n","sub_path":"python/exam/add_prefix.py","file_name":"add_prefix.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"389176785","text":"from django.http import HttpResponse, Http404\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import render, redirect, get_object_or_404\n\nfrom .models import Movie\n\nclass MovieUpcomingListView(ListView):\n\ttemplate_name = \"home-page.html\"\n\n\tdef get_queryset(self, *args, **kwargs):\n\t\trequest = self.request\n\t\treturn Movie.objects.upcoming()\n\nclass MovieUpcomingDetailView(DetailView):\n\ttemplate_name = \"upcoming-detail.html\"\n\n\tdef get_queryset(self, *args, **kwargs):\n\t\trequest = self.request\n\t\treturn Movie.objects.upcoming()\n\nclass MovieDetailSlugView(DetailView):\n\tqueryset = Movie.objects.all()\n\ttemplate_name = \"movie-detail.html\"\n\n\tdef get_object(self, *args, **kwargs):\n\t\trequest = self.request\n\t\tslug = self.kwargs.get('slug')\n\t\ttry:\n\t\t\tinstance = Movie.objects.get(slug=slug)\n\t\texcept Movie.DoesNotExist:\n\t\t\traise Http404(\"Not Found\")\n\t\texcept Movie.MultipleObjectsReturned:\n\t\t\tqs = Movie.objects.filter(slug=slug)\n\t\t\tinstance = qs.first()\n\t\texcept:\n\t\t\traise Http404(\"Technical Error.\")\n\t\treturn instance\n\ndef movie_list_view(request):\n\tqueryset = Movie.objects.all()\n\tcontext = {\n\t\t\"object_list\": queryset\n\t}\n\tprint(queryset)\n\n\treturn render(request, \"home-page.html\", context)\n\ndef 
def movie_detail_view(request, pk=None, *args, **kwargs):\n\tinstance = Movie.objects.get_by_id(pk)\n\tif instance is None:\n\t\traise Http404(\"Movie does not exist\")\n\tcontext = {\n\t\t\"object\": instance\n\t}\n\treturn render(request, \"movie-detail.html\", context)","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"326287295","text":"\"\"\"\n2.\tCount the even and odd digits of a natural number entered by the user.\nFor example, if the number 34560 is entered, it has 3 even digits\n(4, 6 and 0) and 2 odd digits (3 and 5).\n\"\"\"\n\n# There is no handling yet for the case when the user enters something that is not a number\n\ndef cycle():\n    while True:\n        a = str(input('Enter a number: '))\n        even = 0\n        odd = 0\n        for i in a:\n            if int(i) % 2 == 0:\n                even += int(i)\n            else:\n                odd += int(i)\n        print('Sum of even digits:', even)\n        print('Sum of odd digits:', odd)\n\n\n# Understanding did not come right away...(\ndef recursion(digit, even, odd):\n    check = digit % 10\n    if digit == 0:\n        print('Even:', even)\n        print('Odd:', odd)\n        digit = int(input('Enter a number: '))\n        recursion(digit, even = 0, odd = 0)\n    elif check % 2 == 0:\n        even += check\n        recursion(digit // 10, even, odd)\n    else:\n        odd += check\n        recursion(digit // 10, even, odd)\n\nif __name__ == '__main__':\n    # cycle()\n    dig = int(input('Enter a number: '))\n    even = 0\n    odd = 0\n    recursion(dig, even, odd)","sub_path":"Lesson_2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"69378310","text":"# Write a class to hold player information, e.g. what room they are in\n# currently.\nfrom item import LightSource\n\nclass Player:\n    def __init__(self, name, currentRoom):\n        self.name = name\n        self.currentRoom = currentRoom\n        self.inventory = []\n        self.score = 0\n        self.hasLight = False\n    def travel(self, direction):\n        nextRoom = self.currentRoom.getRoomInDirection(direction)\n        if nextRoom is not None:\n            self.currentRoom = nextRoom\n            if self.hasLight == True or self.currentRoom.hasLight == True:\n                print(nextRoom)\n            else:\n                print(f\"\\n\\n{self.currentRoom.name}\\n\\n   {self.currentRoom.description}\\n\\n   It's pitch black!\\n\")\n        else:\n            print(\"You cannot move in that direction.\")\n    def look(self, direction=None):\n        if direction is None:\n            print(self.currentRoom)\n        else:\n            nextRoom = self.currentRoom.getRoomInDirection(direction)\n            if nextRoom is not None:\n                print(nextRoom)\n            else:\n                print(\"There is nothing there.\")\n    def takeItem(self, item):\n        if self.hasLight == False and self.currentRoom.hasLight == False:  # dark only when neither the player nor the room has a light, matching travel()\n            print(\"Good luck finding that in the dark!\")\n        else:\n            self.inventory.append(item)\n            self.score += item.on_take()\n            if isinstance(item, LightSource):\n                self.hasLight = True\n    def dropItem(self, item):\n        for i in self.inventory:\n            if i.name == item:\n                if isinstance(i, LightSource):\n                    i.on_drop()\n                    self.hasLight = False\n                self.inventory.remove(i)\n                return i\n    def checkInventory(self):\n        print(\"   Inventory:\")\n        for i in self.inventory:\n            print(f\"   {i.name}: {i.description}\")\n    def checkScore(self):\n        print(f\"Score: {self.score}\")\n    def addScore(self, value):\n        self.score += value","sub_path":"src/days-2-4-adv/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"164025903","text":"# Making a df file\n# 
Version 3\n# Vikram Anantha\n# Feb 6 2021\n\"\"\"\n\nID | Timestamp | Name | Email | Pemail | Grade | Town | State | Found out about HELM | Class | Took_class_3 | Took_class_4 | Took_class_5 ... | Probs_of_taking_class\n22 | 2020-09-07 | Tom | tom@gmail.com | tomsmom@gmail.com | 6 | Lex | MA | Whatsapp Group | [5] | 0 | 1 | 0 ... | 1 or 0\n\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom helper_functions import *\n\ndef main():\n    cnx = create_connection()\n    cursor = cnx.cursor(buffered=True)\n\n    master = {\n        'id': [],\n        'student_id': [],\n        'timestamp': [],\n        'name': [],\n        'email': [],\n        'pemail': [],\n        'grade': [],\n        'town': [],\n        'state': [],\n        'heard_about': [],\n        'took_classes': [],\n        'class': [],\n        'probs': [],\n    }\n    numbers = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n\n    sql0 = 'SELECT id FROM classes'\n    cursor.execute(sql0)\n    cid = cursor.fetchall()\n    for l in range(3, cid[-1][0]+1):\n        master[\"took_class_%s\" % l] = []\n\n    sql = 'SELECT id, Timestamp, Student_Name, Email_Address, Parent_Email, Grade, City, State, Heard_about_us FROM students ORDER BY Timestamp'\n    sql2 = 'SELECT class_id FROM classes_to_students WHERE student_id = \"{}\" ORDER BY timestamp'\n    cursor.execute(sql)\n    students = cursor.fetchall()\n    count = 1\n    student = 1\n    columns = ['student_id', 'timestamp', 'name', 'email', 'pemail', 'grade', 'town', 'state', 'heard_about', 'took_classes', 'class', 'probs', ]\n    for i in range(3, cid[-1][0]+1):\n        columns.append('took_class_%s' % i)\n    for i in students:\n        cursor.execute(sql2.format(i[0]))\n        theclasses = cursor.fetchall()\n        classes = []\n        for l in theclasses:\n            #print(\"%s %s\" % (i[2], j[0]))\n            if l[0] not in classes:\n                classes.append(l[0])\n\n        tag1 = []\n        for k in classes:\n            cursor.execute('SELECT tag_id FROM classes_to_tags WHERE class_id = \"{}\"'.format(k))\n            tags1 = cursor.fetchall()\n\n            for taqwerg in tags1:\n                tag1.append(taqwerg[0])\n        for j in range(3, cid[-1][0]+1):\n            for k in range(3, cid[-1][0]+1):\n                hasclass = 0\n                if k in classes and k != j:\n                    hasclass = 1\n                master['took_class_%s' % k].append(hasclass)\n\n            sb = str(i[5])\n            gradenum = ''\n            for ily in sb:\n                if ily == '.':\n                    break\n                if (ily in numbers):\n                    gradenum += ily\n            if gradenum == '':\n                gradenum = '0'\n            gradenum = int(gradenum)\n\n            master['id'].append(count)\n            count += 1\n            # print(str(type(i)) + \" \" + str(i))\n            master['student_id'].append(i[0])\n            master['timestamp'].append(i[1])\n            master['name'].append(i[2])\n            master['email'].append(i[3])\n            master['pemail'].append(i[4])\n            master['grade'].append(gradenum)\n            master['town'].append(i[6])\n            master['state'].append(i[7])\n            master['heard_about'].append(i[8])\n            master['took_classes'].append(classes)\n            master['class'].append(j)\n\n            probs = 0  # default when no tags overlap and the class was not taken\n            cursor.execute('SELECT tag_id FROM classes_to_tags WHERE class_id = \"{}\"'.format(j))\n            tags2 = cursor.fetchall()\n            tag2 = list(np.reshape(tags2, (1, len(tags2)))[0])\n\n            # for taqwerg in tags2:\n            #     tag2.append(taqwerg[0])\n\n            # print(tag1)\n            numtags = 0\n            # for k in tag2:\n            #     if k in tag1:\n            #         numtags+=1\n            numtags = len(set(tag1) & set(tag2))\n            # cursor.execute('SELECT tag FROM tags WHERE id=\"{}\"'.format(k))\n            # ifjnsk = cursor.fetchall()[0][0]\n            # print(\">>> %s\" % ifjnsk)\n            # input(\"Class: %s\" % j)\n            if numtags >= 1:\n                probs = 2\n            if numtags >= 2:\n                probs = 5\n            if numtags >= 3:\n                probs = 7\n            if numtags >= 4:\n                probs = 8\n            if j in classes:\n                probs = 10\n            # print(probs)\n            master['probs'].append(probs)\n\n        if (student % 5 == 0):\n            print(\"Student %s\" % student)\n
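        # Flush a partial CSV every 20 students so long runs can be inspected early.\n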
        if student % 20 == 0:\n            print(\"Updating Dataframe...\", end='')\n            df = pd.DataFrame(master, columns = columns)\n            df.to_csv('students_v3.csv', index = False, header=True)\n            print(\" Updated!\")\n        student += 1\n\n\n    df = pd.DataFrame(master, columns = columns)\n    print(df)\n    df.to_csv('students_v3.csv', index = False, header=True)\n\nif __name__ == '__main__':\n    main()","sub_path":"Science Fair/make_df_v3.py","file_name":"make_df_v3.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"58480252","text":"import threading\nimport argparse\nimport time\nimport amqp\n\n\ndef worker(ch, requests):\n    for _ in range(requests):\n        ch.basic_publish(amqp.Message('hello worlds'), 'test_exchange', 'queue1')\n\n\ndef first(pool_size, requests):\n    conn = amqp.Connection()\n    conn.connect()\n    ch = conn.channel()\n    threads = []\n    for _ in range(pool_size):\n        t = threading.Thread(target=worker, args=[ch, requests])\n        t.start()\n        threads.append(t)\n    for t in threads:  # join every worker, not only the last one started\n        t.join()\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Publish test messages and time the run.')\n    parser.add_argument('-workers', type=int, default=1)\n    parser.add_argument('-msgs', type=int, default=1000)\n    args = parser.parse_args()\n    print(args.workers, args.msgs)\n    t = time.time()\n    first(args.workers, args.msgs)\n    print('time:', time.time() - t)\n    time.sleep(5)\n    # conn = amqp.Connection()\n    # conn.connect()\n    # ch = conn.channel()\n    # q = ch.queue_purge('test_queue')\n    # print('purged:', q)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"448486151","text":"import json\nimport pandas as pd\n\nfrom .. import Client as VanillaClient\nfrom ..constants import DEFAULT_DECISION_TREE_VERSION\nfrom ..errors import CraftAiBadRequestError\nfrom .interpreter import Interpreter\nfrom .utils import format_input, is_valid_property_value, create_timezone_df\n\n\ndef chunker(to_be_chunked_df, chunk_size):\n    return (\n        to_be_chunked_df[pos : pos + chunk_size]\n        for pos in range(0, len(to_be_chunked_df), chunk_size)\n    )\n\n\nclass Client(VanillaClient):\n    \"\"\"Client class for craft ai's API using pandas dataframe types\"\"\"\n\n    def add_agent_operations(self, agent_id, operations):\n        if isinstance(operations, pd.DataFrame):\n            if not isinstance(operations.index, pd.DatetimeIndex):\n                raise CraftAiBadRequestError(\n                    \"Invalid dataframe given, it is not time indexed.\"\n                )\n            if operations.index.tz is None:\n                raise CraftAiBadRequestError(\n                    \"\"\"tz-naive DatetimeIndex are not supported,\n                    it must be tz-aware.\"\"\"\n                )\n            agent = super(Client, self).get_agent(agent_id)\n            operations = operations.copy(deep=True)\n\n            tz_col = [\n                key\n                for key, value in agent[\"configuration\"][\"context\"].items()\n                if value[\"type\"] == \"timezone\"\n            ]\n            if tz_col:\n                tz_col = tz_col[0]\n                operations[tz_col] = create_timezone_df(operations, tz_col).iloc[:, 0]\n\n            chunk_size = self.config[\"operationsChunksSize\"]\n            for chunk in chunker(operations, chunk_size):\n                chunk_operations = [\n                    {\n                        \"timestamp\": row.name.value\n                        // 10 ** 9,  # Timestamp.value returns nanoseconds\n                        \"context\": {\n                            col: format_input(row[col])\n                            for col in chunk.columns\n                            if is_valid_property_value(col, row[col])\n                        },\n                    }\n                    for _, row in chunk.iterrows()\n                ]\n                super(Client, self).add_agent_operations(agent_id, chunk_operations)\n\n            return {\n                \"message\": 'Successfully added %i operation(s) to the agent \"%s/%s/%s\" 
context.'\n % (\n len(operations),\n self.config[\"owner\"],\n self.config[\"project\"],\n agent_id,\n )\n }\n else:\n return super(Client, self).add_agent_operations(agent_id, operations)\n\n def add_agents_operations_bulk(self, payload):\n \"\"\"Add operations to a group of agents.\n\n :param list payload: contains the informations necessary for the action.\n It's in the form [{\"id\": agent_id, \"operations\": operations}]\n With id that is an str containing only characters in \"a-zA-Z0-9_-\"\n and must be between 1 and 36 characters. It must referenced an\n existing agent.\n With operations either a list of dict or a DataFrame that has\n the form given in the craft_ai documentation and the configuration of\n the agent.\n\n :return: list of agents containing a message about the added\n operations.\n :rtype: list of dict.\n\n :raises CraftAiBadRequestError: if all of the ids are invalid or\n referenced non existing agents or one of the operations is invalid.\n \"\"\"\n # Check all ids, raise an error if all ids are invalid\n valid_indices, _, _ = self._check_entity_id_bulk(\n payload, check_serializable=False\n )\n valid_payload = [payload[i] for i in valid_indices]\n\n new_payload = []\n for agent in valid_payload:\n operations = agent[\"operations\"]\n agent_id = agent[\"id\"]\n if isinstance(operations, pd.DataFrame):\n if not isinstance(operations.index, pd.DatetimeIndex):\n raise CraftAiBadRequestError(\n \"Invalid dataframe given for agent \"\n \"{}, it is not time indexed.\".format(agent_id)\n )\n if operations.index.tz is None:\n raise CraftAiBadRequestError(\n \"tz-naive DatetimeIndex are not supported for \"\n \"agent {}, it must be tz-aware.\".format(agent_id)\n )\n\n agent = super(Client, self).get_agent(agent_id)\n tz_col = [\n key\n for key, value in agent[\"configuration\"][\"context\"].items()\n if value[\"type\"] == \"timezone\"\n ]\n if tz_col:\n tz_col = tz_col[0]\n operations[tz_col] = create_timezone_df(operations, tz_col).iloc[\n :, 0\n ]\n\n new_operations = [\n {\n \"timestamp\": row.name.value\n // 10 ** 9, # Timestamp.value returns nanoseconds\n \"context\": {\n col: format_input(row[col])\n for col in operations.columns\n if is_valid_property_value(col, row[col])\n },\n }\n for _, row in operations.iterrows()\n ]\n new_payload.append({\"id\": agent_id, \"operations\": new_operations})\n elif isinstance(operations, list):\n # Check if the operations are serializable\n json.dumps([agent])\n new_payload.append({\"id\": agent_id, \"operations\": operations})\n else:\n raise CraftAiBadRequestError(\n \"The operations are not put in a DataFrame or a list\"\n \"of dict form for the agent {}.\".format(agent_id)\n )\n\n return super(Client, self).add_agents_operations_bulk(new_payload)\n\n def get_agent_operations(self, agent_id, start=None, end=None):\n operations_list = super(Client, self).get_agent_operations(agent_id, start, end)\n return pd.DataFrame(\n [operation[\"context\"] for operation in operations_list],\n index=pd.to_datetime(\n [operation[\"timestamp\"] for operation in operations_list], unit=\"s\"\n ).tz_localize(\"UTC\"),\n )\n\n def get_agent_states(self, agent_id, start=None, end=None):\n states = super(Client, self).get_agent_states(agent_id, start, end)\n\n return pd.DataFrame(\n [state[\"sample\"] for state in states],\n index=pd.to_datetime(\n [state[\"timestamp\"] for state in states], unit=\"s\"\n ).tz_localize(\"UTC\"),\n )\n\n @staticmethod\n def decide_from_contexts_df(tree, contexts_df):\n if isinstance(contexts_df, pd.DataFrame):\n if not 
isinstance(contexts_df.index, pd.DatetimeIndex):\n raise CraftAiBadRequestError(\n \"Invalid dataframe given, it is not time indexed.\"\n )\n if contexts_df.index.tz is None:\n raise CraftAiBadRequestError(\n \"\"\"tz-naive DatetimeIndex are not supported,\n it must be tz-aware.\"\"\"\n )\n else:\n raise CraftAiBadRequestError(\"Invalid data given, it is not a DataFrame.\")\n return Interpreter.decide_from_contexts_df(tree, contexts_df)\n\n def get_agent_decision_tree(\n self, agent_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION\n ):\n # Convert pandas timestamp to a numerical timestamp in seconds\n if isinstance(timestamp, pd.Timestamp):\n timestamp = timestamp.value // 10 ** 9\n\n return super(Client, self).get_agent_decision_tree(agent_id, timestamp, version)\n\n def get_generator_decision_tree(\n self, generator_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION\n ):\n # Convert pandas timestamp to a numerical timestamp in seconds\n if isinstance(timestamp, pd.Timestamp):\n timestamp = timestamp.value // 10 ** 9\n\n return super(Client, self).get_generator_decision_tree(\n generator_id, timestamp, version\n )\n","sub_path":"craft_ai/pandas/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"2959205","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\" Template plotting script \r\n\"\"\"\r\n# imports\r\nimport os\r\nimport netCDF4 as nc\r\nimport numpy as np\r\nimport plot2D as pl\r\n#\r\n########################## run options\r\n#\r\n# set project path\r\nprojDir = ''\r\n# list of paths for files to plot\r\n## for average by height\r\nhtFiles = [ 'firstFileName.nc',\r\n 'secondFileName.nc', ]\r\n## for time-series\r\ntsFiles = [ 'firstFileName.nc', \r\n 'secondFileName.nc', ]\r\n# height-averaging times, indices\r\ntimeIs = [12,13,14,15,16,17,18,19,20,21,22,23] # last 12 hrs\r\n# x-axis extent of time-series plots [hr]\r\nts_xAxis = (4,24)\r\n# chart styling\r\nlineStyles = ['solid','dashed']\r\nlineColors = ['k',]\r\nlineWidths = [1.0,]\r\nzOrders = [i for i in range(len(htFiles),-1,-1)] # plotting priority\r\nlegendLabels = ['First','Second']\r\n# figure saving\r\nsaveOption = True # option to save all figures on run\r\nsavePath = None # path to save figures, None means relative path\r\ngroupName = 'GroupName_' # start of all figure filenames\r\n#\r\n##########################\r\n#\r\n# open netCDF datasets to list\r\nhtNCs = [nc.Dataset(fileName, 'r') for fileName in htFiles]\r\ntsNCs = [nc.Dataset(fileName, 'r') for fileName in tsFiles]\r\n# get dimensions\r\n## hourly average\r\nhtTimes = [NC.variables['time'][:] for NC in htNCs] # [s]\r\nhtHeights = [NC.variables['z'][:] for NC in htNCs] # [m]\r\nhtHeightsKm = [height*1e-3 for height in htHeights] # [km]\r\n## timeseries\r\ntsTimes = [NC.variables['time'][:] for NC in tsNCs] # [s]\r\ntsTimesHr = [times/3600 for times in tsTimes] # [hr]\r\n# get variables\r\nrho_a = htNCs[0].variables['RHO'] # air density, [kg m-3]\r\nrho_a_srf = float( htNCs[0].variables['RHO'][0,0] ) # [kg m-3]\r\n#\r\n### lists of dictionaries of all variables from each netCDF\r\n##... 
careful if uncommenting, this will unpackage everything\r\n#htVars =[{key:value[:] for key,value in NC.variables.items()} for NC in htNCs]\r\n#tsVars =[{key:value[:] for key,value in NC.variables.items()} for NC in tsNCs]\r\n###\r\n#\r\n## average by height\r\nCLDs = [NC.variables['CLD'][:] for NC in htNCs] # [fraction]\r\nPRECIPs = [NC.variables['PRECIP'][:] for NC in htNCs] # [mm/d]\r\nTVFLUXs = [NC.variables['TVFLUX'][:] for NC in htNCs] # [W m-2]\r\nTHETALs = [NC.variables['THETAL'][:] for NC in htNCs] # [K]\r\nQTs = [NC.variables['QT'][:] for NC in htNCs] # [g/kg]\r\nQCs = [NC.variables['QC'][:] for NC in htNCs] # [g/kg]\r\nW2s = [NC.variables['W2'][:] for NC in htNCs] # [m2 s-2]\r\nWSKEWs = [NC.variables['WSKEW'][:] for NC in htNCs] # [?]\r\n## timeseries\r\nprec_srfs = [NC.variables['prec_srf'][:] for NC in tsNCs] # [kg/kg m/s]\r\nNcs = [NC.variables['Nc'][:] for NC in tsNCs] # [cm-3]\r\nLWPs = [NC.variables['LWP'][:] for NC in tsNCs] # [g m-2]\r\nlhfs = [NC.variables['lhf'][:] for NC in tsNCs] # [kg/kg m/s]\r\ntkes = [NC.variables['tke'][:] for NC in tsNCs] # [m3 s-2]\r\nccs = [NC.variables['cc'][:] for NC in tsNCs] # [fraction]\r\n# close files\r\nfor NC in htNCs+tsNCs:\r\n NC.close()\r\n# calculate plot values\r\n## hourly averages by height\r\n### CLD\r\nCLD_tAvg = np.array([ [np.sum(CLDs[i][timeIs,z])/np.size(CLDs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(CLDs)) ]) # [fraction]\r\n### PRECIP\r\nPRECIP_tAvg = np.array([ [np.sum(PRECIPs[i][timeIs,z])/ \\\r\n np.size(PRECIPs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(PRECIPs)) ]) # [mm/d]\r\n### TVFLUX\r\nTVFLUX_tAvg = np.array([ [np.sum(TVFLUXs[i][timeIs,z])/ \\\r\n np.size(TVFLUXs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(TVFLUXs)) ]) # [W m-2]\r\n### THETAL\r\nTHETAL_tAvg = np.array([ [np.sum(THETALs[i][timeIs,z])/ \\\r\n np.size(THETALs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(THETALs)) ]) # [K]\r\n### QT\r\nQT_tAvg = np.array([ [np.sum(QTs[i][timeIs,z])/np.size(QTs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(QTs)) ]) # [g/kg]\r\n### QC\r\nQC_tAvg = np.array([ [np.sum(QCs[i][timeIs,z])/np.size(QCs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(QCs)) ]) # [g/kg]\r\n### W2\r\nW2_tAvg = np.array([ [np.sum(W2s[i][timeIs,z])/np.size(W2s[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(W2s)) ]) # [m+2 s-2]\r\n### WSKEW\r\nWSKEW_tAvg = np.array([ [np.sum(WSKEWs[i][timeIs,z])/ \\\r\n np.size(WSKEWs[i][timeIs,z]) \\\r\n for z in range(np.size(htHeightsKm[i]))] \\\r\n for i in range(len(W2s)) ]) # [m+2 s-2]\r\n## timeseries\r\n### constants\r\nrho_r = 1000 # liquid water density: [kg m-3]\r\nL_v = 2.5104e+6 # latent heat of vaporization, water: [J/kg] \r\n### convert prec_srfs to [mm/d]\r\n#... get air density at surface\r\n#... 
prec_srf: [kg_w/kg_a * m/s]*(kg_a m-3 / kg_w m-3)*(s/d)*(mm/m) = [mm/d]\r\nprec_srfs_mmd = [prec_srf * (rho_a_srf/rho_r) * 86400*1000 \r\n for prec_srf in prec_srfs] # [mm/d]\r\n### calculate accumulated surface precipitation for timeseries\r\nprec_srfs_accums = [] # set list\r\nfor series in prec_srfs_mmd: # iterate for each prec_srf\r\n prec_srf_accum = []; accum = 0 # set list; set for sum\r\n for n in range(len(series)): # iterate over prec_srf\r\n accum += ( series[n]*(60/86400) ) # add to accum [mm]\r\n prec_srf_accum.append( accum ) # append each accum\r\n prec_srfs_accums.append( prec_srf_accum ) # append each list\r\n### convert lhfs to [W m-2]\r\n#... lhf: [kg_w/kg_a * m/s]*(kg_a m-3 / kg_w m-3)*(J/kg_w) = [W m-2]\r\nlhfs_Wm2 = [lhf *(rho_a_srf/rho_r)*L_v for lhf in lhfs]\r\n#\r\n# plots\r\n#\r\n## average by height\r\n### CLD\r\nCLD_plot = [(CLD_tAvg[i],htHeightsKm[i]) for i in range(len(CLD_tAvg))]\r\npl.plot2D( CLD_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles,\r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle='Cloud cover fraction [ratio]',\r\n yTitle='Height [km]',\r\n xLimit=(0,0.1),\r\n yLimit=(0,4),\r\n spineVis=(True,True,True,True),\r\n spineMajorTicks=(True,True,True,True),\r\n spineMinorTicks=(True,True,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(0.02,0.5),\r\n minorSpace=(None,None),\r\n minorLabels=False,\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'CLD',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### PRECIP\r\nPRECIP_plot = [(PRECIP_tAvg[i],htHeightsKm[i]) \\\r\n for i in range(len(PRECIP_tAvg))]\r\npl.plot2D( PRECIP_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle=r'Precipitation flux [mm $\\mathregular{d^{-1}}$]',\r\n yTitle='Height [km]',\r\n xLimit=(0,2),\r\n yLimit=(0,4),\r\n spineVis=(1,1,True,True),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(0.5,0.5),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'PRECIP',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### TVFLUX\r\nTVFLUX_plot = [(TVFLUX_tAvg[i],htHeightsKm[i]) \\\r\n for i in range(len(TVFLUX_tAvg))]\r\nvertZero = ( np.zeros_like(htHeightsKm[0]), htHeightsKm[0] )\r\npl.plot2D( [vertZero,] + TVFLUX_plot,\r\n chartStyle=['line',],\r\n zOrders=[0,]+zOrders,\r\n lineStyle= ['solid',] + lineStyles,\r\n lineColor= ['0.5'] + lineColors,\r\n lineWidth= [1.0] + lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle=r'Buoyancy flux [W $\\mathregular{m^{-2}}$]',\r\n yTitle='Height [km]',\r\n xLimit=(-20,40),\r\n yLimit=(0,4),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,1,1),\r\n spineMinorTicks=(1,1,1,1),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,1,True),\r\n majorSpace=(10,0.5),\r\n fontSize=(12,14,14,10),\r\n legendLabels=['_',]+legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'TVFLUX',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### THETAL\r\nTHETAL_plot = 
[(THETAL_tAvg[i],htHeightsKm[i]) \\\r\n for i in range(len(THETAL_tAvg))]\r\npl.plot2D( THETAL_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle='Liquid water potential temperature [K]',\r\n yTitle='Height [km]',\r\n xLimit=(295,325),\r\n yLimit=(0,4),\r\n spineVis=(1,1,True,True),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(10,0.5),\r\n minorSpace=(None,None),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'THETAL',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### QT\r\nQT_plot = [(QT_tAvg[i],htHeightsKm[i]) for i in range(len(QT_tAvg))]\r\npl.plot2D( QT_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle=r'Total water [g $\\mathregular{kg^{-1}}$]',\r\n yTitle='Height [km]',\r\n xLimit=(0,20),\r\n yLimit=(0,4),\r\n spineVis=(1,1,1,True),\r\n spineMajorTicks=(1,1,1,True),\r\n spineMinorTicks=(1,1,1,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,1,True),\r\n majorSpace=(2,0.5),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'QT',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### QC\r\nQC_plot = [(QC_tAvg[i],htHeightsKm[i]) for i in range(len(QC_tAvg))]\r\npl.plot2D( QC_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle=r'Cloud water [g $\\mathregular{kg^{-1}}$]',\r\n yTitle='Height [km]',\r\n xLimit=(0,0.05),\r\n yLimit=(0,4),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,1,True),\r\n spineMinorTicks=(1,1,1,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,1,True),\r\n majorSpace=(0.01,0.5),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'QC',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### W2\r\nW2_plot = [(W2_tAvg[i],htHeightsKm[i]) for i in range(len(W2_tAvg))]\r\npl.plot2D( W2_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle=r'Variance of vertical velocity [$\\mathregular{m^{2}s^{-2}}$]',\r\n yTitle='Height [km]',\r\n xLimit=(0,0.3),\r\n yLimit=(0,4),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,1,1),\r\n spineMinorTicks=(1,1,1,1),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(0,0,1,1),\r\n majorSpace=(0.1,0.5),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'W2',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n### WSKEW\r\nWSKEW_plot = [(WSKEW_tAvg[i],htHeightsKm[i]) for i in range(len(WSKEW_tAvg))]\r\nvertZero = ( np.zeros_like(htHeightsKm[0]), htHeightsKm[0] )\r\npl.plot2D( [vertZero,] + WSKEW_plot,\r\n chartStyle=['line',],\r\n zOrders=[0,]+zOrders,\r\n lineStyle= ['solid'] + lineStyles,\r\n lineColor= ['0.5'] + lineColors,\r\n 
lineWidth= [1.0,] + lineWidths,\r\n figSize=(5,5),\r\n title=None,\r\n xTitle='Vertical velocity skewness',\r\n yTitle='Height [km]',\r\n xLimit=(-1,1),\r\n yLimit=(0,4),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,1,1),\r\n spineMinorTicks=(1,1,1,1),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(0,0,1,1),\r\n majorSpace=(0.5,0.5),\r\n fontSize=(12,14,14,10),\r\n legendLabels=['_',]+legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'WSKEW',\r\n fileFormat='png',\r\n quality=300 \r\n )\r\n## timeseries\r\n### prec_srf\r\nprec_srf_plot = [(tsTimesHr[i],prec_srfs_mmd[i]) \\\r\n for i in range(len(prec_srfs_mmd))]\r\npl.plot2D( prec_srf_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title='Surface precipitation rate',\r\n xTitle='Time [hr]',\r\n yTitle='Precip. rate [mm $\\mathregular{d^{-1}}$]',\r\n xLimit=ts_xAxis,\r\n yLimit=(0,10),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,None),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_precip_srf',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n### prec_srf_accum\r\nprec_srfs_accums_plot = [(tsTimesHr[i],prec_srfs_accums[i]) \\\r\n for i in range(len(prec_srfs_accums))]\r\npl.plot2D( prec_srfs_accums_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles,\r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title='Surface precipitation accumulation',\r\n xTitle='Time [hr]',\r\n yTitle='Accumulation [mm]',\r\n xLimit=ts_xAxis,\r\n yLimit=(0,0.5),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,0.1),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='upper left',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_precip_srf_accum',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n### Nc\r\nNc_plot = [(tsTimesHr[i],Ncs[i]) for i in range(len(Ncs))]\r\npl.plot2D( Nc_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title='Mean cloud droplet concentration',\r\n xTitle='Time [hr]',\r\n yTitle='Concentration [$\\mathregular{cm^{-3}}$]',\r\n xLimit=ts_xAxis,\r\n yLimit=(0,100),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,None),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_Nc',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n### LWP\r\nLWP_plot = [(tsTimesHr[i],LWPs[i]) for i in range(len(LWPs))]\r\npl.plot2D( LWP_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title='Mean liquid water path',\r\n xTitle='Time [hr]',\r\n yTitle='LWP [g $\\mathregular{m^{-2}}$]',\r\n 
xLimit=ts_xAxis,\r\n yLimit=(0,100),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,None),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_LWP',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n### lhf\r\nlhf_plot = [(tsTimesHr[i],lhfs_Wm2[i]) for i in range(len(lhfs_Wm2))]\r\npl.plot2D( lhf_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles,\r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title=None,\r\n xTitle='Time [hr]',\r\n yTitle='Latent heat flux [W $\\mathregular{m^{-2}}$]',\r\n xLimit=ts_xAxis,\r\n yLimit=(0.14,0.2),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,0.005),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='upper left',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_lhf',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n### tke\r\ntke_plot = [(tsTimesHr[i],tkes[i]) for i in range(len(tkes))]\r\npl.plot2D( tke_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title='Turbulent kinetic energy',\r\n xTitle='Time [hr]',\r\n yTitle='TKE [$\\mathregular{m^{3}s^{-2}}$]',\r\n xLimit=ts_xAxis,\r\n yLimit=(0.0,1600),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,300),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='upper left',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_tke',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n# cc\r\ncc_plot = [(tsTimesHr[i],ccs[i]) for i in range(len(ccs))]\r\npl.plot2D( cc_plot,\r\n chartStyle=['line',],\r\n zOrders=zOrders,\r\n lineStyle=lineStyles, \r\n lineColor=lineColors,\r\n lineWidth=lineWidths,\r\n figSize=(12,4),\r\n title=None,\r\n xTitle='Time [hr]',\r\n yTitle='Cloud cover [ratio]',\r\n xLimit=ts_xAxis,\r\n yLimit=(0.0,0.5),\r\n spineVis=(1,1,1,1),\r\n spineMajorTicks=(1,1,True,True),\r\n spineMinorTicks=(1,1,True,True),\r\n tickLengths=(6,3),\r\n tickDirections=('out','out'),\r\n spineLabels=(False,False,True,True),\r\n majorSpace=(4,0.1),\r\n fontSize=(12,14,14,10),\r\n legendLabels=legendLabels,\r\n legendLoc='best',\r\n save=saveOption,\r\n path=savePath,\r\n fileName=groupName+'ts_cc',\r\n fileFormat='png',\r\n quality=600 \r\n )\r\n#\r\n##########################","sub_path":"plot_LES.py","file_name":"plot_LES.py","file_ext":"py","file_size_in_byte":21736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"632431027","text":"# Problem 1\n# ---------\ndef check_valid_paren(s):\n return check_valid_paren_helper(s)\n\ndef check_valid_paren_helper(s,left = 0,right = 0):\n if left < right:\n return False\n if len(s) == 0:\n return left == right\n else:\n return check_valid_paren_helper(s[1:],left + (s[0] == '('),right + (s[0] == ')'))\n\n# Problem 2\n# ---------\ndef solve_latin_square(grid):\n myAnswer = []\n myNext = nextCell(grid)\n if not 
myNext:\n        return grid\n    try:\n        for i in findChoice(grid,myNext):\n            temp = deepCopyGrid(grid,myNext,i)\n            myAnswer.extend([solve_latin_square(temp)])\n            myAnswer = [i for i in myAnswer if i]\n        return myAnswer[0]\n    except IndexError:\n        return False\n\ndef deepCopyGrid(grid,position,item):\n    myGrid = [i[:] for i in grid]\n    myGrid[position[0]][position[1]] = item\n    return myGrid\n    \n\ndef nextCell(grid):\n    for i in range(len(grid)):\n        for j in range(len(grid)):\n            if grid[i][j] < 0:\n                return (i,j)\n    return False\n\ndef findChoice(grid,position):\n    myList = []\n    for i in range(1,len(grid)+1):\n        if i not in grid[position[0]] and i not in [grid[j][position[1]] for j in range(len(grid))]:\n            myList.append(i)\n    return myList\n\n# Problem 3\n# ---------\ndef is_proper(root):\n    myAnswer = is_proper_helper(root)\n    return all([myAnswer[i] == myAnswer[i+1] for i in range(len(myAnswer)-1)])\n    \ndef is_proper_helper(root,count = 0):\n    myAnswer = []\n    if root[\"left\"] == -1 and root[\"right\"] == -1:\n        if root[\"color\"] == \"black\":\n            count += 1\n        return [count]\n    else:\n        if root[\"color\"] == \"black\":\n            count += 1\n        if root[\"right\"] != -1:\n            myAnswer.extend(is_proper_helper(root[\"right\"],count))\n        if root[\"left\"] != -1:\n            myAnswer.extend(is_proper_helper(root[\"left\"],count))\n        return myAnswer\n\n","sub_path":"6.009/quiz_2_practice_1/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"375260476","text":"from django.contrib import admin\nfrom .forms import AtLeastOnePieceRequiredInlineFormSet\nfrom .models import Piece\nfrom .models import Carrier\nfrom .models import Quote\n\n\n# Define freight classes\nFREIGHT_CLASS_LIBRARY = {\n    '50 - 1000': {\n        'class': '50',\n        'description': 'Fits on standard shrink-wrapped 4X4 pallet, very durable',\n    },\n    '35 - 50': {\n        'class': '55',\n        'description': 'Bricks, cement, mortar, hardwood flooring',\n    },\n    '30 - 35': {\n        'class': '60',\n        'description': 'Car accessories & car parts',\n    },\n    '22.5 - 30': {\n        'class': '65',\n        'description': 'Car accessories & car parts, bottled beverages, books in boxes',\n    },\n    '15 - 22.5': {\n        'class': '70',\n        'description': 'Car accessories & car parts, food items, automobile engines',\n    },\n    '13.5 - 15': {\n        'class': '77.5',\n        'description': 'Tires, bathroom fixtures',\n    },\n    '12 - 13.5': {\n        'class': '85',\n        'description': 'Crated machinery, cast iron stoves',\n    },\n    '10.5 - 12': {\n        'class': '92.5',\n        'description': 'Computers, monitors, refrigerators',\n    },\n    '9 - 10.5': {\n        'class': '100',\n        'description': 'Boat covers, car covers, canvas, wine cases, caskets',\n    },\n    '8 - 9': {\n        'class': '110',\n        'description': 'Cabinets, framed artwork, table saw',\n    },\n    '7 - 8': {\n        'class': '125',\n        'description': 'Small Household appliances',\n    },\n    '6 - 7': {\n        'class': '150',\n        'description': 'Auto sheet metal parts, bookcases',\n    },\n    '5 - 6': {\n        'class': '175',\n        'description': 'Clothing, couches stuffed furniture',\n    },\n    '4 - 5': {\n        'class': '200',\n        'description': 'Auto sheet metal parts, aircraft parts, aluminum table, packaged mattresses',\n    },\n    '3 - 4': {\n        'class': '250',\n        'description': 'Bamboo furniture, mattress and box spring, plasma TV',\n    },\n    '2 - 3': {\n        'class': '300',\n        'description': 'Wood cabinets, tables, chairs setup, model boats',\n    },\n    '1 - 2': {\n        'class': '400',\n        'description': 'Deer antlers',\n    },\n    '0 - 1': {\n        'class': '500',\n        'description': 'Bags of gold dust, ping pong balls',\n    },\n}\n
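\n# Worked example of the lookup below: a 120 lb piece measuring 48 x 40 x 28 in is\n# 53760 / 1728 = 31.1 ft^3, so density = 120 / 31.1 ~= 3.86 lb/ft^3, which falls in\n# the '3 - 4' band and maps to freight class 250.\n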
\n\nclass PieceAdmin(admin.TabularInline):\n    model = Piece\n    extra = 0\n    formset = AtLeastOnePieceRequiredInlineFormSet\n\n\n# TODO: Check if register can be via @admin.register(Quote)\nclass QuoteAdmin(admin.ModelAdmin):\n    # # Read only\n    # readonly_fields = [\n    #     'reference_number',\n    #     'quote_status',\n    #     'created_at',\n    #     'moving_from_address',\n    #     'moving_from_city',\n    #     'moving_from_state',\n    #     'moving_from_zip',\n    #     'moving_from_country',\n    #     'moving_from_location',\n    #     'moving_to_address',\n    #     'moving_to_city',\n    #     'moving_to_state',\n    #     'moving_to_zip',\n    #     'moving_to_country',\n    #     'moving_to_location',\n    #     # 'saturday_pick_up',\n    #     # 'saturday_delivery',\n    #     # 'appointment_pick_up',\n    #     # 'appointment_delivery',\n    #     # 'inside_pick_up',\n    #     # 'inside_delivery',\n    #     # 'extended_loading_pick_up',\n    #     # 'extended_loading_delivery',\n    #     # 'carrier_insurance',\n    #     'extended_insurance',\n    #     'expiration_date',\n    # ]\n    # # Rearrange QuoteAdmin\n    # fieldsets = [\n    #     ('General', {'fields': [\n    #         'reference_number',\n    #         'quote_status',\n    #         'created_at',\n    #         'extended_insurance',\n    #         'expiration_date',\n    #     ]}),\n    #     ('Moving from', {'fields': [\n    #         'moving_from_location',\n    #         'moving_from_address',\n    #         'moving_from_city',\n    #         'moving_from_state',\n    #         'moving_from_zip',\n    #         'moving_from_country',\n    #     ]}),\n    #     ('Moving to', {'fields': [\n    #         'moving_to_location',\n    #         'moving_to_address',\n    #         'moving_to_city',\n    #         'moving_to_state',\n    #         'moving_to_zip',\n    #         'moving_to_country',\n    #     ]}),\n    # ]\n    inlines = [PieceAdmin,]\n\n    def response_add(self, request, new_object):\n        obj = self.after_saving_model_and_related_inlines(new_object)\n        return super(QuoteAdmin, self).response_add(request, obj)\n\n    def response_change(self, request, obj):\n        obj = self.after_saving_model_and_related_inlines(obj)\n        return super(QuoteAdmin, self).response_change(request, obj)\n\n    def after_saving_model_and_related_inlines(self, obj):\n        pieces = obj.piece_quote_set.all()\n        count_pieces = 0\n        count_weight = 0\n        count_value = 0\n        total_piece_cubic_inches = 0\n        for piece in pieces:\n            count_pieces += 1\n            piece_cubic_inches = float(piece.length * piece.width * piece.height)\n            count_weight += piece.weight\n            count_value += piece.value\n            total_piece_cubic_inches += piece_cubic_inches\n        total_cubic_feet = (total_piece_cubic_inches / 1728)\n\n        density = round(float(count_weight / total_cubic_feet), 2)\n        obj.total_pieces = count_pieces\n        obj.total_gross_weight = count_weight\n        obj.total_shipment_value = count_value\n        obj.total_cubic_feet = total_cubic_feet\n        obj.density = density\n\n        freight_class = None  # stays None if the density falls outside every band\n        for freight_class_k, freight_class_d in FREIGHT_CLASS_LIBRARY.items():\n            freight_class_x_y = freight_class_k.split(' - ')\n            f_x = freight_class_x_y[0].strip()\n            f_y = freight_class_x_y[-1].strip()\n            if float(f_x) <= density < float(f_y):  # lower bound inclusive so boundary densities match exactly one band\n                freight_class = freight_class_d['class']\n                break\n        obj.freight_class = freight_class\n\n        obj.save()\n        return obj\n\nadmin.site.register(Quote, QuoteAdmin)\n#admin.site.register(Piece)","sub_path":"rmapp_backend/quotes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"438872882","text":"def write_manifest(ctx):\n    # TODO(bazel-team): I don't think this classpath is what you want\n    manifest = \"Class-Path: \\n\"\n    if getattr(ctx.attr, \"main_class\", \"\"):\n        manifest += \"Main-Class: %s\\n\" % ctx.attr.main_class\n\n    ctx.actions.write(output = ctx.outputs.manifest, content = 
manifest)\n\ndef collect_srcjars(targets):\n srcjars = []\n for target in targets:\n if hasattr(target, \"srcjars\"):\n srcjars.append(target.srcjars.srcjar)\n return depset(srcjars)\n\ndef collect_jars(dep_targets, dependency_analyzer_is_off = True):\n \"\"\"Compute the runtime and compile-time dependencies from the given targets\"\"\" # noqa\n\n if dependency_analyzer_is_off:\n return _collect_jars_when_dependency_analyzer_is_off(dep_targets)\n else:\n return _collect_jars_when_dependency_analyzer_is_on(dep_targets)\n\ndef _collect_jars_when_dependency_analyzer_is_off(dep_targets):\n compile_jars = []\n runtime_jars = []\n\n for dep_target in dep_targets:\n if JavaInfo in dep_target:\n java_provider = dep_target[JavaInfo]\n compile_jars.append(java_provider.compile_jars)\n runtime_jars.append(java_provider.transitive_runtime_jars)\n else:\n # support http_file pointed at a jar. http_jar uses ijar,\n # which breaks scala macros\n compile_jars.append(filter_not_sources(dep_target.files))\n runtime_jars.append(filter_not_sources(dep_target.files))\n\n return struct(\n compile_jars = depset(transitive = compile_jars),\n transitive_runtime_jars = depset(transitive = runtime_jars),\n jars2labels = {},\n transitive_compile_jars = depset())\n\ndef _collect_jars_when_dependency_analyzer_is_on(dep_targets):\n transitive_compile_jars = []\n jars2labels = {}\n compile_jars = []\n runtime_jars = []\n\n for dep_target in dep_targets:\n current_dep_compile_jars = None\n current_dep_transitive_compile_jars = None\n\n if JavaInfo in dep_target:\n java_provider = dep_target[JavaInfo]\n current_dep_compile_jars = java_provider.compile_jars\n current_dep_transitive_compile_jars = java_provider.transitive_compile_time_jars\n runtime_jars.append(java_provider.transitive_runtime_jars)\n else:\n # support http_file pointed at a jar. 
http_jar uses ijar,\n # which breaks scala macros\n current_dep_compile_jars = filter_not_sources(dep_target.files)\n current_dep_transitive_compile_jars = filter_not_sources(dep_target.files)\n runtime_jars.append(filter_not_sources(dep_target.files))\n\n compile_jars.append(current_dep_compile_jars)\n transitive_compile_jars.append(current_dep_transitive_compile_jars)\n add_labels_of_jars_to(jars2labels, dep_target,\n current_dep_transitive_compile_jars.to_list(),\n current_dep_compile_jars.to_list())\n\n return struct(\n compile_jars = depset(transitive = compile_jars),\n transitive_runtime_jars = depset(transitive = runtime_jars),\n jars2labels = jars2labels,\n transitive_compile_jars = depset(transitive = transitive_compile_jars))\n\n# When import mavan_jar's for scala macros we have to use the jar:file requirement\n# since bazel 0.6.0 this brings in the source jar too\n# the scala compiler thinks a source jar can look like a package space\n# causing a conflict between objects and packages warning\n# error: package cats contains object and package with same name: implicits\n# one of them needs to be removed from classpath\n# import cats.implicits._\n\ndef not_sources_jar(name):\n return \"-sources.jar\" not in name\n\ndef filter_not_sources(deps):\n return depset(\n [dep for dep in deps.to_list() if not_sources_jar(dep.basename)])\n\ndef add_labels_of_jars_to(jars2labels, dependency, all_jars, direct_jars):\n for jar in direct_jars:\n _add_label_of_direct_jar_to(jars2labels, dependency, jar)\n for jar in all_jars:\n _add_label_of_indirect_jar_to(jars2labels, dependency, jar)\n\ndef _add_label_of_direct_jar_to(jars2labels, dependency, jar):\n jars2labels[jar.path] = dependency.label\n\ndef _add_label_of_indirect_jar_to(jars2labels, dependency, jar):\n if _label_already_exists(jars2labels, jar):\n return\n\n # skylark exposes only labels of direct dependencies.\n # to get labels of indirect dependencies we collect them from the providers transitively\n if _provider_of_dependency_contains_label_of(dependency, jar):\n jars2labels[jar.path] = dependency.jars_to_labels[jar.path]\n else:\n jars2labels[\n jar.\n path] = \"Unknown label of file {jar_path} which came from {dependency_label}\".format(\n jar_path = jar.path, dependency_label = dependency.label)\n\ndef _label_already_exists(jars2labels, jar):\n return jar.path in jars2labels\n\ndef _provider_of_dependency_contains_label_of(dependency, jar):\n return hasattr(dependency,\n \"jars_to_labels\") and jar.path in dependency.jars_to_labels\n\n# TODO this seems to have limited value now that JavaInfo has everything\ndef create_java_provider(scalaattr, transitive_compile_time_jars):\n return java_common.create_provider(\n use_ijar = False,\n compile_time_jars = scalaattr.compile_jars,\n runtime_jars = scalaattr.transitive_runtime_jars,\n transitive_compile_time_jars = depset(\n transitive = [transitive_compile_time_jars, scalaattr.compile_jars]),\n transitive_runtime_jars = scalaattr.transitive_runtime_jars,\n )\n","sub_path":"scala/private/common.bzl","file_name":"common.bzl","file_ext":"bzl","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"282115025","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Cody Dillinger\n# Functions related to finding near vertices and nearest vertices to a given vertex\n\nfrom math import*\nfrom classes import Dimensions\nfrom geometry_procedures import dist, 
norm\n##############################################################################################################\n\n\n# this nearest is not being used. using nearest2 **********************\ndef nearest(vertex_rand, current_vertex, vertex_nearest, k): # recursively exhaustively search tree for nearest node\n current_vertex.k_nearest = k\n if dist(vertex_rand, current_vertex) < dist(vertex_rand, vertex_nearest) and current_vertex.at_goal_set is False: # if distance to next vertex in tree is smaller\n vertex_nearest = current_vertex # update it as the nearest - is this accidentally changing root node value?\n for i in range(len(current_vertex.children)): # for all children vertices\n if current_vertex.children[i].k_nearest != k: # if next vertex not checked yet\n vertex_nearest = nearest(vertex_rand, current_vertex.children[i], vertex_nearest, k) # call nearest function again\n return vertex_nearest\n##############################################################################################################\n\n\n# traverses portion of tree, excluding regions that are guaranteed to have no closer points\ndef nearest2(vertex_rand, current_vertex, prev_vertex, nearest_,\n axis_): # uses kd tree to efficiently return approximate nearest vertex\n # print 'optimized nearest, axis:', axis_.axis\n if norm(current_vertex, vertex_rand) < norm(nearest_, vertex_rand) and current_vertex.at_goal_set is False:\n nearst = current_vertex\n else:\n nearst = nearest_\n\n \"\"\"\n this would be an example of an exhaustive search, which we are not doing:\n if current_vertex.left_child is not None:\n nearst = nearest2(vertex_rand, current_vertex.left_child, current_vertex, nearst, axis_.next_)\n if current_vertex.right_child is not None:\n nearst = nearest2(vertex_rand, current_vertex.right_child, current_vertex, nearst, axis_.next_)\n return nearst\n\n Actual design:\n traverse in direction of current vertex towards random vertex\n if the current vertex is further in the previous axis direction from vert_rand than the closest is in Euclidean distance,\n then don't traverse in direction of current vertex away from random vertex\n this method allows exclusions of certain parts of the tree (pruning)\n which is great for large numbers of vertices\n \"\"\"\n\n if getattr(vertex_rand, axis_.axis) > getattr(current_vertex, axis_.axis): # if random vertex is to the right of or above the current vertex\n next_vert1 = current_vertex.right_child # then traverse towards the random vertex (right)\n next_vert2 = current_vertex.left_child\n else:\n next_vert1 = current_vertex.left_child # else traverse towards the random vertex (left)\n next_vert2 = current_vertex.right_child\n if next_vert1 is not None: # if there is a child in this direction\n nearst = nearest2(vertex_rand, next_vert1, current_vertex, nearst, axis_.next_)\n if next_vert2 is not None: # if there is a child in this direction\n if abs(getattr(current_vertex, axis_.axis) - getattr(vertex_rand, axis_.axis)) > norm(nearst, vertex_rand):\n # prune subtree\n pass\n else:\n nearst = nearest2(vertex_rand, next_vert2, current_vertex, nearst, axis_.next_)\n return nearst\n##############################################################################################################\n\n\ndef add_to_kd_tree(vertex_new, node, axis_): # use kd tree for spatial sorting, faster near/nearest searching\n #print 'add to kd tree, axis:', axis_.axis\n if getattr(vertex_new, axis_.axis) <= getattr(node, axis_.axis): # indirect attribute access, if less than node val\n if 
node.left_child is not None: # if left child already exists\n #print 'traversing left since ', getattr(vertex_new, axis_.axis), '<', getattr(node, axis_.axis)\n temp = node.left_child\n add_to_kd_tree(vertex_new, temp, axis_.next_) # traverse left. alternate between x and y comparisons\n del temp\n else:\n #print 'adding left child'\n node.left_child = vertex_new # else no left child exists, create one\n else:\n if node.right_child is not None: # if right child already exists\n #print 'traversing right since ', getattr(vertex_new, axis_.axis), '>', getattr(node, axis_.axis)\n temp = node.right_child\n add_to_kd_tree(vertex_new, temp, axis_.next_) # traverse right. alternate between x and y comparisons\n del temp\n else:\n #print 'adding right child'\n node.right_child = vertex_new # else no right child exists, create one\n return\n##############################################################################################################\n\n\ndef near_vertices(vertex_new, current_vertex, k, vertices_near): # return all vertices within near_radius\n current_vertex.k_near = k # for avoiding checking one vertex multiple times\n if k == 1:\n near_radius = Dimensions.eta\n else:\n near_radius = min(Dimensions.gamma * sqrt(log(k) / k), Dimensions.eta) # function of numPoints\n #print 'near radius:', near_radius\n #print 'distance btwn vert_new and vert_current:', dist(vertex_new, current_vertex)\n if dist(vertex_new, current_vertex) <= near_radius and (current_vertex.at_goal_set is False): # if within radius\n vertices_near.append(current_vertex) # add to list of near vertices\n for i in range(len(current_vertex.children)): # for all children of this vertex\n if current_vertex.children[i].k_near != k: # if that child not checked yet\n vertices_near = near_vertices(vertex_new, current_vertex.children[i], k, vertices_near) # call function again\n return vertices_near\n##############################################################################################################\n\n\ndef near_vertices2(vertex_new, current_vertex, prev_vertex, k, vertices_near, axis_): # uses kd tree to efficiently return approximate nearest vertex\n if k == 1:\n radius = Dimensions.eta\n else:\n radius = min(Dimensions.gamma * sqrt(log(k) / k), Dimensions.eta) # function of numPoints\n if radius > norm(current_vertex, vertex_new) > 0 and current_vertex.at_goal_set is False:\n vertices_near.append(current_vertex)\n if getattr(vertex_new, axis_.axis) > getattr(current_vertex, axis_.axis): # if new vertex is to the right of or above the current vertex\n next_vert1 = current_vertex.right_child # then traverse towards the new vertex (right)\n next_vert2 = current_vertex.left_child\n else:\n next_vert1 = current_vertex.left_child # else traverse towards the new vertex (left)\n next_vert2 = current_vertex.right_child\n if next_vert1 is not None: # if there is a child in this direction\n vertices_near = near_vertices2(vertex_new, next_vert1, current_vertex, k, vertices_near, axis_.next_)\n if next_vert2 is not None: # if there is a child in this direction\n if abs(getattr(current_vertex, axis_.axis) - getattr(vertex_new, axis_.axis)) > radius:\n # prune subtree\n pass\n else:\n vertices_near = near_vertices2(vertex_new, next_vert2, current_vertex, k, vertices_near, axis_.next_)\n return 
vertices_near\r\n##############################################################################################################\r\n","sub_path":"Gazebo_Simulator/i_robot_simulator/i_robot_sim1/iRobot_create/src/search_algorithms.py","file_name":"search_algorithms.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"360305559","text":"#!/usr/bin/python3\n\n\n# importing the requests library\nimport requests\n\n# api-endpoint\nURL = \"http://www.baidu.com\"\n\n# location given here\nlocation = \"delhi technological university\"\n\n# defining a params dict for the parameters to be sent to the API\nPARAMS = {'address':location}\n\n# sending a get request and saving the response as a response object\nr = requests.get(url = URL, params = PARAMS)\n\n# extracting data in json format\n#data = r.json()\nprint(r)\n\n\n'''\nimport urllib\nimport urllib2\nimport time\n'''\n'''\nimport requests\nurl = \"http://www.baidu.com\"\nparams={'number': 12524, 'type': 'issue', 'action': 'show'}\nresult = requests.post(url, data=params)\nprint(result.status_code, result.reason)\n\n\n200 OK\nprint(r.text[:300] + '...')\n\n\n\n\nurl = \"http://dev.intebox.com/boss30/api/60526/saas/targetDevice/set/onoff/td/000000000700000075/off\"\ncount = 1;\nwhile count <= 10000:\n\n    time.sleep(0.01)\n    req = urllib2.Request(url)\n    #print req\n    res_data = urllib2.urlopen(req)\n    res = res_data.read()\n    #print res\n    if (count % 10) ==1 :\n        print 'count:', count\n    count = count +1\n\n'''\n","sub_path":"python/http/sendPostRequest.py","file_name":"sendPostRequest.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"645684433","text":"import numpy as np\nfrom numpy import r_ as c\nimport scipy.signal\nimport scipy.io.wavfile\ndef fala_mua(ktory,folder):\n    miedzy_nr=2\n    miedzy_liter=1.1224620534321632\n    miedzy_pol=1.0594630994594263\n    cyfry=np.arange(0,10)-4\n    litery=np.array(['C','D','E','F','G','A','B'])\n    x=np.genfromtxt(fname=folder+\"track\"+ktory+\".txt\",dtype=str,comments='?')\n    mian=len(x[0,:])\n    x=x[:,0]\n    cyferki=np.repeat(0,len(x))\n    literki=np.repeat(\"\",len(x))\n    polowki=np.repeat(\"\",len(x))\n    for i in np.arange(0,len(x)):\n        if x[i]!=\"---\":\n            cyferki[i]=int(x[i][2])\n            literki[i]=x[i][0]\n            polowki[i]=x[i][1]\n    czest=np.repeat(440.0,len(x))  # float array so the pitch divisions below do not truncate\n    czest[np.where(x==\"---\")]=0\n    cyferki_nowe=cyferki-4\n    for i in np.arange(0,len(x)):\n        if x[i]!='---':\n            if cyferki_nowe[i]<0:\n                czest[i]=czest[i]/(miedzy_nr**np.abs(cyferki_nowe[i]))\n            if cyferki_nowe[i]>0:\n                czest[i]=czest[i]*miedzy_nr**cyferki_nowe[i]\n    literki_poz=np.repeat(0,len(x))\n    for i in np.arange(0,len(litery)):\n        literki_poz[np.where(literki==litery[i])]=i-5\n    for i in np.arange(0,len(x)):\n        if literki_poz[i]<0:\n            czest[i]=czest[i]/(miedzy_liter**np.abs(literki_poz[i]))\n        if literki_poz[i]>0:\n            czest[i]=czest[i]*miedzy_liter**literki_poz[i]\n    czest[np.where(polowki==\"#\")]=czest[np.where(polowki==\"#\")]*miedzy_pol\n    s = open(folder+'defs.txt', 'r')\n    newDict={}\n    for line in s:\n        listedline = line.strip().split(':')\n        if len(listedline) > 1:\n            newDict[listedline[0]] = float(listedline[1])\n    timebeat=60/newDict['bpm']\n    dl=len(x)\n    dl_track=dl*timebeat\n    dl_track_czest=dl*timebeat*44100\n    t = np.linspace(0, dl_track, int(dl_track_czest))  # the sample count must be an integer\n    f=np.repeat(0.0,len(t))\n    for i in np.arange(0,len(czest)):\n        f_temp=np.repeat(czest[i],len(t)//len(czest))\n        
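# each note occupies len(t)//len(czest) samples; slice bounds must be integers\n        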
f[i*len(t)//len(czest):((i+1)*len(t)//len(czest))]=f_temp\n    fala=np.sin(2*np.pi*f*t)/mian\n    return fala","sub_path":"fala_mua.py","file_name":"fala_mua.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"225060059","text":"# Copyright (c) 2016 ZTE Inc.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron_lib.api import converters\n\nfrom neutron.api import extensions\nfrom neutron.api.v2 import attributes as attr\nfrom neutron.api.v2 import resource_helper\n\n\nRESOURCE_ATTRIBUTE_MAP = {\n    'trunks': {\n        'admin_state_up': {'allow_post': True, 'allow_put': True,\n                           'default': True,\n                           'convert_to': converters.convert_to_boolean,\n                           'is_visible': True},\n        'id': {'allow_post': False, 'allow_put': False,\n               'validate': {'type:uuid': None},\n               'is_visible': True, 'primary_key': True},\n        'name': {'allow_post': True, 'allow_put': True,\n                 'validate': {'type:string': attr.NAME_MAX_LEN},\n                 'default': '', 'is_visible': True},\n        'tenant_id': {'allow_post': True, 'allow_put': False,\n                      'required_by_policy': True,\n                      'validate':\n                          {'type:string': attr.TENANT_ID_MAX_LEN},\n                      'is_visible': True},\n        'port_id': {'allow_post': True, 'allow_put': False,\n                    'required_by_policy': True,\n                    'validate': {'type:uuid': None},\n                    'is_visible': True},\n        'status': {'allow_post': False, 'allow_put': False,\n                   'is_visible': True},\n        'sub_ports': {'allow_post': True, 'allow_put': False,\n                      'default': [],\n                      'convert_list_to': converters.convert_kvp_list_to_dict,\n                      'validate': {'type:subports': None},\n                      'enforce_policy': True,\n                      'is_visible': True},\n    },\n}\n\n\nclass Trunk(extensions.ExtensionDescriptor):\n    \"\"\"Trunk API extension.\"\"\"\n\n    @classmethod\n    def get_name(cls):\n        return \"Trunk Extension\"\n\n    @classmethod\n    def get_alias(cls):\n        return \"trunk\"\n\n    @classmethod\n    def get_description(cls):\n        return \"Provides support for trunk ports\"\n\n    @classmethod\n    def get_updated(cls):\n        return \"2016-01-01T10:00:00-00:00\"\n\n    @classmethod\n    def get_resources(cls):\n        \"\"\"Returns Ext Resources.\"\"\"\n        plural_mappings = resource_helper.build_plural_mappings(\n            {}, RESOURCE_ATTRIBUTE_MAP)\n        attr.PLURALS.update(plural_mappings)\n        action_map = {'trunk': {'add_subports': 'PUT',\n                                'remove_subports': 'PUT',\n                                'get_subports': 'GET'}}\n        return resource_helper.build_resource_info(plural_mappings,\n                                                   RESOURCE_ATTRIBUTE_MAP,\n                                                   'trunk',\n                                                   action_map=action_map,\n                                                   register_quota=True)\n\n    def update_attributes_map(self, attributes, extension_attrs_map=None):\n        super(Trunk, self).update_attributes_map(\n            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)\n\n    def get_required_extensions(self):\n        return [\"binding\"]\n\n    def get_extended_resources(self, version):\n        if version == \"2.0\":\n            return RESOURCE_ATTRIBUTE_MAP\n        else:\n            return {}\n","sub_path":"neutron/extensions/trunk.py","file_name":"trunk.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
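The fala_mua.py record above builds a per-sample frequency vector by repeating one frequency value per beat; in Python 3 the slice bounds must be integers (hence the `//` in the assignment above), and the same applies to the count passed to np.repeat. A short sketch of that resampling done with integer arithmetic in a single call (variable names and values here are illustrative):

import numpy as np

sample_rate = 44100
beat_freqs = np.array([440.0, 0.0, 493.88, 440.0])  # one frequency per beat, 0.0 = rest
samples_per_beat = sample_rate // 2                  # e.g. 120 bpm -> 0.5 s per beat

f = np.repeat(beat_freqs, samples_per_beat)          # per-sample frequency vector
t = np.arange(f.size) / sample_rate
wave = np.sin(2 * np.pi * f * t)                     # piecewise-constant pitch, as in the record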
+{"seq_id":"435873537","text":"import socket\nimport select\nimport time \nfrom httpfun.HttpService import * \n\ndef main():\n    # Create the TCP socket\n    s_tcp = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n    s_tcp.bind((\"\",7890))\n    s_tcp.listen(128)\n    s_tcp.setblocking(False)\n    # Create an epoll object \n    epl = select.epoll()\n\n    # Register the TCP listening socket\n    epl.register(s_tcp.fileno(),select.EPOLLIN)\n    # Create an fd-to-socket dict\n    socket_dict = dict()\n    # Loop, checking whether data has arrived\n    while True:\n        fd_event_list = epl.poll() # returns a list of (fd, event) tuples\n        for fd,event in fd_event_list:\n            # Determine whether the event is a new client connection or data from an already-connected client\n            if fd == s_tcp.fileno():\n                s_client,addr_client = s_tcp.accept()\n                epl.register(s_client.fileno(),select.EPOLLIN)\n                socket_dict[s_client.fileno()] = s_client\n            # If the fd is not the listening socket's\n            elif event == select.EPOLLIN:\n                # Find the socket by its fd\n                cur_client = socket_dict[fd]\n                recv_data = cur_client.recv(1024)\n                # Empty data means the client closed the connection\n                if not recv_data:\n                    cur_client.close() # close the client socket\n                    del socket_dict[fd] # remove it from the dict\n                    epl.unregister(fd) # unregister it from epoll\n                else:\n                    server_service(cur_client,recv_data)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"socket/ubuntu_py/多任务-http服务器/05-epoll.py","file_name":"05-epoll.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"617952228","text":"# Collect the names of my friends\n# Decide how to greet them\n# Print the greeting with each of their names\n\nfriends = [\"Stevesy\", \"Medge\", \"Marcus\"]\n\ngreeting = \"Yo what up, \"\n\nfor name in friends:\n    print(greeting, name)","sub_path":"06_intro-to-programming/greetfriends.py","file_name":"greetfriends.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"86587513","text":"#encoding: utf-8\n#!/bin/python2.7\n\nimport os\nimport sys\nimport time\nimport subprocess as sp\nimport numpy as np\n\ngpdcbin = 'gpdc'\n#gpdcbin = '/opt/geopsy/2.9.0/bin/gpdc'\n\n# --------------------------------------------------------------------\n# >> VSNU2VP\n# --------------------------------------------------------------------\ndef vsnu2vp(vs, nu):\n    \"\"\"\n    Calculate Vp from vs and Poisson's coefficient.\n    \"\"\"\n    vp = np.sqrt(2.*(1-nu)/(1.-2.*nu))*vs\n    return vp\n\n\n# --------------------------------------------------------------------\n# >> FGPDC\n# --------------------------------------------------------------------\ndef fgpdc(mod, wmin, wmax, nw):\n    \n    fmod = open('model.txt', 'w')\n    nlay = len(mod)\n    \n    fmod.write(str(nlay)+'\\n')\n    for ilay in range(0, nlay):\n        zl = mod[ilay][0]\n        vs = mod[ilay][1]\n        nu = mod[ilay][2]\n        ro = mod[ilay][3]\n        vp = vsnu2vp(vs, nu)\n        fmod.write(str(zl)+' ')\n        fmod.write(str(vp)+' ')\n        fmod.write(str(vs)+' ')\n        fmod.write(str(ro)+' ')\n        fmod.write('\\n')\n    fmod.close()\n    sp.call(str(gpdcbin)+' -n '+str(nw)+' -min '+str(wmin)+' -max '+str(wmax)+' -s frequency < model.txt > disp.txt', shell=True)\n    disp = np.loadtxt('disp.txt', comments='#')\n    disp[:,1] = 1./disp[:,1]\n    return disp[:,1]\n","sub_path":"DIKES/script/forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
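The forward.py record above shells out to gpdc with sp.call(..., shell=True) and a concatenated command string. Where the shell's redirection is the only thing being used, passing an argument list plus explicit file handles avoids quoting and injection pitfalls; a sketch of an equivalent call under that assumption (the gpdc flags are copied from the record, the parameter values are made up):

import subprocess

nw, wmin, wmax = 100, 0.1, 10.0  # example sampling parameters

# Equivalent to: gpdc -n <nw> -min <wmin> -max <wmax> -s frequency < model.txt > disp.txt
with open('model.txt', 'rb') as fin, open('disp.txt', 'wb') as fout:
    subprocess.run(['gpdc', '-n', str(nw), '-min', str(wmin),
                    '-max', str(wmax), '-s', 'frequency'],
                   stdin=fin, stdout=fout, check=True)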
+{"seq_id":"203717702","text":"\"\"\"add foreign key and non-null constraint after backfilled\n\nRevision ID: 8a2073751905\nRevises: 98bfb39389ff\nCreate Date: 2020-05-31 18:26:00.183742\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8a2073751905'\ndown_revision = '98bfb39389ff'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.alter_column('spotify_tracks', 'spotify_album_uri',\n               existing_type=sa.VARCHAR(length=128),\n               nullable=False)\n    op.create_foreign_key(None, 'spotify_tracks', 'spotify_albums', ['spotify_album_uri'], ['uri'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'spotify_tracks', type_='foreignkey')\n    op.alter_column('spotify_tracks', 'spotify_album_uri',\n               existing_type=sa.VARCHAR(length=128),\n               nullable=True)\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/8a2073751905_add_foreign_key_and_non_null_constraint_.py","file_name":"8a2073751905_add_foreign_key_and_non_null_constraint_.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"504096312","text":"\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nimport rest\nfrom room import plain_models\n\n# add a handler for REST calls\napplication = webapp.WSGIApplication(\n    [\n        ('/room/rest/.*', rest.Dispatcher)\n    ],\n    debug=True\n)\n\n# configure the rest dispatcher to know what prefix to expect on request\n# urls\nrest.Dispatcher.base_url = '/room/rest'\n# add specific models (with given names)\nrest.Dispatcher.model_handlers = {}\nrest.Dispatcher.add_models({\n    'Program': plain_models.Program,\n})\n\n\nclass RoomAuthenticator(rest.Authenticator):\n\n    def authenticate(self, dispatcher):\n        if users.GetCurrentUser():\n            return\n        dispatcher.forbidden()\n\n\n# rest.Dispatcher.authenticator = RoomAuthenticator()\n# rest.Dispatcher.authorizer = RoomAuthorizer()\n\ndef main():\n    run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"room/rest_main.py","file_name":"rest_main.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"186263689","text":"# test_get_individual_field_from_embark_xml.py 2/18/19 sm\n\"\"\" test get_individual_field_from_embark_xml.py \"\"\"\n\nimport json\nimport unittest\nfrom xml.etree.ElementTree import ElementTree\n\n# add parent directory to path\nimport os\nimport inspect\nimport sys\nCURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nPARENTDIR = os.path.dirname(CURRENTDIR)\nsys.path.insert(0, PARENTDIR)\n\nfrom get_individual_field_from_embark_xml import GetEmbarkField, _starts_with_ok, _does_not_start_with_ok\n\n\nclass Test(unittest.TestCase):\n    \"\"\" Class for test fixtures \"\"\"\n\n    def get_xml_doc(self):\n        \"\"\" Need to load xml internally to control contents for testing \"\"\"\n        try:\n            embark_xml_doc = ElementTree(file='./sample_xml_for_testing.xml')\n        except OSError:\n            print('Unable to open the file you specified. 
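The Alembic migration above passes None as the constraint name, which lets the database auto-name the foreign key; the paired op.drop_constraint(None, ...) in downgrade() then fails at runtime because there is no name to drop by. A sketch of the same pair with an explicit name (the name itself is illustrative):

from alembic import op

FK_NAME = 'fk_spotify_tracks_spotify_album_uri'  # illustrative constraint name

def upgrade():
    op.create_foreign_key(FK_NAME, 'spotify_tracks', 'spotify_albums',
                          ['spotify_album_uri'], ['uri'])

def downgrade():
    op.drop_constraint(FK_NAME, 'spotify_tracks', type_='foreignkey')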
Please try again.')\n raise\n return embark_xml_doc\n\n def test_read_record_id(self):\n \"\"\" test Read and Parse \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n field_definition = json.loads('{\"name\": \"recordId\",\"required\": true,\"duplicatesAllowed\": false,'\n + '\"xpath\": \"./variable[@id=\\'object_00055\\']\"}')\n field = GetEmbarkField(field_definition)\n json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n self.assertTrue(json.loads('{\"recordId\": \"1976.057\"}') == json_of_embark_field)\n\n def test_read_constant(self):\n \"\"\" test Read and Parse \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n field_definition = json.loads('{\"name\": \"repository\",\"required\": false,\"duplicatesAllowed\": false,'\n + '\"xpath\": \"required, but not used here.\",\"constant\": \"Snite\"}')\n field = GetEmbarkField(field_definition)\n json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n self.assertTrue(json.loads('{\"repository\": \"Snite\"}') == json_of_embark_field)\n\n def test_read_creator(self):\n \"\"\" test Read and Parse \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n field_definition = json.loads('{\"name\": \"creator\",\"required\": false,\"duplicatesAllowed\": false,'\n + '\"xpath\": \"./variable[@id=\\'object_00060\\']\"}')\n field = GetEmbarkField(field_definition)\n json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n self.assertTrue(json.loads('{\"creator\": \"Paul Wood\"}') == json_of_embark_field)\n\n def test_read_creation_date(self):\n \"\"\" test Read and Parse \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n field_definition = json.loads('{\"name\": \"creationDate\",\"required\": false,\"duplicatesAllowed\": false,'\n + '\"xpath\": \"./variable[@id=\\'object_00062\\']\",'\n + '\"validation\": \"validateYYYYMMDD\"}')\n field = GetEmbarkField(field_definition)\n json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n self.assertTrue(json.loads('{\"creationDate\": \"18910101\"}') == json_of_embark_field)\n\n def test_read_exhibition(self):\n \"\"\" test Read and Parse \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n field_definition = json.loads('{\"name\": \"exhibition\",\"required\": false,\"duplicatesAllowed\": true,\"xpath\":'\n + ' \"./group[@id=\\'object_00002\\']/variable[@id=\\'object_00002\\']\"}')\n field = GetEmbarkField(field_definition)\n json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n self.assertTrue(json.loads('{\"exhibition\": [{\"name\": \"Picturing History\", \"startDate\": \"09/01/94\",'\n + '\"endDate\": \"12/01/94\"}]}') == json_of_embark_field)\n\n def test_read_keyword(self):\n \"\"\" test keyword \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n field_definition = json.loads('{\"name\": \"keyword\",\"required\": false,\"duplicatesAllowed\": true,\"xpath\":'\n + '\"./group[@id=\\'object_00080\\']/variable[@id=\\'object_00080\\']\"}')\n field = GetEmbarkField(field_definition)\n json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n for keyword in json_of_embark_field['keyword']:\n value = keyword['value']\n self.assertTrue(value == 'confederate')\n break\n\n def test_read_default(self):\n \"\"\" test default \"\"\"\n xml_of_embark_item = self.get_xml_doc()\n json_of_embark_field = {}\n 
field_definition = json.loads('{\"name\": \"classification\",\"required\": false,\"duplicatesAllowed\": false,'\n                                      + '\"xpath\": \"./variable[@name=\\'[Object]Class 2\\']\",\"default\": \"painting\"}')\n        field = GetEmbarkField(field_definition)\n        json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n        # print(json_of_embark_field)\n        self.assertTrue(json.loads('{\"classification\": \"painting\"}') == json_of_embark_field)\n\n    def test_try_to_throw_required_value_missing_error(self):\n        \"\"\" test default \"\"\"\n        xml_of_embark_item = self.get_xml_doc()\n        json_of_embark_field = {}\n        field_definition = json.loads('{\"name\": \"classification\",\"required\": true,\"duplicatesAllowed\":'\n                                      + ' false,\"xpath\": \"./variable[@name=\\'[Object]Class 2\\']\"}')\n        field = GetEmbarkField(field_definition)\n        self.assertRaises(ValueError, field.get_json_representation_of_field, xml_of_embark_item)\n        field_definition = json.loads('{\"name\": \"classification\",\"required\": true,\"duplicatesAllowed\":'\n                                      + ' false,\"xpath\": \"./variable[@name=\\'[Object]Class 2\\']\",\"default\":'\n                                      + ' \"painting\"}')\n        field.load_json_field_definition(field_definition)\n        json_of_embark_field = field.get_json_representation_of_field(xml_of_embark_item)\n        self.assertTrue(json.loads('{\"classification\": \"painting\"}') == json_of_embark_field)\n\n    def test_starts_with(self):\n        \"\"\" Test Starts With and Does Not Start With routines \"\"\"\n        self.assertTrue(_starts_with_ok('abc123', 'abc'))\n        self.assertTrue(not _starts_with_ok('abc123', '123'))\n        self.assertTrue(_starts_with_ok('abc123', ''))\n        self.assertTrue(_does_not_start_with_ok('abc123', ''))\n        self.assertTrue(_does_not_start_with_ok('abc123', '123'))\n        self.assertTrue(not _does_not_start_with_ok('abc123', 'abc'))\n\n\ndef suite():\n    \"\"\" define test suite \"\"\"\n    return unittest.TestLoader().loadTestsFromTestCase(Test)\n\n\nif __name__ == '__main__':\n    suite()\n    unittest.main()\n","sub_path":"test/test_get_individual_field_from_embark_xml.py","file_name":"test_get_individual_field_from_embark_xml.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"639390818","text":"import fcntl\nimport time\n\ndef main():\n    teclado = open('/dev/tty0', 'r')\n    leds = 0\n    \n    while True:\n        fcntl.ioctl(teclado, 0x4B32, leds)  # KDSETLED sets the LED state (0x4B31 is KDGETLED, which only reads)\n        \n        if (leds == 8):\n            leds = 0\n        \n        leds += 1\n        time.sleep(1)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"projects/piscaled.py","file_name":"piscaled.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"576609510","text":"def l1Norm(v):\n    '''\n    v: The vector to have the norm returned.\n    '''\n    sum = 0\n    for i in range(len(v)):\n        sum += abs(v[i])\n    return sum\n\nif __name__ == \"__main__\":\n    v = [1,2,3]\n    print(l1Norm(v))\n","sub_path":"task_sheets/sheet9/L1Norm.py","file_name":"L1Norm.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
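The l1Norm loop in the record above is the Manhattan (L1) norm; with numpy the same reduction is one call, and numpy.linalg.norm exposes it via ord=1. A quick check of the equivalence:

import numpy as np

v = np.array([1, -2, 3])
assert np.abs(v).sum() == 6             # the loop in the record, vectorized
assert np.linalg.norm(v, ord=1) == 6.0  # same norm through the linalg API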
+{"seq_id":"200762988","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.neighbors import DistanceMetric\nfrom sklearn.cluster import DBSCAN\n\n# Haversine metric and Earth radius, defined at module level so distance() can find them\ndist = DistanceMetric.get_metric('haversine')\nearth_radius = 6371\n\n\n# THIS IS THE DISTANCE BETWEEN TWO POINTS\ndef distance(latitude1, longitude1, latitude12, longitude2):\n\n\n    return (dist.pairwise(\n        np.radians([[latitude1, longitude1]]),\n        np.radians([[latitude12, longitude2]])\n    ) * earth_radius)[0][0]\n###########################################################\n\n\n# ROUNDING THE FRACTIONAL SHARES\ndef street_drob(a,mas):\n\n\n    buf = a\n    for i in range(len(mas)):\n        if mas[i]>1:\n            mas[i] = round(mas[i])\n            buf = buf - mas[i]\n    buf_mas = [x for x in mas if x < 1]\n    for j in range(len(mas)):\n        for i in range(len(mas)):\n            if mas[i]<1 and mas[i] == max(buf_mas) and buf != 0:\n                mas[i] = 1\n                buf_mas = [x for x in mas if x < 1]\n                buf = buf - 1\n    for i in range(len(mas)):\n        if mas[i]<1:\n            mas[i]=0\n    return(mas)\n################################################################\n\n\n\n# COMPUTING COORDINATES FOR THE CLUSTERS\ndef patrol_label(label, primer, pogr):\n\n\n    dist_all = []\n    for i in range(len(primer[['latitude', 'longitude']][primer['labels'] == label])):\n        sum_dist = 0\n        for j in range(len(primer[['latitude', 'longitude']][primer['labels'] == label])):\n            dist = distance(\n                primer[['latitude', 'longitude']][primer['labels'] == label].iloc[i][0],\n                primer[['latitude', 'longitude']][primer['labels'] == label].iloc[i][1],\n                primer[['latitude', 'longitude']][primer['labels'] == label].iloc[j][0],\n                primer[['latitude', 'longitude']][primer['labels'] == label].iloc[j][1]\n            )\n            sum_dist += dist\n        dist_all.append(sum_dist)\n    dist_all = np.array(dist_all)\n    if (dist_all.size == 0):\n        return primer[['latitude', 'longitude']][primer['labels'] == label].iloc[0]\n    else:\n        return primer[['latitude', 'longitude']][primer['labels'] == label].iloc[dist_all.argmin()]\n######################################################################################################\n\n\n\n# DISTRIBUTING PATROLS ACROSS STREETS\ndef raspredelenie_street(obj,patrol_number):\n\n\n    dtp_count = 0\n    for i in obj.street.value_counts():\n        dtp_count += i\n    patrol_count_street = pd.DataFrame()\n    patrol_count_street['street'] = obj.street.unique()\n    patrol_count_street['count_patr'] = ''\n    for i in range(len(obj.street.value_counts())):\n        for j in range(len(patrol_count_street['street'])):\n            if (patrol_count_street.iloc[j]['street']) == (obj.street.value_counts().index[i]):\n                patrol_count_street.at[j,'count_patr'] = round(obj.street.value_counts()[i]/dtp_count,1)*patrol_number\n    buf_mas = street_drob(patrol_number,list(patrol_count_street['count_patr']))\n    patrol_count_street['count_patr'] = buf_mas\n    return(patrol_count_street)\n#####################################################################################################################\n\n\n\n# DISTRIBUTING PATROLS ACROSS CLUSTERS\ndef raspredelenie_claster(street, patrol_number, pogr):\n\n\n    objct = df[df['street'] == street]\n    coords = objct.as_matrix(columns=['latitude', 'longitude'])\n    coord_patrol = pd.DataFrame(columns=[['latitude', 'longitude']])\n    schet = 0\n\n    db = DBSCAN(eps=0.001, min_samples=2, algorithm='ball_tree', metric='haversine').fit(np.radians(coords))\n    labels = np.unique(db.labels_)\n    counts = np.unique(db.labels_, return_counts=True)[1]\n    if patrol_number != 0:\n        if len(labels) > patrol_number:\n            total_counts = patrol_number\n        else:\n            total_counts = len(labels)\n\n        print(total_counts)\n        total_labels = pd.DataFrame(counts)\n        total_labels = total_labels.sort_values(by=0).index[-int(total_counts):]\n        not_total_labels = [x for x in labels if x not in total_labels]\n\n        df_coords = pd.DataFrame()\n        df_coords['latitude'] = coords[:, 0]\n        df_coords['longitude'] = coords[:, 1]\n        df_coords['labels'] = db.labels_\n        df_coords = df_coords.query('labels not in ' + str(not_total_labels))\n\n        for i in total_labels:\n            a = patrol_label(i, df_coords, pogr)\n            if a[0] != 0:\n                coord_patrol.at[schet, 'latitude'] = a[0]\n                coord_patrol.at[schet, 'longitude'] = a[1]\n                schet += 1\n        print(coord_patrol)\n        return (coord_patrol)\n############################################################################################################\n\n\n\n# OUTPUT OF THE COORDINATES\ndef dinamic_patrol_func(df: list, pogr: float, patrol_number: int) -> list:\n\n\n    dist = DistanceMetric.get_metric('haversine')\n    earth_radius = 6371\n    coord_patrol = pd.DataFrame()\n    dtp_count = 0\n    pogr = 0.2\n    # Number of patrols per district\n    dtp_count = len(df)\n    if dtp_count == 0:\n        return (0)\n    patrol_count = pd.DataFrame()\n    patrol_count['District'] = df['District'].unique()\n    patrol_count['count_raion'] = ''\n    for i in range(len(df.District.value_counts())):\n        for j in range(len(patrol_count.District)):\n            if (patrol_count.iloc[j]['District']) == (df.District.value_counts().index[i]):\n                patrol_count.at[j, 'count_raion'] = round(\n                    (df.District.value_counts()[i] / dtp_count), 2)\n    patrol_count['count_raion'] = patrol_count['count_raion'] * patrol_number\n\n    # distribution across streets\n    schet = 0\n    for district in df.District.unique():\n        if (patrol_count[patrol_count['District'] == district].count_raion[schet] != 0):\n            obj_dstr = df[df['District'] == district]\n            obj_dstr = obj_dstr.dropna(axis='index', how='any', subset=['street'])\n            patrol_count_street = raspredelenie_street(obj_dstr,\n                                                       round(patrol_count[patrol_count['District'] == district].count_raion.values[0]))\n            for i in range(len(patrol_count_street)):\n                print(patrol_count_street.iloc[i][0])\n                print(round(patrol_count_street.iloc[i][1]))\n                coord_buf = raspredelenie_claster(patrol_count_street.iloc[i][0],\n                                                  patrol_count_street.iloc[i][1],\n                                                  pogr)\n                coord_patrol = coord_patrol.append(coord_buf, ignore_index=True)\n        schet += 1\n    return (coord_patrol)\n#######################################################################################################","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":6800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
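The function.py record above clusters accident coordinates with DBSCAN on the haversine metric. One detail worth making explicit: with metric='haversine' the inputs must be in radians and eps is an angular distance, so the record's eps=0.001 corresponds to roughly 0.001 * 6371 km, about 6.4 km on the ground. A minimal sketch of that usage with made-up coordinates:

import numpy as np
from sklearn.cluster import DBSCAN

earth_radius_km = 6371.0
coords_deg = np.array([[55.75, 37.61], [55.76, 37.62], [40.71, -74.00]])  # illustrative points

eps_km = 5.0
db = DBSCAN(eps=eps_km / earth_radius_km,  # convert a 5 km radius to radians
            min_samples=2, algorithm='ball_tree',
            metric='haversine').fit(np.radians(coords_deg))
print(db.labels_)  # e.g. [0, 0, -1]: the two nearby points cluster, the far one is noise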
+{"seq_id":"407361366","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 18 16:07:02 2020.\n\n@author: jw349\n\nIn Greek mythology, Cadmus (/ˈkædməs/; Greek: Κάδμος Kadmos) was the founder\n and first king of Thebes.[1] Initially a Phoenician prince, son of\n king Agenor and queen Telephassa of Tyre and the brother of Phoenix,\n Cilix and Europa, he was originally sent by his royal parents to seek out\n and escort his sister Europa back to Tyre after she was abducted from the\n shores of Phoenicia by Zeus.[2] Cadmus founded the Greek city of Thebes,\n the acropolis of which was originally named Cadmeia in his honour.\n\nCadmus was credited by the ancient Greeks (Herodotus[3] is an example) with\nintroducing the original Alphabet or Phoenician alphabet—Φοινίκων γράμματα\nPhoinikōn grammata, \"Phoenician letters\"—to the Greeks, who adapted it to\nform their Greek alphabet. Herodotus estimates that Cadmus lived sixteen\nhundred years before his time, or around 2000 BC.[4] Herodotus had seen and\ndescribed the Cadmean writing in the temple of Apollo at Thebes engraved on\ncertain tripods. 
He estimated those tripods to date back to the time of Laius\nthe great-grandson of Cadmus.[5] On one of the tripods there was this\ninscription in Cadmean writing, which, as he attested, resembled Ionian\nletters: Ἀμφιτρύων μ᾽ ἀνέθηκ᾽ ἐνάρων ἀπὸ Τηλεβοάων (\"Amphitryon dedicated me\n[don't forget] the spoils of [the battle of] Teleboae.\").\n\n Sources, Inputs, Process, Outputs, Consumers (SIPOC) Diagram Insertion\n\n Traditionally Suppliers and Customers are used instead of Sources and\n Consumers, but that doesn't sound right for what we do.\n\nCommand Line Use:\n processProcessing.py [] [-r ]\n\nCommand Line for Graphical User Interface Mode:\n processProcessing.py\n\nWARNING: When pasting text from Microsoft applications like Word or\n PowerPoint be aware that left and right-handed single and double quotes\n will not process correctly. These characters will likely produce an error\n along the lines of: \"The input must be a UTF-8 encoded text.\"\n\n Watch for csv-table errors:\n Missing quotation marks\n Space after quotation mark and before a comma - error can be\n \"unexpected ','\"\n\nUPDATE NOTES: When converting this module to be compatible with Flask several\nchanges were made to accommodate the web app paradigm. Using the command line\nor a GUI calls to the user for input could be made directly. No convenient\nmethod was found, at the time of this comment, to accomplish this in the Flask\nframework. This required the break-up of some routines to route feedback\nthrough the web app instead.\n\nThis was aided by converting the original module and code style to a class\nstructure. This reduced the need for complex function return structures and\nprovided greater ability for the web app to interogate and set states in the\nclass remotely.\n\nUse of loggers passed into the class instance allows the original calling\nroutine to access the error and status messages generated during processing.\n\nDEVELOPMENT NOTES: Since this evolved (made small random changes and hoped it\ngot better) from a command line execution, having a GUI, to being linked to a\nFlask app, certain reporting functions were moved around to finally use the\nFlaskLoggingClass functionality that was introduced to handle messages to the\nuser. This adds some complication in message routing with regards to the GUI.\nSince the GUI is more interactive on messaging, mutliple changes to the GUI\nstatus bar updates were made to use the logging. 
This gets a bit cluttered,\nbut is confined to the startGUI function.\n\"\"\"\n\n\nimport re\nfrom collections import OrderedDict\nfrom collections import defaultdict\nfrom graphviz import Digraph\n# from graphviz import Source\nimport os\nimport subprocess\nimport PySimpleGUI as sg\nfrom typing import Tuple, List, Dict\nimport sys\nfrom itertools import chain\nfrom threading import Thread\n# import src.Flask.cadmus_doc_processing.FlaskLoggingClass_v0_7 as fLog\nimport FlaskLoggingClass_v0_7 as fLog\n\n# os.environ.setdefault('PYPANDOC_PANDOC', '/home/x/whatever/pandoc')\n\n## pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'\n## 'line': r'(%(nonalphanum7bit)s)\\1* *$' % pats,\n\n##r'(%([!-/:-@[-`{-~])s)\\1* *$'\n\n\nclass cadmusClass(Thread):\n \"\"\"Class for processing rST files.\"\"\"\n\n def __init__(self, isFlaskApp: bool, statusLog, errorLog):\n \"\"\"\n Initialize the class.\n\n Parameters\n ----------\n isFlaskApp : bool\n A flag that this class was instantiated by a Flask app.\n TODO: Convert this to a real flag instead.\n\n Returns\n -------\n None.\n\n \"\"\"\n Thread.__init__(self)\n self.isFlaskApp = isFlaskApp\n self.SOURCES_STR = 'Sources' # string specifications allowing for a one stop shop for changing\n self.PROCESS_STR = 'Process'\n self.INPUTS_STR = 'Inputs'\n self.OUTPUTS_STR = 'Outputs'\n self.CONSUMERS_STR = 'Consumers'\n self.OUTLINENUMBER_STR = 'outlineNumber'\n self.NOTES_STR = 'Notes'\n self.REQTS_STR = 'Requirements'\n self.DIAGRAM_MARKER = '.. INSERT SIPOC DIAGRAM' # marks the line at which the SIPOC diagram for the section or subsection should be inserted\n self.DIAGRAM_LINE_NUM = 'DiagramLineNum'\n self.FLOWCHART_MARKER = '.. flowchart_:: '\n self.IMAGE_MARKER = '.. image:: '\n self.FIGURE_MARKER = '.. figure:: '\n self.IMAGE_FORMAT = 'jpg'\n\n self.SIPOCDict = {self.SOURCES_STR: ['Process Inputs', 1, '#ceb3ab'],\n self.INPUTS_STR: ['Process Inputs', 0, '#c8d3d5'],\n self.PROCESS_STR: ['Process Requirements', 1, '#a4b8c4'],\n self.OUTPUTS_STR: ['Process Outputs', 0, '#6e8387'],\n self.CONSUMERS_STR: ['Process Outputs', 1, '#0cca4a'],\n } # Table Label, Index Position for the *_STR Data, Color of the SIPOC Block\n self.CSV_TABLE_MARKER = '.. csv-table:: ' # Marks the beginning of a csv-table block\n self.SIPOCTypeDict = { 'Process Inputs': [self.INPUTS_STR, self.SOURCES_STR, self.NOTES_STR],\n 'Process Requirements': ['Process Requirements', self.NOTES_STR],\n 'Consumer Requirements': ['Consumer Requirements', self.CONSUMERS_STR, self.NOTES_STR],\n 'Process Outputs': [self.OUTPUTS_STR, self.CONSUMERS_STR, self.NOTES_STR],\n }\n self.SIPOCTypeKeys = self.SIPOCTypeDict.keys()\n self.SIPOCTypeMarkers = [self.CSV_TABLE_MARKER + infoTpKs for infoTpKs in self.SIPOCTypeKeys] # get only the lines for the tables we want from directives\n self.hasStatus = False # Has anything been statused?\n self.statusList = [] # List of status strings that have been posted\n self.hasError = False # Has an error been logged?\n self.errorList = [] # List of error strings that have been logged\n self.isConfirmed = None # Flag for confirmation to proceed in spite of errors\n self.needConfirmation = False\n self.processingComplete = False\n self.lines = [] # The lines of text from the original rST document. : list of strings\n self.outlineNumbers = [] # Outline numbers of each line of text in the source rST document. : list of ints\n self.headers = [] # Header string extracted from each section and subsection in the source rST document. 
: list of strings\n self.headerByLine = [] # Header string of each line of text in the source rST document. : list of strings\n self.levels = [] # Outline levels of each line of text in the source rST document. : list of ints\n self.finalFileName = None # string of the final output rST file as processed\n self.wordFileName = None # string of the final output MS Word file as processed, if any\n self.continueGUIPolling = True # signal to the GUI to keep going\n self.statusLog = statusLog\n self.errorLog = errorLog\n\n def getFileNames(self) -> Tuple[str, str]:\n \"\"\"\n Fetch the file names that were used to output the processed rST file into.\n\n Returns\n -------\n str\n Final processed rST filename output including inserted SIPOC\n diagrams and flowcharts.\n str\n Final processed MS Word filename output based on the final rST file.\n\n \"\"\"\n return self.finalFileName, self.wordFileName\n\n def getProcessingComplete(self) -> Tuple[bool, str, str]:\n \"\"\"\n Get flag for processing being complete and the file names of interest.\n\n Returns the processingComplete flag to indicate if the processing is\n complete. Also returns the final processed rST filename output and\n the final processed MS Word filename output if there was one.\n\n Returns\n -------\n bool\n Flag to indicate if the processing is complete.\n str\n Final processed rST filename output including inserted SIPOC\n diagrams and flowcharts.\n str\n Final processed MS Word filename output based on the final rST file.\n\n \"\"\"\n return self.processingComplete, self.finalFileName, self.wordFileName\n\n def getContinueGUIPolling(self) -> bool:\n \"\"\"\n Get flag to continue processing while the GUI polls for user input.\n\n Returns\n -------\n bool\n DESCRIPTION.\n\n \"\"\"\n return self.continueGUIPolling\n\n def setContinueGUIPolling(self, state: bool):\n \"\"\"\n Set flag to continue processing while the GUI polls for user input.\n\n Parameters\n ----------\n state : bool\n Flag to indicate the code should keep polling the GUI for events.\n\n Returns\n -------\n None.\n\n \"\"\"\n self.continueGUIPolling = state\n\n\n def outlineNumberString(self, level: int, outlineNumber: str) -> str:\n \"\"\"\n Create a string of the outline number.\n\n Composite a string of outline levels with periods in-between.\n Stops at the depth given by the argument level.\n\n Parameters\n ----------\n level : int\n Outline level or depth of the outline number for the section / subsection.\n outlineNumber : list of ints\n Outline number at each level for the section / subsection.\n\n Returns\n -------\n outlineNumStr : string\n String of the outline number.\n\n \"\"\"\n outlineNumStr = ''\n for i in range(level+1):\n outlineNumStr += str(outlineNumber[i]) + '.'\n return outlineNumStr\n\n def processFile(self, sourceFilename: str):\n \"\"\"\n Parse the source rST file and build-up document structure data.\n\n Parses the source file looking for header markers, identifying the headers,\n determining the outline number for each header, and outline level of each header.\n Then collects the headers, outline number, and outline level for each line\n in the source document.\n\n Parameters\n ----------\n sourceFilename : reStructuredText file name\n The name of the source document containing the reStructuredText markup data.\n\n Returns\n -------\n None.\n\n \"\"\"\n try:\n source = open(sourceFilename,\"r\", errors='ignore')\n lines = source.readlines()\n source.close()\n sectionMarkers = '!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~' # valid section markers, 
https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#sections\n headers = [\"\"] * len(lines)\n headerByLine = [\"\"] * len(lines)\n markers = [line[0] if line[0] in sectionMarkers and line[:-1] == line[0] * (len(line)-1) and len(line) > 3 else \"\" for line in lines]\n markersNoBlanks = [mark for mark in markers if mark != '']\n markerDict = OrderedDict.fromkeys(markersNoBlanks)\n markerString = ''.join(markerDict)\n levels = [markerString.find(mark) if mark != '' else -1 for mark in markers]\n levels[0] = 0 # by definition the first line has to be level 0\n for index, level in enumerate(levels[1:]): # remember index will start at zero\n levels[index+1] = level if level > -1 else levels[index] # so watch how that plays out in here\n for index, mark in enumerate(markers[1:-3]): # remember index will start at zero\n if mark != '':\n if mark == markers[index+3]: # forward check grabs centered headers which are not numbered\n levels[index+2] = levels[index+1]\n else:\n if lines[index] == '\\n':\n markers[index+1] = ''\n else:\n headers[index] = lines[index][:-1]\n levels[index] = levels[index+1]\n if index > 1 and mark == markers[index-1]: # if this is an underline to an overline do not make it a header\n headers[index] = ''\n headerByLine[0] = headers[0] if headers[0] != '' else ''\n for index, head in enumerate(headers[1:]):\n headerByLine[index+1] = head if head != '' else headerByLine[index]\n outlineNumbers = [[0]] * len(lines)\n outlineNumberStrings = [''] * len(lines)\n currentOutlineNumber = [0] * 10\n outlineNumbers[0] = currentOutlineNumber.copy()\n if headers[0] != '':\n currentOutlineNumber[0] = 1\n outlineNumbers[0] = currentOutlineNumber.copy()\n for index, (lvl, hdr) in enumerate(zip(levels[1:], headers[1:])):\n if hdr != '': # a non-blank header means an outline number change\n if lvl == levels[index]:\n currentOutlineNumber[lvl] += 1\n elif lvl > levels[index]:\n currentOutlineNumber[lvl] = 1\n elif lvl < levels[index]:\n currentOutlineNumber[lvl] += 1\n for i in range(lvl+1, len(currentOutlineNumber)):\n currentOutlineNumber[i] = 0\n outlineNumbers[index+1] = currentOutlineNumber.copy()\n for i in range(len(lines)):\n outlineNumberStrings[i] = self.outlineNumberString(levels[i],outlineNumbers[i]) if headers[i] != '' else ''\n self.lines = lines # list of strings : The lines of text from the original rST document.\n self.outlineNumbers = outlineNumbers # list of ints : Outline numbers of each line of text in the source rST document.\n self.headers = headers # list of strings : Header string extracted from each section and subsection in the source rST document.\n self.headerByLine = headerByLine # list of strings : Header string of each line of text in the source rST document.\n self.levels = levels # list of ints : Outline levels of each line of text in the source rST document.\n except Exception as err:\n self.errorLog.warn(str(err))\n\n\n def fetchIOandReqts(self, lines: List[str], headerByLine: List[str],\n outlineNumbers: List[List[int]], levels: List[int]) -> dict:\n \"\"\"\n Extract all SIPOC related information and package the data.\n\n Scans the text lines from the rST source document, identifies the appropriately\n marked csv-table directives, and pulls in the data from those tables.\n Packages this extracted data with header and outline number data for further processing.\n\n Parameters\n ----------\n lines : list of strings\n The lines of text from the original rST document.\n headerByLine : list of strings\n Header string of each line of text in the 
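The marker scan in processFile above hinges on one predicate: a line is a section underline when it is longer than three characters (counting the trailing newline) and consists of a single punctuation character repeated to its end. A stripped-down sketch of that test outside the class, with the same marker set:

SECTION_MARKERS = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'

def is_section_underline(line):
    # line includes its trailing newline, as delivered by readlines()
    return (len(line) > 3
            and line[0] in SECTION_MARKERS
            and line[:-1] == line[0] * (len(line) - 1))

assert is_section_underline('=====\n')
assert not is_section_underline('== =\n')   # mixed characters are not an underline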
source rST document.\n outlineNumbers : list of ints\n Outline numbers of each line of text in the source rST document.\n levels : list of ints\n Outline levels of each line of text in the source rST document.\n\n Returns\n -------\n payload : default dictionary\n Contains data from the source rST document consisting of the headers,\n source, input, process, output, and consumer data type, and associated data.\n\n \"\"\"\n payload = defaultdict(dict)\n for index, ln in enumerate(lines):\n if ln[:-1] in self.SIPOCTypeMarkers:\n self.SIPOCTypeString = ln[len(self.CSV_TABLE_MARKER):-1]\n scanLineIndex = index + 3\n dataLoad = []\n while len(lines[scanLineIndex].lstrip()) > 1:\n dataLoad.append(lines[scanLineIndex].lstrip().split('\"')[1::2])\n scanLineIndex += 1\n payload[headerByLine[index]][self.SIPOCTypeString] = dataLoad\n payload[headerByLine[index]][self.OUTLINENUMBER_STR] = self.outlineNumberString(levels[index],outlineNumbers[index])\n if ln[:-1] == self.DIAGRAM_MARKER:\n payload[headerByLine[index]][self.DIAGRAM_LINE_NUM] = index\n return payload # {
: , [[], [], ...], [] , ...}\n\n\n def traverseGraph(self, graph: Dict[str, List[str]], accumulate: List[List[str]],\n path: List[str], stopNode: str):\n \"\"\"\n Find all valid paths through the graph from start to stop.\n\n Traverses the provided graph from the initial node at path[-1] (usually\n the initial call uses the start node for the graph) and following all node\n paths to the stop node. While tracing a path if it loops back upon itself\n that candidate path is discarded. Collects all valid paths that travel\n from the supplied initial node to the defined stop node by recursive calls\n to this function. All nodes are designated as strings.\n\n Parameters\n ----------\n graph: Dict[str, List]\n The graph is in the form of a dictionary with string keys and values\n that are lists of strings. Each string is the name of a node. Each\n key value is an originating or \"from\" node, and each list element\n string destination or \"to\" node.\n path: List[str]\n The start of an initial path from which its succeeding nodes will\n be traced.\n stopNode: str\n Singular terminal node name where the directed graph stops.\n\n Returns\n -------\n accumulate: List[List[str]]\n All valid paths through the graph from the start node to the stop node.\n Contains no paths with loopbacks found on this iteration.\n\n \"\"\"\n if path[-1] in graph.keys(): # if the current node has succeeding nodes, follow them\n nextNode = graph[path[-1]] # get the list of succeeding nodes from teh graph\n for node in nextNode: # span the succeeding nodes\n if node == stopNode: # if this next node is a stop node, add it to the valid paths in accumulate\n path.append(stopNode)\n accumulate.append(path.copy())\n path.pop(-1) # reset path to try the next succeeding node\n elif node in path: # if this next node has already been traversed by this path, then it loops back so we skip it\n pass\n else: # otherwise copy path onto a new path, add this next node, and call this function recursively\n newPath = path.copy()\n newPath.append(node)\n self.traverseGraph(graph, accumulate, newPath, stopNode) # keep running down the new path\n return accumulate # return all the valid paths we found so far\n\n\n def analyzeGraph(self, graph: Dict[str, List]):\n \"\"\"\n Analyze flowchart graph data.\n\n Analyzes the provided graph to determine the start and stop nodes, and\n finds the shortest path through the graph from the start node to the\n stop node.\n\n Parameters\n ----------\n graph: Dict[str, List]\n The graph is in the form of a dictionary with string keys and values\n that are lists of strings. Each string is the name of a node. 
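traverseGraph above enumerates every loop-free route by discarding any candidate path that revisits one of its own nodes, and analyzeGraph (defined next) derives the start and stop nodes with set algebra over the adjacency dict. A small end-to-end sketch of both ideas on a toy graph (node names are illustrative):

from itertools import chain

graph = {'start': ['a', 'b'], 'a': ['b', 'stop'], 'b': ['a', 'stop']}

start = set(graph) - set(chain(*graph.values()))  # appears only as a "from" node
stop = set(chain(*graph.values())) - set(graph)   # appears only as a "to" node

def all_paths(node, path):
    if node == next(iter(stop)):
        return [path]                             # reached the stop node: path is valid
    found = []
    for nxt in graph.get(node, []):
        if nxt not in path:                       # skip loop-backs
            found += all_paths(nxt, path + [nxt])
    return found

paths = all_paths(next(iter(start)), [next(iter(start))])
shortest = min(paths, key=len)                    # e.g. ['start', 'a', 'stop']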
Each\n key value is an originating or \"from\" node, and each list element\n string destination or \"to\" node.\n\n Returns\n -------\n minPath: List[str]\n The first most minimal length path found in all valid paths through the graph\n\n \"\"\"\n allKeysSet = set(graph.keys()) # should contain all nodes except the stop nodes, i.e., all \"from\" nodes\n allValuesSet = set(chain(*graph.values())) # should contain all nodes except the start nodes, i.e., all \"to\" nodes\n\n startNode = list(allKeysSet - allValuesSet) # returns only nodes that are not \"to\" nodes\n stopNode = list(allValuesSet - allKeysSet) # returns only nodes that are terminal nodes, i.e., have no connections beyond themselves\n\n graphError = False # collect all possible graph errors and return None if any are found\n startErrorStr = '\\n'\n stopErrorStr = '\\n'\n if len(startNode) > 1: # more than one start node is not allowed and returns None\n graphError = True\n startErrorStr += 'More than one starting node in the graph:\\n'\n startErrorStr += \"Starting Nodes are: '\" + str(startNode) + \"\\n'\"\n startErrorStr += 'A valid process can have only one starting node. Exiting graph analysis....\\n'\n elif len(startNode) == 0: # no start node is not allowed and returns None\n graphError = True\n startErrorStr += 'No unique starting node in the graph:\\n'\n startErrorStr += 'A valid process must have only one starting node. Exiting graph analysis....\\n'\n if len(stopNode) == 0: # no stop node is not allowed and returns None\n graphError = True\n stopErrorStr += 'No unique stopping nodes in the graph:\\n'\n stopErrorStr += 'A valid process must have at least one stopping node. Exiting graph analysis....\\n'\n elif len(stopNode) > 1: # more than one stop node is not allowed and returns None\n graphError = True\n stopErrorStr += 'More than one stopping node in the graph:\\n'\n stopErrorStr += \"Stopping Nodes are: '\" + str(stopNode) + \"'\\n\"\n stopErrorStr += 'A valid process can have only one stopping node. 
Exiting graph analysis....\\n'\n if graphError:\n errorStr = (startErrorStr + stopErrorStr)\n self.errorLog.error(errorStr)\n return None, True # at least one graph error has been found\n paths = self.traverseGraph(graph, [], [startNode[0]], stopNode[0]) # get all valid paths through the graph\n pathLens = [len(p) for p in paths] # get the lengths of each valid path\n minPathLen = min(pathLens) # get the shortest path length\n minPath = paths[pathLens.index(minPathLen)] # get the first shortest path\n return minPath, False\n\n def fetchFlowcharts(self, lines: List[str], headerByLine: List[str],\n outlineNumbers: List[List[int]], levels: List[int]) -> dict:\n \"\"\"\n Find flowchart data in the rST file.\n\n Scans the text lines from the rST source document, identifies the appropriately\n marked flowchart directives, and pulls in the data from those tables.\n Packages this extracted data with header and outline number data for further processing.\n\n Parameters\n ----------\n lines : list of strings\n The lines of text from the original rST document.\n headerByLine : list of strings\n Header string of each line of text in the source rST document.\n outlineNumbers : list of ints\n Outline numbers of each line of text in the source rST document.\n levels : list of ints\n Outline levels of each line of text in the source rST document.\n\n Returns\n -------\n flowchartsAndInsertPoints : list of lists\n List of lists holding the [, , ].\n\n \"\"\"\n graphError = False\n aGraphError = False\n markerLen = len(self.FLOWCHART_MARKER)\n index = 0\n stopAt = len(lines) - 1\n flowchartsAndInsertPoints = []\n while index < stopAt:\n thisLine = lines[index]\n if thisLine[:markerLen] == self.FLOWCHART_MARKER:\n thisGraph = defaultdict(list)\n edgeLabels = defaultdict(list)\n headerFileName = headerByLine[index]\n headerFileName.replace('&','')\n headerFileName = re.sub('[^\\w]', '_', headerFileName)\n headerFileName = headerFileName + '_flowchart.gv'\n graphLabel = '*Figure ' + self.outlineNumberString(levels[index],outlineNumbers[index]) + ' ' + thisLine[markerLen:-1] + '*\\n'\n flowchartsAndInsertPoints.append([(index-1), headerFileName + '.' 
+ self.IMAGE_FORMAT + '\\n', graphLabel])\n g = Digraph('G', filename=headerFileName,\n graph_attr={'rankdir':'TB', 'splines':'ortho',\n 'fixedsize': 'true', 'fontname': 'Times New Roman, Bold',\n 'fontsize': '18.0', 'compound': 'true',\n 'nodesep': '1.2', 'ranksep': '0.4',\n 'labeljust': 'l', 'labelloc': 'l'})\n # g = Digraph('G', filename=headerFileName, graph_attr={'rankdir':'LR', 'splines':'ortho', 'fixedsize': 'true', 'fontname': 'Times New Roman, Bold', 'fontsize': '24.0', 'compound': 'true'})\n # g.attr('graph', label=graphLabel)\n g.attr('node', shape='box')\n\n dataIndex = index + 3\n while not lines[dataIndex].isspace(): # keep scanning and collecting the flowchart data until a blank line is found\n parsedLine = re.findall(r'\"([^\"]*)\"', lines[dataIndex])\n lenParsedLine = len(parsedLine)\n if lenParsedLine == 3: # indicates a well formed flowchart line\n thisGraph[parsedLine[0]].append(parsedLine[1])\n if len(parsedLine[2]) > 0: # if there is text in the third position this is an edge label\n edgeLabels[parsedLine[2]].append([parsedLine[0], parsedLine[1]])\n else: # otherwise the edge has no label, just an arrow\n edgeLabels['none'].append([parsedLine[0], parsedLine[1]])\n # g.edge(parsedLine[0], parsedLine[1], label=parsedLine[2])\n else:\n graphError = True\n self.errorLog.error(f\"Error at line {dataIndex}, should have three entries but has {lenParsedLine}.\")\n self.errorLog.error(f\"Lines reads '{lines[dataIndex]}'\")\n dataIndex += 1\n aGraphError = False\n minpath, aGraphError = self.analyzeGraph(thisGraph) # finds the shortest path through the flowchart from start to stop\n if not aGraphError:\n allKeysSet = set(thisGraph.keys())\n allValuesSet = set(chain(*thisGraph.values()))\n allNodes = allKeysSet | allValuesSet\n minorNodes = allNodes - set(minpath)\n with g.subgraph(name='cluster_0') as s:\n s.attr(rank='same')\n for nodeName in minpath:\n g.node(nodeName, group='1')\n with g.subgraph(name='cluster_1') as s:\n for nodeName in minorNodes:\n g.node(nodeName, group='2')\n for edgeStr in edgeLabels.keys():\n for nodePairs in edgeLabels[edgeStr]:\n if edgeStr == 'none':\n g.edge(nodePairs[0], nodePairs[1], label='')\n else:\n # g.edge(nodePairs[0], nodePairs[1], taillabel=('t2'+edgeStr+'\\l'), labeldistance='2') #, labeldistance='10', labelangle='90')\n # g.edge(nodePairs[0], nodePairs[1], xlabel=('x'+edgeStr+'\\l')) #, labeldistance='10', labelangle='90')\n g.edge(nodePairs[0], nodePairs[1], xlabel=(edgeStr+' ')) #, labeldistance='10', labelangle='90')\n\n # # DEBUG WITH PDF OUTPUTS vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n # try: # ***** DEBUG **** Produces PDF files that may conflict and cause errors\n # g.view() # for debugging purposes now, COMMENT OUT upon completion of testing\n # except Exception:\n # print(\"ERROR: The diagram\",headerFileName,\"may already be open in another application.\")\n # # self.reportError(f\"ERROR: The diagram {headerFileName} may already be open in another application.\")\n # self.errorLog.error(f\"ERROR: The diagram {headerFileName} may already be open in another application.\")\n # # DEBUG WITH PDF OUTPUTS ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n g.render(format=self.IMAGE_FORMAT)\n\n index = dataIndex - 1\n if aGraphError:\n graphError = graphError or aGraphError\n index += 1\n if graphError:\n self.statusLog.error('Processing failed because of a graph error.')\n return flowchartsAndInsertPoints, graphError\n\n\n def getSIPOCNodes(self, payload: dict, hdr: str) -> Dict[str, str]:\n \"\"\"\n Extract node data from 
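fetchFlowcharts above renders each flowchart with the graphviz package and labels edges through xlabel rather than label; with orthogonal splines a plain label reserves space along the edge and tends to distort the layout, while xlabels are placed after layout. A minimal sketch of that rendering pattern (node names and the file name are dummies):

from graphviz import Digraph

g = Digraph('G', filename='demo_flowchart.gv',
            graph_attr={'rankdir': 'TB', 'splines': 'ortho'})
g.attr('node', shape='box')
g.edge('Start', 'Check input')
g.edge('Check input', 'Process', xlabel='valid ')
g.edge('Check input', 'Start', xlabel='retry ')
g.edge('Process', 'Stop')
g.render(format='jpg')  # writes demo_flowchart.gv and demo_flowchart.gv.jpg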
embedded SIPOC data.\n\n Extracts each node name by type as defined by SIPOCDict.keys() that will go\n into the SIPOC diagram. Adds dummy nodes to each type ('Sources', 'Inputs',\n 'Process', 'Outputs', 'Consumers') to balance the graph vertically. All\n 'Process' nodes are dummies, but needed so the graph edges line up nicely.\n\n Parameters\n ----------\n payload : default dictionary\n Contains data from the source rST document consisting of the headers,\n source, input, process, output, and consumer data type, and associated data.\n hdr : string\n One of the SIPOCDict.keys().\n\n Returns\n -------\n nodeStack : dictionary of lists\n Contains all the names of the nodes of each SIPOC entry for the diagram.\n\n \"\"\"\n nodeError = False\n nodeStack = defaultdict(list)\n processInputsStr = self.PROCESS_STR + ' ' + self.INPUTS_STR\n processOutputsStr = self.PROCESS_STR + ' ' + self.OUTPUTS_STR\n for kee in self.SIPOCDict.keys():\n nodeStack[kee] = []\n for SIPOCType in [processInputsStr, processOutputsStr]: # was in SIPOCTypeKeys:\n try:\n for data in payload[hdr][SIPOCType]:\n for index, entry in enumerate(data):\n if index < 2: # was: 3:\n # we don't want to process Notes or Requirements in the diagrams, so skip those. They can get wonky for some reason that might need to be explored later.\n try:\n nodeStack[self.SIPOCTypeDict[SIPOCType][index]].append(entry)\n except IndexError:\n nodeError = True\n self.errorLog.error('An index error has been encountered. It may indicate a malformed table.')\n self.errorLog.error('Possible location of the error in the source rST file at:')\n self.errorLog.error(f' Header: {hdr}')\n self.errorLog.error(f' SIPOC Table: {SIPOCType}')\n self.errorLog.error(f' Partial Data Entry: {data}')\n except KeyError:\n nodeError = True\n self.errorLog.error(f\"There is no data table for '{SIPOCType}' in the '{hdr}' section of the document.\")\n for SIPOCEntry in nodeStack:\n nodeStack[SIPOCEntry] = list(dict.fromkeys(nodeStack[SIPOCEntry]))\n maxNodeCount = max([len(nodeStack[SIPOCEntry]) for SIPOCEntry in nodeStack])\n nodeStack[self.PROCESS_STR] = ['_'+self.PROCESS_STR+str(index) for index in range(maxNodeCount)]\n for self.SIPOCcol in self.SIPOCDict.keys(): # create subgraphs and nodes\n nodeCountDiff = maxNodeCount - len(nodeStack[self.SIPOCcol])\n nodesFront = nodeCountDiff // 2\n nodesBack = nodeCountDiff - nodesFront\n for x in range(nodesFront):\n nodeStack[self.SIPOCcol].insert(0,'_'+self.SIPOCcol[:2]+'F'+str(x))\n for x in range(nodesBack):\n nodeStack[self.SIPOCcol].append('_'+self.SIPOCcol[:2]+'B'+str(x))\n return nodeStack, nodeError\n\n def createSIPOCDiagrams(self, payload: defaultdict) -> List[list]:\n \"\"\"\n Create SIPOC diagrams from embedded data.\n\n Processes all the the source, input, output, and consumer data embedded in\n the source rST document into SIPOC diagrams\n\n Parameters\n ----------\n payload : default dictionary\n Contains data from the source rST document consisting of the headers,\n source, input, process, output, and consumer data type, and associated data.\n\n Returns\n -------\n diagramsAndInsertPoints : list of lists\n List of lists holding the [, , ].\n\n \"\"\"\n diagramsAndInsertPoints = []\n for hdr in payload:\n headerFileName = hdr\n headerFileName.replace('&','')\n headerFileName = re.sub('[^\\w]', '_', headerFileName)\n headerFileName = headerFileName + '.gv'\n diagramsAndInsertPoints.append([payload[hdr][self.DIAGRAM_LINE_NUM],\n headerFileName + '.' 
+ self.IMAGE_FORMAT,\n ('*Figure ' + payload[hdr][self.OUTLINENUMBER_STR]\n +' '+hdr + ' SIPOC Diagram*')])\n # fontname below was changed from 'Times Roman' to 'Times New Roman, Bold' to improve graph readability\n g = Digraph('G', filename=headerFileName, graph_attr={'rankdir':'LR', 'splines':'line', 'fixedsize': 'true', 'fontname': 'Times New Roman, Bold', 'fontsize': '24.0', 'compound': 'true'})\n # g.attr('graph', label=payload[hdr][OUTLINENUMBER_STR]+' '+hdr+' SIPOC Diagram')\n g.attr('node', shape='box')\n\n nodeStack, nodeError = self.getSIPOCNodes(payload, hdr)\n if nodeError:\n return None, nodeError\n subgraphStore = {}\n for SIPOCcol in self.SIPOCDict.keys(): # create subgraphs and nodes\n subgraphName = 'cluster_' + SIPOCcol\n with g.subgraph(name=subgraphName) as c:\n c.attr(style='filled', color=self.SIPOCDict[SIPOCcol][2])\n c.node_attr.update(style='filled', color='white', shape='box', fontsize='20', fontname='Times New Roman, Bold')\n for index, nodeName in enumerate(nodeStack[SIPOCcol]):\n # fillStyle = 'invis' if nodeName[0] == '_' or nodeName[0:7] == PROCESS_STR else 'filled'\n # fillColor = 'invis' if nodeName[0] == '_' or nodeName[0:7] == PROCESS_STR else 'white'\n fillStyle = 'invis' if nodeName[0] == '_' else 'filled'\n fillColor = 'invis' if nodeName[0] == '_' else 'white'\n # print(SIPOCcol,index, nodeName, fillStyle, fillColor)\n c.node(nodeName, group=str(index), style=fillStyle, color=fillColor) # group lines up the nodes left to right\n c.attr(label=SIPOCcol, fontsize='24')\n subgraphStore[subgraphName] = c\n for SIPOCType in self.SIPOCTypeKeys: # create edges\n if self.INPUTS_STR in SIPOCType or self.OUTPUTS_STR in SIPOCType:\n try:\n for edgePair in payload[hdr][SIPOCType]:\n if self.INPUTS_STR in SIPOCType:\n g.edge(edgePair[1]+':e',edgePair[0]+':w')\n index = nodeStack[self.INPUTS_STR].index(edgePair[0])\n g.edge(edgePair[0]+':e', '_'+self.PROCESS_STR+str(index), lhead='cluster_Process')\n else:\n g.edge(edgePair[0]+':e',edgePair[1])\n index = nodeStack[self.OUTPUTS_STR].index(edgePair[0])\n g.edge('_'+self.PROCESS_STR+str(index), edgePair[0]+':w', ltail='cluster_Process')\n except KeyError:\n pass # getSIPOCNodes will have already grabbed this error\n # # DEBUG WITH PDF OUTPUTS vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n # try: # ***** DEBUG **** Produces PDF files that may conflict and cause errors\n # g.view() # for debugging purposes now, COMMENT OUT upon completion of testing\n # except Exception:\n # print(\"ERROR: The diagram\",headerFileName,\"may already be open in another application.\")\n # # self.reportError(f\"ERROR: The diagram {headerFileName} may already be open in another application.\")\n # self.errorLog.error(f\"ERROR: The diagram {headerFileName} may already be open in another application.\")\n # # DEBUG WITH PDF OUTPUTS ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n g.render(format=self.IMAGE_FORMAT)\n return diagramsAndInsertPoints, False\n\n def writeFinalrST(self, sourceFilename: str, lines: List[str],\n diagramsAndInsertPoints: List[list],\n flowchartsAndInsertPoints: List[list]) -> str:\n \"\"\"\n Write out the final processed rST file.\n\n Insert the rST image directives into the original rST document and\n writes the result to a new file for use in online documentation or\n translation to other formats. Inserts images as rST figures and adds\n captions based on the header at that location.\n\n Inserts the directives from back to front to avoid needing to keep track\n of line number changes if it were done front to back. 
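createSIPOCDiagrams above leans on two graphviz devices: subgraph names prefixed with cluster_ so Graphviz draws them as filled boxes (the SIPOC columns), and invisible filler nodes so every column holds the same node count and the ranks line up. A toy version of the same trick (names and colors are illustrative):

from graphviz import Digraph

g = Digraph('G', filename='demo_sipoc.gv', graph_attr={'rankdir': 'LR'})
g.attr('node', shape='box')

with g.subgraph(name='cluster_Inputs') as c:   # the 'cluster_' prefix makes it a drawn box
    c.attr(style='filled', color='#c8d3d5', label='Inputs')
    c.node('order form')
    c.node('_pad0', style='invis')             # invisible filler balances the column

with g.subgraph(name='cluster_Outputs') as c:
    c.attr(style='filled', color='#6e8387', label='Outputs')
    c.node('invoice')
    c.node('report')

g.edge('order form', 'invoice')
g.render(format='jpg')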
That is, inserting\n near the back of the file does not change the line numbers earlier in the file.\n\n Parameters\n ----------\n sourceFilename : string\n File name of the original rST document.\n lines : list of strings\n The lines of text from the original rST document.\n diagramsAndInsertPoints : list of lists\n List of lists holding the [, , ].\n flowchartsAndInsertPoints : list of lists\n List of lists holding the [, , ].\n\n Returns\n -------\n filenameWithImages : string\n sourceFilename modified to indicate that image links have been added to the final output file\n\n \"\"\"\n wholeList = diagramsAndInsertPoints + flowchartsAndInsertPoints # combine SIPOC diagram and flowchart information\n wholeList.sort(key=lambda x: x[0]) # sort based on the line number for insertion\n for dAndIP in reversed(wholeList): # go back to front to avoid line number shifts\n if lines[dAndIP[0] + 1] != '\\n' and not self.FLOWCHART_MARKER in lines[dAndIP[0] + 1]:\n lines[dAndIP[0] + 1] = self.FIGURE_MARKER + dAndIP[1] + '\\n\\n' + dAndIP[2] + '\\n' # overwrite whatever is there because it is supposed to be blank\n else:\n lines.insert(dAndIP[0] + 1, self.FIGURE_MARKER + dAndIP[1] + '\\n' + dAndIP[2] + '\\n' )\n filenameWithInserts = sourceFilename[:-4] + '_WI' + sourceFilename[-4:]\n outfile = open(filenameWithInserts,'w')\n outfile.writelines(lines)\n outfile.close()\n return filenameWithInserts\n\n def testPandoc(self, lines: List[str], startingLineIndex: int, stoppingLineIndex: int, sourceFilename: str, referenceTemplate: str):\n \"\"\"\n Test for errors breaking up the rST and using Pandoc.\n\n Test portions of the source rST file to help isolate the location of the\n errors if errors were found in the source rST file. If errors are found\n in running Pandoc those errors are reported out to the user.\n\n Parameters\n ----------\n lines: List[str]\n The lines of text from the original rST document.\n startingLineIndex: int\n Index of the line of text in the original rST document at which to start the test.\n stoppingLineIndex: int\n Index of the line of text in the original rST document at which to stop the test.\n sourceFilename: str\n File name of the original rST document.\n referenceTemplate: str\n File name of the reference template Pandoc uses to convert the rST document to MS Word.\n\n Returns\n -------\n None.\n\n \"\"\"\n outFilename = 'testPandoc.rst' # set the filename once and then just use the variable instead of a string\n outFileDoc = 'testPandoc.docx' # set the filename once and then just use the variable instead of a string\n outfile = open(outFilename,'w')\n outfile.writelines(lines)\n outfile.close()\n cmdLineStr = 'pandoc ' + outFilename + ' -o ' + outFileDoc + ' --reference-doc \"' + referenceTemplate + '\"'\n errorStr = None\n cmdLineRtn = subprocess.Popen(cmdLineStr, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) # run pandoc to translate the rST file into MS Word formatted document\n if cmdLineRtn.wait() != 0:\n output, errorStr = cmdLineRtn.communicate()\n startingLine = startingLineIndex + 1 # zero array offset stuff, text editors start line numbers at 1\n self.errorLog.warn(f'Pandoc Command Return: {output}')\n self.errorLog.warn(f'Pandoc Error String: {errorStr}')\n self.errorLog.warn(f\"Error occurred in source file '{sourceFilename}' using reference template '{referenceTemplate}'.\")\n self.errorLog.warn(f\"Text where error occurred at lines {startingLine}-{stoppingLineIndex}, text at issue follows:\")\n 
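writeFinalrST above inserts the figure directives back to front, which is the standard trick for editing a list by index: working from the highest line number down means earlier insertion points are never shifted by the edits already made. A tiny demonstration of why the reversed order works:

lines = ['a', 'b', 'c', 'd']
inserts = [(1, 'X'), (3, 'Y')]       # (insert after this index, text)

for idx, text in sorted(inserts, reverse=True):
    lines.insert(idx + 1, text)      # later indices first, so earlier ones stay valid

assert lines == ['a', 'b', 'X', 'c', 'd', 'Y']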
self.errorLog.warn('**********************************************')\n for lineNumber, lin in zip(range(startingLine, stoppingLineIndex+1),lines):\n self.errorLog.warn(f\"{lineNumber} {lin}\")\n self.errorLog.warn('**********************************************')\n if os.path.exists(outFilename): os.remove(outFilename) # cleanup the test rST output file\n if os.path.exists(outFileDoc): os.remove(outFileDoc) # cleanup any possible MS Word file created by Pandoc\n\n def findNextBlankLine(self, lines: List[str], index: int) -> int:\n \"\"\"\n Find the next blank line after a given 'index'.\n\n Search forward from the line given by 'index' until the next line containing\n nothing but whitespace is found.\n\n Parameters\n ----------\n lines: List[str]\n The lines of text from the original rST document.\n index: int\n Index of the line of text in the original rST document at which to start the search.\n\n Returns\n -------\n nextIndex : Index of the line of text in the original rST document that\n contains only whitespace after 'index'.\n\n \"\"\"\n nextIndex = index + 1\n if nextIndex < len(lines):\n while nextIndex < len(lines) and not lines[nextIndex].isspace(): # check the bound first to avoid an IndexError at end of file\n nextIndex += 1\n else:\n nextIndex = index\n return nextIndex\n\n def findPandocErrors(self, lines: List[str], sourceFilename: str, referenceTemplate: str):\n \"\"\"\n Find where Pandoc errors are occurring.\n\n Test portions of the source rST file to help isolate the location of the\n errors if errors were found in the source rST file. This routine searches\n for text in between whitespace only lines.\n If a CSV table is located, the entire table is loaded and tested.\n\n TODO: Need to expand testing for other table types and any directives that\n might include whitespace in a complete text set.\n\n Parameters\n ----------\n lines: List[str]\n The lines of text from the original rST document.\n sourceFilename: str\n File name of the original rST document.\n referenceTemplate: str\n File name of the reference template Pandoc uses to convert the rST document to MS Word.\n\n Returns\n -------\n None.\n\n \"\"\"\n stopAt = len(lines)\n index = 0\n markerLen = len(self.CSV_TABLE_MARKER)\n while index < stopAt:\n startingLineIndex = index\n if not lines[index].isspace():\n if lines[index][0:markerLen] == self.CSV_TABLE_MARKER:\n index = self.findNextBlankLine(lines, index)\n index = self.findNextBlankLine(lines, index)\n else:\n index = self.findNextBlankLine(lines, index)\n # testPandoc(accumText, startingLineIndex, index, sourceFilename, referenceTemplate)\n self.testPandoc(lines[startingLineIndex:index+1], startingLineIndex, index, sourceFilename, referenceTemplate)\n index += 1\n\n def analyzeRSTLine4QuotedStr(self, thisLine: str) -> Tuple[int, bool, int]:\n \"\"\"\n Find quoted strings in a string to help parse CSV tables.\n\n Parameters\n ----------\n thisLine: str\n A line of text from the original rST document.\n\n Returns\n -------\n countList, anyEmptyEntry, badQuoteCommaCount: Tuple[int, bool, int]\n countList - total number of quoted strings found\n anyEmptyEntry - True if any of the quoted strings found are empty strings\n badQuoteCommaCount - total number of instances where there is a space\n after a quotation mark but before a comma\n\n \"\"\"\n parsedLine = re.findall(r'\"([^\"]*)\"', thisLine)\n lenZeroList = [len(x) == 0 for x in parsedLine]\n anyEmptyEntry = any(lenZeroList)\n countList = len(parsedLine)\n badQuoteCommaCount = thisLine.count('\" ,') + thisLine.count(\"' ,\")\n return countList, anyEmptyEntry, 
badQuoteCommaCount\n\n def reportRSTError(self, sourceFilename: str, tableType: str, tableHeader: str,\n thisLine: str, indexLine: int, shouldCount: int,\n doesCount: int, anyEmptyEntry: bool, badQuoteCommaCount: int) -> bool:\n \"\"\"\n Report errors found in the rST file.\n\n This function informs the user what kind of error occurred, where it\n was, and the content of the line on which it was found.\n\n Parameters\n ----------\n sourceFilename: str\n File name of the original rST document.\n tableType: str\n The table directive line in which the error was found.\n tableHeader: str\n The table's header line defining the expected entry format.\n thisLine: str\n A line of text from the original rST document.\n indexLine: int\n Index of the line at which the error was found.\n shouldCount: int\n Number of entries the line should have, based on the header.\n doesCount: int\n Number of entries the line actually has.\n anyEmptyEntry: bool\n True if any of the quoted strings found are empty strings.\n badQuoteCommaCount: int\n Total number of instances where there is a space after a quotation\n mark but before a comma.\n\n Returns\n -------\n isRSTError: bool\n True if an rST error was detected.\n\n \"\"\"\n isRSTError = False\n if shouldCount != doesCount:\n self.errorLog.error(f\"\\nError in source rst file '{sourceFilename}' in table '{tableType}'.\")\n self.errorLog.error(f\"Line should match header format '{tableHeader}'\")\n self.errorLog.error(f\"Line {indexLine} should have {shouldCount} entries but has {doesCount}\")\n self.errorLog.error(f\"Line reads: '{thisLine}'\")\n isRSTError = True\n if badQuoteCommaCount > 0:\n self.errorLog.error(f\"\\nError in source rst file '{sourceFilename}' in table '{tableType}'.\")\n plural = '' if badQuoteCommaCount == 1 else 's'\n self.errorLog.error(f\"Line {indexLine} has {badQuoteCommaCount} formatting error{plural}, likely a single or double quote followed by a space.\")\n self.errorLog.error(f\"Line reads: '{thisLine}'\")\n isRSTError = True\n return isRSTError\n\n def checkRSTKnownErrors(self, sourceFilename: str, lines: List[str]) -> bool:\n \"\"\"\n Check for known rST errors.\n\n Checks for known errors in the source rST file; this function informs\n the user what kind of error occurred, where it was, and\n the content of the line on which it was found.\n\n Parameters\n ----------\n sourceFilename: str\n File name of the original rST document.\n lines: List[str]\n The lines of text from the original rST document.\n\n Returns\n -------\n isRSTError: bool\n True if an rST error was detected.\n\n \"\"\"\n isRSTError = False\n for i, b in enumerate(lines):\n lq = re.search('‘', b)\n rq = re.search('’', b)\n if lq != None or rq != None:\n print(f\"\\nError in source rst file '{sourceFilename}' at line '{i}'.\")\n andStr = ' and ' if lq != None and rq != None else ''\n lqStr = 'left quote' if lq != None else ''\n rqStr = 'right quote' if rq != None else ''\n print(f\"Line {i} has non-UTF-8 quotation marks, {lqStr}{andStr}{rqStr}.\")\n print(f\"Line reads: '{b}'\")\n isRSTError = True\n self.errorLog.error(f\"\\nError in source rst file '{sourceFilename}' at line '{i}'.\")\n andStr = ' and ' if lq != None and rq != None else ''\n lqStr = 'left quote' if lq != None else ''\n rqStr = 'right quote' if rq != None else ''\n self.errorLog.error(f\"Line {i} has non-UTF-8 quotation marks, {lqStr}{andStr}{rqStr}.\")\n self.errorLog.error(f\"Line reads: '{b}'\")\n m = re.search(self.CSV_TABLE_MARKER, b)\n if m != None:\n headerLine = lines[i+1]\n entriesShouldCount, anyEmptyEntry, badQuoteCommaCount = self.analyzeRSTLine4QuotedStr(headerLine)\n possibleRSTError = self.reportRSTError(sourceFilename, b, headerLine, headerLine, i, entriesShouldCount, entriesShouldCount, 
anyEmptyEntry, badQuoteCommaCount)\n if possibleRSTError:\n isRSTError = True\n indexLine = i + 3\n while not lines[indexLine].isspace():\n thisLine = lines[indexLine]\n entriesDoCount, anyEmptyEntry, badQuoteCommaCount = self.analyzeRSTLine4QuotedStr(thisLine)\n possibleRSTError = self.reportRSTError(sourceFilename, b, lines[i+1], thisLine, i, entriesShouldCount, entriesDoCount, anyEmptyEntry, badQuoteCommaCount)\n if possibleRSTError:\n isRSTError = True\n indexLine += 1\n return isRSTError\n\n # def makeUserConfirmationPopUp(self, GUIWindow: sg.Window, displayText: str) -> bool:\n # \"\"\"\n # Create a pop-up window to confirm continuing processing with errors.\n\n # Parameters\n # ----------\n # GUIWindow : sg.Window\n # The window of the graphical user interface (GUI).\n # displayText : str\n # Text to be shown to the user in the pop-up window.\n\n # Returns\n # -------\n # bool\n # True if the user wants to proceed with errors, False otherwise.\n\n # \"\"\"\n # GUIWindow.hide()\n # layout = [ [sg.Text(displayText) ],\n # [sg.Button('OK'),sg.Button('Cancel')]]\n\n # window = sg.Window('Confirm Proceed with Errors?').Layout(layout)\n\n # isProceedConfirmed = False\n # continueLoop = True\n # while continueLoop:\n # event, values = window.Read()\n # if event is None:\n # break\n # elif event == 'OK':\n # isProceedConfirmed = True\n # continueLoop = False\n # elif event == 'Cancel':\n # isProceedConfirmed = False\n # continueLoop = False\n # window.Close()\n # GUIWindow.un_hide()\n # return isProceedConfirmed\n\n\n def readInDocument(self, GUIWindow: sg.Window, sourceFilename: str, referenceTemplate: str, areRecoverableErrorsStr: str):\n \"\"\"\n Read in the source rST file and check for known rST errors.\n\n Parameters\n ----------\n GUIWindow : sg.Window\n The window of the graphical user interface (GUI).\n sourceFilename : str\n File name of the original rST document.\n referenceTemplate : str\n File name of the reference template Pandoc uses to convert the rST document to MS Word.\n areRecoverableErrorsStr : str\n Message logged when recoverable errors are found while running as a Flask app.\n\n Returns\n -------\n isRSTError : bool\n True if an rST error was detected.\n isProceedConfirmed : bool\n True if processing should continue despite any detected errors.\n\n \"\"\"\n isProceedConfirmed = True\n self.processFile(sourceFilename)\n isRSTError = self.checkRSTKnownErrors(sourceFilename, self.lines)\n errorText = 'Proceed in spite of the errors? The program may fail or give erroneous results.'\n if isRSTError:\n if GUIWindow is None:\n isProceedConfirmed = False\n if self.isFlaskApp: # then this is from a flask app and needs special handling\n self.errorLog.warn(\"reStructuredText file errors have been detected. 
Further processing is possible but may fail or give erroneous results.\")\n self.errorLog.warn(areRecoverableErrorsStr)\n else: # then this was run on the command line and can be confirmed there.\n promptReply = input(errorText + ' (y/n)[n]?')\n isProceedConfirmed = True if promptReply == 'y' or promptReply == 'Y' else False\n else:\n isProceedConfirmed = self.makeUserConfirmationPopUp(GUIWindow, errorText)\n if not isProceedConfirmed and not self.isFlaskApp:\n statusStr = f\"Further processing of '{sourceFilename}' cancelled by the user with identified rST file errors.\"\n self.statusLog.info(statusStr)\n return isRSTError, isProceedConfirmed\n\n\n def processDocument(self, GUIWindow: sg.Window, sourceFilename: str, referenceTemplate: str):\n \"\"\"\n Process a document to autogenerate SIPOC diagrams and flowcharts.\n\n Parameters\n ----------\n GUIWindow : sg.Window\n The window of the graphical user interface (GUI).\n sourceFilename : str\n String containing the name of the rST file to be processed.\n referenceTemplate : str\n String containing the name of the Pandoc template file to be used\n to create an MS Word document from the processed rST file.\n\n Returns\n -------\n None.\n\n \"\"\"\n payload = self.fetchIOandReqts(self.lines, self.headerByLine, self.outlineNumbers, self.levels)\n flowchartsAndInsertPoints, graphError = self.fetchFlowcharts(self.lines, self.headerByLine, self.outlineNumbers, self.levels)\n diagramsAndInsertPoints, nodeError = self.createSIPOCDiagrams(payload)\n if not (graphError or nodeError):\n self.finalFileName = self.writeFinalrST(sourceFilename, self.lines, diagramsAndInsertPoints, flowchartsAndInsertPoints)\n isPandocAvailable = os.system('pandoc --version') == 0\n if not isPandocAvailable and referenceTemplate is not None:\n self.errorLog.warn('Pandoc is not available to this application.')\n self.errorLog.warn(f'Unable to process \"{self.finalFileName}\" into the MS Word file \"{self.wordFileName}\"')\n pandocError = False\n pandocErrorStr = None\n if isPandocAvailable and referenceTemplate is not None:\n self.wordFileName = sourceFilename[:-4] + '.docx'\n cmdLineList = ['pandoc', self.finalFileName, '-o', self.wordFileName, '--reference-doc', referenceTemplate]\n cmdLineRtn = subprocess.Popen(cmdLineList, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True,\n creationflags=subprocess.CREATE_NEW_CONSOLE) # run pandoc to translate the rST file into MS Word formatted document\n try: # re-did this to get it working with flask, don't know why it worked, but it did.\n output, errorStr = cmdLineRtn.communicate(timeout=60)\n except subprocess.TimeoutExpired:\n cmdLineRtn.kill()\n output, pandocErrorStr = cmdLineRtn.communicate()\n self.errorLog.error('Pandoc Command Return: ' + str(output)) # communicate() returns bytes; convert before concatenating\n self.errorLog.error('Pandoc Error String: ' + str(pandocErrorStr))\n self.errorLog.error('If error is \"Invalid UTF-8 stream\", the source .rst file needs to be encoded in UTF-8 to be processed by pandoc.')\n pandocError = True\n # self.finalFileName, self.wordFileName\n if self.wordFileName is None:\n self.wordFileName = 'None - Error in processing'\n if referenceTemplate is None:\n self.errorLog.error(\"Error in processing '\" + sourceFilename\n + \"', returned error(s):\")\n else:\n self.errorLog.error(\"Error in processing '\" + sourceFilename\n + \"' which will affect the MS Word file '\" + self.wordFileName + \"'\")\n self.errorLog.error(\"Error in processing '\" + sourceFilename\n + \"', returned error(s):\")\n 
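# bisect the document between blank lines and re-run Pandoc on each chunk to localize the failure\n 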
self.findPandocErrors(self.lines, sourceFilename, referenceTemplate)\n wasError = graphError or nodeError or pandocError\n\n if not wasError: # DEBUGGING\n if isPandocAvailable and referenceTemplate is not None:\n self.statusLog.info(f\"Processing Complete, processed '{sourceFilename}' into output file '{self.finalFileName}' and into the MS Word file '{self.wordFileName}'\")\n else:\n self.statusLog.info(f\"Processing Complete, processed '{sourceFilename}' into output file '{self.finalFileName}'\")\n\n\n\n# sourceFilename = \"demo.txt\"\n# sourceFilename = \"MS Analysis Process.rst\"\n# sourceFilename = \"Process_Documentation_Process v2.rst\"\n# pandocPath = 'pandoc'\n# referenceTemplate = 'custom-reference-other.docx'\n# processDocument(sourceFilename, referenceTemplate)\n\n# print(sys.argv)\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) == 2:\n if sys.argv[1] in ['-h','-help','-?']:\n print(f\"Command Line Usage: '{sys.argv[0]} [<source file>] [-r <reference template>]'\")\n print(f\"GUI Usage: '{sys.argv[0]}'\")\n else:\n sourceFilename = sys.argv[1]\n statusLogger = fLog.flaskLoggingClass(f'Cadmus Status Log - {sourceFilename} *******', 'main_status.log', True)\n errorLogger = fLog.flaskLoggingClass(f'Cadmus Error Log - {sourceFilename} *******', 'main_error.log', True)\n cadmus = cadmusClass(None, statusLogger, errorLogger)\n isRSTError, isProceedConfirmed = cadmus.readInDocument(None, sourceFilename, None, 'Possible recoverable errors')\n if isProceedConfirmed:\n cadmus.processDocument(None, sourceFilename, None)\n\n if errorLogger.hasError:\n errorLogger.error(f\"Error in processing '{sourceFilename}'\")\n elif len(sys.argv) == 4:\n refIndex = sys.argv.index('-r')\n if refIndex == 1:\n referenceTemplate = sys.argv[2]\n sourceFilename = sys.argv[3]\n elif refIndex == 2:\n referenceTemplate = sys.argv[3]\n sourceFilename = sys.argv[1]\n else:\n print(f\"Syntax error, for a reference template use syntax '{sys.argv[0]} -r <reference template> <source file>'\")\n sys.exit()\n statusLogger = fLog.flaskLoggingClass(f'Cadmus Status Log - {sourceFilename} w/ {referenceTemplate} *******', 'main_status.log', True)\n errorLogger = fLog.flaskLoggingClass(f'Cadmus Error Log - {sourceFilename} w/ {referenceTemplate} *******', 'main_error.log', True)\n cadmus = cadmusClass(None, statusLogger, errorLogger)\n isRSTError, isProceedConfirmed = cadmus.readInDocument(None, sourceFilename, referenceTemplate, 'Possible recoverable errors')\n if isProceedConfirmed:\n cadmus.processDocument(None, sourceFilename, referenceTemplate)\n else:\n print(f\"Syntax error, use syntax '{sys.argv[0]} [<source file>] [-r <reference template>]'\")\n print()\n statusLogger.printLog()\n print()\n errorLogger.printLog()\n statusLogger.closeLogFile()\n errorLogger.closeLogFile()\n\n\n##### IMPORTANT: CONVERSION USING PANDOC: ##############\n# conversion to Word:\n # pandoc \"MS Analysis Process_WI.rst\" -o MS_Analysis_Process.docx --reference-doc custom-reference-other.docx\n # pandoc \"Process_Documentation_Process_WI.rst\" -o Process_Documentation_Process.docx --reference-doc custom-reference-other.docx\n\n\"\"\"\nUseful testing command line stuff:\nD:\ncd \\tools\\src\\Flask\\\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_6.py\n\"\"\"\n\n# set PATH=%SystemRoot%\\system32;%SystemRoot%;%SystemRoot%\\System32\\Wbem;d:\\anaconda3-2020.02b2;d:\\anaconda3-2020.02b2\\Library\\usr\\bin;d:\\anaconda3-2020.02b2\\Library\\bin;d:\\anaconda3-2020.02b2\\Scripts;%PATH%\n\n\"\"\"\nCommand line test execution text:\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py 
D:\\tools\\src\\Document_Processing_Test_Files\\Documentation_Pre-Processing_Process_v3.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_1.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_2.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_3.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_4-No_Proceed.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_4-Proceed.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_5-No_Proceed.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_5-Proceed.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Process_Documentation_Process_v3.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Software_Documentation_Process.rst\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Documentation_Pre-Processing_Process_v3.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_1.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_2.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_3.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_4-No_Proceed.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_4-Proceed.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_5-No_Proceed.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Problem_Document_5-Proceed.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Process_Documentation_Process_v3.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Software_Documentation_Process.rst -r pandoc-reference-template.docx\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Documentation_Pre-Processing_Process_v3.rst -r\nd:\\anaconda3-2020.02b2\\python.exe CadmusClass_v0_7.py D:\\tools\\src\\Document_Processing_Test_Files\\Documentation_Pre-Processing_Process_v3.rst -r pandoc-reference-template.docx 
extra_should_fail\n\n\n\"\"\"\n","sub_path":"tools/src/visualization/Flask/cadmus_doc_processing/Archive/CadmusClass_v0_7.py","file_name":"CadmusClass_v0_7.py","file_ext":"py","file_size_in_byte":66321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"565532353","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/modipy/debug.py\n# Compiled at: 2009-08-25 18:19:45\n__version__ = '$Revision: 98 $'\nimport logging, logging.handlers, sys\nFORMAT = '%(asctime)s %(levelname)7s: %(message)s'\nformatter = logging.Formatter(FORMAT, '%Y-%m-%d %H:%M:%S')\nstdoutHandler = logging.StreamHandler(sys.stdout)\nstdoutHandler.setFormatter(formatter)\n\nclass LocalLogger(logging.Logger):\n\n def __init__(self, name):\n level = logging.INFO\n logging.Logger.__init__(self, name, level)\n self.addHandler(stdoutHandler)\n\n\ndef add_file_handler(filename):\n handler = logging.handlers.RotatingFileHandler(filename=filename, maxBytes=10000000, backupCount=10) # maxBytes must be an int\n handler.setFormatter(formatter)\n log = logging.getLogger('modipy')\n log.addHandler(handler)\n\n\nlogging.setLoggerClass(LocalLogger)","sub_path":"pycfiles/ModiPy-1.2.2-py2.5/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"22318838","text":"import os\nimport sys\nsys.path.append('../')\n\nimport re\nimport socket\nimport itertools\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom app import database, l1_login, app\n\n\n\ndef cheat():\n ip = l1_login.get_ip().pop()\n once_neg_percent = 1\n text = 'cheating'\n\n database.l1_user_connect(once_neg_percent, text)\n\ndef l1_ai(text):\n\n dp_list = pd.read_excel('app/depression_wrod_list.xlsx', index_col=0, header=0)\n # tester = pd.read_csv('/Users/takipon/Desktop/dprapp/tester.csv')\n tester = text.split()\n enco_list = pd.read_excel('app/sample.xlsx', index_col=None, header=1, sheet_name='encouraging_list')\n\n\n\n # extract the important words from the new data\n tester_list = list(tester) # listify the csv as sentences\n\n vect = CountVectorizer(max_df=70, stop_words='english').fit(tester_list)# does max_df depend on the number of input words?\n X_train = vect.transform(tester_list)\n\n new_tester_words = vect.get_feature_names()\n # print('TESTER features:\\n{}'.format(new_tester_words\n #)) # word list ['alive', 'death', 'die', 'think', 'want', 'way']\n\n\n\n # [understanding/empathy] swap the keys and values of the vocabulary frequency dict and use the most frequent words\n new_tester_words_voca = vect.vocabulary_\n # print(new_tester_words_voca) #{'want': 4, 'die': 2, 'alive': 0, 'think': 3, 'way': 5, 'death': 1}\n\n renew_tester_words_voca = {v: k for k, v in new_tester_words_voca.items()}\n # swap the keys and values of the dict (new_tester_words_voca) {4: 'want', 2: 'die', 0: 'alive', 3: 'think', 5: 'way', 1: 'death'}\n\n # print('[understanding] I see your think of %s' % renew_tester_words_voca[0])\n\n\n\n # [feedback] match the new data point (word count) against the sample.xlsx/encouraging_list data points (X: word count, 
Y: list index) using k-nearest neighbors (accuracy is awful, so this is probably meaningless); check the contents and use them if they are close to the new data point\n tester_words_num = 1\n for i in range(len(tester_list)):\n tester_words_num = tester_words_num + len(re.findall(\" \", tester_list[i])) # count the words in the new data point (tester) #17\n\n enco_list_words_num = []\n enco_list = list(enco_list.sentence)\n for i in range(0, len(enco_list)):\n enco_list_words_num.append(len(re.findall(\" \", enco_list[i]))+1) # count the words of each enco sentence, from the first one \n enco_list_words_num = np.array(enco_list_words_num)\n # print(enco_list_words_num) # word counts for enco_list #[ 5 4 3 2 2 4 3 3 5 4 2 5 4 5 3 4 5 3 2 4 4 9 3 1 13]\n\n enco_list_array_num = []\n for i in range(len(enco_list)):\n enco_list_array_num.append(i)\n # print(enco_list_array_num) # index numbers for enco_list #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\n\n enco_list_words_num_4knn = enco_list_words_num.reshape(len(enco_list_words_num), 1) # reshape the data used for X into a 2-D array\n tester_words_num_4knn = np.array(tester_words_num).reshape(1, 1) # reshape the live input data into a 2-D array\n\n knn = KNeighborsClassifier(n_neighbors=4,weights='distance') \n knn.fit(enco_list_words_num_4knn, enco_list_array_num)\n # print(knn.score(enco_list_words_num_4knn, enco_list_array_num)) # check knn accuracy #0.28 too little data? is the Y range too large?\n\n enco_array_num_4knn = knn.kneighbors(tester_words_num_4knn) # feed the new data point into knn and pull out the nearest ones\n enco_array_num_4knn = list(itertools.chain.from_iterable(knn.kneighbors(tester_words_num_4knn)[1])) # extract the second array in the result (i.e. the index numbers), flattening 2-D to 1-D\n\n enco_respond = []\n num = 0\n for i in range(len(enco_array_num_4knn)):\n num = enco_list_words_num[enco_array_num_4knn[i]]+num\n enco_respond.append(enco_list[enco_array_num_4knn[i]])\n if num >= tester_words_num-5:\n break\n else:\n enco_respond.append(enco_list[enco_array_num_4knn[i]])\n continue\n\n # print('[feedback] %s'%enco_respond) # enco_list sentences keep being appended until their word count catches up with the new data point\n\n\n\n # variable creation + lowercasing + appending + joining the letter lists = one for statement\n dp_list_low = [dp_list.vocabulary[num].lower() for num in range(len(dp_list))]\n\n # split apart features (str) that run together in a single cell\n tif = TfidfVectorizer()\n x = tif.fit_transform(dp_list_low)\n dp_list_eachwords = tif.get_feature_names()\n\n # print(dp_list_eachwords) # 1-D array made up of the individual words\n\n\n\n # count the negative words in the new data against the word list\n badwords=0\n see_badwords = [badwords+dp_list_eachwords.count(new_tester_words[i]) for i in range(len(new_tester_words))]\n print('badwords',see_badwords) # list: 1 if the word is present / 0 if not #[0, 1, 1, 0, 0, 0]\n\n\n\n # compute the negative-word ratio for the new text as a decimal\n once_neg_percent = '{:.2}'.format(sum(see_badwords) / len(see_badwords))\n # print(once_neg_percent) #0.33\n print('percentage',sum(see_badwords) / len(see_badwords)) #0.33\n\n\n # the IP address changes whenever the page changes and the csv disappears, so a temporary hand-off is not possible\n # login inserts the IP address and name; pull out the name whose IP address matches and use that as the IP\n # ip = l1_login.get_ip().pop()\n # text = ''.join(tester_list)\n text = str(text)\n\n # pass the IP address, negative percentage and text to database.py\n database.l1_user_connect(once_neg_percent, text)\n\n\n # fetch the database contents via database.py\n l1_ip_df = pd.DataFrame(database.l1_user_show())\n try:\n l1_ip_df_col = list(l1_ip_df[2])\n except KeyError:\n l1_ip_df_col = ''\n\n if len(l1_ip_df_col) > 0:\n l1_ip_total=0\n for i in range(len(l1_ip_df_col)):\n l1_ip_total = l1_ip_total + float(l1_ip_df_col[i])\n l1_ip_total = l1_ip_total/len(l1_ip_df_col)\n else:\n l1_ip_total = once_neg_percent\n\n return renew_tester_words_voca, enco_respond, 
l1_ip_total","sub_path":"app/l1_ai.py","file_name":"l1_ai.py","file_ext":"py","file_size_in_byte":6891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"304320923","text":"import os\nimport time\nimport numpy as np\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport torchvision\nfrom torchvision.datasets import CIFAR10 #MNIST\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\nfrom torchsummary import summary\nimport cv2\n\n#from net_encoder_decoder_vgg16 import Encoder, Decoder\n#from net_encoder_decoder_vgg_resnet import Encoder, Decoder\n#from net_encoder_decoder_vgg_resnet2 import Encoder, Decoder\nfrom net_colarization_resnet import ColorizationNet\n\ndef imshow(img,file='', text_=''):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.detach().numpy() #img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.text(x = 3, y = 2, s = text_, c = \"red\")\n plt.pause(3)\n if file != '':\n plt.savefig(file+'.png')\n plt.close()\n\nfrom pytorch_lightning.callbacks import Callback \nclass MyPrintingCallback(Callback):\n def on_epoch_end(self, trainer, pl_module):\n print('')\n\nclass rgb2YCrCb(object):\n def __init__(self):\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Normalize(mean, std)\n pass\n \n def __call__(self, tensor):\n tensor = tensor / 4 + 0.5 # unnormalize\n orgYCrCb = cv2.cvtColor(np.float32(self.ts(tensor)), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n CC = np.array(CC).reshape(2,32*8,32*8) #(2,32*2,32*2)\n #print(CC.shape)\n return np.array(CC)\n \n def __repr__(self):\n return self.__class__.__name__\n \nclass rgb2YCrCb_(object):\n def __init__(self):\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Normalize(mean, std)\n pass\n \n def __call__(self, tensor):\n #tensor = self.ts3(self.ts2(self.ts(tensor))) / 4 + 0.5 # unnormalize \n tensor = tensor / 4 + 0.5 # unnormalize\n orgYCrCb = cv2.cvtColor(np.float32(self.ts(tensor)), cv2.COLOR_BGR2YCR_CB)\n Y, Cr,Cb = cv2.split(orgYCrCb)\n CC = cv2.merge((Cr,Cb))\n Y = np.array(Y).reshape(1,32*8,32*8) #(1,32*2,32*2)\n #print(Y.shape)\n return Y\n\nclass ImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, data_num,train_=True, transform1 = None, transform2 = None,train = True):\n \n self.transform1 = transform1\n self.transform2 = transform2\n self.ts = transforms.ToPILImage()\n self.ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n self.ts3 = transforms.Compose([\n transforms.ToTensor(),\n #transforms.Resize((64,64)),\n transforms.Normalize(mean, std),\n ])\n self.train = train_\n \n self.data_dir = './'\n self.data_num = data_num\n self.data = []\n self.label = []\n\n # download\n CIFAR10(self.data_dir, train=True, download=True)\n self.data =CIFAR10(self.data_dir, train=self.train, transform=self.ts3)\n\n def __len__(self):\n return self.data_num\n\n def __getitem__(self, idx):\n out_data = self.data[idx][0]\n out_label_ = self.data[idx][1]\n out_label = torch.from_numpy(np.array(out_label_)).long()\n \n if self.transform1:\n out_data1 = self.transform1(out_data)\n if self.transform2:\n out_data2 = self.transform2(out_data)\n \n return out_data, out_data1, out_data2, out_label\n 
\nclass LitAutoEncoder(pl.LightningModule):\n\n def __init__(self, data_dir='./'):\n super().__init__()\n self.ts2 = transforms.ToTensor()\n self.ts = transforms.ToPILImage()\n self.data_dir = data_dir\n self.data_num =50000 #50000\n # Hardcode some dataset specific attributes\n self.num_classes = 10\n self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n self.dims = (32*8, 32*8)\n \n self.encoder_decoder = ColorizationNet()\n #self.encoder = Encoder()\n #self.decoder = Decoder()\n\n def forward(self, x):\n # in lightning, forward defines the prediction/inference actions\n \n x = self.encoder_decoder(x)\n return x\n\n def training_step(self, batch, batch_idx):\n # training_step defined the train loop. It is independent of forward\n _,x,x_ , y = batch\n x_hat = self.encoder_decoder(x) ##resnet\n loss = F.mse_loss(x_hat, x_)\n self.log('train_loss', loss, prog_bar = True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n _,x, x_, y = batch\n x_hat = self.encoder_decoder(x)\n loss = F.mse_loss(x_hat, x_)\n self.log('test_loss', loss, prog_bar = True)\n return loss\n \n def test_step(self, batch, batch_idx):\n # Here we just reuse the validation_step for testing\n return self.validation_step(batch, batch_idx)\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) \n return optimizer\n \ndef main():\n ts = transforms.ToPILImage()\n ts2 = transforms.ToTensor()\n mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]\n ts3 = transforms.Normalize(mean, std)\n ts4 = transforms.Resize((256,256))\n meang, stdg =[0.5], [0.25]\n ts5 = transforms.Normalize(meang, stdg)\n trans2 = transforms.Compose([\n transforms.Resize((256,256)),\n #transforms.Normalize(mean, std),\n rgb2YCrCb(), #CrCb\n ])\n trans1 = transforms.Compose([\n transforms.Resize((256,256)),\n #transforms.Normalize(mean, std),\n rgb2YCrCb_(), #Y\n ])\n dim1 =(256,256)\n dim2 = (1,256,256)\n dim3 = (256,256,2)\n data_num = 50000\n cifar10_full =ImageDataset(data_num, train=True, transform1=trans1, transform2=trans2)\n n_train = int(len(cifar10_full)*0.95)\n n_val = int(len(cifar10_full)*0.04)\n n_test = len(cifar10_full)-n_train -n_val\n cifar10_train, cifar10_val, cifar10_test = torch.utils.data.random_split(cifar10_full, [n_train, n_val, n_test])\n \n trainloader = DataLoader(cifar10_train, shuffle=True, drop_last = True, batch_size=32, num_workers=0)\n valloader = DataLoader(cifar10_val, shuffle=False, batch_size=32, num_workers=0)\n testloader = DataLoader(cifar10_test, shuffle=False, batch_size=32, num_workers=0)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") #for gpu\n # Assuming that we are on a CUDA machine, this should print a CUDA device:\n print(device)\n pl.seed_everything(0)\n\n # model\n autoencoder = LitAutoEncoder()\n #path_ = './simple_coloring/'\n #PATH = path_+'example_cifar4Ln100_9.ckpt'\n #autoencoder = autoencoder.load_from_checkpoint(PATH)\n \n #autoencoder = LitAutoEncoder()\n autoencoder = autoencoder.to(device) #for gpu\n print(autoencoder)\n summary(autoencoder,dim2)\n \n trainer = pl.Trainer(max_epochs=1, gpus=1, callbacks=[MyPrintingCallback()]) ####epoch\n sk = 0\n for i in range(0,10,1):\n trainer.fit(autoencoder, trainloader, valloader) \n print('training_finished')\n \n results = trainer.test(autoencoder, testloader)\n print(results)\n if sk%1==0:\n dataiter = iter(trainloader)\n _,images, images_, labels = dataiter.next()\n print(images.shape, images_.shape)\n\n images0 = []\n for i in 
range(32):\n print(i, images[i].shape, images_[i].shape)\n YCC_ = cv2.merge((np.array(images[i]).reshape(dim1),np.array(images_[i]).reshape(dim3)))\n images0_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n images0.append(ts2(images0_/255.))\n # show images \n imshow(torchvision.utils.make_grid(images0), 'cifar10_results',text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) #3\n # print labels\n print(' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n\n path_ = './simple_coloring/'\n PATH = path_+'example_cifar4Ln100_{}.ckpt'.format(sk)\n trainer.save_checkpoint(PATH)\n\n pretrained_model = autoencoder.load_from_checkpoint(PATH)\n pretrained_model.freeze()\n pretrained_model.eval()\n\n latent_dim,ver = \"Gray2Clolor_resnet\", \"1_{}\".format(sk) #####save condition\n dataiter = iter(testloader)\n images0,images, images1, labels = dataiter.next() #original, Y, CrCb, label\n # show images\n imshow(torchvision.utils.make_grid(images.reshape(32,1,32*8,32*8)/255.),path_+'1_Y_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n # show images0\n imshow(torchvision.utils.make_grid(images0.reshape(32,3,32,32)),path_+'2_original_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n # show images0\n imshow(torchvision.utils.make_grid(ts4(images0).reshape(32,3,32*8,32*8)),path_+'3_original_normx2_cifar10_{}_{}'.format(latent_dim,0),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n # show images1\n #imshow(torchvision.utils.make_grid(images1.reshape(32,3,32*2,32*2)),'normalized_images1_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4))) \n\n decode_img = pretrained_model.encoder_decoder(images[0:32].to('cpu').reshape(32,1,32*8,32*8)) #3\n #decode_img = pretrained_model.decoder(encode_img)\n decode_img_cpu = decode_img.cpu()\n images2 = []\n for i in range(32):\n print(i, images[i].shape, decode_img_cpu[i].shape)\n YCC_ = cv2.merge((np.array(images[i].reshape(dim1)),np.array(decode_img_cpu[i].reshape(dim3))))\n images2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)\n images2.append(ts3(ts2(images2_/255.)))\n #images2.append(ts2(images2_/255.))\n imshow(torchvision.utils.make_grid(images2), path_+'4_preds_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))\n sk += 1\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n print('elapsed time: {:.3f} [sec]'.format(time.time() - start_time)) \n","sub_path":"Coloring/simple_YCC_resnet.py","file_name":"simple_YCC_resnet.py","file_ext":"py","file_size_in_byte":10750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"640044785","text":"import os\nimport shutil\nimport random\nimport argparse\n\n# FUNCTION TO REPLACE ONE LINE OF A FILE WITH ANOTHER #\ndef modificarLinea(archivo, buscar, reemplazar):\n with open(archivo, \"r\") as f:\n lines = (line.rstrip() for line in f)\n altered_lines = [reemplazar if line == buscar else line for line in lines]\n\n with open(archivo, \"w\") as f:\n f.write('\\n'.join(altered_lines) + '\\n')\n\n\ndef lifetime(ctau_mean_mm, input=\"unweighted_events.lhe\", output=\"unweighted_events_new.lhe\"):\n '''\n function used in replace_lifetime_in_LHE.py\n it must be in the same folder for it to work\n '''\n # set input file name\n # filename = 
\"unweighted_events.lhe\"\n f = open(input, 'r')\n g = open(output, 'w')\n event_begin = False\n event_end = True\n for line in f:\n if line == '\\n':\n event_begin = True\n event_end = False\n if line == '\\n':\n event_begin = False\n event_end = True\n new_line = ''\n if event_begin == True and event_end == False:\n word_n = 0\n for word in line.split():\n if word == '3000022' or word_n > 0:\n word_n = word_n + 1\n if word_n < 13:\n if word_n == 12:\n if ctau_mean_mm is not 0:\n ctau_mm = '%E' % random.expovariate(\n 1.0 / float(ctau_mean_mm)) # exponential distribution\n # print \"ctau (mm) mean: \", ctau_mean_mm, \" actual: \", ctau_mm\n else:\n ctau_mm = '%E' % float(0)\n new_line = new_line + ctau_mm + ' '\n else:\n new_line = new_line + word + ' '\n else:\n new_line = new_line + word + '\\n'\n word_n = 0\n if new_line == '':\n g.write(line.rstrip('\\n') + \"\\n\")\n #print line.rstrip('\\n')\n else:\n g.write(new_line.rstrip('\\n') + \"\\n\")\n #print new_line.rstrip('\\n')\n f.close()\n g.close()\n","sub_path":"generador_muestras/mod_configuration.py","file_name":"mod_configuration.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"122926195","text":"def matrix_calculus(matrix):\n \"\"\"\n Args:\n matrix: the matrix to calculate coercive\n Returns:\n the matrix value .\n Raises:\n ValueError: if number is not a number.\n\n matrix_calculus([\"{0} {1}{2}{3} {4}\".format('1', '2', r'\\\\n', '3', '4')])\n >>> matrix_calculus([[1, 0], [2, 0]])\n -1\n >>> matrix_calculus(\"-3\")\n ValueError\n \"\"\"\n try:\n x = matrix[0]\n y = matrix[1]\n return (x[0] + y[1]) - (x[1] + y[0])\n\n except ValueError as err:\n return err\n\n\ndef factorial(number):\n \"\"\" Returns factorial of given number\n Args:\n number: the number to get the factorial of.\n Returns:\n the factorial of number.\n Raises:\n ValueError: if number is not a number.\n >>> factorial(4)\n 24\n \"\"\"\n x = 0\n ilo = 1\n while number-x > 0:\n x += 1\n ilo *= x\n return ilo\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(verbose=True)\n","sub_path":"doctest_test.py","file_name":"doctest_test.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"214143859","text":"#The below code runs for infinite time until it is stopped and it will keep sending the correct values after \n#at intervals of 1 min\n\n\nimport paho.mqtt.publish as publish\nimport paho.mqtt.subscribe \nimport random\nimport time\n\n#Using Thingspeak as MQTT broker\nchannelID = \"your_channel_id\" #channel ID of thingspeak\napiKey1 = \"Write_API_Key\" #write API Key of Channel \ntopic = \"channels/\" + channelID + \"/publish/\" + apiKey1 # topic\nmqttHost = \"mqtt.thingspeak.com\"\ntTransport = \"tcp\"\ntPort = 1883\ntTLS = None\n\n\n\n\ndef sensors():\n while(True):\n # below two lines are for input of values of sensors since we are not using it \n #so random values are initialized within a defined range\n \n hum= random.randint(50,100) \n temp = random.randint(20,30)\n \n # Here we check the condition of the input received \n T = temp>=20 and temp<=30\n H = hum>=50 and hum<=100\n \n #I have taken always true condition but it can be set to any other condition also\n # then the value is published in the below code\n if( T==True and H==True ):\n tPayload = \"&field1=\" + str(temp) + \"&field2=\" + str(hum)\n print (\"Temperature: \", temp)\n print 
(\"Humidity\", hum)\n try:\n publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)\n print (\"Valid Temperature Data sent\")\n print (\"Valid Humidity Data sent\")\n \n except:\n print (\"Failure in sending data\")\n # Rest are the other cases for invalid data if data is invalid then I am sending 0 or we can send null also \n elif(T==True and H==False):\n tPayload = \"&field1=\" + str(temp) + \"&field2=\" + str(0)\n print (\"Temperature: \", temp)\n print (\"Humidity\", hum)\n try:\n publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)\n print (\"Valid Temperature Data sent\")\n print (\"Humidity Data Not sent\")\n \n except:\n print (\"Failure in sending data\")\n elif(T==False and H==True):\n tPayload = \"&field1=\" + str(0) + \"&field2=\" + str(hum)\n print (\"Temperature: \", temp)\n print (\"Humidity\", hum)\n try:\n publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)\n print (\"Temperature Data Not sent\")\n print (\"Valid Humidity Data sent\")\n \n except:\n print (\"Failure in sending data\")\n else:\n print(\"Both values Invalid\")\n \n #At last this sleeps for 1 min and then next value is send\n time.sleep(60)\n\nif __name__==\"__main__\":\n sensors()\n \n","sub_path":"Sensor_Data_Module.py","file_name":"Sensor_Data_Module.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"474167977","text":"import discord\nfrom discord.ext import commands, tasks\nimport json\nimport os\n\n\nclient = discord.Client()\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n if(message.content.startswith('!hello')):\n author = str(message.author)\n resp = \"Hello \" + author[:-5]\n await message.channel.send(resp)\n\n\nclient = commands.Bot(command_prefix=os.getenv('DISCORD_BOT_PREFIX'), case_insensitive=True)\n\nfor filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n client.load_extension(f'cogs.{filename[:-3]}')\n\n@client.event\nasync def on_ready():\n activity = os.getenv('DISCORD_ACTIVITY') or 'Minecraft'\n await client.change_presence(activity=discord.Game(name=activity))\n\nclient.run(os.getenv('DISCORD_BOT_TOKEN'))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"241303930","text":"import util\nfrom gensim.models import FastText\nfrom gensim.models.word2vec import LineSentence\n\nfrom tqdm import tqdm\nimport logging\n\n# https://joyhong.tistory.com/137 임베딩 - FastText (한글 자소 분리)\n\n\n# def process_jamo(tokenized_corpus_fname, output_fname):\n# toatal_lines = sum(1 for line in open(tokenized_corpus_fname, 'r', encoding='utf-8'))\n# with open(tokenized_corpus_fname, 'r', encoding='utf-8') as f1, \\\n# open(output_fname, 'w', encoding='utf-8') as f2:\n# for _, line in tqdm(enumerate(f1), total=toatal_lines):\n# sentence = line.replace('\\n', '').strip()\n# processed_sentence = util.jamo_sentence(sentence)\n# f2.writelines(processed_sentence + '\\n')\n\n# def process_jamo(tokenized_corpus_fname, output_fname):\n# with open(tokenized_corpus_fname, 'r', encoding='utf-8') as f1, \\\n# open(output_fname, 'w', encoding='utf-8') as f2:\n# for line in f1:\n# sentence = line.replace('\\n', '').strip()\n# processed_sentence = jamo_sentence(sentence)\n# 
f2.writelines(processed_sentence + '\\n')\n\n# tokenized_corpus_fname = 'corpus_mecab.txt'\n# output_fname = 'corpus_mecab_jamo.txt'\n# process_jamo(tokenized_corpus_fname, output_fname)\n\n\n\"\"\"\nProcess Hangul Jamo Sentence.\nInspired By:\nhttps://lovit.github.io/nlp/representation/2018/10/22/fasttext_subword\n\"\"\"\nimport re\nfrom soynlp.hangle import compose, decompose, character_is_korean\n\ndoublespace_pattern = re.compile('\\s+')\n\ndef jamo_sentence(sent):\n\n def transform(char):\n if char == ' ':\n return char\n cjj = decompose(char)\n if len(cjj) == 1:\n return cjj\n cjj_ = ''.join(c if c != ' ' else '-' for c in cjj)\n return cjj_\n\n sent_ = []\n for char in sent:\n if character_is_korean(char):\n sent_.append(transform(char))\n else:\n sent_.append(char)\n sent_ = doublespace_pattern.sub(' ', ''.join(sent_))\n return sent_\n\n\ndef jamo_to_word(jamo):\n jamo_list, idx = [], 0\n while idx < len(jamo):\n if not character_is_korean(jamo[idx]):\n jamo_list.append(jamo[idx])\n idx += 1\n else:\n jamo_list.append(jamo[idx:idx + 3])\n idx += 3\n word = \"\"\n for jamo_char in jamo_list:\n if len(jamo_char) == 1:\n word += jamo_char\n elif jamo_char[2] == \"-\":\n word += compose(jamo_char[0], jamo_char[1], \" \")\n else: word += compose(jamo_char[0], jamo_char[1], jamo_char[2])\n return word\n\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\ndocument = [\n '5g 휴대폰 플랜에 대해서 설명해주세요',\n 'KT',\n '하나님 예배를 하자',\n ]\n\n\nprocessed_document = list(map(lambda x: jamo_sentence(x), document))\n\nmodel_fname = 'model_fasttext'\nprint('building corpus')\n# corpus = [sent.strip().split(\" \") for sent in tqdm(processed_document)]\ncorpus = [s.split() for s in processed_document]\n\n\n\n# print(\"training\")\n# # model = FastText(corpus, size=100, workers=4, sg=1, iter=2, word_ngrams=5)\n# model = FastText(corpus, size=100, workers=4, sg=1, iter=2, word_ngrams=5, min_count=1)\n# model.save('model_fasttext')\n#\n# print(f\"training time: {model.total_train_time}\")\n# # saved separately for visualization at https://projector.tensorflow.org/\n# model.wv.save_word2vec_format(model_fname + \"_vis\")\n# print('done')\n\n\n\ndef transform(list):\n return [(jamo_to_word(w), r) for (w, r) in list]\n\n# load the model and print the most similar words\nloaded_model = FastText.load(model_fname)\nprint(loaded_model.wv.vectors.shape)\n\nprint(transform(loaded_model.wv.most_similar(jamo_sentence('후대폰'), topn=5)))\nprint(transform(loaded_model.wv.most_similar(jamo_sentence('예베를'), topn=5)))\nprint(transform(loaded_model.wv.most_similar(jamo_sentence('KT'), topn=5)))\n","sub_path":"ztst/jamofasttext/jamofasttext.py","file_name":"jamofasttext.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"151637586","text":"# ii.\tAsk users to enter a number, then print n positive numbers from 0 to n-1: \n\n\nn = int(input(\"Please input n\"))\n\nif n >= 0:\n for i in range(n):\n print(i, end=\" \")\nelse:\n print(\"n must be a non-negative number\")","sub_path":"C4E18 Sessions/Homework/Session_2/Exercise_3a_ii.py","file_name":"Exercise_3a_ii.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"382949478","text":"'''\n Start: Thursday, October 29, 2020\n End: \n\n Assignment: Computational Explorations of Political Representation\n\n Objectives:\n 1) Create a state that is a 6x3 grid of cells.\n 2) What is your state's name? 
Lin-o-eL\n 3) Each cell is inhabited entirely by citizens who vote '1' XOR citizens who vote '0'.\n 4) The program should be able to:\n a) Randomly populate each cell with a 1 or 0.\n b) Display a generated configuration.\n c) Divide your state into 6 \"districts\":\n i) A district is a list of coordinate pairs, where each pair represents a member cell.\n ii) A district must be comprised of adjacent cells.\n d) Primary goal: ensure each cell is in a district.\n e) Secondary goal: balance district sizes.\n'''\nimport random\n\n# Set the state size\nROWS = 6\nCOLS = 3\n\n# Create the state in a 2D array populated with random 1s and 0s\nstate = []\nfor i in range(ROWS):\n row = []\n for j in range(COLS):\n row.append(random.randint(0,1))\n state.append(row)\n\n#####################################\n## Create Districts ##\n#####################################\ndistrictMap = []\nif (False):\n#simple district map\n for i in range(1,ROWS+1):\n row = []\n for j in range(COLS):\n row.append(i)\n districtMap.append(row)\nelse:\n#create random district shapes\n districtMap.append([1,1,2])\n districtMap.append([3,1,2])\n districtMap.append([3,3,2])\n districtMap.append([4,4,4])\n districtMap.append([5,5,6])\n districtMap.append([5,6,6])\n\n\n# Look at original district layout\nprint(\"\\nDISTRICT MAP:\")\nfor i in range(ROWS):\n print(districtMap[i])\n\n# Check how every cell \"voted\"\nprint(\"\\nVOTES BY REGION:\")\nfor i in range(ROWS):\n print(state[i])\n\n####################################\n## Add color to party votes ##\n####################################\ndef addColor(num, i=None):\n colorized = \"\"\n if i == None:\n if num == 0:\n colorized += \"\\u001b[31m\"\n elif num == 1:\n colorized += \"\\u001b[36m\"\n else:\n colorized += \"\\u001b[32m\"\n else:\n if i == 0:\n colorized += \"\\u001b[31m\"\n elif i == 1:\n colorized += \"\\u001b[36m\"\n else:\n colorized += \"\\u001b[32m\"\n colorized += str(num) + \"\\u001b[0m\"\n return colorized\n####################################\n## End of addColor ##\n####################################\n\n####################################\n## Create District Borders ##\n####################################\ndef createDistrictBorders(vote, map): \n\n output = \"\"\n \n # terminal border lines\n # ajejb ┌─┬─┐\n # k k k │ │ │\n # fjijg ├─┼─┤\n # k k k │ │ │\n # cjhjd └─┴─┘\n \n a = chr(0x250c) #upper-left corner\n b = chr(0x2510) #upper-right corner\n c = chr(0x2514) #lower-left corner\n d = chr(0x2518) #lower-right corner\n e = chr(0x252c) #top T\n f = chr(0x251c) #left T\n g = chr(0x2524) #right T\n h = chr(0x2534) #bottom T\n i = chr(0x253c) #middle plus, +\n j = chr(0x2500) #horizontal dash\n k = chr(0x2502) #vertical dash\n\n #top rows\n row1String = a #border row\n row2String = k #vote row\n for m in range(len(map[0])-1):\n row2String += \" \" + addColor(vote[0][m]) + \" \"\n row1String += j*3\n if (map[0][m] == map[0][m+1]):\n row2String += \" \"\n row1String += j\n else:\n row2String += k\n row1String += e\n row2String += \" \" + addColor(vote[0][len(map[0])-1]) + \" \" + k\n row1String += j*3 + b\n output += row1String + '\\n' + row2String + '\\n'\n\n #middle rows\n for m in range(1,len(map)):\n if (map[m][0] == map[m-1][0]):\n row1String = k\n else:\n row1String = f\n row2String = k\n for n in range(len(map[m])-1):\n row2String += \" \" + addColor(vote[m][n]) + \" \"\n if (map[m][n] == map[m-1][n]):\n row1String += \" \"\n if (map[m][n] == map[m][n+1]):\n row2String += \" \"\n if (map[m][n+1] == map[m-1][n+1]):\n row1String += \" \"\n else:\n 
row1String += c\n else:\n row2String += k\n if (map[m][n+1] == map[m-1][n+1]):\n row1String += k\n else:\n row1String += a\n else:\n row1String += j*3\n if (map[m][n] == map[m][n+1]):\n row2String += \" \"\n if (map[m][n+1] == map[m-1][n+1]):\n row1String += d\n else:\n if (map[m-1][n] == map[m-1][n+1]):\n row1String += j\n else:\n row1String += h\n else:\n row2String += k\n if (map[m][n+1] == map[m-1][n+1]):\n if (map[m-1][n] == map[m-1][n+1]):\n row1String += b\n else:\n row1String += g\n else:\n if (map[m-1][n] == map[m-1][n+1]):\n row1String += e\n else:\n row1String += i\n row2String += \" \" + addColor(vote[m][len(map[m])-1]) + \" \" + k\n if (map[m][len(map[m])-1] == map[m-1][len(map[m])-1]):\n row1String += \" \" + k\n else:\n row1String += j*3 + g\n output += row1String + '\\n' + row2String + '\\n'\n \n #bottom row\n row1String = c\n n = len(map)-1\n for m in range(len(map[n])-1):\n if (map[n][m] == map[n][m+1]):\n row1String += j*3 + j\n else:\n row1String += j*3 + h\n row1String += j*3 + d\n output += row1String + '\\n'\n return output\n####################################\n## END OF DISTRICT BORDERS ##\n####################################\n\n####################################\n## Check who won each district ##\n####################################\ndef checkWin(vote, map):\n count = []\n for i in range(6+2):\n row = []\n for j in range(2+1):\n row.append(0)\n count.append(row)\n for i in range(len(map)):\n for j in range(len(map[i])):\n count[map[i][j]-1][vote[i][j]] += 1\n #row 6 will contain the total votes\n #row 7 will contain the district wins\n #col 2 indicates who won that district\n sum0 = 0\n sum1 = 0\n win0 = 0\n win1 = 0\n for i in range(len(map)):\n sum0 += count[i][0]\n sum1 += count[i][1]\n if count[i][0] > count[i][1]:\n count[i][2] = 0\n win0 += 1\n elif count[i][1] > count[i][0]:\n count[i][2] = 1\n win1 += 1\n else:\n count[i][2] = -1\n count[6][0] = sum0\n count[6][1] = sum1\n count[7][0] = win0\n count[7][1] = win1\n return count\n####################################\n## End of checkWin function ##\n####################################\n\n\nprint('\\n' + createDistrictBorders(state, districtMap))\n\nresults = checkWin(state, districtMap)\n\nprint(\"Popular Vote:\")\nprint(\"Rep: \\033[31m%.1f\\033[0m%%, Dem: \\033[36m%.1f\\033[0m%%\" %(results[6][0]*100/(ROWS*COLS), results[6][1]*100/(ROWS*COLS)))\n\nshout = \"\"\nif results[7][0] > results[7][1]:\n shout += \"\\033[31;1;4mRepublicans Win!\\033[0m\"\nelif results[7][1] > results[7][0]:\n shout += \"\\033[36;1;4mDemocrats Win!\\033[0m\"\nelse:\n shout += \"\\033[32;1;4mIt's a tie!\\033[0m\"\nprint(\"\\nBy District -\", shout)\nprint(\"Reps won\", addColor(results[7][0],0), \"districts\")\nprint(\"Dems won\", addColor(results[7][1],1), \"districts\")","sub_path":"08-gerry/poliRepMap.py","file_name":"poliRepMap.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"483018996","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import optim\n\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass PGAgent():\n def __init__(self, lr, in_dims=8, gamma=0.99, n_actions=4) -> None:\n self.gamma = gamma\n self.lr = lr\n self.in_dims = in_dims\n self.memory = list()\n self.action_memory = list()\n\n self.policy = nn.Sequential(\n nn.Linear(self.in_dims, 128),\n nn.ReLU(),\n nn.Linear(128, 128),\n nn.ReLU(),\n nn.Linear(128, n_actions)\n )\n self.opt = 
optim.Adam(self.policy.parameters(), lr=self.lr) # use the configured learning rate (previously unused)\n\n def choose_action(self, obs):\n state = torch.from_numpy(obs)\n probs = F.softmax(self.policy(state), dim=-1) # specify the dim explicitly for a 1-D logits vector\n actions_probs = torch.distributions.Categorical(probs)\n action = actions_probs.sample()\n log_probs = actions_probs.log_prob(action)\n self.action_memory.append(log_probs)\n\n return action.item()\n\n def store_rewards(self, reward):\n self.memory.append(reward)\n\n def learn(self):\n self.opt.zero_grad()\n G = np.zeros_like(self.memory)\n for t in range(len(self.memory)):\n g_sum = 0\n discount = 1\n for k in range(t, len(self.memory)):\n g_sum += self.memory[k] * discount\n discount *= self.gamma\n G[t] = g_sum\n G = torch.from_numpy(G)\n\n loss = torch.zeros(1, requires_grad=True)\n for g, logprob in zip(G, self.action_memory):\n loss = loss + -g * logprob\n loss.backward()\n self.opt.step()\n\n self.memory = list()\n self.action_memory = list()\n\ndef plot_curve(scores, x, file):\n avg = np.zeros(len(scores))\n for i in range(len(scores)):\n avg[i] = np.mean(scores[max(0, i-100): i+1])\n\n plt.plot(x, avg)\n plt.title('Running avg of previous 100 scores')\n plt.savefig(file) # save before show(); show() clears the current figure\n plt.show()\n\ndef main():\n env = gym.make('LunarLander-v2')\n episodes = 3000\n lr = 0.0005\n agent = PGAgent(lr)\n filename = f'PG_lr{lr}_eps{episodes}.png'\n\n scores = []\n for i in range(episodes):\n obs = env.reset()\n score = 0\n done = False\n while not done:\n action = agent.choose_action(obs)\n obs_, reward, done, info = env.step(action)\n score += reward\n agent.store_rewards(reward)\n obs = obs_\n # env.render()\n agent.learn()\n scores.append(score)\n\n avg_score = np.mean(scores[-100:])\n if i % 300 == 0:\n print(f'episode {i}: score -> {score}, avg (100 games) -> {avg_score}')\n\n x = range(len(scores))\n plot_curve(scores, x, filename)\n\nmain()","sub_path":"policy_gradient/lunar_lander_pg.py","file_name":"lunar_lander_pg.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"619275006","text":"import os\nimport configparser\nimport sys\nimport re\n\nsys.path.append(os.path.dirname(__file__) + '/')\nimport checkBalance\nimport getLeaderBoard\nimport checkCashFlow\nimport transferMoney\nimport playBlackJack\n\nconfig = configparser.ConfigParser()\nconfig.read(os.path.dirname(__file__) + '/../../../config.ini')\ncommandPrefix = config['command']['prefix'] + ' '\ncommandPrefixLen = len(commandPrefix)\n\n\nasync def messageParser(self, message, db):\n \"\"\"\n Parse message\n Identify whether it is a command to this bot, or just a normal message\n :param self: Discord's client object\n :param message: Message obj\n :param db: Database object\n :return: None\n \"\"\"\n if message.content[:commandPrefixLen] != commandPrefix:\n return\n if 
len(message.content) > 100:\n await message.channel.send(\"你说的太长了\")\n return\n command = message.content[3:]\n if re.match(f\"^余额$\", command):\n await checkBalance.checkBalance(message, db)\n if re.match(f\"^富豪榜$\", command):\n await getLeaderBoard.getLeaderBoard(self, message, db)\n if re.match(f\"^账单$\", command):\n await checkCashFlow.checkCashFlow(self, message, db)\n if re.match(f\"^账单 .+\", command):\n await checkCashFlow.checkCashFlowWithFilter(self, message, db)\n if re.match(f\"^转账 [0-9]+\\.?[0-9]* \\<\\@\\![0-9]+\\>$\", command):\n await transferMoney.transferMoney(self, db, message, command)\n if re.match(f\"^玩 21点 [0-9]+\\.?[0-9]* \\<\\@\\![0-9]+\\>$\"):\n await playBlackJack.playBlackJack(self, message, db, command)\n","sub_path":"src/controller/messageAnalysis/messageParser.py","file_name":"messageParser.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"370543591","text":"#source : https://gabii.tistory.com/entry/BaekJoonPython3-%EB%B0%B1%EC%A4%80-1110%EB%B2%88-%EB%8D%94%ED%95%98%EA%B8%B0-%EC%82%AC%EC%9D%B4%ED%81%B4\ntmp = inp = int(input())\ncount = 0\nwhile True:\n ten = tmp // 10\n one = tmp % 10\n res = ten + one\n count = count + 1\n tmp = int(str(tmp%10)+str(res%10))\n if (tmp == inp):\n break\nprint(count)","sub_path":"20200213/1110_더하기 사이클.py","file_name":"1110_더하기 사이클.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"177559085","text":"import runWorld as rw\nimport drawWorld as dw\nimport pygame as pg\n\nname = \"UVA\"\nwidth = 1000\nheight = 1000\nrw.newDisplay(width, height, name)\n\nclass State:\n def __init__(self, startX, startY):\n self.x = startX\n self.y = startY\n\ninitState = State(0, 1)\n\nmyimage = dw.loadImage(\"uva.png\")\n\nmylabel = dw.makeLabel(\"Go Hoos!\", \"arial\", 100, (0,0,255))\notherlabel = dw.makeLabel(\"Don't lose the Hoos\", \"serif\", 80, (255,255,255))\n\ndef updateDisplay(state):\n dw.fill(dw.blue)\n dw.draw(otherlabel, (150,100))\n dw.draw(myimage, (state.x, width/50))\n dw.draw(mylabel, (250,250))\n\n \ndef updateState(state):\n state.x = state.x+state.y\n return state\n\n\ndef endState(state):\n if (state.x >= width or state.x < -width):\n return True\n else:\n return False\n\n\ndef handleEvent(state,event): \n if (event.type == pg.MOUSEBUTTONDOWN):\n if (state.y) == 1:\n newState = -1\n else:\n newState = 1\n state.y = newState\n return(state)\n else:\n return(state)\n\nframeRate = 375\n# initState = (0,1)\n\n\nrw.runWorld(initState, updateDisplay, updateState, handleEvent,\n endState, frameRate)\n","sub_path":"uvaGameUpdates.py","file_name":"uvaGameUpdates.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"574317211","text":"from .exceptions import BadgeNotFound\nfrom .PlaceInfo import PlaceInfo\nimport datetime\nfrom .Classes import Time\nfrom .utils import Requests\n\n\nclass BadgeInfo:\n\n def __init__(self, badge_id, request: Requests):\n \"\"\"\n\n Represents a ROBLOX Badge.\n\n **Parameter**\n -------------\n\n badge_id : int\n Badge Id\n request : roblox_py.Requests\n \"\"\"\n self.request = request\n self.badge_id = badge_id\n self._json_obj = None\n\n async def update(self) -> None:\n \"\"\"\n Must be called before using the class else the class will misbehave.\n \"\"\"\n r = await 
self.request.request(url=f'https://badges.roblox.com/v1/badges/{self.badge_id}', method='get')\n if \"id\" not in r:\n raise BadgeNotFound(\"Invalid BadgeInfo ID\")\n self._json_obj = r\n\n @property\n def name(self) -> str:\n \"\"\"\n Returns Badge's Name\n\n \"\"\"\n return self._json_obj['name']\n\n @property\n def id(self) -> int:\n \"\"\"\n Returns Badge's ID\n \"\"\"\n return self._json_obj['id']\n\n @property\n def description(self) -> str:\n \"\"\"\n Returns Badge's Description\n \"\"\"\n return self._json_obj['description']\n\n @property\n def is_enabled(self) -> bool:\n \"\"\"\n Checks if the badge is enabled or not\n\n \"\"\"\n\n return self._json_obj['enabled']\n\n @property\n def created_at(self) -> str:\n \"\"\"\n Gives the created date in iso8601 format\n \"\"\"\n return self._json_obj['created']\n\n def created_at_formatted(self) -> Time:\n \"\"\"\n Returns Formatted Badge Creation Date\n \"\"\"\n date_time_str = self.created_at\n noob = date_time_str[:10]\n strp = datetime.datetime.strptime(noob, '%Y-%m-%d')\n return Time(yrs=strp.year, month=strp.month, day=strp.day)\n\n @property\n def updated_at(self):\n \"\"\"\n Gives the last updated date in iso8601 format\n \"\"\"\n return self._json_obj['updated']\n\n def updated_at_formatted(self) -> Time:\n \"\"\"\n Returns a Time instance which contains the years, months, and days which contains formatted date\n \"\"\"\n date_time_str = self.updated_at\n noob = date_time_str[:10]\n strp = datetime.datetime.strptime(noob, '%Y-%m-%d')\n return Time(yrs=strp.year, month=strp.month, day=strp.day)\n\n def updated_age(self) -> Time:\n \"\"\"\n Returns a Time instance which contains the years, months, and days since the badge's last update.\n \"\"\"\n date_time_str = self.updated_at\n noob = date_time_str[:10]\n strp = datetime.datetime.strptime(noob, '%Y-%m-%d')\n now = datetime.datetime.utcnow()\n diff = now - strp\n days = diff.days\n months, days = divmod(days, 30)\n yrs, months = divmod(months, 12)\n return Time(yrs=yrs, month=months, day=days)\n\n def created_age(self) -> Time:\n \"\"\"\n Returns a Time instance which contains the years, months, and days the account has been up for.\n \"\"\"\n date_time_str = self.created_at\n noob = date_time_str[:10]\n strp = datetime.datetime.strptime(noob, '%Y-%m-%d')\n now = datetime.datetime.utcnow()\n diff = now - strp\n days = diff.days\n months, days = divmod(days, 30)\n yrs, months = divmod(months, 12)\n return Time(yrs=yrs, month=months, day=days)\n\n @property\n def past_day_awarded_count(self) -> int:\n \"\"\"\n Returns amount of people awarded in past day\n \"\"\"\n return self._json_obj['statistics']['pastDayAwardedCount']\n\n @property\n def total_awarded_count(self) -> int:\n \"\"\"\n Returns total amount of people awarded\n \"\"\"\n return self._json_obj['statistics']['awardedCount']\n\n @property\n def win_rate(self) -> float:\n \"\"\"\n Returns Win-rate Ratio of the badge\n \"\"\"\n return self._json_obj['statistics']['winRatePercentage']\n\n async def game(self) -> PlaceInfo:\n \"\"\"\n Returns Place info instance which contains more info about the badge's game\n\n **Returns**\n -----------\n\n roblox_py.PlaceInfo\n \"\"\"\n game = PlaceInfo(\n universe_id=self._json_obj['awardingUniverse']['id'],\n request=self.request)\n await game.update()\n return game\n\n async def thumbnail(self) -> str:\n \"\"\"\n Returns the badge's thumbnail image link.\n \"\"\"\n r = await self.request.request(\n url=f'https://thumbnails.roblox.com/v1/badges/icons?badgeIds={self.id}'\n 
f'&size=150x150&format=Png&isCircular=false')\n return r['data'][0]['imageUrl']\n\n def __repr__(self):\n return self.name\n","sub_path":"roblox_py/BadgeInfo.py","file_name":"BadgeInfo.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"165528257","text":"from graphene import ID, String, ObjectType\nfrom btb.api.models import db\nfrom sqlalchemy import text\n\nfrom promise import Promise\nfrom promise.dataloader import DataLoader\n\nfrom flask import current_app, g\nfrom .match import MatchQuery\n\n\nclass DemandQuery(MatchQuery):\n def __init__(self, skills, location):\n super().__init__(\"btb.match_team_demand\", skills, location)\n\n def map_result(self, record):\n return self.map_default_result(\"demand\", g.demand_loader, record)\n\n\ndef match_demand(demand, cursor = None):\n match_query = DemandQuery(demand.skills, demand.postal_code)\n # match_query.set_radius(demand.radius)\n\n if cursor is not None:\n match_query.set_offset(cursor.offset)\n\n if demand.max_hourly_salary:\n match_query.match_salary(demand.max_hourly_salary)\n\n if demand.quantity:\n match_query.match_quantity(demand.quantity)\n\n return match_query.execute()\n\n\ndef match_demand_by_id(root, info, id, cursor=None):\n with db.engine.begin() as conn:\n sql = text(\"select d.*, c.postal_code from btb.team_demand d, btb.company c where d.company_id = c.id and d.id = :id\")\n data = conn.execute(sql, id=id).fetchone()\n\n for row in data:\n return match_demand(data, cursor)\n\n return {\n \"page_info\": {\n \"has_next_page\": False,\n },\n \"matches\": [],\n }\n\n\ndef match_demands_by_query(root, info, query, cursor=None):\n match_query = DemandQuery(query.skills, query.postal_code,)\n\n match_query.set_radius(query.radius)\n\n if cursor is not None:\n match_query.set_offset(cursor.offset)\n\n if query.max_salary:\n match_query.match_salary(query.max_salary)\n\n if query.min_quantity:\n match_query.match_quantity(query.min_quantity)\n\n return match_query.execute()\n","sub_path":"backend/api/btb/api/schema/resolvers/match_demands.py","file_name":"match_demands.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"78862895","text":"import tensorflow as tf\nfrom tensorflow.compat.v1 import global_variables_initializer as global_variables_initializer\nfrom tensorflow.compat.v1 import tables_initializer as tables_initializer\nfrom tensorflow.compat.v1 import Session as Session\n\nimport tensorflow_hub as hub\nimport pandas as pd\nimport numpy as np\nimport spacy\nfrom scipy import spatial\n\nfrom flair.models import SequenceTagger\nfrom flair.data import Sentence\nfrom segtok.segmenter import split_single\n\nclass action_classifier:\n\tdef __init__(self):\n\t\tself.nlp = spacy.load('en_core_web_sm')\n\t\turl = \"https://tfhub.dev/google/elmo/2\"\n\t\tself.embed = hub.Module(url)\n\n\t\tself.tagger = SequenceTagger.load('pos')\n\t\tself.verb_classes = []\n\t\tself.class_length = None\n\t\tself.action_vec = []\n\n\tdef split_str(self, string):\n\t\ttemp = string[1:len(string)-1].split(',')\n\t\tresult = []\n\t\tfor word in temp:\n\t\t\tword = word.strip()\n\t\t\tword = word[1:len(word)-1]\n\t\t\tresult.append(word)\n\n\t\treturn result\n\n\tdef create_dict(self):\n\t\t\n\t\tself.verb_classes = np.asarray(pd.read_csv('EPIC_verb_classes')['class_key'])\n\t\tself.class_length = self.verb_classes.shape[0]\n\n\t\tverb_classes_total = 
np.asarray(pd.read_csv('EPIC_verb_classes')['verbs'])\n\n\t\tidx = 0\n\t\tall_vec = []\n\t\tlookup = []\n\t\tfor i in range(verb_classes_total.shape[0]):\n\t\t\ttemp = self.split_str(verb_classes_total[i])\n\t\t\tfor j in temp:\n\t\t\t\tall_vec.append(j)\n\t\t\t\tidx += 1\n\t\t\tlookup.append(idx)\n\n\t\taction_embeddings = self.embed(all_vec, signature=\"default\", as_dict=True)[\"default\"]\n\n\t\taction_vector = []\n\t\twith Session() as sess:\n\t\t\tsess.run(global_variables_initializer())\n\t\t\tsess.run(tables_initializer())\n\t\t\taction_vector = sess.run(action_embeddings)\n\n\t\tfor i in range(len(lookup)):\n\t\t\tif i == 0:\n\t\t\t\ttemp_vec = np.asarray(action_vector[0:lookup[i], :])\n\t\t\t\tavg_vec = np.mean(temp_vec)\n\t\t\t\tself.action_vec.append((self.verb_classes[i], avg_vec))\n\t\t\telse:\n\t\t\t\ttemp_vec = np.asarray(action_vector[lookup[i-1]:lookup[i], :])\n\t\t\t\tavg_vec = np.mean(temp_vec)\n\t\t\t\tself.action_vec.append((self.verb_classes[i], avg_vec))\n\n\tdef classify(self, embeddings):\n\t\tverb_classes = []\n\t\tfor ebd in embeddings:\n\t\t\tmax_sim = 0\n\t\t\tverb_class = None\n\t\t\tfor i in range(self.class_length):\n\t\t\t\tsim = 1 - spatial.distance.cosine(ebd, self.action_vec[i][1])\n\t\t\t\t\n\t\t\t\tif sim > max_sim:\n\t\t\t\t\tmax_sim = sim\n\t\t\t\t\tverb_class = self.action_vec[i][0]\n\t\t\tverb_classes.append(verb_class)\n\n\t\treturn verb_classes\n\n\tdef pos_tag(self, text):\n\t\tsentences = [Sentence(sent, use_tokenizer=True) for sent in split_single(text)]\n\t\tsent2act = []\n\t\tsent_len = []\n\n\t\tfor sent in sentences:\n\t\t\tself.tagger.predict(sent)\n\t\t\tcount = 0\n\t\t\tactions = []\n\t\t\tfor token in sent:\n\t\t\t\ttag = token.get_tag(tag_type='pos').value\n\n\t\t\t\tif tag[0:2] == 'VB':\n\t\t\t\t\tactions.append(token.text)\n\t\t\t\t\tcount += 1\n\t\t\tsent2act += actions\n\t\t\tsent_len.append(count)\n\n\t\treturn sent2act, sent_len\n\n\tdef parse_file(self, filename):\n\t\tfile = open(filename, 'r')\n\t\ttext = file.read()\n\n\t\ttext = text.lower().replace('\\n', ' ').replace('\\t', ' ').replace('\\xa0', ' ')\n\t\ttext = \" \".join(text.split())\n\n\t\tsentences, sent_lookup = self.pos_tag(text)\n\n\t\tembeddings = self.embed(sentences, signature=\"default\", as_dict=True)[\"default\"]\n\n\t\tx = []\n\t\twith Session() as sess:\n\t\t\tsess.run(global_variables_initializer())\n\t\t\tsess.run(tables_initializer())\n\t\t\tx = sess.run(embeddings)\n\t\t\n\t\tverb_classes = self.classify(x)\n\n\t\tresult = []\n\t\taux = []\n\t\tidx = 0\n\t\tfor i in sent_lookup:\n\t\t\tresult.append(verb_classes[idx:idx+i])\n\t\t\taux.append(sentences[idx:idx+i])\n\t\t\tidx += i\n\t\t\n\t\treturn aux, result\n\n\nac = action_classifier()\nac.create_dict()\naux, sent = ac.parse_file('recipe.txt')\nfor i in range(len(aux)):\n\tprint(aux[i])\n\tprint(sent[i])\n\tprint()\n\n\n\n\n\n","sub_path":"elmo_avg.py","file_name":"elmo_avg.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"524810451","text":"#Code by: Cris Laney, Ayalew Lidete, Robert Posada, Griffin Dutson, and Joey Bush\nimport random\nfrom poker_hands import *\nmoney = 100\n\ndef straight(hand):\n list_of_values = []\n for card in hand:\n \tvalue = card[:-1].lower() \n \tlist_of_values.append(value)\n new_values = []\n \n for i in list_of_values:\n if i == \"a\":\n i = 14\n if i == \"k\":\n i = 13\n if i == \"q\":\n i = 12\n if i == \"j\":\n i = 11\n if i == \"1\":\n i = 1\n if i == \"2\":\n i = 
2\n        if i == \"3\":\n            i = 3\n        if i == \"4\":\n            i = 4\n        if i == \"5\":\n            i = 5\n        if i == \"6\":\n            i = 6\n        if i == \"7\":\n            i = 7\n        if i == \"8\":\n            i = 8\n        if i == \"9\":\n            i = 9\n        if i == \"10\":\n            i = 10\n        if i == \"z\":\n            i = 100\n        \n        else:\n            new_values.append(i)\n\n    length = len(new_values)\n    for counter in range(length):\n        for index in range(length - 1):\n            if new_values[index] > new_values[index + 1]:\n                switch = new_values[index]\n                new_values[index] = new_values[index + 1]\n                new_values[index + 1] = switch\n    counter = 0\n    for i in range(length-1):\n        if new_values[i] + 1 == new_values[i+1]:\n            counter = counter + 1\n    if counter == 4:\n        return True\n    \ndef royal_flush(hand):\n    list_of_values = []\n    for card in hand:\n    \tvalue = card[:-1].lower() \n    \tlist_of_values.append(value)\n    for item in list_of_values:\n        if item == \"a\":\n            return True\n\ndef deal():\n    deck = [\"Ah\", \"2h\", \"3h\", \"4h\", \"5h\", \"6h\", \"7h\", \"8h\", \"9h\", \"10h\", \"Jh\", \"Qh\", \"Kh\", \"As\", \"2s\", \"3s\", \"4s\", \"5s\", \"6s\", \"7s\", \"8s\", \"9s\", \"10s\", \"Js\", \"Qs\", \"Ks\", \"Ac\", \"2c\", \"3c\", \"4c\", \"5c\", \"6c\", \"7c\", \"8c\", \"9c\", \"10c\", \"Jc\", \"Qc\",\"Kc\", \"Ad\", \"2d\", \"3d\", \"4d\", \"5d\", \"6d\", \"7d\", \"8d\", \"9d\", \"10d\", \"Jd\", \"Qd\", \"Kd\"]\n    hand = random.sample(deck,5)\n    return hand\n\ndef name(hand):\n    if four_of_a_kind(hand) == True:\n        return(\"4 of a kind\")\n    \n    elif full_house(hand) == True:\n        return(\"full house\")\n    \n    elif flush(hand) == True:\n        if straight(hand) == True and royal_flush(hand) == True:\n            return (\"royal flush\")\n        \n        elif straight(hand) == True:\n            return(\"straight flush\")\n        \n        else:\n            return(\"flush\")\n    \n    elif straight(hand) == True:\n        return(\"straight\")\n    \n    elif three_of_a_kind(hand) == True:\n        return(\"3 of a kind\")\n    \n    elif two_pair(hand) == True:\n        return(\"2 pair\")\n    \n    elif pair(hand) == True:\n        return(\"1 pair\")\n    else:\n        return(\"High Card\")\n    \ndef point_value(hand):\n    if hand == \"royal flush\":\n        value = 10\n    elif hand == \"straight flush\":\n        value = 9\n    elif hand == \"4 of a kind\":\n        value = 8\n    elif hand == \"full house\":\n        value = 7\n    elif hand == \"flush\":\n        value = 6\n    elif hand == \"straight\":\n        value = 5\n    elif hand == \"3 of a kind\":\n        value = 4\n    elif hand == \"2 pair\":\n        value = 3\n    elif hand == \"1 pair\":\n        value = 2\n    else:\n        value = 1\n    return value\n\ndef display(hand):\n    for item in hand:\n        if len(item)>2:\n            print(\" ------ \")\n            print(\"|        |\")\n            print(\"|  \", item, \"|\")\n            print(\"|        |\")\n            print(\" ------ \")\n        else:\n            print(\" ------ \")\n            print(\"|        |\")\n            print(\"|  \", item, \" |\")\n            print(\"|        |\")\n            print(\" ------ \")\n\ndef winner(player_1,player_2):\n    if player_1 > player_2:\n        return(\"You win\")\n    elif player_2 > player_1:\n        return(\"Your oponent wins\")\n    else:\n        return(\"It's a tie \")\n    \ndef bet_win(money, win, bet):\n\n    if win == \"You win\":\n        money = money + bet*2\n        return(money)\n    \n    elif win == \"Your oponent wins\":\n        money = money - bet\n        return(money)\n\n    else:\n        bet = 0\n        money = money\n        return(money)\n    \ndef if_bet(betting, money):\n    if betting.lower() == \"y\":\n        \n        play_bet = eval(input(\"How much would you like to bet?\"))\n        \n        if play_bet > money:\n            print(\"You do not have enough money for that bet. No bet will be placed.\")\n            bet = 0\n            return(bet)\n        \n        elif play_bet < 0:\n            print(\"Not a valid bet. 
No bet will be placed.\")\n bet = 0\n return(bet)\n else:\n bet = play_bet\n return(bet)\n\n elif betting.lower() == \"n\":\n bet = 0\n return(bet)\n \n else:\n bet = 0\n return(bet)\ndef count(money, money_fin):\n money = 100\n\n if money_fin >= 0:\n money = money_fin\n return money\n \n else:\n money = 100\n return money\n\ndef main():\n cont = True\n \n while(cont == True):\n \n gostop = input(\"Would you like to play a game of poker? [Y/N]\")\n if gostop.lower() == \"y\":\n hand1 = deal()\n hand2 = deal()\n global money\n hand1_val = name(hand1)\n hand2_val = name(hand2)\n hand1_pts = point_value(hand1_val)\n hand2_pts = point_value(hand2_val)\n \n bet = 0\n \n print(\"Your hand:\")\n show_hand1 = display(hand1)\n print(\"You have a \", hand1_val)\n betting = input(\"Would you like to bet? [Y/N]\")\n \n bet = if_bet(betting, money)\n print(\"---------------------\")\n \n \n\n print(\"Your oponents hand:\")\n show_hand2 = display(hand2)\n print(\"Your oponent has a \", hand2_val)\n win = winner(hand1_pts, hand2_pts)\n print(win)\n\n money = bet_win(money, win, bet)\n \n\n print(\"You now have \", money, \"$.\", sep = \"\")\n \n elif gostop.lower() == \"n\":\n cont = False\n \n else:\n print(\"I'm sorry that is not an available option\")\nmain()\n \n","sub_path":"poker_main.py","file_name":"poker_main.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"239248760","text":"#from pessoa import Pessoa\n#from ave import Ave\n#from formageometrica import FormaGeometrica\nfrom triangulo import Triangulo\n'''\nclass Pessoa:\n def __init__(self, cpf):\n self.cpf = cpf\n\n def __str__(self):\n return self.nome\n \n def set_nome(self, nome):\n self.nome = nome\n\n def get_nome(self):\n return self.nome\n'''\n'''\n#Saída de dados(Isso é um comentário)\nprint(\"Olá mundo\")\n\na = int(\"2\")\nb = str(1)\nc = 1.5\nd = True\ne = False\n\nf = a + c\n\nprint(a, b, c, d, e, f)\n\nsoma = eval(input())\nprint(soma)\n\na = input()\nprint(a)\n\na = int(input())\nprint(a+1)\n\neval(\"print('ola mundo')\")\n'''\n\n'''\nsoma = 1+1\ndivisao = int(2/2)\nmult = 3*3.0\nsubtracao = 3 - 5.0\n\ndivInt = 5//2\nx = 3%2\nexp = 2**2.0\n\nprint(soma, divisao, mult, subtracao, divInt, x, exp)\n'''\n'''\na = int(input())\nif a >= 0:\n print(\"a é maior ou igual 0\")\nelif a == 100:\n print(\"a é 100\")\nelif (a != 100) or (a <= 0):\n print(\"a é diferente de 100\")\n print(\"a menor que 0\")\n'''\n'''\na = int(input())\n\nswitcher = {1:\"a\"}\n\nprint(switcher.get(a))\n'''\n'''\nnomes = [\"Alef\", \"Tiago\", \"Cleiton\"]\n\nfor n in nomes:\n print(n)\nelse:\n print(nomes)\n\ni = 0\nwhile i < 3:\n print(nomes[i])\n i = i + 1\nelse: \n print(nomes)\n'''\n'''\ndef funcao(numero):\n numero = numero + 1\n print(numero)\n\nnumero = 1\nprint(numero)\nfuncao(numero)\nprint(numero)\n'''\n'''\na = []\na = \"b\"\na = \"a\"\nprint(a[1])\n'''\n\n'''\ntiago = Pessoa()\ntiago.set_nome(\"Tiago\")\nprint(tiago.get_nome())\n'''\n'''\nbird = Ave(\"Galinha\")\nprint(bird)\nbird.corre()\n'''\n\ntri = Triangulo(2, 4)\ntri.calcula_area()\nprint(tri.area)\n","sub_path":"testes/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"375771518","text":"__all__ = ['QmlReceive']\n\nimport PyQt5.QtCore\nimport save\nimport csv\n\n\nclass QmlReceive(PyQt5.QtCore.QObject):\n def __init__(self, parent=None):\n super(__class__, 
self).__init__(parent)\n\n @PyQt5.QtCore.pyqtSlot(result=str, name='root_path')\n def root_path(self, qml_mode=True) -> str:\n return save.path.root_path(qml_mode)\n\n @PyQt5.QtCore.pyqtSlot(str, result=list, name='load_config')\n def load_config(self, config: str) -> list:\n if config == 'role':\n return [save.role.load()]\n if config == 'equip':\n return [save.equip.load()]\n\n @PyQt5.QtCore.pyqtSlot(str, list, name='save_config')\n def save_config(self, config: str, config_info: list) -> None:\n if config == 'role':\n save.role.save(config_info[0])\n if config == 'equip':\n save.equip.save(config_info[0])\n\n def equip_filter(self, only=False):\n def _equip_filter(region: str, filter_func) -> list:\n result = []\n with open(prefix_path + region + '.csv', 'r', encoding='UTF-8') as f:\n csv_reader = csv.DictReader(f)\n for item in csv_reader:\n if filter_func(item['id'], item['limitLVMin'], item['menpai']):\n if only:\n return [dict(item)]\n result.append(\n {'index': item['id'],\n 'name': item['name'],\n 'icon': item['icon'],\n 'desc': item['catDesc']}\n )\n return result\n\n prefix_path = self.root_path(False) + '/data/equip/'\n return _equip_filter\n\n @PyQt5.QtCore.pyqtSlot(str, int, int, result=list, name='usable_list')\n def usable_list(self, region: str, level: int, menpai: int) -> list:\n def __equip_filter(_, _level: str, _menpai: str) -> bool:\n if int(_level) > level:\n return False\n if (_menpai != '-1') and (_menpai != str(menpai)):\n return False\n return True\n return self.equip_filter()(region, __equip_filter)\n\n @PyQt5.QtCore.pyqtSlot(str, str, result=list, name='equip_detail')\n def equip_detail(self, region: str, index: str):\n def __index_filter(_index: str, _1, _2) -> bool:\n if _index == index:\n return True\n return False\n result = self.equip_filter(only=True)(region, __index_filter)\n return result\n","sub_path":"dmail/qmlReceive.py","file_name":"qmlReceive.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"20452843","text":"import curses\nfrom os import system\nimport curses.textpad\n\n#Create a list of buttons, vertically stacked, at the xy position\nclass Buttons:\n def __init__(self, parent, loc_y, loc_x, width, button_list):\n self.Active = False\n self.parent = parent\n self.cur_button = 0\n self.button_list = []\n y = loc_y + 1\n x = loc_x + 1\n self.y = y\n self.x = x\n self.loc_y = loc_y\n self.loc_x = loc_x\n self.width = width\n self.button_list_string = button_list\n button_height = 3\n self.button_box = parent.derwin(button_height * len(button_list) + 2, width + 2, loc_y, loc_x)\n self.button_box.border(0)\n self.button_box.box()\n curses.textpad.Textbox(self.button_box)\n self.button_box.refresh()\n for i in range (0, len(button_list)):\n self.button_list.append(Button(y, x, button_height, width, button_list[i][0], button_list[i][1]))\n y += button_height\n self.cur_button = 0\n self.button_list[self.cur_button].highlight()\n\n def ActiveColors(self):\n curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)\n self.button_box.bkgd(' ', curses.color_pair(3))\n self.button_box.refresh()\n for button in self.button_list:\n curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)\n button.button.bkgd(' ', curses.color_pair(4))\n button.button.refresh()\n \n def Deactivate(self):\n self.Active = False\n self.button_box.bkgd(' ', curses.color_pair(4))\n self.button_box.refresh()\n for button in self.button_list:\n button.button.bkgd(' ', 
curses.color_pair(4))\n button.button.refresh()\n \n\n def Activate(self):\n self.Active = True\n self.ActiveColors()\n self.button_list[self.cur_button].highlight()\n self.button_list[self.cur_button].highlight()\n cont = True\n while(cont):\n ch = self.button_box.getch()\n if ch == curses.KEY_UP and self.cur_button > 0:\n self.button_list[self.cur_button].highlight_off()\n self.cur_button -= 1\n self.button_list[self.cur_button].highlight()\n elif ch == curses.KEY_DOWN and self.cur_button < len(self.button_list) - 1:\n self.button_list[self.cur_button].highlight_off()\n self.cur_button += 1\n self.button_list[self.cur_button].highlight()\n elif ch == 10 or ch == curses.KEY_ENTER:\n self.button_list[self.cur_button].execute()\n return 0\n elif ch == 9:\n return 1\n cont = False\n\n def clear(self):\n self.button_box.clear()\n curses.endwin()\n self.button_box.refresh()\n\n def reshow(self):\n self.button_list = []\n button_height = 3\n self.button_box = curses.newwin(button_height * len(self.button_list_string) + 2, self.width + 2, self.loc_y, self.loc_x)\n self.button_box.border(0)\n self.button_box.box()\n curses.textpad.Textbox(self.button_box)\n y = self.y\n self.button_box.refresh()\n for i in range (0, len(self.button_list_string)):\n self.button_list.append(Button(y, self.x, button_height, self.width, self.button_list_string[i][0], self.button_list_string[i][1]))\n y += button_height\n self.button_list[self.cur_button].highlight()\n\nclass Button:\n def __init__(self, loc_y, loc_x, height, width, title, function):\n self.title = title\n self.function = function\n self.height = height\n self.width = width\n self.button = curses.newwin(height, width, loc_y, loc_x)\n self.button.border(0)\n self.button.box()\n self.loc_y = loc_y\n self.button.move(height / 2, width / 2 - len(title) / 2)\n self.button.addstr(title)\n self.button.refresh()\n\n def highlight(self):\n self.button.bkgd(' ', curses.color_pair(2))\n self.button.refresh()\n\n def highlight_off(self):\n self.button.bkgd(' ', curses.color_pair(1))\n self.button.refresh()\n\n def execute(self):\n self.function(self.loc_y)\n\n\n '''\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)\n self.button.move(self.height / 2, self.width / 2 - len(self.title) / 2)\n self.button.addstr(self.title, curses.color_pair)\n self.button.refresh()\n '''","sub_path":"Python/nCurses, MySQL/Button.py","file_name":"Button.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"312861906","text":"\nimport sys\nimport re\nfrom collections import namedtuple\nfrom KeyWords import *\n\nToken = namedtuple('Token','type value index')\nWord = namedtuple('Word','type RE')\nclass Scanner:\n '''\n 类构造函数,输入需要被分析文件的路径\n '''\n def __init__(self,filePath):\n self.data = ''\n self.REs = []\n self.index = 0\n self.sizeofFile = 0\n with open(filePath) as f:\n self.data = f.read()\n # 解决注释\n self.data,_ = re.subn(RE_REMARK0,'\\n',self.data)\n self.data,_ = re.subn(RE_REMARK1,'',self.data)\n self.sizeofFile = len(self.data)\n # 准备匹配\n keyList = keyWords + triple_separetor + double_separetor + single_separetor \n self.REs.append(Word('KEY_WORDS',re.compile('|'.join(keyList))))\n self.REs.append(Word(IDENTIFIER,re.compile(RE_ID)))\n self.REs.append(Word(INTEGER,re.compile(RE_INT)))\n self.REs.append(Word(FLOAT,re.compile(RE_FLOAT)))\n self.REs.append(Word(STRING,re.compile(RE_STRING)))\n self.REs.append(Word(CHAR,re.compile(RE_CHAR)))\n 
self.REs.append(Word(SPACE,re.compile(RE_SPACE)))\n self.REs.append(Word(ERR_ID,re.compile(RE_ERR_ID))) \n '''\n 调用此方法可生成一个迭代器,用于返回下个词的Token\n 此方法调用scan函数,处理其返回的Token并返回\n '''\n def next(self):\n while self.index < self.sizeofFile:\n res = self.scan()\n if res.type != SPACE:\n if res.type == 'KEY_WORDS':\n yield Token(\"'%s'\" % res.value,res.value,res.index)\n elif res.type == ERR_ID:\n yield Token(\"ERROR\",\"ID_INVALID\",res.index)\n else:\n yield res\n yield Token(\"$\",\"$\",self.index)\n \n '''\n 私有函数,使用正则表达式进行匹配,返回当前匹配到的一个词的Token\n ''' \n def scan(self):\n maxIndex = self.index\n res = Token('','','')\n for this in self.REs:\n temp = this.RE.match(self.data,self.index)\n if temp:\n if temp.span()[1] > maxIndex:\n maxIndex = temp.span()[1]\n res = Token(this.type,temp.group(),maxIndex)\n if self.index != maxIndex:\n self.index = maxIndex\n else:\n return Token('ERROR','UNABLE_TO_MATCH',self.index)\n return res\n","sub_path":"Scanner.py","file_name":"Scanner.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"406251722","text":"#!/usr/bin/env python\nfrom optparse import OptionParser\nfrom collections import Counter\nimport os, pdb, shutil, subprocess\nimport fdr, gff, ggplot, math, stats, te\n\n################################################################################\n# te_cuffdiff.py\n#\n# Compute stats and plot differential expression fold changes for genes\n# w/ and w/o each TE family.\n################################################################################\n\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = 'usage: %prog [options] '\n parser = OptionParser(usage)\n parser.add_option('-o', dest='out_dir', default='te_diff', help='Output directory [Default: %default]')\n parser.add_option('-t', dest='te_gff', default='%s/hg19.fa.out.tpf.gff'%os.environ['MASK'])\n (options,args) = parser.parse_args()\n\n if len(args) != 2:\n parser.error('Must provide .gtf and .diff files')\n else:\n gtf_file = args[0]\n diff_file = args[1]\n\n # hash genes -> TEs\n gene_tes = te.hash_genes_repeats(gtf_file, options.te_gff, gene_key='transcript_id', add_star=True, stranded=True)\n\n # create a fake family for unrepetitive genes\n for line in open(gtf_file):\n a = line.split('\\t')\n gene_id = gff.gtf_kv(a[8])['transcript_id']\n if not gene_id in gene_tes:\n gene_tes[gene_id] = set([('-','-','*')])\n\n # get diffs stats\n gene_diffs, te_diffs = get_diff_stats(diff_file, gene_tes)\n\n # clean plot directory\n if os.path.isdir(options.out_dir):\n shutil.rmtree(options.out_dir)\n os.mkdir(options.out_dir)\n\n # stats\n table_lines, pvals = compute_stats(te_diffs, gene_diffs, options.out_dir)\n\n # perform multiple hypothesis correction\n qvals = fdr.ben_hoch(pvals)\n\n table_out = open('%s/table.txt' % options.out_dir, 'w')\n for i in range(len(table_lines)):\n print >> table_out, '%s %10.2e' % (table_lines[i],qvals[i])\n table_out.close()\n\n\n################################################################################\n# cdf_plot\n################################################################################\ndef cdf_plot(te_or, w_te, wo_te, out_pdf):\n rep, fam, orient = te_or\n\n # name plot\n if fam == '-':\n label = 'dTE-RNAs/%s' % orient\n elif fam == '*':\n label = 'TE-RNAs/%s' % orient\n elif rep == '*':\n label = '%s-RNAs/%s' % 
(fam,orient)\n else:\n label = '%s-RNAs/%s' % (rep,orient)\n\n # construct data frame\n df = {}\n df['fold'] = wo_te + w_te\n df['class'] = ['d%s' % label]*len(wo_te) + [label]*len(w_te)\n\n ggplot.plot('te_diff.r', df, [out_pdf])\n\n\n################################################################################\n# compute_stats\n################################################################################\ndef compute_stats(te_diffs, gene_diffs, plot_dir):\n pvals = []\n table_lines = []\n\n for te_or in te_diffs:\n rep, fam, orient = te_or\n \n for sample_key in te_diffs[te_or]: \n sample1, sample2 = sample_key\n\n # if enough data\n if len(te_diffs[te_or][sample_key]) >= 10:\n wo_te = list((gene_diffs[sample_key] - te_diffs[te_or][sample_key]).elements())\n w_te = list(te_diffs[te_or][sample_key].elements())\n\n wo_mean = stats.mean(wo_te)\n w_mean = stats.mean(w_te)\n\n z, p = stats.mannwhitneyu(w_te, wo_te)\n\n cols = (rep, fam, orient, sample1, sample2, len(w_te), w_mean, wo_mean, z, p)\n table_lines.append('%-17s %-17s %1s %-10s %-10s %6d %9.2f %9.2f %8.2f %10.2e' % cols)\n\n pvals.append(p)\n\n # plot ...\n if rep in ['*'] and fam in ['*','LINE/L1','SINE/Alu','LTR/ERV1','LTR/ERVL-MaLR','LINE/L2','LTR/ERVL','SINE/MIR','DNA/hAT-Charlie','LTR/ERVK','DNA/TcMar-Tigger']:\n out_pdf = '%s/%s_%s_%s_%s-%s.pdf' % (plot_dir,rep.replace('/','-'),fam.replace('/','-'),orient,sample1,sample2)\n cdf_plot(te_or, w_te, wo_te, out_pdf)\n\n return table_lines, pvals\n\n\n################################################################################\n# get_diff_stats\n################################################################################\ndef get_diff_stats(diff_file, gene_tes):\n # initialize diff counters\n gene_diffs = {}\n te_diffs = {}\n\n # read diff file\n diff_in = open(diff_file)\n headers = diff_in.readline()\n line = diff_in.readline()\n while line:\n a = line.split('\\t')\n\n gene_id = a[0]\n sample1 = a[4]\n sample2 = a[5]\n status = a[6]\n fpkm1 = float(a[7])\n fpkm2 = float(a[8])\n fold = float(a[9])\n tstat = float(a[10])\n sig = a[-1].rstrip()\n\n if sample2 == 'input':\n sample1, sample2 = sample2, sample1\n fpkm1, fpkm2 = fpkm2, fpkm1\n fold *= -1\n tstat *= -1\n\n # cap fold/tstat\n fold = min(fold, 6)\n fold = max(fold, -6)\n tstat = min(tstat, 6)\n tstat = max(tstat, -6)\n\n if gene_id in gene_tes and status == 'OK' and not math.isnan(tstat):\n # save for global\n #gene_diffs.setdefault((sample1,sample2),Counter())[tstat] += 1\n gene_diffs.setdefault((sample1,sample2),Counter())[fold] += 1\n\n # save for TEs\n for te_or in gene_tes[gene_id]:\n if not te_or in te_diffs:\n te_diffs[te_or] = {}\n #te_diffs[te_or].setdefault((sample1,sample2),Counter())[tstat] += 1\n te_diffs[te_or].setdefault((sample1,sample2),Counter())[fold] += 1\n\n line = diff_in.readline()\n diff_in.close()\n\n return gene_diffs, te_diffs\n\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n main()\n #pdb.runcall(main)\n","sub_path":"te_diff.py","file_name":"te_diff.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568236084","text":"import time\nimport threading\nimport random\n\nhhhh = 0\nlock = threading.Lock()\n\ndef add_number():\n global hhhh\n for i in range(10):\n print(\"before add: \", i, hhhh)\n hhhh += 1\n print(\"after add: \", i, 
hhhh)\n \ndef subtract_number():\n global hhhh\n for i in range(10):\n print(\"before substract: \", i, hhhh)\n hhhh -= 1\n print(\"after subtract: \", i, hhhh)\n \n \njob_list = []\njob_list.append(threading.Thread(target=subtract_number, args=()))\njob_list.append(threading.Thread(target=add_number, args=()))\n\nfor t in job_list:\n t.start()\n\nfor t in job_list:\n t.join()\n \nprint(\"Done\")\n","sub_path":"tools/py_process_thread/threading_no_lock.py","file_name":"threading_no_lock.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"648379980","text":"import os\nimport numpy as np\nimport math\n\n\ndef parse_res(filename):\n for letter in range(len(filename)):\n if filename[letter:letter+2] == 'rr':\n res = 1/int(filename[letter+2:letter+6])\n return res\n\n\ndef parse_mdt(filename):\n r\"\"\"Checks whether input file is MDT by counting number of underscores.\n\n Args:\n filename (String)\n\n Returns:\n boolean\n\n \"\"\"\n if filename.count(\"_\") == 3:\n return True\n else:\n return False\n\n\ndef read_surface(filename, path=None, fortran=True, nans=True,\n transpose=False, rotate=True):\n r\"\"\"Reshapes surface from 1d array into an array of\n (II, JJ) records.\n\n Ignores the header and footer of each record.\n\n Args:\n file (np.array): A .dat file containing a 1D array of floats\n respresenting input surface.\n\n Returns:\n np.array: data of size (II, JJ)\n \"\"\"\n order = 'F' if fortran else 'C'\n\n if path is None:\n path = \"\"\n\n filepath = os.path.join(os.path.normpath(path), filename)\n fid = open(filepath, mode='rb')\n buffer = fid.read(4)\n size = np.frombuffer(buffer, dtype=np.int32)[0]\n shape = (int(math.sqrt(size//8)*2), int(math.sqrt(size//8)))\n fid.seek(0)\n\n # Loads Fortran array (CxR) or Python array (RxC)\n floats = np.array(np.frombuffer(fid.read(), dtype=np.float32), order=order)\n floats = floats[1:len(floats)-1]\n floats = np.reshape(floats, shape, order=order)\n\n if nans:\n floats[floats <= -1.7e7] = np.nan\n if transpose:\n return floats.T\n if rotate:\n return np.rot90(floats, 1)\n\n return floats\n\n\ndef read_surfaces(filename, path=None, fortran=True, nans=True,\n transpose=True, number=1, start=None):\n r\"\"\"\n \"\"\"\n order = 'F' if fortran else 'C'\n\n if path is None:\n path = \"\"\n\n arr = []\n filepath = os.path.join(os.path.normpath(path), filename)\n fid = open(filepath, mode='rb')\n buffer = fid.read(4)\n size = np.frombuffer(buffer, dtype=np.int32)[0]\n shape = (int(math.sqrt(size//8)*2), int(math.sqrt(size//8)))\n\n hdr_pointer = (shape[0]*shape[1]+2)*4\n if start is not None:\n fid.seek(start*hdr_pointer)\n\n # Loads Fortran array (CxR) or Python array (RxC)\n while buffer != b'' and len(arr) <= number-1:\n floats = np.array(np.frombuffer(fid.read(size),\n dtype=np.float32), order=order)\n floats = np.reshape(floats, shape, order=order)\n arr.append(floats)\n print(f'Loaded MDT #{(start+len(arr))}')\n footer_value = np.frombuffer(fid.read(4), dtype=np.int32)[0]\n buffer = fid.read(4)\n\n arr = np.array(arr)\n if nans:\n arr[arr <= -1.7e7] = np.nan\n if transpose:\n return np.transpose(arr, (0, 2, 1))\n\n return arr\n\n\ndef write_surface(filename, arr, path=None, fortran=False, nan_mask=None,\n overwrite=False):\n r\"\"\"\n \"\"\"\n order = 'F' if fortran else 'C'\n \n if path is None:\n path = \"\"\n filepath = os.path.join(path, filename)\n\n if os.path.exists(filepath) and not overwrite:\n raise OSError(\"File already exists. 
Pass overwrite=True to overwrite.\")\n\n if filepath[len(filepath)-4:] != '.dat':\n filepath += '.dat'\n\n arr = arr.astype('float32')\n floats = arr.flatten(order=order)\n\n if nan_mask is not None:\n floats = floats * nan_mask\n floats[np.isnan(floats)] = -1.9e+19\n\n # Calculate header (number of total bytes in MDT)\n print('array size = ', floats.size)\n header = np.array(arr.size * 4)\n\n # Convert everything to bytes and write\n floats = floats.tobytes()\n header = header.tobytes()\n footer = header\n fid = open(filepath, mode='wb')\n fid.write(header)\n fid.write(floats)\n fid.write(footer)\n fid.close()\n\n\ndef read_params(txt_file, path):\n filepath = os.path.join(os.path.normpath(path), txt_file)\n f = open(filepath, \"r\")\n f.readline()\n params = []\n for line in f.read().splitlines():\n params.append(line.split())\n params = np.array(params)\n \n return params\n\n\ndef main():\n print(\"read_data.py main\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mdt_calculations/data_utils/dat.py","file_name":"dat.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"425608103","text":"from collections import defaultdict\n\n\ndef can_finish(num_courses, prerequisites):\n graph = build_graph(prerequisites)\n\n for course in graph:\n visited = set()\n if not can_take(course, graph, visited):\n return False\n\n return True\n\n\ndef can_take(course, graph, visited):\n visited.add(course)\n\n for prereq in graph[course]:\n if prereq in visited:\n return False\n\n if not can_take(prereq, graph, visited):\n return False\n\n visited.remove(course)\n\n return True\n\n\ndef build_graph(prerequisites):\n graph = defaultdict(list)\n for c1, c2 in prerequisites:\n graph[c1].append(c2)\n graph.setdefault(c2, [])\n return graph\n","sub_path":"course_schedule.py","file_name":"course_schedule.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463908596","text":"from kivy.app import App\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.label import Label\nfrom kivy.properties import ObjectProperty\nimport time\nfrom kivy.uix.screenmanager import ScreenManager,Screen\nfrom kivy.properties import StringProperty\nfrom kivy.core.audio import SoundLoader\nimport string\nimport random\nfrom questions import getQuestion\n\nalarms=[]\nwith open('alarms.txt',mode='r+') as file:\n contents=file.read().split('\\n')\n\nfor i in range(0,len(contents)-1):\n alarms.append(contents[i].split(','))\n\nturn=0\n\ndef alarm_txt_update():\n\n with open(\"alarms.txt\", mode=\"r+\") as file:\n file.truncate()\n for i in range(len(alarms)):\n for j in range(len(alarms[i])):\n if j!=2:\n file.write(alarms[i][j] + \",\")\n else:\n file.write(alarms[i][j])\n file.write('\\n')\n\n\nclass Setting(Screen,Widget):\n hour_=ObjectProperty(None)\n minute_=ObjectProperty(None)\n ampm_=ObjectProperty(None)\n def alarm_set(self):\n hour_restraint=list(str(x) for x in range(1,13))\n minute_restraint=list(str(x) for x in range(0,60))\n ampm_restraint=[\"AM\",\"PM\"]\n global alarms\n global turn\n if(self.hour_.text not in hour_restraint or self.minute_.text not in minute_restraint or self.ampm_.text.upper() not in ampm_restraint):\n print(\"Invalid Input\\n Try Again\")\n alarms[turn-1][0]=\"null\"\n print(alarms)\n alarm_txt_update()\n else:\n 
ampm_data=self.ampm_.text.upper()\n min_data=int(self.minute_.text)\n hour_data=int(self.hour_.text)\n if hour_data<10:\n self.hour_.text=\"0\"+str(hour_data)\n if min_data<10:\n self.minute_.text=\"0\"+str(min_data)\n\n time_string=self.hour_.text+\":\"+self.minute_.text+\" \"+ampm_data\n print(time_string)\n\n\n alarms[turn-1][0]=time_string\n print(alarms)\n #New.check_alarm()\n alarm_txt_update()\n\n\nsound=SoundLoader.load('default.wav')\n\nclass New(Screen):\n\n clock_label=ObjectProperty()\n\n text_label1=StringProperty('')\n text_label2=StringProperty('')\n text_label3=StringProperty('')\n text_label4=StringProperty('')\n text_label5=StringProperty('')\n\n\n\n def __init__(self, **kwargs):\n super(New, self).__init__(**kwargs)\n #self.check_alarm()\n Clock.schedule_interval(self.clock_label.update,1)\n Clock.schedule_interval(lambda dt:self.text1(),1)\n Clock.schedule_interval(lambda dt:self.check_alarm(),60)\n\n def check_alarm(self):\n time_str = time.strftime('%I:%M %p')\n global turn\n for i in range(len(alarms)):\n if time_str in alarms[i] and alarms[i][2] == \"on\":\n print(\"true\")\n turn=i\n sound.play()\n sm.current = \"Selection\"\n sm.transition.direction = \"right\"\n\n\n def turn_update(self,number):\n global turn\n turn=number\n print(\"Setting Alarm: \",turn)\n\n def text1(self):\n if alarms[0][0]==\"null\":\n self.text_label1=\"Alarm 1\"\n\n else:\n self.text_label1= alarms[0][0]\n if alarms[1][0]==\"null\":\n self.text_label2=\"Alarm 2\"\n\n else:\n self.text_label2= alarms[1][0]\n\n if alarms[2][0]==\"null\":\n self.text_label3=\"Alarm 3\"\n\n else:\n self.text_label3= alarms[2][0]\n\n if alarms[3][0]==\"null\":\n self.text_label4=\"Alarm 4\"\n\n else:\n self.text_label4= alarms[3][0]\n if alarms[4][0]==\"null\":\n self.text_label5=\"Alarm 5\"\n\n else:\n self.text_label5= alarms[4][0]\n\n def get_switch_state(self, id):\n if (alarms[id-1][2] == \"on\"):\n return True\n else:\n return False\n\n # Change status of alarm\n def toogle_status(self, object, active, id):\n if (active):\n alarms[id-1][2] = \"on\"\n else:\n alarms[id-1][2] = \"off\"\n alarm_txt_update()\n\n def text(self,num2):\n if alarms[num2-1][0]==\"null\":\n return \"Alarm\"+str(num2)\n\n else:\n return alarms[num2-1][0]\n\n\nclass ClockLabel(Label):\n def update(self, *args):\n self.text = time.strftime('%I:%M:%S %p')\n\n\nclass Selection(Screen):\n pass\n\nclass Captcha(Screen):\n\n def get_captcha(self):\n charset = string.ascii_letters + string.digits\n return ''.join(random.choice(charset) for i in range(10))\n\n def validate_captcha(self):\n captcha = self.ids.captcha\n text_input = self.ids.text_input\n result = (captcha.text == text_input.text)\n captcha.text = self.get_captcha()\n text_input.text = \"\"\n return result\n\n def get_screen(self, value):\n if(value):\n #stop_alarm()\n sound.stop()\n return \"New\"\n else:\n return \"Captcha\"\n\n\n\ncorrect_counter = 0\noptions = []\nanswer = \"\"\nquestion = \"\"\ndef get_quiz():\n global question,lis,options,answer\n options.clear()\n question = \"\"\n answer = \"\"\n quiz = getQuestion()\n question = quiz['question']\n answer = quiz['options'][quiz['answer']]\n for i in quiz['options'].keys():\n options.append(quiz['options'][i])\nget_quiz()\n\n\nclass Quiz(Screen):\n global options,answer,question\n global correct_counter\n quiz_question = StringProperty('')\n option1 = StringProperty('')\n option2 = StringProperty('')\n option3 = StringProperty('')\n option4 = StringProperty('')\n \n def new_quiz(self):\n get_quiz()\n 
self.get_question()\n self.get_option()\n\n\n def get_question(self):\n #return \"Question 1\"\n global question \n self.quiz_question = question \n #return quiz_question\n\n def get_option(self):\n #self.get_question()\n self.option1 = str(options[0])\n self.option2 = str(options[1])\n self.option3 = str(options[2])\n self.option4 = str(options[3])\n \n def validate(self,id):\n print(options[id])\n \n if(str(options[id]) == str(answer)):\n #print(\"TRUE\")\n return True\n else:\n #print(\"FALSE\")\n return False\n\n def validate_quiz(self, answer):\n global correct_counter\n if(answer):\n correct_counter += 1\n if(correct_counter < 5):\n #get_quiz()\n sm.current = \"Quiz\"\n sm.transition.direction = \"right\"\n return None\n correct_counter = 0\n sound.stop()\n sm.current = \"New\"\n sm.transition.direction = \"right\"\n return None\n else:\n sm.current = \"Quiz\"\n sm.transition.direction = \"right\"\n\n def snooze_button(self):\n global turn\n a,b=alarms[turn][0].split(\":\")\n b, c = b.split()\n a = int(a)\n b = int(b)\n b += 10\n if b >= 60:\n b -= 60\n a += 1\n if a > 11:\n\n if c == \"PM\":\n c = \"AM\"\n else:\n c = \"PM\"\n if a < 10:\n a_str = \"0\" + str(a)\n else:\n a_str = str(a)\n if b < 10:\n b_str = \"0\" + str(b)\n else:\n b_str = str(b)\n time_str = a_str + \":\" + b_str + \" \" + c\n alarms[turn][0]=time_str\n alarm_txt_update()\n sound.stop()\n sm.current=\"New\"\n sm.transition.direction=\"left\"\n\n\n\nsm=ScreenManager()\nclass My1App(App):\n def build(self):\n\n sm.add_widget(New(name=\"New\"))\n sm.add_widget(Setting(name=\"Setting\"))\n sm.add_widget(Selection(name=\"Selection\"))\n sm.add_widget(Captcha(name=\"Captcha\"))\n sm.add_widget(Quiz(name=\"Quiz\"))\n\n return sm\n\n\nif __name__==\"__main__\":\n My1App().run()","sub_path":"Python-Mini-Project-master/Python-Mini-Project-master/001 final_draft/test2_with_quiz.py","file_name":"test2_with_quiz.py","file_ext":"py","file_size_in_byte":8037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"645949705","text":"\nimport sqlalchemy\nfrom sqlalchemy import orm, Column, types, inspect\nfrom sqlalchemy.orm.exc import UnmappedClassError\n\nfrom alchy import model, query, manager, events\n\nfrom tests.base import TestQueryBase\nfrom tests import fixtures\nfrom tests.fixtures import Foo, Bar, Baz, Qux, AutoGenTableName, MultiplePrimaryKey, Model\n\n\nclass TestModel(TestQueryBase):\n\n records = {\n 'Foo': [{\n '_id': 100,\n 'string': 'foo',\n 'number': 3,\n 'boolean': False,\n 'ignored_field': 'bar'\n }]\n }\n\n def assertRecordValid(self, record, data):\n self.assertEqual(record._id, data['_id'])\n self.assertEqual(record.string, data['string'])\n self.assertEqual(record.number, data['number'])\n self.assertEqual(record.boolean, data['boolean'])\n\n self.assertFalse(hasattr(record, 'ignored_field'))\n\n def assertIsSubset(self, subset, superset):\n self.assertTrue(all(item in superset.items() for item in subset.items()))\n\n def assertIsNotSubset(self, subset, superset):\n self.assertRaises(AssertionError, self.assertIsSubset, subset, superset)\n\n def test_update(self):\n data = self.records['Foo'][0]\n\n record = Foo()\n\n # it should accept a dict\n record.update(data)\n self.assertRecordValid(record, data)\n\n record = Foo()\n\n # it should accept keyword args\n record.update(**data)\n self.assertRecordValid(record, data)\n\n # it should be used by __init__\n self.assertRecordValid(Foo(data), data)\n self.assertRecordValid(Foo(**data), data)\n\n def 
test_update_nested(self):\n bar = Bar.get(1)\n test = {'foo': {'string': 'BAR'}}\n\n bar.update(test)\n self.db.commit()\n foo = Foo.get(bar.foo_id)\n\n self.assertEqual(foo.string, test['foo']['string'])\n\n def test_update_null_relationship_with_empty_dict(self):\n bar = Bar.get(4)\n\n self.assertIsNone(bar.foo)\n\n test = {'foo': {}}\n\n bar.update(test)\n\n self.assertIsNone(bar.foo)\n\n def test_update_strict(self):\n bar = Bar.get(1)\n test = {'foo': {'string': 'BAR'}}\n\n bar.update(test, strict=True)\n self.db.commit()\n foo = Foo.get(bar.foo_id)\n\n self.assertNotEqual(foo.string, test['foo']['string'])\n\n def test_query_property(self):\n self.assertIsInstance(Foo.query, query.Query)\n self.assertEqual(self.db.query(Foo).filter_by(number=3).all(), Foo.query.filter_by(number=3).all())\n\n def test_query_class_missing_default(self):\n \"\"\"Test that models defined with query_class=None have default Query class for query_property\"\"\"\n class TestModel(Model):\n __tablename__ = 'test'\n _id = Column(types.Integer(), primary_key=True)\n\n query_class = None\n\n self.db.create_all()\n\n self.db.add_commit(TestModel(), TestModel())\n\n records = self.db.query(TestModel).all()\n\n self.assertTrue(len(records) > 0)\n self.assertEqual(\n TestModel.query.all(),\n records,\n \"Model's query property should return same results as session query\"\n )\n self.assertIsInstance(\n TestModel.query,\n query.Query,\n \"Model's query property should be an instance of query.Query\"\n )\n\n def test_query_property_with_unmapped(self):\n class Unmapped(object):\n query = model.QueryProperty(None)\n\n self.assertRaises(UnmappedClassError, lambda: Unmapped.query)\n\n def test_to_dict_with_lazy(self):\n data = fixtures.data['Foo'][0]\n record = self.db.query(Foo).get(data['_id'])\n\n as_dict = record.to_dict()\n\n # it should use default loading which is lazy\n self.assertIsSubset(data, as_dict)\n self.assertEqual(set(as_dict.keys()), set(['_id', 'string', 'number', 'boolean']))\n\n def test_to_dict_with_joined(self):\n data = fixtures.data['Foo'][0]\n record = self.db.query(Foo).options(\n orm.joinedload('bars').joinedload('bazs'),\n orm.joinedload('quxs')\n ).get(data['_id'])\n\n as_dict = record.to_dict()\n\n # it should load relationships\n self.assertIsSubset(data, as_dict)\n self.assertEqual(set(as_dict.keys()), set(['_id', 'string', 'number', 'boolean', 'quxs', 'bars']))\n\n # and relationship's relationships\n self.assertIn('bazs', as_dict['bars'][0])\n\n def test_to_dict_after_commit(self):\n record = Foo()\n self.assertEqual(record.to_dict(), {})\n\n self.db.add_commit(record)\n self.assertEqual(record.to_dict(refresh_on_empty=False), {})\n self.assertNotEqual(record.to_dict(), {})\n\n def test_dict_to_dict(self):\n data = Foo.get(1)\n\n self.assertEqual(dict(data), data.to_dict())\n\n def test_to_dict_hook(self):\n foo = Foo.get(1)\n\n def bar_to_dict():\n return [i for i, bar in enumerate(foo.bars)]\n\n foo.bars.to_dict = bar_to_dict\n data = foo.to_dict()\n self.assertEqual(data['bars'], bar_to_dict())\n\n def test_attrs(self):\n baz = Baz.get(1)\n\n # it should be a class and instance property\n self.assertEqual(Baz.attrs, baz.attrs)\n self.assertEqual(set(Baz.attrs), set(['_id', 'string', 'number', 'bar_id', 'bar']))\n\n def test_columns(self):\n baz = Baz.get(1)\n\n # it should be a class and instance property\n self.assertEqual(Baz.columns, baz.columns)\n self.assertEqual(set(Baz.columns), set(['_id', 'string', 'number', 'bar_id']))\n\n def test_column_attrs(self):\n baz = Baz.get(1)\n\n 
# it should be a class and instance property\n        self.assertEqual(Baz.column_attrs, baz.column_attrs)\n        self.assertEqual(\n            set(Baz.column_attrs),\n            set([Baz._id.property, Baz.string.property, Baz.number.property, Baz.bar_id.property])\n        )\n\n    def test_descriptors(self):\n        baz = Baz.get(1)\n\n        # it should be a class and instance property\n        self.assertEqual(Baz.descriptors, baz.descriptors)\n        self.assertEqual(set(Baz.descriptors), set(['_id', 'string', 'number', 'bar_id', 'bar', 'hybrid_number']))\n\n    def test_relationships(self):\n        baz = Baz.get(1)\n\n        # it should be a class and instance property\n        self.assertEqual(Baz.relationships, baz.relationships)\n        self.assertEqual(set(Baz.relationships), set(['bar']))\n\n    def test_get(self):\n        self.assertEqual(Foo.get(1), self.db.query(Foo).get(1))\n\n    def test_get_by(self):\n        self.assertEqual(Foo.get_by(string='Joe Smith'), self.db.query(Foo).filter_by(string='Joe Smith').first())\n        self.assertEqual(Foo.get_by(dict(string='Joe Smith')), self.db.query(Foo).filter_by(string='Joe Smith').first())\n\n    def test_session(self):\n        record = Foo.get(1)\n        self.assertIs(record.session, self.db.session.object_session(record))\n\n    def test_flush(self):\n        record = Baz()\n        self.db.add(record)\n\n        self.assertIsNone(record._id)\n\n        record.flush()\n\n        self.assertIsNotNone(record._id)\n\n    def test_delete(self):\n        record = Baz()\n        self.db.add_commit(record)\n\n        _id = record._id\n\n        self.assertIsNotNone(Baz.get(_id))\n\n        record.delete()\n        self.db.commit()\n\n        self.assertIsNone(Baz.get(_id))\n\n    def test_save(self):\n        record = Foo.get(1)\n        new_number = record.number * record.number + 1\n        record.number = new_number\n\n        record.save()\n        self.db.commit()\n\n        result = self.db.execute('select number from foo where _id=1')\n        self.assertEqual(result.fetchone()[0], new_number)\n\n    def test_expire(self):\n        record = Foo.get(1)\n\n        number = record.number\n        new_number = number * number + 1\n\n        # execute non-ORM transaction\n        self.db.execute('update foo set number = :n where _id = 1', params={'n': new_number})\n\n        # its value hasn't changed\n        self.assertEqual(record.number, number)\n\n        record.expire()\n\n        # its values are empty\n        self.assertEqual(record.to_dict(refresh_on_empty=False), {})\n\n        # its values are reloaded on access\n        self.assertEqual(record.number, new_number)\n        self.assertNotEqual(record.to_dict(refresh_on_empty=False), {})\n\n    def test_refresh(self):\n        record = Foo.get(1)\n\n        number = record.number\n        new_number = number * number + 1\n\n        # execute non-ORM transaction\n        self.db.execute('update foo set number = :n where _id = 1', params={'n': new_number})\n\n        # its value hasn't changed\n        self.assertEqual(record.number, number)\n\n        record.refresh()\n\n        # its values are reloaded immediately\n        self.assertNotEqual(record.to_dict(), {})\n        self.assertEqual(record.number, new_number)\n\n    def test_expunge(self):\n        _id = 10\n        record = Foo(_id=_id)\n\n        # add record to session, expunge it, and then commit\n        self.db.add(record)\n        record.expunge()\n        self.db.commit()\n\n        # it should not have been added to the database\n        self.assertIsNone(Foo.get(_id))\n\n    def test_autogenerated_tablename(self):\n        self.assertEqual(AutoGenTableName.__tablename__, 'auto_gen_table_name')\n\n    def test_single_primary_key(self):\n        self.assertEqual(Foo.primary_key, inspect(Foo).primary_key[0])\n\n    def test_multiple_primary_keys(self):\n        self.assertEqual(MultiplePrimaryKey.primary_key, 
inspect(MultiplePrimaryKey).primary_key)\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":9637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"401207865","text":"# coding=utf-8\nimport datetime\nimport pandas as pd\nimport gen_k_data as gkd\nfrom zibiao import ZB\n\npd.set_option(\"display.max_columns\", 500)\n\n\nclass BuySelector(object):\n\n    @classmethod\n    def gen_date(cls, dt, target_ktype=\"60\"):\n\n        ns_30 = [\n            (930, 1000), (1000, 1030), (1030, 1100), (1100, 1130),\n            (1300, 1330), (1330, 1400), (1400, 1430), (1430, 1500)\n        ]\n\n        ns_60 = [\n            (930, 1030), (1030, 1130), (1300, 1400), (1400, 1500)\n        ]\n        k = target_ktype\n\n        year, month, day = dt.year, dt.month, dt.day\n        num = int(str(dt.hour).zfill(2) + str(dt.minute).zfill(2))\n\n        if k == \"D\":\n            _time_str = \"0000\"\n            return datetime.datetime.strptime(f\"{year}-{month}-{day} {_time_str}\", \"%Y-%m-%d %H%M\")\n\n        if k == \"30\":\n            for start, end in ns_30:\n                if start < num <= end:\n                    _time_str = str(end).zfill(4)\n                    return datetime.datetime.strptime(f\"{year}-{month}-{day} {_time_str}\", \"%Y-%m-%d %H%M\")\n\n        if k == \"60\":\n            for start, end in ns_60:\n                if start < num <= end:\n                    _time_str = str(end).zfill(4)\n                    return datetime.datetime.strptime(f\"{year}-{month}-{day} {_time_str}\", \"%Y-%m-%d %H%M\")\n\n        # no session window matched - fall back to midnight\n        _time_str = \"0000\"\n        return datetime.datetime.strptime(f\"{year}-{month}-{day} {_time_str}\", \"%Y-%m-%d %H%M\")\n\n    @classmethod\n    def match(cls, df, selected_col_name=\"selected\"):\n\n        if df.empty:\n            return False\n\n        tdf = df.tail(1)\n        tdf = tdf[tdf[selected_col_name]]\n        if tdf.empty:\n            return False\n\n        return True\n\n    @classmethod\n    def select_x003(cls, st_df):\n        \"\"\"\n        k30\n        \"\"\"\n        name = \"selected\"\n\n        mdi_df = ZB.mdi(st_df)\n\n        x1 = mdi_df[\"DIFF\"] > mdi_df[\"DIFF\"].shift(1)\n        x2 = ZB.cross(mdi_df['ADXR'], mdi_df['ADX'])\n\n        t1 = (mdi_df.index.hour > 10) & (mdi_df.index.hour <= 14)\n        mdi_df[name] = False\n\n        mdi_df.loc[t1 & x1 & x2, name] = True\n\n        return mdi_df.loc[:, [name]]\n\n    @classmethod\n    def select_x004(cls, st_df):\n        \"\"\"\n        k60\n        \"\"\"\n        name = \"selected\"\n\n        mdi_df = ZB.mdi(st_df)\n        x1 = mdi_df['DIFF'] > mdi_df['DIFF'].shift(1)\n\n        mdi_df[name] = False\n\n        mdi_df.loc[x1, name] = True\n\n        return mdi_df.loc[:, [name]]\n\n    @classmethod\n    def select_x005(cls, st_df):\n        \"\"\"\n        kd\n        \"\"\"\n        name = \"selected\"\n\n        kdj_df = ZB.kdj(st_df)\n\n        x1 = kdj_df['kdj_k'] > kdj_df['kdj_k'].shift()\n\n        kdj_df[name] = False\n\n        kdj_df.loc[x1, name] = True\n\n        return kdj_df.loc[:, [name]]\n\n    @classmethod\n    def search(cls, df30):\n\n        buy_30 = BuySelector.select_x003(df30)\n        buy_30 = buy_30[buy_30['selected']]\n\n        for index, row in buy_30.iterrows():\n            base_df = df30[df30.index <= index]\n\n            df60 = gkd.Generator.generate_k_data(base_df, ktype=\"60\").tail(300)\n            buy_60 = BuySelector.select_x004(df60)\n            match_60 = cls.match(buy_60)\n\n            if not match_60:\n                continue\n\n            df240 = gkd.Generator.generate_k_data(base_df, ktype=\"D\").tail(300)\n            buy_240 = BuySelector.select_x005(df240)\n            match_240 = cls.match(buy_240)\n\n            if match_60 and match_240:\n                yield index\n\n\nclass SaleSelector(object):\n\n    @classmethod\n    def gen_next_date(cls, dt):\n        year, month, day = dt.year, dt.month, dt.day\n\n        next_start_dt = datetime.datetime(year, month, day, 9, 30) + datetime.timedelta(days=1)\n        return next_start_dt\n\n    @classmethod\n    def select_x004(cls, st_df):\n        \"\"\"\n        k60\n        \"\"\"\n        name = \"selected\"\n\n        mdi_df = ZB.mdi(st_df, d=6)\n        kdj_df = ZB.kdj(st_df)\n\n        x1 = mdi_df[\"DIFF\"] < mdi_df[\"DIFF\"].shift(1)\n        k1 = kdj_df[\"kdj_d\"] < kdj_df[\"kdj_d\"].shift(1)\n\n        max_close = st_df['high'].rolling(center=False, min_periods=1, window=6).max()\n        x2 = st_df['close'] <= (max_close - (max_close * 0.03))\n\n        mdi_df[name] = False\n\n        mdi_df.loc[(x1 & k1) | x2, name] = True\n\n        return mdi_df.loc[:, [name]]\n\n    @classmethod\n    def find_sale_point(cls, dt, sale_point_df):\n        find_df = sale_point_df[sale_point_df.index >= dt].head(1)\n        if find_df.empty:\n            return None\n\n        for index, row in find_df.iterrows():\n            return index\n\n\n","sub_path":"src/myselect/buy_sale.py","file_name":"buy_sale.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"471210863","text":"# Contains the uncertainty filter and the unknown nonlinear gain between the em_input and poola_out\nerror_observed = mpc_plant[:,:nx]-mpc_model_reinitialize\nem_input = mpc_control-NP.kron(u0,NP.ones((index_mpc+1,1)))\n\n# UF is the linear model error model\nUF_in = deepcopy(em_input)\nUF_out = NP.zeros(mpc_model_reinitialize.shape) # predicted error\nUF_NU_out = NP.zeros(mpc_model_reinitialize.shape) # predicted error gain update\n\ne0_sim_UF = deepcopy(e0)\ne0_sim_UF_NU = deepcopy(e0)\nUF_out[0,:] = e0_sim_UF\nUF_NU_out[0,:] = e0_sim_UF_NU\nfor index_1 in range(1,mpc_control.shape[0]):\n\n    e0_sim_UF = error_simulator(uncertainty_model['A'], uncertainty_model['B'], uncertainty_model['C'], uncertainty_model['D'], e0_sim_UF, em_input[index_1,:])\n\n    e0_sim_UF_NU = error_simulator(uncertainty_model_norm_update['A'], uncertainty_model_norm_update['B'], uncertainty_model_norm_update['C'], uncertainty_model_norm_update['D'], e0_sim_UF_NU, em_input[index_1,:])\n\n    UF_out[index_1,:] = NP.squeeze(e0_sim_UF)\n    UF_NU_out[index_1,:] = NP.squeeze(e0_sim_UF_NU)\n\n# Store the norm value\nnorm_val = NP.ones(nx)\nnorm_val_NU = NP.ones(nx)\nfor index_x in range(nx):\n    gamma_val = abs(error_observed[-1, index_x])/abs(e0_sim_UF[index_x])\n    norm_val[index_x] = gamma_val\n\n    gamma_val = abs(error_observed[-1, index_x])/abs(e0_sim_UF_NU[index_x])\n    norm_val_NU[index_x] = gamma_val\n\n    if gamma_val > 1.0:\n        uncertainty_model_norm_update['B'][index_x,:] = uncertainty_model_norm_update['B'][index_x,:]*gamma_val\n\n# Get the error model after norm update\nmpc_error_NU = deepcopy(UF_NU_out)\n\n# ########################################################################################################################\n# # Overwrite the NLP solver parameters\n# ########################################################################################################################\noffset_w = 0\n# System matrix A\nms_arg['p'][offset_w:offset_w+nx*nx] = uncertainty_model_norm_update['A'].reshape((nx*nx))\noffset_w += nx*nx\n# Input matrix B\nms_arg['p'][offset_w:offset_w+nx*nu] = uncertainty_model_norm_update['B'].reshape((nx*nu))\noffset_w += nx*nu\n# Output matrix C\nms_arg['p'][offset_w:offset_w+nx*nx] = uncertainty_model_norm_update['C'].reshape((nx*nx))\noffset_w += nx*nx\n# No feed forward D (Eliminate)\nms_arg['p'][offset_w:offset_w+nx*nu] = uncertainty_model_norm_update['D'].reshape((nx*nu))\noffset_w += nx*nu\n# Model error model initial point\nms_arg['p'][offset_w:offset_w+nx] = mpc_error_NU[-1,:]\noffset_w += nx\n# Previous control input\nms_arg['p'][offset_w:offset_w+nu] = u_mpc\noffset_w += nu\n\nassert (offset_w == 
ms_arg['p'].shape[0])","sub_path":"without_meas_error/python_script/update_norm_MEM_backup.py","file_name":"update_norm_MEM_backup.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"545736134","text":"from enum import IntEnum\nfrom decimal import *\nfrom .ISerialize import ISerialize\n\n\nclass Formula(ISerialize):\n\n\tclass Methods(IntEnum):\n\t\tSIMPLE = 1\n\t\tENOUGH = 2\n\t\tTOO = 3\n\t\tNOT = 4\n\t\t\n\tdef __init__(self, num, denom = None, method = None, debug = False):\n\t\tif type(num) is dict:\n\t\t\tself.load_dict(num)\n\t\t\treturn\n\t\t\n\t\tfor v in (num, denom):\n\t\t\tassert isinstance(v, (int, float, Decimal))\n\t\t\n\t\tif (method is None) or (type(method) is not self.Methods):\n\t\t\tmethod = self.Methods.SIMPLE\n\t\t\t\n\t\tself.num = Decimal(num)\n\t\tself.denom = Decimal(denom)\n\t\tself.method = method\n\t\tself.debug = debug\n\t\t\n\t\tgetcontext().prec = 10\n\t\t\n\tdef calculate(self, value):\n\t\tMethods = self.Methods\n\t\tmethods = { Methods.SIMPLE : self.__simple,\n\t\t\t\t\tMethods.ENOUGH : self.__enough,\n\t\t\t\t\tMethods.TOO : self.__too,\n\t\t\t\t\tMethods.NOT : self.__not}\n\t\t\n\t\tvalue = Decimal(value)\n\t\tbase = self.__base(value)\n\t\t\n\t\tif self.debug:\n\t\t\tprint(str(1/(1+methods[self.method](base))))\n\t\t\t\n\t\treturn float(1 / (1 + methods[self.method](base)))\n\t\t\n\tdef __base(self, value):\n\t\treturn abs((value - self.num)/self.denom)\n\t\t\n\tdef __simple(self, base):\n\t\treturn base\n\t\t\n\tdef __enough(self, base):\n\t\treturn base.sqrt()\n\t\t\n\tdef __too(self, base):\n\t\treturn base ** 2\n\t\t\n\tdef __not(self, base):\n\t\treturn abs(1-base)\n\t\n\tdef __eq__(self, other):\n\t\treturn self.num == other.num \\\n\t\t and self.denom == other.denom \\\n\t\t and self.method == other.method\n\t\n\t\n\t# ISerialize\n\t__TO_SAVE = [\"num\", \"denom\", \"debug\"]\n\t\n\tdef get_dict(self):\n\t\tret = {}\n\t\tfor name in self.__TO_SAVE:\n\t\t\tvalue = getattr(self, name, None)\n\t\t\tif value is not None:\n\t\t\t\tif type(value) is Decimal:\n\t\t\t\t\tvalue = float(value)\n\t\t\t\tret[name] = value\n\t\t\n\t\tret[\"method\"] = int(self.method)\n\t\t\n\t\treturn ret\n\t\t\n\tdef load_dict(self, data):\n\t\tfor name, value in data.items():\n\t\t\tif name in self.__TO_SAVE:\n\t\t\t\tsetattr(self, name, value if type(value) is bool else Decimal(value))\n\t\t\n\t\tif \"method\" in data:\n\t\t\tsetattr(self, \"method\", self.Methods(data[\"method\"]))\n","sub_path":"req/Formula.py","file_name":"Formula.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"498632246","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.lines as mlines\nimport seaborn as sns\nmpl.rcParams['pdf.fonttype'] = 42\n\nh1 = np.load('/home/nquach/DeepCell2/trained_networks/2016-08-02_ecoli_all_31x31_bn_feature_net_31x31_0.npz')['loss_history'][()]\nh2 = np.load('/home/nquach/DeepCell2/trained_networks/2016-08-02_ecoli_all_31x31_bn_feature_net_31x31_1.npz')['loss_history'][()]\nh3 = np.load('/home/nquach/DeepCell2/trained_networks/2016-08-02_ecoli_all_31x31_bn_feature_net_31x31_2.npz')['loss_history'][()]\nh4 = np.load('/home/nquach/DeepCell2/trained_networks/2016-08-02_ecoli_all_31x31_bn_feature_net_31x31_3.npz')['loss_history'][()]\nh5 = 
np.load('/home/nquach/DeepCell2/trained_networks/2016-08-02_ecoli_all_31x31_bn_feature_net_31x31_4.npz')['loss_history'][()]\n\ne1 = np.subtract(1, h1['acc'])\ne2 = np.subtract(1, h2['acc'])\ne3 = np.subtract(1, h3['acc'])\ne4 = np.subtract(1, h4['acc'])\ne5 = np.subtract(1, h5['acc'])\nestack = np.stack([e1, e2, e3, e4, e5], axis=0)\nemu = np.mean(estack, axis = 0)\nes = np.std(estack, axis = 0)\n\nv1 = np.subtract(1, h1['val_acc'])\nv2 = np.subtract(1, h2['val_acc'])\nv3 = np.subtract(1, h3['val_acc'])\nv4 = np.subtract(1, h4['val_acc'])\nv5 = np.subtract(1, h5['val_acc'])\nvstack = np.stack([v1, v2, v3, v4, v5], axis=0)\nvmu = np.mean(vstack, axis=0)\nvs = np.std(vstack, axis=0)\n\nepoch = np.arange(1, len(e1)+1, 1)\n\nsns.set_style(\"white\")\nsolid = mlines.Line2D([], [], color='black', linestyle = '-', label = 'Training')\ndashed = mlines.Line2D([],[], color='black', linestyle = '--', label= 'Validation')\n\nplt.figure(0)\nplt.plot(epoch, e1, 'k-', epoch, v1, 'k--', epoch, e2, 'k-', epoch, v2, 'k--', epoch, e3, 'k-', epoch, v3, 'k--', epoch, e4, 'k-', epoch, v4, 'k--', epoch, e5, 'k-', epoch, v5, 'k--')\nplt.xlabel('Epoch')\nplt.ylabel('Error')\nplt.legend(handles=[solid,dashed])\nplt.title('Training and validation error: E. coli all 31x31 feature net')\nfilename = '/home/nquach/DeepCell2/prototypes/plots/080316_plots/bn_feature_net_31x31_ecoli_all.pdf'\nplt.savefig(filename, format = 'pdf')\nplt.close()\n\nplt.figure(1)\nplt.errorbar(epoch, emu, yerr = es, ls = '-', color=(0.835,0.369,0))\nplt.errorbar(epoch, vmu, yerr = vs, ls= '--', color=(0.835,0.369,0))\nplt.xlabel('Epoch')\nplt.ylabel('Error')\nplt.title('Average training and validation error: E. coli 31x31 feature net')\nplt.legend(['training','validation'], loc='upper right')\nfilename = '/home/nquach/DeepCell2/prototypes/plots/080316_plots/bn_feature_net_31x31_ave_ecoli_all.pdf'\nplt.savefig(filename, format='pdf')\nplt.close()\n\n\n\n","sub_path":"prototypes/080316_plotter.py","file_name":"080316_plotter.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"404590961","text":"# foo returns a globally-accessible mutable value, so if you memoize its\n# return value to disk, then re-using that memoized value will lead to\n# different behavior if you mutate the value\n\n# (I hope this isn't a common case in practice ... 
but we must be able\n# to automatically detect it and deal accordingly)\n\nimport time\n\n# contains one field named b of type B\nclass A:\n def __eq__(self, other):\n if not isinstance(other, A):\n return False\n return self.b == other.b\n\n# contains one field named x of type int\nclass B:\n def __eq__(self, other):\n if not isinstance(other, B):\n return False\n return self.x == other.x\n\n\nmyA = A()\nmyA.b = B()\n\ndef foo(a):\n time.sleep(0.2)\n return a.b\n\n# foo now has a global read dependency on myA, and\n# its return value is myA.b\nb = foo(myA)\n\nmyA.b.x = 10\nb.x = 20 # this should be an alias for myA.b\n\n# if myA.b.x and b.x are NOT the same object, then this assertion will fail\nassert myA.b.x == b.x\n\n\n# let's try again ...\nb = foo(myA)\n\nmyA.b.x = 10\nb.x = 20 # this is an alias for myA.b\n\n# if myA.b.x and b.x are NOT the same object, then this assertion will fail\nassert myA.b.x == b.x\n\n","sub_path":"ret_shared_1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"27988618","text":"\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport subprocess\nimport platform\nimport glob\n\ntry:\n import urllib.request as urllib\nexcept ImportError:\n import urllib2 as urllib\n\n\nclass PackageMixIn(object):\n def _isCentOS(self):\n return platform.linux_distribution()[0].startswith('CentOS')\n\n def _isFedora(self):\n return os.path.exists('/etc/fedora-release')\n\n def _isGentoo(self):\n return os.path.exists('/etc/gentoo-release')\n\n def _isArchLinux(self):\n # There are cases when platform.linux_distribution() is empty on Arch\n return (os.path.exists('/etc/arch-release') or\n platform.linux_distribution()[0].startswith('arch'))\n\n def _isDebian(self):\n return platform.linux_distribution()[0].startswith('debian')\n\n def _isUbuntu(self):\n return platform.linux_distribution()[0].startswith('Ubuntu')\n\n def _isOracleLinux(self):\n return platform.linux_distribution()[0].startswith('Oracle Linux')\n\n def _isRHEL(self):\n return platform.linux_distribution()[0].startswith('Red Hat Enterprise Linux')\n\n def isOpenSUSE(self):\n return platform.linux_distribution()[0].startswith('openSUSE')\n\n def isSLES(self):\n return platform.linux_distribution()[0].startswith('SUSE Linux Enterprise')\n\n def _isMacOS(self):\n return platform.system() == 'Darwin'\n\n def _isAMD64(self):\n return platform.machine() == 'x86_64'\n\n def _osCodeName(self):\n codename = subprocess.check_output(['lsb_release', '-cs'])\n\n try:\n codename = str(codename, 'utf8')\n except:\n pass\n\n return codename.strip()\n\n def _requireDeb(self, packages):\n apt_get = self._which('apt-get')\n\n if apt_get:\n if not isinstance(packages, list):\n packages = [packages]\n\n self._trySudoCall(\n [apt_get, 'install', '-y'] + packages,\n errmsg='you may need to install the packages manually !'\n )\n\n def _requireYum(self, packages):\n yum = self._which('dnf')\n\n if not yum:\n yum = self._which('yum')\n\n if yum:\n if not isinstance(packages, list):\n packages = [packages]\n\n self._trySudoCall(\n [yum, 'install', '-y'] + packages,\n errmsg='you may need to install the packages manually !'\n )\n\n def _requireZypper(self, packages):\n zypper = self._which('zypper')\n\n if zypper:\n if not isinstance(packages, list):\n packages = [packages]\n\n self._trySudoCall(\n [zypper, 'install', '-y'] + packages,\n errmsg='you may need to install the packages manually !'\n )\n\n 
def _requireRpm(self, packages):\n self._requireYum(packages)\n self._requireZypper(packages)\n\n def _requirePackages(self, packages):\n self._requireDeb(packages)\n self._requireRpm(packages)\n\n def _requireEmerge(self, packages):\n emerge = self._which('emerge')\n\n if emerge:\n if not isinstance(packages, list):\n packages = [packages]\n\n self._trySudoCall(\n [emerge] + packages,\n errmsg='you may need to install the build deps manually !'\n )\n\n def _requireEmergeDepsOnly(self, packages):\n if not isinstance(packages, list):\n packages = [packages]\n\n self._requireEmerge(['--onlydeps'] + packages)\n\n def _requirePacman(self, packages):\n pacman = self._which('pacman')\n\n if pacman:\n if not isinstance(packages, list):\n packages = [packages]\n\n self._trySudoCall(\n [pacman, '-S', '--noconfirm', '--needed'] + packages,\n errmsg='you may need to install the build deps manually !'\n )\n\n def _addAptRepo(self, name, entry, gpg_key=None, codename_map=None, repo_base=None):\n self._requireDeb([\n 'software-properties-common',\n 'apt-transport-https',\n 'ca-certificates',\n 'lsb-release',\n ])\n apt_add_repository = self._which('apt-add-repository')\n\n if not apt_add_repository:\n return\n\n if gpg_key:\n try:\n gpg_key = gpg_key.encode(encoding='UTF-8')\n except:\n pass\n\n tmp_dir = self._tmpCacheDir(prefix='cidgpg')\n tf = os.path.join(tmp_dir, 'key.gpg')\n self._writeBinaryFile(tf, gpg_key)\n\n self._trySudoCall(\n ['apt-key', 'add', tf],\n errmsg='you may need to import the PGP key manually!'\n )\n\n os.remove(tf)\n\n codename = self._osCodeName()\n\n if codename_map:\n try:\n repo_info = urllib.urlopen(\n '{0}/{1}'.format(repo_base, codename)).read()\n except:\n fallback_codename = codename_map.get(codename, codename)\n self._warn('Fallback to codename: {0}'.format(\n fallback_codename))\n codename = fallback_codename\n\n entry = entry.replace('$codename$', codename)\n\n self._trySudoCall(\n [apt_add_repository, '--yes', entry],\n errmsg='you may need to add the repo manually!'\n )\n\n self._trySudoCall(\n ['apt-get', 'update'],\n errmsg='you may need to update APT cache manually!'\n )\n\n def _addRpmKey(self, gpg_key):\n if not gpg_key:\n return\n\n rpm = self._which('rpm')\n\n if not rpm:\n return\n\n tmp_dir = self._tmpCacheDir(prefix='cidgpg')\n tf = os.path.join(tmp_dir, 'key.gpg')\n self._writeBinaryFile(tf, gpg_key)\n\n self._trySudoCall(\n [rpm, '--import', tf],\n errmsg='you may need to import the PGP key manually!'\n )\n\n os.remove(tf)\n\n def _addYumRepo(self, name, url, gpg_key=None, releasevermax=None):\n self._addRpmKey(gpg_key)\n\n dnf = self._which('dnf')\n yum = self._which('yum')\n\n if dnf:\n self._requireYum(['dnf-plugins-core'])\n repo_file = None\n\n if releasevermax is not None:\n dump = self._callExternal(\n [dnf, 'config-manager', '--dump'], verbose=False)\n for l in dump.split(\"\\n\"):\n l = l.split(' = ')\n\n if l[0] == 'releasever':\n if int(l[1]) > releasevermax:\n repo_info = urllib.urlopen(url).read()\n\n try:\n repo_info = str(repo_info, 'utf8')\n except:\n pass\n\n repo_info = repo_info.replace(\n '$releasever', str(releasevermax))\n\n tmp_dir = self._tmpCacheDir(prefix='cidrepo')\n repo_file = url.split('/')[-1]\n repo_file = os.path.join(tmp_dir, repo_file)\n\n with open(repo_file, 'w') as f:\n f.write(repo_info)\n\n url = repo_file\n break\n\n self._trySudoCall(\n [dnf, 'config-manager', '--add-repo', url],\n errmsg='you may need to add the repo manually!'\n )\n\n if repo_file:\n os.remove(repo_file)\n\n elif yum:\n 
self._requireYum(['yum-utils'])\n            yumcfgmgr = self._which('yum-config-manager')\n            self._trySudoCall(\n                [yumcfgmgr, '--add-repo', url],\n                errmsg='you may need to add the repo manually!'\n            )\n\n    def _addZypperRepo(self, name, url, gpg_key=None, yum=False):\n        self._addRpmKey(gpg_key)\n\n        zypper = self._which('zypper')\n\n        if zypper:\n            if yum:\n                cmd = [zypper, 'addrepo', '-t', 'YUM', url, name]\n            else:\n                cmd = [zypper, 'addrepo', url, name]\n\n            self._trySudoCall(\n                cmd,\n                errmsg='you may need to add the repo manually!'\n            )\n\n    def _requireYumEPEL(self):\n        if self._isOracleLinux() or self._isRHEL():\n            ver = platform.linux_distribution()[1].split('.')[0]\n            self._requireYum(\n                ['https://dl.fedoraproject.org/pub/epel/epel-release-latest-{0}.noarch.rpm'.format(ver)])\n        else:\n            self._requireYum(['epel-release'])\n\n    def _yumEnable(self, repo):\n        self._requireYum(['yum-utils'])\n\n        yumcfgmgr = self._which('yum-config-manager')\n\n        self._trySudoCall(\n            [yumcfgmgr, '--enable', repo],\n            errmsg='You may need to enable the repo manually'\n        )\n\n    def _isSCLSupported(self):\n        \"Check if Software Collections are supported\"\n        return (\n            self._isCentOS() or\n            self._isRHEL() or\n            self._isOracleLinux()\n        )\n\n    def _requireSCL(self):\n        if self._isRHEL():\n            self._yumEnable('rhel-server-rhscl-7-rpms')\n        elif self._isCentOS():\n            self._requireYum('centos-release-scl-rh')\n        elif self._isOracleLinux():\n            self._addYumRepo('public-yum-ol7',\n                             'http://yum.oracle.com/public-yum-ol7.repo')\n            self._yumEnable('ol7_software_collections')\n            self._yumEnable('ol7_latest')\n            self._yumEnable('ol7_optional_latest')\n\n        self._requireYum('scl-utils')\n\n    def _requireHomebrew(self, packages):\n        if not self._isMacOS():\n            return\n\n        if not isinstance(packages, list):\n            packages = [packages]\n\n        brew = self._which('brew')\n\n        if not brew:\n            curl = self._which('curl')\n            ruby = self._which('ruby')\n\n            # TODO: change to use env timeouts\n            brew_installer = self._callExternal([\n                curl, '-fsSL',\n                '--connect-timeout', '10',\n                '--max-time', '300',\n                'https://raw.githubusercontent.com/Homebrew/install/master/install'\n            ])\n\n            self._callExternal([ruby, '-e', brew_installer])\n\n            brew = self._which('brew')\n\n        for package in packages:\n            self._callExternal([brew, 'install', package])\n\n    def _requireDmg(self, packages):\n        if not self._isMacOS():\n            return\n\n        if not isinstance(packages, list):\n            packages = [packages]\n\n        curl = self._which('curl')\n        hdiutil = self._which('hdiutil')\n        installer = self._which('installer')\n        volumes_dir = '/Volumes'\n\n        for package in packages:\n            base_name = package.split('/')[-1]\n            local_name = os.path.join(os.environ['HOME'], base_name)\n\n            # TODO: change to use env timeouts\n            self._callExternal([\n                curl,\n                '-fsSL',\n                '--connect-timeout', '10',\n                '--max-time', '300',\n                '-o', local_name,\n                package\n            ])\n\n            volumes = set(os.listdir(volumes_dir))\n            self._trySudoCall([hdiutil, 'attach', local_name])\n            # sets do not support indexing - pop the newly mounted volume\n            volume = (set(os.listdir(volumes_dir)) - volumes).pop()\n\n            # install the first .pkg found on the mounted image\n            pkg = glob.glob(os.path.join(volumes_dir, volume, '*.pkg'))[0]\n            self._trySudoCall([installer, '-package', pkg, '-target', '/'])\n\n            self._trySudoCall([hdiutil, 'detach', os.path.join(volumes_dir, volume)])\n","sub_path":"futoin/cid/mixins/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":11664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"507661674","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import *\n\ndef index(request):\n    if 'user_id' not in 
request.session:\n return redirect('/')\n else:\n user = User.objects.get(id=request.session['user_id'])\n context={\n \"user\": user,\n \"books\": Book.objects.all()\n }\n return render(request, \"submit_book.html\", context)\n\ndef add_books(request):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n errors=Book.objects.book_validator(request.POST)\n \n if len(errors)>0:\n for k, v in errors.items():\n messages.error(request,v)\n return redirect('/books')\n \n new_book = Book.objects.create(\n title = request.POST['title'], \n description = request.POST['description'],\n user_who_uploaded = User.objects.get(id=request.session['user_id']),\n )\n user = User.objects.get(id=request.session['user_id'])\n new_book.users_who_liked.add(user)\n\n return redirect('/books')\n\n\ndef more_info(request, book_id):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n context={\n \"book\": Book.objects.get(id=book_id),\n \"user\": User.objects.get(id=request.session['user_id'])\n }\n return render(request, \"more_info.html\", context)\n\ndef add_fav(request, book_id):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n user = User.objects.get(id=request.session['user_id'])\n new_fav = Book.objects.get(id=book_id)\n new_fav.users_who_liked.add(user)\n \n return redirect('/books')\n\n\ndef unfav(request, book_id):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n user = User.objects.get(id=request.session['user_id'])\n unfav = Book.objects.get(id=book_id)\n unfav.users_who_liked.remove(user)\n \n return redirect('/books')\n\ndef edit_book(request, book_id):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n errors=Book.objects.book_validator(request.POST)\n \n if len(errors)>0:\n for k, v in errors.items():\n messages.error(request,v)\n return redirect(f'/books/{book_id}')\n \n edited_book = Book.objects.get(id=book_id)\n edited_book.title = request.POST['title']\n edited_book.description = request.POST['description']\n edited_book.save()\n \n return redirect('/books')\n\ndef delete_book(request, book_id):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n deleted_book = Book.objects.get(id=book_id)\n deleted_book.delete()\n \n return redirect('/books')\n\ndef fav_page(request):\n if 'user_id' not in request.session:\n return redirect('/')\n else:\n user = User.objects.get(id=request.session['user_id'])\n context={\n \"fav_books\": user.books_liked.all()\n }\n return render(request, \"fav_page.html\", context)\n","sub_path":"django/django_fullstack/favorite_books/favorite_books_project/favorite_books_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"19071782","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015-2016, Exa Analytics Development Team\n# Distributed under the terms of the Apache License 2.0\n\"\"\"\nNWChem Editor\n##################\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom io import StringIO\nfrom exatomic.container import Universe\nfrom exatomic.editor import Editor as AtomicEditor\nfrom exatomic.basis import BasisSetSummary\ntry:\n from exatomic.algorithms.basis import spher_lml_count, cart_lml_count, rlmap\nexcept ImportError:\n from exatomic.algorithms.basis import spher_ml_count, cart_ml_count, lmap, lorder\n rlmap = {value: key for key, value in lmap.items() if len(key) == 1}\n spher_lml_count = {lorder.index(key): 
value for key, value in spher_ml_count.items()}\n cart_lml_count = {lorder.index(key): value for key, value in cart_ml_count.items()}\n\n\nclass Editor(AtomicEditor):\n \"\"\"\n Base NWChem editor\n \"\"\"\n def _expand_summary(self):\n '''\n Adds basis set information to the basis set summary table.\n Requires a parsed basis set object.\n '''\n if any('bas_' in col for col in self.basis_set_summary):\n return\n #lcounts = bfns.apply(lambda y: y['L'].values[0]).value_counts()\n #for l, lc in lcounts.items():\n # lcounts[l] = lc * spher_lml_count[l] // cart_lml_count[l]\n # lc = lcounts.sum()\n #rlmap = {value: key for key, value in lmap.items() if len(key) == 1}\n lmax = self.gaussian_basis_set['L'].cat.as_ordered().max()\n bs = self.gaussian_basis_set.groupby('set')\n bss = self.basis_set_summary\n pdata = []\n bdata = []\n cartcnt = []\n for seht in bss.index:\n cartcount = pd.Series([0] * len(bss.index))\n pdata.append([])\n bdata.append([])\n cartcnt.append(0)\n b = bs.get_group(seht)\n prims = b['L'].value_counts()\n bsfns = b.groupby('L').apply(lambda x: len(x['shell_function'].unique()))\n for i in range(lmax + 1):\n try:\n pdata[-1].append(prims.ix[i])\n bdata[-1].append(bsfns.ix[i])\n cartcnt[-1] += bsfns.ix[i] * cart_lml_count[i]\n except KeyError:\n pdata[-1].append(0)\n bdata[-1].append(0)\n pdata = pd.DataFrame(pdata)\n bdata = pd.DataFrame(bdata)\n data = pd.concat([pdata, bdata], axis=1)\n data.index.name = 'set'\n sl = len(data.columns) // 2\n data.columns = ['prim_' + rlmap[i] for i in data.columns[:sl]] + \\\n ['bas_' + rlmap[i] for i in data.columns[sl:]]\n cartperatom = pd.Series(cartcnt)\n cartperatom.index.name = 'set'\n data['cart_per_atom'] = cartperatom\n data['cartesian_count'] = cartperatom * self.atom['set'].value_counts()\n self.basis_set_summary = pd.concat([bss, data], axis=1)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.meta is None:\n self.meta = {}\n self.meta['program'] = 'NWChem'\n","sub_path":"exnwchem/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"73001689","text":"\"\"\"\nCreated at 27/10/16\n__author__ = 'Sergio Padilla'\n\n\"\"\"\nfrom flask import Flask\nimport svgwrite\nfrom random import randint\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef init():\n width = str(randint(100, 200))\n height = str(randint(50, 100))\n r = str(randint(0, 255))\n g = str(randint(0, 255))\n b = str(randint(0, 255))\n svg_document = svgwrite.Drawing(filename=\"static/test-svgwrite.svg\", size=(\"800px\", \"600px\"))\n m = randint(0, 3)\n if m == 1:\n color = 'green'\n elif m == 2:\n color = 'black'\n else:\n color = 'pink'\n n = randint(0, 3)\n if n == 1:\n svg_document.add(svg_document.rect(insert=(randint(100, 500), randint(100, 500)),\n size=(width + \"px\", height + \"px\"),\n stroke_width=\"1\",\n stroke=color,\n fill=\"rgb(\" + r + \",\" + g + \",\" + b + \")\"))\n elif n == 2:\n svg_document.add(svg_document.ellipse(center=(randint(500, 1000), randint(500, 1000)),\n r=(randint(100, 500), randint(100, 500)),\n stroke_width=\"1\",\n stroke=color,\n fill=\"rgb(\" + r + \",\" + g + \",\" + b + \")\"))\n else:\n svg_document.add(svg_document.circle(center=(randint(500, 1000), randint(500, 1000)),\n r=randint(100, 500),\n stroke_width=\"1\",\n stroke=color,\n fill=\"rgb(\" + r + \",\" + g + \",\" + b + \")\"))\n print(svg_document.tostring())\n svg_document.save()\n return \"\"\"\n \n \n 
<img src=\"static/test-svgwrite.svg\">\n    \"\"\"\n\n\nif __name__ == \"__main__\":\n    app.run()\n","sub_path":"Practica2/DynamicImagesSVG.py","file_name":"DynamicImagesSVG.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"529237825","text":"#!/usr/bin/env python\nimport sys\nimport argparse\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib as mpl\nmpl.use('GTKCairo')\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom matplotlib import rc\nfrom itertools import cycle\nfrom settings import *\nfrom kandiLib import *\n\n'''\nPlot fit for wind profiles\n'''\n\n#==========================================================#\n\nparser = argparse.ArgumentParser(\n    prog='plotWindProfileFit.py', description='''Plot time-averaged profiles.''')\nparser.add_argument(\"-f\", \"--files\", type=str, nargs='+', default=None,\n                    help=\"Name of the input netCDF4 files.\")\nparser.add_argument(\"-d\", \"--domains\", type=str, nargs='+',\n                    default=['0'], help=\"Statistical domains to process. Default: 00\")\nparser.add_argument(\"-s\", \"--save\", type=str, help=\"Save resulting figure as.\")\nparser.add_argument(\"-ft\", \"--fit\", type=int, nargs=2, default=[30,60], help=\"Range of vertical grid points to fit to.\")\nparser.add_argument(\"-x\", \"--xlims\", type=float, nargs=2, help=\"Set x axis limits manually.\")\nparser.add_argument(\"-y\", \"--ylims\", type=float, nargs=2, help=\"Set y axis limits manually.\")\nparser.add_argument(\"-pr\", \"--profile\", help=\"Profile to be used\", type=str, default=\"log\")\nparser.add_argument(\"-blh\", \"--blh\", type=float, help=\"Boundary layer height for the Gryning profile, default is fricVel/(12*1e-4)\")\nargs = parser.parse_args()\n\n#==========================================================#\n\nmpl.rcParams[\"mathtext.fontset\"] =\"cm\"\n\n# Read all datasets into a list\ndsList = []; tpList = {}; nameList = {}\nfor fname in args.files:\n    ds = openDataSet(fname)\n    nameList[ds] = fname\n    dsList.append(ds)\n    t_inds, = np.where(np.logical_and(ds.variables['time'][:] >= timespan[0], ds.variables['time'][:] <= timespan[1]))\n    tpList[ds] = t_inds\n\nplt.figure(1)\nplt.grid()\naxes = plt.gca()\nif (args.xlims):\n    axes.set_xlim(args.xlims)\nif (args.ylims):\n    axes.set_ylim(args.ylims)\nelse:\n    axes.set_ylim([0, 128])\nplt.ylabel(\"$z\\/\\mathrm{(m)}$\",fontsize=14)\ncolor_cycle = ['b', 'g', 'r', 'c', 'm', 'y', 'fuchsia', 'gold', 'orange', 'lightcoral', 'lightslategrey','tan']\ni=0\n\nfor ds in dsList:\n    for domain in args.domains:\n        flux1=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)[0]\n        flux2=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)[1]\n        flux = (flux1**2.0 + flux2**2.0)**0.25\n        fricVel= np.mean(flux[args.fit[0]:args.fit[1]])\n\n        datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)\n        hwind = datalist[3]\n        plt.xlabel(\"$\\mathbf{u}/\\mathbf{u_*}\\/\\mathrm{(m/s)}$\",fontsize=14)\n        plt.plot(hwind,pr_heights_plot, label=r'Run: {}, simulated'.format(nameList[ds][4], domain),color=color_cycle[i])\n\n        z=pr_heights_plot[args.fit[0]:args.fit[1]]\n        u_profile = hwind[args.fit[0]:args.fit[1]]\n        np.seterr(invalid='ignore') # Ignore invalid logarithm values\n\n        if (args.profile==\"log\"):\n            funcLogProfile = lambda z,a,b : (fricVel/0.4)*np.log((z-b)/a)\n        elif (args.profile==\"blackadar\"):\n            zi=1000. 
# Wind maximum\n            eta=63e-4*fricVel/1e-4 # eta=kb1*u*/f_c\n            funcLogProfile = lambda z,a,b : (fricVel/0.4)*(np.log((z-b)/a)+(0.4*(z-b)/eta)-(z/zi)*((0.4*z)/(2*eta)+1))\n        elif (args.profile==\"gryning\"):\n            if(args.blh):\n                h=args.blh\n            else:\n                h=fricVel/(12*1e-4)\n            f=1e-4\n            funcLogProfile = lambda z,a,b : (fricVel/0.4)*(np.log((z-b)/a)+(z-b)/(h/(2*(((np.log(fricVel/(1e-4*a))-1.9)**2+4.9**2)**0.5-np.log(h/a))))-((z-b)/h)*((z-b)/(h/(((np.log(fricVel/(1e-4*a))-1.9)**2+4.9**2)**0.5-np.log(h/a)))))\n\n        fitSolution, pcov = curve_fit(funcLogProfile,z,u_profile)\n        perr = np.sqrt(np.diag(pcov))\n        print(\"\")\n        print(\"u*: {}\".format(fricVel))\n        print(\"z_0: {} +/- {}\".format(fitSolution[0],perr[0]))\n        print(\"z_d: {} +/- {}\".format(fitSolution[1],perr[1]))\n        pr=funcLogProfile(pr_heights_plot,fitSolution[0],fitSolution[1])\n\n        # plt.plot(pr,pr_heights_plot, label=r'Run: {}, logprofile'.format(nameList[ds][4:], domain))\n\n        if (args.ylims):\n            axes.set_ylim([args.ylims[0],args.ylims[1]])\n        if (args.xlims):\n            axes.set_xlim([args.xlims[0],args.xlims[1]])\n\n        plt.plot(pr,pr_heights_plot, label=r'Run: {}, log profile'.format(nameList[ds][4]), linestyle='--', color=color_cycle[i])\n        i=i+1\n\n#axes.fill_between(np.linspace(0,12.0), 16, 32, facecolor='yellow', alpha=0.3,\n#                  label='Roof level < h < 0.3*BLH')\nleg = plt.legend(loc=0, fontsize=9)\nfor legobj in leg.legendHandles:\n    legobj.set_linewidth(2.0)\n\nif (args.save):\n    plt.savefig(args.save)\n    print(\"Figure {} saved.\".format(args.save))\nplt.show()\n","sub_path":"plotWindProfileFit.py","file_name":"plotWindProfileFit.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"257169708","text":"\nfrom django import forms\nfrom .models import Measurementunit, Threaddiameter, Pitch2Threaddiameter, Threaddesignation, Partnumber\n\ntpdid = Partnumber.objects.values('threaddesignationid').distinct()\np2did = Threaddesignation.objects.filter(pk__in=tpdid).values('pitch2threaddiameterid').distinct()\nthdid = Pitch2Threaddiameter.objects.filter(pk__in=p2did).values('threaddiameterid').distinct()\nuntid = Threaddiameter.objects.filter(pk__in=thdid).values('measurementunitid').distinct()\n\nPARTSLIST = Partnumber.objects.distinct()\n","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"96447434","text":"from collections import Counter\nimport re\n\ndef load_text(filepath):\n    with open(filepath) as text_file:\n        return text_file.read().lower()\n\ndef get_most_frequent_words(text):\n    words = re.findall(r'\\w+', text)  # list of words\n    cnt_words = Counter(words)  # word counter\n    top_ten = cnt_words.most_common(10)\n    return top_ten\n\nif __name__ == '__main__':\n    text = load_text(input(\"Enter the path: \"))\n    print('The 10 most frequent words in the text:')\n    top_ten = get_most_frequent_words(text)\n    for word in top_ten:\n        print(word[0])\n\n\n","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"531879107","text":"from unittest import TestCase\nfrom square import Square\nfrom quadrilateral import Quadrilateral\n\nclass TestSquare(TestCase):\n\n    def test_snap(self):\n        s1 = Square(0, 2.7, 2.7, 2.7, 2.7, 0, 0, 0)\n        s2 = Square(0, 0.1, 0.1, 0.1, 0.1, 0, 0, 0)\n        self.assertEqual(s1.snap(), Quadrilateral(0, 
3, 3, 3, 3, 0, 0, 0))\n        self.assertEqual(s2.snap(), s2)\n\n    def test___str(self):\n        s1 = Square(0, 10, 10, 10, 10, 0, 0, 0)\n        self.assertEqual(s1.__str__(), \"Square: (0, 10) (10, 10) (10, 0) (0, 0) \")\n\n    def test__is_member(self):\n        # invalid side lengths and missing arguments must be rejected\n        self.assertRaises(TypeError, Square, 0, 11, 10, 10, 10, 0, 0, 0)\n        self.assertRaises(TypeError, Square)\n\n    def test__eq(self):\n        s1 = Square(0, 10, 10, 10, 10, 0, 0, 0)\n        s2 = Square(0, 10, 10, 10, 10, 0, 0, 0)\n        s3 = Square(0, 2, 2, 2, 2, 0, 0, 0)\n        self.assertTrue(s1.__eq__(s2))\n        self.assertFalse(s1.__eq__(s3))\n\n","sub_path":"cse216_hw5/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"33422480","text":"from bcc import BPF\nfrom bcc.utils import printb\n\nebpf_prog = \"\"\"\n#include <uapi/linux/ptrace.h>\n\nint start(struct pt_regs* ctx)\n{\n    bpf_trace_printk(\"Intercepted sys call clone() at start\\\\n\");\n    return 0;\n}\n\nint end(struct pt_regs* ctx)\n{\n    bpf_trace_printk(\"Intercepted sys call clone() at end ret:%d\\\\n\", PT_REGS_RC(ctx));\n    return 0;\n}\n\"\"\"\n\nsys_call_name = \"__x64_sys_clone\"\n\nb = BPF(text=ebpf_prog)\n#b.attach_kprobe(event=b.get_syscall_fnname(\"clone\"), fn_name=\"hello\")\nb.attach_kprobe(event=sys_call_name, fn_name=\"start\")\nb.attach_kretprobe(event=sys_call_name, fn_name=\"end\")\n\nprint(\"%-18s %-16s %-6s %s\" % (\"TIME(s)\", \"COMM\", \"PID\", \"MESSAGE\"))\nwhile 1:\n    try:\n        (task, pid, cpu, flags, ts, msg) = b.trace_fields()\n    except ValueError:\n        continue\n    except KeyboardInterrupt:\n        exit()\n    printb(b\"%-18.9f %-16s %-6d %s\" % (ts, task, pid, msg))\n","sub_path":"python/scripts/kprobe.py","file_name":"kprobe.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"544769233","text":"# Take 10 integers from the user and print them on screen\n\nfor i in range(10):\n    y = int(input(\"Enter a value : \"))\n    print(y)\n\n\n# Write an infinite loop.\n\na = 1\nwhile a > 0:\n    print(a)\n\n\n#Question 3\n\n\nx = []\nfor i in range(5):\n    y = int(input(\"Enter a value : \"))\n    x.append(y)\nprint(x)\nz = []\nfor j in x:\n    a = j*j\n    z.append(a)\nprint(z)\n\n\n#Question 4\n\n\nl = [15,'Google',2.5,'Facebook',9.9,25,'Apple']\nintl = [x for x in l if isinstance(x, int)]\nfloatl = [x for x in l if isinstance(x, float)]\nstrl = [x for x in l if isinstance(x, str)]\nprint(intl)\nprint(floatl)\nprint(strl)\n\n\n#Question 5\n\na = []\nb = []\nfor i in range(1,101):\n    if (i % 2 == 0):\n        a.append(i)\n    else:\n        b.append(i)\nprint(\"Even lists: \",a)\nprint(\"Odd lists: \",b)\n\n\n#Question 6\n\n\nfor i in range(1,5):\n    for j in range(1,i+1):\n        print(\"*\", end = \" \")\n    print()\n\n\n#Question 7\n\n\ndataDict = {\"a\": 1,\"b\": 2,\"c\": 3}\nfor key,value in dataDict.items():\n    print(key, value)\n\n\n#Question 8\n\n\nx = []\nfor i in range(5):\n    y = input(\"Enter elements of list : \")\n    x.append(y)\nprint(x)\nz = input(\"Enter element to delete from the list : \")\nx.remove(z)\nprint('entered element is deleted from the list')\nprint(x)","sub_path":"Assignment_6.py","file_name":"Assignment_6.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"23902710","text":"\"\"\"\nTest the \"external\" interface.\n\nThe \"external\" interface is what the user sees. 
It should be pythonic and easy\nto use.\n\"\"\"\n\n\nfrom datetime import timedelta\nfrom unittest.mock import patch, call\nimport unittest\n\nfrom puresnmp import (\n BulkResult,\n bulkget,\n bulkwalk,\n get,\n getnext,\n multiget,\n multiset,\n multiwalk,\n set,\n table,\n walk,\n)\nfrom puresnmp.const import Version\nfrom puresnmp.exc import SnmpError, NoSuchOID\nfrom puresnmp.pdu import GetRequest, VarBind, GetNextRequest, BulkGetRequest\nfrom puresnmp.x690.types import (\n Integer,\n ObjectIdentifier,\n OctetString,\n Sequence,\n)\n\nfrom . import readbytes\n\n\nclass TestGet(unittest.TestCase):\n\n def test_get_call_args(self):\n \"\"\"\n Test the call arguments of \"get\"\n \"\"\"\n data = readbytes('dummy.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n GetRequest(0, ObjectIdentifier(1, 2, 3))\n )\n with patch('puresnmp.send') as mck, \\\n patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n get('::1', 'public', '1.2.3')\n mck.assert_called_with('::1', 161, bytes(packet), timeout=2)\n\n def test_get_string(self):\n data = readbytes('get_sysdescr_01.hex')\n expected = (b'Linux d24cf7f36138 4.4.0-28-generic #47-Ubuntu SMP '\n b'Fri Jun 24 10:09:13 UTC 2016 x86_64')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n result = get('::1', 'private', '1.2.3')\n self.assertEqual(result, expected)\n\n def test_get_oid(self):\n data = readbytes('get_sysoid_01.hex')\n expected = ('1.3.6.1.4.1.8072.3.2.10')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n result = get('::1', 'private', '1.2.3')\n self.assertEqual(result, expected)\n\n def test_get_multiple_return_binds(self):\n \"\"\"\n A \"GET\" response should only return one varbind.\n \"\"\"\n data = readbytes('get_sysoid_01_error.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n with self.assertRaisesRegex(SnmpError, 'varbind'):\n get('::1', 'private', '1.2.3')\n\n def test_get_non_existing_oid(self):\n \"\"\"\n A \"GET\" response on a non-existing OID should raise an appropriate\n exception.\n \"\"\"\n data = readbytes('get_non_existing.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n with self.assertRaises(NoSuchOID):\n get('::1', 'private', '1.2.3')\n\n\nclass TestWalk(unittest.TestCase):\n\n def test_walk(self):\n response_1 = readbytes('walk_response_1.hex')\n response_2 = readbytes('walk_response_2.hex')\n response_3 = readbytes('walk_response_3.hex')\n\n expected = [VarBind(\n ObjectIdentifier.from_string('1.3.6.1.2.1.2.2.1.5.1'), 10000000\n ), VarBind(\n ObjectIdentifier.from_string('1.3.6.1.2.1.2.2.1.5.13'), 4294967295\n )]\n\n with patch('puresnmp.send') as mck:\n mck.side_effect = [response_1, response_2, response_3]\n result = list(walk('::1', 'public', '1.3.6.1.2.1.2.2.1.5'))\n self.assertEqual(result, expected)\n\n def test_walk_multiple_return_binds(self):\n \"\"\"\n A \"WALK\" response should only return one varbind.\n \"\"\"\n data = readbytes('get_sysoid_01_error.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n with self.assertRaisesRegex(SnmpError, 'varbind'):\n next(walk('::1', 'private', '1.2.3'))\n\n\nclass TestSet(unittest.TestCase):\n\n def test_set_without_type(self):\n \"\"\"\n As we need typing information, we have to hand in an instance of\n supported types (a subclass of puresnmp.x690.Type).\n \"\"\"\n with patch('puresnmp.send'):\n with self.assertRaisesRegex(TypeError, 'Type'):\n set('::1', 'private', '1.2.3', 12)\n\n def 
test_set(self):\n data = readbytes('set_response.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n set('::1', 'private', '1.3.6.1.2.1.1.4.0',\n OctetString(b'hello@world.com'))\n\n def test_set_multiple_varbind(self):\n \"\"\"\n SET responses should only contain one varbind.\n \"\"\"\n data = readbytes('set_response_multiple.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n with self.assertRaisesRegex(SnmpError, 'varbind'):\n set('::1', 'private', '1.3.6.1.2.1.1.4.0',\n OctetString(b'hello@world.com'))\n\n\nclass TestMultiGet(unittest.TestCase):\n\n def test_multiget(self):\n data = readbytes('multiget_response.hex')\n expected = ['1.3.6.1.4.1.8072.3.2.10',\n b\"Linux 7fbf2f0c363d 4.4.0-28-generic #47-Ubuntu SMP Fri \"\n b\"Jun 24 10:09:13 UTC 2016 x86_64\"]\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n result = multiget('::1', 'private', [\n '1.3.6.1.2.1.1.2.0',\n '1.3.6.1.2.1.1.1.0',\n ])\n self.assertEqual(result, expected)\n\n\nclass TestMultiWalk(unittest.TestCase):\n\n def test_multi_walk(self):\n response_1 = readbytes('multiwalk_response_1.hex')\n response_2 = readbytes('multiwalk_response_2.hex')\n response_3 = readbytes('multiwalk_response_3.hex')\n\n expected = [VarBind(\n ObjectIdentifier.from_string('1.3.6.1.2.1.2.2.1.1.1'), 1\n ), VarBind(\n ObjectIdentifier.from_string('1.3.6.1.2.1.2.2.1.2.1'), b'lo'\n ), VarBind(\n ObjectIdentifier.from_string('1.3.6.1.2.1.2.2.1.1.78'), 78\n ), VarBind(\n ObjectIdentifier.from_string('1.3.6.1.2.1.2.2.1.2.78'), b'eth0'\n )]\n\n with patch('puresnmp.send') as mck:\n mck.side_effect = [response_1, response_2, response_3]\n result = list(multiwalk('::1', 'public', [\n '1.3.6.1.2.1.2.2.1.1',\n '1.3.6.1.2.1.2.2.1.2'\n ]))\n # TODO (advanced): should order matter in the following result?\n self.assertCountEqual(result, expected)\n\n\nclass TestMultiSet(unittest.TestCase):\n\n def test_multiset(self):\n \"\"\"\n Test setting multiple OIDs at once.\n\n NOTE: The OID '1.3.6.1.2.1.1.5.0' below is manually edited for\n unit-testing. 
It probably has a different type in the real world!\n \"\"\"\n data = readbytes('multiset_response.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n result = multiset('::1', 'private', [\n ('1.3.6.1.2.1.1.4.0', OctetString(b'hello@world.com')),\n ('1.3.6.1.2.1.1.5.0', OctetString(b'hello@world.com')),\n ])\n expected = {\n '1.3.6.1.2.1.1.4.0': b'hello@world.com',\n '1.3.6.1.2.1.1.5.0': b'hello@world.com',\n }\n self.assertEqual(result, expected)\n\n\nclass TestGetNext(unittest.TestCase):\n\n def test_get_call_args(self):\n data = readbytes('dummy.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n GetNextRequest(0, ObjectIdentifier(1, 2, 3))\n )\n with patch('puresnmp.send') as mck, \\\n patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n getnext('::1', 'public', '1.2.3')\n mck.assert_called_with('::1', 161, bytes(packet), timeout=2)\n\n def test_getnext(self):\n data = readbytes('getnext_response.hex')\n expected = VarBind('1.3.6.1.6.3.1.1.6.1.0', 354522558)\n\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n result = getnext('::1', 'private', '1.3.6.1.5')\n self.assertEqual(result, expected)\n\n\nclass TestGetBulkGet(unittest.TestCase):\n\n def test_get_call_args(self):\n data = readbytes('dummy.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n BulkGetRequest(0, 1, 2,\n ObjectIdentifier(1, 2, 3),\n ObjectIdentifier(1, 2, 4))\n )\n with patch('puresnmp.send') as mck, \\\n patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n bulkget('::1', 'public',\n ['1.2.3'],\n ['1.2.4'],\n max_list_size=2)\n mck.assert_called_with('::1', 161, bytes(packet), timeout=2)\n\n\n def test_bulkget(self):\n data = readbytes('bulk_get_response.hex')\n expected = BulkResult(\n {'1.3.6.1.2.1.1.1.0': b'Linux 7e68e60fe303 4.4.0-28-generic '\n b'#47-Ubuntu SMP Fri Jun 24 10:09:13 UTC 2016 x86_64'},\n {'1.3.6.1.2.1.3.1.1.1.10.1.172.17.0.1': 10,\n '1.3.6.1.2.1.3.1.1.2.10.1.172.17.0.1': b'\\x02B\\xe2\\xc5\\x8d\\t',\n '1.3.6.1.2.1.3.1.1.3.10.1.172.17.0.1': b'\\xac\\x11\\x00\\x01',\n '1.3.6.1.2.1.4.1.0': 1,\n '1.3.6.1.2.1.4.3.0': 57})\n\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n result = bulkget('::1', 'public',\n ['1.3.6.1.2.1.1.1'],\n ['1.3.6.1.2.1.3.1'],\n max_list_size=5)\n self.assertEqual(result, expected)\n\n\nclass TestGetBulkWalk(unittest.TestCase):\n\n def test_get_call_args(self):\n data = readbytes('dummy.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n BulkGetRequest(0, 0, 2, ObjectIdentifier(1, 2, 3))\n )\n with patch('puresnmp.send') as mck, \\\n patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n\n # we need to wrap this in a list to consume the generator.\n list(bulkwalk('::1', 'public',\n ['1.2.3'],\n bulk_size=2))\n mck.assert_called_with('::1', 161, bytes(packet), timeout=2)\n\n def test_get_call_args_issue_22(self):\n data = readbytes('dummy.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n BulkGetRequest(0, 0, 2, ObjectIdentifier(1, 2, 3))\n )\n with patch('puresnmp.send') as mck, \\\n patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n\n with self.assertRaisesRegex(TypeError, 'OIDS.*list'):\n # we need to wrap this in a list to consume the generator.\n list(bulkwalk('::1', 'public', 
'1.2.3', bulk_size=2))\n\n @patch('puresnmp.send')\n @patch('puresnmp.get_request_id')\n def test_bulkwalk(self, mck_rid, mck_send):\n req1 = readbytes('bulkwalk_request_1.hex')\n req2 = readbytes('bulkwalk_request_2.hex')\n req3 = readbytes('bulkwalk_request_3.hex')\n\n responses = [\n readbytes('bulkwalk_response_1.hex'),\n readbytes('bulkwalk_response_2.hex'),\n readbytes('bulkwalk_response_3.hex'),\n ]\n mck_send.side_effect = responses\n\n request_ids = [1001613222, 1001613223, 1001613224]\n mck_rid.side_effect = request_ids\n\n result = list(bulkwalk('127.0.0.1', 'private', ['1.3.6.1.2.1.2.2'],\n bulk_size=20))\n\n self.assertEqual(mck_send.mock_calls, [\n call('127.0.0.1', 161, req1, timeout=2),\n call('127.0.0.1', 161, req2, timeout=2),\n call('127.0.0.1', 161, req3, timeout=2),\n ])\n\n # TODO (advanced): Type information is lost for timeticks and OIDs\n expected = [\n VarBind('1.3.6.1.2.1.2.2.1.1.1', 1),\n VarBind('1.3.6.1.2.1.2.2.1.1.10', 10),\n VarBind('1.3.6.1.2.1.2.2.1.2.1', b\"lo\"),\n VarBind('1.3.6.1.2.1.2.2.1.2.10', b\"eth0\"),\n VarBind('1.3.6.1.2.1.2.2.1.3.1', 24),\n VarBind('1.3.6.1.2.1.2.2.1.3.10', 6),\n VarBind('1.3.6.1.2.1.2.2.1.4.1', 65536),\n VarBind('1.3.6.1.2.1.2.2.1.4.10', 1500),\n VarBind('1.3.6.1.2.1.2.2.1.5.1', 10000000),\n VarBind('1.3.6.1.2.1.2.2.1.5.10', 4294967295),\n VarBind('1.3.6.1.2.1.2.2.1.6.1', b\"\"),\n VarBind('1.3.6.1.2.1.2.2.1.6.10', b\"\\x02\\x42\\xAC\\x11\\x00\\x02\"),\n VarBind('1.3.6.1.2.1.2.2.1.7.1', 1),\n VarBind('1.3.6.1.2.1.2.2.1.7.10', 1),\n VarBind('1.3.6.1.2.1.2.2.1.8.1', 1),\n VarBind('1.3.6.1.2.1.2.2.1.8.10', 1),\n VarBind('1.3.6.1.2.1.2.2.1.9.1', timedelta(0)),\n VarBind('1.3.6.1.2.1.2.2.1.9.10', timedelta(0)),\n VarBind('1.3.6.1.2.1.2.2.1.10.1', 172),\n VarBind('1.3.6.1.2.1.2.2.1.10.10', 60558),\n VarBind('1.3.6.1.2.1.2.2.1.11.1', 2),\n VarBind('1.3.6.1.2.1.2.2.1.11.10', 564),\n VarBind('1.3.6.1.2.1.2.2.1.12.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.12.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.13.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.13.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.14.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.14.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.15.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.15.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.16.1', 172),\n VarBind('1.3.6.1.2.1.2.2.1.16.10', 44295),\n VarBind('1.3.6.1.2.1.2.2.1.17.1', 2),\n VarBind('1.3.6.1.2.1.2.2.1.17.10', 442),\n VarBind('1.3.6.1.2.1.2.2.1.18.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.18.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.19.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.19.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.20.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.20.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.21.1', 0),\n VarBind('1.3.6.1.2.1.2.2.1.21.10', 0),\n VarBind('1.3.6.1.2.1.2.2.1.22.1', '0.0'), # TODO: type info is lost\n VarBind('1.3.6.1.2.1.2.2.1.22.10', '0.0'), # TODO: type info is lost\n ]\n\n # TODO: Expected types per OID:\n # 1.3.6.1.2.1.2.2.1.1.1 = INTEGER: 1\n # 1.3.6.1.2.1.2.2.1.1.10 = INTEGER: 10\n # 1.3.6.1.2.1.2.2.1.2.1 = STRING: \"lo\"\n # 1.3.6.1.2.1.2.2.1.2.10 = STRING: \"eth0\"\n # 1.3.6.1.2.1.2.2.1.3.1 = INTEGER: 24\n # 1.3.6.1.2.1.2.2.1.3.10 = INTEGER: 6\n # 1.3.6.1.2.1.2.2.1.4.1 = INTEGER: 65536\n # 1.3.6.1.2.1.2.2.1.4.10 = INTEGER: 1500\n # 1.3.6.1.2.1.2.2.1.5.1 = Gauge32: 10000000\n # 1.3.6.1.2.1.2.2.1.5.10 = Gauge32: 4294967295\n # 1.3.6.1.2.1.2.2.1.6.1 = \"\"\n # 1.3.6.1.2.1.2.2.1.6.10 = Hex-STRING: 02 42 AC 11 00 02\n # 1.3.6.1.2.1.2.2.1.7.1 = INTEGER: 1\n # 1.3.6.1.2.1.2.2.1.7.10 = INTEGER: 1\n # 1.3.6.1.2.1.2.2.1.8.1 = INTEGER: 1\n # 1.3.6.1.2.1.2.2.1.8.10 = INTEGER: 1\n # 
1.3.6.1.2.1.2.2.1.9.1 = Timeticks: (0) 0:00:00.00\n        # 1.3.6.1.2.1.2.2.1.9.10 = Timeticks: (0) 0:00:00.00\n        # 1.3.6.1.2.1.2.2.1.10.1 = Counter32: 172\n        # 1.3.6.1.2.1.2.2.1.10.10 = Counter32: 60558\n\n        # 1.3.6.1.2.1.2.2.1.11.1 = Counter32: 2\n        # 1.3.6.1.2.1.2.2.1.11.10 = Counter32: 564\n        # 1.3.6.1.2.1.2.2.1.12.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.12.10 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.13.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.13.10 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.14.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.14.10 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.15.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.15.10 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.16.1 = Counter32: 172\n        # 1.3.6.1.2.1.2.2.1.16.10 = Counter32: 44295\n        # 1.3.6.1.2.1.2.2.1.17.1 = Counter32: 2\n        # 1.3.6.1.2.1.2.2.1.17.10 = Counter32: 442\n        # 1.3.6.1.2.1.2.2.1.18.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.18.10 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.19.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.19.10 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.20.1 = Counter32: 0\n        # 1.3.6.1.2.1.2.2.1.20.10 = Counter32: 0\n\n        # 1.3.6.1.2.1.2.2.1.21.1 = Gauge32: 0\n        # 1.3.6.1.2.1.2.2.1.21.10 = Gauge32: 0\n        # 1.3.6.1.2.1.2.2.1.22.1 = OID: ccitt.0\n        # 1.3.6.1.2.1.2.2.1.22.10 = OID: ccitt.0\n        self.assertEqual(result, expected)\n\n\nclass TestGetTable(unittest.TestCase):\n\n    @patch('puresnmp.walk')\n    @patch('puresnmp.tablify')\n    @patch('puresnmp.get_request_id')\n    def test_table(self, mck_rid, mck_tablify, mck_walk):\n        mck_rid.return_value = 0\n        tmp = object()  # dummy return value\n        mck_walk.return_value = tmp\n        table('::1', 'public', '1.2.3.4', port=161, num_base_nodes=2)\n        mck_walk.assert_called_with('::1', 'public', '1.2.3.4', port=161)\n        mck_tablify.assert_called_with(tmp, num_base_nodes=2)\n","sub_path":"puresnmp/test/test_package_root.py","file_name":"test_package_root.py","file_ext":"py","file_size_in_byte":17373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"451068555","text":"# Using scikit-learn for data normalization\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\ncsv_data = pd.read_csv(\"https://storage.googleapis.com/dqlab-dataset/shopping_data.csv\")\narray = csv_data.values\n\n# X is the matrix holding the dataset features used in machine learning, whether for regression, classification, clustering, or normalization.\n# In this case X holds the features to be scaled with the min-max scaler\n\nX = array[:,2:5]  # separate the features from the dataset.\nY = array[:,0:1]  # separate the class from the dataset.\n\ndataset=pd.DataFrame({'Customer ID':array[:,0],'Gender':array[:,1],'Age':array[:,2],'Income':array[:,3],'Spending Score':array[:,4]})\nprint(\"dataset before normalization:\")\nprint(dataset.head(10))\n\nmin_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1))  # initialize the MinMax normalization\ndata = min_max_scaler.fit_transform(X)  # apply the MinMax transform to the features\ndataset = pd.DataFrame({'Age':data[:,0],'Income':data[:,1],'Spending Score':data[:,2],'Customer ID':array[:,0],'Gender':array[:,1]})\nprint(\"dataset after normalization:\")\nprint(dataset.head(10))\n","sub_path":"Bagian Data Wrangling/Normalisasi_data.py","file_name":"Normalisasi_data.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"376131433","text":"import json\n\nfrom simple_amqp import AmqpMsg\n\nfrom .data import Event\n\n# the payload below is serialized with json.dumps, so advertise JSON\nCONTENT_TYPE_JSON = 'application/json'\n\n\ndef encode_event(event: Event) -> 
AmqpMsg:\n    payload = json.dumps({\n        'source': event.source,\n        'topic': event.topic,\n        'payload': event.payload,\n    })\n    payload = payload.encode('utf8')\n    headers = {}\n    if event.retry_count > 0:\n        headers['retry_count'] = event.retry_count\n\n    return AmqpMsg(\n        payload=payload,\n        content_type=CONTENT_TYPE_JSON,\n        headers=headers,\n    )\n\n\ndef decode_event(msg: AmqpMsg, pipe_name: str) -> Event:\n    payload = json.loads(msg.payload)\n    retry_count = msg.headers.get('retry_count', 0)\n\n    return Event(\n        source=payload['source'],\n        topic=payload['topic'],\n        payload=payload['payload'],\n        pipe=pipe_name,\n        retry_count=retry_count,\n    )\n","sub_path":"simple_amqp_pubsub/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"351979993","text":"\"\"\"\n    Defining some global constants\n\n\"\"\"\n\nIMPORT_COLUMNS = ['pid', 'ax', 'ay', 'az', 'error']\nCUT_OFF_LENGTH = 0  # cut-off length in seconds, raw signal will be shortened\nGAME1 = 'futurocube'\nLEVEL_TIME_INTERVALS = [40, 80, 120]  # in seconds between game levels\nDEBUG_LEVEL = 2\nSAMPLE_FREQUENCY_FUTUROCUBE = 20.5  # sampling frequency of the futurocube (earlier info from Antoine/Pascal said 70 Hz)\n                                    # needs to be adjusted per game device\n\n# if we want to calculate the features for one window across the whole file length\n# we need to approximate \"exp_2 = np.floor(np.log2(WINDOW_SIZE * freq))\"\nWINDOW_SIZE = 30  # in seconds\nOVERLAP_COEFFICIENT = 1  # window overlap factor (0.5 = 50% overlap), important parameter!\n                         # 1 means no sliding window approach, can be used for hard cuts\nMEAN_FILE_LENGTH = 3750  # in samples\n# ['min', 'max', 'mean', 'std', 'median', 'rms', 'range', 'dc', 'energy', 'power_spec_entropy',\n# 'dominant freq', \"cos_sim\"]\nFEATURE_LIST = ['minf', 'maxf', 'mean', 'std', 'median', 'range', 'rms', 'mean_squared_jerk', 'dc',\n                'energy', 'power_spec_entropy', 'cos_sim']\n\nLABELS = ['ID', 'CLASS', 'AGE', 'SEX', 'HANDED']\nRAW_DATA_ARRAY = 'raw_data'\nDATA_ARRAY = 'feature_data'\nLABEL_ARRAY = 'label_data'\n\n","sub_path":"preprocessing/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"338609852","text":"import dotenv\nimport os\n\n\nPROJECT_DIR = os.path.join(os.path.dirname(__file__), os.pardir)\n\ndotenv_path = os.path.join(PROJECT_DIR, '.env.prod')\ndotenv.load_dotenv(dotenv_path)\n\n# Cloudant\nMODEL_CATALOG_SERVICE_NAME = 'MODEL_CATALOG_TFM'\nMODEL_CATALOG_DB = 'model'\nMODEL_PREDICTIONS_DB = 'predictions'\n\n# COS (Cloud Object Storage)\nCOS_ENDPOINT = os.getenv('COS_ENDPOINT')\nCOS_API_KEY_ID = os.getenv('COS_API_KEY_ID')\nCOS_INSTANCE_CRN = os.getenv('COS_INSTANCE_CRN')\nCOS_MODEL_STORAGE_BUCKET = os.getenv('COS_MODEL_STORAGE_BUCKET')\n","sub_path":"app/config.prod.py","file_name":"config.prod.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"385545562","text":"from glob import glob\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import mean_squared_error\n\n\ndef rearrange_pred_labs(preds, labs, train_test_split):\n    preds = [list(p) for p in preds]\n    labs = [list(l) for l in labs]\n    results_pred = []\n    results_lab = []\n    for s in train_test_split:\n        results_pred.append(preds[s].pop(0))\n 
results_lab.append(labs[s].pop(0))\n    results_pred = np.array(results_pred)\n    results_lab = np.array(results_lab)\n    return results_pred, results_lab\n\n\ndef get_top_subjects(number):\n    global subs\n    global preds\n    global labs\n    perc_lab = np.percentile(labs, 100 - number*100/1137)\n    subs_lab = subs[labs>perc_lab]\n    count_lab = number\n    while len(np.unique(subs_lab)) < number:\n        count_lab += 1\n        perc_lab = np.percentile(labs, 100 - count_lab*100/1137)\n        subs_lab = subs[labs>perc_lab]\n    perc_pred = np.percentile(preds, 100 - number*100/1137)\n    subs_pred = subs[preds>perc_pred]\n    count_pred = number\n    while len(np.unique(subs_pred)) < number:\n        count_pred += 1\n        perc_pred = np.percentile(preds, 100 - count_pred*100/1137)\n        subs_pred = subs[preds>perc_pred]\n    # the percentile cut may select more subjects than requested if several share the same delta SUVR pred/lab\n    return perc_pred, perc_lab\n\n\ndef get_unique_t0_suvr():\n    global t0_suvr\n    global subs\n    t0, t0_idx = np.unique(subs, return_index=True)\n    suvr_unique = t0_suvr[t0_idx]\n    return suvr_unique\n\ndef get_matches(number):\n    global preds\n    global labs\n    global subs\n    perc_pred = np.percentile(preds, 100 - number*100 / 1137)\n    perc_lab = np.percentile(labs, 100 - number*100 / 1137)\n    # perc_pred, perc_lab = get_top_subjects(76)\n    # _, perc_lab = get_top_subjects(77)\n    median_lab = np.percentile(labs, 50)\n    sub_ids = np.unique(subs[(labs > perc_lab) & (preds > perc_pred)])\n    sub_ids_study, study_idx = np.unique(subs[preds>perc_pred], return_index=True)\n    sub_ids_label = np.unique(subs[labs>perc_lab])\n    print(f\"{len(sub_ids_study)} in study, {len(np.unique(sub_ids))} also in top {len(sub_ids_label)} of labels.\")\n    # print(result)\n    # print(len(np.unique(sub_ids)), sub_ids)\n    return (labs > perc_lab) & (preds > perc_pred)\n\n\ndef get_matches_target(study_size, target_size):\n    global preds\n    global labs\n    global subs\n    # perc_pred = np.percentile(preds, 100 - number*100 / 1137)\n    # perc_lab = np.percentile(labs, 100 - number*100 / 1137)\n    perc_pred, perc_lab = get_top_subjects(study_size)\n    _, perc_lab = get_top_subjects(target_size)\n    median_lab = np.percentile(labs, 50)\n    sub_ids = np.unique(subs[(labs > perc_lab) & (preds > perc_pred)])\n    sub_ids_study, study_idx = np.unique(subs[preds>perc_pred], return_index=True)\n    sub_ids_label = np.unique(subs[labs>perc_lab])\n    print(f\"{len(sub_ids_study)} in study, {len(np.unique(sub_ids))} also in top {len(sub_ids_label)} of labels.\")\n    # print(result)\n    # print(len(np.unique(sub_ids)), sub_ids)\n    return (labs > perc_lab) & (preds > perc_pred)\n\n\ndef get_high_t0suvr_matches(number):\n    global preds\n    global labs\n    global t0_suvr\n    high_suvr = np.percentile(t0_suvr, 100 - number*100 / 1137)\n    perc_lab = np.percentile(labs, 100 - number*100 / 1137)\n    # high_suvr, perc_lab = get_top_subjects(number)\n    median_lab = np.percentile(labs, 50)\n    sub_ids = np.unique(subs[(labs > perc_lab) & (t0_suvr > high_suvr)])\n    sub_ids_study = np.unique(subs[t0_suvr>high_suvr])\n    sub_ids_label = np.unique(subs[labs>perc_lab])\n    print('matches based on high suvr selection')\n    print(f\"{len(sub_ids_study)} in study, {len(np.unique(sub_ids))} also in top {len(sub_ids_label)} of labels.\")\n    # print(result)\n    # print(len(np.unique(sub_ids)), sub_ids)\n    return (labs > perc_lab) & (t0_suvr > high_suvr)\n\n\ndef get_minimal_change_matches(number):\n    global preds\n    global labs\n    perc_low = np.percentile(preds, 50 - number*50/1137)\n    perc_high = np.percentile(preds, 50 + number*50.5/1137)\n    perc2_low = np.percentile(labs, 50 - number*50/1137)\n    
perc2_high = np.percentile(labs, 50 + number*50/1137)\n sub_ids = np.unique(subs[(preds > perc_low) & (preds < perc_high) & (labs > perc2_low) & (labs < perc2_high)])\n sub_ids_study = np.unique(subs[(preds > perc_low) & (preds < perc_high)])\n sub_ids_label = np.unique(subs[(labs > perc2_low) & (labs < perc2_high)])\n print(f\"{len(sub_ids_study)} in study, {len(np.unique(sub_ids))} also in top {len(sub_ids_label)} of labels.\")\n\n\ndef plot_densities():\n global preds\n global labs\n sns.kdeplot(preds, label='GBDT predictions')\n sns.kdeplot(labs, label='Ground truth')\n plt.legend()\n plt.show()\n\n\ndef rmse(target, prediction):\n return mean_squared_error(target, prediction)**0.5\n\n\ndef rmse_over_time():\n global preds\n global labs\n global delta_time\n delta_time_yrs = delta_time/(365*24*60*60)\n results = []\n for i in range(4):\n start = 1+i*2\n end = 3+i*2\n time_filter = (delta_time_yrs>=start) & (delta_time_yrs < end)\n time_preds = preds[time_filter]\n time_labs = labs[time_filter]\n results.append(rmse(time_preds, time_labs))\n print(results)\n return 0\n\n\ndef get_examdate(s, delta_time):\n global subs\n global extra_exam_date\n global extra_subs\n fi = extra_subs == s\n fi2 = subs==s\n exam_dates = extra_exam_date[fi]\n deltas = delta_time[fi2]\n s1 = np.argsort(exam_dates)\n s2 = np.argsort(deltas)\n # get exam dates in same relative order as deltas (required for correct insertion later on)\n exam_dates = exam_dates[s1][np.argsort(s2)]\n return exam_dates\n\ndef get_diagnosis(s, ex, d_data, name='-1'):\n \"\"\"\n Looks into the berkeley study csv file and returns the corresponding label.\n If rid+examdate yield more than one corresponding row, it returns -1, as we then cannot find the\n exact label in the table.\n :param s:\n :param ex:\n :return:\n \"\"\"\n global diagnosis_data\n ts = pd.Timestamp(ex, unit='s')\n # d_data = diagnosis_data['EXAMDATE']\n # d_data = list(d_data)\n # d_results = []\n # for d in d_data:\n # try:\n # d = d.timestamp()\n # except:\n # d = -1\n # d_results.append(d)\n # d_data = np.array(d_results)\n # d_data = d_data[diagnosis_data['RID'] == s]\n e = ts.timestamp()\n diffs = np.abs(d_data-e)/(3600*24)\n\n row_idx = (diffs<50) & (diagnosis_data['RID'] == s)\n if row_idx.sum() > 1:\n print('more than one result for rid & examdate combination in berkeley study data')\n print(f'file name is {name}, rid is {s}, exam date is {ts}')\n return -1, -1\n if row_idx.sum() == 0:\n print('no result found for rid & examdate combination in berkeley study data')\n print(f'file name is {name}, rid is {s}, exam date is {ts}')\n return -1, -1\n label = diagnosis_data['CDGLOBAL'][row_idx]\n return float(label), float(diffs[row_idx])\n\n\ndef get_ed_registry(r, v, u):\n global not_found_counter\n # look for examdate in registry, as recommended by ADNI\n global registry_data\n row_idx = (registry_data['VISCODE'] == v) & (registry_data['RID'] == r) & (registry_data['USERDATE'] == u)\n reg_ex_date = registry_data['EXAMDATE'][row_idx]\n if len(reg_ex_date) > 1:\n print(len(reg_ex_date))\n print('not found exactly, using userdate')\n not_found_counter += 1\n return u.timestamp()\n if len(reg_ex_date) < 1:\n print('not found, using userdate')\n not_found_counter += 1\n return u.timestamp()\n if pd.isnull(reg_ex_date.iloc[0]):\n print('not found, using userdate')\n not_found_counter += 1\n return u.timestamp()\n reg_ex_date = reg_ex_date.iloc[0].timestamp()\n return reg_ex_date\n\n\ndef create_diagnosis_data():\n # I want diagnosis, suvr, faqtotal, subid, examdate, 
delta_time, delta_suvr, t0_suvr\n # I want to add t0 values\n global subs\n global t0_suvr\n global delta_suvr\n global delta_time\n global faq\n global suvr\n # append numbers for t0 values\n zeros = np.zeros(np.unique(subs).shape)\n delta_time = np.append(delta_time, zeros)\n delta_suvr = np.append(delta_suvr, zeros)\n append_t0_suvr = []\n for s in np.unique(subs):\n t0_s = t0_suvr[s==subs][0]\n append_t0_suvr.append(t0_s)\n append_t0_suvr = np.array(append_t0_suvr)\n t0_suvr = np.append(t0_suvr, append_t0_suvr)\n suvr = np.append(suvr, append_t0_suvr)\n subs = np.append(subs, np.unique(subs))\n exam_dates = np.copy(delta_time)\n diagnoses = np.copy(delta_time)\n differences = np.copy(delta_time)\n t0_diagnoses = np.copy(delta_time)\n ex_date = diagnosis_data['EXAMDATE']\n # name not found dates via -1\n ex_data = []\n for d in ex_date:\n try:\n d = d.timestamp()\n except:\n d = -1\n ex_data.append(d)\n ex_rid = diagnosis_data['RID']\n ex_vis = diagnosis_data['VISCODE']\n ex_us = diagnosis_data['USERDATE']\n new_ex_data = ex_data.copy()\n\n # fill in exam dates extracted from registry metadata\n for i, (ed, r, v, u) in enumerate(zip(ex_data, ex_rid, ex_vis, ex_us)):\n if ed == -1:\n ex_date = get_ed_registry(r, v, u)\n new_ex_data[i] = ex_date\n new_ex_data = np.array(new_ex_data)\n\n # search for diagnosis\n found = []\n found_t0 = []\n for s in np.unique(subs):\n # delta time is to order correctly. Needed to put exam_data on the correct spot.\n ex = get_examdate(s, delta_time)\n digs = []\n diffs = []\n for e in ex:\n dig, diff = get_diagnosis(s, e, new_ex_data)\n digs.append(dig)\n diffs.append(diff)\n if min(digs)==-1:\n found.append(-1)\n else:\n found.append(1)\n if digs[ex.argmin()] == -1:\n found_t0.append(0)\n else:\n found_t0.append(1)\n exam_dates[subs==s] = ex\n diagnoses[subs==s] = digs\n differences[subs==s] = diffs\n t0_diagnoses[subs==s] = digs[ex.argmin()]\n found = np.array(found)\n found_t0 = np.array(found_t0)\n output_folder = r'C:\\Users\\Fabian\\stanford'\n results = {'diagnoses': diagnoses, 'differences': differences, 'subs': subs, 'suvr': suvr,\n 't0_suvr': t0_suvr, 'exam_dates': exam_dates, 'delta_time': delta_time, 'delta_suvr': delta_suvr, 'found_subject': found,\n 't0_diagnoses': t0_diagnoses, 'found_t0': found_t0}\n with open(os.path.join(output_folder, 'diagnoses_DXSUM.pickle'), 'wb') as f:\n pickle.dump(results, f)\n print('nice')\n\n\n\ntarget_folder = r'C:\\Users\\Fabian\\stanford\\gbdt\\analysis'\ntarget_file = 'all_results.pickle'\nfolders = glob(os.path.join(target_folder, '157*'))\n\n\n# select GBDT predictions in folder\nfolder = folders[-1]\ntarget = os.path.join(folder, target_file)\nwith open(target, 'rb') as f:\n data = pickle.load(f)\n\n# load additional data\nin_path = r'C:\\Users\\Fabian\\stanford\\fed_learning\\rsync\\fl\\rf_data_train_test_crossval.pickle'\nwith open(in_path, 'rb') as f:\n more_data = pickle.load(f)\n\npath_detailled_data = r'C:\\Users\\Fabian\\stanford\\fed_learning\\federated_learning_data\\test_meta_data_complete.pickle'\nwith open(path_detailled_data, 'rb') as f:\n detailled_data = pickle.load(f)\n\ndiagnosis_csv = r'C:\\Users\\Fabian\\stanford\\CDR.csv'\ndiagnosis_data = pd.read_csv(diagnosis_csv, parse_dates=['EXAMDATE', 'USERDATE'])\n\nregistry_csv = r'C:\\Users\\Fabian\\stanford\\REGISTRY.csv'\nregistry_data = pd.read_csv(registry_csv, parse_dates=['EXAMDATE', 'USERDATE'])\n\n# get data as variables\ngbms = data['gbm']\nx_names = data['x_names']\npreds = data['predictions']\nlabs = data['labels']\ny = 
data['y']\ntrain_test_split = data['train_test_split']\npreds, labs = rearrange_pred_labs(preds, labs, train_test_split)\nsubs = more_data['subs']\nt0_suvr = more_data['t0_suvr']\nage = more_data['age']\nweight = more_data['age']  # NOTE: reuses the 'age' key; presumably this was meant to read more_data['weight']\napoe = more_data['apoe']\nfaq = more_data['faqtotal']\nmmse = more_data['mmsescore']\ndelta_suvr = more_data['delta_suvr']\ndelta_time = more_data['delta_time']\nsuvr = more_data['suvr']\n\nextra_subs = detailled_data['sub_id']\nextra_exam_date = detailled_data['exam_date']\n\n#########################################\n# get diagnosis data\n#########################################\nnot_found_counter = 0\ncreate_diagnosis_data()\n","sub_path":"dl/analyze_results/diagnostis_numbers.py","file_name":"diagnostis_numbers.py","file_ext":"py","file_size_in_byte":12446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"18764852","text":"import os\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom ATSFramework import SeleniumFL\n# https://pypi.org/project/prettytable/\nfrom prettytable import PrettyTable\n\nfrom ATSFramework import EmailFL\nfrom selenium_driver_factory import WebDriverFactory\n\n\ndef washington_org():\n    wdf = WebDriverFactory('chrome_headless')\n    url = \"https://washington.org/things-to-do-this-weekend-washington-dc\"\n    driver = wdf.get_webdriver_instance(url=url)\n    try:\n        ws = SeleniumFL(driver)\n        articles = ws.get_elements(By.XPATH, \"//article/following-sibling::p\")\n        x = PrettyTable()\n        x.field_names = [\"Article\"]\n        # align left\n        x.align = \"l\"\n        x.format = True\n        # add lines between rows\n        x.hrules = 1\n        for article in articles:\n            x.add_row([article.text])\n        print(x)\n        EmailFL.send_email_gmail([os.getenv('GMAIL_ID')], \"Weekend Events\", f\"{url}
{x.get_html_string()}\")\n\n except Exception as e:\n print(f\"Something went wrong while checking {url} {e} \")\n finally:\n driver.quit()\n\ndef capital_one_center():\n wdf = WebDriverFactory('chrome_headless')\n url = 'https://www.eventbrite.com/o/capital-one-center-26413794281'\n driver = wdf.get_webdriver_instance(url=url)\n try:\n if 'Sorry, there are no upcoming events' in driver.page_source:\n print(\"No Events found at capital one\")\n else:\n print(\"Events found at capital one..sending email\")\n EmailFL.send_email_gmail([os.getenv('GMAIL_ID')], \"Events found at capital one center\",\n f\"Check website https://www.eventbrite.com/o/capital-one-center-26413794281\")\n except:\n print(f\"Something went wrong while checking {url} \")\n EmailFL.send_email_gmail([os.getenv('GMAIL_ID')], \" *** ERROR *** Events found at capital one center\",\n f\" Something went wrong while Checking website https://www.eventbrite.com/o/capital-one-center-26413794281\")\n finally:\n driver.quit()\n\ndef africal_american_museum():\n wdf = WebDriverFactory('chrome_headless')\n url = 'https://event.etix.com/ticket/e/1018702/national-museum-of-african-american-history-and-culture-timedentry-passes-washington-national-museum-of-african-american-history-and-culture-general-public'\n driver = wdf.get_webdriver_instance(url=url)\n try:\n if 'No Passes Available' in driver.page_source:\n print(\"No tickets found at africal_american_museum\")\n else:\n print(\"Tickets found..sending email\")\n EmailFL.send_email_gmail([os.getenv('GMAIL_ID')], \"Tickets availalbe at Museum of African American History and Culture\",\n f\"Check website {url}\")\n except:\n print(f\"Something went wrong while checking {url} \")\n EmailFL.send_email_gmail([os.getenv('GMAIL_ID')], \" *** ERROR *** Museum of African American History and Culture ticket check\",\n f\" Something went wrong while Checking website {url}\")\n finally:\n driver.quit()\n\ncapital_one_center()\nafrical_american_museum()\nwashington_org()\n","sub_path":"selenium_event_checker.py","file_name":"selenium_event_checker.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"415191104","text":"# -*- coding: utf-8 -*-\n#\nfrom krypy.linsys import LinearSystem, Minres as KrypyMinres\nfrom krypy.deflation import DeflatedMinres as KrypyDeflatedMinres\n\nfrom .linear_operator import LinearOperator, wrap_linear_operator, wrap_inner_product\n\n\nclass Minres(object):\n def __init__(self, obj):\n self.MMlr0 = obj.MMlr0[:, 0]\n self.MMlr0_norm = obj.MMlr0_norm\n self.MlAMr = obj.MlAMr\n self.Mlr0 = obj.Mlr0[:, 0]\n self.flat_vecs = obj.flat_vecs\n self.store_arnoldi = obj.store_arnoldi\n self.ortho = obj.ortho\n self.maxiter = obj.maxiter\n self.iter = obj.iter\n self.explicit_residual = obj.explicit_residual\n self.resnorms = obj.resnorms\n self.tol = obj.tol\n self.x0 = obj.x0[:, 0]\n self.xk = obj.xk[:, 0]\n return\n\n def __repr__(self):\n string = \"pykry MINRES object\\n\"\n string += \" MMlr0 = [{}, ..., {}]\\n\".format(self.MMlr0[0], self.MMlr0[-1])\n string += \" MMlr0_norm = {}\\n\".format(self.MMlr0_norm)\n string += \" MlAMr: {} x {} matrix\\n\".format(*self.MlAMr.shape)\n string += \" Mlr0: [{}, ..., {}]\\n\".format(self.Mlr0[0], self.Mlr0[-1])\n string += \" flat_vecs: {}\\n\".format(self.flat_vecs)\n string += \" store_arnoldi: {}\\n\".format(self.store_arnoldi)\n string += \" ortho: {}\\n\".format(self.ortho)\n string += \" tol: {}\\n\".format(self.tol)\n string += \" 
maxiter: {}\\n\".format(self.maxiter)\n string += \" iter: {}\\n\".format(self.iter)\n string += \" explicit residual: {}\\n\".format(self.explicit_residual)\n string += \" resnorms: [{}, ..., {}]\\n\".format(\n self.resnorms[0], self.resnorms[-1]\n )\n string += \" x0: [{}, ..., {}]\\n\".format(self.x0[0], self.x0[-1])\n string += \" xk: [{}, ..., {}]\".format(self.xk[0], self.xk[-1])\n return string\n\n\ndef minres(\n A,\n b,\n M=None,\n Minv=None,\n Ml=None,\n Mr=None,\n inner_product=None,\n exact_solution=None,\n ortho=\"mgs\",\n x0=None,\n U=None,\n tol=1e-5,\n maxiter=None,\n use_explicit_residual=False,\n store_arnoldi=False,\n):\n assert len(A.shape) == 2\n assert A.shape[0] == A.shape[1]\n assert A.shape[1] == b.shape[0]\n\n if isinstance(A, LinearOperator):\n A = wrap_linear_operator(A)\n\n if isinstance(M, LinearOperator):\n M = wrap_linear_operator(M)\n\n if isinstance(Minv, LinearOperator):\n Minv = wrap_linear_operator(Minv)\n\n if isinstance(Ml, LinearOperator):\n Ml = wrap_linear_operator(Ml)\n\n if isinstance(Mr, LinearOperator):\n Mr = wrap_linear_operator(Mr)\n\n if inner_product:\n inner_product = wrap_inner_product(inner_product)\n\n # Make sure that the input vectors have two dimensions\n if U is not None:\n U = U.reshape(U.shape[0], -1)\n if x0 is not None:\n x0 = x0.reshape(U.shape[0], -1)\n\n linear_system = LinearSystem(\n A=A,\n b=b,\n M=M,\n Minv=Minv,\n Ml=Ml,\n ip_B=inner_product,\n # setting self_adjoin=True avoids a warning\n self_adjoint=True,\n exact_solution=exact_solution,\n )\n if U is None:\n out = KrypyMinres(\n linear_system,\n ortho=ortho,\n x0=x0,\n tol=tol,\n maxiter=maxiter,\n explicit_residual=use_explicit_residual,\n store_arnoldi=store_arnoldi,\n )\n else:\n out = KrypyDeflatedMinres(\n linear_system,\n ortho=ortho,\n x0=x0,\n U=U,\n tol=tol,\n maxiter=maxiter,\n explicit_residual=use_explicit_residual,\n store_arnoldi=store_arnoldi,\n )\n\n sol = Minres(out)\n return sol\n","sub_path":"pykry/minres.py","file_name":"minres.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"136447668","text":"'''\nWrite a function called answer(s) that, given a non-empty\nstring less than 200 characters in length describing the\nsequence of M&Ms, returns the maximum number of equal parts\nthat can be cut from the cake without leaving any leftovers.\n\nInputs:\n (string) s = \"abccbaabccba\"\nOutput:\n (int) 2\n\nInputs:\n (string) s = \"abcabcabcabc\"\nOutput:\n (int) 4\n'''\n\nimport random\n\nEXAMPLE1 = 'abccbaabccba'\nEXAMPLE2 = 'abcabcabcabc'\n\ndef answer(s):\n input_len = len(s)\n answer = 1;\n for num_slices in range(1, 200):\n if input_len % num_slices == 0:\n slice_length = input_len / num_slices\n slices = slice_cake(s, slice_length)\n if slices_equal(slices):\n answer = num_slices\n return answer\n\ndef slices_equal(parts):\n for part in parts:\n if part != parts[0]:\n return False\n return True\n\ndef slice_cake(s, pieces):\n return [''.join(x) for x in zip(*[list(s[z::pieces]) for z in range(pieces)])]\n\ndef generate(total_length, sequence_length):\n num_subs = total_length / sequence_length\n assert num_subs > 0\n sub = ''.join([random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(sequence_length)])\n stuff = ''.join(sub * (num_subs))\n assert len(stuff) > 0 and len(stuff) <= 200 \n return stuff\n \nif __name__ == '__main__':\n assert answer(EXAMPLE1) == 2\n assert answer(EXAMPLE2) == 4\n assert answer(generate(1, 1)) == 1\n assert 
answer(generate(200, 200)) == 1\n assert answer(generate(12, 6)) == 2\n assert answer(generate(50, 6)) == 8\n assert answer(generate(59, 13)) == 4\n assert answer(generate(52, 13)) == 4\n assert answer(generate(52, 1)) == 52\n\n","sub_path":"foo.bar/level1/challenge1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"413179116","text":"from functools import reduce\n\nfrom ground.core.arithmetic import robust_divide\nfrom ground.core.enums import Relation\nfrom ground.core.hints import (Expansion,\n Point,\n QuaternaryPointFunction,\n Scalar)\nfrom ground.core.shewchuk import (add_to_expansion,\n scale_expansion,\n sum_expansions,\n two_mul,\n two_one_mul,\n two_sub)\n\n\ndef point_squared_distance(start: Point,\n end: Point,\n point: Point,\n dot_producer: QuaternaryPointFunction[Scalar]\n ) -> Scalar:\n segment_squared_norm = dot_producer(start, end, start, end)\n end_factor_numerator = max(0, min(segment_squared_norm,\n dot_producer(start, point, start, end)))\n end_factor = robust_divide(end_factor_numerator, segment_squared_norm)\n start_factor_tail, start_factor_head = two_sub(1, end_factor)\n return sum_expansions(\n square_expansion(add_to_expansion(sum_expansions(\n two_one_mul(start_factor_tail, start_factor_head, start.x),\n two_mul(end_factor, end.x)), -point.x)),\n square_expansion(add_to_expansion(sum_expansions(\n two_one_mul(start_factor_tail, start_factor_head, start.y),\n two_mul(end_factor, end.y)), -point.y)))[-1]\n\n\ndef square_expansion(expansion: Expansion) -> Expansion:\n return reduce(sum_expansions, [scale_expansion(expansion, component)\n for component in expansion])\n\n\ndef segment_squared_distance(first_start: Point,\n first_end: Point,\n second_start: Point,\n second_end: Point,\n dot_producer: QuaternaryPointFunction[Scalar],\n segments_relater\n : QuaternaryPointFunction[Relation]\n ) -> Scalar:\n return (min(point_squared_distance(first_start, first_end, second_start,\n dot_producer),\n point_squared_distance(first_start, first_end, second_end,\n dot_producer),\n point_squared_distance(second_start, second_end, first_start,\n dot_producer),\n point_squared_distance(second_start, second_end, first_end,\n dot_producer))\n if segments_relater(first_start, first_end, second_start,\n second_end) is Relation.DISJOINT\n else 0)\n","sub_path":"Python/displaying data/learn_venv/Lib/site-packages/ground/core/metric/robust/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"167500507","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @file\t : Load_Test_Python_Code.Py\r\n# @Date : 2018-06-02 14:14:46\r\n# @Author : Destroyers (https://github.com/lguobin)\r\n# @Link : https://github.com/lguobin\r\n# @Version : $1.0$\r\n\r\nimport requests\r\n\r\nimport urllib.request\r\nfrom concurrent.futures import ProcessPoolExecutor\r\nimport threading\r\nimport os\r\nimport json\r\nimport time\r\n\r\nclass Requests():\r\n\t'''\r\n\t\t# Load_Test_Python_Code\r\n\t\t# Give you 2 ways requests\r\n\t\t# 1. requests package\r\n\t\t# 2. 
Python urllib.request package\r\n\t'''\r\n\tdef __init__(self):\r\n\t\t'''\r\n\t\t\t# Test info\r\n\t\t'''\r\n\t\tself.url = 'https://www.google.com'\r\n\t\tself.login = 'https://www.google.com/login'\r\n\t\tself.login_body = {\"name\":\"name\",\"password\":\"password\"}\r\n\r\n\tdef get(self):\r\n\t\ttry:\r\n\t\t\t# r = urllib.request.urlopen(self.url)\r\n\t\t\t# r = r.read().decode(\"utf-8\")\r\n\t\t\tr = requests.get(self.url)\r\n\t\t\tr.encoding = r.apparent_encoding\r\n\t\t\t'''\r\n\t\t\t\t# Warning\r\n\t\t\t\t# -->\t{'DATA'} is your response msg or HTTP code\r\n\t\t\t'''\r\n\t\t\tif \"{'DATA'}\" in r.text: \r\n\t\t\t\tprint(r)\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Sorry Not DATA match !!\")\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e)\r\n\r\n\tdef post(self):\r\n\t\ttry:\r\n\t\t\tr = requests.post(self.login , json = self.login_body)\r\n\t\t\tif \"access_token\" in r.text and \"code\" not in r.text:\r\n\t\t\t\tif \"a\" in r.text:\r\n\t\t\t\t\tprint(r)\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Sorry Not DATA match !!\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Sorry Not DATA match !!\")\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e)\r\n\r\ndef start():\r\n\tstart = Requests()\r\n\t# return start.get()\r\n\treturn start.post()\r\n\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\ti = 0\r\n\t\t# Load_test_start\r\n\t\t# tasks_number is the number of requests to send\r\n\t\ttasks_number = 10\r\n\t\tprint('start~~')\r\n\t\ttime1 = time.perf_counter()\r\n\t\twhile i < tasks_number:\r\n\t\t\tt = threading.Thread(target=start)\t# pass the callable itself; calling start() here would run the request before the thread starts\r\n\t\t\tt.start()\r\n\t\t\ti += 1\r\n\t\ttime2 = time.perf_counter()\r\n\t\ttimes = time2 - time1\r\n\t\tprint(f\"{times/tasks_number}\")\r\n\texcept Exception as e:\r\n\t\tprint(e)","sub_path":"Load_Test_Python_Code.py","file_name":"Load_Test_Python_Code.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"179331424","text":"import json\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox  # loading the submodule makes tk.messagebox available below\nimport matplotlib.pyplot as plt\nfrom ntpath import split, basename\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\n\n\ndef string_to_list(list_str, separator=',', delimiter='[]'):\n    return list_str.strip(delimiter).split(separator)\n\n\ndef file_name_from_path(path):\n    head, tail = split(path)\n    filename = tail or basename(head)\n    # removing filename extension\n    if \".\" in filename:\n        i = -1\n        while filename[i] != '.':\n            i -= 1\n        filename = filename[:i]\n    return filename\n\n\ndef sort_programs_dict(cluster_dict, sort_by):\n    for program in cluster_dict:\n        cluster_dict[program] = dict(sorted(\n            cluster_dict[program].items(), key=lambda item: item[1][sort_by], reverse=True))\n    return cluster_dict\n\n\ndef count_qualis_occurances_for_programs(src):\n    df = pd.read_csv(src)\n    cluster_dict = {}\n    programs_list = []\n    for i in df.index:\n        qualis = df[\"qualis\"][i]\n        cluster = df[\"Cluster\"][i]\n        programs_list = string_to_list(\n            df[\"programas\"][i], separator='-')\n        for program in programs_list:\n            if cluster not in cluster_dict:\n                cluster_dict[cluster] = {}\n            if program not in cluster_dict[cluster]:\n                cluster_dict[cluster][program] = {\"A1\": 0,\n                                                  \"A2\": 0,\n                                                  \"A3\": 0,\n                                                  \"A4\": 0,\n                                                  \"B1\": 0,\n                                                  \"B2\": 0,\n                                                  \"B3\": 0,\n                                                  \"B4\": 0,\n                                                  \"C/NI/NP\": 0,\n                                                  \"A1/A2/A3/A4\": 0,\n                                                  \"B1/B2/B3/B4\": 0,\n                                                  \"total\": 0}\n            cluster_dict[cluster][program][qualis] += 1\n            cluster_dict[cluster][program][\"total\"] += 1\n            if qualis in 
(\"A1\", \"A2\", \"A3\", \"A4\"):\n cluster_dict[cluster][program][\"A1/A2/A3/A4\"] += 1\n elif qualis in (\"B1\", \"B2\", \"B3\", \"B4\"):\n cluster_dict[cluster][program][\"B1/B2/B3/B4\"] += 1\n return cluster_dict\n\n\ndef dict_to_json(filename, dictionary):\n # converting dictionary into a json string\n json_dict = json.dumps(dictionary)\n with open(filename, \"w\") as file:\n file.write(json_dict)\n\n\ndef dict_from_json(src):\n with open(src) as json_file:\n dictionary = json.load(json_file)\n return dictionary\n\n\ndef make_dict(csv_file, cluster_list=None, sort_by=\"A1/A2/A3/A4\"):\n cluster_dict = sort_programs_dict(count_qualis_occurances_for_programs(csv_file), sort_by)\n selected_clusters = cluster_dict\n # checking if a list of clusters was specified\n if cluster_list is not None:\n selected_clusters = string_to_list(cluster_list, separator='-')\n filename = \"./\" + file_name_from_path(csv_file) + \".json\"\n dict_to_json(filename, {key: value for key, value in cluster_dict.items() if key in selected_clusters})\n\n\ndef make_values_dict_from_keys(dictionary, keys):\n values_dict = {k: [] for k in keys}\n for key in dictionary:\n for value in values_dict:\n values_dict[value].append(dictionary[key][value])\n return values_dict\n\n\ndef label_bar(ax, bars):\n for bar in bars:\n height = bar.get_height()\n ax.annotate('{}'.format(height),\n xy=(bar.get_x() + bar.get_width() / 2, height),\n xytext=(0, 3),\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n\ndef get_program_name(program):\n programs = {\n '23001011005P7': 'CIÊNCIAS ODONTOLÓGICAS',\n '23001011031P8': 'CIÊNCIAS DA SAÚDE',\n '23001011033P0': 'SAÚDE COLETIVA',\n '23001011001P1': 'EDUCAÇÃO',\n '23001011036P0': 'DESENVOLVIMENTO E MEIO AMBIENTE',\n '23001011018P1': 'GEODINÂMICA E GEOFÍSICA',\n '23001011028P7': 'GEOGRAFIA',\n '23001011051P9': 'ENGENHARIA CIVIL',\n '23001011066P6': 'GESTÃO DE PROCESSOS INSTITUCIONAIS',\n '23001011021P2': 'ENGENHARIA DE PRODUÇÃO',\n '23001011010P0': 'FÍSICA',\n '23001011024P1': 'ARQUITETURA E URBANISMO',\n '23001011052P5': 'ARQUITETURA, PROJETO E MEIO AMBIENTE',\n '23001011059P0': 'SISTEMÁTICA E EVOLUÇÃO',\n '23001011061P4': 'CIÊNCIAS FLORESTAIS',\n '23001011056P0': 'CIÊNCIAS CLIMÁTICAS',\n '33283010001P5': 'ENSINO DE FÍSICA - PROFIS',\n '23001011012P3': 'QUÍMICA',\n '23001011040P7': 'CIÊNCIAS FARMACÊUTICAS',\n '23001011047P1': 'DESENVOLVIMENTO E INOVAÇÃO TECNOLÓGICA EM MEDICAMENTOS',\n '23001011008P6': 'ENGENHARIA ELÉTRICA',\n '23001011026P4': 'CIÊNCIA E ENGENHARIA DE MATERIAIS',\n '23001011174P3': 'INOVAÇÃO EM TECNOLOGIAS EDUCACIONAIS',\n '23001011003P4': 'PSICOBIOLOGIA',\n '23001011054P8': 'NEUROCIÊNCIAS',\n '23001011020P6': 'BIOQUÍMICA',\n '23001011038P2': 'HISTÓRIA',\n '23001011072P6': 'GESTÃO DA QUALIDADE EM SERVIÇOS DE SAÚDE',\n '31010016027P9': 'SAÚDE DA FAMÍLIA',\n '23001011030P1': 'FILOSOFIA',\n '23001011037P6': 'ANTROPOLOGIA SOCIAL',\n '22003010017P5': 'BIOTECNOLOGIA - Rede RENORBIO',\n '23001011011P7': 'ENGENHARIA QUÍMICA',\n '23001011074P9': 'ENGENHARIA MECATRÔNICA',\n '23001011025P8': 'PSICOLOGIA',\n '23001011075P5': 'NUTRIÇÃO',\n '23001011041P3': 'CIÊNCIA E ENGENHARIA DE PETRÓLEO',\n '23001011170P8': 'BIOINFORMÁTICA',\n '23001011058P3': 'DEMOGRAFIA',\n '23001011053P1': 'ESTUDOS DA MÍDIA',\n '23001011022P9': 'SISTEMAS E COMPUTAÇÃO',\n '23001011176P6': 'ENGENHARIA TÊXTIL',\n '23001011034P7': 'DIREITO',\n '23001011046P5': 'TURISMO',\n '23001011029P3': 'ENFERMAGEM',\n '23001011071P0': 'TECNOLOGIA DA INFORMAÇÃO',\n '23001011023P5': 'ENGENHARIA SANITÁRIA E AMBIENTAL',\n 
'23001011027P0': 'SERVIÇO SOCIAL',\n '23001011060P8': 'EDUCAÇÃO FÍSICA',\n '23001011063P7': 'ENSINO NA SAÚDE',\n '23001011013P0': 'ESTUDOS DA LINGUAGEM',\n '23001011069P5': 'LETRAS',\n '23001011050P2': 'PRODUÇÃO ANIMAL',\n '23001011015P2': 'ECOLOGIA',\n '23001011067P2': 'MÚSICA',\n '23001011070P3': 'BIOLOGIA ESTRUTURAL E FUNCIONAL',\n '23001011172P0': 'EDUCAÇÃO, TRABALHO E INOVAÇÃO EM MEDICINA',\n '23001011055P4': 'ESTUDOS URBANOS E REGIONAIS',\n '23001011007P0': 'ADMINISTRAÇÃO',\n '23001011076P1': 'CIÊNCIAS CONTÁBEIS',\n '23001011009P2': 'ENGENHARIA MECÂNICA',\n '23001011177P2': 'SAÚDE E SOCIEDADE',\n '23001011035P3': 'CIÊNCIAS BIOLÓGICAS',\n '23001011073P2': 'BIOLOGIA PARASITÁRIA',\n '23001011043P6': 'FISIOTERAPIA',\n '23001011171P4': 'SAÚDE COLETIVA',\n '23001011032P4': 'ENSINO DE CIÊNCIAS NATURAIS E MATEMÁTICA',\n '23001011077P8': 'ENSINO DE CIÊNCIAS E MATEMÁTICA',\n '24001015081P8': 'FONOAUDIOLOGIA',\n '31001017169P2': 'QUÍMICA EM REDE NACIONAL',\n '23001011062P0': 'DESIGN',\n '23001011057P7': 'GESTÃO PÚBLICA',\n '23001011004P0': 'CIÊNCIAS SOCIAIS',\n '23001011173P7': 'CIÊNCIAS DA REABILITAÇÃO',\n '23001011042P0': 'MATEMÁTICA APLICADA E ESTATISTICA',\n '52001016048P0': 'NANOTECNOLOGIA FARMACÊUTICA',\n '23001011044P2': 'ARTES CÊNICAS',\n '41002016026P1': 'PROFARTES',\n '23001011078P4': 'GEOGRAFIA',\n '33004137068P8': 'EDUCAÇÃO FÍSICA',\n '22001018074P6': 'DESENVOLVIMENTO E MEIO AMBIENTE',\n '23001011080P9': 'GESTÃO DA INFORMAÇÃO E DO CONHECIMENTO',\n '23001011175P0': 'GESTÃO E INOVAÇÃO EM SAÚDE',\n '23001011079P0': 'CIÊNCIA, TECNOLOGIA E INOVAÇÃO',\n '23001011039P9': 'ECONOMIA',\n '23001011068P9': 'ENERGIA ELÉTRICA',\n '31001017155P1': 'ENSINO DE HISTÓRIA',\n '24001015046P8': 'FILOSOFIA',\n '31075010001P2': 'MATEMÁTICA EM REDE NACIONAL',\n '53001010073P0': 'CONTABILIDADE'\n }\n return programs[program]\n\n\ndef plot(subdict, program_code_list, count_dict, title, program_name, image_name, template=\"program\", comparing=False,\n auto_label=False, cluster=False, in_this_frame=None):\n # defining some specific attributes based on what template is being used for the graphs\n if template == \"program\":\n bar_width = 0.08\n colors = [\"#54c73a\", \"#abd216\", \"#ccce0f\", \"#f0f200\",\n \"#ffce00\", \"#ff9a00\", \"#ff6700\", \"#ff3300\", \"#ff0000\"]\n tick_translate = 4\n axvline_translate = 10 * bar_width + bar_width / 3\n elif template == \"compare\":\n bar_width = 0.15\n colors = [\"#d6af36\", \"#a7a7ad\", \"#a77044\"]\n tick_translate = 1\n axvline_translate = 4 * bar_width + bar_width / 3\n fig, ax = plt.subplots()\n x = np.arange(len(program_code_list))\n multiplier = 0\n for key in count_dict:\n bar = ax.bar(x + bar_width * multiplier, count_dict[key], width=bar_width, label=key,\n color=colors[multiplier])\n if auto_label:\n # labeling each bar\n label_bar(ax, bar)\n multiplier += 1\n # plotting vertical lines between each program or group\n j = 0\n for i in range(len(program_code_list) - 1):\n if i > 0:\n j = bar_width + bar_width / 3\n plt.axvline(x=axvline_translate + i * (axvline_translate + bar_width + j), color=\"#DDDDDD\")\n if program_name:\n program_code_list = [get_program_name(program) for program in program_code_list]\n # setting up x labels and legend\n ax.set_xticks(x + tick_translate * bar_width)\n ax.set_xticklabels(program_code_list)\n ax.legend(bbox_to_anchor=(1.04, 0), loc=\"lower left\")\n # setting up plot design\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n 
ax.spines['bottom'].set_color('#DDDDDD')\n ax.tick_params(left=False, bottom=False)\n ax.set_axisbelow(True)\n ax.yaxis.grid(True, color='#DDDDDD')\n ax.set_title(title)\n ax.set_ylabel(\"Quantidade\")\n if cluster:\n ax.set_xlabel(\"Grupos\")\n else:\n ax.set_xlabel(\"Programas\")\n # rotating x labels if number of programs/groups is greater than 3\n # this way labels won't overlap each other\n if len(subdict) > 3:\n fig.autofmt_xdate()\n fig.tight_layout()\n if comparing:\n plt.gca().get_xticklabels()[-1].set_color('red')\n if in_this_frame is None:\n fig.savefig(image_name)\n else:\n canvas = FigureCanvasTkAgg(fig, master=in_this_frame)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n toolbar = NavigationToolbar2Tk(canvas, in_this_frame)\n toolbar.update()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n\ndef plot_program(dict_src, program_code_list_str, cluster, image_name=None, comparing=False, program_name=False,\n where_to_store=\"./\", in_this_frame=None):\n program_code_list = string_to_list(program_code_list_str, separator=\"-\")\n subdict = {key: value for key, value in dict_from_json(\n dict_src)[cluster].items() if key in program_code_list}\n count_dict = make_values_dict_from_keys(subdict, [\"A1\", \"A2\", \"A3\", \"A4\", \"B1\", \"B2\", \"B3\", \"B4\", \"C/NI/NP\"])\n if image_name is None:\n image_name = where_to_store + file_name_from_path(\n dict_src) + \"_\" + program_code_list_str + \".png\"\n plot(subdict, program_code_list, count_dict, \"Produções por programa\", program_name, image_name=image_name,\n comparing=comparing, in_this_frame=in_this_frame)\n\n\ndef plot_n_best(dict_src, n_str, cluster, compare_to=None, image_name=None, program_name=False, in_this_frame=None):\n n = int(n_str)\n program_code_list_str = \"[\"\n count = 0\n for program in dict_from_json(dict_src)[cluster]:\n if count == n:\n break\n if count != n - 1:\n program_code_list_str += program + '-'\n else:\n program_code_list_str += program\n if compare_to is not None:\n comparing = True\n program_code_list_str += '-' + compare_to + ']'\n else:\n comparing = False\n program_code_list_str += ']'\n count += 1\n plot_program(dict_src, program_code_list_str, cluster, program_name=program_name, image_name=image_name,\n comparing=comparing, in_this_frame=in_this_frame)\n\n\ndef plot_compare(dict_src, program_code_list_str, cluster, image_name=None, program_name=False, where_to_store=\"./\",\n in_this_frame=None):\n program_code_list = string_to_list(program_code_list_str, separator=\"-\")\n subdict = {key: value for key, value in dict_from_json(\n dict_src)[cluster].items() if key in program_code_list}\n count_dict = make_values_dict_from_keys(subdict, [\"A1/A2/A3/A4\", \"B1/B2/B3/B4\", \"C/NI/NP\"])\n if image_name is None:\n image_name = where_to_store + file_name_from_path(\n dict_src) + \"_\" + program_code_list_str + \"_compare.png\"\n plot(subdict, program_code_list, count_dict, \"Comparação entre programas\", program_name, image_name=image_name,\n template=\"compare\", in_this_frame=in_this_frame)\n\n\ndef plot_program_cluster(dict_src, program, cluster_list_str=None, image_name=None, where_to_store=\"./\",\n in_this_frame=None):\n dictionary = dict_from_json(dict_src)\n if cluster_list_str is not None:\n selected_clusters = string_to_list(cluster_list_str, separator='-')\n else:\n selected_clusters = dictionary\n subdict = {key: dictionary[key][program] for key in selected_clusters if program in dictionary[key]}\n count_dict = 
make_values_dict_from_keys(subdict, [\"A1\", \"A2\", \"A3\", \"A4\", \"B1\", \"B2\", \"B3\", \"B4\", \"C/NI/NP\"])\n if image_name is None:\n image_name = where_to_store + file_name_from_path(\n dict_src) + \"_\" + program + '_' + \".png\"\n plot(subdict, subdict.keys(), count_dict, \"Publicações por grupo\", False, image_name=image_name, cluster=True,\n in_this_frame=in_this_frame)\n\n\ndef plot_cluster_compare(dict_src, program, cluster_list_str=None, image_name=None, where_to_store=\"./\",\n in_this_frame=None):\n dictionary = dict_from_json(dict_src)\n if cluster_list_str is not None:\n selected_clusters = string_to_list(cluster_list_str, separator='-')\n else:\n selected_clusters = dictionary\n subdict = {key: dictionary[key][program] for key in selected_clusters if program in dictionary[key]}\n count_dict = make_values_dict_from_keys(subdict, [\"A1/A2/A3/A4\", \"B1/B2/B3/B4\", \"C/NI/NP\"])\n if image_name is None:\n image_name = where_to_store + file_name_from_path(\n dict_src) + \"_\" + program + '_' + \"compare.png\"\n plot(subdict, subdict.keys(), count_dict, \"Comparação entre grupos\", False, image_name=image_name,\n template=\"compare\", cluster=True, in_this_frame=in_this_frame)\n\n\nclass DataInputCell(tk.Frame):\n def __init__(self, master=None, name=None):\n super().__init__(master)\n self.master = master\n self.name = name\n self.label_name = tk.Label(self, text=self.name, width=20)\n self.label_name.pack(side=tk.LEFT)\n self.entry_name = tk.Entry(self, text=self.name, width=40)\n self.entry_name.pack(side=tk.RIGHT)\n self.pack()\n\n\nclass DataInput(tk.Frame):\n def __init__(self, master=None, names=None):\n super().__init__(master)\n self.master = master\n self.names = names\n self.data = {}\n for self.name in self.names:\n self.data[self.name] = DataInputCell(self, self.name)\n self.pack()\n\n\nclass OneProgram(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack(expand=1, fill=tk.BOTH)\n self.control_area = tk.Frame(self)\n self.control_area.pack(side=tk.LEFT)\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.dict_src = \"./Particoes.json\"\n self.control()\n\n def plot(self):\n self.plotting_area.destroy()\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.program = self.data_input.data[\"Program\"].entry_name.get()\n self.cluster_list_str = self.data_input.data[\"Cluster List\"].entry_name.get()\n plot_cluster_compare(dict_src=self.dict_src, program=self.program, cluster_list_str=self.cluster_list_str,\n in_this_frame=self.plotting_area)\n\n def control(self):\n self.names = [\"Program\", \"Cluster List\"]\n self.data_input = DataInput(self.control_area, self.names)\n if self.data_input.data[\"Program\"].entry_name.get() == \"\":\n self.data_input.data[\"Program\"].entry_name.insert(tk.END, '23001011010P0')\n if self.data_input.data[\"Cluster List\"].entry_name.get() == \"\":\n self.data_input.data[\"Cluster List\"].entry_name.insert(tk.END, '[cluster1-cluster2]')\n self.plotting_button = tk.Button(self.control_area, text=\"Plot\", command=self.plot)\n self.plotting_button.pack(side=tk.RIGHT)\n\n\nclass ProgramCompare(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack(expand=1, fill=tk.BOTH)\n self.control_area = tk.Frame(self)\n self.control_area.pack(side=tk.LEFT)\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.dict_src = \"./Particoes.json\"\n 
self.control()\n\n def plot(self):\n self.plotting_area.destroy()\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.program_code_list_str = self.data_input.data[\"Program List\"].entry_name.get()\n self.cluster = self.data_input.data[\"Cluster \"].entry_name.get()\n plot_compare(dict_src=self.dict_src, program_code_list_str=self.program_code_list_str, cluster=self.cluster,\n program_name=self.program_name, in_this_frame=self.plotting_area)\n\n def control(self):\n self.names = [\"Program List\", \"Cluster \"]\n self.data_input = DataInput(self.control_area, self.names)\n if self.data_input.data[\"Program List\"].entry_name.get() == \"\":\n self.data_input.data[\"Program List\"].entry_name.insert(tk.END, '[23001011010P0-23001011031P8-23001011020P6]')\n if self.data_input.data[\"Cluster \"].entry_name.get() == \"\":\n self.data_input.data[\"Cluster \"].entry_name.insert(tk.END, 'cluster1')\n self.program_name = True\n self.plotting_button = tk.Button(self.control_area, text=\"Plot\", command=self.plot)\n self.plotting_button.pack(side=tk.RIGHT)\n\n\nclass NBest(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack(expand=1, fill=tk.BOTH)\n self.control_area = tk.Frame(self)\n self.control_area.pack(side=tk.LEFT)\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.dict_src = \"./Particoes.json\"\n self.control()\n\n def plot(self):\n self.plotting_area.destroy()\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.n = int(self.data_input.data[\"Number of Programs\"].entry_name.get())\n self.cluster = self.data_input.data[\"Cluster\"].entry_name.get()\n self.compare_to = self.data_input.data[\"Compare to\"].entry_name.get()\n plot_n_best(dict_src=self.dict_src, n_str=self.n, cluster=self.cluster, compare_to=self.compare_to,\n program_name=self.program_name, in_this_frame=self.plotting_area)\n\n def control(self):\n self.names = [\"Number of Programs\", \"Cluster\", \"Compare to\"]\n self.data_input = DataInput(self.control_area, self.names)\n if self.data_input.data[\"Number of Programs\"].entry_name.get() == \"\":\n self.data_input.data[\"Number of Programs\"].entry_name.insert(tk.END, '2')\n if self.data_input.data[\"Cluster\"].entry_name.get() == \"\":\n self.data_input.data[\"Cluster\"].entry_name.insert(tk.END, 'cluster1')\n if self.data_input.data[\"Compare to\"].entry_name.get() == \"\":\n self.data_input.data[\"Compare to\"].entry_name.insert(tk.END, '23001011030P1')\n self.program_name = True\n self.plotting_button = tk.Button(self.control_area, text=\"Plot\", command=self.plot)\n self.plotting_button.pack(side=tk.RIGHT)\n\n\nclass ProgramPerCluster(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack(expand=1, fill=tk.BOTH)\n self.control_area = tk.Frame(self)\n self.control_area.pack(side=tk.LEFT)\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.dict_src = \"./Particoes.json\"\n self.control()\n\n def plot(self):\n self.plotting_area.destroy()\n self.plotting_area = tk.Frame(self)\n self.plotting_area.pack(side=tk.RIGHT)\n self.program = self.data_input.data[\"Program \"].entry_name.get()\n self.cluster_list_str = self.data_input.data[\"Cluster List \"].entry_name.get()\n plot_program_cluster(dict_src=self.dict_src, program=self.program, cluster_list_str=self.cluster_list_str,\n in_this_frame=self.plotting_area)\n\n def control(self):\n 
self.names = [\"Program \", \"Cluster List \"]\n self.data_input = DataInput(self.control_area, self.names)\n if self.data_input.data[\"Program \"].entry_name.get() == \"\":\n self.data_input.data[\"Program \"].entry_name.insert(tk.END, '23001011010P0')\n if self.data_input.data[\"Cluster List \"].entry_name.get() == \"\":\n self.data_input.data[\"Cluster List \"].entry_name.insert(tk.END, '[cluster1-cluster2]')\n self.plotting_button = tk.Button(self.control_area, text=\"Plot\", command=self.plot)\n self.plotting_button.pack(side=tk.RIGHT)\n\n\nclass ApplicationStatusBar(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.bar = tk.Label(self, text=\"Welcome to Scylax!\", bd=1, relief=tk.SUNKEN, anchor=tk.W)\n self.bar.pack(expand=1, fill=tk.X)\n self.pack(side=tk.BOTTOM, fill=tk.X)\n\n\nclass ApplicationBody(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.present_screen = None\n self.pack(expand=1, fill=tk.BOTH)\n\n def one_program(self):\n self.present_screen is not None and self.present_screen.destroy()\n self.present_screen = OneProgram(self)\n\n def program_compare(self):\n self.present_screen is not None and self.present_screen.destroy()\n self.present_screen = ProgramCompare(self)\n\n def n_best(self):\n self.present_screen is not None and self.present_screen.destroy()\n self.present_screen = NBest(self)\n\n def program_per_cluster(self):\n self.present_screen is not None and self.present_screen.destroy()\n self.present_screen = ProgramPerCluster(self)\n\n\nclass ApplicationTopMenu:\n def __init__(self, master=None):\n self.master = master\n self.master.master.menu_bar = tk.Menu(self.master.master)\n self.master.master.file_menu = tk.Menu(self.master.master.menu_bar, tearoff=0)\n self.master.master.file_menu.add_command(label=\"Load\", command=self.load_data)\n self.master.master.menu_bar.add_cascade(label=\"Data\", menu=self.master.master.file_menu)\n self.master.master.edit_menu = tk.Menu(self.master.master.menu_bar, tearoff=0)\n self.master.master.edit_menu.add_command(label=\"One Program\", command=self.master.application_body.one_program)\n self.master.master.edit_menu.add_command(label=\"Program Compare\",\n command=self.master.application_body.program_compare)\n self.master.master.edit_menu.add_command(label=\"N Best\", command=self.master.application_body.n_best)\n self.master.master.edit_menu.add_command(label=\"Program per cluster\",\n command=self.master.application_body.program_per_cluster)\n self.master.master.menu_bar.add_cascade(label=\"Plot\", menu=self.master.master.edit_menu)\n self.master.master.config(menu=self.master.master.menu_bar)\n\n def load_data(self):\n self.master.application_status_bar.bar[\"text\"] = \"Loading...\"\n try:\n if os.path.exists(\"./Particoes.json\"):\n if tk.messagebox.askquestion(\"This data has been imported before\",\n \"Would you like to import anyway?\") == \"no\":\n self.master.application_status_bar.bar[\"text\"] = \"Finished!\"\n return\n file_name = filedialog.askopenfilename(initialdir=\".\", title=\"Select file to load data\",\n filetypes=((\"csv files\", \"*.csv\"), (\"all files\", \"*.*\")))\n make_dict(file_name)\n self.master.application_status_bar.bar[\"text\"] = \"Finished!\"\n except:\n self.master.application_status_bar.bar[\"text\"] = \"Fail!\"\n\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.master.title(\"Scylax\")\n w = 
int(self.master.winfo_screenwidth())\n h = int(self.master.winfo_screenheight())\n self.master.geometry(\"{}x{}\".format(w, h))\n self.application_status_bar = ApplicationStatusBar(self)\n self.application_body = ApplicationBody(self)\n self.application_top_menu = ApplicationTopMenu(self)\n self.pack(expand=1, fill=tk.BOTH)\n\n\nroot = tk.Tk()\napp = Application(master=root)\nroot.mainloop()\n","sub_path":"summarizer/src/summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":26444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"73699827","text":"import responses\nimport quantaq\nimport os\nimport sys\nimport pandas as pd\nimport pytest\n\nfrom quantaq.exceptions import QuantAQAPIException\n\n@responses.activate\ndef test_models_get():\n responses.add(responses.GET, \"https://localhost/device-api/v1/calibration-models/SN000-000\", \n status=200, \n json={\n \"co\": {\n \"calibration\": None,\n \"id\": 1,\n \"model\": None,\n \"param\": \"co\"\n }, \n }\n )\n \n client = quantaq.client.APIClient(\n \"https://localhost/device-api/\", \n api_key=\"a123\", version=\"v1\")\n\n # test the GET verb\n resp = client.models.get(sn=\"SN000-000\")\n\n assert type(resp) == dict\n\n@responses.activate\ndef test_models_add():\n responses.add(responses.POST, \"https://localhost/device-api/v1/calibration-models/\", \n status=201, \n json={}\n )\n \n client = quantaq.client.APIClient(\n \"https://localhost/device-api/\", \n api_key=\"a123\", version=\"v1\")\n\n # test the GET verb\n resp = client.models.add(\n sn=\"SN000-000\", \n param='co',\n name='test-co-model',\n object_name='file1.sav',\n training_file='obj1/training.csv',\n error=dict(r2=0.87, rmse=.97, mae=.5)\n )\n\n assert type(resp) == dict\n assert responses.calls[0].response.status_code == 201\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475851045","text":"import numpy as np\n\n\nclass RandomEpisodeMarket:\n\n def __init__(self, sampler, window_state, open_cost, direction, risk_averse, additional_features):\n self.sampler = sampler\n self.window_state = window_state\n self.additional_features = additional_features\n self.open_cost = open_cost\n self.direction = direction\n self.risk_averse = risk_averse\n\n self.action_labels = ['empty', 'open', 'keep']\n self.n_action = len(self.action_labels)\n self.state_shape = (window_state + additional_features, self.sampler.n_var)\n self.t0 = window_state - 1\n self.empty = False\n self.t = None\n self._max_profit = 0\n self.price_df = None\n self.min_max_df = None\n self.title = None\n self.t_max = None\n\n @staticmethod\n def find_ideal(p, just_once):\n if not just_once:\n diff = p[1:] - p[:-1]\n return sum(np.minimum(np.zeros(diff.shape), diff))\n else:\n best = 0\n max_price = p[0]\n min_price = p[0]\n for price in p:\n if price < min_price:\n min_price = price\n if price > max_price:\n max_price = price\n min_price = price\n delta = max_price - min_price\n if delta > best:\n best = delta\n return best\n\n def get_ideal(self):\n return self.find_ideal(self.price_df.price.values[self.t:], just_once=True)\n\n def get_state(self):\n state = self.price_df.price.values[self.t - self.window_state:self.t]\n state_min = min(state)\n state = state - state_min\n t_timestamp = self.price_df.timestamp.values[self.t]\n min_max_state = self.min_max_df[\n 
self.min_max_df.timestamp <= (t_timestamp - np.timedelta64(8, 'm'))\n ].price.values[-self.additional_features:]\n if len(min_max_state) != self.additional_features:\n raise RuntimeError(\n 'len(min_max_state) {} != {}. start_time_key = {}'.format(\n len(min_max_state), self.additional_features, self.sampler.start_time_key\n )\n )\n min_max_state = min_max_state - state_min\n return np.concatenate([min_max_state, state])\n\n def get_valid_actions(self):\n if self.empty:\n return [0, 1] # wait, open\n else:\n return [0, 2] # close, keep\n\n def get_noncash_reward(self, empty=None):\n t = self.t\n if empty is None:\n empty = self.empty\n reward = self.direction * (self.price_df.price.values[t + 1] - self.price_df.price.values[t])\n if empty:\n reward -= self.open_cost\n if reward < 0:\n reward *= (1. + self.risk_averse)\n return reward\n\n def step(self, action):\n if action == 0: # cash (wait/close)\n reward = 0.\n self.empty = True\n elif action == 1: # open\n reward = self.get_noncash_reward()\n self.empty = False\n elif action == 2: # keep\n reward = self.get_noncash_reward()\n else:\n raise ValueError('no such action: ' + str(action))\n self.t += 1\n return self.get_state(), reward, self.t == self.t_max, self.get_valid_actions()\n\n def reset(self, rand_price=True, start_time_key=None):\n self.empty = True\n self.t = self.t0 + 1\n if rand_price:\n self.price_df, self.min_max_df, start_time, self.title = self.sampler.sample(\n start_time_key=start_time_key)\n self.t_max = len(self.price_df) - 1\n self._max_profit = self.find_ideal(self.price_df.price.values[self.t:], just_once=True)\n return self.get_state(), self.get_valid_actions()\n","sub_path":"src/emulator.py","file_name":"emulator.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74402101","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 2 11:02:44 2018\r\n题目十二:使用re爬取天气信息\r\n1.天气描述,天气温度,天气气压\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport urllib.request as r#导入联网工具包,命令为r\r\nurl='http://api.openweathermap.org/data/2.5/forecast?q=guilin,cn&mode=json&lang=zh_cn&&APPID=6a67ed641c0fda8b69715c43518b6996&units=metric'\r\ndata=r.urlopen(url).read().decode('utf-8')\r\n#print(data)\r\nimport re\r\nls1=re.compile('\"description\":\"(.*?)\"').findall(data)##天气描述\r\nls2=re.compile('\"temp\":(.*?)\"').findall(data)##天气温度\r\nls3=re.compile('\"pressure\":(.*?)\"').findall(data)##天气气压\r\nprint(\"桂林未来5天的天气描述是:{}\".format(ls1))\r\nprint(\"桂林未来5天的天气温度是:{}\".format(ls2))\r\nprint(\"桂林未来5天的天气气压是:{}\".format(ls3))","sub_path":"使用正则爬取天气.py","file_name":"使用正则爬取天气.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"19321411","text":"from django import template\nfrom django.template import Context\nfrom django.core.paginator import Paginator, InvalidPage\n\nregister = template.Library()\n\nfrom paginator.paginator import Paginator\n\nclass PaginatorNode(template.Node):\n def __init__(self, object_list, per_page, max_page_nav, max_jumper, template):\n self.paginator = Paginator(object_list,\n per_page=per_page,\n max_page_nav=max_page_nav,\n max_jumper=max_jumper)\n self.template = template\n\n def render(self, context):\n t = template.loader.get_template(self.template)\n return t.render(\n Context({'paginator': paginator},\n autoescape=context.autoescape)\n )\n\n\ndef do_render_page_nav(parser, token):\n try:\n # split_contents() knows not to 
split quoted strings.\n        tag_name, date_to_be_formatted, format_string = token.split_contents()\n    except ValueError:\n        raise template.TemplateSyntaxError(\"%r tag requires exactly two arguments\" % token.contents.split()[0])\n\n    if not (format_string[0] == format_string[-1] and format_string[0] in ('\"', \"'\")):\n        raise template.TemplateSyntaxError(\"%r tag's argument should be in quotes\" % tag_name)\n    # NOTE: RenderPageNavNode is not defined in this module; this handler appears to be\n    # unfinished scaffolding adapted from the django custom-tag documentation.\n    return RenderPageNavNode(date_to_be_formatted, format_string[1:-1])\n\nregister.tag('render_page_nav', do_render_page_nav)\n\n\n\nfrom django.utils.safestring import mark_safe\n\n@register.filter()\ndef render_nav(paginator):\n    t = template.loader.get_template('paginator/paginator.html')\n    return mark_safe(t.render(\n        Context({'paginator': paginator},\n                )))\n\n","sub_path":"paginator/templatetags/paginator_tags.py","file_name":"paginator_tags.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"385162251","text":"# coding: utf-8\nfrom sqlalchemy import Boolean, CheckConstraint, Column, Date, DateTime, ForeignKey, Integer, LargeBinary, String, Text, UniqueConstraint\nfrom sqlalchemy.schema import FetchedValue\nfrom sqlalchemy.orm import relationship\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nfrom app import db\n\nfrom flask import url_for\n\n\nclass PaginatedAPIMixin(object):\n    @staticmethod\n    def to_collection_dict(query, page, per_page, endpoint, **kwargs):\n        resources = query.paginate(page, per_page, False)\n        data = {\n            'items': [\n                dict(item) for item in resources.items],\n            '_meta': {\n                'page': page,\n                'per_page': per_page,\n                'total_pages': resources.pages,\n                'total_items': resources.total},\n            '_links': {\n                'self': url_for(\n                    endpoint,\n                    _external=True,\n                    page=page,\n                    per_page=per_page,\n                    **kwargs),\n                'next': url_for(\n                    endpoint,\n                    _external=True,\n                    page=page + 1,\n                    per_page=per_page,\n                    **kwargs) if resources.has_next else None,\n                'prev': url_for(\n                    endpoint,\n                    _external=True,\n                    page=page - 1,\n                    per_page=per_page,\n                    **kwargs) if resources.has_prev else None}}\n        return data\n\n\nclass Agenda(db.Model):\n    __tablename__ = 'agenda'\n    __table_args__ = {'schema': 'public'}\n\n    agenda_id = db.Column(\n        db.Integer,\n        primary_key=True,\n        server_default=db.FetchedValue())\n    title = db.Column(db.String(128), nullable=False)\n    sitting_id = db.Column(db.ForeignKey('public.sitting.sitting_id'))\n    body = db.Column(db.Text)\n\n    sitting = db.relationship(\n        'Sitting',\n        primaryjoin='Agenda.sitting_id == Sitting.sitting_id',\n        backref='agendas')\n\n\nclass Alfie(db.Model):\n    __tablename__ = 'alfie'\n    __table_args__ = {'schema': 'public'}\n\n    alfie_id = db.Column(\n        db.Integer,\n        primary_key=True,\n        server_default=db.FetchedValue())\n    head_id = db.Column(db.ForeignKey('public.doc.doc_id'), index=True)\n    alfie_name = db.Column(db.Text, nullable=False)\n    alfie_key = db.Column(db.Text, nullable=False)\n    alfie_date = db.Column(db.Date, nullable=False)\n    type = db.Column(db.String(7), nullable=False)\n    sitting_id = db.Column(\n        db.ForeignKey(\n            'public.sitting.sitting_id',\n            match='FULL'))\n\n    head = db.relationship(\n        'Doc',\n        primaryjoin='Alfie.head_id == Doc.doc_id',\n        backref='doc_alfies')\n    sitting = db.relationship(\n        'Sitting',\n        primaryjoin='Alfie.sitting_id == Sitting.sitting_id',\n        backref='sitting_alfies')\n\n\nclass Country(db.Model):\n    __tablename__ = 'country'\n    __table_args__ = {'schema': 'public'}\n\n    country_id = db.Column(db.String(2), primary_key=True)\n    
iso_name = db.Column(db.String(80), nullable=False)\n country_name = db.Column(db.String(80), nullable=False)\n iso3 = db.Column(db.String(3))\n numcode = db.Column(db.Integer)\n language = db.Column(db.String(5), nullable=False)\n\n\nclass Doc(db.Model):\n __tablename__ = 'doc'\n __table_args__ = {'schema': 'public'}\n\n doc_id = db.Column(db.Integer, primary_key=True)\n parliament_id = db.Column(db.ForeignKey('public.parliament.parliament_id'))\n owner_id = db.Column(db.ForeignKey('public.user.user_id'), nullable=False)\n type = db.Column(db.String(128), nullable=False)\n doc_type = db.Column(db.String(128))\n doc_procedure = db.Column(db.String(128))\n type_number = db.Column(db.Integer)\n registry_number = db.Column(db.String(128))\n uri = db.Column(db.String(1024))\n acronym = db.Column(db.String(48))\n title = db.Column(db.String(1024), nullable=False)\n description = db.Column(db.Text)\n language = db.Column(db.String(5), nullable=False)\n body = db.Column(db.Text)\n original_text = db.Column(db.Text)\n status = db.Column(db.String(48), index=True)\n status_date = db.Column(\n db.DateTime,\n nullable=False,\n server_default=db.FetchedValue())\n group_id = db.Column(db.ForeignKey('public.group.group_id'))\n subject = db.Column(db.Text)\n coverage = db.Column(db.Text)\n geolocation = db.Column(db.Text)\n head_id = db.Column(db.ForeignKey('public.doc.doc_id'))\n timestamp = db.Column(\n db.DateTime,\n nullable=False,\n server_default=db.FetchedValue())\n assignee_id = db.Column(db.Integer)\n sitting_id = db.Column(\n db.ForeignKey('public.sitting.sitting_id'),\n index=True)\n\n group = db.relationship(\n 'Group',\n primaryjoin='Doc.group_id == Group.group_id',\n backref='group_docs')\n head = db.relationship(\n 'Doc',\n remote_side=[doc_id],\n primaryjoin='Doc.head_id == Doc.doc_id',\n backref='docs')\n owner = db.relationship(\n 'User',\n primaryjoin='Doc.owner_id == User.user_id',\n backref='user_docs')\n parliament = db.relationship(\n 'Parliament',\n primaryjoin='Doc.parliament_id == Parliament.parliament_id',\n backref='parliament_docs')\n sitting = db.relationship(\n 'Sitting',\n primaryjoin='Doc.sitting_id == Sitting.sitting_id',\n backref='sitting_docs')\n\n\nclass Principal(db.Model):\n __tablename__ = 'principal'\n __table_args__ = {'schema': 'public'}\n\n principal_id = db.Column(db.Integer, primary_key=True)\n type = db.Column(db.String(30), nullable=False)\n\n\nclass Group(PaginatedAPIMixin, Principal):\n __tablename__ = 'group'\n __table_args__ = {'schema': 'public'}\n\n group_id = db.Column(\n db.ForeignKey('public.principal.principal_id'),\n primary_key=True)\n short_name = db.Column(db.String(512), nullable=False)\n full_name = db.Column(db.String(1024))\n acronym = db.Column(db.String(32))\n principal_name = db.Column(db.String(32), nullable=False, unique=True)\n description = db.Column(db.Text)\n status = db.Column(db.String(32))\n status_date = db.Column(\n db.DateTime,\n nullable=False,\n server_default=db.FetchedValue())\n start_date = db.Column(db.Date, nullable=False)\n end_date = db.Column(db.Date)\n sub_type = db.Column(db.String(128))\n parent_group_id = db.Column(db.ForeignKey('public.group.group_id'))\n language = db.Column(db.String(5), nullable=False)\n group_role = db.Column(db.String(256), nullable=False)\n cluster = db.Column(db.Text)\n prefix = db.Column(db.Text)\n custom3 = db.Column(db.Text)\n custom4 = db.Column(db.Text)\n\n parent_group = db.relationship(\n 'Group',\n remote_side=[group_id],\n primaryjoin='Group.parent_group_id == 
Group.group_id',\n backref='groups')\n\n def to_dict(self):\n data = {\n 'short_name': self.short_name,\n 'full_name': self.full_name,\n 'principal_name': self.principal_name,\n 'status': self.status,\n 'status_date': self.status_date,\n 'start_date': self.start_date,\n 'end_date': self.end_date,\n 'parent_group_id': self.parent_group_id,\n 'language': self.language,\n 'group_role': self.group_role,\n '_links': {\n 'self': url_for('api.get_group', group_id=self.group_id, _external=True),\n # 'groups': url_for('api.get_group.parent_group', group_id=self.group_id, _external=True)\n }\n }\n return data\n\n\nclass Parliament(Group):\n __tablename__ = 'parliament'\n __table_args__ = {'schema': 'public'}\n\n parliament_id = db.Column(\n db.ForeignKey('public.group.group_id'),\n primary_key=True)\n parliament_type = db.Column(db.String(30))\n election_date = db.Column(db.Date, nullable=False)\n\n\nclass User(PaginatedAPIMixin, Principal):\n __tablename__ = 'user'\n __table_args__ = (\n db.CheckConstraint(\"(active_p)::text = ANY (ARRAY[('A'::character varying)::text, ('I'::character varying)::text, ('D'::character varying)::text])\"),\n db.CheckConstraint(\"(gender)::text = ANY (ARRAY[('M'::character varying)::text, ('F'::character varying)::text])\"),\n {'schema': 'public'}\n )\n\n user_id = db.Column(\n db.ForeignKey('public.principal.principal_id'),\n primary_key=True)\n login = db.Column(db.String(80), nullable=False, unique=True)\n salutation = db.Column(db.String(128))\n title = db.Column(db.String(128))\n first_name = db.Column(db.String(256), nullable=False)\n last_name = db.Column(db.String(256), nullable=False)\n middle_name = db.Column(db.String(256))\n email = db.Column(db.String(512), nullable=False)\n gender = db.Column(db.String(1))\n date_of_birth = db.Column(db.Date)\n birth_country = db.Column(db.ForeignKey('public.country.country_id'))\n birth_nationality = db.Column(db.ForeignKey('public.country.country_id'))\n current_nationality = db.Column(db.ForeignKey('public.country.country_id'))\n marital_status = db.Column(db.String(128))\n uri = db.Column(db.String(1024), unique=True)\n date_of_death = db.Column(db.Date)\n type_of_id = db.Column(db.String(1))\n initials = db.Column(db.String(10))\n password = db.Column(db.String(36))\n salt = db.Column(db.String(24))\n description = db.Column(db.Text)\n remarks = db.Column(db.Text)\n image = db.Column(db.LargeBinary)\n active_p = db.Column(db.String(1))\n receive_notification = db.Column(db.Boolean)\n language = db.Column(db.String(5), nullable=False)\n\n country = db.relationship(\n 'Country',\n primaryjoin='User.birth_country == Country.country_id',\n backref='country_users')\n country1 = db.relationship(\n 'Country',\n primaryjoin='User.birth_nationality == Country.country_id',\n backref='country1_users')\n country2 = db.relationship(\n 'Country',\n primaryjoin='User.current_nationality == Country.country_id',\n backref='country2_users')\n\n def to_dict(self, include_email=False):\n data = {\n 'user_id': self.user_id,\n 'login': self.login,\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n # 'email': self.email,\n # 'group_count': self.groups.count(),\n '_links': {\n 'self': url_for('api.get_user', user_id=self.user_id, _external=True),\n 'groups': url_for('api.get_groups', user_id=self.user_id),\n # 'avatar': self.avatar(128)\n }\n }\n if include_email:\n data['email'] = self.email\n return data\n\n def from_dict(self, data, new_user=False):\n for field in ['login', 'email']:\n if field in data:\n setattr(self, 
field, data[field])\n if new_user and 'password' in data:\n self.set_password(data['password'])\n\n\nclass Session(db.Model):\n __tablename__ = 'session'\n __table_args__ = {'schema': 'public'}\n\n session_id = db.Column(\n db.Integer,\n primary_key=True,\n server_default=db.FetchedValue())\n parliament_id = db.Column(\n db.ForeignKey('public.parliament.parliament_id'),\n nullable=False)\n short_name = db.Column(db.String(512), nullable=False)\n full_name = db.Column(db.String(1024), nullable=False)\n start_date = db.Column(db.Date, nullable=False)\n end_date = db.Column(db.Date)\n notes = db.Column(db.Text)\n language = db.Column(db.String(5), nullable=False)\n\n parliament = db.relationship(\n 'Parliament',\n primaryjoin='Session.parliament_id == Parliament.parliament_id',\n backref='sessions')\n\n\nclass Sitting(db.Model):\n __tablename__ = 'sitting'\n __table_args__ = {'schema': 'public'}\n\n sitting_id = db.Column(\n db.Integer,\n primary_key=True,\n server_default=db.FetchedValue())\n group_id = db.Column(\n db.ForeignKey('public.group.group_id'),\n nullable=False)\n session_id = db.Column(db.ForeignKey('public.session.session_id'))\n short_name = db.Column(db.String(512))\n start_date = db.Column(db.DateTime, nullable=False)\n end_date = db.Column(db.DateTime, nullable=False)\n sitting_length = db.Column(db.Integer)\n recurring_id = db.Column(db.Integer)\n recurring_type = db.Column(db.String(32))\n recurring_end_date = db.Column(db.DateTime)\n status = db.Column(db.String(48))\n status_date = db.Column(\n db.DateTime,\n nullable=False,\n server_default=db.FetchedValue())\n venue_id = db.Column(db.ForeignKey('public.venue.venue_id'))\n language = db.Column(db.String(5), nullable=False)\n activity_type = db.Column(db.String(1024))\n meeting_type = db.Column(db.String(1024))\n convocation_type = db.Column(db.String(1024))\n cancel_reason = db.Column(db.String(1024))\n\n group = db.relationship(\n 'Group',\n primaryjoin='Sitting.group_id == Group.group_id',\n backref='group_sittings')\n session = db.relationship(\n 'Session',\n primaryjoin='Sitting.session_id == Session.session_id',\n backref='session_sittings')\n venue = db.relationship(\n 'Venue',\n primaryjoin='Sitting.venue_id == Venue.venue_id',\n backref='venue_sittings')\n\n\nclass UserGroupMembership(db.Model):\n __tablename__ = 'user_group_membership'\n __table_args__ = (\n db.UniqueConstraint('user_id', 'group_id'),\n {'schema': 'public'}\n )\n\n membership_id = db.Column(\n db.Integer,\n primary_key=True,\n server_default=db.FetchedValue())\n user_id = db.Column(db.ForeignKey('public.user.user_id'), nullable=False)\n group_id = db.Column(\n db.ForeignKey('public.group.group_id'),\n nullable=False)\n status = db.Column(db.String(32))\n status_date = db.Column(\n db.DateTime,\n nullable=False,\n server_default=db.FetchedValue())\n start_date = db.Column(db.Date, nullable=False)\n end_date = db.Column(db.Date)\n notes = db.Column(db.Text)\n active_p = db.Column(db.Boolean)\n replaced_id = db.Column(\n db.ForeignKey('public.user_group_membership.membership_id'),\n unique=True)\n substitution_type = db.Column(db.String(100))\n membership_type = db.Column(db.String(30), nullable=False)\n language = db.Column(db.String(5), nullable=False)\n\n group = db.relationship(\n 'Group',\n primaryjoin='UserGroupMembership.group_id == Group.group_id',\n backref='group_memberships')\n replaced = db.relationship(\n 'UserGroupMembership',\n uselist=False,\n remote_side=[membership_id],\n primaryjoin='UserGroupMembership.replaced_id == 
UserGroupMembership.membership_id',\n backref='user_group_memberships')\n user = db.relationship(\n 'User',\n primaryjoin='UserGroupMembership.user_id == User.user_id',\n backref='user_memberships')\n\n\nclass Venue(db.Model):\n __tablename__ = 'venue'\n __table_args__ = {'schema': 'public'}\n\n venue_id = db.Column(\n db.Integer,\n primary_key=True,\n server_default=db.FetchedValue())\n short_name = db.Column(db.String(512), nullable=False)\n description = db.Column(db.Text)\n language = db.Column(db.String(5), nullable=False)\n group_id = db.Column(db.ForeignKey('public.group.group_id'))\n\n group = db.relationship(\n 'Group',\n primaryjoin='Venue.group_id == Group.group_id',\n backref='venues')\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"531369038","text":"from django.urls import path\nfrom . import views\n\napp_name = 'orders'\n\nurlpatterns = [\n path('create/',\n views.order_create,\n name='order_create'),\n\n path('admin/order//',\n views.admin_order_detail,\n name='admin_order_detail'),\n path('order//',\n views.user_order_detail,\n name='user_order_detail'),\n\n path('order//modify/',\n views.user_order_address_modify,\n name='user_order_address_modify'),\n path('order//upload/',\n views.user_order_doc_upload,\n name='user_order_doc_upload'),\n path('order/modify/',\n views.user_order_address_modify,\n name='user_order_address_modify'),\n\n path('admin/order//pdf/',\n views.admin_order_pdf,\n name='admin_order_pdf'),\n path('order//pdf/',\n views.admin_order_detail,\n name='admin_order_pdf'),\n ]\n","sub_path":"orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"93334661","text":"from sys import argv\nfrom os.path import exists\nscript, from_file, to = argv\n\nprint(f\"Copying stuff from {from_file} to {to}\")\n\n# Later on i will rewrite this code to sum up both lines\nindata = open(from_file).read()\n\nprint(f\"The input file is {len(indata)} bytes long\")\nprint(f\"Does the output file exists?{exists(to)}\")\nprint(\"Ready ! Press return to continue or ctrl+c to abort.\")\ninput()\n\nto = open(to, \"w\")\nto.write(indata)\nprint(\"Haah! All done\")\n\nto.close()\n","sub_path":"nineteen.py","file_name":"nineteen.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"178007827","text":"#!/usr/bin/env python3\n\nimport abscplane\nimport numpy as np\nimport pandas as pd\nimport numba as nb\n\n\"\"\"\nImplementation for Abstract Base Class AbsComplexPlane\nA complex plane is a 2D grid of complex numbers, having\nthe form (x + y*1j), where 1j is the unit imaginary number,\nand one can think of x and y as the coordinates for\nthe horizontal axis and the vertical axis of the plane, \nrespectively.\n\"\"\"\n\nclass ComplexPlaneNP(abscplane.AbsComplexPlane):\n \"\"\"Create and manipulate a complex plane\n In addition to generating the 2D grid of numbers (x + y*1j),\n the class supports transformations of the plane with\n an arbitrary function f. The attribute self.plane\n stores a 2D grid of numbers f(x + y*1j) such that the\n parameter x ranges from self.xmin to self.xmax with self.xlen\n total points, while the parameter y ranges from self.ymin to\n self.ymax with self.ylen total points. 
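    The grid itself is stored as a pandas DataFrame whose row index holds
    the imaginary parts and whose column labels hold the real parts (see
    refresh() below).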
By default, the function\n f is the identity function lamdax:x, which does nothing to\n the bare complex plane.\n Attributes:\n xmax (float) : maximum horizontal axis value\n xmin (float) : minimum horizontal axis value\n xlen (int) : number of horizontal points\n ymax (float) : maximum vertical axis value\n ymin (float) : minimum vertical axis value\n ylen (int) : number of vertical points\n plane : stored complex plane implementation\n f (func) : function displayed in the plane\n \"\"\"\n \n def __init__(self, xmin, xmax, xlen, ymin, ymax, ylen):\n \"\"\"\n Args:\n xmax (float) : maximum horizontal axis value\n xmin (float) : minimum horizontal axis value\n xlen (int) : number of horizontal points\n ymax (float) : maximum vertical axis value\n ymin (float) : minimum vertical axis value\n ylen (int) : number of vertical points\n \"\"\"\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen \n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n self.f = lambda x:x\n \n self.refresh()\n \n def refresh(self):\n \"\"\"Regenerate complex plane.\n For every point (x + y*1j) in self.plane, replace\n the point with the value self.f(x + y*1j). \n \"\"\"\n \n real = np.linspace(self.xmin,self.xmax,self.xlen) #create real axis\n imaginary = np.linspace(self.ymin,self.ymax,self.ylen) #create imaginary axis\n x, y= np.meshgrid(real,imaginary) #create a 2D grid with each real component matched with every imginary component\n z = x+ y*1j #create complex numbers with real and imaginary parts\n rl = np.linspace(self.xmin,self.xmax,self.xlen) #labels (real components) for columns \n imag = np.linspace(self.ymin,self.ymax,self.ylen) #labels (imaginary components) for rows\n self.plane = pd.DataFrame(self.f(z), index=imag, columns=rl)\n \n def zoom(self, xmin, xmax, xlen, ymin, ymax, ylen):\n \"\"\"Reset self.xmin, self.xmax, and/or self.xlen.\n Also reset self.ymin, self.ymax, and/or self.ylen.\n Zoom into the indicated range of the x- and y-axes.\n Refresh the plane as needed.\n Args:\n xmax (float) : maximum horizontal axis value\n xmin (float) : minimum horizontal axis value\n xlen (int) : number of horizontal points\n ymax (float) : maximum vertical axis value\n ymin (float) : minimum vertical axis value\n ylen (int) : number of vertical points\n \"\"\"\n\t\t\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen \n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n\n self.refresh()\n\n def set_f(self, function):\n \"\"\"Reset the transformation function f.\n Refreshes the plane after setting attribute \n f to function.\n Args:\n function (function) : function to apply to \n points of complex plane.\n \"\"\"\n\n self.f = np.vectorize(function)\n self.refresh() #calls refresh to have function change take effect\n \n def __repr__(self):\n \"\"\"Represent the complex plane as an even grid \n of complex numbers\n \"\"\"\n\n return self.plane.to_string()\n\ndef julia(c, max=100):\n def f(z):\n n = 1\n mag = abs(z)\n if mag > 2:\n return 1 #return 1 if |z|>2 before transformation\n while n <= max: #continue transformation until max is exceeded\n z = z**2 + c\n mag = abs(z)\n if mag > 2:\n return n #return number of transformations before |z|>2\n else:\n n += 1\n return 0 #return 0 if max is reached before |z|>2\n return f\n\n\ndef test_julia():\n f = julia( -1.037 + 0.17j ) # c=-1.037 + 0.17j\n assert f(-1.00 - 0.2j) == 0 # z=-1.00 - 0.2j\n assert f(-1.01 - 0.2j) == 20\n assert f(-1.02 - 0.2j) == 13\n assert f(-1.03 - 0.2j) == 10\n assert f(5j) == 
1\n\ntest_julia()\n","sub_path":"cplane_np.py","file_name":"cplane_np.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"383366642","text":"from PIL import Image\r\n\r\n\r\ndef twovalue(filep):\r\n pic = Image.open(filep).convert(mode='RGB')\r\n# source = pic.split()\r\n# r, g, b = 0, 1, 2\r\n# R = source[r].point(lambda i: int(i >= 10 and 255))\r\n# G = source[g].point(lambda i: int(i >= 10 and 255))\r\n# B = source[b].point(lambda i: int(i != 153 and 255))\r\n# pic = Image.merge(pic.mode, (R, G, B))\r\n for x in range(0, 72):\r\n for y in range(0, 27):\r\n value = pic.getpixel((x, y))\r\n if value[0] > 60 or value[1] > 60 or 110 > value[2] or value[2] > 210:\r\n pic.putpixel((x, y), (255, 255, 255))\r\n for x in range(1, 71):\r\n for y in range(1, 26):\r\n if x == 0 or x == 71 or y == 0 or y == 26:\r\n pic.putpixel((x, y), (255, 255, 255))\r\n else:\r\n circle = [0, 0, 0,\r\n 0, 0,\r\n 0, 0, 0]\r\n circle[0] = int(pic.getpixel((x-1, y-1)) != (255, 255, 255))\r\n circle[1] = int(pic.getpixel((x, y-1)) != (255, 255, 255))\r\n circle[2] = int(pic.getpixel((x+1, y-1)) != (255, 255, 255))\r\n circle[3] = int(pic.getpixel((x-1, y)) != (255, 255, 255))\r\n circle[4] = int(pic.getpixel((x+1, y)) != (255, 255, 255))\r\n circle[5] = int(pic.getpixel((x-1, y+1)) != (255, 255, 255))\r\n circle[6] = int(pic.getpixel((x, y+1)) != (255, 255, 255))\r\n circle[7] = int(pic.getpixel((x+1, y+1)) != (255, 255, 255))\r\n if sum(circle) == 0:\r\n pic.putpixel((x, y), (255, 255, 255))\r\n for x in range(0, 72):\r\n for y in range(0, 27):\r\n if pic.getpixel((x, y)) != (255, 255, 255):\r\n pic.putpixel((x, y), (0, 0, 0))\r\n return pic\r\n\r\n\r\ndef slicechar(pic):\r\n seq = []\r\n for x in range(0, 72):\r\n pix = 0\r\n for y in range(0, 27):\r\n if pic.getpixel((x, y)) == (0, 0, 0):\r\n pix += 1\r\n seq.append(pix)\r\n flag = 0\r\n cut = []\r\n for i in range(0, 72):\r\n if seq[i] == 0 and flag == 1:\r\n cut.append(i)\r\n flag = 0\r\n elif seq[i] != 0 and flag == 0:\r\n cut.append(i-1)\r\n flag = 1\r\n return cut\r\n\r\n","sub_path":"pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"428127092","text":"from django.shortcuts import render\nfrom django.http import HttpResponseBadRequest\nfrom django.http import HttpResponseNotFound\n\nfrom .models import Entry\nfrom .models import Client\nfrom .models import Project\n\n\ndef entries(request):\n entry_list = Entry.objects.all()\n return render(request, 'entries.html', {\n 'entry_list': entry_list,\n })\n\ndef clients(request):\n client_list = Client.objects.all()\n return render(request, 'clients.html', {\n 'client_list': client_list,\n })\n\ndef projects(request):\n project_list = Project.objects.all()\n return render(request, 'projects.html', {\n 'project_list': project_list,\n })\n\ndef client(request):\n client_id = request.GET.get('client_id')\n if client_id:\n try:\n client_id = int(client_id)\n except BaseException:\n # client_id should be an integer, return a 400\n raise HttpResponseBadRequest\n\n client = Client.objects.filter(id=client_id)[0]\n project_list = Project.objects.filter(client=client)\n entry_dict = {}\n for project in project_list:\n entries = Entry.objects.filter(project=project)\n entry_dict[project.id] = entries\n\n return render(request, 'client.html', {\n 'client': client,\n 'project_list': project_list,\n 
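        # entry_dict maps each project's id to that project's Entry queryset,
        # built in the loop above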
'entry_dict': entry_dict,\n })\n else:\n raise HttpResponseNotFound\n","sub_path":"timetracker/entries/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471414787","text":"#!python\nimport os\n\n# build with `scons --debug_build` for debug.\nAddOption(\n '--debug_build',\n action='store_true',\n help='debug build',\n default=False)\n\nenv = Environment(ENV = os.environ, tools = ['mingw'])\npath = ['C:\\\\MinGW_63\\\\bin']\nenv.PrependENVPath('PATH', path)\nenv.Append(CXXFLAGS='--std=c++17') \n\n \nif GetOption('debug_build'):\n env.ParseFlags('-DDEBUG')\n variant_dir = 'build/BlackJack_debug'\nelse:\n variant_dir = 'build/BlackJack_release'\n \nSConscript('src/SConscript', variant_dir=variant_dir, duplicate=False, exports=['env'])","sub_path":"BlackJack/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"40812648","text":"from typing import List, DefaultDict, Tuple\nfrom collections import defaultdict\nfrom math import cos, sin, radians\nfrom day05 import IntcodeComputer, read_program\nfrom day08 import print_image\n\n\ndef plot_panel_map(panel_map: DefaultDict[Tuple[int, int], int]) -> None:\n \n min_x = min(loc[0] for loc in panel_map)\n max_x = max(loc[0] for loc in panel_map)\n min_y = min(loc[1] for loc in panel_map)\n max_y = max(loc[1] for loc in panel_map)\n\n nx = max_x - min_x + 1\n ny = max_y - min_y + 1\n\n panel_grid = [[0 for _ in range(nx)] for _ in range(ny)]\n\n for loc in panel_map:\n color = panel_map[loc]\n panel_grid[loc[1] - min_y][loc[0] - min_x] = color\n\n print_image(panel_grid)\n \n return \n\n\nclass PaintingRobot(object):\n def __init__(self, program: List[int]) -> None:\n self.computer = IntcodeComputer(program)\n self.x: int = 0\n self.y: int = 0\n self.direction: int = 90\n \n def step(self, panel: DefaultDict[Tuple[int, int], int]) -> bool:\n\n current_panel_color = panel[(self.x, self.y)]\n \n color_to_paint = self.computer.run_and_halt(current_panel_color)\n if color_to_paint is None:\n return True\n\n direction_to_turn = self.computer.run_and_halt()\n if direction_to_turn is None:\n return True\n\n direction_to_turn = 90 if direction_to_turn == 0 else -90\n\n panel[(self.x, self.y)] = color_to_paint\n self.direction += direction_to_turn\n\n self.x = int(round(self.x + cos(radians(self.direction))))\n self.y = int(round(self.y + sin(radians(self.direction))))\n\n return False\n\n\nif __name__ == '__main__':\n\n program = read_program('./inputs/day11.txt')\n robot = PaintingRobot(program)\n panel_map = defaultdict(lambda: 0)\n\n program_halted = False\n\n while not program_halted:\n program_halted = robot.step(panel_map)\n\n print(f'Number of panels painted, starting w/ black panel: {len(panel_map)}')\n\n robot = PaintingRobot(program)\n panel_map = defaultdict(lambda: 0)\n panel_map[(0, 0)] = 1\n\n program_halted = False\n\n while not program_halted:\n program_halted = robot.step(panel_map)\n\n print(f'Number of panels painted, starting w/ white panel: {len(panel_map)}')\n\n plot_panel_map(panel_map)\n","sub_path":"day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"381870732","text":"#!/usr/bin/env python3\n\nimport argparse\nimport collections\nimport configparser\nfrom 
datetime import date\nfrom datetime import datetime\nimport glob\nimport json\nimport os\nimport re\nfrom shutil import copyfile, move\nfrom subprocess import Popen, PIPE\nfrom tqdm import tqdm\n\n\n# Partly inspired by https://stackoverflow.com/a/11415816/1177851\nclass append_readable_dir(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n input_dir = os.path.expanduser(values)\n\n if not os.path.isdir(input_dir):\n raise argparse.ArgumentTypeError(\n \"readable_dir:{0} is not valid\".format(input_dir))\n\n if os.access(input_dir, os.R_OK):\n dir_list = argparse._copy.copy(\n argparse._ensure_value(namespace, self.dest, []))\n dir_list.append(input_dir)\n setattr(namespace, self.dest, dir_list)\n else:\n raise argparse.ArgumentTypeError(\n \"readable_dir:{0} is not readable\".format(input_dir))\n\n\nclass append_readable_file(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n input_files = glob.glob(os.path.expanduser(values))\n temp_list = []\n\n for input_file in input_files:\n print(input_file)\n if not os.path.isfile(input_file):\n raise argparse.ArgumentTypeError(\n \"readable_file:{0} is not valid\".format(input_file))\n\n if os.access(input_file, os.R_OK):\n temp_list.append(input_file)\n else:\n raise argparse.ArgumentTypeError(\n \"readable_file:{0} is not readable\".format(input_file))\n\n if len(temp_list) > 0:\n file_list = argparse._copy.copy(\n argparse._ensure_value(namespace, self.dest, []))\n file_list = file_list + temp_list\n setattr(namespace, self.dest, file_list)\n\n\nclass ArchiveToolkit:\n\n _config_file = 'config.ini'\n _config_file_example = 'config.ini.example'\n _file_extension = '.pdf'\n _date_format = '%Y-%m-%d'\n _date_sep = '--'\n _tags_sep = '__'\n _tag_sep = '_'\n\n file_list = []\n\n def __init__(self):\n self._basepath = os.path.dirname(os.path.realpath(__file__))\n self._config_path = os.path.join(self._basepath, self._config_file)\n\n def parse_config_file(self):\n self._config = configparser.ConfigParser(allow_no_value=True)\n self._config.read(self._config_path)\n\n if len(self._config.sections()) == 0:\n raise Exception('Config file is empty or does not exist.')\n\n input_paths = self._config['Directories'].get('input_paths')\n if input_paths is not None:\n input_paths = [os.path.expanduser(in_path) for in_path in\n json.loads(input_paths)]\n\n self.file_list = self.file_list + glob_directory(input_paths,\n self._file_extension)\n\n self.archive_path = os.path.expanduser(\n self._config['Directories'].get('output_path'))\n if self.archive_path is None:\n raise Exception('No output path specified.')\n\n self._movefile = self._config['Defaults'].get('copy_or_move') == 'move'\n self._yearly_subfolder = self._config['Defaults'].getboolean('yearly_subfolder', 'False')\n self._add_mac_tags = self._config['Defaults'].getboolean('add_mac_tags', 'False')\n self._num_tags_top = self._config['Defaults'].getint('num_top_tags')\n self._open_pdf_in = self._config['Defaults'].get('open_pdf_in')\n\n self.gather_tags_from_archive()\n if len(self.tag_list) == 0:\n raise Exception('No tags specified.')\n\n def parse_command_line(self):\n parser = argparse.ArgumentParser(description='''\n Archive4All – Toolkit for file tagging and archiving tasks.\n ''',\n fromfile_prefix_chars=\"@\")\n parser.add_argument('-d', '--dir',\n metavar='DIRECTORY', dest='directory_list',\n action=append_readable_dir, default=[],\n help='''\n Add files from the existing and readable DIRECTORY for processing.\n This 
option may be used multiple times, each time adding another\n directory.\n ''')\n parser.add_argument('-f', '--file',\n metavar='FILE', dest='file_list',\n action=append_readable_file, default=[],\n help='''\n Add an existing and reable FILE for processing. Just as -d/--dir\n this option may be used multiple times, each time adding another\n file. Of course it is allowed to use glob patterns (i.e.\n \"path/to/*.pdf\").\n ''')\n parser.add_argument('-c', '--config',\n metavar='CONFIGFILE', dest='config_file',\n default=self._config_path,\n help='''\n Change the path to the config file to wherever you want. By default\n the config.ini is loaded from the Toolkit's base directory. Both\n absolute and relative paths are allowed, with the latter of course\n being relative to the current working directory\n ''')\n parser.add_argument('-nc', '--new-config',\n dest='new_config',\n action='store_true',\n help='''\n When this option is set, the Toolkit will simply create a new\n config file in the default location an exit. A neat little shortcut\n for copying the example configuration file. In conjunction with -c/\n --config this will cause the config to be created in a non-default\n place.\n ''')\n\n self._args = parser.parse_args()\n\n # Overwrite current config file/path\n self._config_file = os.path.basename(self._args.config_file)\n self._config_path = self._args.config_file\n\n # Copy example config to new location\n if self._args.new_config:\n if os.path.isfile(self._config_path):\n raise Exception('Config file already exists.')\n\n from shutil import copyfile\n copyfile(os.path.join(self._basepath, self._config_file_example),\n os.path.join(self._basepath, self._config_file))\n\n return\n\n input_paths = self._args.directory_list\n self.file_list = self.file_list + glob_directory(input_paths,\n self._file_extension)\n\n input_files = self._args.file_list\n self.file_list = self.file_list + input_files\n\n def main(self):\n \"\"\"\n Main method to run everything in ArchiveToolkit the way it's\n intended to be. 
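        Note that the command line is parsed before the config file, so an
        alternate -c/--config location takes effect before any settings are
        read.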
When called directly (not from inside another Python\n function or shell) this function handles parsing of config, command\n line arguments, and –after that— the processing of the given files.\n \"\"\"\n\n self.parse_command_line()\n self.parse_config_file()\n\n self.process_files()\n\n def process_files(self):\n\n if len(self.file_list) == 0:\n raise Exception('No files have been added for processing.')\n\n for path in tqdm(self.file_list):\n self.q_and_a(path)\n\n def q_and_a(self, file_path):\n print('>>> ' + file_path.split(os.path.dirname(file_path) + '/')[1])\n p = Popen(['open', '--background', '-a', self._open_pdf_in, file_path])\n obj = ArchiveFile(self, file_path)\n # save creation time of file as default\n\n # set year\n year = input('Year [{}]: '.format(obj.date.year))\n year = year or obj.date.year\n year = int(year)\n if year < 100:\n year += 2000\n\n # set month\n month = input('Month [{}]: '.format(obj.date.month))\n month = month or obj.date.month\n month = int(month)\n\n # set day\n day = input('Day [{}]: '.format(obj.date.day))\n day = day or obj.date.day\n day = int(day)\n obj.date = date(year, month, day)\n\n # set name\n name = input('Name [{}]: '.format(obj.name))\n obj.name = name or obj.name\n\n # set tags\n ## config tags\n print('\\nID: name')\n print('=' * 10)\n for idx, cur_tag in enumerate(self.tag_list_config):\n print('{}: {}'.format(idx, cur_tag))\n\n ## top tags\n print('-' * 10)\n # order of elements not relevant!?\n tag_list_top = list(set(self.tag_list_top) - set(self.tag_list_config))\n tag_list_top.sort()\n for idx, cur_tag in enumerate(tag_list_top):\n print('{}: {}'.format(idx + len(self.tag_list_config), cur_tag))\n\n ## other tags\n print('-' * 10)\n tag_list_other = list(set(self.tag_list) - set(self.tag_list_config + self.tag_list_top))\n tag_list_other.sort()\n for idx, cur_tag in enumerate(tag_list_other):\n print('{}: {}'.format(idx + len(self.tag_list_config + self.tag_list_top), cur_tag))\n\n #TODO: tags is not empty here, if parsing was successful\n # set them as default tags, when there is a UI to remove tags\n obj.tags = []\n while True:\n print('\\ncurrent tags:')\n print(obj.tags)\n ans = input('choose tag ID or write tag: ')\n\n # Empty string exits the loop\n if ans == '':\n break\n\n # A regex match for digit-only value is interpreted as ID\n matched_numeral = re.match('^(\\d+)$', ans)\n if matched_numeral is not None:\n try:\n idx = int(matched_numeral.group(0))\n # chosen: config tag\n if idx < len(self.tag_list_config):\n obj.tags.append(self.tag_list_config[idx])\n\n # chosen: top tag\n elif len(self.tag_list_config) <= idx < len(self.tag_list_config + self.tag_list_top):\n idx -= len(self.tag_list_config)\n obj.tags.append(tag_list_top[idx])\n\n # chosen: other tag\n else:\n idx -= len(self.tag_list_top)\n idx -= len(self.tag_list_config)\n obj.tags.append(tag_list_other[idx])\n\n except IndexError:\n print('No tag with that ID.')\n continue\n\n # A non-match will be added as a new tag\n else:\n ans = ans.lower()\n obj.tags.append(ans)\n self.tag_list.append(ans)\n self.tag_list.sort()\n\n obj.write_file()\n\n def gather_tags_from_archive(self):\n # get all tags from archive\n all_tags = []\n for cur_file in glob_directory(self.archive_path, self._file_extension):\n _, _, file_tags = self.parse_archive_file(cur_file)\n all_tags += file_tags\n\n self.tag_list = list(set(all_tags))\n self.tag_list.sort()\n\n # get the TopX of the archive tags\n self.tag_list_top = []\n for name, _ in 
collections.Counter(all_tags).most_common(self._num_tags_top):\n self.tag_list_top.append(name)\n\n # get tags from config\n self.tag_list_config = list(self._config['Tags'].keys())\n\n def parse_archive_file(self, file_path):\n file_name = os.path.basename(file_path)[:-\n len(self._file_extension)]\n\n name_regex = re.match('(.*){}(.*){}(.*)'.format(self._date_sep,\n self._tags_sep),\n file_name)\n\n if name_regex is None:\n raise Exception('File name cannot be parsed.')\n # TODO: Turn into soft error?\n\n file_date = datetime.strptime(name_regex.group(1),\n self._date_format).date()\n file_name = name_regex.group(2)\n file_tags = name_regex.group(3).split(self._tag_sep)\n\n return file_date, file_name, file_tags\n\n\nclass ArchiveFile:\n def __init__(self, toolkit, file_in):\n # TODO: relative path to absolute path?\n self._file = file_in\n self._toolkit = toolkit\n self._basepath = os.path.dirname(os.path.realpath(__file__))\n\n # try to parse data from filename\n try:\n self.date, self.name, self.tags = self._toolkit.parse_archive_file(file_in)\n except Exception:\n self.date = datetime.fromtimestamp(os.path.getctime(self._file))\n self.name = ''\n self.tags = []\n\n def write_file(self):\n # TODO: error checking would be nice\n date = self.date.strftime(self._toolkit._date_format)\n name = _strnorm(self.name)\n self.tags.sort()\n tags = self._toolkit._tag_sep.join(self.tags)\n ext = os.path.splitext(self._file)[-1]\n filename = '{}{}{}{}{}{}'.format(date,\n self._toolkit._date_sep,\n name,\n self._toolkit._tags_sep,\n tags,\n ext)\n\n # archive files in yearly subfolders\n if self._toolkit._yearly_subfolder:\n year_dir = self.date.strftime('%Y')\n else:\n year_dir = ''\n\n # create a new directory if it does not already exist\n target_path = os.path.join(self._toolkit.archive_path,\n year_dir)\n target_file = os.path.join(self._toolkit.archive_path,\n year_dir, filename)\n\n if not os.path.isdir(target_path):\n os.makedirs(target_path)\n\n # rename and move/copy file\n if os.path.isfile(target_file):\n raise RuntimeError('File already exists!')\n\n if self._toolkit._movefile:\n move(self._file, target_file)\n else:\n copyfile(self._file, target_file)\n\n if self._toolkit._add_mac_tags:\n update_mac_tags(target_file)\n\ndef update_mac_tags(file_path):\n p = Popen(['tag', '--list', '--garrulous', '--no-name', file_path], stdout=PIPE)\n file_tags = p.stdout.read().decode(\"utf-8\")[:-1]\n file_tags = set(file_tags.split('\\n'))\n name_tags = set(name2tags(file_path))\n\n # delete tags from attr\n for attr in file_tags - name_tags:\n Popen(['tag', '--remove', attr, file_path])\n\n # add tags to attr\n for attr in name_tags - file_tags:\n Popen(['tag', '--add', attr, file_path])\n\ndef name2tags(file):\n # list all files (not just pdf files)\n file = file.split(os.path.dirname(file) + '/')[1] # filename\n file = file.split('__')[1] # tags + ext\n file = file.split('.')[0] # tags\n return file.split('_')\n\ndef _strnorm(sz):\n sz = sz.lower()\n sz = sz.replace(' ', '-')\n sz = sz.replace('ä', 'ae')\n sz = sz.replace('ö', 'oe')\n sz = sz.replace('ü', 'ue')\n sz = sz.replace('ß', 'ss')\n return sz\n\n\ndef glob_directory(path_or_paths, file_extension):\n\n if type(path_or_paths) is str:\n path_or_paths = [path_or_paths]\n\n file_list = []\n for path in path_or_paths:\n path = os.path.expanduser(path)\n if not os.path.isdir(path):\n raise Exception('Path does not exist')\n\n file_list = file_list + glob.glob(os.path.join(path,\n '**',\n '*' +\n file_extension),\n recursive=True)\n\n return 
file_list\n\n\nif __name__ == '__main__':\n\n at = ArchiveToolkit()\n at.main()\n","sub_path":"archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":16264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"372079754","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom glob import glob\nfrom operator import itemgetter\nimport os\nimport pickle\n\nimport numpy as np\nimport requests\nfrom skimage.io import imread, imsave\nfrom skimage.util import invert\nfrom skimage.color import rgb2gray\nfrom skimage.exposure import rescale_intensity\nfrom tqdm import tqdm\n\n\nIMAGE_FILENAME = 'val_256.tar'\nIMAGE_DIR = 'images'\n\n\ndef download_images():\n print('>> download_images() called.')\n\n if not os.path.exists(IMAGE_FILENAME):\n\n # https://stackoverflow.com/a/37573701\n url = 'http://data.csail.mit.edu/places/places365/val_256.tar'\n response = requests.get(url, stream=True)\n total_size_in_bytes = int(response.headers.get('content-length', 0))\n block_size = 1024\n progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\n with open(IMAGE_FILENAME, 'wb') as file:\n for data in response.iter_content(block_size):\n progress_bar.update(len(data))\n file.write(data)\n progress_bar.close()\n if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:\n print(\"ERROR, something went wrong\")\n\n\n if not os.path.exists(IMAGE_DIR):\n cmd = f'''\n tar -xf {IMAGE_FILENAME};\n mv val_256 {IMAGE_DIR}\n '''\n os.system(cmd)\n\ndef compute_scores():\n print('>> compute_scores() called.')\n\n hands = {}\n for fname in glob('hands/*png'):\n hand = imread(fname)\n hand = rgb2gray(hand)\n hand = invert(hand)\n hands[fname] = hand\n\n def compute_similarity(hand, img):\n return ((hand - 0.01) * img).sum()\n\n scores = {}\n\n for fname in tqdm(glob(f'{IMAGE_DIR}/*jpg')):\n image = rgb2gray(imread(fname))\n\n for hand_key in hands.keys():\n if hand_key not in scores:\n scores[hand_key] = []\n\n score = compute_similarity(hands[hand_key], image)\n scores[hand_key].append((fname, score))\n\n for key in scores:\n scores[key] = sorted(scores[key], key=itemgetter(1), reverse=True)\n\n with open('scores.pkl', 'wb') as f:\n pickle.dump(scores, f)\n\ndef make_hourly_images():\n print('>> make_hourly_images() called.')\n\n cmd = 'mkdir -p hourly'\n os.system(cmd)\n\n scores = None\n with open('scores.pkl', 'rb') as f:\n scores = pickle.load(f)\n\n n_layered = 20\n for i in tqdm(range(12)):\n hour = '{:02d}'.format(i)\n key = f'hands/hands-{hour}.png'\n\n layered = np.zeros((256, 256), dtype=np.float64)\n candidates = [fname for fname, _ in scores[key][10:n_layered]]\n for i, fname in enumerate(candidates):\n image = rgb2gray(imread(fname))\n layered = 0.8 * layered + 0.2 * image\n\n layered = rescale_intensity(layered)\n imsave(f'hourly/{hour}.png', layered)\n\ndef make_minutely_images():\n print('>> make_minutely_images() called.')\n\n cmd = 'mkdir -p minutely'\n os.system(cmd)\n\n scores = None\n with open('scores.pkl', 'rb') as f:\n scores = pickle.load(f)\n\n for hour in range(12):\n for minute in range(60):\n print('>> processing {:02}:{:02} ..'.format(hour, minute))\n\n ratio = 1 - (float(minute) / 60)\n\n layered = np.zeros((256, 256), dtype=np.float64)\n image1 = rgb2gray(imread('hourly/{:02d}.png'.format(hour)))\n image2 = rgb2gray(imread('hourly/{:02d}.png'.format((hour + 1) % 12)))\n\n layered = 0.8 * layered + 0.2 * ratio * image1\n layered = 0.8 * layered + 0.2 * (1 - ratio) * image2\n\n layered = 
rescale_intensity(layered)\n imsave('minutely/{:02}{:02}.png'.format(hour, minute), layered)\n\ndef make_animated_output():\n print('>> make_animated_output() called.')\n\n cmd = 'convert minutely/*png clock-like-output.gif'\n os.system(cmd)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Clock-like image generator')\n parser.add_argument(\n 'command', type=str,\n choices=[\n 'download-images',\n 'compute-scores',\n 'make-hourly-images',\n 'make-minutely-images',\n 'make-animated-output',\n ])\n args = parser.parse_args()\n\n if args.command == 'download-images':\n download_images()\n elif args.command == 'compute-scores':\n compute_scores()\n elif args.command == 'make-hourly-images':\n make_hourly_images()\n elif args.command == 'make-minutely-images':\n make_minutely_images()\n elif args.command == 'make-animated-output':\n make_animated_output()\n else:\n print(f'Command \"{args.command}\" not supported')\n","sub_path":"clock-like/clock-like.py","file_name":"clock-like.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"12820331","text":"import pymongo\nimport sys\nimport logging\nfrom pymongo import MongoClient\nimport time\nimport glob\n\ncon=None\ndef get_mongo_connection(host='localhost', port=27017):\n global con\n if con is None:\n print (\"Establishing connection %s host and port %d\" %(host,port))\n try:\n con = pymongo.MongoClient(host, port)\n except Exception as e:\n print (e)\n return None\n return con\n\ndef get_count_of_data(fromTS, toTS):\n return get_mongo_connection().mesowest.mesowest.find({ \"timestamp_utc\" : { \"$gt\" : fromTS, \"$lt\" : toTS}}).count()\n\ndef get_dates():\n return get_mongo_connection().mesowest.mesowest.distinct(\"date_utc\")\n\ndef get_data(fromTS, toTS, offset, limit):\n content= \"\"\n if offset or limit:\n data = list(get_mongo_connection().mesowest.mesowest.find({ \"timestamp_utc\" : { \"$gt\" : fromTS, \"$lt\" : toTS}}).skip(offset).limit(limit))\n else:\n data = list(get_mongo_connection().mesowest.mesowest.find({ \"timestamp_utc\" : { \"$gt\" : fromTS, \"$lt\" : toTS}})) \n for d in data:\n content+=d['STN']+','+d['YYMMDD/HHMM']+','+d['MNET']+','+d['SLAT']+','+d['SLON']+','+d['SELV']+','+d['TMPF']+','+d['SKNT']+','+d['DRCT']+','+d['GUST']+','+d['PMSL']+','+d['ALTI']+','+d['DWPF']+','+d['RELH']+','+d['WTHR']+','+d['P24I']+'\\n'\n return content\n\ndef put_data(content):\n splittedArray = []\n bulkInsertArray = []\n for line in content.split('\\n'):\n line = line.strip()\n splittedArray = line.split(',')\n if len(splittedArray)>10 and splittedArray[0]!=\"STN\":\n singleTuple = {'STN':splittedArray[0],'YYMMDD/HHMM':splittedArray[1],'MNET':splittedArray[2],'SLAT':splittedArray[3],'SLON':splittedArray[4],'SELV':splittedArray[5],'TMPF':splittedArray[6],'SKNT':splittedArray[7],'DRCT':splittedArray[8],'GUST':splittedArray[9],'PMSL':splittedArray[10],'ALTI':splittedArray[11],'DWPF':splittedArray[12],'RELH':splittedArray[13],'WTHR':splittedArray[14],'P24I':splittedArray[14],'date_utc':(splittedArray[1].split(' '))[0].replace('-',''),'timestamp_utc':int(time.mktime(time.strptime(splittedArray[1], '%Y-%m-%d %H:%M:%S')))*1000}\n #singleTuple = {\"STN\":splittedArray[0],\"timestamp_utc\":int(time.mktime(time.strptime(splittedArray[1], '%Y%m%d/%H%M'))) * 1000, \"date_utc\": splittedArray[1].split('/')[0],\"raw\":line}\n bulkInsertArray.append(singleTuple)\n\n \"\"\"if len(splittedArray)>10 and splittedArray[0]!=\"STID\":\n 
singleTuple = {\"station\":splittedArray[0],\"timestamp_utc\":int(time.mktime(time.strptime(splittedArray[1], '%Y%m%d/%H%M'))) * 1000, \"date_utc\": splittedArray[1].split('/')[0],\"raw\":line}\n bulkInsertArray.append(singleTuple)\"\"\"\n\n get_mongo_connection().mesowest.mesowest.insert_many(bulkInsertArray)\n return True\n \nif __name__ == '__main__':\n #list_of_files = glob.glob('./data/*.out') \n #for file_name in list_of_files:\n # file = open(file_name).readlines()\n # put_data(file)\n\n print(get_count_of_data(1483228800000, 2483228800000))\n print(get_data(1483228800000, 2483228800000,0,100))\n #put_data(\"BULLF,2018-03-16 21:45:00,8.00,37.52,-110.73,1128.00,-9999.00,6.33,-9999.00,8.26,-9999.00,-9999.00,-9999.00,-9999.00,-9999.00,-9999.00\\nBULLF,2012-02-01 18:15:00,8.00,37.52,-110.73,1128.00,41.00,1.62,168.80,2.86,-9999.00-9999.00,-90.26,0.07,-9999.00,-9999.00\\nBULLF,2012-02-01 18:30:00,8.00,37.52,-110.73,1128.00,41.55,1.23,151.10,2.70,-9999.00,-9999.00,-89.99,0.07,-9999.00,-9999.00\\nBULLF,2012-02-01 18:45:00,8.00,37.52,-110.73,1128.00,41.96,1.88,161.00,5.10,-9999.00,-9999.00,-89.79,0.07,-9999.00,-9999.00\")\n print(get_dates())","sub_path":"src/mongoTestNew.py","file_name":"mongoTestNew.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"626804858","text":"import numpy as np\nimport sys\nimport pickle\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport skimage.transform\n\nimport theano\nimport theano.tensor as T\nimport lasagne\n\nfrom lasagne.utils import floatX\nfrom lasagne.layers import InputLayer, ConcatLayer\nfrom lasagne.layers import Conv2DLayer as ConvLayer\nfrom lasagne.layers import Pool2DLayer as PoolLayer\n\ndef prep_image(im):\n MEAN_VALUES = np.array([104, 117, 123]).reshape((3,1,1))\n\n if len(im.shape) == 2:\n im = im[:, :, np.newaxis]\n im = np.repeat(im, 3, axis=2)\n h, w, _ = im.shape\n\n if h < w:\n im = skimage.transform.resize(im, (IMAGE_W, int(w*IMAGE_W/h)), preserve_range=True)\n else:\n im = skimage.transform.resize(im, (int(h*IMAGE_W/w), IMAGE_W), preserve_range=True)\n\n # Central crop\n h, w, _ = im.shape\n im = im[h//2-IMAGE_W//2:h//2+IMAGE_W//2, w//2-IMAGE_W//2:w//2+IMAGE_W//2]\n\n rawim = np.copy(im).astype('uint8')\n\n # Shuffle axes to c01\n im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)\n\n # Convert RGB to BGR\n im = im[::-1, :, :]\n\n im = im - MEAN_VALUES\n return rawim, floatX(im[np.newaxis])\n\ndef deprocess(x):\n MEAN_VALUES = np.array([104, 117, 123]).reshape((3,1,1))\n x = np.copy(x[0])\n x += MEAN_VALUES\n\n x = x[::-1]\n x = np.swapaxes(np.swapaxes(x, 0, 1), 1, 2)\n\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\ndef gram_matrix(x):\n x = x.flatten(ndim=3)\n g = T.tensordot(x, x, axes=([2], [2]))\n return g\n\ndef style_loss_relative(A, X, layer, layers):\n a = A[layer]\n x = X[layer]\n \n G_all = []\n for l in layers:\n G_layer = gram_matrix(A[l])\n G_all.append((G_layer**2).mean())\n G_all = T.sum(G_all)\n\n A = gram_matrix(a)\n G = gram_matrix(x)\n\n loss = ((G - A)**2).mean() / G_all\n return loss\n\ndef build_vgg_model():\n net = {}\n net['input'] = InputLayer((1, 3, IMAGE_W, IMAGE_W))\n net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)\n net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)\n net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')\n net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)\n 
net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)\n net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')\n net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)\n net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)\n net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)\n net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1, flip_filters=False)\n net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')\n net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)\n net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)\n net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)\n net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1, flip_filters=False)\n net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')\n net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)\n net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)\n net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)\n net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1, flip_filters=False)\n net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')\n return net\n\ndef vgg_loss(texture_file, texture_file_synthesized, net):\n texture = plt.imread(texture_file) \n rawim, texture = prep_image(texture)\n\n texture_synt = plt.imread(texture_file_synthesized) \n rawim_synt, texture_synt = prep_image(texture_synt)\n\n layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']\n layers = {k: net[k] for k in layers}\n\n input_im_theano = T.tensor4()\n outputs = lasagne.layers.get_output(layers.values(), input_im_theano)\n texture_features = {k: theano.shared(output.eval({input_im_theano: texture}))\n for k, output in zip(layers.keys(), outputs)}\n\n generated_image = theano.shared(floatX(texture_synt))\n gen_features = lasagne.layers.get_output(layers.values(), generated_image)\n gen_features = {k: v for k, v in zip(layers.keys(), gen_features)}\n\n losses_test = []\n layers_all = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']\n losses_test.append(0.2 * style_loss_relative(texture_features, gen_features, 'conv1_1', layers_all))\n losses_test.append(0.2 * style_loss_relative(texture_features, gen_features, 'conv2_1', layers_all))\n losses_test.append(0.2 * style_loss_relative(texture_features, gen_features, 'conv3_1', layers_all))\n losses_test.append(0.2 * style_loss_relative(texture_features, gen_features, 'conv4_1', layers_all))\n losses_test.append(0.2 * style_loss_relative(texture_features, gen_features, 'conv5_1', layers_all))\n total_loss_test = sum(losses_test)\n\n f_test_loss = theano.function([], total_loss_test)\n\n def test_loss(x0):\n x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W)))\n generated_image.set_value(x0)\n return f_test_loss().astype('float64')\n\n generated_image.set_value(floatX(texture_synt))\n loss = test_loss(generated_image.get_value().astype('float64'))\n\n return loss\n\ndef main(texture_file, texture_file_synthesised):\n global IMAGE_W\n IMAGE_W = 200\n\n net = build_vgg_model()\n values = pickle.load(open('../vgg19_normalized.pkl', 'rb'), encoding='latin1')['param values']\n lasagne.layers.set_all_param_values(net['pool5'], values)\n\n loss = vgg_loss(texture_file, texture_file_synthesised, net)\n\n return 
loss\n","sub_path":"figure4/vgg_loss.py","file_name":"vgg_loss.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614813995","text":"import pickle\nimport os\nimport sys\nimport numpy as np\nimport cv2\nname = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']\n\n\n\ndef unpickle(file):\n f = open(file, 'rb')\n dict = pickle.load(f, encoding='latin1')\n f.close()\n return dict\n\n\ndef one_hot_(labels):\n _labels = np.zeros((len(labels), len(name)), np.uint8)\n for i,label in enumerate(labels):\n _labels[i,label] = 1\n return _labels\n\n\ndef read_data_sets_train(dirName='cifar-10-batches-py', one_hot=True):\n data = []\n labels = []\n for i in range(1,6):\n dict = unpickle(os.path.join(dirName, 'data_batch_{0}'.format(i)))\n data += [dict['data']]\n labels += dict['labels']\n data = np.moveaxis(np.vstack(data).reshape(-1,3,32,32), 1, -1)\n for i, img in enumerate(data):\n data[i] = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n \n if one_hot:\n labels = one_hot_(labels)\n else:\n labels = np.array(labels, np.uint8)\n \n return data, labels\n\n\ndef read_data_sets_test(dirName='cifar-10-batches-py', one_hot=True):\n dict = unpickle(os.path.join(dirName, 'test_batch'))\n data = [dict['data']]\n labels = dict['labels']\n data = np.moveaxis(np.vstack(data).reshape(-1,3,32,32), 1, -1)\n for i, img in enumerate(data):\n data[i] = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n \n if one_hot:\n labels = one_hot_(labels)\n else:\n labels = np.array(labels, np.uint8)\n\n return data, labels\n\n\ndef read_data_sets(dirName='cifar-10-batches-py', one_hot=True):\n x_tr, y_tr = read_data_sets_train(dirName, one_hot)\n x_te, y_te = read_data_sets_test(dirName, one_hot)\n\n return x_tr, y_tr,x_te, y_te\n\n\nif __name__ == '__main__':\n import cv2\n\n\n x_tr, y_tr, x_te, y_te = read_data_sets()\n\n for x_te, y_te in zip(x_te, y_te):\n img = cv2.cvtColor(x_te, cv2.COLOR_RGB2BGR)\n cv2.imshow('', img)\n k = cv2.waitKey(1)\n if k == 27:\n break\n cv2.destroyAllWindows()\n","sub_path":"python/sandbox/main/cifar.py","file_name":"cifar.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"624276781","text":"\"\"\"\nUnit test the config module.\n\"\"\"\n# pylint: disable=missing-class-docstring,missing-function-docstring,no-self-use,\nfrom typing import List\n\nimport pytest\nfrom pydantic import AnyHttpUrl, ValidationError\n\nfrom app.core.config import Settings, get_settings\n\n\n@pytest.mark.unit\nclass TestSettings:\n @pytest.mark.parametrize(\n \"config_key, config_value, value_type\",\n [\n (\n \"API_V1_STR\",\n \"/api/vDummy\",\n str,\n ),\n (\n \"PROJECT_NAME\",\n \"pytest_dummy_project\",\n str,\n ),\n (\n \"PYDEVD\",\n \"True\",\n bool,\n ),\n (\n \"PYDEVD_PORT\",\n \"12345\",\n int,\n ),\n (\n \"PYDEVD_HOST\",\n \"magicHost\",\n str,\n ),\n ],\n )\n def test_env_var_injection(self, monkeypatch, config_key, config_value, value_type):\n monkeypatch.setenv(config_key, config_value)\n\n settings = Settings()\n\n assert getattr(settings, config_key) == value_type(config_value)\n\n def test_backend_cors__values(self, monkeypatch):\n url_1 = \"http://localhost\"\n url_2 = \"http://localhost:4200\"\n url_3 = \"http://localhost:3000\"\n monkeypatch.setenv(\"BACKEND_CORS_ORIGINS\", f'[\"{url_1}\",\"{url_2}\",\"{url_3}\"]')\n\n settings = Settings()\n cors: List[AnyHttpUrl] = 
settings.BACKEND_CORS_ORIGINS\n\n assert cors[0] == AnyHttpUrl(url_1)\n assert cors[1] == AnyHttpUrl(url_2)\n assert cors[2] == AnyHttpUrl(url_3)\n\n def test_backend_cors__single_value(self, monkeypatch):\n url_1 = \"http://localhost\"\n monkeypatch.setenv(\"BACKEND_CORS_ORIGINS\", f'\"{url_1}\"')\n\n settings = Settings()\n cors: List[AnyHttpUrl] = settings.BACKEND_CORS_ORIGINS\n\n assert cors[0] == AnyHttpUrl(url_1)\n\n def test_project_name__single_value(self, monkeypatch):\n expected_project_name = \"cool project name\"\n monkeypatch.setenv(\"PROJECT_NAME\", expected_project_name)\n\n settings = Settings()\n\n assert settings.PROJECT_NAME == expected_project_name\n\n @pytest.mark.parametrize(\"config_key\", [\"BACKEND_CORS_ORIGINS\"])\n @pytest.mark.parametrize(\n \"invalid_url\", [\"brokenUrl\", \"brokenUrl:12345\", \"[]\", \"12345\"]\n )\n def test_backend_cors__invalid_value(self, monkeypatch, config_key, invalid_url):\n monkeypatch.setenv(config_key, f'\"{invalid_url}\"')\n\n with pytest.raises(ValidationError):\n _ = Settings()\n\n @pytest.mark.parametrize(\n \"config_key,config_value,version\",\n [\n # GfK (or at least DIQC) convention: the app version is in\n # $VERSION. in every component / container, for every app.\n (\"VERSION\", \"x.z.y\", \"x.z.y\"),\n (\"VERSION\", \"9.1.0\", \"9.1.0\"),\n (\"VERSION\", \"9.1.1\", \"9.1.1\"),\n (\" \", \" \", \"0.1.0\"),\n ],\n ids=[\"Version x.z.y\", \"Version 9.1.0\", \"Version 9.1.1\", \"Default field value\"],\n )\n def test_settings__version(self, monkeypatch, config_key, config_value, version):\n monkeypatch.setenv(config_key, config_value)\n settings = Settings()\n assert settings.VERSION == version\n\n def test_get_settings__cache(self):\n settings_1 = get_settings()\n settings_2 = get_settings()\n\n assert settings_1 is not None\n assert settings_2 is not None\n assert settings_1 is settings_2\n","sub_path":"tests/unit/core/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"450980969","text":"from django.utils.translation import ugettext_lazy as _\n\nfrom base import conf\n\nCFGSERVER_PREFIX = \"CFGSERVER\"\n\nCFGSERVER_VERBOSE_NAME = _(\"CfgServer\")\nCFGSERVER_VERBOSE_NAME_PLURAL = _(\"CfgServer\")\n\nCFGSERVER_LIST_URL_NAME = CFGSERVER_PREFIX + conf.LIST_SUFFIX\nCFGSERVER_CREATE_URL_NAME = CFGSERVER_PREFIX + conf.CREATE_SUFFIX\nCFGSERVER_DETAIL_URL_NAME = CFGSERVER_PREFIX + conf.DETAIL_SUFFIX\nCFGSERVER_UPDATE_URL_NAME = CFGSERVER_PREFIX + conf.UPDATE_SUFFIX\nCFGSERVER_DELETE_URL_NAME = CFGSERVER_PREFIX + conf.DELETE_SUFFIX\n\nCFGCOLORS_PREFIX = \"CFGCOLORS\"\n\nCFGCOLORS_VERBOSE_NAME = _(\"CfgColors\")\nCFGCOLORS_VERBOSE_NAME_PLURAL = _(\"CfgColors\")\n\nCFGCOLORS_LIST_URL_NAME = CFGCOLORS_PREFIX + conf.LIST_SUFFIX\nCFGCOLORS_CREATE_URL_NAME = CFGCOLORS_PREFIX + conf.CREATE_SUFFIX\nCFGCOLORS_DETAIL_URL_NAME = CFGCOLORS_PREFIX + conf.DETAIL_SUFFIX\nCFGCOLORS_UPDATE_URL_NAME = CFGCOLORS_PREFIX + conf.UPDATE_SUFFIX\nCFGCOLORS_DELETE_URL_NAME = CFGCOLORS_PREFIX + conf.DELETE_SUFFIX\n\nCFGCONFIG_PREFIX = \"CFGCONFIG\"\n\nCFGCONFIG_VERBOSE_NAME = _(\"CfgConfig\")\nCFGCONFIG_VERBOSE_NAME_PLURAL = _(\"CfgConfig\")\n\nCFGCONFIG_LIST_URL_NAME = CFGCONFIG_PREFIX + conf.LIST_SUFFIX\nCFGCONFIG_CREATE_URL_NAME = CFGCONFIG_PREFIX + conf.CREATE_SUFFIX\nCFGCONFIG_DETAIL_URL_NAME = CFGCONFIG_PREFIX + conf.DETAIL_SUFFIX\nCFGCONFIG_UPDATE_URL_NAME = CFGCONFIG_PREFIX + 
conf.UPDATE_SUFFIX\nCFGCONFIG_DELETE_URL_NAME = CFGCONFIG_PREFIX + conf.DELETE_SUFFIX\n","sub_path":"application/pytsm/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"116441113","text":"from basic import db, Puppy\n\n# create\nmy_puppy = Puppy('Rufus',5)\ndb.session.add(my_puppy)\ndb.session.commit()\nfrank = Puppy('Frankie', 4)\ndb.session.add(frank)\ndb.session.commit()\n# read\nall_puppies = Puppy.query.all() # list of Puppy objects in the table\nprint(\"PRINTING ALL PUPPIES\")\nprint(all_puppies)\n\n# select by id\npuppy_one = Puppy.query.get(1)\n\nprint(\"PRINTING ONE PUPPY\")\nprint(puppy_one.name)\n\n# filters\n# filter_by produces the SQL query for us\npuppy_frankie = Puppy.query.filter_by(name=\"Frankie\")\nprint(\"PRINTING ALL FRANKIES\")\nprint(puppy_frankie.all())\n\n# Update\nfirst_puppy = Puppy.query.get(1)\nfirst_puppy.age = 10\ndb.session.add(first_puppy)\ndb.session.commit()\n\n# delete\nsecond_pup = Puppy.query.get(3)\nprint(\"PRINTING SECOND PUP\\n\", second_pup)\ndb.session.delete(second_pup)\ndb.session.commit()\n\n# read again after the updates\nall_puppies = Puppy.query.all()\nprint(\"PRINTING ALL PUPPIES\")\nprint(all_puppies)\n\n","sub_path":"flask_database/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38190230","text":"from selenium import webdriver\nimport unittest\n\nclass GetInfoByChrome(unittest.TestCase):\n\n    def setUp(self):\n        self.driver = webdriver.Chrome()\n        # implicit wait\n        self.driver.implicitly_wait(10)\n\n    def test_getBasicInfo(self):\n        url = \"http://www.baidu.com\"\n        self.driver.get(url)\n        self.driver.maximize_window()\n        # find the link whose text is \"地图\" (Maps)\n        getElementInfo = self.driver.find_element_by_link_text(\"地图\")\n        # print the element's size and tag name\n        print(\"element size:\", getElementInfo.size)\n        print(\"element tag name:\",getElementInfo.tag_name)\n\n    def tearDown(self):\n        self.driver.quit()\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"WebDriverAPI/07GetElementBasicInfo.py","file_name":"07GetElementBasicInfo.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74800339","text":"from vk_api.utils import get_random_id\nfrom rank_bm25 import BM25Okapi\nimport numpy as np\nfrom app import static\nimport requests\nimport json\nfrom numpy.random import randint\nimport re\nfrom collections import Counter, defaultdict\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\n\ntokenized_corpus = [doc.split() for doc in static.interactive_corpus]\nregex = re.compile(\"\\d+\")\n\ncache = {}\n\nbm25 = BM25Okapi(tokenized_corpus)\n\ndef sayhello(message, vk, upload):\n    peer_id = message[\"peer_id\"]\n    vk.messages.send(\n        message=u'Hi, I am Vika - a new bot, named after the virtual assistant from the Sergei Lukyanenko novel \"Labyrinth of Reflections\"',\n        random_id=get_random_id(),\n        peer_id=peer_id\n    )\n\ndef interactive(message, vk, upload):\n    text: str = message[\"text\"]\n    peer_id = message[\"peer_id\"]\n    phrase: str = text[1:].strip().split()\n    number = np.argmax(bm25.get_scores(phrase))\n\n    if number in cache:\n        attachment = f\"photo{cache[number]['owner_id']}_{cache[number]['id']}\"\n    else:\n        photos = upload.photo_messages(photos=f\"images/{number}.jpg\")\n        photo = photos[0]\n        attachment = f\"photo{photo['owner_id']}_{photo['id']}\"\n        cache[number] = {'owner_id': photo['owner_id'], 'id': photo['id']}\n\n    
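# The cache above stores the uploaded photo's owner_id and id, so each corpus image is uploaded to VK at most once and later queries reuse the cached attachment string.\n    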
vk.messages.send(peer_id=peer_id, attachment=attachment, random_id=get_random_id())\n\n\ndef make_nav(message, vk, upload):\n peer_id = message[\"peer_id\"]\n info = vk.messages.getConversationMembers(peer_id=peer_id)['profiles']\n rd = randint(0, len(info))\n name = f\"{info[rd]['first_name']} {info[rd]['last_name']}\"\n\n json_data = {\"name\": name}\n answer = requests.post(\"https://navalny.lol/api/generator\", json=json_data)\n file = json.loads(answer.text)['file']\n\n receive = requests.get(f'https://navalny.lol/output/{file}')\n with open(f'nav/{file}', 'wb') as f:\n f.write(receive.content)\n\n photos = upload.photo_messages(photos=f'nav/{file}')\n photo = photos[0]\n attachment = f\"photo{photo['owner_id']}_{photo['id']}\"\n vk.messages.send(peer_id=peer_id, attachment=attachment, random_id=get_random_id())\n\n\ndef hw1(message, vk, user_vk, upload):\n peer_id = message[\"peer_id\"]\n text: str = message[\"text\"].strip().split(' ')\n id_or_nickname = text[1]\n flag_nickname = True\n\n if regex.match(id_or_nickname):\n flag_nickname = False\n id_or_nickname = int(id_or_nickname)\n\n try:\n if flag_nickname:\n info = user_vk.wall.get(domain=id_or_nickname, count=100)\n else:\n info = user_vk.wall.get(owner_id=id_or_nickname, count=100)\n\n posts = info[\"items\"]\n count = len(posts)\n\n counter = Counter()\n for post in posts:\n counter[datetime.fromtimestamp(post[\"date\"]).hour] += 1\n\n parts = defaultdict(float)\n\n for i in range(0, 24, 3):\n for j in range(i, i+3):\n parts[f\"{i}-{i+2}\"] += counter[j] / count * 100\n\n keys = list(parts.keys())\n values = list(parts.values())\n\n x = np.arange(len(keys))\n width = 0.75\n\n fig, ax = plt.subplots()\n rects = ax.bar(x, values, width)\n\n ax.set_ylabel('% постов')\n ax.set_title('Посты по временным отрезкам')\n ax.set_xticks(x)\n ax.set_xticklabels(keys)\n\n fig.savefig(f\"hw1/{id_or_nickname}.jpg\")\n\n photos = upload.photo_messages(photos=f\"hw1/{id_or_nickname}.jpg\")\n photo = photos[0]\n attachment = f\"photo{photo['owner_id']}_{photo['id']}\"\n vk.messages.send(peer_id=peer_id, attachment=attachment, random_id=get_random_id())\n except:\n vk.messages.send(peer_id=peer_id, message=\"Какая-то ошибка\", random_id=get_random_id())\n\n\n\n ","sub_path":"app/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"606765476","text":"\n\n#calss header\nclass _POPLIN():\n\tdef __init__(self,): \n\t\tself.name = \"POPLIN\"\n\t\tself.definitions = [u'a type of slightly shiny cotton cloth: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_poplin.py","file_name":"_poplin.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"584337278","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author: kerlomz \n\nimport base64\nimport binascii\nimport imghdr\nimport io\n\nimport tensorflow as tf\nfrom PIL import Image as PIL_Image\nfrom tensorflow.python.framework.errors_impl import NotFoundError\n\nfrom config import *\nfrom handler import preprocessing\nfrom predict import predict_func\n\nos.environ['CUDA_VISIBLE_DEVICES'] = DEVICE\nsess = tf.Session()\ntry:\n with tf.gfile.GFile(COMPILE_MODEL_PATH, \"rb\") as f:\n 
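# Parse the frozen model's serialized GraphDef and import it into the current default graph.\n        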
graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name=\"\")\nexcept NotFoundError:\n exception('The system cannot find the model specified.')\n\ninit = tf.global_variables_initializer()\nsess.run(init)\n\npredict = sess.graph.get_tensor_by_name(\"output/predict:0\")\nx = sess.graph.get_tensor_by_name('input:0')\nkeep_prob = sess.graph.get_tensor_by_name('keep_prob:0')\n\nprint('Session Init')\n\n\ndef predict_b64(base64_img):\n # result, code, success = None, 200, True\n try:\n image_bytes = base64.b64decode(base64_img.encode('utf-8'))\n except binascii.Error:\n return None, 50002, False\n img_type = imghdr.what(None, h=image_bytes)\n if not img_type:\n return None, 50001, False\n try:\n result = predict_byte(image_bytes)\n return result, 200, True\n except OSError:\n return None, 50003, False\n except ValueError as e:\n print(e)\n return None, 50004, False\n\n\ndef predict_byte(image_bytes):\n data_stream = io.BytesIO(image_bytes)\n pil_image = PIL_Image.open(data_stream)\n origin_size = pil_image.size\n define_size = (origin_size[0] * MAGNIFICATION, origin_size[1] * MAGNIFICATION)\n if define_size != pil_image.size:\n pil_image = pil_image.resize(define_size)\n captcha_image = preprocessing(\n pil_image,\n binaryzation=BINARYZATION,\n smooth=SMOOTH,\n blur=BLUR,\n original_color=IMAGE_ORIGINAL_COLOR,\n invert=INVERT\n )\n image = captcha_image.flatten() / 255\n predict_text = predict_func(image, sess, predict, x, keep_prob)\n return predict_text\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"126920133","text":"#!/usr/bin/python\nimport click\nimport json\nimport pycurl\nimport urllib\nimport wget\nimport colorama\n\nfrom StringIO import StringIO\n\nAPI_BASE = 'http://databrainz.com/api/'\nSEARCH = 'search_api.cgi'\nDATA = 'data_api_new.cgi'\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n@click.group()\ndef cli():\n\tpass\n\n\n@cli.command()\n@click.option('--count', default=10, show_default=True, help='number of result to show')\n@click.argument('name', required=True)\ndef search(name,count):\n\t\"\"\" Search MP3 \"\"\"\n\tparams = {'qry': name, 'format': 'json', 'mh': count}\n\tbuffer = StringIO()\n\tc = pycurl.Curl()\n\tc.setopt(c.URL, API_BASE + SEARCH + '?' + urllib.urlencode(params))\n\tc.setopt(c.WRITEDATA, buffer)\n\tc.perform()\n\tc.close()\n\tbody = buffer.getvalue()\n\t#print(bcolors.OKBLUE + body)\n\tres = json.loads(body)\n\tprint(str(len(res['results'])) + ' results found')\n\tfor entry in res['results']:\n\t\tprint(bcolors.OKGREEN + entry['artist'] + ' - ' + bcolors.OKBLUE + entry['title'] + ' > uri: '+ bcolors.WARNING + entry['url'])\n\tif click.confirm('Anything interesting to download?'):\n\t\ttag = click.prompt('Sweet! Which one you\\'d like? 
', type=str)\n\t\tdownload(tag)\n\telse:\n\t\texit()\n\t#print(json.dumps(res['results'],sort_keys=True,indent=2,separators=(',', ': ')))\n\t#print(res['results'][0]['description'])\n\t#print(res['results'])\n\t#curl 'http://databrainz.com/api/search_api.cgi?qry=daft&format=json&mh=50'\n\n\t#curl -I 'http://databrainz.com/api/data_api_new.cgi?id=f969f1c55baacf172ad4b0aa295c1402&r=api&format=json'\n\n#@cli.command()\n#@click.argument('tag', required=True)\ndef download(tag):\n\t\"\"\" fetch the metadata for the chosen ID and hand the song URL to getIt \"\"\"\n\tprint('you entered '+ tag)\n\tparams = {'id': tag, 'format': 'json', 'r': 'api'}\n\tbuffer = StringIO()\n\tc = pycurl.Curl()\n\tc.setopt(c.URL, API_BASE + DATA + '?' + urllib.urlencode(params))\n\tc.setopt(c.WRITEDATA, buffer)\n\tc.perform()\n\tc.close()\n\tbody = buffer.getvalue()\n\t#print(body)\n\tres = json.loads(body)\n\t#print(json.dumps(res['song'],sort_keys=True,indent=2,separators=(',', ': ')))\n\tgetIt(res['song']['url'])\n\n#@cli.command()\n#@click.confirmation_option(help='Are you sure you want to download this music?')\ndef getIt(tag):\n\tclick.secho('Downloading the music!')\n\tfn = wget.download(tag)\n\tclick.secho('Congrats! You just downloaded '+fn , fg='green')\t\n\n#if __name__ == '__main__':\n#\tsearch()\n#\tgetFile()\n","sub_path":"mp3.py","file_name":"mp3.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433141937","text":"#-*-coding:utf-8-*-\nimport os\nimport sys\nimport logging\nimport MySQLdb.cursors as cursors\nfrom sdmslog import SdmsLogger\nimport sdmsdb \n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nlogger = SdmsLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass LineType(object):\n\n    def __init__(self):\n        pass\n\n    def __str__(self):\n        return repr(self.__dict__)\n\n    def get_attr_value(self):\n        return self.__dict__\n\nclass LineTypeDao(object):\n\n    def __init__(self, sql=\"MYSQL\"):\n        self._sql = sql\n\n    def fetch_linetype(self):\n        pass\n\n    def save(self, linetype):\n        sql_query = \"INSERT INTO line_types (%s) VALUES (%s)\" % (self.__extract_args(linetype))\n        with sdmsdb.connection() as con: \n            cursor = con.cursor()\n            cursor.execute(sql_query)\n            con.commit()\n        \n        logger.info(sql_query)\n        \n    def __extract_args(self, linetype):\n        columns, values = [], []\n        for attr, value in linetype.get_attr_value().items():\n            columns.append(attr)\n            values.append(value)\n\n        return self.__format_column(columns), self.__format_values(values)\n    \n    \n    def __format_values(self, values):\n        fmt = lambda x:\"'%s'\"%(x)\n        fmt_values = [fmt(value) for value in values]\n        return \", \".join(fmt_values)\n\n    def __format_column(self, columns):\n        return \", \".join(columns) \n\n    def fetch_all_linetype(self, index=0, limit=5, **argk):\n        sql_query = \"SELECT * FROM line_types limit %d, %d\"% (index, limit)\n        results = []\n        with sdmsdb.connection() as con: \n            cursor = con.cursor(cursors.DictCursor)\n            cursor.execute(sql_query)\n            results = cursor.fetchall()\n        return self._wrap_objects(results)\n    \n    def _wrap_objects(self, results):\n        for result in results:\n            yield self._wrap_object(result) \n\n    def _wrap_object(self, result): \n        line_type = LineType()\n        for key, value in result.items():\n            setattr(line_type, key, value)\n\n        return line_type \n\nif __name__ == \"__main__\":\n    line_type_dao = LineTypeDao() \n    results = line_type_dao.fetch_all_linetype()\n    for result in results:\n        
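# _wrap_object copies every column of the fetched row onto a LineType instance via setattr, so the row's fields are readable as attributes here.\n        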
print(str(result.LineType))\n","sub_path":"app/models/sdms_line_types.py","file_name":"sdms_line_types.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198794493","text":"import numpy as np\nimport os\nimport shutil\n\n\nclass Producer(object):\n def __init__(self):\n self.inputRootPath = 'input'\n self.outputRootPath = 'output'\n self.trainSize = 0.8\n self.valSize = 0.1\n self.operator = list()\n self.init_operator_list()\n\n def init_operator_list(self):\n self.operator.append(self.init_data_param('train'))\n self.operator.append(self.init_data_param('val'))\n self.operator.append(self.init_data_param('test'))\n print(self.operator)\n\n def init_data_param(self, data_set_name):\n current_set = dict()\n current_set['index'] = 0\n current_set['directory'] = os.path.join(self.outputRootPath, data_set_name)\n current_set['file'] = open(os.path.join('output', data_set_name + '.txt'), 'w')\n self.init_data_directory(current_set['directory'])\n return current_set\n\n @staticmethod\n def init_data_directory(directory_name):\n if not os.path.exists(directory_name):\n os.mkdir(directory_name)\n\n def close_root_file(self):\n for data_set in self.operator:\n data_set['file'].close()\n\n def init_end_index(self, max_length):\n self.operator[0]['index'] = int(max_length * self.trainSize)\n self.operator[1]['index'] = int(max_length * self.valSize) + self.operator[0]['index']\n self.operator[2]['index'] = max_length\n\n def create_class_directory(self, class_name):\n for data_set in self.operator:\n class_directory = os.path.join(data_set['directory'], class_name)\n self.init_data_directory(class_directory)\n\n def produce(self):\n class_list = np.array(os.listdir(self.inputRootPath))\n class_list.sort()\n class_index = 0\n\n for each_class in class_list:\n self.create_class_directory(each_class)\n random_list = np.random.permutation(os.listdir(\"{}/{}\".format(self.inputRootPath, each_class)))\n self.init_end_index(len(random_list))\n start_index = 0\n\n for data_set in self.operator:\n for index in range(start_index, data_set['index']):\n file_path = \"{}/{}\".format(each_class, random_list[index])\n data_set['file'].write(\"{} {}\\n\".format(file_path, class_index))\n shutil.copyfile(os.path.join(self.inputRootPath, file_path),\n os.path.join(data_set['directory'], file_path))\n start_index = data_set['index']\n class_index = class_index + 1\n\n\nif __name__ == '__main__':\n Producer = Producer()\n Producer.produce()\n Producer.close_root_file()\n","sub_path":"SplitDataset.py","file_name":"SplitDataset.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"59159312","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n\nfrom flask import request, abort, current_app\n\nfrom oar.lib import config\nfrom oar.lib.utils import reraise, to_unicode, integer_types\n\n\nclass WSGIProxyFix(object):\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n user = os.environ.get('AUTHENTICATE_UID', None)\n if user is not None:\n environ['USER'] = user\n else:\n if config.get('API_TRUST_IDENT', 0) == 1:\n user = environ.pop('HTTP_X_REMOTE_IDENT', None)\n if user not in (\"\", \"unknown\", \"(null)\"):\n environ['USER'] = user\n return self.app(environ, start_response)\n\nclass PrefixMiddleware(object):\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, 
start_response):\n prefix = environ.pop('HTTP_X_API_PATH_PREFIX', None)\n if prefix is not None:\n environ['SCRIPT_NAME'] = prefix\n return self.app(environ, start_response)\n\nclass Arg(object):\n \"\"\"Request argument type.\"\"\"\n\n DEFAULT_LOCATIONS = ('querystring', 'form', 'json')\n\n def __init__(self, type_=None, default=None, required=False,\n error=None, locations=None, dest=None):\n if isinstance(type_, (tuple, list)):\n if len(type_) >= 2:\n self.type = ListArg(type_[0], type_[1])\n elif len(type_):\n self.type = ListArg(type_[0])\n else:\n self.type = ListArg()\n elif type_ == list:\n self.type = ListArg()\n elif type_ is None:\n self.type = lambda x: x # default to no type conversion\n else:\n self.type = type_\n if isinstance(self.type, ListArg) and default is None:\n self.default = []\n else:\n self.default = default\n self.required = required\n self.dest = dest\n self.error = error\n self.locations = locations or self.DEFAULT_LOCATIONS\n\n def raw_value(self, value):\n if value is not None:\n if isinstance(self.type, ListArg):\n if len(value) > 0:\n return self.type.raw_value(value)\n else:\n return to_unicode(value)\n\n\nclass ListArg(object):\n def __init__(self, type_=str, sep=None):\n self.type = type_\n self.sep = sep\n\n def __call__(self, value, callback):\n if not self.sep: # TOFINISH\n return value\n def convert():\n string = to_unicode(value)\n if string:\n for item in string.split(self.sep):\n yield callback(item, self.type)\n return list(convert())\n\n def raw_value(self, values):\n sep = self.sep if self.sep else ', ' \n return to_unicode(sep.join((\"%s\" % v for v in values)))\n\n\nclass ArgParser(object):\n \"\"\"Flask request argument parser.\"\"\"\n\n MISSING = object()\n\n def __init__(self, argmap):\n self.argmap = argmap\n\n def get_value(self, data, name, argobj):\n if isinstance(argobj.type, ListArg) and not argobj.type.sep:\n return data.getlist(name)\n else:\n return data.get(name, self.MISSING) \n\n def parse_arg(self, argname, argobj):\n \"\"\"Pull a form value from the request.\"\"\"\n for location in argobj.locations:\n value = self.MISSING\n if location == \"querystring\" and request.args:\n value = self.get_value(request.args, argname, argobj)\n elif location == \"json\":\n json_data = request.get_json(silent=True, force=True)\n if json_data:\n value = self.get_value(json_data, argname, argobj)\n else:\n value = self.MISSING\n elif location == \"form\":\n value = self.get_value(request.form, argname, argobj)\n if value is not self.MISSING:\n return value\n return self.MISSING\n\n def convert_bool(self, value):\n \"\"\" Try to convert ``value`` to a Boolean.\"\"\"\n if value.lower() in ('True', 'yes', '1'):\n return True\n if value.lower() in ('false', 'no', '0'):\n return False\n raise ValueError(\"Cannot convert '%s' to a Boolean value\" % value)\n\n def convert_int(self, value):\n \"\"\" Try to convert ``value`` to an Integer.\"\"\"\n try:\n value = float(value)\n except:\n pass\n for _type in integer_types:\n try:\n return _type(value)\n except:\n pass\n raise ValueError(\"Cannot convert '%s' to a Integer value\" % value)\n\n def convert(self, value, argtype):\n if argtype == str:\n return to_unicode(value)\n elif argtype == bool:\n return self.convert_bool(value)\n elif argtype in integer_types:\n return self.convert_int(value)\n if isinstance(argtype, ListArg):\n return argtype(value, self.convert)\n else:\n return argtype(value)\n\n def parse(self):\n \"\"\"Parses the request arguments.\"\"\"\n parsed_kwargs = {}\n raw_kwargs = {}\n for 
argname, argobj in self.argmap.items():\n dest = argobj.dest if argobj.dest is not None else argname\n parsed_value = self.parse_arg(argname, argobj)\n if parsed_value is not self.MISSING:\n try:\n parsed_kwargs[dest] = self.convert(parsed_value,\n argobj.type)\n except Exception as e:\n msg = (\"The parameter '%s' specified in the request \"\n \"URI is not supported. %s\" % (argname, e))\n try:\n abort(400)\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n exc_value.data = msg\n reraise(exc_type, exc_value, tb.tb_next)\n else:\n parsed_kwargs[dest] = argobj.default\n raw_value = argobj.raw_value(parsed_kwargs[dest])\n if raw_value is not None:\n raw_kwargs[argname] = raw_value\n return parsed_kwargs, raw_kwargs\n\ndef list_paginate(items, offset, limit, error_out=True):\n if error_out and (offset < 0 or offset > len(items)):\n abort(404)\n\n if limit is None:\n limit = current_app.config.get(\"API_DEFAULT_MAX_ITEMS_NUMBER\")\n\n items_paginated = items[offset:min(len(items), offset + limit)]\n\n return items_paginated\n","sub_path":"oar/rest_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497926111","text":"'''\r\n% [q_unit] = quat_unity (q)\r\n% To normalize the quaternion from a non-unity quaternion.\r\n% Normalization is based on euclidian norm:\r\n% q_unit = q/sqrt(q'*q);\r\n%\r\n% inputs:\r\n% q\r\n% Input quaternion (4)\r\n%\r\n% outputs:\r\n% q_unit\r\n% unity quaternion, such that:\r\n% q'*q = 1\r\n% \r\n% Valdemir Carrara, May, 2015.\r\n'''\r\n\r\nimport numpy as np\r\nimport math\r\n\r\ndef quat_unity(q):\r\n\r\n\tqnorm = math.sqrt(np.dot(q, q.T))\r\n\tq_unit = np.zeros(4)\r\n\r\n\tif qnorm != 0:\r\n\t\tq_unit = q/qnorm\r\n\telse:\r\n\t\tq_unit[3] = 1\r\n\r\n\treturn q_unit\r\n\r\nif __name__ == \"__main__\":\r\n\tq = np.array([0,0,0,0])\r\n\tq = np.array([1,1,-1,-1])\r\n\tprint(quat_unity(q))","sub_path":"Propat/quat_unity.py","file_name":"quat_unity.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"154446219","text":"#input1=[1,2,3,4,5,6,7]\n#input2=[ [1,4], [1,5], [4,2], [4,3], [2,3] ]\ninput1=[\"a\",\"b\",\"c\",\"d\"]\ninput2=[ [\"a\",4], [\"a\",5], [\"b\",4], [\"b\",3], [\"c\",7] ]\n\n# Output below:\n#a: 4,5\n#b: 4,3\n#c: 7\n#d: None\n\n# ============= START CODE ===================\nmyDict={}\nrelationships = [];\n# Solution 1:\nfor k in input2:\n newRelationship = {}\n newRelationship[\"friend_id\"] = k[0]\n newRelationship[\"user_id\"]= k[1]\n relationships.append(newRelationship);\n# Solution 2: \n if k[0] not in myDict:\n myDict[k[0]] = [];\n if k[1] not in myDict:\n myDict[k[1]] = []; \n myDict[k[0]].append(k[1])\n myDict[k[1]].append(k[0])\n\nprint(myDict)\nprint(relationships)\n\nfor i,k in myDict.items():\n print(\"%s has the corresponding %s\" % (i,k))\n\n# =========== END CODE ================\n\n# for i in input1:\n# print(i)\n# for k in input2:\n# print(k[1])\n\n# dict1={}\n# dict1[1]=10\n# dict1[2]=20\n# dict1[3]=30\n# print(dict1)\n\n","sub_path":"Data/minted2-copy.py","file_name":"minted2-copy.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"114142049","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\nimport os\n\nurlpatterns = patterns(\n 'transfr.app.views',\n 
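# Login/logout, file management and user administration views, resolved relative to transfr.app.views:\n    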
url(r'^accounts/login/$', 'mylogin', name='login'),\n    url(r'^accounts/logout/$', 'mylogout', name='logout'),\n    url(r'^disconnected/$', 'disconnected', name='disconnected'),\n    url(r'^$', 'file_list', name='file_list'),\n    url(r'^add/(\\d+)/$', 'add_file', name='add_file'),\n    url(r'^additional-form/(\\d+)/$', 'additional_upload_form', name='additional_upload_form'),\n    url(r'^delete/$', 'delete_file', name='delete_file'),\n    url(r'^instructions/(\\d+)/$', 'send_instructions', name='send_instructions'),\n    url(r'^help/$', 'help', name='help'),\n    url(r'^users/$', 'manage_users', name='manage_users'),\n    url(r'^users/password/(\\d+)/$', 'set_password', name='set_password'),\n    url(r'^users/delete/(\\d+)/$', 'delete_user', name='delete_user'),\n    url(r'^upload/progress/$', 'upload_progress', name='upload_progress'),\n    url(r'^thumbnail/(?P\\d+)/$', 'view_thumbnail', name='view_thumbnail'),\n    #url(r'^download/(?P\\d+)/$', 'download_file', name='download_file'),\n)\n\nurlpatterns += patterns(\n    '',\n    url(r'^media-app/(.*)$', 'django.views.static.serve', \n        {'document_root': os.path.join(settings.PROJECT_PATH, 'app', 'media')},\n        name='media_app'),\n\n)\n\nif settings.DEBUG:\n    urlpatterns += patterns(\n        '',\n        url(r'^media/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}, name=\"media\"),\n        url(r'^media-app/(.*)$', 'django.views.static.serve', \n            {'document_root': os.path.join(settings.PROJECT_PATH, 'app', 'media')},\n            name='media_app'),\n\n    )\n\n\nif settings.USE_I18N:\n    js_info_dict = {\n        'packages': ('transfr.app',),\n    }\n    urlpatterns += patterns(\n        '',\n        (r'^i18n/', include('django.conf.urls.i18n')),\n        url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict, name=\"jsi18n\"),\n        #(r'^jsi18n/(?P<packages>\\S+?)/$', 'django.views.i18n.javascript_catalog'),\n    )\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"181708219","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport re\ndef get_strtime(text):\n    text = text.replace(\"年\", \"-\").replace(\"月\", \"-\").replace(\"日\", \" \").replace(\"/\", \"-\").strip()\n    text = re.sub(\"\\s+\", \" \", text)\n    t = \"\"\n    regex_list = [\n        # 2013年8月15日 22:46:21\n        \"(\\d{4}.\\d{1,2}—\\d{4}.\\d{1,2})\"\n        # \"2013年8月15日 22:46\"\n\n    ]\n    for regex in regex_list:\n        t = re.search(regex, text)\n        if t:\n#            t = t.group(1)\n            return t\n        else:\n            return ''\n\ndef list_2_dic(list1):\n    #print(list1)\n    dict1={}\n    # count the occurrences of each item and add them to the dict\n    for i in list1:\n        skey=dict1.get(i)# get the current count for this key\n        if skey==None:# check whether the key has no count yet\n            dict1[i]=1\n        else:\n            dict1[i]+=1\n    #print(dict1)\n    # get the dict's keys and values\n    sk=dict1.keys()\n    sv=dict1.values()\n    # loop to find the most frequent values in the list\n    \n    \n    # convert the dict values to a list, then sort and reverse it\n    sl=list(sv)\n    sl.sort()\n    sl.reverse()\n    dict2={}\n    for i in sl:\n        for j in sk:\n            # if a key's count equals the current value, store that key/count in the new dict\n            if dict1[j]==i:\n                dict2[j] = i\n    return dict2\n\n\ndef var_detect(var,target):\n    a=var.values();\n    b=var.keys();\n    if target in a:\n        return 0;\n    else:\n        return 1;\nfrom jpype import *\n\n#startJVM(getDefaultJVMPath(), \"-Djava.class.path=D:/NLP/hanlp/hanlp-1.7.4.jar;D:/NLP/hanlp\",\n#         \"-Xms1g\",\n#         \"-Xmx1g\")\n\nHanLP = JClass('com.hankcs.hanlp.HanLP')\n\nCRFnewSegment = HanLP.newSegment(\"crf\")\n\n#with open(\"out.txt\", \"r\",encoding='utf-8') as f:\n#    data = f.readlines()\n#for index,thing in enumerate(data):\n#    if(len(str(thing))>20):\n#        if(index<60):\n#            term_list = 
CRFnewSegment.seg(thing)\n# print(term_list)\nvar1=[]\nvar2=[]\nvar3=[] \ntimes=6;\n#with open(\"out_ori.txt\", \"r\",encoding='utf-8') as f:\n# data = f.readlines() \n\nwith open(\"ins.txt\", \"r\",encoding='utf-8') as f:\n a = f.read() \n a=a.replace('\\n',' ')\n data=a.split(' ')\nfor time in range(1,times):\n dict_=[]\n for index,thing in enumerate(data):\n # term_list = CRFnewSegment.seg(thing)\n source=thing\n term_list = CRFnewSegment.seg(source)\n for thing_1 in term_list:\n thing_1=str(thing_1)\n thing_1=thing_1.split('/')[0]\n if(len(thing_1)>=time):\n dict_.append(thing_1[-time:]) \n with open(str(time)+\".txt\",\"w\") as f:\n for thing in list_2_dic(dict_).items():\n if(thing[1]>=10):\n f.write(str(thing)+'\\n') \n \n \n#Segment = JClass(\"com.hankcs.hanlp.seg.Segment\")\n#Term = JClass(\"com.hankcs.hanlp.seg.common.Term\")\n#\n#segment = HanLP.newSegment().enableOrganizationRecognize(True)\n#for sentence in sentences:\n#\n#term_list = segment.seg(sentence)\n#print(term_list)\n#\n#print(\"n========== 机构名 标准分词器已经全部关闭 ==========n\")\n#print(CRFnewSegment.seg(sentences[0]))\n#\n#segment = HanLP.newSegment('crf').enableOrganizationRecognize(True)","sub_path":"Career_network_4_7/Parsing/get_frequencyfile/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"355145150","text":"import boto3\nimport json\nimport os\nimport time\n\nec2Client = boto3.client('ec2')\necsClient = boto3.client('ecs')\nautoscalingClient = boto3.client('autoscaling')\nsnsClient = boto3.client('sns')\nlambdaClient = boto3.client('lambda')\n\n\ndef publishSNSMessage(snsMessage, snsTopicArn):\n response = snsClient.publish(\n TopicArn=snsTopicArn,\n Message=json.dumps(snsMessage),\n Subject='reinvoking')\n\n\ndef setContainerInstanceStatusToDraining(ecsClusterName, containerInstanceArn):\n response = ecsClient.update_container_instances_state(\n cluster=ecsClusterName,\n containerInstances=[containerInstanceArn],\n status='DRAINING')\n\n\ndef sendLifecycleActionHeartbeat(\n hookName,\n asgName,\n lifecycleActionToken,\n ec2InstanceId):\n response = autoscalingClient.record_lifecycle_action_heartbeat(\n LifecycleHookName=hookName,\n AutoScalingGroupName=asgName,\n LifecycleActionToken=lifecycleActionToken,\n InstanceId=ec2InstanceId,\n )\n\n\ndef tasksRunning(\n ecsClusterName,\n ec2InstanceId,\n hookName,\n asgName,\n lifecycleActionToken):\n ecsContainerInstances = ecsClient.describe_container_instances(\n cluster=ecsClusterName, containerInstances=ecsClient.list_container_instances(\n cluster=ecsClusterName)['containerInstanceArns'])['containerInstances']\n for i in ecsContainerInstances:\n if i['ec2InstanceId'] == ec2InstanceId:\n if i['status'] == 'ACTIVE':\n setContainerInstanceStatusToDraining(\n ecsClusterName, i['containerInstanceArn'])\n return 1\n if (i['runningTasksCount'] > 0) or (i['pendingTasksCount'] > 0):\n sendLifecycleActionHeartbeat(\n hookName, asgName, lifecycleActionToken, ec2InstanceId)\n return 1\n return 0\n return 2\n\n\ndef handle(event, context):\n ecsClusterName = os.environ['CLUSTER_NAME']\n snsTopicArn = event['Records'][0]['Sns']['TopicArn']\n snsMessage = json.loads(event['Records'][0]['Sns']['Message'])\n\n # ignore non-lifecycle messages\n if not 'LifecycleHookName' in snsMessage:\n print('non-lifecycle message')\n return\n\n lifecycleHookName = snsMessage['LifecycleHookName']\n lifecycleActionToken = snsMessage['LifecycleActionToken']\n asgName = 
snsMessage['AutoScalingGroupName']\n ec2InstanceId = snsMessage['EC2InstanceId']\n\n checkTasks = tasksRunning(\n ecsClusterName,\n ec2InstanceId,\n lifecycleHookName,\n asgName,\n lifecycleActionToken)\n\n print('tasks still running', checkTasks)\n\n if checkTasks == 0:\n try:\n print('complete_lifecycle_action')\n\n response = autoscalingClient.complete_lifecycle_action(\n LifecycleHookName=lifecycleHookName,\n AutoScalingGroupName=asgName,\n LifecycleActionToken=lifecycleActionToken,\n LifecycleActionResult='CONTINUE')\n\n except BaseException as e:\n print(str(e))\n\n elif checkTasks == 1:\n # Sleep for 1 minute and re-send lifecycle termination message\n print('re-sending termination message')\n time.sleep(60)\n publishSNSMessage(snsMessage, snsTopicArn)\n","sub_path":"ecs_drain/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"648959364","text":"# -*- coding: utf-8 -*-\n# Group 4 Robot - Lea BESNARD, Laetitia KRUMEICH, Sofian BENJEBRIA,\n# Noémie DELOEUVRE , Morgan SEGUELA\n\nimport datetime as date\nimport re\n\nimport g4_utils_v33 as utilsg4\n\n# fileTarget = \"C:/Users/lea/Desktop/PROJET/\"\n# \"https://www.ladepeche.fr/services/flux-rss/\"\n\n\ndef recovery_link_new_articles_ld(url_rss):\n # We retrieve the rss feeds for each article page.\n # Each HTML-coded article is scanned with beautiful soup.\n soup = utilsg4.recovery_flux_url_rss(url_rss)\n list_link = []\n for link in soup.find_all(\"a\"):\n if link.get(\"class\") == [\"rss\"]:\n url = link.get(\"href\")\n url = \"https://www.ladepeche.fr/\" + url\n soup = utilsg4.recovery_flux_url_rss(url)\n items = soup.find_all(\"item\")\n # We retrieve all articles\n for item in items:\n list_link.append(re.search(r\"(.*)\", str(item))[1])\n return(list_link)\n\n\ndef recovery_new_articles_ld(\n file_target=\"data/clean/robot/\" + str(date.datetime.now().date()) + \"/\"):\n\n links = recovery_link_new_articles_ld(\n \"https://www.ladepeche.fr/services/flux-rss/\")\n\n list_articles = []\n i = 0\n for article in links:\n new_article = recovery_information_ld(article)\n list_articles.append(new_article)\n i += 1\n if i == 50:\n utilsg4.create_json(file_target, list_articles, \"ladepeche/\", \"LD\")\n\n i = 0\n list_articles = []\n\n utilsg4.create_json(file_target, list_articles, \"ladepeche/\", \"LD\")\n\n\ndef recovery_information_ld(url):\n\n soup = utilsg4.recovery_flux_url_rss(url)\n # Retrieve the title\n for meta in soup.find_all('meta'):\n if meta.get(\"property\") == 'og:title':\n title = meta.get(\"content\")\n\n # Retrieve the publication date\n for time in soup.find_all('time'):\n if time.get(\"itemprop\") == 'datePublished':\n date = time.get(\"itemprop\")\n for valeur in re.finditer('[0-9]{2}\\/[0-9]{2}\\/[0-9]{4}',\n str(time)):\n date = valeur.group(0)\n\n # Retrieve the author\n author = []\n for div in soup.find_all('div'):\n if div.get(\"class\") == ['article_author']:\n author.append(div.span.get_text())\n\n # Retrieve the content\n content = \"\"\n for div in soup.find_all('div'):\n if div.get(\"itemprop\") == 'articleBody':\n for p in div.find_all('p'):\n content += p.get_text() + \" \"\n\n # Retrieve the theme\n theme = \"\"\n for h2 in soup.find_all('h2'):\n if h2.get(\"itemprop\") == 'about':\n theme = h2.get_text()\n\n article = utilsg4.recovery_article(title, 'La Depeche', author,\n date, content, theme)\n return(article)\n\n\nif __name__ == '__main__':\n 
recovery_new_articles_ld()\n","sub_path":"Groupe4_Robot-master/LaDepeche/g4_ladepeche_v1.py","file_name":"g4_ladepeche_v1.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"426268063","text":"import pandas as pd\nimport pymysql\nimport sys\nimport io\nimport datetime\nimport numpy as np\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')\n\n# read the Excel file\ndf = pd.read_excel('C:/workspace/down/python_create_app_1-master/python_create_app_1-master/section5/5-4.1000 Rows.xlsx', header=None,\n                   names=['name', 'researcher', 'unit', 'sales', 'price', 'difference', 'location', 'type', 'cylinders'])\n\nisnullDf = pd.isnull(df)\nfor i in range(1, len(df)+1):\n    for col in df.columns:\n        if isnullDf.loc[i][col]:\n            df.loc[i][col] = None\nprint(df.loc[8]['cylinders'])\npymysql.converters.encoders[np.int64] = pymysql.converters.escape_int\npymysql.converters.encoders[np.float64] = pymysql.converters.escape_float\npymysql.converters.conversions = pymysql.converters.encoders.copy()\npymysql.converters.conversions.update(pymysql.converters.decoders)\n\n# connect to MySQL\nconn = pymysql.connect(host='127.0.0.1', user='python', password='sh13657', charset='utf8', db='python_app1')\n\n# get the current time\nnow = datetime.datetime.now()\nnowDateTime = now.strftime('%Y-%m-%d %H-%M-%S')\n# print(nowDateTime)\n# get a cursor\n# with conn.cursor() as c:\n#     # create the table\n#     c.execute(\"CREATE TABLE IF NOT EXISTS products(id bigint(30) NOT NULL, name varchar(100) NOT NULL,\\\n#     researcher varchar(20), unit bigint(30),\\\n#     sales float(20), price float(20), difference float(20),\\\n#     location varchar(40), type varchar(30), cylinders float(40), regdate varchar(20))\")\n#     # insert the data\n#     for i in range(1, len(df)+1):\n#         # row data list\n#         data = []\n#\n#         data.append(i)\n#         # data.append(el for el in df.loc[i])\n#         for el in df.loc[i]:\n#             data.append(el)\n#         data.append(nowDateTime)\n#         c.execute(\"INSERT INTO products(id, name, researcher, unit, sales, price, difference, location, type, cylinders, regdate) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", tuple(data))\n","sub_path":"section5/practice3.py","file_name":"practice3.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220942206","text":"# This function applies binary search in the usual iterative manner\r\ndef binarysearch(cards, query):\r\n    lo, hi= 0, len(cards)-1\r\n    while(lo<=hi):\r\n\r\n        mid= (lo+hi) // 2\r\n        if cards[mid] == query:\r\n            return mid\r\n        elif cards[mid] < query:\r\n            lo= mid+1\r\n        else:\r\n            hi= mid-1\r\n    return -1\r\n\r\n# This function runs the binary search loop but delegates each comparison to a condition callback\r\ndef binarysearchrec(lo, hi, condition):\r\n\r\n    while(lo <= hi):\r\n        mid = (lo+hi) // 2\r\n        result= condition(mid)\r\n        if result == 'found':\r\n            return mid\r\n        elif result == 'right':\r\n            lo= mid+1\r\n        else:\r\n            hi= mid-1\r\n    return -1\r\n\r\ndef locate_element(cards, query):\r\n    def condition(mid):\r\n        if cards[mid] == query:\r\n            if mid > 0 and cards[mid-1] == query:\r\n                return 'left'\r\n            else:\r\n                return 'found'\r\n        elif cards[mid] < query:\r\n            return 'right'\r\n        else:\r\n            return 'left'\r\n\r\n    return binarysearchrec(0, len(cards)-1, condition)\r\n\r\n\r\ncards= list(map(int, input('Enter the card elements: ').split()))\r\ncards.sort()\r\nquery= int(input('Enter the element you want to search for in the cards: '))\r\n\r\noutput= 
locate_element(cards, query) # Uncomment this command to perform binary search with recursion\r\n # (if you are uncommenting the comment below command)\r\n\r\n#output= binarysearch(cards, query) # Uncomment this command to perform binary search without recursion\r\n # (if you are uncommenting the comment above command)\r\n\r\nif output == -1:\r\n print('Element for which you are searching for is NOT FOUND')\r\nelse:\r\n print('Element is Found at position: ', output)","sub_path":"DSA Problems with solution/Binary Search.py","file_name":"Binary Search.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"121517361","text":"\"\"\" All tasks come from www.codewars.com \"\"\"\n\n\"\"\"\nTASK: Complementary DNA\n\nDeoxyribonucleic acid (DNA) is a chemical found in the nucleus of cells and carries the \"instructions\" \nfor the development and functioning of living organisms.\n\nIn DNA strings, symbols \"A\" and \"T\" are complements of each other, as \"C\" and \"G\". \nYou have function with one side of the DNA (string, except for Haskell); you need to get the other complementary side. \nDNA strand is never empty or there is no DNA at all (again, except for Haskell).\n\nExamples:\nDNA_strand (\"ATTGC\") # return \"TAACG\"\nDNA_strand (\"GTAT\") # return \"CATA\"\n\"\"\"\n\n\ndef DNA_strand(dna):\n dna_sequence = {\"A\":\"T\",\n \"T\":\"A\",\n \"C\":\"G\",\n \"G\":\"C\"\n }\n return \"\".join([dna_sequence[x] for x in dna])\n\n","sub_path":"7_kyu/7_kyu_Complementary DNA.py","file_name":"7_kyu_Complementary DNA.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"565792071","text":"from django.shortcuts import render\nfrom .models import Lore\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom .forms import LoreForm\nfrom django.db.models import Q\n\n\n\n# Create your views here.\n\n#Exibição de dados\ndef lore_list(request):\n\tlores = Lore.objects.filter()\n\treturn render(request, 'wikaldeiraapp/lore_list.html', {'lores': lores})\n\ndef lore_detail(request, pk):\n\tlore = get_object_or_404(Lore, pk=pk)\n\treturn render(request, 'wikaldeiraapp/lore_detail.html', {'lore': lore})\n\n#Edição de dados\ndef lore_edit(request, pk):\n\tlore = get_object_or_404(Lore, pk=pk)\n\tif request.method == \"POST\":\n\t\tform = LoreForm(request.POST, request.FILES, instance=lore)\n\t\tif form.is_valid():\n\t\t\tlore = form.save(commit=False)\n\t\t\tlore.save()\n\t\t\treturn redirect('lore_detail', pk=lore.pk)\n\telse:\n\t\tform = LoreForm(instance=lore)\n\treturn render(request, 'wikaldeiraapp/lore_edit.html', {'form': form})\n\ndef lore_new(request):\n\tif request.method == \"POST\":\n\t\tform = LoreForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tlore = form.save(commit=False)\n\t\t\tlore.save()\n\t\t\treturn redirect('lore_detail', pk=lore.pk)\n\telse:\n\t\tform = LoreForm()\n\treturn render(request, 'wikaldeiraapp/lore_edit.html', {'form': form})\n\ndef lore_delete(request, pk, template_name='wikaldeiraapp/lore_confirm_delete.html'):\n\tlore = get_object_or_404(Lore, pk=pk)\n\tif request.method == 'POST':\n\t\tlore.delete()\n\t\treturn redirect('lore_list')\n\treturn render(request, template_name, {'object': lore})\n\ndef lore_search(request):\n if request.method == 'GET':\n query = request.GET.get('search_box', None)\n if query:\n lores = Lore.objects.filter( 
Q(title__icontains=query) | Q(text__icontains=query))\n else:\n lores = Lore.objects.filter()\n return render(request, 'wikaldeiraapp/lore_list.html', {'lores': lores})\n\n\n\n\n\n\n","sub_path":"wikaldeiraapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"421173883","text":"import socket\nimport random\nfrom kivy.uix.label import Label\nfrom kivy.core.window import Window\n\n\nclass Player():\n def __init__(self, name=\"local\",main=None,data=None):\n self.comm=main\n \n self.name=NickName(self,name)\n self.number=len(self.comm.players)+1\n self.delta=0\n self.hand=[]\n self.expression_clear='0'\n self.exp = Expression(self)\n self.score=Score(self)\n self.artifacts=[]\n \n \n \n\n \n \n\n def get_localname(self):\n self.comm.main.add_widget()\n def calculate_card_pos(self):\n print(self.name.text)\n print('suka ya '+str(self.number))\n if self.number==1:\n \n \n return ({\"x\":10+138*(len(self.hand)),\"y\":460})\n elif self.number==2:\n return ({\"x\":10+138*(len(self.hand)),\"y\":15})\n def calculate_art_pos(self):\n if self.number==1:\n \n \n return ({\"x\":40+138*(len(self.artifacts)),\"y\":360})\n elif self.number==2:\n return ({\"x\":40+138*(len(self.artifacts)),\"y\":115})\n def take_card(self):\n if len(self.comm.deck)!=0:\n \n card=self.comm.deck.pop()\n card.appear(self)\n \n \n if card.type!=\"event\" and card.type!=\"artifact\":\n if len(self.hand)<7:\n self.hand.append(card)\n else:\n card.disenchant()\n \n \n elif card.type==\"artifact\":\n self.artifacts.append(card)\n \n else:\n self.comm.end_game()\n\n\n def update(self):\n self.score.update()\n copy = self.hand.copy()\n self.hand=[]\n for art in self.artifacts:\n if art.way==\"update\":\n art.update_action()\n for card in copy:\n card.appear(self)\n card.appear(self)\n self.hand.append(card)\n\n \nclass Score(Label):\n def __init__(self,player, **kwargs):\n super().__init__(**kwargs)\n self.player =player\n self.score=0\n self.width=50\n self.height=35\n self.font_size=30\n self.player.comm.main.add_widget(self)\n self.update()\n self.status='ok'\n \n \n\n def update(self):\n x= self.player.comm.X\n y= self.player.comm.Y\n z= self.player.comm.Z\n print(self.player.delta)\n try:\n self.score=self.player.delta+eval(self.player.expression_clear)\n self.status='ok'\n\n except:\n self.status='error'\n \n\n if self.score>0:\n add='+'\n else:\n add=''\n self.text=add+str(self.score)\n \nclass NickName(Label):\n def __init__(self,player,text, **kwargs):\n super().__init__(**kwargs)\n self.text=text\n self.width=150\n self.height=40 \n self.font_size=24\n self.player = player\n self.player.comm.main.add_widget(self)\nclass Expression(Label):\n def __init__(self,player):\n super().__init__()\n self.player=player\n self.player.comm.main.add_widget(self)\n sets =[['+',\"x\",\"+\",\"y\",\"-\",\"z\"],[\"-\",\"x\",\"+\",\"y\",\"+\",\"z\"],['+',\"x\",\"-\",\"y\",\"+\",\"z\"]]\n self.widgets =[]\n self.set= random.choice(sets)\n if self.player.number==1:\n self.data =(140,225)\n elif self.player.number==2:\n self.data =(140,375)\n self.reserve=''\n \n def on_touch_down(self, touch):\n super().on_touch_down(touch)\n mode = touch.button\n \n active=self.player.comm.active_card\n \n for w in self.widgets:\n \n if self.player.comm.active_card!=None:\n \n if w.type in active.targets or w.text in active.targets:\n \n if w.collide_point(touch.x,touch.y) and mode!=\"middle\":\n \n active.action(self,w,mode)\n \n 
self.player.comm.UI.clear_highlight()\n \n \n\n def update(self,dt=None):\n x=self.data[0]\n y=self.data[1]\n step=30\n width = 700\n start_pos=(x,y)\n start_pos=(start_pos[0]+(width-(len(self.set)*step))/2,start_pos[1])\n for w in self.widgets:\n self.player.comm.main.remove_widget(w)\n self.widgets=[]\n \n \n i=0\n self.player.expression_clear=''\n \n for part in self.set:\n part=str(part)\n lbl=Mini_Label(self.player.comm,text=part)\n \n if part.isnumeric():\n lbl.type=\"number\"\n \n \n elif part.isalpha():\n lbl.type=\"variable\"\n \n \n\n else:\n if part==\"(\" or part==\")\":\n lbl.type=\"bracket\"\n else:\n lbl.type=\"operation\"\n lbl.font_size=30\n lbl.width = 20\n lbl.height=20\n self.player.expression_clear+=part\n move(lbl,start_pos[0]+step*i,start_pos[1])\n self.player.comm.main.add_widget(lbl)\n self.widgets.append(lbl)\n i+=1\n for player in self.player.comm.players:\n player.update()\n\n\n\n\n\ndef move(widget,x,y):\n y+=widget.height/2\n widget.pos_hint=pixel_to_hint(x,y)\n widget.pos=(x,Window.size[1]-y)\ndef pixel_to_hint(width,height,mode=\"tuple\",invert=True):\n if invert: \n height=Window.size[1]-height\n data = width/Window.size[0],height/Window.size[1]\n \n if mode==\"tuple\":\n return (data)\n elif mode==\"dict\": \n return {\"x\":data[0],\"y\":data[1]}\nclass Mini_Label(Label):\n def __init__(self,main, **kwargs):\n super().__init__(**kwargs) \n self.main = main\n self.output=Label()\n self.output.font_size=40\n self.main.main.add_widget(self.output)\n self.output.text=''\n \n \n \n def show_value(self):\n if self.main.active_card==None:\n if self.text =='x':\n add =str(self.main.X)\n elif self.text=='y':\n add =str(self.main.Y)\n elif self.text=='z':\n add =str(self.main.Z)\n else:\n add=''\n self.output.text=add\n \n move( self.output, self.pos[0]-40,Window.size[1]-(self.pos[1]+40))\n \n Clock.schedule_once(self.delete,3)\n def delete(self,dt):\n self.output.text=''\n def on_touch_down(self, touch):\n super().on_touch_down(touch)\n if self.collide_point(touch.x,touch.y):\n self.show_value()","sub_path":"MathGame/testing/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"150679081","text":"def hey(greeting):\n\n remove_whitespace = '\\t \\n\\r'\n for character in remove_whitespace:\n greeting = greeting.replace(character, '')\n if greeting == '':\n return 'Fine. 
Be that way!'\n\n contains_alpha = False\n for letter in greeting:\n if letter.isalpha():\n contains_alpha = True\n\n if contains_alpha:\n yelling = True\n for letter in greeting:\n if letter.isalpha() and letter.upper() != letter:\n yelling = False\n if yelling:\n return 'Whoa, chill out!'\n\n if greeting[-1] == '?':\n return 'Sure.'\n\n return 'Whatever.'\n","sub_path":"bob/bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"100450730","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets.samples_generator import make_blobs\n\ndef LpNorm(a, b, p):\n s = 0\n for i in range(len(a)):\n s += abs(a[i] - b[i]) ** p\n return s ** (1/p)\n\ndef kNN(test_data, train_data, k, classes):\n classifications = []\n for target in test_data:\n distances = []\n for data in train_data:\n distances.append(LpNorm(target, data[:-1], p = 2))\n top_k = sorted(distances)[:k]\n top_k_classes = [train_data[distances.index(x)][-1] for x in top_k]\n counts = {top_k_classes.count(c): c for c in classes}\n classifications.append(counts[max(counts)])\n return classifications\n\nN = 100\ncenters = [(2, 2), (4, 4)]\ncluster_std = [1.5, 2]\nX, classes = make_blobs(n_samples = N, cluster_std = cluster_std, centers = centers, n_features = 2)\ntrain_data = [[X[i][0], X[i][1], classes[i]] for i in range(N)]\nplt.scatter(X[classes == 0, 0], X[classes == 0, 1], color = \"red\")\nplt.scatter(X[classes == 1, 0], X[classes == 1, 1], color = \"blue\")\nplt.suptitle(\"k-Nearest Neighbor Classification\")\n\ntest_data = [[1, 2], [3, 5], [2, 3], [5, 7], [0, 3]]\nk = 5\nclassifications = kNN(test_data, train_data, k, [0, 1])\nplt.title(\"k = %d\" % k)\nfor i in range(len(test_data)):\n plt.scatter(test_data[i][0], test_data[i][1], color = \"black\")\n print(\"Point at (%.3f, %.3f) belongs to classification %s\" % (test_data[i][0], test_data[i][1], [\"red\", \"blue\"][classifications[i]]))\nplt.show()\n","sub_path":"python/kNearestNeighbor.py","file_name":"kNearestNeighbor.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"226630555","text":"#!/usr/bin/python3\n# -*- coding: iso-8859-1 -*-\n\nimport re\nimport time\nimport sys, getopt\nimport os\nfrom os.path import basename\nfrom PIL import Image\nimport wget\nfrom urllib.request import urlopen\nimport urllib.request\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport array\nfrom concurrent.futures import ProcessPoolExecutor\nimport io\n# def download(url, NOMBRE):\n# try:\n# furl = urllib2.urlopen(url)\n# f = file(\"%s.png\"%NOMBRE,'wb')\n# f.write(furl.read())\n# f.close()\n# except:\n# print 'Unable to download file'\n\n# print \"Descargar imagenes desde internet:\\n\"\n# entrada = raw_input(\"ingrese url: \")\n# renombrar = raw_input(\"nombre: \")\n# download(entrada,renombrar)\n\nEXTENSIONS = ['.jpg','.png','.gif','.jpeg','.jfif','.svg' ]\n \ndef download_images_from_url(url):\n if not url.lower().startswith('http://') and not url.lower().startswith('https://'):\n url = 'http://%s'%url\n print ('Descargando de %s...'%url)\n #print (os.getpid())\n with urllib.request.urlopen(url) as f:\n \thtml = f.read()\n \t#print (type(html))\n \t#aca html tiene el codigo literalmente en html\n \thtml = html.decode('utf-8')\n \twebpage_regex = re.compile('img .*?src=\"(.*?)\"',re.IGNORECASE)\n \tprint (\"webpage\",webpage_regex)\n\n 
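# Collect every src attribute captured by the img-tag regex above.\n    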
\tlinks = webpage_regex.findall(html)\n \t#print (\"acaa tenemos\",links)\n #urlContent = urllib.request.urlopen(url).read()\n #print (type(urlContent)) urlContent=html\n\n\n # Búsqueda del tag img en la página web.\n # HTML image tag: \"some_text\"/\n #htmltext = urlContent.read().decode('utf-8')\n #print (type(htmltext))\n #imgUrls = re.findall('img .*?src=\"(.*?)\"', htmltext)\n executor=ProcessPoolExecutor(max_workers = 3)\n \n # Descargar todas las imágenes\n for imgUrl in links:\n \t\t\n # El print me sirvio para identificar los url relativos,\n # a continuación intenta descargar el archivo con wget.\n\n\t #print (url+'/'+imgUrl)\n urlFinal=url+'/'+imgUrl\n #print(imgUrl)\n #print (re.split(r\"\\|.|_/\",imgUrl))\n \n \n \n #directorio = imgUrl\n #print (directorio)\n \n #try:\n #os.stat(directorio)\n #except:\n #os.mkdir(directorio)\n \n\t #os.system('wget -q -nc ' + imgUrl)\n\t #arreglo=\"archivo\"\n file_name=imgUrl.split(\"/\")[-1]\n print (file_name) #file_name =str\n print (\"executor\")\n \n\t #executor = ProcessPoolExecutor(max_workers=2)\n executor.submit(imagen,file_name,urlFinal)\n\t #task = executor.submit(imagen(file_name,urlFinal))\n\t \n\t #task2 = executor.submit(imagen(file_name,urlFinal))\n\t #imagen(file_name,urlFinal)\n\t \ndef imagen(file_name,urlFinal):\t \n\t #furl = urllib.request.urlopen(urlFinal)\n\t \n\t print (\"SOY EL PROCESO:\",os.getpid())\n\t with urllib.request.urlopen(urlFinal) as response, open(file_name, 'wb') as out_file:\n\t \tRUTA=\"TEMPORAL\"\n\t \tdata = response.read() # a `bytes` object\n\n\t \tout_file.write(data)\n\t \t\n\t \tim = Image.open(file_name)\n\t \t\n\t \t#print(im)\n\t \trgb_im = im.convert('RGB')\n\t \trgb_im.save(RUTA+'/'+file_name+'.jpg')\n\t \tim2 = Image.open(RUTA+'/'+file_name+'.jpg')\n\t \tRUTA2=\"TEMPORAL2\"\n\t \tim2.save(RUTA2+'/'+file_name+'.ppm')\n\t \twidth, height = im.size\n\t \tfd = os.open(RUTA2+'/'+file_name+'.ppm', os.O_RDONLY)\n\t \tcabecera = os.read(fd,50)\n\t \t#print (cabecera)\n\t \timorig = os.read(fd, 18000000)\n\t \t# PPM header\n\t \t#width = 200\n\t \t#height = 298\n\t \tmaxval = 255\n\t \tppm_header = f'P6 {width} {height} {maxval}\\n'\n\t \t# PPM image data (filled with blue)\n\t \timage = array.array('B', [0, 0, 0] * width * height)\n\t \timage2 = array.array('B', [0, 0, 0] * width * height)\n\t \timage3 = array.array('B', [0, 0, 0] * width * height)\n\t \t# Fill with red the rectangle with origin at (10, 10) and width x height = 50 x 80 pixels\n\t for x in range(0, width - 1):\n\t \tfor y in range(0, height - 1):\n\t \t\tindex = 3 * (y * width + x)\n\t \t\timage[index] = imorig[index] # red channel\n\t \t\timage[index + 1] = 0\n\t \t\timage[index + 2] = 0\n\t RUTA3=\"RGB\"\n\t f = open(RUTA3+'/'+file_name+'red'+'.ppm', 'wb')\n\t f.write(bytearray(ppm_header, 'ascii'))\n\t image.tofile(f)\n\n\t for x in range(0, width - 1):\n\t \tfor y in range(0, height - 1):\n\t \t\tindex = 3 * (y * width + x)\n\t \t\timage2[index] = 0 # red channel\n\t \t\timage2[index + 1] = imorig[index]\n\t \t\timage2[index + 2] = 0\n\t f2 = open(RUTA3+'/'+file_name+'green'+'.ppm', 'wb')\n\t f2.write(bytearray(ppm_header, 'ascii'))\n\t image2.tofile(f2)\n\t \n\t for x in range(0, width - 1):\n\t \tfor y in range(0, height - 1):\n\t \t\tindex = 3 * (y * width + x)\n\t \t\timage3[index] = 0 # red channel\n\t \t\timage3[index + 1] = 0\n\t \t\timage3[index + 2] = imorig[index]\n\t f3 = open(RUTA3+'/'+file_name+'blue'+'.ppm', 'wb')\n\t f3.write(bytearray(ppm_header, 'ascii'))\n\t image3.tofile(f3)\n\t \n\t \n\t \n\t # with 
urllib.request.urlopen(urlFinal) as g:\n\t # \tfinal = g.read()\n\t # \tfinal2 = file(final,'wb')\n\t # \tfinal2.write(final.read())\n\t # \tfinal2.close()\n\n\t #f = file(imgUrl,'wb')\n\t #f.write(furl.read())\n\t #f.close()\n\t \n\t return 0\n \n\n\n\nif __name__ == '__main__':\n args = sys.argv\n \n work=4\n task=[1,2,3,4]\n if len(args) < 2:\n print ('Necesito una dirección URL para descargar imágenes')\n exit(-1)\n #print (args)\n \n for i in range(1,len(args)):\n #print (args[i])\n #print(task[i])\n #executor = ProcessPoolExecutor(max_workers=work)\n #task[i] = executor.submit(download_images_from_url(args[i]))\n download_images_from_url(args[i])\n #fullCmdArguments = sys.argv\n #task=[]\n # - further arguments\n #argumentList = fullCmdArguments[1:]\n \n #for i in range(len(sys.argv)):\n #print (argumentList[1])\n \n \n \n #executor = ProcessPoolExecutor(max_workers=work)\n #print(args[i])\n \n \n #task1 = executor.submit(download_images_from_url(args[1]))\n #task2 = executor.submit(download_images_from_url(args[2]))\n #task1 = executor.submit(download_images_from_url(args[1])) \n #print (args[2])\n #executor = ProcessPoolExecutor(max_workers=3)\n \n \n #task1 = executor.submit(download_images_from_url(args[1]))\n #task2 = executor.submit(download_images_from_url(args[2]))\n\n\n #download_images_from_url(args[1])\n exit(0)\n\n\n\n\n\n\n\n\n\n","sub_path":"tps/tp4/tp4.py","file_name":"tp4.py","file_ext":"py","file_size_in_byte":8052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"207477819","text":"\nfrom tkinter import *\nfrom PIL import Image as Img\nfrom tkinter.filedialog import *\n\n\ndef select():\n filenames = askopenfilenames()\n listbox = app.children['lbox']\n # info = list(filenames) # 赋值操作只作用在函数内部\n for name in filenames:\n listbox.insert(END, name.split('/')[-1])\n info.append(name)\n print(info)\n\n\ndef compress():\n for f_path in info:\n output = 'C:/Users/chuzhuxi/Pictures/Saved Pictures/'\n name = f_path.split('/')[-1]\n image = Img.open(f_path)\n image.save(output + 'compressed' + name, quality=60)\n\n\ndef make_app():\n app = Tk()\n Label(text='图片压缩工具').pack()\n Listbox(name='lbox', bg='#f2f2f2').pack(fill=BOTH, expand=True)\n Button(text='选择图片', command=select).pack()\n Button(text='压缩', command=compress).pack()\n app.geometry('400x450')\n return app\n\n\ninfo = []\napp = make_app()\napp.mainloop()\n","sub_path":"Tkinter/压缩图片小工具.py","file_name":"压缩图片小工具.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"587422720","text":"log = __import__(\"logging\").getLogger(\"bungeni.core.workflow.utils\")\n\nimport datetime\n\nfrom zope import component\nfrom zope import interface\nfrom zope.security.proxy import removeSecurityProxy\nfrom zope.securitypolicy.interfaces import IPrincipalRoleMap\nfrom zope.security.management import getInteraction\nfrom zope.publisher.interfaces import IRequest\n\nfrom bungeni.alchemist import Session\nfrom ore.workflow.interfaces import IWorkflowInfo, InvalidTransitionError\nfrom ore.workflow.interfaces import NoTransitionAvailableError\nimport ore.workflow.workflow\n\nimport bungeni.models.interfaces as interfaces\nimport bungeni.models.domain as domain\nfrom bungeni.models.utils import get_principal_id\nfrom bungeni.core.app import BungeniApp\nimport bungeni.core.interfaces\nimport bungeni.core.globalsettings as prefs\nfrom bungeni.ui.utils import common\nfrom bungeni.ui.interfaces import 
IFormEditLayer\n\nimport dbutils\n\nclass conditions(object):\n \"\"\"Commonly used transition conditions.\n \"\"\"\n \n # the condition for the transition from \"\" (None) to either \"draft\" or to \n # \"working_draft\" seems to need the explicit condition (and negation of \n # condition) on each of the two transition options \n @staticmethod\n def user_is_not_context_owner(info, context):\n return not user_is_context_owner(context)\n @staticmethod\n def user_is_context_owner(info, context):\n return user_is_context_owner(context)\n\n\ndef get_parliament(context):\n \"\"\"go up until we find a parliament \"\"\"\n parent = context.__parent__\n while parent:\n if interfaces.IParliament.providedBy(parent):\n return parent\n else:\n try:\n parent = parent.__parent__\n except:\n parent = None\n if not parent:\n parliament_id = context.parliament_id\n session = Session()\n parliament = session.query(domain.Parliament).get(parliament_id)\n return parliament\n \n\ndef _get_group_local_role(group):\n if interfaces.IParliament.providedBy(group):\n return \"bungeni.MP\"\n elif interfaces.IMinistry.providedBy(group):\n return \"bungeni.Minister\"\n elif interfaces.ICommittee.providedBy(group): \n return \"bungeni.CommitteeMember\"\n elif interfaces.IPoliticalGroup.providedBy(group):\n return \"bungeni.PartyMember\"\n elif interfaces.IGovernment.providedBy(group):\n return \"bungeni.Government\"\n elif interfaces.IOffice.providedBy(group):\n if group.office_type == \"S\":\n return \"bungeni.Speaker\"\n elif group.office_type == \"C\":\n return \"bungeni.Clerk\"\n elif group.office_type == \"T\":\n return \"bungeni.Translator\"\n else: \n raise NotImplementedError \n else:\n return \"bungeni.GroupMember\"\n \ndef _get_group_context(context):\n if interfaces.IOffice.providedBy(context):\n return BungeniApp() #get_parliament(context)\n else:\n return removeSecurityProxy(context)\n\ndef set_group_local_role(context):\n role = _get_group_local_role(context)\n group = removeSecurityProxy(context)\n ctx = _get_group_context(context) \n IPrincipalRoleMap(ctx).assignRoleToPrincipal(\n role, group.group_principal_id)\n \ndef unset_group_local_role(context):\n role = _get_group_local_role(context)\n group = removeSecurityProxy(context)\n ctx = _get_group_context(context)\n IPrincipalRoleMap(ctx).unsetRoleForPrincipal(\n role, group.group_principal_id)\n\ndef getOwnerId(context):\n if context:\n owner_id = getattr(context, 'owner_id', None)\n return dbutils.get_user_login(owner_id)\n\ndef user_is_context_owner(context):\n \"\"\"Test if current user is the context owner e.g. 
to check if someone \n manipulating the context object is other than the owner of the object.\n \"\"\"\n user_id = get_principal_id()\n owner_id = getOwnerId(context)\n return user_id==owner_id\n \ndef setBungeniOwner(context):\n user_id = get_principal_id()\n if not user_id: \n user_id = \"-\"\n owner_id = getOwnerId(context)\n log.debug(\"setBungeniOwner [%s] user_id:%s owner_id:%s\" % (\n context, user_id, owner_id))\n if user_id:\n IPrincipalRoleMap(context).assignRoleToPrincipal(u'bungeni.Owner', user_id)\n if owner_id and (owner_id!=user_id):\n IPrincipalRoleMap(context).assignRoleToPrincipal(u'bungeni.Owner', owner_id)\n\n\ndef createVersion(info, context, \n message=\"New version created upon workflow transition.\"\n):\n \"\"\"Create a new version of an object and return it.\"\"\"\n instance = removeSecurityProxy(context)\n versions = bungeni.core.interfaces.IVersioned(instance)\n versions.create(message)\n\ndef setQuestionDefaults(info, context):\n \"\"\"get the default values for a question.\n current parliament, ... \"\"\" \n instance = removeSecurityProxy(context)\n dbutils.setQuestionParliamentId(instance)\n dbutils.setQuestionMinistryId(instance)\n\ndef setRegistryNumber(info, context):\n \"\"\"A parliamentary_item's registry_number should be set on the item being \n submitted to parliament.\n \"\"\"\n instance = removeSecurityProxy(context)\n if instance.registry_number == None:\n dbutils.setRegistryNumber(instance)\n\ndef setApprovalDate(info, context):\n instance = removeSecurityProxy(context)\n if instance.approval_date == None:\n instance.approval_date = datetime.date.today()\n versions = bungeni.core.interfaces.IVersioned(instance)\n versions.create('New Version created upon approval by speakers office')\n if type(instance) == domain.Question:\n dbutils.setQuestionSerialNumber(instance)\n elif type(instance) == domain.Motion:\n dbutils.setMotionSerialNumber(instance) \n elif type(instance) == domain.TabledDocument:\n dbutils.setTabledDocumentSerialNumber(instance)\n if instance.registry_number == None:\n dbutils.setRegistryNumber(instance)\n\ndef setMinistrySubmissionDate(info, context):\n instance = removeSecurityProxy(context)\n if instance.ministry_submit_date == None:\n instance.ministry_submit_date = datetime.date.today()\n\ndef setQuestionScheduleHistory(info, context):\n question_id = context.question_id\n dbutils.removeQuestionFromItemSchedule(question_id)\n \n\ndef getQuestionMinistry(info, context):\n ministry_id = context.ministry_id\n return ministry_id != None\n\n''' UNUSUED (and incorrect) :\ndef getQuestionSchedule(info, context):\n question_id = context.question_id\n return dbutils.isItemScheduled(question_id)\n\ndef getMotionSchedule(info, context):\n motion_id = context.motion_id\n return dbutils.isItemScheduled(motion_id)\n'''\n\ndef getQuestionSubmissionAllowed(info, context):\n return prefs.getQuestionSubmissionAllowed()\n\ndef setBillPublicationDate( info, context ):\n instance = removeSecurityProxy(context)\n if instance.publication_date == None:\n instance.publication_date = datetime.date.today()\n\ndef setAgendaItemHistory(info, context):\n pass\n \ndef setTabledDocumentHistory(info, context):\n pass\n\n\ndef setParliamentId(info, context):\n instance = removeSecurityProxy(context)\n if not instance.parliament_id:\n parliamentId = prefs.getCurrentParliamentId()\n instance.parliament_id = parliamentId\n \ndef response_allow_submit(info, context):\n instance = removeSecurityProxy(context)\n # The \"submit_response\" workflow transition should NOT 
be displayed when \n # the UI is displaying the question in \"edit\" mode (as this transition\n # will cause deny of bungeni.Question.Edit to the Minister).\n request = common.get_request()\n if IFormEditLayer.providedBy(request):\n return False\n if instance.response_text is None:\n return False\n else:\n return True\n \ndef dissolveChildGroups(groups, context):\n for group in groups:\n IWorkflowInfo(group).fireTransition('dissolve', check_security=False)\n \n \ndef schedule_sitting_items(info, context):\n instance = removeSecurityProxy(context)\n for schedule in instance.item_schedule:\n item = schedule.item\n if interfaces.IQuestion.providedBy(item):\n try:\n IWorkflowInfo(item).fireTransitionToward('scheduled', \n check_security=False)\n except NoTransitionAvailableError:\n pass\n elif interfaces.IMotion.providedBy(item):\n try:\n IWorkflowInfo(item).fireTransitionToward('scheduled', \n check_security=False)\n except NoTransitionAvailableError:\n pass\n elif interfaces.IAgendaItem.providedBy(item):\n try:\n IWorkflowInfo(item).fireTransitionToward('scheduled', \n check_security=False)\n except NoTransitionAvailableError:\n pass\n elif interfaces.ITabledDocument.providedBy(item):\n try:\n IWorkflowInfo(item).fireTransitionToward('scheduled', \n check_security=False)\n except NoTransitionAvailableError:\n pass\n\n\n","sub_path":"bungeni.main/branches/sterch-del-proxy/bungeni/core/workflows/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198694545","text":"# Prompt: https://leetcode.com/problems/deepest-leaves-sum/\n# Runtime: 108 ms, faster than 30.33% of Python online submissions for Deepest Leaves Sum.\n# Memory Usage: 20.9 MB, less than 82.33% of Python online submissions for Deepest Leaves Sum.\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def __init__(self):\n self.valPerDepth = {}\n \n # populates self.valPerDepth\n def deepestLeavesSumRecursive(self, node, depth):\n \"\"\"\n :type node: TreeNode\n :type depth: int\n :rtype: None\n \"\"\"\n if node != None:\n if depth in self.valPerDepth:\n self.valPerDepth[depth] += node.val\n else:\n self.valPerDepth[depth] = node.val\n if node.left != None:\n self.deepestLeavesSumRecursive(node.left, depth+1)\n if node.right != None:\n self.deepestLeavesSumRecursive(node.right, depth+1)\n\n def deepestLeavesSum(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.deepestLeavesSumRecursive(root, 0)\n \n deepest = 0\n for depth in self.valPerDepth:\n if depth > deepest:\n deepest = depth\n \n return self.valPerDepth[deepest]\n","sub_path":"1. Medium/1302. 
Deepest Leaves Sum/deepest_leaves_sum.py","file_name":"deepest_leaves_sum.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"344987764","text":"import mGui.gui as gui\nimport maya.cmds as cmds\n\"\"\"\nThis example shows a cleaned up version of traditional Maya GUI using context managers and addressing but no other new features\n\nNote: buttons are non-functional; this just shows layout style\n\"\"\"\n\n\nwindow = gui.Window('main window', title=\"Ugly version\")\nwith gui.ColumnLayout('gui', width=256) as main:\n with gui.FrameLayout(\"t_buttons\", label=\"buttons column\"):\n with gui.ColumnLayout(\"col\"):\n gui.Button('mkSphere', label=\"Make Sphere\")\n gui.Button('mkCone', label=\"Make Cone\")\n gui.Button('mkCube', label=\"Make Cube\")\n\n with gui.FrameLayout(\"r_buttons\", label=\"buttons row\"):\n with gui.RowLayout (\"row\", numberOfColumns=3):\n gui.Button('mkSphere', label=\"Make Sphere\")\n gui.Button('mkCone', label=\"Make Cone\")\n gui.Button('mkCube', label=\"Make Cube\")\n\n with gui.FrameLayout(\"g_buttons\", label=\"buttons grid\"):\n with gui.GridLayout(\"grid\", numberOfColumns=2):\n gui.Button('mkSphere', label=\"Make Sphere\")\n gui.Button('mkCone', label=\"Make Cone\")\n gui.Button('mkCube', label=\"Make Cube\")\n gui.Button('mkCircle', label=\"Make Circle\")\n\n# using the iterability of the layout to set widths\n\nfor item in main.t_buttons:\n item.width = 256\n\nfor item in main.r_buttons.row:\n item.width = 85\n\n# last 'item' is the row itself...\nitem.width = 256\nitem.columnWidth3 = (85, 85, 85)\n\nfor item in main.g_buttons.grid:\n item.width = 128\n\n# last item is the grid...\nitem.width = 256\nitem.cellWidth = 128\n\ncmds.showWindow(window)\n","sub_path":"mGui/examples/traditional.py","file_name":"traditional.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"162398785","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='wsisampler',\n version='0.0.3',\n description='A package for WSI sampling and annotations processing',\n long_description=readme,\n author='Jevgenij Gamper & Peter Byfield',\n author_email='jevgenij.gamper5@gmail.com',\n url='https://github.com/jgamper/WholeSlideImageSampler',\n packages=find_packages(),\n install_requires=['numpy',\n 'opencv-python',\n 'openslide-python',\n 'matplotlib',\n 'jupyter',\n 'future',\n 'cython',\n 'pandas',\n 'scikit-image',\n 'xmltodict'\n ],\n classifiers=[\n # Specify the Python versions you support here. 
In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3.5'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"197690881","text":"print(\"Content-type: json\\n\")\n\nimport json\nimport os\n\ndef path_hierarchy(path):\n hierarchy = {\n 'type': 'folder',\n 'text': os.path.basename(path), #'name': os.path.basename(path),\n 'path': path,\n }\n try:\n hierarchy['children'] = [ path_hierarchy(os.path.join(path, contents)) for contents in os.listdir(path) ]\n except OSError:\n hierarchy['type'] = 'file'\n hierarchy['icon'] = 'jstree-file'\n return hierarchy\n\nprint( json.dumps(path_hierarchy('../plugins/'), indent=2, sort_keys=True) )\n","sub_path":"ECM/ecm/ui/cgi-bin/plugin_tree.py","file_name":"plugin_tree.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"598100336","text":"import re\nimport sqlite3\nimport contextlib\nimport collections\n\nfrom tqdm import tqdm\nfrom pygeoroc.api import col_type\n\n\nclass Database:\n def __init__(self, fname):\n self.fname = fname\n\n def create(self, api):\n cols, files = {}, []\n for f in api.iter_files():\n files.append(f)\n for sample in f.iter_samples(api):\n for key in sample.data:\n if key:\n cols[key] = 'REAL' if col_type(key) is float else 'TEXT'\n break\n cols = collections.OrderedDict(sorted(\n cols.items(),\n key=lambda s: ('(' in s[0], bool(re.search('[0-9]', s[0])), s[0])))\n\n with sqlite3.connect(str(self.fname)) as conn:\n conn.execute('PRAGMA foreign_keys = ON;')\n with contextlib.closing(conn.cursor()) as cu:\n self._create_schema(cu, cols)\n self._load_data(cu, cols, api, files)\n\n def _create_schema(self, cu, cols):\n cu.execute(\"CREATE TABLE file (id TEXT PRIMARY KEY, date TEXT, section TEXT);\")\n cu.execute(\"CREATE TABLE reference (id INTEGER PRIMARY KEY, reference TEXT);\")\n colspec = ['`{}` {}'.format(k, v) for k, v in cols.items()]\n cu.execute(\"\"\"\nCREATE TABLE sample (\n id TEXT PRIMARY KEY,\n file_id TEXT,\n {},\n FOREIGN KEY (file_id) REFERENCES file(id)\n);\n\"\"\".format(',\\n'.join(colspec)))\n cu.execute(\"\"\"\nCREATE TABLE citation (\n sample_id TEXT,\n reference_id INTEGER,\n fields TEXT,\n FOREIGN KEY (sample_id) REFERENCES sample(id),\n FOREIGN KEY (reference_id) REFERENCES reference(id)\n);\n\"\"\")\n\n def _load_data(self, cu, cols, api, files):\n refs, samples = set(), set()\n for f in tqdm(files):\n cu.execute(\n \"INSERT INTO file (id, date, section) VALUES (?,?,?)\",\n (f.name, f.date, f.section))\n for id_, ref in f.iter_references(api):\n if id_ not in refs:\n cu.execute(\n \"INSERT INTO reference (id, reference) VALUES (?,?)\",\n (id_, ref))\n refs.add(id_)\n tuples, citations = [], []\n for sample in f.iter_samples(api):\n if sample.id not in samples:\n samples.add(sample.id)\n tuples.append(\n tuple([sample.id, f.name] + [sample.data.get(c) for c in cols]))\n citations.extend(\n [(sample.id, cit, ' '.join(fields))\n for cit, fields in sample.citations.items()])\n sql = \"INSERT INTO sample ({}) VALUES ({})\".format(\n ', '.join(['id', 'file_id'] + ['`{}`'.format(c) for c in cols]),\n ', '.join(['?' 
for _ in range(len(cols) + 2)]))\n cu.executemany(sql, tuples)\n cu.executemany(\n \"INSERT INTO citation (sample_id, reference_id, fields) VALUES (?, ?, ?)\",\n citations)\n","sub_path":"src/pygeoroc/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"125509321","text":"from turtle import *\n\n\nclass Grid():\n def __init__(self):\n g = Turtle(visible=False)\n g.hideturtle()\n g.shape('circle')\n g.color('red')\n g.setx(0)\n g.sety(0)\n g.shapesize(.1,.1,0)\n\n for i in range (-400,400,10):\n self.line(i,'vertical')\n self.line(i, 'horizontal')\n def line (self, coordinate, direction = 'vertical'):\n line = Turtle()\n line.screen.tracer(0,0)\n line.speed(None)\n line.penup()\n line.color('grey')\n offset = 7\n if direction == 'vertical':\n x = coordinate\n line.left(90)\n line.setx(x + offset)\n line.sety(0)\n if(x%50 ==0):\n if x!= 0:\n line.write(str(x))\n line.pensize(2)\n line.setx(x)\n elif direction == 'horizontal':\n y = coordinate\n line.sety(y + offset)\n line.setx(0 + offset)\n if(y%50 == 0):\n line.write(str(y))\n line.pensize(2)\n line.setx(0)\n line.sety(y)\n else:\n exit\n\n line.forward(1000)\n line.pendown()\n line.hideturtle()\n line.right(180)\n line.forward(2000)\n line.hideturtle()\n","sub_path":"random stuff/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"504431230","text":"\"\"\" Języki skryptowe - Ćwiczenia - Rafał Stępkowski GD30982 -\r\n\r\nZadanie 3 - Popraw program Wymieszane litery tak, żeby każdemu słowu towarzyszyła\r\npodpowiedź. Gracz powinien mieć możliwość zobaczenia podpowiedzi, jeśli\r\nutknie w martwym punkcie. Dodaj system punktacji, który nagradza graczy\r\nrozwiązujących anagram bez uciekania się do podpowiedzi. \"\"\"\r\n\r\n#! /usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport random as rm\r\n \r\nEXAMPLE = (\"Mietek\",\r\n \"Heniek\",\r\n \"Zbyszek\")\r\n \r\ncorrect = rm.choice(EXAMPLE)\r\n \r\npoprawne = correct\r\n \r\npomieszane = \"\"\r\n \r\nwhile correct:\r\n pozycja = rm.randrange(len(correct))\r\n pomieszane += correct[pozycja]\r\n correct = correct[:pozycja] + correct[(pozycja + 1):]\r\n \r\nprint(\"3...2...1 Zaczynamy\")\r\nprint(\"Zgadnij wyraz: \",pomieszane)\r\n \r\ngosc = input (\"zgaduj: \")\r\nx=0\r\nz=0\r\np = \"\"\r\nwhile gosc != poprawne and gosc != \"\":\r\n print(\"zle\")\r\n x+=1\r\n if x>=3:\r\n print(\"Nie udało Ci się zgadnąć w 3 ruchach\")\r\n p = str(input(\"Potrzebujesz pomocy? Jeśli tak naciśnij y, jeśli wolisz zgadywać bez podpowiedzi naciśnij n: \"))\r\n if p == \"y\":\r\n print(\"pierwsze dwie litery to: \",poprawne[:2])\r\n z+=1\r\n elif p == \"n\":\r\n print(\"ok probuj dalej\")\r\n gosc = input(\"spróbuj jeszcze raz: \\n\")\r\n \r\nif gosc == poprawne and z==0:\r\n print (\"brawo, zgadłeś bez podpowiedzi otrzymujesz 3 punkty\")\r\nelif gosc == poprawne and z==1:\r\n print (\"brawo, udało Ci się, z podpowiedzią. 
Otrzymujesz 2 punkty\")\r\nelif gosc == poprawne and z>=2:\r\n print (\" otrzymujesz 1ptk\")\r\n \r\nprint(\"Dziękuję za udział w grze.\")\r\ninput(\"\\n\\nAby zakończyć program, naciśnij klawisz Enter.\")\r\n","sub_path":"1-wymieszane.py","file_name":"1-wymieszane.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538706160","text":"#!/usr/bin/python3\r\n\r\nimport urllib.request\r\nimport urllib.parse\r\nimport sys\r\nimport os\r\nimport re\r\n\r\n#dirs = os.listdir()\r\narr = []\r\nlinks = []\r\nfile = sys.argv[1]\r\n\r\nG = open(\"test.txt\", 'w')\r\n\r\nwith open(file) as F:\r\n\t#check whether ', str(openURL))\r\n\r\n\t\t\tfor item in links:\r\n\t\t\t\titem = item.lower()\r\n\t\t\t\tif (re.match(r'.*?undergraduate.*?', item)):\r\n\t\t\t\t\turl = re.sub(r'\">.*$', \"\", item)\r\n\t\t\t\t\turl = \"http://sydney.edu.au/handbooks\"+url\r\n\r\n\t\t\t\t\tG.write(url+\"\\n\")\r\n\t\t\t\telif (re.match(r'.*?postgraduate.*?', item)):\r\n\t\t\t\t\t#item = re.findall(r'\">(.*$)', item)\r\n\t\t\t\t\turl = re.sub(r'\">.*$', \"\", item)\r\n\t\t\t\t\turl = \"http://sydney.edu.au/handbooks\"+url\r\n\r\n\t\t\t\t\tG.write(url+\"\\n\")\r\n\t\t\t\telif (re.match(r'.*?/ug/.*?', item)):\r\n\t\t\t\t\turl = re.sub(r'\">.*$', \"\", item)\r\n\t\t\t\t\turl = \"http://www.handbook.uts.edu.au/\"+url\r\n\r\n\t\t\t\t\tG.write(url+\"\\n\")\r\n\t\t\t\telif (re.match(r'.*?/pg/.*?', item)):\r\n\t\t\t\t\t#item = re.findall(r'\">(.*$)', item)\r\n\t\t\t\t\turl = re.sub(r'\">.*$', \"\", item)\r\n\t\t\t\t\turl = \"http://www.handbook.uts.edu.au/\"+url\r\n\r\n\t\t\t\t\tG.write(url+\"\\n\")\r\nG.close()","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"212857130","text":"from flask import Flask, request, jsonify\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import r2_score\n\nimport pandas as pd\nimport sys\n\n# declare constants\nHOST = '0.0.0.0'\nPORT = 8081\n\n# initialize flask application\napp = Flask(__name__)\n\n@app.route('/api/train', methods=['POST'])\ndef train():\n\n parameters = request.get_json()\n\n hearth = pd.read_csv(\"heart.csv\")\n X = hearth.iloc[:,0:13]\n y = hearth.iloc[:,-1]\n \n print('Data loaded', file=sys.stdout)\n\n clf=RandomForestClassifier(n_estimators=10)\n\n clf.fit(X, y)\n joblib.dump(clf, 'model.pkl')\n\n return jsonify({'accuracy': round(clf.score(X, y) * 100, 2)})\n\n@app.route('/api/predict', methods=['POST'])\ndef predict():\n\n X = request.get_json()\n print(X, file=sys.stdout)\n X = [[ float(X['Age']), float(X['Sex']), float(X['ChestPain']), float(X['Trestbps']), float(X['Chol']), float(X['Fbs']), float(X['RestEcg']), float(X['Thalach']), float(X['Exang']), float(X['Oldpeak']), float(X['Slope']), float(X['Ca']), float(X['Thal']) ]]\n\n clf = joblib.load('model.pkl')\n value = clf.predict(X)\n\n return jsonify({'value': str(value[0])})\n\nif __name__ == '__main__':\n # run web server\n app.run(host=HOST,\n debug=True,\n port=PORT)\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"157027985","text":"class Solution:\n\n def isPalindrome(self, x: int) -> bool:\n if x == 0:\n return True\n if x < 0 or x % 10 == 0:\n return 
False\n        else:\n            str_num = str(x)\n            length = len(str_num)\n            for i in range(length//2):\n                if str_num[i] != str_num[length - 1 - i]:\n                    return False\n            else:\n                return True\n\ns = Solution()\n\nprint(s.isPalindrome(000))\n\n","sub_path":"Leet Code/easy/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}{"seq_id":"572559002","text":"from .Libattacks import *\nfrom random import choice, randint, sample\nfrom .General import Element, Character\n\n\nclass Story:\n\n    def __init__(self, msg, opponent, recomp):\n        self.msg = msg\n        self.opponent = opponent\n        self.recomp = recomp\n\n\nclass Biome:\n\n    def __init__(self, name, livings, big_livings, recomps, big_recomps, fail_recomps, introduction, stories):\n        self.name = name\n        self.livings = livings\n        self.big_livings = big_livings\n        self.recomps = recomps\n        self.big_recomps = big_recomps\n        self.fail_recomps = fail_recomps\n        self.introduction = introduction\n        self.stories = stories\n        self.progress = 0\n\n    def is_complete(self):\n        return self.progress >= len(self.stories)\n\n    def set_complete(self):\n        self.progress = len(self.stories)\n\n    def complete_chapter(self):\n        self.progress += 1\n\n    def choose_song(self):\n        return choice([self.name, self.name + \"_2\"])\n\n    def choose_opponent(self):\n        if self.is_complete():\n            # shuffle() mutates in place and returns None; sample() draws 3 distinct livings\n            return (sample(self.livings, 3) + [choice(self.big_livings)], None, \\\n                    choice(self.recomps), choice(self.fail_recomps), False, self.choose_song())\n        else:\n            st = self.stories[self.progress]\n            return ([st.opponent], st.msg, st.recomp, choice(self.fail_recomps), True, self.choose_song())\n\n########################################################################################################################\n\nGorxx = Character(\"Gorxx\", [Element.Breton, Element.Sheep, Element.Geek, Element.God], 'H-F',\n                  Age=18,\n                  Str=12,\n                  Spe=14,\n                  Dex=12,\n                  App=1,\n                  Int=18,\n                  Wil=9,\n                  Res=20,\n                  Lif=140,\n                  power=48,\n                  Attacks=[\n                      [Database, Holiness],\n                      [Baconstriction],\n                      [Cpp_prog, OpenMP],\n                      [Genetic],\n                      [Burning_Fart]\n                  ], is_big=True).set_requirements(1200, [(\"Darkness Gene\", 6)])\n\nAimeridicule = Character(\"Aimeridicule\", [Element.Breton, Element.Sheep, Element.Geek], 'H',\n                         Age=16,\n                         Str=12,\n                         Spe=14,\n                         Dex=12,\n                         App=4,\n                         Int=16,\n                         Res=18,\n                         Wil=9,\n                         Lif=128,\n                         power=48,\n                         Attacks=[\n                             [Database],\n                             [Baconstriction],\n                             [C_prog, Cpp_prog],\n                             [Wool, Genetic],\n                             [Fart, Burning_Fart]\n                         ]\n                         ).set_requirements(80).set_evolution(Gorxx)\n\n\nAimerikiki = Character(\"Aimerikiki\", [Element.Breton, Element.Sheep, Element.Geek], 'H',\n                       Age=14,\n                       Str=12,\n                       Spe=13,\n                       Dex=12,\n                       App=3,\n                       Int=15,\n                       Wil=8,\n                       Res=16,\n                       Lif=124,\n                       power=48,\n                       Attacks=[\n                           [Database],\n                           [Browse, Baconstriction],\n                           [Wool],\n                           [Fart]\n                       ]).set_evolution(Aimeridicule).set_requirements(20)\n\nAimeric = Character(\"Aimeric\", [Element.Breton, Element.Sheep], 'H',\n                    Age=12,\n                    Str=10,\n                    Spe=11,\n                    Dex=10,\n                    App=2,\n                    Int=13,\n                    Wil=8,\n                    Res=14,\n                    Lif=112,\n                    power=48,\n                    Attacks=[\n                        [Bignou],\n                        [Punch, Browse],\n                        [Wool],\n                        [Fart]\n                    ]).set_evolution(Aimerikiki)\n\n\nclass Pedophile(Character):\n    def __init__(self, name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks):\n        super().__init__(name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks)\n\n    def special_power(self, opponent, msg_panel):\n        if opponent.Age < self.Age:\n            msg_panel.add_txt(self.name + \" special effect:\\n\" + self.name + \" is excited by younger people.\\n\", 1000)\n            
msg_panel.pop_animation(\"Babtou\", self, \"boost\")\n diff = (self.Age - opponent.Age) // 5\n self.carac[\"Dexterity\"] += diff\n msg_panel.add_txt(self.name + \" Dexterity increase of \" + str(diff) + \"\\n\\n\", 500)\n\n\nFranfin = Pedophile(\"Franfin\", [Element.Babtou, Element.Geek, Element.Frost], 'H',\n Age=35,\n Str=9,\n Spe=15,\n Dex=17,\n App=2,\n Int=17,\n Wil=10,\n Res=12,\n Lif=120,\n power=48,\n Attacks=[\n [C_prog, Cpp_prog],\n [Perl],\n [Stigmatization, Vicious_tongue],\n [Rape, GangBang],\n [Stalking]\n ]).set_requirements(60)\n\nFrancois = Pedophile(\"Francois\", [Element.Babtou, Element.Geek], 'H',\n Age=23,\n Str=10,\n Spe=12,\n Dex=15,\n App=3,\n Int=14,\n Wil=10,\n Res=10,\n Lif=95,\n power=48,\n Attacks=[\n [C_prog],\n [Regex, Perl]\n [Rape],\n [Stalking]\n ]).set_evolution(Franfin)\n\nAlqadima = Character(\"Alqadima\", [Element.Islamist, Element.Fossil], 'F',\n Age=98,\n Str=4,\n Spe=22,\n Dex=18,\n App=7,\n Int=14,\n Wil=9,\n Res=10,\n Lif=135,\n power=48,\n Attacks=[\n [Pickpocket],\n [Wisdom, Old_as_the_planet],\n [Pypy, Cython],\n [Old_bones],\n [Parasitism]\n ]).set_requirements(350)\n\nYarrera = Character(\"Yarrera\", [Element.Islamist, Element.Fossil], 'F',\n Age=55,\n Str=5,\n Spe=18,\n Dex=15,\n App=13,\n Int=12,\n Wil=8,\n Res=10,\n Lif=125,\n power=48,\n Attacks=[\n [Pickpocket],\n [Wisdom],\n [Python, Pypy]\n [Old_bones],\n [Parasitism]\n ]\n ).set_requirements(70).set_evolution(Alqadima)\n\nYasmina = Character(\"Yasmina\", [Element.Islamist, Element.Fossil], 'F',\n Age=42,\n Str=3,\n Spe=16,\n Dex=14,\n App=12,\n Int=12,\n Wil=8,\n Res=9,\n Lif=105,\n power=48,\n Attacks=[\n [Steal, Pickpocket],\n [Python],\n [Old_bones],\n [Parasitism]\n ]).set_evolution(Yarrera)\n\nStarter = Biome(\"At_the_begining\",\n [Aimeric, Aimerikiki, Francois, Franfin, Yasmina, Yarrera],\n [Gorxx, Alqadima],\n [(\"Nucleotide\", 5), (\"Nucleotide\", 8),(\"Nucleotide\", 10)],\n [(\"Nucleotide\", 100), (\"Nucleotide\", 500), (\"Darkness Gene\", 1), (\"Darkness Gene\", 2)],\n [(\"Nucleotide\", 1), (\"Nucleotide\", 2)],\n \"In GENIOMHE community, the tyrant 'his holiness ginger-beard' rules for many times.\\n\" +\n \"Aimeric the little boy cannot accept this situation anymore.\\nBut Aimeric is weak and will need\" +\n \" some help to make revolution.\\nTry to convince other GENIOMHE members rebelling too.\\n\\n\" +\n \"\\n-Each Attack use power (pow) amount\\n\\n-The more an Attack is effective, the most it cost.\\n\\n\" +\n \"-Attacks damages depend of player's and opponent's characteristics (see \\n'Player Details').\\n\\n\" +\n \"-Attacks efficiencies depend of attack's and opponent's elements, \\nsome element are \" +\n \"stronger or wicker against other ones.\\n\\n\" +\n \"-Click on Attack buttons to play, button icon represent Attack's element.\\n\",\n [Story(\"Francois always been kind with Aimeric, giving candies and much caress.\\n\" +\n \"He love children as anyone else, he probably will help.\\n\\n\" +\n \"You made a deal, you fight and if you win he will follow you,\\n but \" +\n \"if you loose you will have to go with him in a strange cabin...\\n\\n\\n\" +\n \"Help: Francois is much stronger than you, defeat him using capabilities with duration effects.\",\n Francois,\n (\"Nucleotide\", 20)),\n Story(\"Yasmina always love francois,\\n even if he never show interest in her or her huge breast.\\n\\n\" +\n \"She is jealous and want to fight, but if you defeat her,\\n\" +\n \"maybe she will become an ally.\\n\\n\\n\" +\n \"Help: Some elements are stronger or weaker against 
other.\",\n Yasmina, (\"Nucleotide\", 30))])\n\n########################################################################################################################\n\nMarxelien = Character(\"Marxelien\", [Element.Babtou, Element.Geek, Element.Fossil], 'H',\n Age=23,\n Str=13,\n Spe=12,\n Dex=16,\n App=12,\n Int=24,\n Wil=12,\n Res=14,\n Lif=142,\n power=48,\n Attacks=[\n [Twerk],\n [Wisdom, Old_as_the_planet],\n [Perl],\n [Parpaing],\n [Populism, Totalitarism]\n ]).set_requirements(240)\n\nAurelenine = Character(\"Aurelenine\", [Element.Babtou, Element.Geek, Element.Fossil], 'H',\n Age=23,\n Str=13,\n Spe=12,\n Dex=16,\n App=7,\n Int=19,\n Wil=12,\n Res=14,\n Lif=134,\n power=48,\n Attacks=[\n [Twerk],\n [Database, Wisdom],\n [Perl],\n [Parpaing],\n [Greve, Populism]\n ]).set_requirements(110, [(\"Lightness Gene\", 1)]).set_evolution(Marxelien)\n\nAureldeux = Character(\"Aureldeux\", [Element.Babtou, Element.Geek], 'H',\n Age=24,\n Str=13,\n Spe=12,\n Dex=15,\n App=2,\n Int=14,\n Wil=10,\n Res=12,\n Lif=122,\n power=48,\n Attacks=[\n [Twerk],\n [Database],\n [Perl],\n [Punch, Parpaing],\n [Greve]\n ]).set_requirements(75).set_evolution(Aurelenine)\n\nAurelien = Character(\"Aurelien\", [Element.Babtou, Element.Geek], 'H',\n Age=23,\n Str=12,\n Spe=11,\n Dex=14,\n App=2,\n Int=12,\n Wil=9,\n Res=11,\n Lif=108,\n power=48,\n Attacks=[\n [Twerk],\n [Regex, Perl]\n [Punch],\n [Manif, Greve]\n ]).set_evolution(Aureldeux)\n\nMajdaesh = Character(\"Majdaesh\", [Element.Islamist, Element.Fire], 'F',\n Age=30,\n Str=9,\n Spe=15,\n Dex=14,\n App=14,\n Int=10,\n Wil=17,\n Res=12,\n Lif=132,\n power=48,\n Attacks=[\n [Kalachnikov],\n [Burka],\n [Invasion],\n [Circoncision],\n [Allah_akbar]\n ]).set_requirements(80)\n\nMajdattentat = Character(\"Majdattentat\", [Element.Islamist, Element.Fire], 'F',\n Age=28,\n Str=8,\n Spe=14,\n Dex=12,\n App=14,\n Int=12,\n Wil=15,\n Res=12,\n Lif=118,\n power=48,\n Attacks=[\n [Kalachnikov],\n [Burka],\n [Twerk, Invasion],\n [Allah_akbar]\n ]).set_requirements(80)\n\nMajda = Character(\"Majda\", [Element.Islamist], 'F',\n Age=25,\n Str=5,\n Spe=12,\n Dex=11,\n App=12,\n Int=14,\n Wil=14,\n Res=10,\n Lif=98,\n power=48,\n Attacks=[\n [Steal, Kalachnikov],\n [Circoncision, Burka],\n [Twerk],\n [Allah_akbar]\n ]).set_evolution(Majdattentat)\n\nGuedors_la_loi = Character(\"Guedors_la_loi\", [Element.Renoi], 'F',\n Age=28,\n Str=18,\n Spe=12,\n Dex=14,\n App=12,\n Int=12,\n Wil=17,\n Res=17,\n Lif=120,\n power=48,\n Attacks=[\n [Twerk],\n [Fist],\n [Hide_in_the_dark],\n [Tornado_punch]\n ]).set_requirements(100)\n\nOceane = Character(\"Oceane\", [Element.Renoi], 'F',\n Age=24,\n Str=14,\n Spe=11,\n Dex=12,\n App=10,\n Int=11,\n Wil=16,\n Res=15,\n Lif=108,\n power=48,\n Attacks=[\n [Tchip, Twerk],\n [Punch, Fist],\n [Tornado_punch]\n ]).set_evolution(Guedors_la_loi)\n\nJail = Biome(\"In_jail\", [Majda, Majdattentat, Oceane, Guedors_la_loi, Aurelien, Aureldeux, Aimeridicule],\n [Aurelenine, Marxelien, Majdaesh],\n [(\"Nucleotide\", 10), (\"Nucleotide\", 10), (\"Nucleotide\", 12), (\"Nucleotide\", 15)],\n [(\"Nucleotide\", 80), (\"Nucleotide\", 90), (\"Lightness Gene\", 1)],\n [(\"Nucleotide\", 5), (\"Nucleotide\", 7)],\n \"Sometimes you have to deal with the devil to achieve a greater purpose...\\n\" +\n \"So you went jail to convince dangerous criminals to help you defeat the tyrant.\\n\\n\" +\n \"-Some attacks can decrease Focus, when Focus <= 0 your player cannot play and \\n\" +\n \"pass turn.\\n\\n\" +\n \"-When Focus is lower than his basic value (for a player), 
player regain \\nFocus each turn.\" +\n \"\\n\\n-The Focus amount regain depend of player Intelligence, smartest people recover faster.\",\n [Story(\"First prisoner is a famous communist revolutionary, very proud of his soft and bounding ass.\\n\" +\n \"He will join you only if you defeat him in a battle, where he will show his twerk skills.\",\n Aurelien, (\"Nucleotide\", 50)),\n Story(\"Secondly, you will have to convince the most dangerous religious terrorist of the country\" +\n \"that you are the boss.\", Majda, (\"Nucleotide\", 60)),\n Story(\"And finally the stronger criminal, which has been incarcerate for the ultimate crime: be Black!\",\n Oceane, (\"Nucleotide\", 70))])\n\n########################################################################################################################\n\n\nclass Lazy(Character):\n\n def __init__(self, name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks, is_big=False):\n super().__init__(name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks, is_big)\n\n def special_power(self, opponent, msg_panel):\n if randint(0, 3) == 0:\n msg_panel.add_txt(self.name + \" decided to do nothing.\\n\", 500)\n self._pass_turn = True\n\n\nMaster_of_the_godpickaxe = Lazy(\"Master_of_the_God_Pickaxe\", [Element.God, Element.Poor, Element.Fire], 'H',\n Age=24,\n Str=18,\n Spe=15,\n Dex=14,\n App=26,\n Int=10,\n Wil=12,\n Res=5,\n Lif=142,\n power=48,\n Attacks=[\n [Bifle],\n [Eat_wastes],\n [On_heat],\n [Boiling_body_fluids, Seventh_heaven]\n ], is_big=True).set_requirements(1200, [(\"Pheromones Gene\", 4), (\"Lightness Gene\", 2)])\n\nKepine = Lazy(\"Kepine\", [Element.Poor, Element.Fire], 'H',\n Age=24,\n Str=16,\n Spe=14,\n Dex=12,\n App=17,\n Int=10,\n Wil=12,\n Res=5,\n Lif=124,\n power=48,\n Attacks=[\n [Fap, Bifle],\n [Eat_wastes],\n [On_heat],\n [GangBang]\n ], is_big=True).set_requirements(150, [(\"Pheromones Gene\", 1)])\n\nPierrickevin = Lazy(\"Pierrickevin\", [Element.Babtou, Element.Frost, Element.Poor, Element.Fossil],\n 'H',\n Age=50,\n Str=18,\n Spe=16,\n Dex=12,\n App=2,\n Int=10,\n Wil=12,\n Res=6,\n Lif=124,\n power=48,\n Attacks=[\n [Burp],\n [Eat_wastes],\n [Expulsion],\n [Weight_of_years]\n ], is_big=True).set_requirements(320)\n\nKevinutile = Lazy(\"Kevinutile\", [Element.Babtou, Element.Poor, Element.Frost], 'H',\n Age=24,\n Str=12,\n Spe=7,\n Dex=10,\n App=5,\n Int=10,\n Wil=11,\n Res=5,\n Lif=118,\n power=48,\n \n Attacks=[\n [Burp],\n [Eat_wastes],\n [No_Feelings, Frozen_heart],\n [Vomit]\n ]).set_requirements(90).set_evolution(Kepine)\n\nKevin = Lazy(\"Kevin\", [Element.Babtou, Element.Poor], 'H',\n Age=24,\n Str=12,\n Spe=6,\n Dex=9,\n App=4,\n Int=9,\n Wil=10,\n Res=4,\n Lif=112,\n power=48,\n Attacks=[\n [Manif, Burp],\n [Eat_wastes],\n [No_Feelings]\n ]).set_evolution(Kevinutile)\n\n\nclass PeeCharacter(Character):\n def __init__(self, name, pee_damages, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power,\n Attacks, is_big=False):\n super().__init__(name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks, is_big)\n self.pee_damages = pee_damages\n\n def special_power(self, opponent, msg_panel):\n if randint(0, 5) == 0:\n msg_panel.add_txt(self.name + \"'s bladder explode and \" + opponent.name + \" take \" + str(self.pee_damages) +\n \" damages by acid pee rain.\\n\", 500)\n opponent.carac[\"Life\"] -= self.pee_damages\n msg_panel.pop_animation(Element.Normal, opponent, \"hurt\")\n\nHashounasmine = PeeCharacter(\"Hashounasmine\", 8, 
[Element.Islamist, Element.Fire, Element.God], 'F',\n Age=28,\n Str=10,\n Spe=15,\n Dex=18,\n App=25,\n Int=12,\n Wil=15,\n Res=16,\n Lif=138,\n power=48,\n Attacks=[\n [On_heat],\n [Twerk, Soft_warm],\n [Sucking],\n [Boiling_body_fluids, Seventh_heaven]\n ], is_big=True).set_requirements(1200, [(\"Pheromones Gene\", 4), (\"Lightness Gene\", 1)])\n\nKahbasmine = PeeCharacter(\"Kahbasmine\", 8, [Element.Islamist, Element.Fire], 'F',\n Age=28,\n Str=10,\n Spe=14,\n Dex=20,\n App=23,\n Int=12,\n Wil=15,\n Res=15,\n Lif=135,\n power=48,\n Attacks=[\n [On_heat],\n [Clitorgnole],\n [Sucking],\n [Boiling_body_fluids, Kama_sutra]\n ], is_big=True).set_requirements(950, [(\"Pheromones Gene\", 5)])\n\nMoumasmine = PeeCharacter(\"Moumasmine\", 6, [Element.Islamist, Element.Fire], 'F',\n Age=28,\n Str=10,\n Spe=14,\n Dex=19,\n App=21,\n Int=12,\n Wil=14,\n Res=15,\n Lif=132,\n power=48,\n Attacks=[\n [On_heat],\n [Twerk],\n [Prostitute, Sucking],\n [Boiling_body_fluids]\n ], is_big=True).set_requirements(540, [(\"Pheromones Gene\", 3)])\\\n .set_evolution(Kahbasmine).set_evolution(Hashounasmine)\n\nMalikat_el_theldj = PeeCharacter(\"Malikat_el_theldj\", 5, [Element.Islamist, Element.Frost], 'F',\n Age=28,\n Str=10,\n Spe=15,\n Dex=18,\n App=19,\n Int=14,\n Wil=12,\n Res=15,\n Lif=128,\n power=48,\n \n Attacks=[\n [Twerk],\n [Prostitute],\n [Invasion, Frozen_heart],\n [Hurtful_speech]\n ]).set_requirements(220)\n\nMchenfa = PeeCharacter(\"Mchenfa\", 4, [Element.Islamist, Element.Frost], 'F',\n Age=26,\n Str=8,\n Spe=14,\n Dex=16,\n App=17,\n Int=12,\n Wil=12,\n Res=14,\n Lif=122,\n power=48,\n Attacks=[\n [Twerk],\n [Prostitute],\n [Vicious_tongue, Invasion],\n [Parasitism]\n ]).set_evolution(Malikat_el_theldj).set_requirements(180)\n\nYasmine = PeeCharacter(\"Yasmine\", 2, [Element.Islamist], 'F',\n Age=26,\n Str=8,\n Spe=12,\n Dex=15,\n App=16,\n Int=9,\n Wil=9,\n Res=12,\n Lif=110,\n power=48,\n Attacks=[\n [Steal],\n [Busbus, Twerk],\n [Prostitute],\n [Parasitism]\n ]).set_evolution(Mchenfa).set_evolution(Moumasmine)\n\nWood = Biome(\"In_the_wood\", [Kevin, Kevinutile, Kepine, Yasmine, Mchenfa, Malikat_el_theldj],\n [Kepine, Moumasmine, Kahbasmine],\n [(\"Nucleotide\", 15), (\"Nucleotide\", 20), (\"Nucleotide\", 50)],\n [(\"Nucleotide\", 80), (\"Nucleotide\", 100), (\"Nucleotide\", 110), (\"Pheromones Gene\", 1)],\n [(\"Nucleotide\", 8), (\"Nucleotide\", 9)],\n \"There is a legend, an evil, horrifying and powerful bitch living in woods.\\n\" +\n \"It is said that she always have his almost-human companion by his side.\\n\" +\n \"Let see if you can have his help.\",\n [Story(\"Oh no! The degenerate slave find you, only the witch can control his animals urges.\\n\" +\n \"you have to beat him to find her.\", Kevin, (\"Nucleotide\", 100)),\n Story(\"And know you see her... 
The greatest kahba you've ever seen.\\n\" +\n \"Obviously she won't help you (because she is a bitch), \" +\n \"so you will have to beat her too, to gain her allegiance.\", Yasmine,\n (\"Pheromones Gene\", 1))])\n\n########################################################################################################################\n\n\nclass Sick(Character):\n\n def __init__(self, name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks, is_big=False):\n super().__init__(name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks, is_big)\n\n def special_power(self, opponent, msg_panel):\n res = randint(0, 5)\n if res == 0:\n msg_panel.add_txt(self.name + \" have to go to hospital.\\nCannot play this turn.\\n\", 500)\n self._pass_turn = True\n elif res == 1:\n msg_panel.add_txt(self.name + \" have nose bleeding.\\nGet 3 damages.\\n\", 500)\n self.carac[\"Life\"] -= 3\n\n\nHamGod = Character(\"HamGod\", [Element.God, Element.Pig, Element.Babtou], 'H',\n Age=26,\n Str=21,\n Spe=19,\n Dex=17,\n App=22,\n Int=18,\n Wil=14,\n Res=18,\n Lif=142,\n power=48,\n Attacks=[\n [Baconstriction],\n [Stigmatization],\n [Ham_mer],\n [Holiness],\n [Charcutuerie]\n ], is_big=True).set_requirements(600, [(\"Darkness Gene\", 1), (\"Lightness Gene\", 5)])\n\nHamReaper = Sick(\"HamReaper\", [Element.Pig, Element.Babtou], 'H',\n Age=26,\n Str=19,\n Spe=16,\n Dex=16,\n App=17,\n Int=17,\n Wil=12,\n Res=17,\n Lif=135,\n power=48,\n Attacks=[\n [Baconstriction],\n [Stigmatization],\n [Ham_mer],\n [Charcutuerie]\n ], is_big=True).set_requirements(420, [(\"Darkness Gene\", 1)])\n\nHamSlayer = Sick(\"HamSlayer\", [Element.Pig, Element.Babtou], 'H',\n Age=26,\n Str=16,\n Spe=14,\n Dex=14,\n App=15,\n Int=17,\n Wil=12,\n Res=16,\n Lif=130,\n power=48,\n Attacks=[\n [Baconstriction],\n [Stigmatization],\n [Sauscissonage, Ham_mer],\n [Expulsion, Charcutuerie]\n ]\n ).set_requirements(350).set_evolution(HamReaper)\n\nHamBoy = Sick(\"HamBoy\", [Element.Babtou, Element.Pig], 'H',\n Age=26,\n Str=14,\n Spe=12,\n Dex=12,\n App=13,\n Int=16,\n Wil=10,\n Res=14,\n Lif=124,\n power=48,\n Attacks=[\n [Ham_hit, Baconstriction],\n [National_id, Stigmatization],\n [Sauscissonage],\n [Expulsion]\n ]).set_requirements(280).set_evolution(HamSlayer)\n\nJimmy = Sick(\"Jimmy\", [Element.Babtou, Element.Pig], 'H',\n Age=24,\n Str=12,\n Spe=10,\n Dex=12,\n App=12,\n Int=15,\n Wil=9,\n Res=14,\n Lif=118,\n power=48,\n Attacks=[\n [Punch, Ham_hit],\n [National_id],\n [Expulsion]\n ]).set_evolution(HamBoy)\n\nAtrine = Character(\"Atrine\", [Element.Islamist, Element.Fire], 'F',\n Age=1,\n Str=2,\n Spe=7,\n Dex=8,\n App=24,\n Int=2,\n Wil=12,\n Res=1,\n Lif=85,\n power=48,\n Attacks=[\n [Busbus],\n [Soft_warm],\n [Invasion],\n [Vomit, So_cute]\n ], is_big=True).set_requirements(120, [(\"Lightness Gene\", 4)])\n\nSamaternelle = Character(\"Samaternelle\", [Element.Islamist, Element.Fire], 'F',\n Age=27,\n Str=17,\n Spe=11,\n Dex=11,\n App=16,\n Int=15,\n Wil=19,\n Res=14,\n Lif=130,\n power=48,\n Attacks=[\n [Busbus, Twerk],\n [Soft_warm],\n [Circoncision],\n [Invasion],\n [Allah_akbar]\n ]).set_requirements(250).set_evolution(Atrine)\n\nSamaneh = Character(\"Samaneh\", [Element.Islamist, Element.Fire], 'F',\n Age=26,\n Str=15,\n Spe=11,\n Dex=10,\n App=16,\n Int=14,\n Wil=18,\n Res=12,\n Lif=115,\n power=48,\n Attacks=[\n [Busbus],\n [Soft_warm],\n [Harassment, Circoncision],\n [Invasion],\n [Allah_akbar]\n ]).set_evolution(Samaternelle)\n\n\nclass Old(Character):\n\n def __init__(self, name, types, sex, 
Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks):\n        super().__init__(name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks)\n\n    def special_power(self, opponent, msg_panel):\n        if randint(0, 5) == 0:\n            msg_panel.add_txt(self.name + \" expires from old age.\\n\", 500)\n            self._pass_turn = True  # was 'self.play_turn = False', a typo; Lazy and Sick use _pass_turn\n            self.carac[\"Life\"] = 0\n\n\nPierristorique = Old(\"Pierristorique\", [Element.Babtou, Element.Fossil], 'H',\n                     Age=5000,\n                     Str=19,\n                     Spe=18,\n                     Dex=19,\n                     App=17,\n                     Int=14,\n                     Wil=15,\n                     Res=18,\n                     Lif=145,\n                     power=48,\n                     Attacks=[\n                         [National_id],\n                         [Wisdom, Old_as_the_planet],\n                         [Stigmatization, Battle_experience],\n                         [GangBang]\n                     ]).set_requirements(350)\n\nPierrick = Old(\"Pierrick\", [Element.Babtou, Element.Fossil], 'H',\n               Age=300,\n               Str=16,\n               Spe=15,\n               Dex=16,\n               App=17,\n               Int=12,\n               Wil=14,\n               Res=16,\n               Lif=120,\n               power=48,\n               Attacks=[\n                   [Punch, National_id],\n                   [Wisdom],\n                   [Rape, Stigmatization],\n                   [Tornado_punch]\n               ]).set_evolution(Pierristorique)\n\nHospital = Biome(\"At_the_hospital\",\n                 [Jimmy, HamBoy, HamSlayer, Samaneh, Samaternelle, Pierrick, Pierristorique],\n                 [HamReaper, HamGod, Pierrickevin, Atrine],\n                 [(\"Nucleotide\", 30), (\"Nucleotide\", 50), (\"Nucleotide\", 50), (\"Nucleotide\", 100),\n                  (\"Pheromones Gene\", 1), (\"Pheromones Gene\", 2)],\n                 [(\"Nucleotide\", 200), (\"Nucleotide\", 300), (\"Pheromones Gene\", 3), (\"Lightness Gene\", 1)],\n                 [(\"Nucleotide\", 14), (\"Nucleotide\", 15), (\"Nucleotide\", 20)],\n                 \"Some great warriors also have many injuries.\\nRumor says two great warriors live in this hospital.\",\n                 [Story(\"Walking through Emergency you meet Jimmy, an unstoppable warrior, who can't flinch except \" +\n                        \"by chronic nose bleeding. He is terribly strong against Islamists.\", Jimmy,\n                        (\"Nucleotide\", 120)),\n                  Story(\"That's unexpected! 
you were walking before maternity and you are stopped by a furious \" +\n \"woman.\\nShe won't let you pass without a fight!\", Samaneh, (\"Nucleotide\", 200)),\n Story(\"The oldest warrior of the world, the elder which is as strong as aged.\\nHere come the \" +\n \"ultimate warrior...\\nWith nurses on his side to help him peeing.\", Pierrick,\n (\"Darkness Gene\", 1))])\n\n########################################################################################################################\n\nLolitadvanced = Character(\"Lolitadvanced\", [Element.Breton, Element.Statistic, Element.Geek], 'F',\n Age=29,\n Str=12,\n Spe=15,\n Dex=16,\n App=19,\n Int=22,\n Wil=16,\n Res=12,\n Lif=124,\n power=48,\n Attacks=[\n [Normal_law],\n [Regex, Inference],\n [Database],\n [So_cute]\n ]).set_requirements(300)\n\nLolita = Character(\"Lolita\", [Element.Breton, Element.Statistic], 'F',\n Age=24,\n Str=12,\n Spe=15,\n Dex=15,\n App=17,\n Int=20,\n Wil=16,\n Res=12,\n Lif=115,\n power=48,\n Attacks=[\n [Bignou, Normal_law],\n [Sauscissonage, Regex],\n [Butter, Database],\n [Genetic, So_cute]\n ])\\\n .set_evolution(Lolitadvanced).set_requirements(200)\n\nLolitapprentie = Character(\"Lolitapprentie\", [Element.Breton], 'F',\n Age=14,\n Str=12,\n Spe=14,\n Dex=12,\n App=15,\n Int=18,\n Wil=12,\n Res=10,\n Lif=105,\n power=48,\n Attacks=[\n [Bignou],\n [Punch, Sauscissonage],\n [Butter],\n [Genetic]\n ]).set_evolution(Lolita)\n\nTuluk = Sick(\"Tuluk_Tuluk\", [Element.Islamist, Element.Sheep, Element.Fossil], 'H',\n Age=28,\n Str=14,\n Spe=18,\n Dex=16,\n App=11,\n Int=22,\n Wil=12,\n Res=10,\n Lif=132,\n power=48,\n Attacks=[\n [Evil_mind, Invasion],\n [Old_as_the_planet],\n [Wool, Old_bones],\n [Ram, Old_fart]\n ], is_big=True\n ).set_requirements(700, [(\"Lightness Gene\", 5)])\n\nSamir = Sick(\"Samir\", [Element.Islamist, Element.Sheep, Element.Fossil], 'H',\n Age=26,\n Str=13,\n Spe=16,\n Dex=15,\n App=11,\n Int=20,\n Wil=10,\n Res=9,\n Lif=119,\n power=48,\n Attacks=[\n [Browse],\n [Evil_mind, Invasion],\n [Wisdom, Old_as_the_planet],\n [Wool],\n [Ram]\n ], is_big=True).set_evolution(Tuluk)\n\nSwaggy = PeeCharacter(\"Swaggy_Boy\", 4, [Element.Babtou, Element.Fire, Element.God], 'H',\n Age=28,\n Str=14,\n Spe=16,\n Dex=15,\n App=24,\n Int=8,\n Wil=14,\n Res=15,\n Lif=140,\n power=48,\n Attacks=[\n [Bifle, Sucking],\n [Stigmatization],\n [On_heat],\n [Boiling_body_fluids, Kama_sutra]\n ], is_big=True\n ).set_requirements(850, [(\"Lightness Gene\", 2), (\"Pheromones Gene\", 12)])\n\nSimon = PeeCharacter(\"Simon\", 3, [Element.Babtou, Element.Fire], 'H',\n Age=25,\n Str=12,\n Spe=14,\n Dex=13,\n App=20,\n Int=8,\n Wil=14,\n Res=14,\n Lif=122,\n power=48,\n Attacks=[\n [Fap, Bifle],\n [Stigmatization],\n [On_heat],\n [Boiling_body_fluids]\n ], is_big=True).set_evolution(Swaggy)\n\nDeus_Lex_Machina = Character(\"Deus_Lex_Machina\",\n [Element.God, Element.Statistic, Element.Geek, Element.Babtou, Element.Fossil], 'H',\n Age=5000,\n Str=11,\n Spe=10,\n Dex=10,\n App=22,\n Int=25,\n Wil=12,\n Res=12,\n Lif=180,\n power=48,\n Attacks=[\n [Inference],\n [Old_as_the_planet],\n [Divine_Gift],\n [Holiness],\n [Seventh_heaven]\n ], is_big=True).set_requirements(2000, [(\"Lightness Gene\", 1)])\n\nAllah_Kisandr = Character(\"Allah_Kisandr\",\n [Element.God, Element.Statistic, Element.Geek, Element.Islamist], 'H',\n Age=25,\n Str=12,\n Spe=9,\n Dex=10,\n App=21,\n Int=19,\n Wil=18,\n Res=12,\n Lif=152,\n power=48,\n Attacks=[\n [Inference],\n [Invasion],\n [Divine_Gift],\n [Holiness],\n [Allah_akbar]\n ], 
is_big=True).set_requirements(1000, [(\"Darkness Gene\", 4)])\n\nSamsimal = Character(\"Samsimal\", [Element.God, Element.Sheep, Element.Fire], 'H',\n Age=24,\n Str=14,\n Spe=14,\n Dex=12,\n App=22,\n Int=25,\n Wil=14,\n Res=15,\n Lif=150,\n power=48,\n Attacks=[\n [On_heat],\n [Wool, Holiness],\n [Ram],\n [Boiling_body_fluids, Seventh_heaven]\n ], is_big=True\n ).set_requirements(800, [(\"Lightness Gene\", 8)])\n\nAlexandre = Character(\"Alexandre\", [Element.God, Element.Statistic, Element.Geek, Element.Babtou], 'H',\n Age=23,\n Str=11,\n Spe=9,\n Dex=10,\n App=20,\n Int=22,\n Wil=9,\n Res=11,\n Lif=140,\n power=48,\n Attacks=[\n [Regex, Inference],\n [Divine_Gift],\n [Holiness],\n [Seventh_heaven]\n ], is_big=True).set_evolution(Deus_Lex_Machina).set_evolution(Allah_Kisandr)\n\nFinal = Biome(\"Final\",\n [Alexandre, Lolitapprentie, Lolita, Lolitadvanced, Samir, Simon],\n [Master_of_the_godpickaxe, Swaggy, Tuluk, Deus_Lex_Machina, Allah_Kisandr, Samsimal],\n [(\"Nucleotide\", 55), (\"Nucleotide\", 100), (\"Nucleotide\", 200), (\"Nucleotide\", 400),\n (\"Pheromones Gene\", 1), (\"Darkness Gene\", 1), (\"Lightness Gene\", 1)],\n [(\"Nucleotide\", 500), (\"Nucleotide\", 500), (\"Nucleotide\", 800),\n (\"Darkness Gene\", 2), (\"Darkness Gene\", 2)],\n [(\"Nucleotide\", 50), (\"Nucleotide\", 55), (\"Pheromones Gene\", 1)],\n \"You've got a pretty strong army!\\nIt's now time to find and beat the Tyrant in his castle.\\n\" +\n \"Be careful, you will have to fight his lieutenants!\",\n [Story(\"As you are a coward you decide to firstly beat his weakest guard.\\n\" +\n \"The Tyrant's disciple, maybe she could become a very strong ally.\\n\" +\n \"She is breton, and maybe an Aimeric's cousin (and so a potential sex partner for him).\",\n Lolitapprentie, (\"Nucleotide\", 250)),\n Story(\"Dammit! 
The Tyrant's lover appears, his 'wife' as he said.\\n\" +\n                     \"He looks weak but he is pretty strong:\\n\" +\n                     \"for a religious man, having sex with his own god gives wonderful powers.\",\n                     Samir, (\"Lightness Gene\", 1)),\n               Story(\"Not enough?\\nAs you can see, being the wife of a god made you very strong,\\n\" +\n                     \"so imagine how much power being a god's sexual slave can give you!\\n\" +\n                     \"The Tyrant's right-hand man, the luxury half-god, has just left the bathroom and you must beat him!\",\n                     Simon, (\"Nucleotide\", 500)),\n               Story(\"All efforts have paid off!\\nNow let's kick some God's ass...\", Alexandre, (\"Nucleotide\", 800))])\n\n########################################################################################################################\n# SEASON 2\n########################################################################################################################\n\n\nclass Radicalist(Character):\n\n    def __init__(self, name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks):\n        super().__init__(name, types, sex, Age, Str, Spe, Dex, App, Int, Wil, Res, Lif, power, Attacks)\n        self.not_radicalized = True\n\n    def special_power(self, opponent, msg_panel):\n        if self.not_radicalized and (Element.Islamist in opponent.types) and (randint(0, 3) == 0):\n            msg_panel.add_txt(self.name + \" radicalizes himself.\\n\", 500)\n            self.types.append(Element.Islamist)\n            self.Attacks.append([Allah_akbar])  # Attacks holds option lists, so append a list\n            self.not_radicalized = False\n\nTavenger = Radicalist(\"Tavenger\", [Element.Nem], 'H',\n                      Age=28,\n                      Str=14,\n                      Spe=19,\n                      Dex=17,\n                      App=14,\n                      Int=14,\n                      Wil=14,\n                      Res=13,\n                      Lif=134,\n                      power=48,\n                      Attacks=[\n                          [Death_Rectal_Touch],\n                          [Fist, Dragon_Fist_Fucking],\n                          [Hentai],\n                          [Nunchacouille]\n                      ])\n\nChristophe = Radicalist(\"Christophe\", [Element.Nem], 'H',\n                        Age=26,\n                        Str=12,\n                        Spe=16,\n                        Dex=15,\n                        App=13,\n                        Int=14,\n                        Wil=12,\n                        Res=10,\n                        Lif=108,\n                        power=48,\n                        Attacks=[\n                            [Death_Rectal_Touch],\n                            [Punch, Fist],\n                            [Nunchacouille]\n                        ])\n\nBaheudoublant = Character(\"Baheudoublant\", [Element.Statistic, Element.Geek], 'F',\n                          Age=25,\n                          Str=15,\n                          Spe=11,\n                          Dex=15,\n                          App=12,\n                          Int=15,\n                          Wil=11,\n                          Res=14,\n                          Lif=112,\n                          power=48,\n                          Attacks=[\n                              [Punch],\n                              [Fap],\n                              [Fart],\n                              [Normal_law, Inference],\n                              [RMarkdown]\n                          ]).set_requirements(200)\n\nJulien = Character(\"Julien\", [Element.Statistic, Element.Geek], 'F',\n                   Age=25,\n                   Str=15,\n                   Spe=11,\n                   Dex=15,\n                   App=12,\n                   Int=15,\n                   Wil=11,\n                   Res=14,\n                   Lif=112,\n                   power=48,\n                   Attacks=[\n                       [Punch],\n                       [Fap],\n                       [Normal_law],\n                       [R_script, RMarkdown]\n                   ])\n\nJacquie = Character(\"Jacquie\", [Element.Breton, Element.Statistic, Element.Geek], 'F',\n                    Age=24,\n                    Str=10,\n                    Spe=17,\n                    Dex=16,\n                    App=18,\n                    Int=12,\n                    Wil=13,\n                    Res=12,\n                    Lif=125,\n                    power=48,\n                    Attacks=[\n                        [Bifle],\n                        [Eat_wastes],\n                        [Sucking],\n                        [Rape, GangBang]\n                    ]).set_requirements(150)\n\nMichal = Character(\"Michal\", [Element.Babtou], 'F',\n                   Age=24,\n                   Str=9,\n                   Spe=15,\n                   Dex=16,\n                   App=16,\n                   Int=12,\n                   Wil=12,\n                   Res=11,\n                   Lif=109,\n                   power=48,\n                   Attacks=[\n                       [Punch, Bifle],\n                       [Eat_wastes],\n                       [Prostitute, Sucking],\n                       [Rape]\n                   ])\n\nDuchesse = Character(\"Duchesse\", [Element.Breton, Element.Statistic, Element.Geek], 'F',\n                     Age=26,\n                     Str=12,\n                     Spe=16,\n                     Dex=17,\n                     App=20,\n                     Int=24,\n                     Wil=15,\n                     Res=16,\n                     Lif=138,\n                     power=182,\n                     Attacks=[\n                         [Frozen_heart],\n                         [Stigmatization, Expulsion],\n                         [Jeanne],\n                         [Hurtful_speech]\n                     ]).set_requirements(350)\n\nCamille = Character(\"Camille\", [Element.Babtou, Element.Frost], 'F',\n                    Age=26,\n                    Str=12,\n                    Spe=14,\n                    Dex=16,\n                    App=18,\n                    Int=19,\n                    Wil=15,\n                    Res=14,\n                    Lif=122,\n                    
power=48,\n Attacks=[\n [Frozen_heart],\n [National_id, Stigmatization],\n [Vicious_tongue, Hurtful_speech]\n ])\n\n########################################################################################################################\n\n","sub_path":"TEUB_GAME/Libcharacters.py","file_name":"Libcharacters.py","file_ext":"py","file_size_in_byte":48916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"564211229","text":"# File: Day.py\r\n\r\n# Description: Given the day, month, and year within a 2 century period\r\n#\t\t(1900-2100 incl.), the program returns the day of the week.\r\n\r\n# Student Name: Stanley Urbanek\t\r\n\r\n# Student UT EID: su964\r\n\r\n# Course Name: CS 303E\r\n\r\n# Unique Number: 90110\r\n\r\n# Date Created: 7/2/2014\r\n\r\n# Date Last Modified: 7/3/2014\r\n\r\n#########################################\r\n\r\n\r\ndef main():\r\n #check year\r\n year = eval(input('Enter year: '))\r\n while ( (year < 1900) or (year > 2100)):\r\n year = eval(input('Enter year: '))\r\n #check month\r\n month = eval(input('Enter month: '))\r\n while ( (month < 1) or (month > 12)):\r\n month = eval(input('Enter month: '))\r\n #get day\r\n day = eval(input('Enter day: '))\r\n #check validity of day, depending on month\r\n ##first with 31 day months\r\n if ( (month == 1) or (month == 3) or (month == 5) or\\\r\n (month == 7) or (month == 8) or (month == 10) or (month == 12)):\r\n while ( (day < 1) or (day > 31)):\r\n day = eval(input('Enter day: '))\r\n ##now with 30 day months\r\n elif ( (month == 4) or (month == 6) or (month == 9) or (month == 11)):\r\n while ( (day < 1) or (day > 30)):\r\n day = eval(input('Enter day: '))\r\n ##now check if february is a leap year\r\n elif ( (year % 400 == 0) or ((year % 100 != 0) and (year % 4 == 0)) ):\r\n while ( (day < 1) or (day > 29) ):\r\n day = eval(input('Enter day: '))\r\n else:\r\n while ( (day < 1) or (day > 28) ):\r\n day = eval(input('Enter day: '))\r\n #changing calendar months (and year if necessary) for algorithm\r\n a = month - 2\r\n if (a < 1):\r\n a += 12\r\n year -= 1\r\n else:\r\n pass\r\n ##assigning rest of variables for algorithm\r\n b = day\r\n ##splitting year into two variables, c and d\r\n d = year // 100\r\n c = year - d * 100\r\n #now doing Rev. 
Zeller's algorithm\r\n w = (13 * a - 1) // 5\r\n x = c // 4\r\n y = d // 4\r\n z = w + x + y + b + c - 2 * d\r\n r = z % 7\r\n r = (r + 7) % 7\r\n #converting r into day of the week, and printing result\r\n if (r == 0):\r\n print('\\nThe day is Sunday.')\r\n elif (r == 1):\r\n print('\\nThe day is Monday.')\r\n elif (r == 2):\r\n print('\\nThe day is Tuesday.')\r\n elif (r == 3):\r\n print('\\nThe day is Wednesday.')\r\n elif (r == 4):\r\n print('\\nThe day is Thursday.')\r\n elif (r == 5):\r\n print('\\nThe day is Friday.')\r\n else:\r\n print('\\nThe day is Saturday.')\r\n\r\n\r\nmain()\r\n\r\n\r\n","sub_path":"University/Day.py","file_name":"Day.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"566069094","text":"'''\nTest Vid4 (SR) and REDS4 (SR-clean, SR-blur, deblur-clean, deblur-compression) datasets\n'''\n\nimport time\nt0 = time.perf_counter()\nimport os\nimport os.path as osp\nimport glob\nimport logging\nimport numpy as np\nimport cv2\nimport torch\nimport zmq\nimport argparse\n\nimport utils.util as util\nimport data.util as data_util\nimport models.archs.EDVR_arch as EDVR_arch\n\nt1 = time.perf_counter()\nprint(\"time for import: \" + str(t1-t0))\n\ndef send_array(socket, A, flags=0, copy=True, track=False):\n \"\"\"send a numpy array with metadata\"\"\"\n md = dict(\n dtype = str(A.dtype),\n shape = A.shape,\n )\n socket.send_json(md, flags|zmq.SNDMORE)\n return socket.send(A, flags, copy=copy, track=track)\n\ndef recv_array(socket, flags=0, copy=True, track=False):\n \"\"\"recv a numpy array\"\"\"\n md = socket.recv_json(flags=flags)\n msg = socket.recv(flags=flags, copy=copy, track=track)\n buf = memoryview(msg)\n A = np.frombuffer(buf, dtype=md['dtype'])\n return A.reshape(md['shape'])\n\ndef eval(images, N_in, model, device):\n images = images.astype(np.float32) / 255.\n images = torch.from_numpy(np.ascontiguousarray(np.transpose(images, (0, 3, 1, 2)))).float()\n imgs_in = images.index_select(0, torch.LongTensor(list(range(N_in)))).unsqueeze(0).to(device)\n output = util.single_forward(model, imgs_in)\n output = util.tensor2img(output.squeeze(0))\n output = np.ascontiguousarray(output)\n return output\n\ndef main():\n\n # Create object for parsing command-line options\n parser = argparse.ArgumentParser(description=\"Test with EDVR, require path to the pretrained model and the save_dir.\")\n # Add argument which takes path to a bag file as an input\n parser.add_argument(\"-m\", \"--model\", type=str, default='../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth', help=\"Path to pretrained model\")\n parser.add_argument(\"-s\", \"--save_dir\", type=str, default='../results/LumoImg', help=\"Path to save_dir\")\n\n # Parse the command line arguments to an object\n args = parser.parse_args()\n\n #################\n # configurations\n #################\n device = torch.device('cuda')\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n ############################################################################\n #### model\n model_path = args.model\n N_in = 7 # use N_in images to restore one HR image\n predeblur, HR_in = False, False\n back_RBs = 40\n t0 = time.perf_counter()\n model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)\n t1 = time.perf_counter()\n print(\"time for initiating model: \" + str(t1-t0))\n\n #### evaluation\n save_imgs = True\n img_name = \"super_res\"\n save_folder = args.save_dir\n util.mkdirs(save_folder)\n\n t0 = 
time.perf_counter()\n #### set up the models\n model.load_state_dict(torch.load(model_path), strict=True)\n model.eval()\n model = model.to(device)\n t1 = time.perf_counter()\n print(\"time for loading model: \" + str(t1-t0))\n\n ### server setting: if Address already in use => netstat -ltnp\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(\"tcp://*:5555\")\n print(\"==============================\")\n print(\"Super Res Server is Listening:\")\n flag = 'c' # c stands for continue\n while True:\n flag = socket.recv_string()\n if flag != 'c':\n socket.send_string(\"done\")\n break\n else:\n socket.send_string(\"ok\")\n images = recv_array(socket)\n img_out = eval(images, N_in, model, device)\n send_array(socket, img_out)\n if save_imgs:\n cv2.imwrite(osp.join(save_folder, '{}.png'.format(img_name)), img_out)\n\nif __name__ == '__main__':\n t_main_0 = time.perf_counter()\n main()\n t_main_1 = time.perf_counter()\n print(\"time for main: \" + str(t_main_1-t_main_0))\n","sub_path":"codes/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"615741762","text":"\"\"\"\nYou are given an integer hoursBefore, the number of hours you have to travel to your meeting. To arrive at your meeting, you have to travel through n roads. The road lengths are given as an integer array dist of length n, where dist[i] describes the length of the ith road in kilometers. In addition, you are given an integer speed, which is the speed (in km/h) you will travel at.\n\nAfter you travel road i, you must rest and wait for the next integer hour before you can begin traveling on the next road. Note that you do not have to rest after traveling the last road because you are already at the meeting.\n\nFor example, if traveling a road takes 1.4 hours, you must wait until the 2 hour mark before traveling the next road. If traveling a road takes exactly 2 hours, you do not need to wait.\nHowever, you are allowed to skip some rests to be able to arrive on time, meaning you do not need to wait for the next integer hour. Note that this means you may finish traveling future roads at different hour marks.\n\nFor example, suppose traveling the first road takes 1.4 hours and traveling the second road takes 0.6 hours. 
Skipping the rest after the first road will mean you finish traveling the second road right at the 2 hour mark, letting you start traveling the third road immediately.\nReturn the minimum number of skips required to arrive at the meeting on time, or -1 if it is impossible.\n\n \n\nExample 1:\n\nInput: dist = [1,3,2], speed = 4, hoursBefore = 2\nOutput: 1\nExplanation:\nWithout skipping any rests, you will arrive in (1/4 + 3/4) + (3/4 + 1/4) + (2/4) = 2.5 hours.\nYou can skip the first rest to arrive in ((1/4 + 0) + (3/4 + 0)) + (2/4) = 1.5 hours.\nNote that the second rest is shortened because you finish traveling the second road at an integer hour due to skipping the first rest.\nExample 2:\n\nInput: dist = [7,3,5,5], speed = 2, hoursBefore = 10\nOutput: 2\nExplanation:\nWithout skipping any rests, you will arrive in (7/2 + 1/2) + (3/2 + 1/2) + (5/2 + 1/2) + (5/2) = 11.5 hours.\nYou can skip the first and third rest to arrive in ((7/2 + 0) + (3/2 + 0)) + ((5/2 + 0) + (5/2)) = 10 hours.\nExample 3:\n\nInput: dist = [7,3,5,5], speed = 1, hoursBefore = 10\nOutput: -1\nExplanation: It is impossible to arrive at the meeting on time even if you skip all the rests.\n \n\nConstraints:\n\nn == dist.length\n1 <= n <= 1000\n1 <= dist[i] <= 10^5\n1 <= speed <= 10^6\n1 <= hoursBefore <= 10^7\n\"\"\"\n\n\nimport math\nfrom typing import List\nclass Solution:\n    def minSkips(self, dist: List[int], speed: int, hoursBefore: int) -> int:\n\n        def calc(skip):\n            # Feasibility check: minimal arrival time with at most `skip` skips,\n            # tracked in units scaled by `speed` so everything stays integral.\n            INF = float('inf')\n            dp = [INF] * (skip + 1)\n            dp[0] = 0\n            for idx, d in enumerate(dist):\n                ndp = [INF] * (skip + 1)\n                last = idx == len(dist) - 1\n                for j in range(skip + 1):\n                    if dp[j] < INF:\n                        t = dp[j] + d\n                        # rest: round up to the next whole hour (no rest after the last road)\n                        ndp[j] = min(ndp[j], t if last else (t + speed - 1) // speed * speed)\n                    if j and dp[j - 1] < INF:\n                        # skip the rest after this road\n                        ndp[j] = min(ndp[j], dp[j - 1] + d)\n                dp = ndp\n            return min(dp) <= hoursBefore * speed\n\n        # compare in integer units to avoid floating-point error\n        if sum(dist) > hoursBefore * speed:\n            return -1\n        # calc is monotone in the number of skips, so binary search works\n        left, right = 0, len(dist)\n        while left < right:\n            mid = (left + right) // 2\n            if not calc(mid):\n                left = mid + 1\n            else:\n                right = mid\n        return left\n\n\n# https://leetcode-cn.com/problems/minimum-skips-to-arrive-at-meeting-on-time/solution/minimum-skips-to-arrive-at-meeting-on-ti-dp7v/\nfrom typing import List\nclass Solution:\n    def minSkips(self, dist: List[int], speed: int, hoursBefore: int) -> int:\n\n        length = len(dist)\n        dp = [[float('inf')] * (length + 1) for _ in range(length + 1)]\n        dp[0][0] = 0\n\n        for i in range(1, length + 1):\n            for j in range(i + 1):\n                if j != 0:\n                    dp[i][j] = min(dp[i - 1][j - 1] + dist[i - 1], dp[i][j])\n                if j != i:\n                    dp[i][j] = min(dp[i][j], math.ceil((dp[i - 1][j] + dist[i - 1]) / speed) * speed)\n        # for row in dp:\n        #     print(row)\n        for j in range(length):\n            if dp[-1][j] <= speed * hoursBefore:\n                return j\n        return -1\n\n\n# https://leetcode.com/problems/minimum-skips-to-arrive-at-meeting-on-time/discuss/1239772/Python-dp-O(n2)\nclass Solution:\n    def minSkips(self, dist: List[int], speed: int, hoursBefore: int) -> int:\n        n=len(dist)\n        dp=[[float('inf') for _ in range(n+1)] for _ in range(n+1)]\n        dp[0][0]=0\n        \n        for i,d in enumerate(dist,1):\n            dp[i][0]=(dp[i-1][0]+d+speed-1)//speed*speed\n            for j in range(1,i+1):\n                dp[i][j]=min(dp[i-1][j-1]+d,(dp[i-1][j]+d+speed-1)//speed*speed)\n        \n        for j,t in enumerate(dp[-1]):\n            if t<=speed*hoursBefore:\n                return j\n        return -1\n\nclass Solution:\n    def minSkips(self, dist: List[int], speed: int, hoursBefore: int) -> int:\n        eps=1e-9\n        \n        n=len(dist)\n        dp=[[10**10 for _ in range(n+1)] for _ in range(n+1)]\n        dp[0][0]=0\n        \n        for i,d in enumerate(dist,1):\n            dp[i][0]=math.ceil(dp[i-1][0]+d/speed-eps)\n            for j in range(1,i+1):\n                dp[i][j]=min(dp[i-1][j-1]+d/speed,math.ceil(dp[i-1][j]+d/speed-eps))\n        \n        for j,t in enumerate(dp[-1]):\n            if t<=hoursBefore:\n                return j\n        return -1\n\nS = Solution()\ndist = [1,3,2]\nspeed = 4\nhoursBefore = 2\nprint(S.minSkips(dist, speed, hoursBefore))\ndist = [7,3,5,5]\nspeed = 
2\nhoursBefore = 10\nprint(S.minSkips(dist, speed, hoursBefore))\ndist = [7,3,5,5]\nspeed = 1\nhoursBefore = 10\nprint(S.minSkips(dist, speed, hoursBefore))\n","sub_path":"Python/1883_MinimumSkipstoArriveatMeetingOnTime.py","file_name":"1883_MinimumSkipstoArriveatMeetingOnTime.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"129114903","text":"import xlwings as xw\nfrom tkinter import *\nimport tkinter.font as tkFont\nimport os\n\n# Write the data\ndef set_gui(new_file_name):\n    root = Tk()  # create a Tk object\n    root.title(\"输入要填充的颜色值\")  # set the title\n    root.geometry(\"380x80\")  # set the window size\n    entry = Entry(root, font=(\"Calibri\", 20))\n    entry.pack(side=LEFT)  # set up the entry box\n    ft = tkFont.Font(family='Fixdsys', size=20, weight=tkFont.BOLD)\n\n    def queding():\n        get_value = entry.get()\n        input_value(get_value, new_file_name)\n\n    Button(root, text=\"确定\", command=queding,\n           font=ft,\n           bg=\"yellow\",\n           width=10, height=2).pack(side=LEFT)\n\n    root.mainloop()\n\n# Write the color values\ndef input_value(get_value, new_file_name):\n    app = xw.App(visible=True, add_book=False)\n\n    # 1. Open the original workbook\n    workbook = app.books.open(new_file_name)\n\n    # 2. Get all worksheets\n    sheets = workbook.sheets\n    n = 0\n    for she in sheets:\n        n += 1\n        rows_num = she.used_range.address.split(\"$\")[-1]\n        culomn_num = she.used_range.address.split(\"$\")[3]\n        range_value = \"A\" + str(n) + \":\" + culomn_num + str(n)\n\n        color_list = get_value.split(\",\")\n        new_color_list = []\n        for i in color_list:\n            new_color_list.append(int(i))\n        new_color_list = tuple(new_color_list)\n\n        # Fill the colors\n        for culomn in range(0, int(rows_num) + 1):\n            range_value_new = \"A\" + str(culomn) + \":\" + culomn_num + str(culomn)\n            if culomn % 2 == 1:\n                she.range(range_value_new).color = new_color_list\n\n    # 3. Save the workbook\n    save_close(app, workbook)\n\n# 2. Save and exit the program\ndef save_close(app, workbook):\n    workbook.save()\n    workbook.close()\n    app.quit()\n\n# Get files with the specified extension\ndef get_filename(path, filetype):  # input: path and file type, e.g. '.csv'\n    file_name = []\n    for root, dirs, files in os.walk(path):\n        for i in files:\n            if filetype + ' ' in i + ' ':  # without appending a character here this may misfire; appending one (not necessarily a space) handles 99.99% of cases\n                file_name.append(i)\n    return file_name  # output: a list of file names with that extension\n\ndef read_write_file(file_name):\n    # Read the file\n    file = open(file_name, \"rb\")\n    str_file = file.read()\n    file.close()\n\n    # Write the file\n    new_file_name = \"隔行填色\" + file_name[:-5] + \".xlsx\"\n    f = open(new_file_name, \"wb\")\n    f.write(str_file)\n    f.close()\n    return new_file_name\n\n# Entry point of the Python program\nif __name__ == \"__main__\":\n    path = \"../source_material/07/01一键批量隔行填色\"\n    os.chdir(path)\n    # 1. First copy the original workbook\n    file_name = get_filename(\".\", \".xlsx\")[0]\n    new_file_name = read_write_file(file_name)\n\n    # 2. Get the color data\n    get_value = set_gui(new_file_name)\n","sub_path":"Python_Office_Automation/05-黑科技拓展/07-一键自动处理工作簿3/01-一键批量隔行填色.py","file_name":"01-一键批量隔行填色.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"533309566","text":"#! 
/usr/bin/env python\nimport abjad\nimport baca\nimport helpers\nimport manifolds\nimport os\nimport pathlib\nimport sys\n\n\nif __name__ == '__main__':\n\n    os.system('clear')\n    usage = 'usage: run.py [--nowrite|--write]'\n    write = baca.check_score_builder_commandline(sys.argv, usage)\n\n    print('Reading meter list from disk ...')\n    meter_list = manifolds.etc.rhythm.meter_list\n\n    print('Making score ...')\n    score = abjad.Score()\n    measures = abjad.make(meter_list)\n    staff = abjad.RhythmicStaff(measures)\n    score.append(staff)\n\n    print('Applying global tempo ...')\n    helpers.apply_global_tempo(score)\n\n    print('Grouping measures into tempo regions ...')\n    measure_groups = staff.group_prolated(\n        staff[:],\n        [staff.duration.prolated / 6],\n        cyclic=True,\n        )\n    len_measure_groups = [len(group) for group in measure_groups]\n    assert len_measure_groups == [45, 44, 46, 45, 42, 46]\n\n    print('Applying proportional tempo spanners ...')\n    tempo_regions = helpers.apply_tempo_indications(measure_groups)\n\n    \"\"\"\n    Resulting in six tempo regions:\n\n    2'50\" 170.5\n    2'09\" 129.6\n    2'27\" 147.3\n    2'15\" 135.0\n    2'04\" 124.6\n    1'51\" 111.7\n\n    With a total duration of 13'38\".\n    \"\"\"\n\n    print('Writing measures per tempo region to disk ...')\n    measures_per_tempo_region = [len(x) for x in tempo_regions]\n    output = 'measures_per_tempo_region = %s' % measures_per_tempo_region\n    baca.cache_output(output, 'measures_per_tempo_region', __file__)\n\n    print('Laying out score ...')\n    helpers.apply_score_layout(score)\n\n    print('Checking score well-formedness ...')\n    assert abjad.inspect(score).wellformed()\n\n    print('Handling I/O ...')\n    directory = pathlib.Path(__file__).parent\n    file_path = directory / 'score' / 'tempo_regions'\n    template_path = directory / 'score' / 'template.ly'\n    title = ['Mannigfaltigkeiten', 'tempo regions']\n    abjad.io.write_and_show(\n        score,\n        file_path,\n        template_path,\n        title,\n        write=write,\n        )\n    print()\n","sub_path":"manifolds/etc/rhythm/ff_partition_measures_into_tempo_regions/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"636441584","text":"#Library\nimport turtle\n\n#Set up Colors\ncolors=['red','purple','blue','green','yellow','orange']\n\n#Set up objects for screen and cursor\nwn=turtle.Screen()\njave=turtle.Turtle()\n\n#Properties for cursor and background\njave.speed(0)\nwn.bgcolor('black')\n\n#Loop to keep spiral running\nfor x in range(360):\n    jave.pencolor(colors[x%6])\n    jave.width(x/100+1)\n    jave.forward(x)\n    jave.left(59)\n","sub_path":"Turtle/ColorfulSpiral.py","file_name":"ColorfulSpiral.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"339788736","text":"import pygame,playerobj\r\n\r\nclass Meteor(pygame.sprite.Sprite):\r\n    def __init__(self,x,y,dirx,diry,player):\r\n        \"\"\"asteroid sprite object\"\"\"\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.speed=0.9+(0.1*player.getLevel())\r\n        self.x=x\r\n        self.y=y\r\n        self.dirx=dirx\r\n        self.diry=diry\r\n        self.image=pygame.image.load('images/meteor.png')\r\n        self.image.convert()\r\n        self.rect=self.image.get_rect()\r\n\r\n    def getxPosition(self):\r\n        \"\"\"returns the x position\"\"\"\r\n        return self.x\r\n\r\n    def getyPostion(self):\r\n        \"\"\"returns the y position\"\"\"\r\n        return self.y\r\n\r\n    def setPosition(self,x,y):\r\n        \"\"\"sets the x and y positions\"\"\"\r\n        self.x=x\r\n        self.y=y\r\n\r\n    def 
collisionCheck(self,player):\r\n        \"\"\"checks whether a collision occurred between the asteroid object and the player, and removes the asteroid object if so\"\"\"\r\n        collision=pygame.sprite.collide_rect(self,player)\r\n        if collision:\r\n            player.reducePlayerHP(100)\r\n            self.kill()\r\n\r\n    def movement(self,screen_width,screen_height):\r\n        \"\"\"updates the x and y positions according to the speed value\"\"\"\r\n        self.x+=self.dirx*self.speed\r\n        self.y+=self.diry*self.speed\r\n\r\n        if self.y>screen_height:\r\n            self.kill()\r\n        if self.y+32<0:\r\n            self.kill()\r\n        if self.x<0:\r\n            self.kill()\r\n\r\n\r\n\r\n    def update(self,player,screen_width,screen_height):\r\n        \"\"\"updates the asteroid object\"\"\"\r\n        self.rect.x=self.x\r\n        self.rect.y=self.y\r\n        self.collisionCheck(player)\r\n        self.movement(screen_width,screen_height)\r\n","sub_path":"meteors.py","file_name":"meteors.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"88182","text":"################################################################################\n# Licensed to the FIWARE Foundation (FF) under one\n# or more contributor license agreements. The FF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n# This program takes a keyvalues payload and converts it into a normalized version, or the other way round\nimport json\n\ndef normalized2keyvalues(normalizedPayload):\n    import json\n    normalizedDict = normalizedPayload\n    # normalizedDict = json.loads(normalizedPayload)\n    output = {}\n    # print(normalizedDict)\n    for element in normalizedDict:\n        print(normalizedDict[element])\n        try:\n            value = normalizedDict[element][\"value\"]\n            output[element] = value\n        except:\n            output[element] = normalizedDict[element]\n\n    print(json.dumps(output, indent=4, sort_keys=True))\n    return output\n\n\ndef keyvalues2normalized(keyvaluesPayload):\n    import json\n\n    def valid_date(datestring):\n        import re\n        date = datestring.split(\"T\")[0]\n        print(date)\n        try:\n            validDate = re.match('^[0-9]{2,4}[-/][0-9]{2}[-/][0-9]{2,4}$', date)\n            print(validDate)\n        except ValueError:\n            return False\n\n        if validDate is not None:\n            return True\n        else:\n            return False\n\n    keyvaluesDict = keyvaluesPayload\n    output = {}\n    # print(normalizedDict)\n    for element in keyvaluesDict:\n        item = {}\n        print(keyvaluesDict[element])\n        if isinstance(keyvaluesDict[element], list):\n            # it is an array\n            item[\"type\"] = \"array\"\n            item[\"value\"] = keyvaluesDict[element]\n        elif isinstance(keyvaluesDict[element], dict):\n            # it is an object\n            item[\"type\"] = \"object\"\n            item[\"value\"] = keyvaluesDict[element]\n        elif isinstance(keyvaluesDict[element], str):\n            if valid_date(keyvaluesDict[element]):\n                # it is a date\n                item[\"format\"] = \"date-time\"\n            # it is a string\n            item[\"type\"] = \"string\"\n            item[\"value\"] = keyvaluesDict[element]\n        elif keyvaluesDict[element] == True:\n            # it 
is a boolean\n            item[\"type\"] = \"boolean\"\n            item[\"value\"] = \"true\"\n        elif keyvaluesDict[element] == False:\n            # it is a boolean\n            item[\"type\"] = \"boolean\"\n            item[\"value\"] = \"false\"\n        elif isinstance(keyvaluesDict[element], int) or isinstance(keyvaluesDict[element], float):\n            # it is a number\n            item[\"type\"] = \"number\"\n            item[\"value\"] = keyvaluesDict[element]\n        else:\n            print(\"*** other type ***\")\n            print(\"I do not know what it is\")\n            print(keyvaluesDict[element])\n            print(\"--- other type ---\")\n        output[element] = item\n\n    if \"id\" in output:\n        output[\"id\"] = output[\"id\"][\"value\"]\n    if \"type\" in output:\n        output[\"type\"] = output[\"type\"][\"value\"]\n    if \"@context\" in output:\n        output[\"@context\"] = output[\"@context\"][\"value\"]\n    print(output)\n    with open(\"output.json\", \"w\") as outputfile:\n        rawoutput = json.dumps(output, indent=4)\n        outputfile.write(rawoutput)\n    return output\n\n\nkeyvaluesPayload = {\n    \"id\": \"0.E.6.AY1.A1\",\n    \"type\": \"Action\",\n    \"refProject\": \"O.E.6.AY1\",\n    \"dateCreated\": \"2016-08-08T10:18:16Z\",\n    \"dateModified\": \"2016-08-08T10:18:16Z\",\n    \"name\": \"Realización de campañas de promoción en medios de comunicación de la provincia\",\n    \"executionPeriod\": \"2021S1\",\n    \"compliancePercentage\": 0,\n    \"modifications\": \"SIN MODIFICACION\"\n}\n\n\nnormalizedPayload = {\n    \"id\": \"urn:ngsi-ld:Catalogue:id:KSLT:97146192\",\n    \"type\": \"Catalogue\",\n    \"dateCreated\": {\n        \"type\": \"Property\",\n        \"value\": {\n            \"@type\": \"DateTime\",\n            \"@value\": \"2023-03-20T18:53:50Z\"\n        }\n    },\n    \"dateModified\": {\n        \"type\": \"Property\",\n        \"value\": {\n            \"@type\": 
\"DateTime\",\n \"@value\": \"2023-01-20T11:03:48Z\"\n }\n },\n \"themes\": {\n \"type\": \"Property\",\n \"value\": [\n \"demography\",\n \"social movements\"\n ]\n },\n \"modificationDate\": {\n \"type\": \"Property\",\n \"value\": {\n \"@type\": \"DateTime\",\n \"@value\": \"2023-02-24T16:28:58Z\"\n }\n },\n \"hasPart\": {\n \"type\": \"object\",\n \"value\": \"urn:ngsi-ld:Catalogue:hasPart:EQFC:38298320\"\n },\n \"isPartOf\": {\n \"type\": \"object\",\n \"value\": \"urn:ngsi-ld:Catalogue:isPartOf:JACJ:87819283\"\n },\n \"record\": {\n \"type\": \"object\",\n \"value\": \"urn:ngsi-ld:Catalogue:record:UEFV:49174271\"\n },\n \"rights\": {\n \"type\": \"Property\",\n \"value\": \"Open licensed\"\n },\n \"spatial_geographic\": {\n \"type\": \"Property\",\n \"value\": [\n {\n \"type\": \"Point\",\n \"coordinates\": [\n 121.7,\n 146.6\n ],\n \"bbox\": [\n 46.5,\n 926.8,\n 995.6,\n 403.5\n ]\n },\n {\n \"type\": \"Point\",\n \"coordinates\": [\n 60.3,\n 491.9\n ],\n \"bbox\": [\n 652.6,\n 335.8,\n 341.6,\n 875.0\n ]\n }\n ]\n },\n \"@context\": [\n \"https://raw.githubusercontent.com/smart-data-models/dataModel.STAT-DCAT-AP/master/context.jsonld\"\n ]\n}\n\n\n\npayload = normalized2keyvalues(normalizedPayload)\nprint(payload)\nwith open(\"example-normalized.json\", \"w\") as file:\n json.dump(payload, file)\n\n# schema = keyvalues2normalized(keyvaluesPayload)\n# with open(\"keyvalues.json\", \"w\") as file:\n# json.dump(schema, file)\n","sub_path":"utils/examples_conversor.py","file_name":"examples_conversor.py","file_ext":"py","file_size_in_byte":7938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"177409163","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport datetime\nimport calendar\n#from lxml import etree\n#import netsvc\nfrom osv import osv, fields, orm\nfrom tools.translate import _\nimport pooler\n\nimport logging\nlogger = logging.getLogger('DOTCOM_LOGGER')\n\n\n\ndef referencia_consultor(cr,uid,context=None):\n if context is None:\n context={}\n pool= pooler.get_pool(cr.dbname)\n \n referencia_gerada=''\n ref_consultor_ids=pool.get('dotcom.advogacia.referecias').search(cr,uid,[\n ('objecto_refencia','=','consultor')])\n \n logger.info('INICIO DA CRIACAO DA REFERENCIA')\n consultores_ids=pool.get('dotcom.advogacia.consultor').search(cr,uid,[])\n logger.info('NUMERO DE CONSULTORES JA REGISTRADOS %s' %str(len(consultores_ids)))\n logger.info('NUMERO DE REFERENCIAS JA REGISTRADOS %s' %str(len(ref_consultor_ids)))\n if len(ref_consultor_ids)>0:\n referencia_object=pool.get('dotcom.advogacia.referecias').browse(cr,uid,ref_consultor_ids[0])\n \n item=referencia_object.objecto_refencia\n separador=referencia_object.separador\n \n if bool(referencia_object.prefixo)==True:\n referencia_gerada=referencia_object.prefixo+str(separador)\n \n if len(consultores_ids)+1<10:\n referencia_gerada=referencia_gerada+'00'+str(len(consultores_ids)+1)\n elif 10<=len(consultores_ids)+1<100:\n referencia_gerada=referencia_gerada+'0'+str(len(consultores_ids)+1)\n \n if bool(referencia_object.sufixo)==True:\n referencia_gerada=referencia_gerada+str(separador)+referencia_object.sufixo\n logger.info('REFERENCIA DO CONSULTOR GERADA %s' %str(referencia_gerada))\n \n else:\n if len(consultores_ids)+1<10:\n referencia_gerada='CON'+'00'+str(len(consultores_ids)+1)\n elif 10<=len(consultores_ids)+1<100:\n referencia_gerada='CON'+'0'+str(len(consultores_ids)+1)\n return referencia_gerada\n\n\ndef referencia_dossier(cr,uid,context=None):\n if context is None:\n 
context={}\n    \n    pool= pooler.get_pool(cr.dbname)\n    \n    referencia_gerada=''\n    ref_dossier_ids=pool.get('dotcom.advogacia.referecias').search(cr,uid,[\n            ('objecto_refencia','=','dossier')])\n    \n    dossiers_ids=pool.get('dotcom.advogacia.dossier').search(cr,uid,[])\n    if len(ref_dossier_ids)>0:\n        referencia_object=pool.get('dotcom.advogacia.referecias').browse(cr,uid,ref_dossier_ids[0])\n        \n        item=referencia_object.objecto_refencia\n        separador=referencia_object.separador\n        \n        if bool(referencia_object.prefixo)==True:\n            referencia_gerada=referencia_object.prefixo+str(separador)\n        \n        if len(dossiers_ids)+1<10:\n            referencia_gerada=referencia_gerada+'00'+str(len(dossiers_ids)+1)\n        elif 10<=len(dossiers_ids)+1<100:\n            referencia_gerada=referencia_gerada+'0'+str(len(dossiers_ids)+1)\n        \n        if bool(referencia_object.sufixo)==True:\n            referencia_gerada=referencia_gerada+str(separador)+referencia_object.sufixo\n        logger.info('REFERENCIA DO CONSULTOR GERADA %s' %str(referencia_gerada))\n        \n    else:\n        if len(dossiers_ids)+1<10:\n            referencia_gerada='DO'+'00'+str(len(dossiers_ids)+1)\n        elif 10<=len(dossiers_ids)+1<100:\n            referencia_gerada='DO'+'0'+str(len(dossiers_ids)+1)\n    return referencia_gerada\n\n\ndef validar_existencia_timesheet(cr,uid,consultor_id,data,dossier_id,context={}):\n    if context is None:\n        context={}\n    pool= pooler.get_pool(cr.dbname)\n    \n    timesheets_ids=pool.get('dotcom.advogacia.timesheet').search(cr,uid,[\n        ('consultor_id','=',consultor_id),\n        ('data','=',data),\n        ('dossier_id','=',dossier_id),\n        ('state','in',['rascunho','aberto'])\n        ])\n    advogado_object=pool.get('dotcom.advogacia.consultor').browse(cr,uid,consultor_id)\n    if len(timesheets_ids)>0:\n        raise osv.except_osv(_('Acção Invalida !'), _('O Advogado \"'+str(advogado_object.nome)+'\" possui Folha de Trabalho em aberto para a Data e Processo em causa!!'))\n    \n    \ndef validar_duplicacao_advogado(cr,uid,processo_id,consultor_id,context=None):\n    if context is None:\n        context={}\n    pool= pooler.get_pool(cr.dbname)\n    lista_advogados=pool.get('dotcom.advogacia.consultor.dossier').search(cr,uid,[('dossier_id','=',processo_id)])\n    for advogado in lista_advogados:\n        advogado_object=pool.get('dotcom.advogacia.consultor.dossier').browse(cr,uid,advogado)\n        if advogado_object.consultor_id.id==consultor_id:\n            advogado=pool.get('dotcom.advogacia.consultor').browse(cr,uid,consultor_id)\n            raise osv.except_osv(_('Acção Invalida !'), _('O Advogado '+str(advogado.nome)+' está Repetido nas linhas dos Executores!!'))\n    return True\n    \n    \ndef validar_existencia_timesheets(cr,uid,processo_id,context=None):\n    \n    if context is None:\n        context={}\n    \n    pool= pooler.get_pool(cr.dbname)\n    lista_timesheets_ids=pool.get('dotcom.advogacia.timesheet').search(cr,uid,[('dossier_id','=',processo_id),\n                                                                  ('state','=','confirmado')])\n    if len(lista_timesheets_ids)>0:\n        raise osv.except_osv(_('Acção Inválida !'), _('Processo com Folhas de Trabalho registadas!!'))\n    return True\n\n\ndef validar_folhas_trabalho_advogado(cr,uid,advogado_id,context=None):\n    if context is None:\n        context={}\n    \n    pool= pooler.get_pool(cr.dbname)\n    lista_timesheets_ids=pool.get('dotcom.advogacia.timesheet').search(cr,uid,[('consultor_id','=',advogado_id),\n                                                                  ('state','=','confirmado')])\n    if len(lista_timesheets_ids)>0:\n        raise osv.except_osv(_('Acção Invalida !'), _('Advogado com folhas de Trabalho no Processo!!'))\n    return True\n\n\ndef validar_existencia_facturas(cr,uid,dossier,context=None):\n    if context is None:\n        context={}\n    \n    pool= pooler.get_pool(cr.dbname)\n    
vendas_ids=pool.get('dotcom.venda').search(cr,uid,[('dossier_id','=',dossier.id)])\n if len(vendas_ids)>0:\n raise osv.except_osv(_('Acção Inválida !'), _('Este processo já foi facturado.Náo é possível facturar pela segunda vez processos de Valor Fixo!!'))\n \n return True\n\ndef verificar_existencia_vendas(cr,uid,timesheet,dossier,context=None):\n if context is None:\n context={}\n \n if bool(timesheet):\n if bool(timesheet.venda_id.id)==True:\n raise osv.except_osv(_('Acção Inválida !'), _('Folha de Trabalho do Processo '+ str(timesheet.dossier_id.referencia)+'/'+str(timesheet.dossier_id.nome)+' com factura já criada!!'))\n \n if bool(dossier.venda_id.id)==True:\n raise osv.except_osv(_('Acção Inválida !'), _('Folha de Trabalho do Processo '+ str(dossier.referencia)+'/'+str(dossier.nome)+' com factura já criada!!'))\n\n return True\n \n \ndef validar_periodo_inicial(self,cr,uid,context=None):\n if context is None:\n context={}\n pool= pooler.get_pool(cr.dbname)\n today= datetime.datetime.now().strftime('%Y-%m-%d')\n fiscal_pool = pool.get('configuration.fiscalyear')\n results = fiscal_pool.search(cr,uid,[('date_start','<=',today),('date_stop','>=',today)])\n retorno=None\n if len(results)>0:\n ano_fiscal=results[0]\n periodo_ids=pool.get('configuration.period').search(cr,uid,[('fiscalyear_id','=',ano_fiscal)])\n retorno=periodo_ids[0]\n return retorno\n\n\n\ndef validar_periodo_final(self,cr,uid,context=None):\n if context is None:\n context={}\n pool= pooler.get_pool(cr.dbname)\n today= datetime.datetime.now().strftime('%Y-%m-%d')\n fiscal_pool = pool.get('configuration.fiscalyear')\n results = fiscal_pool.search(cr,uid,[('date_start','<=',today),('date_stop','>=',today)])\n retorno=None\n if len(results)>0:\n ano_fiscal=results[0]\n periodo_ids=pool.get('configuration.period').search(cr,uid,[('fiscalyear_id','=',ano_fiscal),\n ('date_start','<=',today),\n ('date_stop','>=',today)])\n retorno=periodo_ids[0]\n return retorno\n","sub_path":"dotcom_advogados/validators/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"34938523","text":"#!/usr/bin/env/python\n\nfrom typing import Tuple, List, Any, Sequence\n\nimport tensorflow as tf\nimport time\nimport os\nimport json\nimport numpy as np\nimport pickle\nimport random\n\nfrom utils import MLP, ThreadedIterator, SMALL_NUMBER\n\n\nclass ChemModel(object):\n @classmethod\n def default_params(cls):\n return {\n 'num_epochs': 300,\n 'patience': 25,\n 'learning_rate': 0.001,\n 'clamp_gradient_norm': 1.0,\n 'out_layer_dropout_keep_prob': 1.0,\n 'T': 1800, # maximum allowed time\n\n 'hidden_size': 100,\n 'num_timesteps': 4,\n 'use_graph': True,\n\n 'tie_fwd_bkwd': True,\n 'task_ids': list(range(17)),\n\n 'random_seed': 0,\n 'batch_size': 100000\n\n # 'train_file': 'data/masterplan_train.json',\n # 'valid_file': 'data/masterplan_valid.json'\n # 'train_file': 'data/masterplan/masterplan_train_subsample.json',\n # 'valid_file': 'data/masterplan/masterplan_valid_subsample.json'\n }\n\n def __init__(self, args):\n self.args = args\n\n # Collect argument things:\n data_dir = ''\n if '--data_dir' in args and args['--data_dir'] is not None:\n data_dir = args['--data_dir']\n self.data_dir = data_dir\n\n data_file = \"\"\n if '--data_type' in args and args['--data_type'] is not None:\n data_type = args['--data_type']\n data_file = \"data-\" + data_type + \"-2018-07-20-train-valid.json\"\n self.data_file = data_file\n\n 
index_dir = \"\"\n if '--index_type' in args and args['--index_type'] is not None:\n index_type = args['--index_type']\n if index_type ==\"random\":\n index_dir = \"random-idx\"\n train_index_suffix = \"-train-split-rnd-2018-07-20.txt\"\n valid_index_suffix = \"-valid-split-rnd-2018-07-20.txt\"\n if index_type == \"domain\":\n index_dir = \"domain-preserving-idx\"\n train_index_suffix = \"-train-split-domains-2018-07-20.txt\"\n valid_index_suffix = \"-valid-split-domains-2018-07-20.txt\"\n self.index_dir = os.path.join(self.data_dir, index_dir)\n self.train_index = []\n self.valid_index = []\n for i in range(10):\n train_index_file = os.path.join(self.index_dir, str(i) + train_index_suffix)\n self.train_index.append(self.load_index_file(train_index_file))\n valid_index_file = os.path.join(self.index_dir, str(i) + valid_index_suffix)\n self.valid_index.append(self.load_index_file(valid_index_file))\n\n self.run_id = \"_\".join([time.strftime(\"%Y-%m-%d-%H-%M-%S\"), str(os.getpid()),self.__class__.__name__])\n log_dir = args.get('--log_dir') or '.'\n self.log_file = os.path.join(log_dir, \"%s_log.json\" % self.run_id)\n self.best_model_file = os.path.join(log_dir, \"%s_model_best.pickle\" % self.run_id)\n\n\n\n # Collect parameters:\n params = self.default_params()\n config_file = args.get('--config-file')\n if config_file is not None:\n with open(config_file, 'r') as f:\n params.update(json.load(f))\n config = args.get('--config')\n if config is not None:\n params.update(json.loads(config))\n\n random_seed = args.get(\"--random_seed\")\n if random_seed is not None:\n params[\"random_seed\"] = int(random_seed)\n\n learning_rate = args.get(\"--learning_rate\")\n if learning_rate is not None:\n params[\"learning_rate\"] = float(learning_rate)\n\n num_timesteps = args.get(\"--num_timesteps\")\n if num_timesteps is not None:\n params[\"num_timesteps\"] = int(num_timesteps)\n\n hidden_size = args.get(\"--hidden_size\")\n if hidden_size is not None:\n params[\"hidden_size\"] = int(hidden_size)\n\n params[\"data_file\"] = data_file\n params[\"index_dir\"] = index_dir\n\n\n self.params = params\n with open(os.path.join(log_dir, \"%s_params.json\" % self.run_id), \"w\") as f:\n json.dump(params, f)\n print(\"Run %s starting with following parameters:\\n%s\" % (self.run_id, json.dumps(self.params)))\n random.seed(params['random_seed'])\n np.random.seed(params['random_seed'])\n\n\n # Load data:\n self.max_num_vertices = 0\n self.num_edge_types = 0\n self.annotation_size = 0\n self.all_data_raw = self.load_raw_data(params['data_file']) # no shuffle\n\n # Build the actual model\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.graph = tf.Graph()\n self.sess = tf.Session(graph=self.graph, config=config)\n with self.graph.as_default():\n tf.set_random_seed(params['random_seed'])\n self.placeholders = {}\n self.weights = {}\n self.ops = {}\n self.make_model()\n self.make_train_step()\n\n # Restore/initialize variables:\n restore_file = args.get('--restore')\n if restore_file is not None:\n self.restore_model(restore_file)\n else:\n self.initialize_model()\n\n def load_raw_data(self, file_name):\n full_path = os.path.join(self.data_dir, file_name)\n\n print(\"Loading data from %s\" % full_path)\n with open(full_path, 'r') as f:\n data = json.load(f)\n\n restrict = self.args.get(\"--restrict_data\")\n if restrict is not None and restrict > 0:\n data = data[:restrict]\n\n # Get some common data out:\n num_fwd_edge_types = 0\n for g in data:\n self.max_num_vertices = 
max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))\n num_fwd_edge_types = max(num_fwd_edge_types, max([e[1] for e in g['graph']]))\n self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types * (1 if self.params['tie_fwd_bkwd'] else 2))\n self.annotation_size = max(self.annotation_size, len(data[0][\"node_features\"][0]))\n return data\n # return self.process_raw_graphs(data, is_training_data, context_file, batch_size=self.params['batch_size'])\n\n def load_data(self, file_name, is_training_data: bool, context_file=None): # context file is the first-half result (npz file)\n data = self.load_raw_data(file_name)\n return self.process_raw_graphs(data, is_training_data, context_file, batch_size=self.params['batch_size'])\n\n\n def load_index_file(self, indexfile):\n with open(indexfile, 'r') as f:\n indecies = f.readlines()\n indecies = [int(a.strip()) for a in indecies]\n return np.array(indecies)\n\n\n @staticmethod\n def graph_string_to_array(graph_string: str) -> List[List[int]]:\n return [[int(v) for v in s.split(' ')]\n for s in graph_string.split('\\n')]\n\n def process_raw_graphs(self, raw_data: Sequence[Any], is_training_data: bool, context_file, batch_size=None) -> Any:\n raise Exception(\"Models have to implement process_raw_graphs!\")\n\n def masked_accuracy(self, preds, labels, mask):\n \"\"\"Accuracy with masking.\"\"\"\n correct_prediction = tf.equal(preds>=0.5, labels>=0.5)\n accuracy_all = tf.cast(correct_prediction, tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n accuracy_all *= mask\n return tf.reduce_mean(accuracy_all)\n\n def make_model(self):\n self.placeholders['target_values'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],\n name='target_values')\n self.placeholders['target_mask'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],\n name='target_mask')\n self.placeholders['num_graphs'] = tf.placeholder(tf.int32, [], name='num_graphs')\n self.placeholders['out_layer_dropout_keep_prob'] = tf.placeholder(tf.float32, [], name='out_layer_dropout_keep_prob')\n self.placeholders['pre_id_vector'] = tf.placeholder(tf.float32, [None, len(self.params['task_ids'])],\n name='pre_id_vector')\n\n with tf.variable_scope(\"graph_model\"):\n self.prepare_specific_graph_model()\n # This does the actual graph work:\n if self.params['use_graph']:\n self.ops['final_node_representations'] = self.compute_final_node_representations()\n else:\n self.ops['final_node_representations'] = tf.zeros_like(self.placeholders['initial_node_representation'])\n\n self.ops['losses'] = []\n self.ops['predicted_values'] = []\n\n with tf.variable_scope(\"regression_gate\"):\n self.weights['regression_gate_task'] = MLP(2 * self.params['hidden_size'], 1, [],\n self.placeholders['out_layer_dropout_keep_prob'])\n\n for (internal_id, task_id) in enumerate(self.params['task_ids']):\n with tf.variable_scope(\"out_layer_task%i\" % task_id):\n # with tf.variable_scope(\"regression_gate\"):\n # self.weights['regression_gate_task%i' % task_id] = MLP(2 * self.params['hidden_size'], 1, [],\n # self.placeholders['out_layer_dropout_keep_prob'])\n with tf.variable_scope(\"regression\"):\n self.weights['regression_transform_task%i' % task_id] = MLP(self.params['hidden_size'], 1, [],\n self.placeholders['out_layer_dropout_keep_prob'])\n self.weights['context_embedding_task%i' % task_id] = MLP(len(self.params['task_ids']), 1, [],\n self.placeholders['out_layer_dropout_keep_prob'])\n computed_values = 
self.gated_regression(self.ops['final_node_representations'],\n self.weights['regression_gate_task'],\n self.weights['regression_transform_task%i' % task_id])\n context_values = tf.squeeze(self.weights['context_embedding_task%i' % task_id](self.placeholders['pre_id_vector']))\n computed_values = computed_values + context_values\n predictions = tf.nn.sigmoid(computed_values)\n self.ops['predicted_values'].append(predictions)\n diff = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.placeholders['target_values'][internal_id,:],logits=computed_values)\n task_target_mask = self.placeholders['target_mask'][internal_id,:]\n task_target_num = tf.reduce_sum(task_target_mask) + SMALL_NUMBER\n diff = diff * task_target_mask # Mask out unused values\n\n self.ops['accuracy_task%i' % task_id] = self.masked_accuracy(predictions,\n self.placeholders['target_values'][internal_id,:], task_target_mask)\n\n task_loss = tf.reduce_sum(diff) / task_target_num\n # Normalise loss to account for fewer task-specific examples in batch:\n task_loss = task_loss * (1.0 / (self.params['task_sample_ratios'].get(task_id) or 1.0))\n self.ops['losses'].append(task_loss)\n self.ops['loss'] = tf.reduce_sum(self.ops['losses'])\n\n def make_train_step(self):\n trainable_vars = self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n if self.args.get('--freeze-graph-model'):\n graph_vars = set(self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"graph_model\"))\n filtered_vars = []\n for var in trainable_vars:\n if var not in graph_vars:\n filtered_vars.append(var)\n else:\n print(\"Freezing weights of variable %s.\" % var.name)\n trainable_vars = filtered_vars\n optimizer = tf.train.AdamOptimizer(self.params['learning_rate'])\n grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)\n clipped_grads = []\n for grad, var in grads_and_vars:\n if grad is not None:\n clipped_grads.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))\n else:\n clipped_grads.append((grad, var))\n self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)\n # Initialize newly-introduced variables:\n self.sess.run(tf.local_variables_initializer())\n\n def gated_regression(self, last_h, regression_gate, regression_transform):\n raise Exception(\"Models have to implement gated_regression!\")\n\n def prepare_specific_graph_model(self) -> None:\n raise Exception(\"Models have to implement prepare_specific_graph_model!\")\n\n def compute_final_node_representations(self) -> tf.Tensor:\n raise Exception(\"Models have to implement compute_final_node_representations!\")\n\n def make_minibatch_iterator(self, data: Any, is_training: bool):\n raise Exception(\"Models have to implement make_minibatch_iterator!\")\n\n def run_epoch(self, epoch_name: str, data, is_training: bool):\n\n loss = 0\n accuracies = []\n accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]\n start_time = time.time()\n processed_graphs = 0\n batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)\n for step, batch_data in enumerate(batch_iterator):\n num_graphs = batch_data[self.placeholders['num_graphs']]\n processed_graphs += num_graphs\n if is_training:\n batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params['out_layer_dropout_keep_prob']\n fetch_list = [self.ops['loss'], accuracy_ops, self.ops['train_step']]\n else:\n batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0\n 
fetch_list = [self.ops['loss'], accuracy_ops]\n result = self.sess.run(fetch_list, feed_dict=batch_data)\n (batch_loss, batch_accuracies) = (result[0], result[1])\n loss += batch_loss\n accuracies.append(np.array(batch_accuracies) * num_graphs)\n\n print(\"Running %s, batch %i (has %i graphs). Loss so far: %.4f\" % (epoch_name,\n step,\n num_graphs,\n loss / processed_graphs),\n end='\\r')\n\n accuracies = np.sum(accuracies, axis=0) / processed_graphs\n loss = loss / processed_graphs\n # error_ratios = accuracies / chemical_accuracies[self.params[\"task_ids\"]]\n error_ratios = accuracies\n instance_per_sec = processed_graphs / (time.time() - start_time)\n return loss, accuracies, error_ratios, instance_per_sec\n\n def test(self, testfile):\n if os.path.exists(self.best_model_file):\n self.restore_model(self.best_model_file)\n testdata, skipped_graphs = self.load_data(testfile,is_training_data=False)\n processed_graphs = 0\n accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]\n batch_iterator = ThreadedIterator(self.make_minibatch_iterator(testdata, False), max_queue_size=5)\n preds = []\n accs = []\n for step, batch_data in enumerate(batch_iterator):\n num_graphs = batch_data[self.placeholders['num_graphs']]\n processed_graphs += num_graphs\n\n batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0\n fetch_list = [self.ops['predicted_values'], accuracy_ops]\n\n result = self.sess.run(fetch_list, feed_dict=batch_data)\n (batch_pred, batch_accuracies) = (result[0], result[1])\n batch_pred = np.array(batch_pred)\n if len(batch_pred.shape) == 1:\n batch_pred = np.expand_dims(batch_pred, 1)\n preds.append(batch_pred.T)\n accs.append(np.array(batch_accuracies)*num_graphs)\n return np.concatenate(preds,0), np.sum(accs, axis=0)/float(processed_graphs)\n\n def pred(self, testfile, contextfile):\n if os.path.exists(self.best_model_file):\n self.restore_model(self.best_model_file)\n testdata, skipped_graphs = self.load_data(testfile,is_training_data=False, context_file= contextfile)\n processed_graphs = 0\n batch_iterator = ThreadedIterator(self.make_minibatch_iterator(testdata, False), max_queue_size=5)\n preds = []\n\n for step, batch_data in enumerate(batch_iterator):\n num_graphs = batch_data[self.placeholders['num_graphs']]\n processed_graphs += num_graphs\n\n batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0\n fetch_list = [self.ops['predicted_values']]\n\n result = self.sess.run(fetch_list, feed_dict=batch_data)\n (batch_pred) = (result[0])\n batch_pred = np.array(batch_pred)\n if len(batch_pred.shape) == 1:\n batch_pred = np.expand_dims(batch_pred, 1)\n\n preds.append(batch_pred.T)\n\n preds = np.concatenate(preds,0)\n pred_id = np.argmin(preds, axis=1)\n pred_labels = [testdata[ex_id][\"labels\"][pred_id[ex_id]] for ex_id in range(len(testdata))]\n return sum(pred_labels), len(testdata), preds, skipped_graphs\n\n\n\n def train(self, index):\n # self.train_data = self.load_data(self.params['train_file'], is_training_data=True)\n # self.valid_data = self.load_data(self.params['valid_file'], is_training_data=False)\n log_to_save = []\n total_time_start = time.time()\n\n current_train_index = self.train_index[index]\n current_valid_index = self.valid_index[index]\n self.train_data = [self.all_data_raw[i] for i in current_train_index]\n self.valid_data = [self.all_data_raw[i] for i in current_valid_index]\n self.train_data, _ = self.process_raw_graphs(self.train_data, is_training_data=True, context_file=None, 
batch_size=self.params['batch_size'])\n        self.valid_data, _ = self.process_raw_graphs(self.valid_data, is_training_data=False, context_file=None, batch_size=self.params['batch_size'])\n\n\n        with self.graph.as_default():\n            if self.args.get('--restore') is not None:\n                valid_loss, valid_accs, _, _ = self.run_epoch(\"Resumed (validation)\", self.valid_data, False)\n                best_val_acc = np.sum(valid_accs)\n                best_val_loss = valid_loss\n                best_val_acc_epoch = 0\n                print(\"\\r\\x1b[KResumed operation, initial cum. val. acc: %.5f\" % best_val_acc)\n            else:\n                (best_val_acc, best_val_loss, best_val_acc_epoch) = (0, float(\"+inf\"), 0)\n            for epoch in range(1, self.params['num_epochs'] + 1):\n                print(\"== Epoch %i\" % epoch)\n                train_loss, train_accs, train_errs, train_speed = self.run_epoch(\"epoch %i (training)\" % epoch,\n                                                                                 self.train_data, True)\n                accs_str = \" \".join([\"%i:%.5f\" % (id, acc) for (id, acc) in zip(self.params['task_ids'], train_accs)])\n                errs_str = \" \".join([\"%i:%.5f\" % (id, err) for (id, err) in zip(self.params['task_ids'], train_errs)])\n                print(\"\\r\\x1b[K Train: loss: %.5f | acc: %s | instances/sec: %.2f\" % (train_loss,\n                                                                                      accs_str,\n                                                                                      train_speed))\n                valid_loss, valid_accs, valid_errs, valid_speed = self.run_epoch(\"epoch %i (validation)\" % epoch,\n                                                                                 self.valid_data, False)\n                accs_str = \" \".join([\"%i:%.5f\" % (id, acc) for (id, acc) in zip(self.params['task_ids'], valid_accs)])\n                errs_str = \" \".join([\"%i:%.5f\" % (id, err) for (id, err) in zip(self.params['task_ids'], valid_errs)])\n                print(\"\\r\\x1b[K Valid: loss: %.5f | acc: %s | instances/sec: %.2f\" % (valid_loss,\n                                                                                      accs_str,\n                                                                                      valid_speed))\n\n                epoch_time = time.time() - total_time_start\n                log_entry = {\n                    'epoch': epoch,\n                    'time': epoch_time,\n                    'train_results': (train_loss, train_accs.tolist(), train_errs.tolist(), train_speed),\n                    'valid_results': (valid_loss, valid_accs.tolist(), valid_errs.tolist(), valid_speed),\n                }\n                log_to_save.append(log_entry)\n                with open(self.log_file, 'w') as f:\n                    json.dump(log_to_save, f, indent=4)\n\n                val_acc = np.sum(valid_accs)  # type: float\n                if valid_loss < best_val_loss or val_acc > best_val_acc:\n                    self.save_model(self.best_model_file)\n                    print(\" (Best epoch so far, cum. val. acc increased to %.5f from %.5f. 
Saving to '%s')\" % (val_acc, best_val_acc, self.best_model_file))\n best_val_acc = val_acc\n best_val_loss = valid_loss\n best_val_acc_epoch = epoch\n elif epoch - best_val_acc_epoch >= self.params['patience']:\n print(\"Stopping training after %i epochs without improvement on validation accuracy.\" % self.params['patience'])\n break\n\n def save_model(self, path: str) -> None:\n weights_to_save = {}\n for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n assert variable.name not in weights_to_save\n weights_to_save[variable.name] = self.sess.run(variable)\n\n data_to_save = {\n \"params\": self.params,\n \"weights\": weights_to_save\n }\n\n with open(path, 'wb') as out_file:\n pickle.dump(data_to_save, out_file, pickle.HIGHEST_PROTOCOL)\n\n def initialize_model(self) -> None:\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n self.sess.run(init_op)\n\n def restore_model(self, path: str) -> None:\n print(\"Restoring weights from file %s.\" % path)\n with open(path, 'rb') as in_file:\n data_to_load = pickle.load(in_file)\n\n # Assert that we got the same model configuration\n assert len(self.params) == len(data_to_load['params'])\n for (par, par_value) in self.params.items():\n # Fine to have different task_ids:\n if par not in ['task_ids', 'num_epochs','train_file', 'valid_file']:\n assert par_value == data_to_load['params'][par]\n\n variables_to_initialize = []\n with tf.name_scope(\"restore\"):\n restore_ops = []\n used_vars = set()\n for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n used_vars.add(variable.name)\n if variable.name in data_to_load['weights']:\n restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))\n else:\n print('Freshly initializing %s since no saved value was found.' % variable.name)\n variables_to_initialize.append(variable)\n for var_name in data_to_load['weights']:\n if var_name not in used_vars:\n print('Saved weights for %s not used by model.' 
% var_name)\n if len(variables_to_initialize)>0:\n restore_ops.append(tf.variables_initializer(variables_to_initialize))\n self.sess.run(restore_ops)\n","sub_path":"adap_base_cv.py","file_name":"adap_base_cv.py","file_ext":"py","file_size_in_byte":24601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"533860408","text":"from datetime import date, datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nimport pprint\n\nclass reloc_calc:\n json_result = {}\n def __init__(self, datetime_string = '08/23/2019', tax_percentage = 0.466, base_amount = 1000, time_format='%m/%d/%Y'):\n self.datetime_string = datetime_string\n self.tax_percentage = tax_percentage\n self.base_amount = base_amount\n self.time_format = time_format\n self.datetime_object = self.string_to_datetime(self.datetime_string, self.time_format)\n\n def string_to_datetime(self, string_datetime, string_format):\n \tdatetime_object = datetime.strptime(string_datetime, string_format)\n \treturn datetime_object\n\n def datetime_to_string(self, datetime_object, string_format):\n \tstring_datetime = datetime_object.strftime(string_format)\n \treturn string_datetime\n\n def calculate_tax(self, base_amount, tax_percentage=0.466):\n \ttax_amount = base_amount * tax_percentage\n \treturn tax_amount\n\n def add_months(self, datetime_object, number_of_months):\n \ttime_delta_calc = relativedelta(months=number_of_months)\n \tnew_datetime_object = datetime_object + time_delta_calc\n \treturn new_datetime_object\n\n def monthly_amount(self, total_12_months):\n \treturn total_12_months/12\n\n def due_amount(self, monthly_amount, month, total_12_months):\n \tdisbursable = monthly_amount * month\n \treturn total_12_months - disbursable\n\n def get(self):\n \tbusiness_relocation_date = self.datetime_to_string(self.datetime_object,'%m-%d-%Y')\n \tself.json_result['business_relocation_date'] = business_relocation_date\n \tself.json_result['tax_percentage'] = self.tax_percentage*100\n \tself.json_result['base_amount'] = self.base_amount\n \ttax_gross = self.calculate_tax(self.base_amount)\n \tself.json_result['tax_gross'] = tax_gross\n \ttotal_12_months = self.base_amount + tax_gross\n \tself.json_result['total_amount'] = total_12_months\n \tmonthly_disbursable = self.monthly_amount(total_12_months)\n \tself.json_result['monthly_disbursable'] = monthly_disbursable\n \tmonthly_disbursable_amount = []\n \tfor i in range(12+1):\n \t\ti_date = self.datetime_to_string(self.add_months(self.datetime_object, i), '%m-%d-%Y')\n \t\tamount_due = self.due_amount(monthly_disbursable, i, total_12_months)\n \t\tmonthly_disbursable_amount.append({'date': i_date,'amount_due': amount_due})\n \tself.json_result['monthly_disbursable_amount'] = monthly_disbursable_amount\n \tpprint.pprint(self.json_result)\n \treturn self.json_result","sub_path":"relo_calc/relo_calc.py","file_name":"relo_calc.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"500280915","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 20 12:17:34 2016\n\n@author: zz\n\"\"\"\n\nfrom RFELM import RFELM\nfrom util import *\n\ntrain_data, train_label, test_data, test_label = loadData(0.1)\nfeature_dim = train_data.shape[1]\nlabel_dim = train_label.shape[1]\n \ntrain_data = normalizeData(train_data)\ntest_data = normalizeData(test_data)\n\nrfelm = RFELM(28, 28, feature_dim*10, label_dim, 'lite', 'rf', q = 
200)\n\nrfelm.trainModel(train_data, train_label)\n#rfelm.save(r\"D:\\workspace\\Data\\ELM\\weights\\rfelm\")\nrfelm.testModel(test_data, test_label)","sub_path":"testRFELM.py","file_name":"testRFELM.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"631992078","text":"import sys\nimport itertools\nimport numpy as np\nfrom collections import defaultdict, namedtuple\n\nEpisodeStats = namedtuple(\"Stats\",[\"episode_lengths\", \"episode_rewards\"])\n\ndef make_epsilon_greedy_policy(Q, epsilon, nA):\n    \"\"\"\n    Creates an epsilon-greedy policy based on a given Q-function and epsilon.\n\n    Args:\n        Q: A dictionary that maps from state -> action-values.\n            Each value is a numpy array of length nA (see below)\n        epsilon: The probability to select a random action. Float between 0 and 1.\n        nA: Number of actions in the environment.\n\n    Returns:\n        A function that takes the observation as an argument and returns\n        the probabilities for each action in the form of a numpy array of length nA.\n    \"\"\"\n\n    def policy_fn(observation):\n        A = np.ones(nA, dtype=float) * epsilon / nA\n        best_action = np.random.choice(np.flatnonzero(Q[observation] == Q[observation].max()))\n        A[best_action] += (1.0 - epsilon)\n        return A\n    return policy_fn\n\ndef dyna_q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1, n=5):\n    \"\"\"\n    Dyna-Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy\n    while following an epsilon-greedy policy\n\n    Args:\n        env: environment.\n        num_episodes: Number of episodes to run for.\n        discount_factor: Lambda time discount factor.\n        alpha: TD learning rate.\n        epsilon: Chance to sample a random action. Float between 0 and 1.\n        n: number of planning steps\n\n    Returns:\n        A tuple (Q, stats).\n        Q is the optimal action-value function, a dictionary mapping state -> action values.\n        stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n    \"\"\"\n\n    # The final action-value function.\n    # A nested dictionary that maps state -> (action -> action-value).\n    Q = defaultdict(lambda: np.zeros(env.nA))\n\n    # The model.\n    # A nested dictionary that maps state -> (action -> (next state, reward, terminal flag)).\n    # model is matrix with size: (nS*nA*3)\n    M = defaultdict(lambda: np.zeros((env.nA, 3)))\n    # list for tracking visited states and actions\n    observed_sa = []\n\n    # Keeps track of useful statistics\n    stats = EpisodeStats(\n        episode_lengths=np.zeros(num_episodes),\n        episode_rewards=np.zeros(num_episodes))\n\n    # The policy we're following\n    policy = make_epsilon_greedy_policy(Q, epsilon, env.nA)\n\n    for i_episode in range(num_episodes):\n        # Print out which episode we're on, useful for debugging.\n        if (i_episode + 1) % 100 == 0:\n            print(\"\\rEpisode {}/{}.\".format(i_episode + 1, num_episodes), end=\"\")\n            sys.stdout.flush()\n\n        # Reset the environment and pick the first action\n        state = env.reset()\n\n        # One step in the environment\n        # total_reward = 0.0\n        for t in itertools.count():\n\n            # Take a step\n            action_probs = policy(state)\n            action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n            next_state, reward, done, _ = env.step(action)\n\n            # Update statistics\n            stats.episode_rewards[i_episode] += reward\n            stats.episode_lengths[i_episode] = t\n\n            # TD Update\n            best_next_action = np.argmax(Q[next_state])\n            td_target = reward + discount_factor * Q[next_state][best_next_action]\n            td_delta = td_target - 
Q[state][action]\n            Q[state][action] += alpha * td_delta\n            # update the model\n            M[state][action] = [next_state, reward, done]\n            if (state, action) not in observed_sa:\n                observed_sa.append((state, action))\n\n            # planning with n steps of simulated experience from the model\n            for i in range(n):\n                state_planned, action_planned = observed_sa[np.random.randint(len(observed_sa))]\n                next_state_p, reward_p, done_p = M[state_planned][action_planned]\n                next_state_p = int(next_state_p)\n                # TD Update on the simulated transition\n                best_next_action = np.argmax(Q[next_state_p])\n                td_target = reward_p + discount_factor * Q[next_state_p][best_next_action]\n                td_delta = td_target - Q[state_planned][action_planned]\n                Q[state_planned][action_planned] += alpha * td_delta\n\n            if done:\n                break\n\n            state = next_state\n\n    return Q, stats\n\nif __name__ == \"__main__\":\n    # GridworldEnv is provided elsewhere in the exercise code base\n    np.random.seed(0)\n    env = GridworldEnv()\n    Q, stats = dyna_q_learning(env, 10000)\n\n    print(\"\")\n    for k,v in Q.items():\n        print(\"%s: %s\" %(k,v.tolist()))\n","sub_path":"exercise_06/scripts/dyna_q_learning.py","file_name":"dyna_q_learning.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"7511500","text":"import random\r\nimport math\r\n\r\nnumbers = {}\r\n\r\nfor i in range(0,10):\r\n    numbers[i] = 0\r\n\r\nfor i in range (1000):\r\n    number = random.random()\r\n    number = math.floor(number * 10)\r\n    value = numbers[number]\r\n    value += 1\r\n    numbers[number] = value\r\n\r\nprint('Random number frequencies:')\r\nfor item in numbers.keys():\r\n    print(f'num {item}, chosen {numbers[item]/1000 * 100:.2f}% time')\r\n\r\n    \r\n    \r\n","sub_path":"dictionaries question 1.py","file_name":"dictionaries question 1.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"468617308","text":"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE.\"\"\"\n# You can also adapt this script on your own text classification task. 
import json\nimport logging\nimport os\nimport random\nimport sys\nsys.path.append(\"../\")\nfrom dataclasses import dataclass, field\nfrom typing import Optional\nfrom definition import ROOT_DIR\nimport re\nimport shutil\nimport datasets\nimport numpy as np\nfrom datasets import load_dataset, load_metric\nimport transformers\nimport transformers.adapters.composition as ac\nfrom transformers import (\n    AdapterConfig,\n    AdapterTrainer,\n    AutoAdapterModel,\n    AutoConfig,\n    AutoTokenizer,\n    DataCollatorWithPadding,\n    EvalPrediction,\n    HfArgumentParser,\n    MultiLingAdapterArguments,\n    PretrainedConfig,\n    Trainer,\n    TrainingArguments,\n    default_data_collator,\n    set_seed,\n    ConfigUnion,\n    ParallelConfig,\n    PrefixTuningConfig,\n    AutoPEFTConfig,\n    LoRAConfig,\n    EarlyStoppingCallback\n)\nfrom transformers.trainer_utils import get_last_checkpoint\nfrom transformers.utils import check_min_version\nfrom transformers.utils.versions import require_version\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.19.0\")\n\nrequire_version(\"datasets>=1.8.0\",\n                \"To fix: pip install -r examples/pytorch/text-classification/requirements.txt\")\n\ntask_to_keys = {\n    \"cola\": (\"sentence\", None),\n    \"mnli\": (\"premise\", \"hypothesis\"),\n    \"mrpc\": (\"sentence1\", \"sentence2\"),\n    \"qnli\": (\"question\", \"sentence\"),\n    \"qqp\": (\"question1\", \"question2\"),\n    \"rte\": (\"sentence1\", \"sentence2\"),\n    \"sst2\": (\"sentence\", None),\n    \"stsb\": (\"sentence1\", \"sentence2\"),\n    \"wnli\": (\"sentence1\", \"sentence2\"),\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef split_datasets(train_ds, n: Optional[int] = None):\n    logger.info(\n        \"Splitting the train set 90/10 into new train/eval sets; \"\n        \"the original eval set is kept for use as the test set.\"\n    )\n    if n is None:\n        n = len(train_ds)\n        logger.info(f\"Using the whole train dataset of {n} samples.\")\n    else:\n        logger.info(f\"Reducing the train dataset to only {n} samples.\")\n\n    split_at = int(n * 0.90)\n    train_ds = train_ds.shuffle()\n    new_eval_ds = train_ds.select(range(split_at, n))\n    new_train_ds = train_ds.select(range(split_at))\n    return new_train_ds, new_eval_ds\n\n\n@dataclass\nclass DataTrainingArguments:\n    \"\"\"\n    Arguments pertaining to what data we are going to input our model for training and eval.\n\n    Using `HfArgumentParser` we can turn this class\n    into argparse arguments to be able to specify them on\n    the command line.\n    \"\"\"\n    local_dataset_path: str = field(\n        metadata={\"help\": \"load the local copy of the dataset\"}\n    )\n    patience: int = field(\n        default=10,\n        metadata={\n            \"help\": \"the number of evaluations to wait for improvement before early stopping\"\n        },\n    )\n    resplit_dataset: Optional[bool] = field(\n        default=False,\n        metadata={\"help\": \"Whether to resplit the dataset.\"},\n    )\n    task_name: Optional[str] = field(\n        default=None,\n        metadata={\"help\": \"The name of the task to train on: \" +\n                  \", \".join(task_to_keys.keys())},\n    )\n    dataset_name: Optional[str] = field(\n        default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n    )\n    dataset_config_name: Optional[str] = field(\n        default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n    )\n    max_seq_length: int = field(\n        default=128,\n        metadata={\n            \"help\": \"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached preprocessed datasets or not.\"}\n )\n pad_to_max_length: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to pad all samples to `max_seq_length`. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n },\n )\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"A csv or a json file containing the training data.\"}\n )\n validation_file: Optional[str] = field(\n default=None, metadata={\"help\": \"A csv or a json file containing the validation data.\"}\n )\n test_file: Optional[str] = field(default=None, metadata={\n \"help\": \"A csv or a json file containing the test data.\"})\n\n def __post_init__(self):\n if self.task_name is not None:\n self.task_name = self.task_name.lower()\n if self.task_name not in task_to_keys.keys():\n raise ValueError(\n \"Unknown task, you should pick one in \" + \",\".join(task_to_keys.keys()))\n elif self.dataset_name is not None:\n pass\n elif self.train_file is None or self.validation_file is None:\n raise ValueError(\n \"Need either a GLUE task, a training/validation file or a dataset name.\")\n else:\n train_extension = self.train_file.split(\".\")[-1]\n assert train_extension in [\n \"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n validation_extension = self.validation_file.split(\".\")[-1]\n assert (\n validation_extension == train_extension\n ), \"`validation_file` should have the same extension (csv or json) as `train_file`.\"\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n adapter_name: str = field(\n metadata={\"help\": \"The name of the adapter to use.\"}\n )\n nas_adapter_config_path: str = field(\n metadata={\"help\": \"nas_adapter config path\"}\n )\n model_name_or_path: str = field(\n metadata={\n \"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\n \"help\": \"The specific model version to use (can be a 
branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\ndef get_all_checkpoint(folder):\n PREFIX_CHECKPOINT_DIR = \"checkpoint\"\n _re_checkpoint = re.compile(r\"^\" + PREFIX_CHECKPOINT_DIR + r\"\\-(\\d+)$\")\n content = os.listdir(folder)\n checkpoints = [\n path\n for path in content\n if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))\n ]\n if len(checkpoints) == 0:\n return\n return checkpoints\n\n\ndefault_arg = {\n \"non_linearity\": \"relu\",\n \"residual_before_ln\": True, # default is True, PA is true, previous exps False\n \"adapter_residual_before_ln\": False,\n \"ln_after\": False,\n \"ln_before\": False,\n \"reduction_factor\": 64,\n \"leave_out\": [],\n \"mh_adapter\": False,\n \"output_adapter\": True,\n \"original_ln_before\": True,\n \"original_ln_after\": True,\n \"is_parallel\": False,\n # \"scaling\": 1.0,\n}\n\ndefault_prefix_arg = {\n}\n\ndefault_mam_arg = {\n}\n\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser(\n (ModelArguments, DataTrainingArguments, TrainingArguments, MultiLingAdapterArguments))\n\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args, adapter_args = parser.parse_json_file(\n json_file=os.path.abspath(sys.argv[1])\n )\n else:\n model_args, data_args, training_args, adapter_args = parser.parse_args_into_dataclasses()\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n training_args.evaluation_strategy = training_args.logging_strategy\n training_args.eval_steps = training_args.logging_steps\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)\n # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the\n # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named\n # label if at least two columns are provided.\n #\n # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this\n # single column. You can easily tweak this behavior (see below)\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n raw_datasets = datasets.load_from_disk(data_args.local_dataset_path)\n\n # Labels\n if data_args.task_name is not None:\n is_regression = data_args.task_name == \"stsb\"\n if not is_regression:\n label_list = raw_datasets[\"train\"].features[\"label\"].names\n num_labels = len(label_list)\n else:\n num_labels = 1\n else:\n # Trying to have good defaults here, don't hesitate to tweak to your needs.\n is_regression = raw_datasets[\"train\"].features[\"label\"].dtype in [\n \"float32\", \"float64\"]\n if is_regression:\n num_labels = 1\n else:\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n label_list = raw_datasets[\"train\"].unique(\"label\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # We use the AutoAdapterModel class here for better adapter support.\n model = AutoAdapterModel.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model_param_dict = {}\n model_param_dict['model'] = model.num_parameters()\n logger.info(\n f\"model number of parameters before heads{model.num_parameters()}\")\n\n model.add_classification_head(\n data_args.task_name or \"glue\",\n num_labels=num_labels,\n id2label={i: v for i, v in enumerate(\n label_list)} if not is_regression else None,\n )\n logger.info(\n f\"model number of parameters before adapter{model.num_parameters()}\")\n model_param_dict['w. 
\n    # Setup adapters\n    if adapter_args.train_adapter:\n        task_name = data_args.task_name or \"glue\"\n        # random_seed = int(np.random.randint(1000))\n        logger.info(f\"random seed for adapter training: {training_args.seed}\")\n        with open(model_args.nas_adapter_config_path) as f:\n            random_args = json.load(f)\n\n        leave_out_list = []\n        number_layer = 12\n        if 'large' in model_args.model_name_or_path:\n            number_layer = 24\n        for i in range(0, number_layer):\n            if random_args[f\"leave_out_{i}\"]:\n                leave_out_list.append(int(i))\n            del random_args[f\"leave_out_{i}\"]\n        random_args[\"leave_out\"] = leave_out_list\n\n        # prefix tuning only\n        if model_args.adapter_name == 'prefix':\n            random_args['prefix_length'] = random_args['reduction_factor']\n            del random_args['reduction_factor']\n            default_arg = default_prefix_arg.copy()\n\n        if model_args.adapter_name == 'pfeiffer':\n            default_arg = default_mam_arg.copy()\n\n        if model_args.adapter_name == 'mam':\n            if random_args['reduction_prefix'] == 512:\n                random_args['prefix_length'] = 1\n            else:\n                random_args['prefix_length'] = 768 / \\\n                    random_args['reduction_prefix']\n            if random_args['reduction_factor'] == 512:\n                random_args['reduction_factor'] = 768\n            del random_args['reduction_prefix']\n            default_arg = default_mam_arg.copy()\n\n        if model_args.adapter_name == 'unipelt':\n            if random_args['reduction_prefix'] == 512:\n                random_args['prefix_length'] = 1\n            else:\n                random_args['prefix_length'] = 768 / \\\n                    random_args['reduction_prefix']\n            if random_args['reduction_factor'] == 512:\n                random_args['reduction_factor'] = 768\n            del random_args['reduction_prefix']\n            random_args['r'] = 64 / random_args['reduction_rank']\n            del random_args['reduction_rank']\n            default_arg = default_mam_arg.copy()\n\n        if model_args.adapter_name == 'sapa':\n            if random_args['reduction_prefix'] == 512:\n                random_args['reduction_serial'] = 768\n            else:\n                random_args['reduction_serial'] = random_args['reduction_prefix']\n            if random_args['reduction_factor'] == 512:\n                random_args['reduction_factor'] = 768\n            del random_args['reduction_prefix']\n            default_arg = default_mam_arg.copy()\n\n        if model_args.adapter_name == 'sappa':\n            exclude_pa = False\n            exclude_sa = False\n            exclude_prefix = False\n            if random_args['reduction_prefix'] == 512:\n                random_args['prefix_length'] = 1\n            else:\n                random_args['prefix_length'] = 768 / \\\n                    random_args['reduction_prefix']\n            if random_args['reduction_factor'] == 512:\n                random_args['reduction_factor'] = 768\n            if random_args['reduction_serial'] == 512:\n                random_args['reduction_serial'] = 768\n            del random_args['reduction_prefix']\n            if random_args['reduction_serial'] > 768:\n                random_args['reduction_serial'] = 768\n                exclude_sa = True\n            if random_args['reduction_factor'] > 768:\n                random_args['reduction_factor'] = 768\n                exclude_pa = True\n            if random_args['prefix_length'] < 1:\n                random_args['prefix_length'] = 1\n                exclude_prefix = True\n            default_arg = default_mam_arg.copy()\n\n        default_arg.update(random_args)\n        logger.info(f\"random_args {random_args}\")\n        logger.info(f\"overall_args {default_arg}\")\n        if model_args.adapter_name == 'prefix':\n            adapter_config = transformers.PrefixTuningConfig(**default_arg)\n\n        if model_args.adapter_name == 'pfeiffer':\n            adapter_config = transformers.AutoPEFTConfig(reduction_factor=default_arg['reduction_factor'], leave_out=default_arg['leave_out'], is_sa_alone=True)\n\n        if model_args.adapter_name == 'mam':\n            parallel_flag = default_arg['reduction_factor'] <= 768\n            prefix_flag = default_arg['prefix_length'] >= 1\n            config_flag_list = 
[prefix_flag, parallel_flag]\n config_list = [PrefixTuningConfig(prefix_length=int(default_arg['prefix_length']), bottleneck_size=800, leave_out=default_arg['leave_out']),\n ParallelConfig(\n reduction_factor=default_arg['reduction_factor'], leave_out=default_arg['leave_out']),\n ]\n adapter_config = ConfigUnion(\n *[config_list[i] for i in range(len(config_list)) if config_flag_list[i]])\n\n if model_args.adapter_name == 'unipelt':\n parallel_flag = default_arg['reduction_factor'] <= 768\n prefix_flag = default_arg['prefix_length'] >= 1\n lora_flag = default_arg['r'] >= 1\n config_flag_list = [prefix_flag, parallel_flag, lora_flag]\n config_list = [PrefixTuningConfig(prefix_length=int(default_arg['prefix_length']), bottleneck_size=800, leave_out=default_arg['leave_out']),\n ParallelConfig(\n reduction_factor=default_arg['reduction_factor'], leave_out=default_arg['leave_out']),\n LoRAConfig(r=int(default_arg['r'])),\n ]\n adapter_config = ConfigUnion(\n *[config_list[i] for i in range(len(config_list)) if config_flag_list[i]])\n\n if model_args.adapter_name == 'sapa':\n if default_arg['reduction_serial'] > 768 and default_arg['reduction_factor'] <= 768:\n adapter_config = ParallelConfig(\n reduction_factor=default_arg['reduction_factor'], leave_out=default_arg['leave_out'])\n elif default_arg['reduction_serial'] <= 768 and default_arg['reduction_factor'] > 768:\n adapter_config = AutoPEFTConfig(\n reduction_factor=int(default_arg['reduction_serial']))\n else:\n adapter_config = transformers.AutoPEFTConfig(reduction_factor=int(\n default_arg['reduction_serial']), leave_out=default_arg['leave_out'])\n model.add_adapter('sa', config=adapter_config)\n adapter_config = transformers.ParallelConfig(\n reduction_factor=default_arg['reduction_factor'], leave_out=default_arg['leave_out'])\n model.add_adapter('pa', config=adapter_config)\n model.active_adapters = ac.Aggregate(\"sa\", \"pa\")\n # adapter_config = AdapterConfig(adapter_residual_before_ln=True, is_parallel=False, ln_after=True, ln_before=True, mh_adapter=True, non_linearity='tanh', original_ln_after=True, original_ln_before=True, output_adapter=True, reduction_factor=32, residual_before_ln=True, scaling=2.0)\n # model.add_adapter(task_name, config=adapter_config)\n if model_args.adapter_name == 'sappa':\n if not exclude_pa and not exclude_sa and not exclude_prefix:\n adapter_config = ConfigUnion(\n transformers.PrefixTuningConfig(prefix_length=int(\n default_arg['prefix_length']), bottleneck_size=800, leave_out=default_arg['leave_out']),\n transformers.AutoPEFTConfig(\n reduction_factor=default_arg['reduction_serial'], reduction_factor_pa=default_arg['reduction_factor'], leave_out=default_arg['leave_out']),\n )\n if not exclude_pa and not exclude_sa and exclude_prefix:\n adapter_config = ConfigUnion(\n transformers.AutoPEFTConfig(\n reduction_factor=default_arg['reduction_serial'], reduction_factor_pa=default_arg['reduction_factor'], leave_out=default_arg['leave_out']),\n )\n if not exclude_pa and exclude_sa and not exclude_prefix:\n adapter_config = ConfigUnion(\n transformers.PrefixTuningConfig(prefix_length=int(\n default_arg['prefix_length']), bottleneck_size=800, leave_out=default_arg['leave_out']),\n transformers.AutoPEFTConfig(\n reduction_factor_pa=default_arg['reduction_factor'], leave_out=default_arg['leave_out'], is_pa_alone=True),\n )\n if not exclude_pa and exclude_sa and exclude_prefix:\n adapter_config = ConfigUnion(\n transformers.AutoPEFTConfig(\n reduction_factor_pa=default_arg['reduction_factor'], 
leave_out=default_arg['leave_out'], is_pa_alone=True),\n            )\n        if exclude_pa and not exclude_sa and not exclude_prefix:\n            adapter_config = ConfigUnion(\n                transformers.PrefixTuningConfig(prefix_length=int(\n                    default_arg['prefix_length']), bottleneck_size=800, leave_out=default_arg['leave_out']),\n                transformers.AutoPEFTConfig(\n                    reduction_factor=default_arg['reduction_serial'], leave_out=default_arg['leave_out'], is_sa_alone=True),\n            )\n        if exclude_pa and not exclude_sa and exclude_prefix:\n            adapter_config = ConfigUnion(\n                transformers.AutoPEFTConfig(\n                    reduction_factor=default_arg['reduction_serial'], leave_out=default_arg['leave_out'], is_sa_alone=True),\n            )\n        if exclude_pa and exclude_sa and not exclude_prefix:\n            adapter_config = ConfigUnion(\n                transformers.PrefixTuningConfig(prefix_length=int(\n                    default_arg['prefix_length']), bottleneck_size=800, leave_out=default_arg['leave_out']),\n            )\n\n        if model_args.adapter_name == 'sapa':\n            if default_arg['reduction_serial'] > 768 and default_arg['reduction_factor'] <= 768:\n                model.add_adapter(task_name, config=adapter_config)\n                model.train_adapter(task_name)\n                model.set_active_adapters(task_name)\n            elif default_arg['reduction_serial'] <= 768 and default_arg['reduction_factor'] > 768:\n                model.add_adapter(task_name, config=adapter_config)\n                model.train_adapter(task_name)\n                model.set_active_adapters(task_name)\n            else:\n                model.train_adapter(ac.Aggregate(\"sa\", \"pa\"))\n        else:\n            model.add_adapter(task_name, config=adapter_config)\n            model.train_adapter(task_name)\n            model.set_active_adapters(task_name)\n\n    else:\n        if adapter_args.load_adapter or adapter_args.load_lang_adapter:\n            raise ValueError(\n                \"Adapters can only be loaded in adapter training mode. \"\n                \"Use --train_adapter to enable adapter training.\"\n            )\n    logger.info(\n        f\"model number of parameters after adapter: {model.num_parameters()}\")\n    model_param_dict['w. heads & adapter'] = model.num_parameters()\n    model_param_dict['heads'] = model_param_dict['w. heads'] - \\\n        model_param_dict['model']\n    model_param_dict['adapter'] = model_param_dict['w. heads & adapter'] - \\\n        model_param_dict['w. heads']\n
\n    # make the output dir if it does not exist\n    if not os.path.exists(training_args.output_dir):\n        os.makedirs(training_args.output_dir)\n    with open(os.path.join(training_args.output_dir, \"model_param_dict.json\"), \"w\", encoding='utf8') as f:\n        json.dump(model_param_dict, f, indent=2, ensure_ascii=False)\n    # Preprocessing the raw_datasets\n    if data_args.task_name is not None:\n        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]\n    else:\n        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.\n        non_label_column_names = [\n            name for name in raw_datasets[\"train\"].column_names if name != \"label\"]\n        if \"sentence1\" in non_label_column_names and \"sentence2\" in non_label_column_names:\n            sentence1_key, sentence2_key = \"sentence1\", \"sentence2\"\n        else:\n            if len(non_label_column_names) >= 2:\n                sentence1_key, sentence2_key = non_label_column_names[:2]\n            else:\n                sentence1_key, sentence2_key = non_label_column_names[0], None\n\n    # Padding strategy\n    if data_args.pad_to_max_length:\n        padding = \"max_length\"\n    else:\n        # We will pad later, dynamically at batch creation, to the max sequence length in each batch\n        padding = False\n\n    # Some models have set the order of the labels to use, so let's make sure we do use it.\n    label_to_id = None\n    if (\n        model.config.label2id != PretrainedConfig(\n            num_labels=num_labels).label2id\n        and data_args.task_name is not None\n        and not is_regression\n    ):\n        # Some have all caps in their config, some don't.\n        label_name_to_id = {\n            k.lower(): v for k, v in model.config.label2id.items()}\n        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):\n            label_to_id = {\n                i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}\n        else:\n            logger.warning(\n                \"Your model seems to have been trained with labels, but they don't match the dataset: \"\n                f\"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.\"\n                \"\\nIgnoring the model labels as a result.\"\n            )\n    elif data_args.task_name is None and not is_regression:\n        label_to_id = {v: i for i, v in enumerate(label_list)}\n\n    if label_to_id is not None:\n        model.config.label2id = label_to_id\n        model.config.id2label = {\n            id: label for label, id in config.label2id.items()}\n    elif data_args.task_name is not None and not is_regression:\n        model.config.label2id = {l: i for i, l in enumerate(label_list)}\n        model.config.id2label = {\n            id: label for label, id in config.label2id.items()}\n\n    if data_args.max_seq_length > tokenizer.model_max_length:\n        logger.warning(\n            f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the \"\n            f\"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.\"\n        )\n
Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n def preprocess_function(examples):\n # Tokenize the texts\n args = (\n (examples[sentence1_key],) if sentence2_key is None else (\n examples[sentence1_key], examples[sentence2_key])\n )\n result = tokenizer(*args, padding=padding,\n max_length=max_seq_length, truncation=True)\n\n # Map labels to IDs (not necessary for GLUE tasks)\n if label_to_id is not None and \"label\" in examples:\n result[\"label\"] = [(label_to_id[l] if l != -1 else -1)\n for l in examples[\"label\"]]\n return result\n\n with training_args.main_process_first(desc=\"dataset map pre-processing\"):\n raw_datasets = raw_datasets.map(\n preprocess_function,\n batched=True,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on dataset\",\n )\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n if data_args.max_train_samples is not None:\n max_train_samples = min(\n len(train_dataset), data_args.max_train_samples)\n train_dataset = train_dataset.select(range(max_train_samples))\n\n if training_args.do_eval:\n if \"validation\" not in raw_datasets and \"validation_matched\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation_matched\" if data_args.task_name ==\n \"mnli\" else \"validation\"]\n if data_args.max_eval_samples is not None:\n max_eval_samples = min(\n len(eval_dataset), data_args.max_eval_samples)\n eval_dataset = eval_dataset.select(range(max_eval_samples))\n\n if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:\n if \"test\" not in raw_datasets and \"test_matched\" not in raw_datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n predict_dataset = raw_datasets[\"test_matched\" if data_args.task_name ==\n \"mnli\" else \"test\"]\n if data_args.max_predict_samples is not None:\n max_predict_samples = min(\n len(predict_dataset), data_args.max_predict_samples)\n predict_dataset = predict_dataset.select(\n range(max_predict_samples))\n\n if data_args.resplit_dataset:\n print('original length of training dataset: ',\n len(raw_datasets[\"train\"]))\n # print('original length of eval dataset: ', len(raw_datasets[\"validation\"]))\n train_dataset, eval_dataset = split_datasets(raw_datasets[\"train\"])\n print('length of training dataset after resplit: ', len(train_dataset))\n print('length of eval dataset after resplit: ', len(eval_dataset))\n predict_dataset = raw_datasets[\"validation_matched\" if data_args.task_name ==\n \"mnli\" else \"validation\"]\n\n # temperal script for low fidelity experiments\n # n = len(raw_datasets[\"train\"])\n # split_at = int(n * 0.01)\n # train_ds = raw_datasets[\"train\"].shuffle()\n # train_dataset = train_ds.select(range(split_at))\n # print('length of training dataset after resplit: ', len(train_dataset))\n\n # Log a few random samples from the training set:\n # Log a few random samples from the training set:\n if training_args.do_train:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(\n f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # Get the metric function\n if data_args.task_name is not None:\n metric = load_metric(\n f\"{ROOT_DIR}/glue_metrics.py\", data_args.task_name)\n else:\n metric = 
load_metric(\"accuracy\")\n\n # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a\n # predictions and label_ids field) and has to return a dictionary string to float.\n def compute_metrics(p: EvalPrediction):\n preds = p.predictions[0] if isinstance(\n p.predictions, tuple) else p.predictions\n preds = np.squeeze(\n preds) if is_regression else np.argmax(preds, axis=1)\n if data_args.task_name is not None:\n result = metric.compute(predictions=preds, references=p.label_ids)\n if len(result) > 1:\n result[\"combined_score\"] = np.mean(\n list(result.values())).item()\n return result\n elif is_regression:\n return {\"mse\": ((preds - p.label_ids) ** 2).mean().item()}\n else:\n return {\"accuracy\": (preds == p.label_ids).astype(np.float32).mean().item()}\n\n # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if\n # we already did the padding.\n if data_args.pad_to_max_length:\n data_collator = default_data_collator\n elif training_args.fp16:\n data_collator = DataCollatorWithPadding(\n tokenizer, pad_to_multiple_of=8)\n else:\n data_collator = None\n\n # Initialize our Trainer\n trainer_class = AdapterTrainer if adapter_args.train_adapter else Trainer\n training_args.load_best_model_at_end = True\n training_args.metric_for_best_model = 'accuracy'\n\n if data_args.task_name == 'cola':\n training_args.metric_for_best_model = 'eval_matthews_correlation'\n if data_args.task_name == 'stsb':\n training_args.metric_for_best_model = 'eval_spearmanr'\n training_args.greater_is_better = True\n trainer = trainer_class(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n compute_metrics=compute_metrics,\n tokenizer=tokenizer,\n data_collator=data_collator,\n callbacks=[EarlyStoppingCallback(\n early_stopping_patience=data_args.patience)]\n )\n\n # Training\n if training_args.do_train:\n checkpoint = None\n if training_args.resume_from_checkpoint is not None:\n checkpoint = training_args.resume_from_checkpoint\n elif last_checkpoint is not None:\n checkpoint = last_checkpoint\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n metrics = train_result.metrics\n max_train_samples = (\n data_args.max_train_samples if data_args.max_train_samples is not None else len(\n train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n tasks = [data_args.task_name]\n eval_datasets = [eval_dataset]\n if data_args.task_name == \"mnli\":\n tasks.append(\"mnli-mm\")\n eval_datasets.append(raw_datasets[\"validation_mismatched\"])\n combined = {}\n\n for eval_dataset, task in zip(eval_datasets, tasks):\n metrics = trainer.evaluate(eval_dataset=eval_dataset)\n\n max_eval_samples = (\n data_args.max_eval_samples if data_args.max_eval_samples is not None else len(\n eval_dataset)\n )\n metrics[\"eval_samples\"] = min(max_eval_samples, len(eval_dataset))\n\n if task == \"mnli-mm\":\n metrics = {k + \"_mm\": v for k, v in metrics.items()}\n if task is not None and \"mnli\" in task:\n combined.update(metrics)\n\n 
trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\n \"eval\", combined if task is not None and \"mnli\" in task else metrics)\n\n if training_args.do_predict and data_args.resplit_dataset:\n logger.info(\"*** test ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n tasks = [data_args.task_name]\n predict_datasets = [predict_dataset]\n if data_args.task_name == \"mnli\":\n tasks.append(\"mnli-mm\")\n predict_datasets.append(raw_datasets[\"validation_mismatched\"])\n combined = {}\n\n for predict_dataset, task in zip(predict_datasets, tasks):\n metrics = trainer.evaluate(eval_dataset=predict_dataset)\n\n max_eval_samples = (\n data_args.max_eval_samples if data_args.max_eval_samples is not None else len(\n predict_dataset)\n )\n metrics[\"test_samples\"] = min(\n max_eval_samples, len(predict_dataset))\n\n if task == \"mnli-mm\":\n metrics = {k + \"_mm\": v for k, v in metrics.items()}\n if task is not None and \"mnli\" in task:\n combined.update(metrics)\n\n trainer.log_metrics(\"test\", metrics)\n trainer.save_metrics(\n \"test\", combined if task is not None and \"mnli\" in task else metrics)\n\n if training_args.do_predict and not data_args.resplit_dataset:\n logger.info(\"*** Predict ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n tasks = [data_args.task_name]\n predict_datasets = [predict_dataset]\n if data_args.task_name == \"mnli\":\n tasks.append(\"mnli-mm\")\n predict_datasets.append(raw_datasets[\"test_mismatched\"])\n\n for predict_dataset, task in zip(predict_datasets, tasks):\n # Removing the `label` columns because it contains -1 and Trainer won't like that.\n predict_dataset = predict_dataset.remove_columns(\"label\")\n predictions = trainer.predict(\n predict_dataset, metric_key_prefix=\"predict\").predictions\n predictions = np.squeeze(\n predictions) if is_regression else np.argmax(predictions, axis=1)\n\n output_predict_file = os.path.join(\n training_args.output_dir, f\"predict_results_{task}.txt\")\n if trainer.is_world_process_zero():\n with open(output_predict_file, \"w\") as writer:\n logger.info(f\"***** Predict results {task} *****\")\n writer.write(\"index\\tprediction\\n\")\n for index, item in enumerate(predictions):\n if is_regression:\n writer.write(f\"{index}\\t{item:3.3f}\\n\")\n else:\n item = label_list[item]\n writer.write(f\"{index}\\t{item}\\n\")\n\n kwargs = {\"finetuned_from\": model_args.model_name_or_path,\n \"tasks\": \"text-classification\"}\n if data_args.task_name is not None:\n kwargs[\"language\"] = \"en\"\n kwargs[\"dataset_tags\"] = \"glue\"\n kwargs[\"dataset_args\"] = data_args.task_name\n kwargs[\"dataset\"] = f\"GLUE {data_args.task_name.upper()}\"\n\n if training_args.push_to_hub:\n trainer.push_to_hub(**kwargs)\n else:\n trainer.create_model_card(**kwargs)\n all_checkpoints = get_all_checkpoint(training_args.output_dir)\n for checkpoint in all_checkpoints:\n last_checkpoint = os.path.join(training_args.output_dir, checkpoint)\n shutil.rmtree(last_checkpoint, ignore_errors=True)\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"adapterhub/nas_search.py","file_name":"nas_search.py","file_ext":"py","file_size_in_byte":42640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"27102353","text":"\"\"\"\"Distribution setup\"\"\"\n\n\nimport os\n\nfrom setuptools import setup\n\nROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\"README.md\", \"r\") 
as fh:\n LONG_DESCRIPTION = fh.read()\n\nwith open(os.path.join(ROOT, \"VERSION\")) as version_file:\n VERSION = version_file.read().strip()\n\n\nsetup(\n name=\"pyFAST\",\n description=\"pyFAST\",\n long_description=LONG_DESCRIPTION,\n version=VERSION,\n url=\"https://github.com/openfast/python-toolbox/\",\n classifiers=[\n \"Topic :: Utilities\",\n \"Topic :: Software Development :: Testing\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Version Control :: Git\",\n ],\n packages=[\"pyFAST\"],\n python_requires=\">=3.6\",\n install_requires=[\n \"numpy>=1.15.2\",\n \"pandas\",\n \"matplotlib\",\n \"chardet\",\n \"scipy\",\n \"sympy\",\n \"openpyxl\",\n \"pytest\"\n ],\n test_suite=\"pytest\",\n tests_require=[\"pytest\"],\n entry_points={\"console_scripts\": [\"pyFAST = pyFAST.__main__:main\"]},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"648167699","text":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom train import caffe\nfrom caffe.proto import caffe_pb2\n\nParams = {\n # Train parameters\n 'base_lr': 0.01,\n 'weight_decay': 0.0005,\n # 'lr_policy': \"fixed\",\n 'lr_policy': \"multistep\",\n 'stepvalue': [120000, 240000],\n 'gamma': 0.1,\n 'momentum': 0.9,\n 'iter_size': 1,\n 'max_iter': 240000,\n 'snapshot': 50000,\n 'display': 40,\n 'average_loss': 40,\n 'type': \"SGD\",\n# 'solver_mode': None,\n# 'device_id': None,\n 'debug_info': False,\n 'snapshot_after_train': True,\n # Test parameters\n 'test_iter': None,\n 'test_interval': 10000,\n 'eval_type': \"detection\",\n 'ap_version': \"11point\",\n 'test_initialization': False,\n 'show_per_class_result': True,\n }\n\n\n\ndef create():\n #assert os.path.exists(os.path.dirname(Params['snapshot_prefix']))\n solver = caffe_pb2.SolverParameter(**Params)\n\n return solver\n","sub_path":"vision-detector_wwl/train/ssd/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}